code (stringlengths, 20-4.93k) | docstring (stringlengths, 33-1.27k) | source (stringclasses, 3 values)
---|---|---|
def alias_tags(tags_list, alias_map):
def _alias_dict(tags):
tags_ = [alias_map.get(t, t) for t in tags]
return list(set([t for t in tags_ if (t is not None)]))
tags_list_ = [_alias_dict(tags) for tags in tags_list]
return tags_list_
|
Update tags to new values
Args:
tags_list (list):
alias_map (dict): mapping from old tag to new tag; a value of None removes the tag
Returns:
list: updated tags
CommandLine:
python -m utool.util_tags alias_tags --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_tags import * # NOQA
>>> import utool as ut
>>> tags_list = [['t1', 't2'], [], ['t3'], ['t4', 't5']]
>>> alias_map = {'t5': 't4', 't3': None}
>>> result = alias_tags(tags_list, alias_map)
>>> print(result)
|
codesearchnet
|
def taubin(script, iterations=10, t_lambda=0.5, t_mu=(- 0.53), selected=False):
filter_xml = ''.join([' <filter name="Taubin Smooth">\n', ' <Param name="lambda" ', 'value="{}" '.format(t_lambda), 'description="Lambda" ', 'type="RichFloat" ', '/>\n', ' <Param name="mu" ', 'value="{}" '.format(t_mu), 'description="mu" ', 'type="RichFloat" ', '/>\n', ' <Param name="stepSmoothNum" ', 'value="{:d}" '.format(iterations), 'description="Smoothing steps" ', 'type="RichInt" ', '/>\n', ' <Param name="Selected" ', 'value="{}" '.format(str(selected).lower()), 'description="Affect only selected faces" ', 'type="RichBool" ', '/>\n', ' </filter>\n'])
util.write_filter(script, filter_xml)
return None
|
The lambda & mu Taubin smoothing performs two smoothing steps, forward
and back, for each iteration.
Based on:
Gabriel Taubin
"A signal processing approach to fair surface design"
Siggraph 1995
Args:
script: the FilterScript object or script filename to write
the filter to.
iterations (int): The number of times that the taubin smoothing is
iterated. Usually it requires a larger number of iteration than the
classical laplacian.
t_lambda (float): The lambda parameter of the Taubin Smoothing algorithm
t_mu (float): The mu parameter of the Taubin Smoothing algorithm
selected (bool): If selected the filter is performed only on the
selected faces
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
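Illustrative call (a sketch; assumes `script` is an existing FilterScript object or filename):
>>> taubin(script, iterations=20, t_lambda=0.5, t_mu=-0.53)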
|
codesearchnet
|
def get_channel_dimension_axis(image: np.ndarray, input_data_format: Optional[Union[ChannelDimension, str]]=None) -> int:
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
if input_data_format == ChannelDimension.FIRST:
return image.ndim - 3
elif input_data_format == ChannelDimension.LAST:
return image.ndim - 1
raise ValueError(f'Unsupported data format: {input_data_format}')
|
Returns the channel dimension axis of the image.
Args:
image (`np.ndarray`):
The image to get the channel dimension axis of.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the image. If `None`, will infer the channel dimension from the image.
Returns:
The channel dimension axis of the image.
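Illustrative usage (a sketch; assumes a channels-first NumPy array):
>>> image = np.zeros((3, 224, 224))
>>> get_channel_dimension_axis(image, input_data_format=ChannelDimension.FIRST)
0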
|
github-repos
|
def CopyToDateTimeString(self):
if (self._timestamp is None):
return None
(number_of_days, hours, minutes, seconds) = self._GetTimeValues(int(self._timestamp))
(year, month, day_of_month) = self._GetDateValuesWithEpoch(number_of_days, self._EPOCH)
microseconds = int(((self._timestamp % 1) * definitions.MICROSECONDS_PER_SECOND))
return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}.{6:06d}'.format(year, month, day_of_month, hours, minutes, seconds, microseconds)
|
Copies the Cocoa timestamp to a date and time string.
Returns:
str: date and time value formatted as: YYYY-MM-DD hh:mm:ss.###### or
None if the timestamp cannot be copied to a date and time string.
|
codesearchnet
|
def look_up(self, **keys: Dict[InstanceName, ScalarValue]) -> "ArrayEntry":
if not isinstance(self.schema_node, ListNode):
raise InstanceValueError(self.json_pointer(), "lookup on non-list")
try:
for i in range(len(self.value)):
en = self.value[i]
flag = True
for k in keys:
if en[k] != keys[k]:
flag = False
break
if flag:
return self._entry(i)
raise NonexistentInstance(self.json_pointer(), "entry lookup failed")
except KeyError:
raise NonexistentInstance(self.json_pointer(), "entry lookup failed") from None
except TypeError:
raise InstanceValueError(self.json_pointer(), "lookup on non-list") from None
|
Return the entry with matching keys.
Args:
keys: Keys and values specified as keyword arguments.
Raises:
InstanceValueError: If the receiver's value is not a YANG list.
NonexistentInstance: If no entry with matching keys exists.
|
juraj-google-style
|
def from_service_account_file(cls, filename, **kwargs):
info, signer = _service_account_info.from_filename(
filename, require=['client_email', 'token_uri'])
return cls._from_signer_and_info(signer, info, **kwargs)
|
Creates a Credentials instance from a service account json file.
Args:
filename (str): The path to the service account json file.
kwargs: Additional arguments to pass to the constructor.
Returns:
google.auth.service_account.Credentials: The constructed
credentials.
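Illustrative usage (a sketch; 'service-account.json' is a placeholder path):
>>> credentials = service_account.Credentials.from_service_account_file('service-account.json')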
|
juraj-google-style
|
def _fdopen(self, *args, **kwargs):
if not is_int_type(args[0]):
raise TypeError('an integer is required')
return FakeFileOpen(self.filesystem)(*args, **kwargs)
|
Redirector to open() builtin function.
Args:
*args: Pass through args.
**kwargs: Pass through kwargs.
Returns:
File object corresponding to file_des.
Raises:
TypeError: if file descriptor is not an integer.
|
juraj-google-style
|
def assertArrayNear(self, farray1, farray2, err, msg=None):
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
|
Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
|
github-repos
|
def set_window_size(self, width, height, window_handle='current'):
self._execute(Command.SET_WINDOW_SIZE, {
'width': int(width),
'height': int(height),
'window_handle': window_handle})
|
Sets the width and height of the current window.
Support:
Web(WebView)
Args:
width(int): the width in pixels.
height(int): the height in pixels.
window_handle(str): Identifier of window_handle,
default to 'current'.
Returns:
WebDriver Object.
|
juraj-google-style
|
def matches_alias(self, alias: str) -> bool:
del self
del alias
return False
|
Indicates whether the expression will be selected as the given alias.
Intended to be over-ridden by sub-classes which can safely implement it.
Given an expression and an alias, indicates whether the expression will be
SELECT'd as the given alias. For example, an expression like `SELECT a.b`
matches the alias 'b', making it equivalent to the expression
`SELECT a.b AS b`.
Args:
alias: The alias to compare the expression against.
Returns:
True when the expression evaluates to the same name as the alias and False
otherwise.
|
github-repos
|
def get_patched_request(requires, patchlist):
rules = {'': (True, True, True), '!': (False, False, False), '~': (False, False, True), '^': (True, True, True)}
requires = [(Requirement(x) if (not isinstance(x, Requirement)) else x) for x in requires]
appended = []
for patch in patchlist:
if (patch and (patch[0] in ('!', '~', '^'))):
ch = patch[0]
name = Requirement(patch[1:]).name
else:
ch = ''
name = Requirement(patch).name
rule = rules[ch]
replaced = (ch == '^')
for (i, req) in enumerate(requires):
if ((req is None) or (req.name != name)):
continue
if (not req.conflict):
replace = rule[0]
elif (not req.weak):
replace = rule[1]
else:
replace = rule[2]
if replace:
if replaced:
requires[i] = None
else:
requires[i] = Requirement(patch)
replaced = True
if (not replaced):
appended.append(Requirement(patch))
result = ([x for x in requires if (x is not None)] + appended)
return result
|
Apply patch args to a request.
For example, consider:
>>> print get_patched_request(["foo-5", "bah-8.1"], ["foo-6"])
["foo-6", "bah-8.1"]
>>> print get_patched_request(["foo-5", "bah-8.1"], ["^bah"])
["foo-5"]
The following rules apply wrt how normal/conflict/weak patches override
(note though that the new request is always added, even if it doesn't
override an existing request):
PATCH OVERRIDES: foo !foo ~foo
----- ---------- --- ---- -----
foo Y Y Y
!foo N N N
~foo N N Y
^foo Y Y Y
Args:
requires (list of str or `version.Requirement`): Request.
patchlist (list of str): List of patch requests.
Returns:
List of `version.Requirement`: Patched request.
|
codesearchnet
|
def __field_to_parameter_type(self, field):
variant = field.variant
if (variant == messages.Variant.MESSAGE):
raise TypeError("A message variant can't be used in a parameter.")
custom_variant_map = {messages.Variant.SINT32: 'int32', messages.Variant.SINT64: 'int64', messages.Variant.BOOL: 'boolean', messages.Variant.ENUM: 'string'}
return (custom_variant_map.get(variant) or variant.name.lower())
|
Converts the field variant type into a string describing the parameter.
Args:
field: An instance of a subclass of messages.Field.
Returns:
A string corresponding to the variant enum of the field, with a few
exceptions. In the case of signed ints, the 's' is dropped; for the BOOL
variant, 'boolean' is used; and for the ENUM variant, 'string' is used.
Raises:
TypeError: if the field variant is a message variant.
|
codesearchnet
|
def copy_pkg(self, filename, id_=(- 1)):
self._copy(filename, id_=id_, file_type=PKG_FILE_TYPE)
|
Copy a package to the distribution server.
Bundle-style packages must be zipped prior to copying.
Args:
filename: Full path to file to upload.
id_: ID of Package object to associate with, or -1 for new
packages (default).
|
codesearchnet
|
def _Enter(tensor, frame_name, is_constant=False, parallel_iterations=10, use_ref=True, use_input_shape=True, name=None):
tensor = ops.internal_convert_to_tensor_or_composite(tensor, as_ref=True)
if isinstance(tensor, tensor_lib.Tensor):
if tensor.dtype._is_ref_dtype and use_ref:
result = gen_control_flow_ops.ref_enter(tensor, frame_name, is_constant, parallel_iterations, name=name)
else:
result = gen_control_flow_ops.enter(tensor, frame_name, is_constant, parallel_iterations, name=name)
if use_input_shape:
result.set_shape(tensor.get_shape())
return result
elif isinstance(tensor, composite_tensor.CompositeTensor):
def enter_component(t):
return _Enter(t, frame_name, is_constant, parallel_iterations, use_ref, use_input_shape)
return nest.map_structure(enter_component, tensor, expand_composites=True)
else:
raise TypeError(f"'tensor' must be a Tensor or CompositeTensor. Received: {type(tensor)}.")
|
Creates or finds a child frame, and makes `tensor` available to it.
The unique `frame_name` is used by the `Executor` to identify frames. If
`is_constant` is true, `tensor` is a constant in the child frame; otherwise
it may be changed in the child frame. At most `parallel_iterations`
iterations are run in parallel in the child frame.
Args:
tensor: The tensor to be made available to the child frame.
frame_name: The name of the child frame.
is_constant: If true, the output is constant within the child frame.
parallel_iterations: The number of iterations allowed to run in parallel.
use_ref: If true, use ref_enter if tensor is of ref type.
use_input_shape: If true, set the result's shape based on tensor's shape.
name: A name for this operation (optional).
Returns:
The same tensor as `tensor`.
Raises:
ValueError: If any tensor in `tensor` has a less specific shape
than its corresponding shape in `shape_invariant`.
|
github-repos
|
def get_vep_info(vep_string, vep_header):
vep_annotations = [
dict(zip(vep_header, vep_annotation.split('|')))
for vep_annotation in vep_string.split(',')
]
return vep_annotations
|
Make the vep annotations into dictionaries
A vep dictionary will have the vep column names as keys and
the vep annotations as values.
The dictionaries are stored in a list
Args:
vep_string (string): A string with the CSQ annotation
vep_header (list): A list with the vep header
Return:
vep_annotations (list): A list of vep dicts
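Illustrative example (the header fields shown are hypothetical):
>>> get_vep_info('A|missense_variant,G|synonymous_variant', ['Allele', 'Consequence'])
[{'Allele': 'A', 'Consequence': 'missense_variant'}, {'Allele': 'G', 'Consequence': 'synonymous_variant'}]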
|
juraj-google-style
|
def peek_step(self, val: ObjectValue, sn: 'DataNode') -> Tuple[(Value, 'DataNode')]:
cn = sn.get_data_child(self.name, self.namespace)
try:
return (val[cn.iname()], cn)
except (IndexError, KeyError, TypeError):
return (None, cn)
|
Return member value addressed by the receiver + its schema node.
Args:
val: Current value (object).
sn: Current schema node.
|
codesearchnet
|
def get_stats_for_node_def(graph, node, statistic_type) -> Any:
try:
stats_func = _stats_registry.lookup(node.op + ',' + statistic_type)
result = stats_func(graph, node)
except LookupError:
result = OpStats(statistic_type)
return result
|
Looks up the node's statistics function in the registry and calls it.
This function takes a Graph object and a NodeDef from a GraphDef, and if
there's an associated statistics method, calls it and returns a result. If no
function has been registered for the particular node type, it returns an empty
statistics object.
Args:
graph: A Graph object that's been set up with the node's graph.
node: A NodeDef describing the operator.
statistic_type: A string identifying the statistic we're interested in.
Returns:
An OpStats object containing information about resource usage.
|
github-repos
|
def add_archive_as_dir(self, zip_file_obj):
BalancedDiscStorage._check_interface(zip_file_obj)
file_hash = self._get_hash(zip_file_obj)
dir_path = self._create_dir_path(file_hash)
full_path = os.path.join(dir_path, file_hash)
if os.path.exists(full_path):
shutil.rmtree(full_path)
os.mkdir(full_path)
try:
self._unpack_zip(zip_file_obj, full_path)
except Exception:
shutil.rmtree(full_path)
raise
return PathAndHash(path=full_path, hash=file_hash)
|
Add archive to the storage and unpack it.
Args:
zip_file_obj (file): Opened file-like object.
Returns:
obj: Path where the `zip_file_obj` was unpacked wrapped in \
:class:`.PathAndHash` structure.
Raises:
ValueError: If there are too many files in the .zip archive. \
See :attr:`._max_zipfiles` for details.
AssertionError: If the `zip_file_obj` is not file-like object.
|
juraj-google-style
|
def fetch_token(self, **kwargs):
kwargs.setdefault('client_secret', self.client_config['client_secret'])
return self.oauth2session.fetch_token(self.client_config['token_uri'], **kwargs)
|
Completes the Authorization Flow and obtains an access token.
This is the final step in the OAuth 2.0 Authorization Flow. This is
called after the user consents.
This method calls
:meth:`requests_oauthlib.OAuth2Session.fetch_token`
and specifies the client configuration's token URI (usually Google's
token server).
Args:
kwargs: Arguments passed through to
:meth:`requests_oauthlib.OAuth2Session.fetch_token`. At least
one of ``code`` or ``authorization_response`` must be
specified.
Returns:
Mapping[str, str]: The obtained tokens. Typically, you will not use
the return value of this function and will instead use
:meth:`credentials` to obtain a
:class:`~google.auth.credentials.Credentials` instance.
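Illustrative usage (a sketch; assumes `flow` is an instance of this class and `auth_code` holds the code from the consent redirect):
>>> token = flow.fetch_token(code=auth_code)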
|
codesearchnet
|
def attention_bias_batch(batch_coordinates_q,
batch_coordinates_k=None,
condition_fn=None):
if batch_coordinates_k is None:
batch_coordinates_k = batch_coordinates_q
def to_float(bc):
bc = tf.squeeze(bc, 1)
bc = tf.to_float(bc)
return bc
bc_v = tf.expand_dims(to_float(batch_coordinates_q), 1)
bc_h = tf.expand_dims(to_float(batch_coordinates_k), 0)
bias_batch = bc_h - bc_v
bias_batch = condition_fn(bias_batch)
bias_batch *= -1e9
return bias_batch
|
Generate a mask to prevent batches from attending to each other.
Args:
batch_coordinates_q: Int-like Tensor of shape [length_q, 1] containing the
coordinates of the batches
batch_coordinates_k: Int-like Tensor of shape [length_k, 1] containing the
coordinates of the batches. If None, do self-attention.
condition_fn: Callable defining the attention mask.
Returns:
Float-like Tensor of shape [length_q, length_k] containing either 0 or
-infinity (-1e9).
|
juraj-google-style
|
def hashed(field_name, percent, fields=None, count=0):
if (field_name is None):
raise Exception('Hash field must be specified')
def _hashed_sampling(sql):
projection = Sampling._create_projection(fields)
sql = ('SELECT %s FROM (%s) WHERE MOD(ABS(FARM_FINGERPRINT(CAST(%s AS STRING))), 100) < %d' % (projection, sql, field_name, percent))
if (count != 0):
sql = ('%s LIMIT %d' % (sql, count))
return sql
return _hashed_sampling
|
Provides a sampling strategy based on hashing and selecting a percentage of data.
Args:
field_name: the name of the field to hash.
percent: the percentage of the resulting hashes to select.
fields: an optional list of field names to retrieve.
count: optional maximum count of rows to pick.
Returns:
A sampling function that can be applied to get a hash-based sampling.
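Illustrative example (a sketch; assumes `Sampling._create_projection(None)` yields '*'):
>>> sampler = hashed('user_id', 10)
>>> sampler('SELECT * FROM logs')
'SELECT * FROM (SELECT * FROM logs) WHERE MOD(ABS(FARM_FINGERPRINT(CAST(user_id AS STRING))), 100) < 10'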
|
codesearchnet
|
def parse(self, **global_args):
if self.build_file not in ParseContext._parsed:
butcher_context = {}
for str_to_exec in self._strs_to_exec:
ast = compile(str_to_exec, '<string>', 'exec')
exec_function(ast, butcher_context)
with ParseContext.activate(self):
startdir = os.path.abspath(os.curdir)
try:
os.chdir(self.build_file.path_on_disk)
if self.build_file not in ParseContext._parsed:
ParseContext._parsed.add(self.build_file)
eval_globals = copy.copy(butcher_context)
eval_globals.update(
{'ROOT_DIR': self.build_file.path_on_disk,
'__file__': 'bogus please fix this'})
eval_globals.update(global_args)
exec_function(self.build_file.code, eval_globals)
finally:
os.chdir(startdir)
|
Entry point to parsing a BUILD file.
Args:
**global_args: Variables to include in the parsing environment.
|
juraj-google-style
|
def __init__(self, root_path, root_url, site_title, site_desc=None):
self.root_path = root_path
self.root_url = root_url
self.site_title = site_title
self.site_desc = site_desc
self.cm = russell.content.ContentManager(root_url)
self.pages = self.cm.pages
self.posts = self.cm.posts
self.tags = self.cm.tags
self.asset_hash = {}
self.jinja = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(root_path, 'templates')),
)
self.jinja.globals.update({
'a': make_link,
'asset_hash': self.asset_hash,
'asset_url': self.get_asset_url,
'now': datetime.now(),
'root_url': self.root_url,
'site_description': self.site_desc,
'site_title': self.site_title,
'tags': self.tags,
})
|
Constructor.
Args:
root_path (str): Full path to the directory which contains the posts,
pages, templates etc. directories.
root_url (str): The root URL of your website.
site_title (str): The title of your website.
site_desc (str): A subtitle or description of your website.
|
juraj-google-style
|
def query(self, expr, **kwargs):
columns = self.columns
def query_builder(df, **kwargs):
df = df.copy()
df.index = pandas.RangeIndex(len(df))
df.columns = columns
df.query(expr, inplace=True, **kwargs)
df.columns = pandas.RangeIndex(len(df.columns))
return df
func = self._prepare_method(query_builder, **kwargs)
new_data = self._map_across_full_axis(1, func)
new_index = self.compute_index(0, new_data, True)
return self.__constructor__(new_data, new_index, self.columns, self.dtypes)
|
Query columns of the DataManager with a boolean expression.
Args:
expr: Boolean expression to query the columns with.
Returns:
DataManager containing the rows where the boolean expression is satisfied.
|
codesearchnet
|
def read(self, nodes=None, **kwargs):
if (nodes is None):
required_nodes = (self.wishlist - set(self.datasets.keys()))
nodes = self.dep_tree.leaves(nodes=required_nodes)
return self._read_datasets(nodes, **kwargs)
|
Load datasets from the necessary reader.
Args:
nodes (iterable): DependencyTree Node objects
**kwargs: Keyword arguments to pass to the reader's `load` method.
Returns:
DatasetDict of loaded datasets
|
codesearchnet
|
def create_resource(self, resource_type=None, uri=None):
if (resource_type in [NonRDFSource, Binary, BasicContainer, DirectContainer, IndirectContainer]):
return resource_type(self, uri)
else:
raise TypeError('expecting Resource type, such as BasicContainer or NonRDFSource')
|
Convenience method for creating a new resource
Note: A Resource is instantiated, but is not yet created. Still requires resource.create().
Args:
uri (rdflib.term.URIRef, str): uri of resource to create
resource_type (NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): resource type to create
Returns:
(NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): instance of appropriate type
|
codesearchnet
|
def month_name_to_number(month, to_int=False):
number = {
'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04', 'May': '05',
'Jun': '06', 'Jul': '07', 'Aug': '08', 'Sep': '09', 'Oct': '10',
'Nov': '11', 'Dec': '12',
}.get(month)
return int(number) if to_int else number
|
Convert a month name (MMM) to its number (01-12).
Args:
month (str): 3-letters string describing month.
to_int (bool): cast number to int or not.
Returns:
str/int: the month's number (between 01 and 12).
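Illustrative doctest (consistent with the mapping above):
>>> month_name_to_number('Apr')
'04'
>>> month_name_to_number('Apr', to_int=True)
4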
|
juraj-google-style
|
def pad(self, images: 'torch.Tensor', size: int) -> 'torch.Tensor':
height, width = get_image_size(images, ChannelDimension.FIRST)
pad_height = (height // size + 1) * size - height
pad_width = (width // size + 1) * size - width
return F.pad(images, (0, 0, pad_width, pad_height), padding_mode='symmetric')
|
Pad an image to make the height and width divisible by `size`.
Args:
images (`torch.Tensor`):
Images to pad.
size (`int`):
The size to make the height and width divisible by.
Returns:
`torch.Tensor`: The padded images.
|
github-repos
|
def signature(cert, sig, body):
body = six.b(body)
sig = base64.decodestring(sig)
padder = padding.PKCS1v15()
public_key = cert.public_key()
try:
public_key.verify(sig, body, padder, hashes.SHA1())
return True
except InvalidSignature:
warnings.warn('Signature verification failed.')
return False
|
Validate data request signature.
See `validate.request` for additional info.
Args:
cert: cryptography.hazmat.backends.openssl.x509._Certificate. The Amazon
signing certificate.
sig: str. Signature header value sent by request.
body: str. HTTPS request body.
Returns:
bool: True if valid, False otherwise.
|
codesearchnet
|
def get_cost_per_kg(self, comp):
comp = comp if isinstance(comp, Composition) else Composition(comp)
return self.get_cost_per_mol(comp) / (
comp.weight.to("kg") * const.N_A)
|
Get best estimate of minimum cost/kg based on known data
Args:
comp:
Composition as a pymatgen.core.structure.Composition
Returns:
float of cost/kg
|
juraj-google-style
|
def Deserialize(self, reader):
super(SpentCoinState, self).Deserialize(reader)
self.TransactionHash = reader.ReadUInt256()
self.TransactionHeight = reader.ReadUInt32()
count = reader.ReadVarInt()
items = [0] * count
for i in range(0, count):
index = reader.ReadUInt16()
height = reader.ReadUInt32()
items[i] = SpentCoinItem(index=index, height=height)
self.Items = items
|
Deserialize full object.
Args:
reader (neocore.IO.BinaryReader):
|
juraj-google-style
|
def WriteSignedBinaryBlobs(binary_urn, blobs, token=None):
if _ShouldUseLegacyDatastore():
aff4.FACTORY.Delete(binary_urn, token=token)
with data_store.DB.GetMutationPool() as mutation_pool:
with aff4.FACTORY.Create(binary_urn, collects.GRRSignedBlob, mode='w', mutation_pool=mutation_pool, token=token) as fd:
for blob in blobs:
fd.Add(blob, mutation_pool=mutation_pool)
if data_store.RelationalDBEnabled():
blob_references = rdf_objects.BlobReferences()
current_offset = 0
for blob in blobs:
blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(blob.SerializeToString())
blob_references.items.Append(rdf_objects.BlobReference(offset=current_offset, size=len(blob.data), blob_id=blob_id))
current_offset += len(blob.data)
data_store.REL_DB.WriteSignedBinaryReferences(_SignedBinaryIDFromURN(binary_urn), blob_references)
|
Saves signed blobs to the datastore.
If a signed binary with the given URN already exists, its contents will get
overwritten.
Args:
binary_urn: RDFURN that should serve as a unique identifier for the binary.
blobs: An Iterable of signed blobs to write to the datastore.
token: ACL token to use with the legacy (non-relational) datastore.
|
codesearchnet
|
def initialize(self, prefix_name='default', *args, **kwargs):
if self.loaded:
raise WorkdirError(('Workdir %s already initialized' % self.path))
if (not os.path.exists(self.path)):
LOGGER.debug('Creating workdir %s', self.path)
os.makedirs(self.path)
self.prefixes[prefix_name] = self.prefix_class(self.join(prefix_name), *args, **kwargs)
self.prefixes[prefix_name].initialize()
if (self.current is None):
self._set_current(prefix_name)
self.load()
return self.prefixes[prefix_name]
|
Initializes a workdir by adding a new prefix to the workdir.
Args:
prefix_name(str): Name of the new prefix to add
*args: args to pass along to the prefix constructor
**kwargs: kwargs to pass along to the prefix constructor
Returns:
The newly created prefix
Raises:
PrefixAlreadyExists: if the prefix name already exists in the
workdir
|
codesearchnet
|
def _get_dtype_from_nested_lists(list_or_tuple):
for elem in list_or_tuple:
if isinstance(elem, core.Tensor):
return elem.dtype.base_dtype
elif isinstance(elem, (list, tuple)):
maybe_dtype = _get_dtype_from_nested_lists(elem)
if maybe_dtype is not None:
return maybe_dtype
return None
|
Returns the dtype of any tensor-like object in `list_or_tuple`, if found.
Args:
list_or_tuple: A list or tuple representing an object that can be converted
to a `tf.Tensor`.
Returns:
The dtype of any tensor-like object in `list_or_tuple`, or `None` if no
such object exists.
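Illustrative example (a sketch, assuming eager TensorFlow):
>>> _get_dtype_from_nested_lists([[tf.constant(1.0)], 2, 3])
tf.float32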
|
github-repos
|
def make_value_from_datastore(self, value):
if value is None:
return None
_json = json.loads(value, cls=JsonDecoder)
if self.data_type == dict:
return _json
return self.data_type.from_json(_json)
|
Convert value from datastore representation.
Args:
value: datastore value.
Returns:
value to store in the model.
|
juraj-google-style
|
def job_stories(self, raw=False, limit=None):
job_stories = self._get_stories('jobstories', limit)
if raw:
job_stories = [story.raw for story in job_stories]
return job_stories
|
Returns list of item ids of latest Job stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Job stories.
|
codesearchnet
|
def register(cls, config_class, model_class, exist_ok=False) -> None:
if hasattr(model_class, 'config_class') and model_class.config_class.__name__ != config_class.__name__:
raise ValueError(f'The model class you are passing has a `config_class` attribute that is not consistent with the config class you passed (model has {model_class.config_class} and you passed {config_class}. Fix one of those so they match!')
cls._model_mapping.register(config_class, model_class, exist_ok=exist_ok)
|
Register a new model for this class.
Args:
config_class ([`PretrainedConfig`]):
The configuration corresponding to the model to register.
model_class ([`PreTrainedModel`]):
The model to register.
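Illustrative usage (a sketch; `NewModelConfig` and `NewModel` are hypothetical user-defined classes, and `AutoModel` stands in for this auto class):
>>> AutoModel.register(NewModelConfig, NewModel)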
|
github-repos
|
def train_and_maybe_evaluate(hparams):
schema = taxi.read_schema(hparams.schema_file)
tf_transform_output = tft.TFTransformOutput(hparams.tf_transform_dir)
train_input = lambda: model.input_fn(hparams.train_files, tf_transform_output, batch_size=TRAIN_BATCH_SIZE)
eval_input = lambda: model.input_fn(hparams.eval_files, tf_transform_output, batch_size=EVAL_BATCH_SIZE)
train_spec = tf_estimator.TrainSpec(train_input, max_steps=hparams.train_steps)
serving_receiver_fn = lambda: model.example_serving_receiver_fn(tf_transform_output, schema)
exporter = tf_estimator.FinalExporter('chicago-taxi', serving_receiver_fn)
eval_spec = tf_estimator.EvalSpec(eval_input, steps=hparams.eval_steps, exporters=[exporter], name='chicago-taxi-eval')
run_config = tf_estimator.RunConfig(save_checkpoints_steps=999, keep_checkpoint_max=1)
serving_model_dir = os.path.join(hparams.output_dir, SERVING_MODEL_DIR)
run_config = run_config.replace(model_dir=serving_model_dir)
estimator = model.build_estimator(tf_transform_output, hidden_units=[max(2, int(FIRST_DNN_LAYER_SIZE * DNN_DECAY_FACTOR ** i)) for i in range(NUM_DNN_LAYERS)], config=run_config)
tf_estimator.train_and_evaluate(estimator, train_spec, eval_spec)
return estimator
|
Run the training and evaluate using the high level API.
Args:
hparams: Holds hyperparameters used to train the model as name/value pairs.
Returns:
The estimator that was used for training (and maybe eval)
|
github-repos
|
def SetAndLoadTagFile(self, tagging_file_path):
tag_file = tagging_file.TaggingFile(tagging_file_path)
self._tagging_rules = tag_file.GetEventTaggingRules()
|
Sets the tag file to be used by the plugin.
Args:
tagging_file_path (str): path of the tagging file.
|
juraj-google-style
|
def get_archive(self, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
return self.client.api.get_archive(self.id, path, chunk_size)
|
Retrieve a file or folder from the container in the form of a tar
archive.
Args:
path (str): Path to the file or folder to retrieve
chunk_size (int): The number of bytes returned by each iteration
of the generator. If ``None``, data will be streamed as it is
received. Default: 2 MB
Returns:
(tuple): First element is a raw tar data stream. Second element is
a dict containing ``stat`` information on the specified ``path``.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> f = open('./sh_bin.tar', 'wb')
>>> bits, stat = container.get_archive('/bin/sh')
>>> print(stat)
{'name': 'sh', 'size': 1075464, 'mode': 493,
'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''}
>>> for chunk in bits:
... f.write(chunk)
>>> f.close()
|
codesearchnet
|
def skip(reason, extras=None):
raise signals.TestSkip(reason, extras)
|
Skip a test.
Args:
reason: The reason this test is skipped.
extras: An optional field for extra information to be included in
test result.
Raises:
signals.TestSkip: Mark a test as skipped.
|
github-repos
|
def stop(self, timeout_s=None):
self._stopping.set()
with self._current_phase_thread_lock:
phase_thread = self._current_phase_thread
if (not phase_thread):
return
if phase_thread.is_alive():
phase_thread.kill()
_LOG.debug('Waiting for cancelled phase to exit: %s', phase_thread)
timeout = timeouts.PolledTimeout.from_seconds(timeout_s)
while (phase_thread.is_alive() and (not timeout.has_expired())):
time.sleep(0.1)
_LOG.debug('Cancelled phase %s exit', ("didn't" if phase_thread.is_alive() else 'did'))
self.test_state.stop_running_phase()
|
Stops execution of the current phase, if any.
It will raise a ThreadTerminationError, which will cause the test to stop
executing and terminate with an ERROR state.
Args:
timeout_s: int or None, timeout in seconds to wait for the phase to stop.
|
codesearchnet
|
def parse_gene_panel(path, institute='cust000', panel_id='test', panel_type='clinical', date=datetime.now(), version=1.0, display_name=None, genes=None):
LOG.info('Parsing gene panel %s', panel_id)
gene_panel = {}
gene_panel['path'] = path
gene_panel['type'] = panel_type
gene_panel['date'] = date
gene_panel['panel_id'] = panel_id
gene_panel['institute'] = institute
version = (version or 1.0)
gene_panel['version'] = float(version)
gene_panel['display_name'] = (display_name or panel_id)
if (not path):
panel_handle = genes
else:
panel_handle = get_file_handle(gene_panel['path'])
gene_panel['genes'] = parse_genes(gene_lines=panel_handle)
return gene_panel
|
Parse the panel info and return a gene panel
Args:
path(str): Path to panel file
institute(str): Name of institute that owns the panel
panel_id(str): Panel id
date(datetime.datetime): Date of creation
version(float)
display_name(str): Option to have a long name
Returns:
gene_panel(dict)
|
codesearchnet
|
def dot_product(p1, p2, o=(0, 0)):
v1 = vector(o, p1)
v2 = vector(o, p2)
return v1[0] * v2[0] + v1[1] * v2[1]
|
Returns dot product
Args:
p1, p2: point (x, y)
o: origin
|
juraj-google-style
|
def from_spec(cls, spec: Spec, _run_init: bool=True) -> Union[Self, type[Self]]:
if spec.type is None:
raise ValueError(f'Spec type not found in {spec}')
subspace = _spec_type_to_subspace(spec.type)
subclass: type[Self] = _KNOWN_SPECIFIABLE[subspace].get(spec.type, None)
if subclass is None:
raise ValueError(f"Unknown spec type '{spec.type}' in {spec}")
if spec.config is None:
return subclass
kwargs = {k: _specifiable_from_spec_helper(v, _run_init) for k, v in spec.config.items()}
if _run_init:
kwargs['_run_init'] = True
return subclass(**kwargs)
|
Generate a `Specifiable` subclass object based on a spec.
Args:
spec: the specification of a `Specifiable` subclass object
_run_init: whether to call `__init__` or not for the initial instantiation
Returns:
Self: the `Specifiable` subclass object
|
github-repos
|
def _distributed_apply(self, distribution, grads_and_vars, global_step=None, name=None):
reduced_grads = distribution.extended.batch_reduce_to(ds_reduce_util.ReduceOp.SUM, grads_and_vars)
var_list = [v for _, v in grads_and_vars]
grads_and_vars = zip(reduced_grads, var_list)
with ops.init_scope():
self._create_slots(var_list)
def update(v, g):
assert v is not None
try:
g = indexed_slices.convert_to_tensor_or_indexed_slices(g)
except TypeError:
raise TypeError('Gradient must be convertible to a Tensor or IndexedSlices, or None: %s' % g)
if not isinstance(g, (tensor.Tensor, indexed_slices.IndexedSlices)):
raise TypeError('Gradient must be a Tensor, IndexedSlices, or None: %s' % g)
p = _get_processor(v)
if context.executing_eagerly() or (resource_variable_ops.is_resource_variable(v) and (not v._in_graph_mode)):
scope_name = v.name.split(':')[0]
else:
scope_name = v.op.name
with ops.name_scope('update_' + scope_name):
return p.update_op(self, g)
with ops.name_scope(name, self._name) as name:
self._prepare()
update_ops = [op for grad, var in grads_and_vars for op in distribution.extended.update(var, update, args=(grad,), group=False)]
def finish(self, update_ops):
return self._finish(update_ops, 'update')
non_slot_devices = distribution.extended.non_slot_devices(var_list)
finish_updates = distribution.extended.update_non_slot(non_slot_devices, finish, args=(self, update_ops), group=False)
if global_step is None:
apply_updates = distribution.group(finish_updates, name=name)
else:
with ops.control_dependencies(finish_updates):
apply_updates = distribution.extended.update(global_step, state_ops.assign_add, args=(1,), kwargs={'name': name})
if not context.executing_eagerly():
if isinstance(apply_updates, tensor.Tensor):
apply_updates = apply_updates.op
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
if apply_updates not in train_op:
train_op.append(apply_updates)
return apply_updates
|
A version of `apply_gradients` for cross-replica context.
This is a version of `apply_gradients()` for when you are using a
`DistributionStrategy` and are in a cross-replica context. If in a
replica context, use `apply_gradients()` as normal.
Args:
distribution: A `DistributionStrategy` object.
grads_and_vars: List of (gradient, variable) pairs as returned by
`compute_gradients()`, and then aggregated across replicas.
global_step: Optional (mirrored) `Variable` to increment by one
after the variables have been updated.
name: Optional name for the returned operation. Default to the
name passed to the `Optimizer` constructor.
Returns:
An `Operation` that applies the specified gradients across all
replicas. If `global_step` was not None, that operation also
increments `global_step`
|
github-repos
|
def find_call(self, path, method):
if (not path.endswith('/')):
path += '/'
path = path.split('/')[1:]
return self._recursive_route_match(self._routes, path, method, [])
|
Find callable for the specified URL path and HTTP method.
Args:
path (:obj:`str`): URL path to match
method (:obj:`str`): HTTP method
Note:
A trailing '/' is always assumed in the path.
|
codesearchnet
|
def _GetRowValue(self, query_hash, row, value_name):
keys_name_to_index_map = self._keys_per_query.get(query_hash, None)
if not keys_name_to_index_map:
keys_name_to_index_map = {
name: index for index, name in enumerate(row.keys())}
self._keys_per_query[query_hash] = keys_name_to_index_map
value_index = keys_name_to_index_map.get(value_name)
return row[value_index]
|
Retrieves a value from the row.
Args:
query_hash (int): hash of the query, that uniquely identifies the query
that produced the row.
row (sqlite3.Row): row.
value_name (str): name of the value.
Returns:
object: value.
|
juraj-google-style
|
def setup(self, puller: bool=None, subscriptions: Dict[(str, Any)]={}):
if puller:
puller = self._zmq.socket(zmq.PULL)
(ip, port, host) = self.rslv('rcv')
puller.bind('tcp:
self.poll(puller)
if subscriptions:
for publisher in subscriptions:
self.add(publisher, subscriptions[publisher].get('slots'), subscriptions[publisher].get('buffer-length'))
logger.info('Listening to %s', {k: (1 if (subscriptions[k].get('slots') is None) else len(subscriptions[k].get('slots'))) for k in subscriptions})
|
Sets up this Node with the specified Interfaces before it is run.
Args:
puller: Indication if a Puller Interface should be created.
subscriptions: Collection of the Subscriber Interfaces to be created and their Slots.
|
codesearchnet
|
def _take_lease(self, lease, uuid_path, safe=True):
if safe:
lease_taken_by = self._lease_valid(lease)
if lease_taken_by and lease_taken_by != uuid_path:
raise LagoSubnetLeaseTakenException(
lease.subnet, lease_taken_by
)
with open(uuid_path) as f:
uuid = f.read()
with open(lease.path, 'wt') as f:
utils.json_dump((uuid_path, uuid), f)
LOGGER.debug(
'Assigned subnet lease {} to {}'.format(lease.path, uuid_path)
)
|
Persist the given lease to the store and make the prefix in uuid_path
its owner
Args:
lease(lago.subnet_lease.Lease): Object representation of the lease
uuid_path (str): Path to the prefix uuid
safe (bool): If true (the default), validate that the lease
isn't taken.
Raises:
LagoSubnetLeaseException: If safe == True and the lease is already
taken.
|
juraj-google-style
|
def recipe_iam(config, auth_write, role, email):
iam(config, {'auth': auth_write, 'role': role, 'email': email})
|
Sets project permissions for an email.
Args:
auth_write (authentication) - Credentials used for writing data.
role (string) - projects/[project name]/roles/[role name]
email (string) - Email address to grant role to.
|
github-repos
|
def create_from_binary(cls, ignore_signature_check, binary_view):
sig, fx_offset, fx_count, lsn, seq_number, hard_link_count, first_attr_offset, \
usage_flags, entry_len, alloc_len, base_record, next_attr_id, record_n = \
cls._REPR.unpack(binary_view[:cls._REPR.size])
baad = None
if not ignore_signature_check:
if sig == b"FILE":
baad = False
elif sig == b"BAAD":
baad = True
else:
raise HeaderError("Entry has no valid signature.", "MFTHeader")
if fx_offset < MFTHeader._REPR.size:
raise HeaderError("Fix up array begins within the header.", "MFTHeader")
if first_attr_offset < cls._REPR.size:
raise HeaderError("First attribute offset points to inside of the header.", "MFTHeader")
if entry_len > alloc_len:
raise HeaderError("Logical size of the MFT is bigger than MFT allocated size.", "MFTHeader")
file_ref, file_seq = get_file_reference(base_record)
nw_obj = cls((baad, fx_offset, fx_count, lsn, seq_number, hard_link_count,
first_attr_offset, MftUsageFlags(usage_flags), entry_len, alloc_len,
file_ref, file_seq, next_attr_id, record_n))
return nw_obj
|
Creates a new object MFTHeader from a binary stream. The binary
stream can be represented by a byte string, bytearray or a memoryview of the
bytearray.
Args:
binary_view (memoryview of bytearray) - A binary stream with the
information of the attribute
Returns:
MFTHeader: New object using the binary stream as source
|
juraj-google-style
|
def get_value(self):
try:
self.raw_value
except (AttributeError, KeyError) as err:
self._reraise_if_required(err)
default_value = self.default_value
if self.transform_default:
return self.transform(default_value)
return default_value
else:
value = {}
for (key, subsetting) in self.settings.items():
value[key] = subsetting.get_value()
return value
|
Return dictionary with values of subsettings.
Returns:
dict: values of subsettings.
|
codesearchnet
|
def make_legacy_input_feature_spec(include_label=True):
result = {}
if include_label:
result['clicked'] = tf.io.FixedLenFeature(shape=[], dtype=tf.int64)
for name in _INTEGER_COLUMN_NAMES:
result[name] = tf.io.FixedLenFeature(shape=[], dtype=tf.int64, default_value=-1)
for name in _CATEGORICAL_COLUMN_NAMES:
result[name] = tf.io.FixedLenFeature(shape=[], dtype=tf.string, default_value='')
return result
|
Input schema definition.
Args:
include_label: Indicates whether the label feature should be included.
Returns:
A `Schema` object.
|
github-repos
|
def register(self, type_name: str, cls: Type[Any], override_existing: bool=False) -> None:
if type_name in self._type_to_cls_map and (not override_existing):
raise KeyError(f'Type {type_name!r} has already been registered with class {self._type_to_cls_map[type_name].__name__}.')
self._type_to_cls_map[type_name] = cls
|
Register a ``symbolic.Object`` class with a type name.
Args:
type_name: String identifier for the class, which will be used as the
value of `_type` property when deciding which class to construct object
when converting a JSON value to object.
cls: Class to register.
override_existing: Whether allow to override existing value if type name
is already registered.
Raises:
KeyError: If type_name is already registered and override_existing is set
to False.
|
github-repos
|
def __call__(self, input_ids: torch.LongTensor, z_threshold: float=3.0, return_dict: bool=False) -> Union[WatermarkDetectorOutput, np.array]:
if input_ids[0, 0] == self.bos_token_id:
input_ids = input_ids[:, 1:]
if input_ids.shape[-1] - self.processor.context_width < 1:
raise ValueError(f'Must have at least `1` token to score after the first min_prefix_len={self.processor.context_width} tokens required by the seeding scheme.')
num_tokens_scored, green_token_count = self._score_ngrams_in_passage(input_ids)
z_score = self._compute_z_score(green_token_count, num_tokens_scored)
prediction = z_score > z_threshold
if return_dict:
p_value = self._compute_pval(z_score)
confidence = 1 - p_value
return WatermarkDetectorOutput(num_tokens_scored=num_tokens_scored, num_green_tokens=green_token_count, green_fraction=green_token_count / num_tokens_scored, z_score=z_score, p_value=p_value, prediction=prediction, confidence=confidence)
return prediction
|
Args:
input_ids (`torch.LongTensor`):
The watermark generated text. It is advised to remove the prompt, which can affect the detection.
z_threshold (`Dict`, *optional*, defaults to `3.0`):
Changing this threshold will change the sensitivity of the detector. Higher z threshold gives less
sensitivity and vice versa for lower z threshold.
return_dict (`bool`, *optional*, defaults to `False`):
Whether to return `~generation.WatermarkDetectorOutput` or not. If not, it will return boolean predictions.
Return:
[`~generation.WatermarkDetectorOutput`] or `np.array`: A [`~generation.WatermarkDetectorOutput`]
if `return_dict=True` otherwise a `np.array`.
|
github-repos
|
def get_profiles(adapter, vcf_file):
vcf = get_file_handle(vcf_file)
individuals = vcf.samples
profiles = {individual: [] for individual in individuals}
for profile_variant in adapter.profile_variants():
ref = profile_variant['ref']
alt = profile_variant['alt']
pos = profile_variant['pos']
end = (pos + 1)
chrom = profile_variant['chrom']
region = f'{chrom}:{pos}-{end}'
found_variant = False
for variant in vcf(region):
variant_id = get_variant_id(variant)
if (variant_id == profile_variant['_id']):
found_variant = True
for (i, individual) in enumerate(individuals):
genotype = GENOTYPE_MAP[variant.gt_types[i]]
if (genotype == 'hom_alt'):
gt_str = f'{alt}{alt}'
elif (genotype == 'het'):
gt_str = f'{ref}{alt}'
else:
gt_str = f'{ref}{ref}'
profiles[individual].append(gt_str)
break
if (not found_variant):
for individual in individuals:
profiles[individual].append(f'{ref}{ref}')
return profiles
|
Given a vcf, get a profile string for each sample in the vcf
based on the profile variants in the database
Args:
adapter(MongoAdapter): Adapter to mongodb
vcf_file(str): Path to vcf file
Returns:
profiles (dict(str)): The profiles (given as strings) for each sample
in vcf.
|
codesearchnet
|
def _evolve(self, state, qargs=None):
if qargs is not None:
return SuperOp(self)._evolve(state, qargs)
state = self._format_state(state, density_matrix=True)
if state.shape[0] != self._input_dim:
raise QiskitError(
"QuantumChannel input dimension is not equal to state dimension."
)
return np.einsum('AB,AiBj->ij', state,
np.reshape(self._data, self._bipartite_shape))
|
Evolve a quantum state by the QuantumChannel.
Args:
state (QuantumState): The input statevector or density matrix.
qargs (list): a list of QuantumState subsystem positions to apply
the operator on.
Returns:
DensityMatrix: the output quantum state as a density matrix.
Raises:
QiskitError: if the operator dimension does not match the
specified QuantumState subsystem dimensions.
|
juraj-google-style
|
def stop(self, drain_queue_and_join=True):
with self.start_stop_lock:
if not self.running:
return
self.running = False
if drain_queue_and_join:
while True:
try:
value = self.future_queue.get(block=True, timeout=0.1)
if isinstance(value, Exception):
raise value
inputs = value.get()
self.future_queue.task_done()
if inputs is not None:
self.ready_queue.put(inputs)
except queue.Empty:
break
self.run_thread.join()
self.run_thread = None
_SHARED_SEQUENCES[self.uid] = None
|
Stops running threads and waits for them to exit, if necessary.
This method is thread safe and is called from various threads. Note that
the `drain_queue_and_join` argument must be set correctly.
It is safe to call this method multiple times, extra calls are ignored.
Args:
drain_queue_and_join: set to True to drain the queue of pending
items and wait for the worker thread to complete. Set to False
if invoked from a worker thread to avoid deadlocks. Note that
setting this to False means this enqueuer won't be reused.
|
github-repos
|
def Validate(self, problems, validate_children=True):
self.ValidateRouteId(problems)
self.ValidateServicePeriod(problems)
self.ValidateDirectionId(problems)
self.ValidateTripId(problems)
self.ValidateShapeIdsExistInShapeList(problems)
self.ValidateRouteIdExistsInRouteList(problems)
self.ValidateServiceIdExistsInServiceList(problems)
self.ValidateBikesAllowed(problems)
self.ValidateWheelchairAccessible(problems)
if (self._schedule and validate_children):
self.ValidateChildren(problems)
|
Validate attributes of this object.
Check that this object has all required values set to a valid value without
reference to the rest of the schedule. If the _schedule attribute is set
then check that references such as route_id and service_id are correct.
Args:
problems: A ProblemReporter object
validate_children: if True and the _schedule attribute is set then call
ValidateChildren
|
codesearchnet
|
def experimental_set_type(self, type_proto) -> None:
with self.graph._c_graph.get() as c_graph:
if type_proto.type_id not in (full_type_pb2.TFT_UNSET, full_type_pb2.TFT_PRODUCT):
raise ValueError('error setting the type of ', self.name, ': expected TFT_UNSET or TFT_PRODUCT, got ', type_proto.type_id)
with c_api_util.tf_buffer(type_proto.SerializeToString()) as serialized:
pywrap_tf_session.SetFullType(c_graph, self._c_op, serialized)
|
Sets the corresponding node's `experimental_type` field.
See the description of `NodeDef.experimental_type` for more info.
Args:
type_proto: A FullTypeDef proto message. The root type_id of this object
must be `TFT_PRODUCT`, even for ops which only have a single return
value.
|
github-repos
|
def volatility(self, strike: types.FloatTensor, expiry_dates: Optional[types.DateTensor]=None, expiry_times: Optional[types.FloatTensor]=None, term: Optional[types.Period]=None) -> types.FloatTensor:
pass
|
Returns the interpolated volatility on a specified set of expiries.
Args:
strike: The strikes for which the interpolation is desired.
expiry_dates: Optional input specifying the expiry dates for which
interpolation is desired. The user should supply either `expiry_dates`
or `expiry_times` for interpolation.
expiry_times: Optional real `Tensor` containing the time to expiration
for which interpolation is desired. The user should supply either
`expiry_dates` or `expiry_times` for interpolation.
term: Optional input specifying the term of the underlying rate for
which the interpolation is desired. Relevant for interest rate implied
volatility data.
Returns:
A `Tensor` of the same shape as `expiry` with the interpolated volatility
from the volatility surface.
|
github-repos
|
def write_payload(payload=None, objectInput=None):
temp = tempfile.mkstemp()[1]
log.debug('Write payload in temp file {!r}'.format(temp))
with open(temp, 'wb') as f:
if payload:
payload = base64.b64decode(payload)
elif objectInput:
if six.PY3:
payload = objectInput.buffer.read()
elif six.PY2:
payload = objectInput.read()
f.write(payload)
return temp
|
This function writes a base64 payload or file object on disk.
Args:
payload (string): payload in base64
objectInput (object): file object/standard input to analyze
Returns:
Path of file
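Illustrative usage ('aGVsbG8=' is the base64 encoding of b'hello'):
>>> path = write_payload(payload='aGVsbG8=')
>>> open(path, 'rb').read()
b'hello'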
|
codesearchnet
|
def adaptive_set(
self,
reannealing_per=50,
thermostat=0.9,
t_min=0.001,
t_default=1.0
):
self.__reannealing_per = reannealing_per
self.__thermostat = thermostat
self.__t_min = t_min
self.__t_default = t_default
|
Init for Adaptive Simulated Annealing.
Args:
reannealing_per: How often this model reanneals, measured in cycles.
thermostat: Thermostat.
t_min: The minimum temperature.
t_default: The default temperature.
|
juraj-google-style
|
def from_dict(cls, tx):
inputs = [Input.from_dict(input_) for input_ in tx['inputs']]
outputs = [Output.from_dict(output) for output in tx['outputs']]
return cls(tx['operation'], tx['asset'], inputs, outputs,
tx['metadata'], tx['version'], hash_id=tx['id'])
|
Transforms a Python dictionary to a Transaction object.
Args:
tx (dict): The Transaction to be transformed.
Returns:
:class:`~bigchaindb.common.transaction.Transaction`
|
juraj-google-style
|
def writeTable(self, tableName):
lock_and_call((lambda : self._impl.writeTable(tableName)), self._lock)
|
Write the table corresponding to the specified name, equivalent to the
AMPL statement
.. code-block:: ampl
write table tableName;
Args:
tableName: Name of the table to be written.
|
codesearchnet
|
def _get_offset_from_gcs(self):
headers = {'content-range': 'bytes */*'}
(status, response_headers, content) = self._api.put_object(self._path_with_token, headers=headers)
errors.check_status(status, [308], self._path, headers, response_headers, content, {'upload_path': self._path_with_token})
val = response_headers.get('range')
if (val is None):
return (- 1)
(_, offset) = val.rsplit('-', 1)
return int(offset)
|
Get the last offset that has been written to GCS.
This is a utility method that does not modify self.
Returns:
an int of the last offset written to GCS by this upload, inclusive.
-1 means nothing has been written.
|
codesearchnet
|
def autodiscover(self, message):
if message["version"] in self.allowed_versions:
logger.debug("<%s> Client version matches server "
"version." % message["cuuid"])
response = serialize_data({"method": "OHAI Client",
"version": self.version,
"server_name": self.server_name},
self.compression,
encryption=False)
else:
logger.warning("<%s> Client version %s does not match allowed server "
"versions %s" % (message["cuuid"],
message["version"],
self.version))
response = serialize_data({"method": "BYE REGISTER"},
self.compression,
encryption=False)
return response
|
This function simply returns the server version number as a response
to the client.
Args:
message (dict): A dictionary of the autodiscover message from the
client.
Returns:
A JSON string of the "OHAI Client" server response with the server's
version number.
Examples:
>>> response
'{"method": "OHAI Client", "version": "1.0"}'
|
juraj-google-style
|
def get_image_features(self, pixel_values: torch.FloatTensor):
image_outputs = self.vision_tower(pixel_values).last_hidden_state
return self.multi_modal_projector(image_outputs)
|
Obtains image last hidden states from the vision tower and apply multimodal projection.
Args:
pixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`)
Returns:
image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).
|
github-repos
|
def get_symmetric_wallace_tensor(self, tau):
wallace = self.get_wallace_tensor(tau)
return Tensor((0.5 * (wallace + np.transpose(wallace, [2, 3, 0, 1]))))
|
Gets the symmetrized wallace tensor for determining
yield strength criteria.
Args:
tau (3x3 array-like): stress at which to evaluate
the wallace tensor.
|
codesearchnet
|
class Constant(Initializer):
def __init__(self, value=0):
self.value = value
def __call__(self, shape, dtype=None, **kwargs):
del kwargs
return constant_op.constant(self.value, dtype=_get_dtype(dtype), shape=shape)
def get_config(self):
return {'value': self.value}
|
Initializer that generates tensors with constant values.
Also available via the shortcut function `tf.keras.initializers.constant`.
Only scalar values are allowed.
The constant value provided must be convertible to the dtype requested
when calling the initializer.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.Constant(3.)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.Constant(3.)
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
value: A Python scalar.
|
github-repos
|
def concrete(self, other=None):
new_system = self.clone()
if other:
new_system.applyFeatures(other, missing="other")
soft_features = self.getValue(SoftFeatures.SOFT, [])
score = 0
for f in sorted(soft_features, key=lambda f: f.soft, reverse=True):
try:
new_system.applyFeatures(f, missing="other")
score += f.soft
except:
pass
new_system.delValue(SoftFeatures.SOFT)
return new_system, score
|
Return a copy and its score after applying the other system and the soft features.
Args:
- other(system, optional): system to apply just before soft features.
Return(tuple): tuple of the resulting system and its score.
|
juraj-google-style
|
def find_custom_args_with_details(file_content: str, custom_args_var_name: str) -> list[dict]:
escaped_variable_name = re.escape(custom_args_var_name)
regex_pattern = f'^\\s*({escaped_variable_name})\\s*=\\s*(r?\\"\\"\\")(.*?)(\\"\\"\\")'
flags = re.MULTILINE | re.DOTALL
match = re.search(regex_pattern, file_content, flags)
if match:
content = match.group(3).strip()
return content
return None
|
Find the given custom args variable in the file content and return its content.
Args:
file_content: The string content of the Python file.
custom_args_var_name: The name of the custom args variable.
|
github-repos
|
def from_string(rxn_string):
(rct_str, prod_str) = rxn_string.split('->')
def get_comp_amt(comp_str):
return {Composition(m.group(2)): float((m.group(1) or 1)) for m in re.finditer('([\\d\\.]*(?:[eE]-?[\\d\\.]+)?)\\s*([A-Z][\\w\\.\\(\\)]*)', comp_str)}
return BalancedReaction(get_comp_amt(rct_str), get_comp_amt(prod_str))
|
Generates a balanced reaction from a string. The reaction must
already be balanced.
Args:
rxn_string:
The reaction string. For example, "4 Li + O2-> 2Li2O"
Returns:
BalancedReaction
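Illustrative usage (mirroring the example string above):
>>> rxn = BalancedReaction.from_string("4 Li + O2 -> 2 Li2O")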
|
codesearchnet
|
def list_devices(device_type=None):
return distribution_lib.list_devices(device_type)
|
Return all the available devices based on the device type.
Note: in a distributed setting, global devices are returned.
Args:
device_type: string, one of `"cpu"`, `"gpu"` or `"tpu"`.
Defaults to `"gpu"` or `"tpu"` if available when
`device_type` is not provided. Otherwise
will return the `"cpu"` devices.
Return:
List of devices that are available for distribute computation.
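Illustrative usage (a sketch; the exact device strings depend on the backend and host):
>>> devices = list_devices("cpu")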
|
github-repos
|
def forward(self, hidden_states: List[torch.Tensor], patch_height, patch_width) -> List[torch.Tensor]:
batch_size = hidden_states[0].shape[0]
hidden_states = torch.cat(hidden_states, dim=0)
cls_token, hidden_states = (hidden_states[:, 0], hidden_states[:, 1:])
total_batch_size, sequence_length, num_channels = hidden_states.shape
hidden_states = hidden_states.reshape(total_batch_size, patch_height, patch_width, num_channels)
hidden_states = hidden_states.permute(0, 3, 1, 2).contiguous()
if self.readout_type == 'project':
hidden_states = hidden_states.flatten(2).permute((0, 2, 1))
readout = cls_token.unsqueeze(dim=1).expand_as(hidden_states)
hidden_states = torch.cat((hidden_states, readout), -1)
elif self.readout_type == 'add':
hidden_states = hidden_states + cls_token.unsqueeze(-1)
out = []
for stage_idx, hidden_state in enumerate(hidden_states.split(batch_size, dim=0)):
if self.readout_type == 'project':
hidden_state = self.readout_projects[stage_idx](hidden_state)
hidden_state = hidden_state.permute(0, 2, 1).reshape(batch_size, -1, patch_height, patch_width)
hidden_state = self.layers[stage_idx](hidden_state)
out.append(hidden_state)
return out
|
Args:
hidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length + 1, hidden_size)`):
List of hidden states from the backbone.
|
github-repos
|
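A small torch sketch (toy dimensions, not the real model) of the core reshape in the block above: dropping the CLS token and folding the patch sequence back into a (batch, channels, height, width) feature map:
import torch

batch_size, patch_height, patch_width, hidden_size = 2, 4, 4, 8
tokens = torch.randn(batch_size, patch_height * patch_width + 1, hidden_size)

cls_token, patches = tokens[:, 0], tokens[:, 1:]
# Sequence of patch tokens back to a 2D feature map.
feature_map = patches.reshape(batch_size, patch_height, patch_width, hidden_size).permute(0, 3, 1, 2)
print(feature_map.shape)  # torch.Size([2, 8, 4, 4])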
def parse_case_data(config=None, ped=None, owner=None, vcf_snv=None, vcf_sv=None, vcf_cancer=None, vcf_str=None, peddy_ped=None, peddy_sex=None, peddy_check=None, delivery_report=None, multiqc=None):
config_data = (copy.deepcopy(config) or {})
if ('analysis_date' not in config_data):
config_data['analysis_date'] = datetime.datetime.now()
if ped:
(family_id, samples) = parse_ped(ped)
config_data['family'] = family_id
config_data['samples'] = samples
if ('owner' not in config_data):
if (not owner):
raise SyntaxError('Case has no owner')
else:
config_data['owner'] = owner
if ('gene_panels' in config_data):
config_data['gene_panels'] = [panel.strip() for panel in config_data['gene_panels']]
config_data['default_gene_panels'] = [panel.strip() for panel in config_data['default_gene_panels']]
config_data['peddy_ped'] = (peddy_ped or config_data.get('peddy_ped'))
config_data['peddy_sex_check'] = (peddy_sex or config_data.get('peddy_sex'))
config_data['peddy_ped_check'] = (peddy_check or config_data.get('peddy_check'))
add_peddy_information(config_data)
config_data['multiqc'] = (multiqc or config_data.get('multiqc'))
config_data['vcf_snv'] = (vcf_snv if vcf_snv else config_data.get('vcf_snv'))
config_data['vcf_sv'] = (vcf_sv if vcf_sv else config_data.get('vcf_sv'))
config_data['vcf_str'] = (vcf_str if vcf_str else config_data.get('vcf_str'))
log.debug('Config vcf_str set to {0}'.format(config_data['vcf_str']))
config_data['vcf_cancer'] = (vcf_cancer if vcf_cancer else config_data.get('vcf_cancer'))
config_data['delivery_report'] = (delivery_report if delivery_report else config_data.get('delivery_report'))
config_data['rank_model_version'] = config_data.get('rank_model_version')
config_data['rank_score_threshold'] = config_data.get('rank_score_threshold', 0)
config_data['track'] = config_data.get('track', 'rare')
if config_data['vcf_cancer']:
config_data['track'] = 'cancer'
return config_data
|
Parse all data necessary for loading a case into Scout.
This can be done either by providing a VCF file and other information
on the command line, or by specifying all the information in a config file.
Please see the Scout documentation for further instructions.
Args:
config(dict): A yaml formatted config file
ped(iterable(str)): A ped formatted family file
owner(str): The institute that owns a case
vcf_snv(str): Path to a vcf file
vcf_str(str): Path to a VCF file
vcf_sv(str): Path to a vcf file
vcf_cancer(str): Path to a vcf file
peddy_ped(str): Path to a peddy ped
multiqc(str): Path to dir with multiqc information
Returns:
config_data(dict): Holds all the necessary information for loading
Scout
|
codesearchnet
|
def switch_to_window(self, window_name):
data = {'name': window_name}
self._execute(Command.SWITCH_TO_WINDOW, data)
|
Switch to the given window.
Support:
Web(WebView)
Args:
window_name(str): The window to change focus to.
Returns:
WebDriver Object.
|
codesearchnet
|
def get(self, tx_id):
pool = current_app.config['bigchain_pool']
with pool() as bigchain:
tx = bigchain.get_transaction(tx_id)
if not tx:
return make_error(404)
return tx.to_dict()
|
API endpoint to get details about a transaction.
Args:
tx_id (str): the id of the transaction.
Return:
A JSON string containing the data about the transaction.
|
juraj-google-style
|
def run_inference(self, batch: Sequence[np.ndarray], engine: TensorRTEngine, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:
return self.inference_fn(batch, engine, inference_args)
|
Runs inferences on a batch of Tensors and returns an Iterable of
TensorRT Predictions.
Args:
batch: A sequence of np.ndarrays, or a np.ndarray that represents
a concatenation of multiple arrays as a batch.
engine: A TensorRT engine.
inference_args: Any additional arguments for an inference
that are not applicable to TensorRT.
Returns:
An Iterable of type PredictionResult.
|
github-repos
|
def Images(self, run, tag):
accumulator = self.GetAccumulator(run)
return accumulator.Images(tag)
|
Retrieve the image events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.ImageEvents`.
|
juraj-google-style
|
def join(self, *args, **kwargs):
super(ThreadReturn, self).join(*args, **kwargs)
return self._return
|
Joins the thread.
Args:
self (ThreadReturn): the ``ThreadReturn`` instance
args: optional list of arguments
kwargs: optional key-word arguments
Returns:
The return value of the exited thread.
|
juraj-google-style
|
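Only join() is shown above; a minimal sketch of the whole pattern might look like the following (the run() override reads Thread's private _target/_args/_kwargs attributes, which is a CPython implementation detail):
import threading

class ThreadReturn(threading.Thread):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._return = None

    def run(self):
        # Capture the target's return value so join() can hand it back.
        if self._target is not None:
            self._return = self._target(*self._args, **self._kwargs)

    def join(self, *args, **kwargs):
        super().join(*args, **kwargs)
        return self._return

t = ThreadReturn(target=lambda x: x * 2, args=(21,))
t.start()
print(t.join())  # 42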
def authenticate(self):
basic_auth = request.authorization
is_valid = False
user = None
if basic_auth:
(is_valid, user) = self.check_basic_auth(basic_auth.username, basic_auth.password)
else:
token = request.headers.get('Authorization', None)
param_token = request.args.get('access_token')
if (token or param_token):
if token:
token = token[6:]
else:
token = param_token
log.debug('Received token: %s', token)
(is_valid, user) = self.check_token_auth(token)
return (is_valid, user)
|
Authenticate the user by any available means and report whether it succeeded.
Args:
Returns:
tuple (is_valid, user): True plus the user if authentication succeeded, otherwise False and None
|
codesearchnet
|
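The token handling above can be illustrated without Flask; this sketch uses plain dicts in place of request.headers and request.args, and the 6-character slice assumes a 'Token ' prefix:
def extract_token(headers, query_params):
    header = headers.get('Authorization')
    if header:
        return header[6:]              # drop the 'Token ' prefix
    return query_params.get('access_token')

print(extract_token({'Authorization': 'Token abc123'}, {}))  # abc123
print(extract_token({}, {'access_token': 'xyz789'}))         # xyz789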
def FindFileByName(self, file_name):
try:
return self._file_descriptors[file_name]
except KeyError:
pass
try:
file_proto = self._internal_db.FindFileByName(file_name)
except KeyError as error:
if self._descriptor_db:
file_proto = self._descriptor_db.FindFileByName(file_name)
else:
raise error
if (not file_proto):
raise KeyError(('Cannot find a file named %s' % file_name))
return self._ConvertFileProtoToFileDescriptor(file_proto)
|
Gets a FileDescriptor by file name.
Args:
file_name: The path to the file to get a descriptor for.
Returns:
A FileDescriptor for the named file.
Raises:
KeyError: if the file cannot be found in the pool.
|
codesearchnet
|
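The cache-then-fallback lookup above is a general pattern; a stripped-down sketch with plain dicts standing in for the descriptor databases:
def find_file(name, cache, primary_db, fallback_db=None):
    if name in cache:
        return cache[name]
    try:
        proto = primary_db[name]
    except KeyError:
        if fallback_db is None:
            raise
        proto = fallback_db[name]
    cache[name] = proto  # the real code converts the proto before caching
    return proto

cache = {}
print(find_file('a.proto', cache, {}, {'a.proto': 'descriptor-for-a'}))  # descriptor-for-a
print(cache)  # {'a.proto': 'descriptor-for-a'}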
def output_vars(self, transitive: bool=False) -> Set[str]:
output_vars = set()
def list_var_defs(k, v, p):
del k, p
if isinstance(v, SymbolDefinition):
output_vars.add(v.name)
if isinstance(v, Function):
return pg.TraverseAction.CONTINUE
return pg.TraverseAction.ENTER
pg.traverse(self.line(), list_var_defs)
if transitive:
parent_func = self.parent_func()
if parent_func is not None:
for i in range(self.line_number(), len(parent_func.body)):
line = parent_func.body[i]
line_input_vars = line.input_vars()
if output_vars & line_input_vars:
output_vars.update(line.output_vars())
return output_vars
|
Returns the output variables defined by this instruction.
Args:
transitive: If True, transitively defined output variables will be included.
Returns:
A set of output variable names.
|
github-repos
|
def ParseFileObject(self, parser_mediator, file_object):
file_offset = 0
try:
(timestamp, event_data) = self._ReadEntry(parser_mediator, file_object, file_offset)
except errors.ParseError as exception:
raise errors.UnableToParseFile('Unable to parse first utmp entry with error: {0!s}'.format(exception))
if (not event_data.username):
raise errors.UnableToParseFile('Unable to parse first utmp entry with error: missing username')
if (not timestamp):
raise errors.UnableToParseFile('Unable to parse first utmp entry with error: missing timestamp')
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_START)
parser_mediator.ProduceEventWithEventData(event, event_data)
file_offset = file_object.tell()
file_size = file_object.get_size()
while (file_offset < file_size):
if parser_mediator.abort:
break
try:
(timestamp, event_data) = self._ReadEntry(parser_mediator, file_object, file_offset)
except errors.ParseError:
break
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_START)
parser_mediator.ProduceEventWithEventData(event, event_data)
file_offset = file_object.tell()
|
Parses an utmp file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
|
codesearchnet
|
def _adjusted_script_code(self, script):
script_code = ByteData()
if script[0] == len(script) - 1:
return script
script_code += VarInt(len(script))
script_code += script
return script_code
|
Checks whether the script code passed to the sighash function is already
length-prepended.
This will break if there's a redeem script that's just a pushdata;
that won't happen in practice.
Args:
script (bytes): the spend script
Returns:
(bytes): the length-prepended script (if necessary)
|
juraj-google-style
|
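ByteData and VarInt come from the surrounding library; as a sketch, the length-prepending check can be shown with raw bytes and a single-byte length (valid only for scripts shorter than 0xfd bytes):
def length_prepend(script: bytes) -> bytes:
    if script and script[0] == len(script) - 1:
        return script                  # already carries its own length
    assert len(script) < 0xfd          # single-byte "varint" only, for illustration
    return bytes([len(script)]) + script

raw = bytes.fromhex('76a914' + '00' * 20 + '88ac')  # a 25-byte P2PKH-style script
prefixed = length_prepend(raw)
print(prefixed[0], len(raw))                        # 25 25
print(length_prepend(prefixed) == prefixed)         # True, idempotent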
def _ParseFileEntry(self, knowledge_base, file_entry):
if not file_entry or not file_entry.link:
raise errors.PreProcessFail(
'Unable to read: {0:s} with error: not a symbolic link'.format(
self.ARTIFACT_DEFINITION_NAME))
_, _, time_zone = file_entry.link.partition('zoneinfo/')
if time_zone:
try:
knowledge_base.SetTimeZone(time_zone)
except ValueError:
pass
|
Parses artifact file system data for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
file_entry (dfvfs.FileEntry): file entry that contains the artifact
value data.
Raises:
errors.PreProcessFail: if the preprocessing fails.
|
juraj-google-style
|
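The zone extraction is just str.partition on the symlink target; a tiny standalone illustration with a made-up link path:
link_target = '/usr/share/zoneinfo/Europe/Stockholm'   # hypothetical /etc/localtime target
_, _, time_zone = link_target.partition('zoneinfo/')
print(time_zone or '<no zone found>')                  # Europe/Stockholm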
def _ExtractContentFromDataStream(
self, mediator, file_entry, data_stream_name):
self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING
if self._processing_profiler:
self._processing_profiler.StartTiming('extracting')
self._event_extractor.ParseDataStream(
mediator, file_entry, data_stream_name)
if self._processing_profiler:
self._processing_profiler.StopTiming('extracting')
self.processing_status = definitions.STATUS_INDICATOR_RUNNING
self.last_activity_timestamp = time.time()
|
Extracts content from a data stream.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
file_entry (dfvfs.FileEntry): file entry to extract its content.
data_stream_name (str): name of the data stream whose content is to be
extracted.
|
juraj-google-style
|
def affine_coupling(name, x, mid_channels=512, activation='relu', reverse=False, dropout=0.0):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
x_shape = common_layers.shape_list(x)
(x1, x2) = tf.split(x, num_or_size_splits=2, axis=(- 1))
z1 = x1
log_scale_and_shift = conv_stack('nn', x1, mid_channels, x_shape[(- 1)], activation=activation, dropout=dropout)
    shift = log_scale_and_shift[:, :, :, 0::2]
    scale = tf.nn.sigmoid(log_scale_and_shift[:, :, :, 1::2] + 2.0)
if (not reverse):
z2 = ((x2 + shift) * scale)
else:
z2 = ((x2 / scale) - shift)
objective = tf.reduce_sum(tf.log(scale), axis=[1, 2, 3])
if reverse:
objective *= (- 1)
return (tf.concat([z1, z2], axis=3), objective)
|
Reversible affine coupling layer.
Args:
name: variable scope.
x: 4-D Tensor.
mid_channels: number of channels in the coupling layer.
activation: Can be either "relu" or "gatu".
reverse: Forward or reverse operation.
dropout: Dropout rate. Defaults to 0.0.
Returns:
output: x shifted and scaled by an affine transformation.
objective: log-determinant of the jacobian
|
codesearchnet
|
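A numpy sketch (not the TF code above) showing why the coupling is exactly invertible: shift and scale depend only on x1, here produced by a toy function instead of conv_stack:
import numpy as np

rng = np.random.default_rng(0)
x1, x2 = rng.normal(size=(2, 4)), rng.normal(size=(2, 4))

shift = 0.3 * x1                                   # toy stand-in for conv_stack output
scale = 1.0 / (1.0 + np.exp(-(0.5 * x1 + 2.0)))    # sigmoid(... + 2.0), strictly positive

z2 = (x2 + shift) * scale        # forward
x2_rec = z2 / scale - shift      # reverse
print(np.allclose(x2, x2_rec))   # True

log_det = np.sum(np.log(scale), axis=-1)  # per-example log-determinant of the Jacobian
print(log_det.shape)                      # (2,)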
def _Execute(self, http):
message = mime_multipart.MIMEMultipart('mixed')
setattr(message, '_write_headers', lambda self: None)
for key in self.__request_response_handlers:
msg = mime_nonmultipart.MIMENonMultipart('application', 'http')
msg['Content-Transfer-Encoding'] = 'binary'
msg['Content-ID'] = self._ConvertIdToHeader(key)
body = self._SerializeRequest(
self.__request_response_handlers[key].request)
msg.set_payload(body)
message.attach(msg)
request = http_wrapper.Request(self.__batch_url, 'POST')
request.body = message.as_string()
request.headers['content-type'] = (
'multipart/mixed; boundary="%s"') % message.get_boundary()
response = http_wrapper.MakeRequest(http, request)
if response.status_code >= 300:
raise exceptions.HttpError.FromResponse(response)
header = 'content-type: %s\r\n\r\n' % response.info['content-type']
content = response.content
if isinstance(content, bytes) and self.__response_encoding:
content = response.content.decode(self.__response_encoding)
parser = email_parser.Parser()
mime_response = parser.parsestr(header + content)
if not mime_response.is_multipart():
raise exceptions.BatchError(
'Response not in multipart/mixed format.')
for part in mime_response.get_payload():
request_id = self._ConvertHeaderToId(part['Content-ID'])
response = self._DeserializeResponse(part.get_payload())
self.__request_response_handlers[request_id] = (
self.__request_response_handlers[request_id]._replace(
response=response))
|
Serialize batch request, send to server, process response.
Args:
http: A httplib2.Http object to be used to make the request with.
Raises:
httplib2.HttpLib2Error if a transport error has occurred.
apiclient.errors.BatchError if the response is the wrong format.
|
juraj-google-style
|
def GetResults(self):
result = analyzer_result.AnalyzerResult()
result.analyzer_name = self.NAME
result.attribute_name = self._ATTRIBUTE_NAME
rule_names = [match.rule for match in self._matches]
result.attribute_value = ','.join(rule_names)
return [result]
|
Retrieves results of the most recent analysis.
Returns:
list[AnalyzerResult]: results.
|
codesearchnet
|
def get_proj(prj_code):
if (prj_code in CUSTOM_PRJ):
proj = pyproj.Proj(CUSTOM_PRJ[prj_code])
else:
proj = pyproj.Proj(init=prj_code)
return proj
|
Helper method for handling projection codes that are unknown to pyproj
Args:
prj_code (str): an epsg proj code
Returns:
projection: a pyproj projection
|
codesearchnet
|
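A hedged usage sketch; note that recent pyproj releases accept the EPSG code directly and deprecate the init= form used above:
import pyproj

utm33n = pyproj.Proj('epsg:32633')   # UTM zone 33N
x, y = utm33n(15.0, 60.0)            # lon, lat -> easting/northing in metres
print(round(x), round(y))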
def set_disk_timeout(timeout, power='ac', scheme=None):
return _set_powercfg_value(scheme=scheme, sub_group='SUB_DISK', setting_guid='DISKIDLE', power=power, value=timeout)
|
Set the disk timeout in minutes for the given power scheme
Args:
timeout (int):
The amount of time in minutes before the disk will timeout
power (str):
Set the value for AC or DC power. Default is ``ac``. Valid options
are:
- ``ac`` (AC Power)
- ``dc`` (Battery)
scheme (str):
The scheme to use, leave as ``None`` to use the current. Default is
``None``. This can be the GUID or the Alias for the Scheme. Known
Aliases are:
- ``SCHEME_BALANCED`` - Balanced
- ``SCHEME_MAX`` - Power saver
- ``SCHEME_MIN`` - High performance
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
# Sets the disk timeout to 30 minutes on battery
salt '*' powercfg.set_disk_timeout 30 power=dc
|
codesearchnet
|
def _process_new(self, feed_item):
if feed_item.get(FieldMap.AD_ACTIVE, None):
self._wait_all_creative_activation(feed_item)
campaign = self._campaign_dao.get(feed_item, required=True)
creative_assignments = []
placement_assignments = []
event_tag_assignments = []
self._process_assignments(feed_item, creative_assignments, placement_assignments, event_tag_assignments, campaign)
creative_rotation = {'creativeAssignments': creative_assignments}
self._setup_rotation_strategy(creative_rotation, feed_item)
delivery_schedule = {'impressionRatio': '1', 'priority': feed_item.get(FieldMap.AD_PRIORITY, None), 'hardCutoff': feed_item.get(FieldMap.AD_HARDCUTOFF, None)}
ad = {'active': feed_item.get(FieldMap.AD_ACTIVE, None), 'archived': feed_item.get(FieldMap.AD_ARCHIVED, None), 'campaignId': campaign['id'], 'creativeRotation': creative_rotation, 'deliverySchedule': delivery_schedule, 'endTime': feed_item.get(FieldMap.AD_END_DATE, None) if 'T' in feed_item.get(FieldMap.AD_END_DATE, None) else StringExtensions.convertDateStrToDateTimeStr(feed_item.get(FieldMap.AD_END_DATE, None), '23:59:59'), 'name': feed_item.get(FieldMap.AD_NAME, None), 'placementAssignments': placement_assignments, 'startTime': feed_item.get(FieldMap.AD_START_DATE, None) if 'T' in feed_item.get(FieldMap.AD_START_DATE, None) else StringExtensions.convertDateStrToDateTimeStr(feed_item.get(FieldMap.AD_START_DATE, None)), 'type': feed_item.get(FieldMap.AD_TYPE, 'AD_SERVING_STANDARD_AD'), 'eventTagOverrides': event_tag_assignments}
self._process_landing_page(ad, feed_item)
return ad
|
Creates a new ad DCM object from a feed item representing an ad from the Bulkdozer feed.
This function simply creates the object to be inserted later by the BaseDAO
object.
Args:
feed_item: Feed item representing the ad from the Bulkdozer feed.
Returns:
An ad object ready to be inserted in DCM through the API.
|
github-repos
|
def set_window_position(self, x, y, window_handle='current'):
self._execute(Command.SET_WINDOW_POSITION, {'x': int(x), 'y': int(y), 'window_handle': window_handle})
|
Sets the x,y position of the current window.
Support:
Web(WebView)
Args:
x(int): the x-coordinate in pixels.
y(int): the y-coordinate in pixels.
window_handle(str): Identifier of the window handle,
defaults to 'current'.
Returns:
WebDriver Object.
|
codesearchnet
|
def ping(self, suffix='public_tokens/'):
return self.remote_utils.ping(super(neuroRemote, self).url(), suffix)
|
Return the status-code of the API (estimated using the public-tokens
lookup page).
Arguments:
suffix (str : 'public_tokens/'): The url endpoint to check
Returns:
int: status code
|
juraj-google-style
|
def add_to_gitignore(line: str):
if not line.endswith('\n'):
line = f'{line}\n'
if GIT_IGNORE.exists():
if line in GIT_IGNORE.read_text(encoding='utf8'):
return
previous_content = GIT_IGNORE.read_text(encoding='utf8')
else:
previous_content = ''
GIT_IGNORE.write_text(previous_content + line, encoding='utf8')
|
Adds a line to the .gitignore file of the repo
Args:
line: line to add
|
juraj-google-style
|
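A self-contained sketch of the same idempotent-append behaviour, with the gitignore path passed in instead of the module-level GIT_IGNORE constant (it writes a .gitignore in the current directory when run):
from pathlib import Path

def add_to_gitignore(line: str, gitignore: Path = Path('.gitignore')):
    if not line.endswith('\n'):
        line = f'{line}\n'
    previous = gitignore.read_text(encoding='utf8') if gitignore.exists() else ''
    if line in previous:
        return                       # already ignored, nothing to do
    gitignore.write_text(previous + line, encoding='utf8')

add_to_gitignore('*.pyc')
add_to_gitignore('*.pyc')            # second call is a no-op
print(Path('.gitignore').read_text(encoding='utf8'))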
def normalize(array, min_value=0., max_value=1.):
arr_min = np.min(array)
arr_max = np.max(array)
normalized = (array - arr_min) / (arr_max - arr_min + K.epsilon())
return (max_value - min_value) * normalized + min_value
|
Normalizes the numpy array to (min_value, max_value)
Args:
array: The numpy array
min_value: The min value in normalized array (Default value = 0)
max_value: The max value in normalized array (Default value = 1)
Returns:
The array normalized to range between (min_value, max_value)
|
juraj-google-style
|
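The only Keras dependency above is K.epsilon(); a numpy-only sketch with a small constant in its place:
import numpy as np

def normalize(array, min_value=0.0, max_value=1.0, eps=1e-7):
    arr_min, arr_max = np.min(array), np.max(array)
    scaled = (array - arr_min) / (arr_max - arr_min + eps)
    return (max_value - min_value) * scaled + min_value

a = np.array([2.0, 4.0, 6.0])
print(normalize(a))              # approximately [0.  0.5 1. ]
print(normalize(a, -1.0, 1.0))   # approximately [-1.  0.  1.]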
def data(self, resource_value, return_value=False):
if return_value:
self._request_entity = None
self._request.add_payload('returnValue', True)
self._request_uri = '{}/{}/data'.format(self._request_uri, resource_value)
|
Alias for metric_name method
+--------------+------------------------------------+
| HTTP Method | API Endpoint URI's |
+==============+====================================+
| POST | /v2/customMetrics/{id}|{name}/data |
+--------------+------------------------------------+
Example
-------
The weight value is optional.
.. code-block:: javascript
{
"value": 1,
"weight": 1,
}
**Keyed Example**
The weight value is optional.
.. code-block:: javascript
{
"value": 1,
"weight": 1,
"name": "src1"
}
Args:
resource_value (string): The metric ID or name.
return_value (bool, optional): When True, ask the API to return the resulting value.
|
codesearchnet
|