code | docstring | source |
---|---|---|
def add_site(self, site):
start_angle = 0
radius = 0
total_occu = 0
for (specie, occu) in site.species.items():
radius += (occu * (specie.ionic_radius if (isinstance(specie, Specie) and specie.ionic_radius) else specie.average_ionic_radius))
total_occu += occu
vis_radius = (0.2 + (0.002 * radius))
for (specie, occu) in site.species.items():
if (not specie):
color = (1, 1, 1)
elif (specie.symbol in self.el_color_mapping):
color = [(i / 255) for i in self.el_color_mapping[specie.symbol]]
mapper = self.add_partial_sphere(site.coords, vis_radius, color, start_angle, (start_angle + (360 * occu)))
self.mapper_map[mapper] = [site]
start_angle += (360 * occu)
if (total_occu < 1):
mapper = self.add_partial_sphere(site.coords, vis_radius, (1, 1, 1), start_angle, (start_angle + (360 * (1 - total_occu))))
self.mapper_map[mapper] = [site]
|
Add a site to the render window. The site is displayed as a sphere, the
color of which is determined based on the element. Partially occupied
sites are displayed as sphere wedges, one per species, with any
remaining unoccupied fraction shown in white.
Args:
site: Site to add.
|
codesearchnet
|
def __init__(self, physaddr, size):
self.mapping = None
self._open(physaddr, size)
|
Instantiate an MMIO object and map the region of physical memory
specified by the address base `physaddr` and size `size` in bytes.
Args:
physaddr (int, long): base physical address of memory region.
size (int, long): size of memory region.
Returns:
MMIO: MMIO object.
Raises:
MMIOError: if an I/O or OS error occurs.
TypeError: if `physaddr` or `size` types are invalid.
|
juraj-google-style
|
def fts_match(self, features, segment):
features = set(features)
if self.seg_known(segment):
return (features <= self.fts(segment))
else:
return None
|
Answer the question "are `features` a subset of the segment's features?"
This is like `FeatureTable.match` except that it checks whether the
segment is valid and returns None if it is not.
Args:
features (set): pattern defined as a set of (value, feature) tuples
segment: segment to test against the feature pattern
Returns:
bool: True iff all features in `features` are also in the segment's
feature set; None if the segment is not valid
|
codesearchnet
|
def report_line(zipfilename: str, contentsfilename: str, line: str, show_inner_file: bool) -> None:
if show_inner_file:
print('{} [{}]: {}'.format(zipfilename, contentsfilename, line))
else:
print('{}: {}'.format(zipfilename, line))
|
Prints a line from a file, with the ``.zip`` filename and optionally also
the inner filename.
Args:
zipfilename: filename of the ``.zip`` file
contentsfilename: filename of the inner file
line: the line from the inner file
show_inner_file: if ``True``, show both filenames; if ``False``, show
just the ``.zip`` filename
|
codesearchnet
|
def _authenticate(secrets_file):
flow = oauthclient.flow_from_clientsecrets(secrets_file, scope=OAUTH_SCOPE, message=('Failed to initialize OAuth 2.0 flow with secrets file: %s' % secrets_file))
storage = oauthfile.Storage(OAUTH_CREDENTIALS_FILE)
credentials = storage.get()
if ((credentials is None) or credentials.invalid):
credentials = oauthtools.run_flow(flow, storage, oauthtools.argparser.parse_args(args=[]))
http = httplib2.Http()
return credentials.authorize(http)
|
Runs the OAuth 2.0 installed application flow.
Args:
secrets_file: path to the OAuth 2.0 client secrets file.
Returns:
An authorized httplib2.Http instance.
|
codesearchnet
|
def match_rules(tree, rules, fun=None, multi=False):
if multi:
context = match_rules_context_multi(tree, rules)
else:
context = match_rules_context(tree, rules)
if (not context):
return None
if fun:
args = fun.__code__.co_varnames
if multi:
res = []
for c in context:
action_context = {}
for arg in args:
if (arg in c):
action_context[arg] = c[arg]
res.append(fun(**action_context))
return res
else:
action_context = {}
for arg in args:
if (arg in context):
action_context[arg] = context[arg]
return fun(**action_context)
else:
return context
|
Matches a Tree structure with the given query rules.
Query rules are represented as a dictionary of template to action.
Action is either a function, or a dictionary of subtemplate parameter to rules::
rules = { 'template' : { 'key': rules } }
| { 'template' : {} }
Args:
tree (Tree): Parsed tree structure
rules (dict): A dictionary of query rules
fun (function): Function to call with context (set to None if you want to return context)
multi (bool): If True, returns all matched contexts; otherwise returns the first matched context
Returns:
Contexts from matched rules
|
codesearchnet
|
def authenticate_identify(self, api_token, override=True):
if (self.context.has_auth_params('Gem-Identify') and (not override)):
raise OverrideError('Gem-Identify')
if ((not api_token) or (not self.context.authorize('Gem-Identify', api_token=api_token))):
raise AuthUsageError(self.context, 'Gem-Identify')
return True
|
Set credentials for Identify authentication.
Args:
api_token (str): Token issued to your Application through the Gem
Developer Console.
override (boolean): Replace existing Application credentials.
|
codesearchnet
|
class MeanAbsoluteError(reduction_metrics.MeanMetricWrapper):
def __init__(self, name='mean_absolute_error', dtype=None):
super().__init__(mean_absolute_error, name, dtype=dtype)
self._direction = 'down'
def get_config(self):
return {'name': self.name, 'dtype': self.dtype}
|
Computes the mean absolute error between the labels and predictions.
Formula:
```python
loss = mean(abs(y_true - y_pred))
```
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Examples:
>>> m = keras.metrics.MeanAbsoluteError()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result()
0.25
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result()
0.5
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[keras.metrics.MeanAbsoluteError()])
```
|
github-repos
|
def process_alias_create_namespace(namespace):
namespace = filter_alias_create_namespace(namespace)
_validate_alias_name(namespace.alias_name)
_validate_alias_command(namespace.alias_command)
_validate_alias_command_level(namespace.alias_name, namespace.alias_command)
_validate_pos_args_syntax(namespace.alias_name, namespace.alias_command)
|
Validate input arguments when the user invokes 'az alias create'.
Args:
namespace: argparse namespace object.
|
codesearchnet
|
def operator_driven(drain_timeout=_DEFAULT_DRAIN, reset_timeout=_DEFAULT_RESET, max_consecutive_attempts=_DEFAULT_ATTEMPTS):
return ConsistentRegionConfig(trigger=ConsistentRegionConfig.Trigger.OPERATOR_DRIVEN, drain_timeout=drain_timeout, reset_timeout=reset_timeout, max_consecutive_attempts=max_consecutive_attempts)
|
Define an operator-driven consistent region configuration.
The source operator triggers drain and checkpoint cycles for the region.
Args:
drain_timeout: The drain timeout, as either a :py:class:`datetime.timedelta` value or the number of seconds as a `float`. If not specified, the default value is 180 seconds.
reset_timeout: The reset timeout, as either a :py:class:`datetime.timedelta` value or the number of seconds as a `float`. If not specified, the default value is 180 seconds.
max_consecutive_attempts(int): The maximum number of consecutive attempts to reset the region. This must be an integer value between 1 and 2147483647, inclusive. If not specified, the default value is 5.
Returns:
ConsistentRegionConfig: the configuration.
|
codesearchnet
|
def pprint_table(table, out=sys.stdout, rstrip=False):
def max_width_col(table, col_idx):
return max([len(row[col_idx]) for row in table])
if rstrip:
for row_idx, row in enumerate(table):
table[row_idx] = [c.rstrip() for c in row]
col_paddings = []
ncols = len(table[0])
for i in range(ncols):
col_paddings.append(max_width_col(table, i))
for row in table:
out.write(row[0].ljust(col_paddings[0] + 1))
for i in range(1, len(row)):
col = row[i].rjust(col_paddings[i] + 2)
out.write(col)
out.write("\n")
|
Prints out a table of data, padded for alignment
Each row must have the same number of columns.
Args:
table: The table to print. A list of lists.
out: Output stream (file-like object)
rstrip: if True, trailing whitespace is removed from the entries.
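A minimal usage sketch (all cells must already be strings, since column
widths are computed with len()):
```python
import sys

table = [
    ["name", "value"],
    ["alpha", "1"],
    ["beta", "22"],
]
# First column is left-justified, remaining columns right-justified.
pprint_table(table, out=sys.stdout)
```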
|
juraj-google-style
|
def _UnserializableObjectFallback(self, obj):
if isinstance(obj, libpython.PyInstanceObjectPtr):
in_class = obj.pyop_field('in_class')
result_dict = in_class.pyop_field('cl_dict').proxyval(set())
instanceproxy = obj.proxyval(set())
result_dict.update(instanceproxy.attrdict)
result_dict['__pyringe_type_name__'] = instanceproxy.cl_name
result_dict['__pyringe_address__'] = instanceproxy.address
return result_dict
if isinstance(obj, libpython.HeapTypeObjectPtr):
try:
type_ptr = obj.field('ob_type')
tp_dict = type_ptr.cast(GdbCache.TYPE)['tp_dict'].cast(GdbCache.DICT)
result_dict = libpython.PyDictObjectPtr(tp_dict).proxyval(set())
except gdb.error:
result_dict = {}
try:
result_dict.update(obj.get_attr_dict().proxyval(set()))
result_dict['__pyringe_type_name__'] = obj.safe_tp_name()
result_dict['__pyringe_address__'] = long(obj._gdbval)
return result_dict
except TypeError:
pass
try:
proxy = obj.proxyval(set())
if isinstance(proxy, dict):
return {str(key): val for key, val in proxy.iteritems()}
return proxy
except AttributeError:
return str(obj)
|
Handles sanitizing of unserializable objects for Json.
For instances of heap types, we take the class dict, augment it with the
instance's __dict__, tag it and transmit it over to the RPC client to be
reconstructed there. (Works with both old and new style classes)
Args:
obj: The object to Json-serialize
Returns:
A Json-serializable version of the parameter
|
juraj-google-style
|
def change_window(self, size_window):
self.size_window = size_window
self.window = self.lambert_window(self.size_window, self.lat0, self.lon0)
|
Change the region of interest
Args:
size_window (float): Radius of the region of interest (km)
Notes:
Change the attributes ``size_window`` and ``window`` to
correspond to the new region of interest.
|
codesearchnet
|
def account_distance(A1, A2):
return (sum([action.alpha for action in A1]) -
sum([action.alpha for action in A2]))
|
Return the distance between two accounts. Here that is just the
difference in sum(alpha)
Args:
A1 (Account): The first account.
A2 (Account): The second account
Returns:
float: The distance between the two accounts.
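A minimal worked sketch; `Action` here is a hypothetical stand-in that
exposes only the `alpha` field this function reads:
```python
from collections import namedtuple

Action = namedtuple("Action", "alpha")  # hypothetical stand-in for the real Action type

A1 = [Action(2.0), Action(3.0)]  # sum(alpha) = 5.0
A2 = [Action(1.5)]               # sum(alpha) = 1.5
print(account_distance(A1, A2))  # 3.5
```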
|
juraj-google-style
|
def write_additional(self, productversion, channel):
self.fileobj.seek(self.additional_offset)
extras = extras_header.build(dict(
count=1,
sections=[dict(
channel=six.u(channel),
productversion=six.u(productversion),
size=len(channel) + len(productversion) + 2 + 8,
padding=b'',
)],
))
self.fileobj.write(extras)
self.last_offset = self.fileobj.tell()
|
Write the additional information to the MAR header.
Args:
productversion (str): product and version string
channel (str): channel string
|
juraj-google-style
|
def _add_arg_python(self, key, value=None, mask=False):
self._data[key] = value
if (not value):
pass
elif (value is True):
self._args.append('--{}'.format(key))
self._args_quoted.append('--{}'.format(key))
self._args_masked.append('--{}'.format(key))
else:
self._args.append('--{}={}'.format(key, value))
if mask:
value = ('x' * len(str(value)))
else:
value = self.quote(value)
self._args_quoted.append('--{}={}'.format(key, value))
self._args_masked.append('--{}={}'.format(key, value))
|
Add CLI Arg formatted specifically for Python.
Args:
key (string): The CLI Args key (e.g., --name).
value (string): The CLI Args value (e.g., bob).
mask (boolean, default:False): Indicates whether the value should be masked.
|
codesearchnet
|
def __init__(self, client, search_query: str) -> None:
self._client = client
self._search_query = search_query
|
Initializer.
Initializes the FhirSearchRunner with the user-provided FHIR client and
search query.
Args:
client: FHIR Client for the FHIR server where queries will be run against.
Views will be created from the response.
search_query: Query used to fetch the subset of data from the FHIR server.
|
github-repos
|
def acknowledge(self, **kwargs):
device_id = kwargs['device_id']
config = self.get_config()
if 'r_folder_id' in kwargs:
r_folder_id = kwargs['r_folder_id']
remote_folder = syncthing_adt.Folder(
id=r_folder_id,
label=kwargs['label'],
path=kwargs['local_path'],
deviceID=self.get_device_id(),
rescanIntervalS=kwargs['interval']
)
remote_folder.add_device(device_id)
remote_folder = remote_folder.obj
else:
remote_folder = kwargs['folder_obj']
remote_folder['path'] = kwargs['local_path']
if kwargs['interval']:
remote_folder['rescanIntervalS'] = kwargs['interval']
r_folder_id = remote_folder['id']
if self.folder_exists({'path' : kwargs['local_path']}, config):
raise ValueError('This folder has already been added.')
config['folders'].append(remote_folder)
config['label'] = kwargs['label']
self.new_device(config=config, device_id=device_id)
device = self.find_device(device_id, config)
if device:
device['name'] = kwargs['hostname']
self.adapter.set_dir_config({
'device_id' : device_id,
'api_key' : kwargs['api_key'] if 'api_key' in kwargs else '',
'label' : kwargs['label'],
'local_path' : kwargs['local_path'],
'is_shared' : True,
'server' : kwargs['server'] if 'server' in kwargs else False,
'host' : kwargs['host'] if 'host' in kwargs else None,
'remote_path': kwargs['remote_path'] if 'remote_path' in kwargs else '',
'port' : kwargs['port'] if 'port' in kwargs else None
})
self.set_config(config)
self.restart()
|
Commit the shared remote folder data into local config.xml file
1. Update the remote_folder path and label
2. Append the remote_folder to config folders list
Args:
remote_folder(folder): syncthing folder object
local_path: existing local path
|
juraj-google-style
|
def refresh(self, request):
try:
self._retrieve_info(request)
(self.token, self.expiry) = _metadata.get_service_account_token(request, service_account=self._service_account_email)
except exceptions.TransportError as caught_exc:
new_exc = exceptions.RefreshError(caught_exc)
six.raise_from(new_exc, caught_exc)
|
Refresh the access token and scopes.
Args:
request (google.auth.transport.Request): The object used to make
HTTP requests.
Raises:
google.auth.exceptions.RefreshError: If the Compute Engine metadata
service can't be reached or if the instance has no
credentials.
|
codesearchnet
|
def pow(x, y, name=None):
with ops.name_scope(name, 'Pow', [x]) as name:
return gen_math_ops._pow(x, y, name=name)
|
Computes the power of one value to another.
Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
corresponding elements in `x` and `y`. For example:
```python
x = tf.constant([[2, 2], [3, 3]])
y = tf.constant([[8, 16], [2, 3]])
tf.pow(x, y) # [[256, 65536], [9, 27]]
```
Args:
x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
`complex64`, or `complex128`.
y: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
`complex64`, or `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`.
|
github-repos
|
def read_profile(name):
config = configparser.ConfigParser()
config.read(CONFIG_FILE)
profile = config[name]
repo = profile['repo']
token = profile['token']
return {'repo': repo, 'token': token}
|
Get a named profile from the CONFIG_FILE.
Args:
name
The name of the profile to load.
Returns:
A dictionary with the profile's ``repo`` and ``token`` values.
|
codesearchnet
|
def reset_logformat(logger: logging.Logger,
fmt: str,
datefmt: str = '%Y-%m-%d %H:%M:%S') -> None:
handler = logging.StreamHandler()
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler.setFormatter(formatter)
remove_all_logger_handlers(logger)
logger.addHandler(handler)
logger.propagate = False
|
Create a new formatter and apply it to the logger.
:func:`logging.basicConfig` won't reset the formatter if another module
has called it, so always set the formatter like this.
Args:
logger: logger to modify
fmt: passed to the ``fmt=`` argument of :class:`logging.Formatter`
datefmt: passed to the ``datefmt=`` argument of
:class:`logging.Formatter`
|
juraj-google-style
|
def shebang(self, new_shebang):
if (not self.shebang):
raise ValueError('Cannot modify a shebang if it does not exist.')
if (not new_shebang.startswith('#!')):
raise ValueError('Invalid shebang.')
self.writeline(new_shebang, 0)
|
Write a new shebang to the file.
Raises:
ValueError: If the file has no shebang to modify.
ValueError: If the new shebang is invalid.
|
codesearchnet
|
def fingerprints(data):
Hashes = namedtuple('Hashes', "md5 sha1 sha256 sha512")
if six.PY2:
if not isinstance(data, str):
data = data.encode("utf-8")
elif six.PY3:
if not isinstance(data, bytes):
data = data.encode("utf-8")
md5 = hashlib.md5()
md5.update(data)
md5 = md5.hexdigest()
sha1 = hashlib.sha1()
sha1.update(data)
sha1 = sha1.hexdigest()
sha256 = hashlib.sha256()
sha256.update(data)
sha256 = sha256.hexdigest()
sha512 = hashlib.sha512()
sha512.update(data)
sha512 = sha512.hexdigest()
return Hashes(md5, sha1, sha256, sha512)
|
This function returns the fingerprints of the given data.
Args:
data (string): raw data
Returns:
namedtuple: fingerprints md5, sha1, sha256, sha512
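A short usage sketch; each field of the returned namedtuple is a hex digest:
```python
hashes = fingerprints("hello world")
print(hashes.md5)     # MD5 hex digest of the UTF-8 encoded input
print(hashes.sha256)  # SHA-256 hex digest
```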
|
juraj-google-style
|
def get_qemu_info(path, backing_chain=False, fail_on_error=True):
cmd = ['qemu-img', 'info', '--output=json', path]
if backing_chain:
cmd.insert(-1, '--backing-chain')
result = run_command_with_validation(
cmd, fail_on_error, msg='Failed to get info for {}'.format(path)
)
return json.loads(result.out)
|
Get info on a given qemu disk
Args:
path(str): Path to the required disk
backing_chain(bool): if True, also include info about
the image's predecessors.
Return:
object: if backing_chain == True then a list of dicts else a dict
|
juraj-google-style
|
def consume(self, data):
try:
self._streamer.consume(data)
except YajlError as ye:
print(ye.value)
raise JSONStreamerException(ye.value)
|
Takes input that must be parsed
Note:
Attach all your listeners before calling this method
Args:
data (str): input json string
|
juraj-google-style
|
def print_object_results(obj_result):
print_results_header(obj_result.object_id, obj_result.is_valid)
if obj_result.warnings:
print_warning_results(obj_result, 1)
if obj_result.errors:
print_schema_results(obj_result, 1)
|
Print the results of validating an object.
Args:
obj_result: An ObjectValidationResults instance.
|
juraj-google-style
|
def blacken_code(code):
if black is None:
raise NotImplementedError
major, minor, _ = platform.python_version_tuple()
pyversion = 'py{major}{minor}'.format(major=major, minor=minor)
target_versions = [black.TargetVersion[pyversion.upper()]]
line_length = black.DEFAULT_LINE_LENGTH
string_normalization = True
mode = black.FileMode(
target_versions=target_versions,
line_length=line_length,
string_normalization=string_normalization,
)
return black.format_file_contents(code, fast=False, mode=mode)
|
Format code content using Black
Args:
code (str): code as string
Returns:
str
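A small usage sketch (requires the `black` package; note that
`black.format_file_contents` raises `black.NothingChanged` if the input
is already formatted):
```python
messy = "x = { 'a':1,'b':2 }\n"
print(blacken_code(messy))  # prints the Black-formatted source, e.g. x = {"a": 1, "b": 2}
```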
|
juraj-google-style
|
def create_package(name, data, package_cls=None):
from rez.package_maker__ import PackageMaker
maker = PackageMaker(name, data, package_cls=package_cls)
return maker.get_package()
|
Create a package given package data.
Args:
name (str): Package name.
data (dict): Package data. Must conform to `package_maker.package_schema`.
Returns:
`Package` object.
|
codesearchnet
|
def tap_hold(self, x, y, duration=1.0):
data = {'x': x, 'y': y, 'duration': duration}
return self.http.post('/wda/touchAndHold', data=data)
|
Tap and hold for a moment
Args:
- x, y(int): position
- duration(float): seconds of hold time
[[FBRoute POST:@"/wda/touchAndHold"] respondWithTarget:self action:@selector(handleTouchAndHoldCoordinate:)],
|
codesearchnet
|
def __init__(self, window, root):
self.root = root
self.selenium = window.selenium
self.wait = window.wait
self.window = window
|
Create a Region object.
Args:
window (:py:class:`BaseWindow`): Window object this region appears
in.
root
(:py:class:`~selenium.webdriver.remote.webelement.WebElement`):
WebDriver element object that serves as the root for the
region.
|
juraj-google-style
|
def request_unwatch(self, node_name, output_slot, debug_op):
self._debug_ops_state_change_queue.put(_state_change(debug_service_pb2.EventReply.DebugOpStateChange.DISABLED, node_name, output_slot, debug_op))
|
Request disabling a debug tensor watchpoint or breakpoint.
This is the opposite of `request_watch()`.
Args:
node_name: (`str`) name of the node that the to-be-watched tensor belongs
to, e.g., "hidden/Weights".
output_slot: (`int`) output slot index of the tensor to watch.
debug_op: (`str`) name of the debug op to disable. This should not include
any attribute substrings.
|
github-repos
|
def read_tabular(filepath):
_, fn, ext = splitext2(filepath)
if ext == '.h5':
return _read_tabular_h5(filepath)
elif ext == '.pkl':
return _read_tabular_pickle(filepath)
else:
raise NotImplementedError
|
Read tabular object in HDF5 or pickle format
Args:
filepath (path-like): path to read to; must end in '.h5' or '.pkl'
|
juraj-google-style
|
def _processing_limit(self, spec):
processing_rate = float(spec.mapper.params.get("processing_rate", 0))
slice_processing_limit = -1
if processing_rate > 0:
slice_processing_limit = int(math.ceil(
parameters.config._SLICE_DURATION_SEC*processing_rate/
int(spec.mapper.shard_count)))
return slice_processing_limit
|
Get the limit on the number of map calls allowed by this slice.
Args:
spec: a Mapreduce spec.
Returns:
The limit as a positive int if specified by user. -1 otherwise.
|
juraj-google-style
|
def __init__(self, mfr_desc=None, hw_desc=None, sw_desc=None,
serial_num=None, dp_desc=None):
super().__init__()
self.mfr_desc = mfr_desc
self.hw_desc = hw_desc
self.sw_desc = sw_desc
self.serial_num = serial_num
self.dp_desc = dp_desc
|
Create a Desc with the optional parameters below.
Args:
mfr_desc (str): Manufacturer description
hw_desc (str): Hardware description
sw_desc (str): Software description
serial_num (str): Serial number
dp_desc (str): Datapath description
|
juraj-google-style
|
def compatible_firmware_version(self):
identifier = self.firmware_version.split('compiled')[0]
buf_size = self.MAX_BUF_SIZE
buf = (ctypes.c_char * buf_size)()
res = self._dll.JLINKARM_GetEmbeddedFWString(identifier.encode(), buf, buf_size)
if res < 0:
raise errors.JLinkException(res)
return ctypes.string_at(buf).decode()
|
Returns the DLL's compatible J-Link firmware version.
Args:
self (JLink): the ``JLink`` instance
Returns:
The firmware version of the J-Link that the DLL is compatible
with.
Raises:
JLinkException: on error.
|
juraj-google-style
|
def cube():
app = Ice()
@app.get('/')
def default_home_page():
'Return a default home page.'
return simple_html('It works!', '<h1>It works!</h1>\n<p>This is the default ice web page.</p>')
@app.error()
def generic_error_page():
'Return a simple and generic error page.'
return simple_html(app.response.status_line, '<h1>{title}</h1>\n<p>{description}</p>\n<hr>\n<address>Ice/{version}</address>'.format(title=app.response.status_line, description=app.response.status_detail, version=__version__))
def simple_html(title, body):
'Return a simple HTML page.'
return '<!DOCTYPE html>\n<html>\n<head><title>{title}</title></head>\n<body>\n{body}\n</body>\n</html>\n'.format(title=title, body=body)
return app
|
Return an Ice application with a default home page.
Create :class:`Ice` object, add a route to return the default page
when a client requests the server root, i.e. /, using HTTP GET
method, add an error handler to return HTTP error pages when an
error occurs and return this object. The returned object can be used
as a WSGI application.
Returns:
Ice: WSGI application.
|
codesearchnet
|
def poisson_ll(data, means):
if sparse.issparse(data):
return sparse_poisson_ll(data, means)
genes, cells = data.shape
clusters = means.shape[1]
ll = np.zeros((cells, clusters))
for i in range(clusters):
means_i = np.tile(means[:,i], (cells, 1))
means_i = means_i.transpose() + eps
ll[:,i] = np.sum(xlogy(data, means_i) - means_i, 0)
return ll
|
Calculates the Poisson log-likelihood.
Args:
data (array): 2d numpy array of genes x cells
means (array): 2d numpy array of genes x k
Returns:
cells x k array of log-likelihood for each cell/cluster pair
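A minimal usage sketch with toy dimensions (5 genes, 10 cells, 3 clusters):
```python
import numpy as np

np.random.seed(0)
data = np.random.poisson(lam=2.0, size=(5, 10))  # genes x cells counts
means = np.random.rand(5, 3) + 0.1               # genes x k cluster means
ll = poisson_ll(data, means)
print(ll.shape)  # (10, 3): one log-likelihood per cell/cluster pair
```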
|
juraj-google-style
|
def __init__(self, assign_defaults=(), method_name=None, overwrite=False):
if isinstance(assign_defaults, str):
self._assign_defaults = [assign_defaults]
else:
self._assign_defaults = assign_defaults
self._method_name = method_name
self._overwrite = overwrite
_valid_defaults.update(self._assign_defaults)
default_args = sorted(_valid_defaults)
default_values = [None] * len(_valid_defaults)
if six.PY2:
default_func = PrettyTensor.with_defaults.__func__
else:
default_func = PrettyTensor.with_defaults
_set_ipython_string(default_func, default_args, default_values,
_original_set_defaults_doc)
_set_ipython_string(defaults_scope, default_args, default_values,
_original_defaults_scope_doc)
|
Assigns arguments to the decorator.
Args:
assign_defaults: A sequence of strings for the default values that should
be provided.
method_name: If provided, use this as the method_name instead of the
wrapped function's name.
overwrite: If False, throw an exception if this method has already been
registered. True should be used in interactive environments or with
great care.
|
juraj-google-style
|
def put_event(self, evt):
evt.step = self.global_step
evt.wall_time = time.time()
self._dispatch(lambda m: m.process_event(evt))
|
Put an :class:`tf.Event`.
`step` and `wall_time` fields of :class:`tf.Event` will be filled automatically.
Args:
evt (tf.Event):
|
juraj-google-style
|
def torch_distributed_zero_first(local_rank: int):
if local_rank not in [-1, 0]:
dist.barrier()
yield
if local_rank == 0:
dist.barrier()
|
Context manager to make all processes in distributed training wait for each local_master to do something.
Args:
local_rank (`int`): The rank of the local process.
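A usage sketch, assuming the function is wrapped with
`contextlib.contextmanager` (its body uses `yield`); `build_dataset` is a
hypothetical placeholder:
```python
# local_rank comes from the launcher (e.g. torchrun / torch.distributed.launch)
with torch_distributed_zero_first(local_rank):
    # local rank 0 runs this first (e.g. downloads and caches data);
    # the other ranks wait at the barrier, then read from the cache
    dataset = build_dataset()  # hypothetical placeholder
```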
|
github-repos
|
def transfer_project(self, to_project_id, **kwargs):
path = '/groups/%s/projects/%s' % (self.id, to_project_id)
self.manager.gitlab.http_post(path, **kwargs)
|
Transfer a project to this group.
Args:
to_project_id (int): ID of the project to transfer
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTransferProjectError: If the project could not be transfered
|
juraj-google-style
|
def _init_journal(self, permissive=True):
nowstamp = datetime.now().strftime('%d-%b-%Y %H:%M:%S.%f')[:(- 3)]
self._add_entry(templates.INIT.format(time_stamp=nowstamp))
if permissive:
self._add_entry(templates.INIT_DEBUG)
|
Add the initialization lines to the journal.
By default adds JrnObj variable and timestamp to the journal contents.
Args:
permissive (bool): if True most errors in journal will not
cause Revit to stop journal execution.
Some still do.
|
codesearchnet
|
def placeOrder(self, contract: Contract, order: Order) -> Trade:
orderId = order.orderId or self.client.getReqId()
self.client.placeOrder(orderId, contract, order)
now = datetime.datetime.now(datetime.timezone.utc)
key = self.wrapper.orderKey(
self.wrapper.clientId, orderId, order.permId)
trade = self.wrapper.trades.get(key)
if trade:
assert trade.orderStatus.status not in OrderStatus.DoneStates
logEntry = TradeLogEntry(now, trade.orderStatus.status, 'Modify')
trade.log.append(logEntry)
self._logger.info(f'placeOrder: Modify order {trade}')
trade.modifyEvent.emit(trade)
self.orderModifyEvent.emit(trade)
else:
order.clientId = self.wrapper.clientId
order.orderId = orderId
orderStatus = OrderStatus(status=OrderStatus.PendingSubmit)
logEntry = TradeLogEntry(now, orderStatus.status, '')
trade = Trade(
contract, order, orderStatus, [], [logEntry])
self.wrapper.trades[key] = trade
self._logger.info(f'placeOrder: New order {trade}')
self.newOrderEvent.emit(trade)
return trade
|
Place a new order or modify an existing order.
Returns a Trade that is kept live updated with
status changes, fills, etc.
Args:
contract: Contract to use for order.
order: The order to be placed.
|
juraj-google-style
|
def save_images(images, filenames, output_dir):
for i, filename in enumerate(filenames):
with tf.gfile.Open(os.path.join(output_dir, filename), 'w') as f:
imsave(f, (images[i, :, :, :] + 1.0) * 0.5, format='png')
|
Saves images to the output directory.
Args:
images: array with minibatch of images
filenames: list of filenames without path
If the number of file names in this list is less than the number of
images in the minibatch, then only the first len(filenames) images will be saved.
output_dir: directory where to save images
|
juraj-google-style
|
def _global_batch_size(self):
return True
|
`make_dataset_iterator` and `make_numpy_iterator` use global batch size.
`make_input_fn_iterator` assumes per-replica batching.
Returns:
Boolean.
|
github-repos
|
def pow(x, a):
return math_ops.pow(x, a)
|
Element-wise exponentiation.
Args:
x: Tensor or variable.
a: Python integer.
Returns:
A tensor.
|
github-repos
|
def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
return self.sp_model.decode(token_ids)
|
Decodes a token id or a sequence of token ids to text using the raw SP tokenizer. This has reduced
functionality but is often much faster.
Args:
token_ids (`int` or `List[int]`): Encoded token or text as token id(s).
Returns:
`str`: Decoded text
|
github-repos
|
def path_is_empty(p: tcod.path.AStar) -> bool:
return bool(lib.TCOD_path_is_empty(p._path_c))
|
Return True if a path is empty.
Args:
p (AStar): An AStar instance.
Returns:
bool: True if a path is empty. Otherwise False.
|
juraj-google-style
|
def returns_scalar(return_type: Optional[FhirPathDataType]) -> bool:
return not return_type or return_type.cardinality == Cardinality.SCALAR
|
Indicates if the return type evaluates to a scalar.
Args:
return_type: The data type to describe.
Returns:
True if `return_type` represents an element with cardinality less than or
equal to one whose parents are all also scalars.
False otherwise. For example, the path Patient.name.use does not return a
scalar, despite 'use' being a scalar, because it is a child of the
collection, 'name.'
|
github-repos
|
def get_representations_of_kind(kind, start=None, end=None):
q = Property.query(ancestor=Property.key_for_kind(kind))
if ((start is not None) and (start != '')):
q = q.filter((Property.key >= Property.key_for_property(kind, start)))
if (end is not None):
if (end == ''):
return {}
q = q.filter((Property.key < Property.key_for_property(kind, end)))
result = {}
for property in q:
result[property.property_name] = property.property_representation
return result
|
Return all representations of properties of kind in the specified range.
NOTE: This function does not return unindexed properties.
Args:
kind: name of kind whose properties you want.
start: only return properties >= start if start is not None.
end: only return properties < end if end is not None.
Returns:
A dictionary mapping property names to their lists of representations.
|
codesearchnet
|
def login(self, username, password, state=None, sync=True):
auth = APIAuth(self.OAUTH_SCOPES)
ret = auth.login(username, password, get_mac())
if ret:
self.load(auth, state, sync)
return ret
|
Authenticate to Google with the provided credentials & sync.
Args:
username (str): The account to use.
password (str): The account password.
state (dict): Serialized state to load.
Raises:
LoginException: If there was a problem logging in.
|
codesearchnet
|
def setKstar(self,term_i,Ks):
assert Ks.shape[0]==self.N
self.vd.getTerm(term_i).getKcf().setK0cross(Ks)
|
Set the kernel for predictions
Args:
term_i: index of the term we are interested in
Ks: (TODO: is this the covariance between train and test or the covariance between test points?)
|
juraj-google-style
|
def to_hising(self):
if (self.vartype is Vartype.BINARY):
return self.to_spin().to_hising()
h = {}
J = {}
offset = 0
for (term, bias) in self.items():
if (len(term) == 0):
offset += bias
elif (len(term) == 1):
(v,) = term
h[v] = bias
else:
J[tuple(term)] = bias
return (h, J, offset)
|
Construct a higher-order Ising problem from a binary polynomial.
Returns:
tuple: A 3-tuple of the form (`h`, `J`, `offset`) where `h` includes
the linear biases, `J` has the higher-order biases and `offset` is
the linear offset.
Examples:
>>> poly = dimod.BinaryPolynomial({'a': -1, 'ab': 1, 'abc': -1}, dimod.SPIN)
>>> h, J, off = poly.to_hising()
>>> h
{'a': -1}
|
codesearchnet
|
def _set_current(self, new_current):
new_cur_full_path = self.join(new_current)
if not os.path.exists(new_cur_full_path):
raise PrefixNotFound(
'Prefix "%s" does not exist in workdir %s' %
(new_current, self.path)
)
if os.path.lexists(self.join('current')):
os.unlink(self.join('current'))
os.symlink(new_current, self.join('current'))
self.current = new_current
|
Change the current default prefix, for internal usage
Args:
new_current(str): Name of the new current prefix, it must already
exist
Returns:
None
Raises:
PrefixNotFound: if the given prefix name does not exist in the
workdir
|
juraj-google-style
|
def _maybe_set_current_user_vars(method, api_info=None, request=None):
if _is_auth_info_available():
return
os.environ[_ENV_AUTH_EMAIL] = ''
os.environ[_ENV_AUTH_DOMAIN] = ''
try:
api_info = api_info or method.im_self.api_info
except AttributeError:
_logger.warning('AttributeError when accessing %s.im_self. An unbound '
'method was probably passed as an endpoints handler.',
method.__name__)
scopes = method.method_info.scopes
audiences = method.method_info.audiences
allowed_client_ids = method.method_info.allowed_client_ids
else:
scopes = (method.method_info.scopes
if method.method_info.scopes is not None
else api_info.scopes)
audiences = (method.method_info.audiences
if method.method_info.audiences is not None
else api_info.audiences)
allowed_client_ids = (method.method_info.allowed_client_ids
if method.method_info.allowed_client_ids is not None
else api_info.allowed_client_ids)
if not scopes and not audiences and not allowed_client_ids:
return
token = _get_token(request)
if not token:
return None
if allowed_client_ids and _is_local_dev():
allowed_client_ids = (constants.API_EXPLORER_CLIENT_ID,) + tuple(allowed_client_ids)
if ((scopes == [_EMAIL_SCOPE] or scopes == (_EMAIL_SCOPE,)) and
allowed_client_ids):
_logger.debug('Checking for id_token.')
issuers = api_info.issuers
if issuers is None:
issuers = _DEFAULT_GOOGLE_ISSUER
elif 'google_id_token' not in issuers:
issuers.update(_DEFAULT_GOOGLE_ISSUER)
time_now = long(time.time())
user = _get_id_token_user(token, issuers, audiences, allowed_client_ids,
time_now, memcache)
if user:
os.environ[_ENV_AUTH_EMAIL] = user.email()
os.environ[_ENV_AUTH_DOMAIN] = user.auth_domain()
return
if scopes:
_logger.debug('Checking for oauth token.')
if _is_local_dev():
_set_bearer_user_vars_local(token, allowed_client_ids, scopes)
else:
_set_bearer_user_vars(allowed_client_ids, scopes)
|
Get user information from the id_token or oauth token in the request.
Used internally by Endpoints to set up environment variables for user
authentication.
Args:
method: The class method that's handling this request. This method
should be annotated with @endpoints.method.
api_info: An api_config._ApiInfo instance. Optional. If None, will attempt
to parse api_info from the implicit instance of the method.
request: The current request, or None.
|
juraj-google-style
|
def _TransmitBreakpointUpdates(self, service):
reconnect = False
retry_list = []
while self._transmission_queue:
(breakpoint, retry_count) = self._transmission_queue.popleft()
try:
service.debuggees().breakpoints().update(debuggeeId=self._debuggee_id, id=breakpoint['id'], body={'breakpoint': breakpoint}).execute()
native.LogInfo(('Breakpoint %s update transmitted successfully' % breakpoint['id']))
except apiclient.errors.HttpError as err:
status = err.resp.status
is_transient = ((status >= 500) or (status == 408))
if (is_transient and (retry_count < (self.max_transmit_attempts - 1))):
native.LogInfo(('Failed to send breakpoint %s update: %s' % (breakpoint['id'], traceback.format_exc())))
retry_list.append((breakpoint, (retry_count + 1)))
elif is_transient:
native.LogWarning(('Breakpoint %s retry count exceeded maximum' % breakpoint['id']))
else:
native.LogInfo(('%s, breakpoint: %s' % (err, breakpoint['id'])))
except BaseException:
native.LogWarning(('Fatal error sending breakpoint %s update: %s' % (breakpoint['id'], traceback.format_exc())))
reconnect = True
self._transmission_queue.extend(retry_list)
if (not self._transmission_queue):
self.update_backoff.Succeeded()
return (reconnect, None)
else:
return (reconnect, self.update_backoff.Failed())
|
Tries to send pending breakpoint updates to the backend.
Sends all the pending breakpoint updates. In case of transient failures,
the breakpoint is inserted back to the top of the queue. Application
failures are not retried (for example updating breakpoint in a final
state).
Each pending breakpoint maintains a retry counter. After repeated transient
failures the breakpoint is discarded and dropped from the queue.
Args:
service: client to use for API calls
Returns:
(reconnect, timeout) tuple. The first element ("reconnect") is set to
true on unexpected HTTP responses. The caller should discard the HTTP
connection and create a new one. The second element ("timeout") is
set to None if all pending breakpoints were sent successfully. Otherwise
returns time interval in seconds to stall before retrying.
|
codesearchnet
|
def _check_registry_type(folder=None):
folder = _registry_folder(folder)
default_file = os.path.join(folder, 'registry_type.txt')
try:
with open(default_file, "r") as infile:
data = infile.read()
data = data.strip()
ComponentRegistry.SetBackingStore(data)
except IOError:
pass
|
Check if the user has placed a registry_type.txt file to choose the registry type
If a default registry type file is found, the DefaultBackingType and DefaultBackingFile
class parameters in ComponentRegistry are updated accordingly.
Args:
folder (string): The folder that we should check for a default registry type
|
juraj-google-style
|
def os_version(self, value):
if value == self._defaults['ai.device.osVersion'] and 'ai.device.osVersion' in self._values:
del self._values['ai.device.osVersion']
else:
self._values['ai.device.osVersion'] = value
|
The os_version property.
Args:
value (string): the property value.
|
juraj-google-style
|
def ParseRecord(self, parser_mediator, key, structure):
if key not in self._SUPPORTED_KEYS:
raise errors.ParseError(
'Unable to parse record, unknown structure: {0:s}'.format(key))
date_time = dfdatetime_time_elements.TimeElements()
try:
iso_date_time = self._GetISO8601String(structure.date_time)
date_time.CopyFromStringISO8601(iso_date_time)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(structure.date_time))
return
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_RECORDED)
event_data = ApacheAccessEventData()
event_data.ip_address = structure.ip_address
event_data.remote_name = structure.remote_name
event_data.user_name = structure.user_name
event_data.http_request = structure.http_request
event_data.http_response_code = structure.response_code
event_data.http_response_bytes = structure.response_bytes
if key == 'combined_log_format':
event_data.http_request_referer = structure.referer
event_data.http_request_user_agent = structure.user_agent
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses a matching entry.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): name of the parsed structure.
structure (pyparsing.ParseResults): elements parsed from the file.
Raises:
ParseError: when the structure type is unknown.
|
juraj-google-style
|
def get(self, query: Mapping[str, Any], context: PipelineContext = None) -> T:
result = self._source.get(self._source_type, deepcopy(query), context)
LOGGER.info("Got result \"{result}\" from query \"{query}\" of source \"{source}\"".format(result=result, query=query, source=self._source))
LOGGER.info("Sending result \"{result}\" to sinks before converting".format(result=result))
for sink in self._before_transform:
sink.put(result, context)
LOGGER.info("Converting result \"{result}\" to request type".format(result=result))
result = self._transform(data=result, context=context)
LOGGER.info("Sending result \"{result}\" to sinks after converting".format(result=result))
for sink in self._after_transform:
sink.put(result, context)
return result
|
Gets a query from the data source.
1) Extracts the query from the data source.
2) Inserts the result into any data sinks.
3) Transforms the result into the requested type if it wasn't already.
4) Inserts the transformed result into any data sinks.
Args:
query: The query being requested.
context: The context for the extraction (mutable).
Returns:
The requested object.
|
juraj-google-style
|
def highwater(self, partition):
if (not isinstance(partition, TopicPartition)):
raise TypeError('partition must be a TopicPartition namedtuple')
assert self._subscription.is_assigned(partition), 'Partition is not assigned'
return self._subscription.assignment[partition].highwater
|
Last known highwater offset for a partition.
A highwater offset is the offset that will be assigned to the next
message that is produced. It may be useful for calculating lag, by
comparing with the reported position. Note that both position and
highwater refer to the *next* offset -- i.e., highwater offset is
one greater than the newest available message.
Highwater offsets are returned in FetchResponse messages, so will
not be available if no FetchRequests have been sent for this partition
yet.
Arguments:
partition (TopicPartition): Partition to check
Returns:
int or None: Offset if available
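A sketch of estimating consumer lag, assuming an already-assigned
`KafkaConsumer` named `consumer` for which at least one fetch has happened:
```python
from kafka import TopicPartition

tp = TopicPartition("my-topic", 0)
hw = consumer.highwater(tp)
if hw is not None:
    lag = hw - consumer.position(tp)  # both refer to the *next* offset
    print("approximate lag:", lag)
```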
|
codesearchnet
|
def AddEventData(self, event_data):
self._RaiseIfNotWritable()
self._AddAttributeContainer(self._CONTAINER_TYPE_EVENT_DATA, event_data)
|
Adds event data.
Args:
event_data (EventData): event data.
Raises:
IOError: when the storage file is closed or read-only.
OSError: when the storage file is closed or read-only.
|
juraj-google-style
|
def set_device_name(self, new_name):
device_name = self.get_characteristic_handle_from_uuid(UUID_DEVICE_NAME)
if device_name is None:
logger.warn('Failed to find handle for device name')
return False
if len(new_name) > MAX_DEVICE_NAME_LEN:
logger.error('Device name exceeds maximum length ({} > {})'.format(len(new_name), MAX_DEVICE_NAME_LEN))
return False
if self.dongle._write_attribute(self.conn_handle, device_name, new_name.encode('ascii')):
self.name = new_name
return True
return False
|
Sets a new BLE device name for this SK8.
Args:
new_name (str): the new device name as an ASCII string, max 20 characters.
Returns:
True if the name was updated successfully, False otherwise.
|
juraj-google-style
|
def youtube(keyword=None):
if keyword is None:
web.open('https://www.youtube.com/')
else:
web.open(quote('https://www.youtube.com/results?search_query=' + keyword, safe='/:?=&'))
|
Open youtube.
Args:
keyword (optional): Search word.
|
juraj-google-style
|
def form_uri(item_id, service, is_track):
if is_track:
uri = service.sonos_uri_from_id(item_id)
else:
uri = 'x-rincon-cpcontainer:' + item_id
return uri
|
Form and return a music service item uri
Args:
item_id (str): The item id
service (MusicService): The music service that the item originates from
is_track (bool): Whether the item_id is from a track or not
Returns:
str: The music service item uri
|
juraj-google-style
|
def __init__(self, details):
if not isinstance(details, list):
raise ValueError('details in ' + self.__class__.__name__ + '.' + sys._getframe().f_code.co_name + ' must be a list')
self.validation_failures = {}
self._optional = True
self._nodes = []
for i in range(len(details)):
if isinstance(details[i], _BaseNode):
self._nodes.append(details[i])
continue
elif isinstance(details[i], (dict, list)):
self._nodes.append(_child(details[i]))
else:
raise ValueError('details[' + str(i) + '] in ' + self.__class__.__name__ + '.' + sys._getframe().f_code.co_name + ' must be a dict')
if not self._nodes[-1]._optional:
self._optional = False
|
Constructor
Initialises the instance
Arguments:
details {list} -- Details describing the type of values allowed for
the node
Raises:
ValueError
Returns:
OptionsNode
|
juraj-google-style
|
def __init__(self, pad_mask):
self.nonpad_ids = None
self.dim_origin = None
with tf.name_scope("pad_reduce/get_ids"):
pad_mask = tf.reshape(pad_mask, [-1])
self.nonpad_ids = tf.to_int32(tf.where(pad_mask < 1e-9))
self.dim_origin = tf.shape(pad_mask)[:1]
|
Compute and store the location of the padding.
Args:
pad_mask (tf.Tensor): Reference padding tensor of shape
[batch_size,length] or [dim_origin] (dim_origin=batch_size*length)
containing non-zeros positive values to indicate padding location.
|
juraj-google-style
|
def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, training: Optional[bool]=False):
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, self_attn_weights, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask)
tf.debugging.assert_equal(shape_list(hidden_states), shape_list(residual), message=f'Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}')
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = self.activation_dropout(hidden_states, training=training)
hidden_states = self.fc2(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
return (hidden_states, self_attn_weights)
|
Args:
hidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)*
attention_mask (`tf.Tensor`): attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
*(encoder_attention_heads,)*
|
github-repos
|
def ConvertGlobIntoPathComponents(self, pattern):
components = []
for path_component in pattern.split('/'):
m = rdf_paths.GlobExpression.RECURSION_REGEX.search(path_component)
if m:
path_component = path_component.replace(m.group(0), '*')
component = rdf_paths.PathSpec(path=fnmatch.translate(path_component), pathtype=self.state.pathtype, path_options=rdf_paths.PathSpec.Options.RECURSIVE)
if m.group(1):
component.recursion_depth = int(m.group(1))
elif self.GLOB_MAGIC_CHECK.search(path_component):
component = rdf_paths.PathSpec(path=fnmatch.translate(path_component), pathtype=self.state.pathtype, path_options=rdf_paths.PathSpec.Options.REGEX)
else:
pathtype = self.state.pathtype
if ((pathtype == rdf_paths.PathSpec.PathType.TSK) and re.match('^.:$', path_component)):
path_component = ('%s\\' % path_component)
component = rdf_paths.PathSpec(path=path_component, pathtype=pathtype, path_options=rdf_paths.PathSpec.Options.CASE_INSENSITIVE)
components.append(component)
return components
|
r"""Converts a glob pattern into a list of pathspec components.
Wildcards are also converted to regular expressions. The pathspec components
do not span directories, and are marked as a regex or a literal component.
We also support recursion into directories using the ** notation. For
example, /home/**2/foo.txt will find all files named foo.txt recursed 2
directories deep. If the directory depth is omitted, it defaults to 3.
Example:
/home/test/* -> ['home', 'test', '.*\\Z(?ms)']
Args:
pattern: A glob expression with wildcards.
Returns:
A list of PathSpec instances for each component.
Raises:
ValueError: If the glob is invalid.
|
codesearchnet
|
def output(self, _filename):
txt = ''
for contract in self.slither.contracts_derived:
txt += '\n{}:\n'.format(contract.name)
table = PrettyTable(['Name', 'ID'])
for function in contract.functions:
if function.visibility in ['public', 'external']:
table.add_row([function.full_name, hex(get_function_id(function.full_name))])
for variable in contract.state_variables:
if variable.visibility in ['public']:
variable_getter_args = ""
if type(variable.type) is ArrayType:
length = 0
v = variable
while type(v.type) is ArrayType:
length += 1
v = v.type
variable_getter_args = ','.join(["uint256"]*length)
elif type(variable.type) is MappingType:
variable_getter_args = variable.type.type_from
table.add_row([f"{variable.name}({variable_getter_args})", hex(get_function_id(f"{variable.name}({variable_getter_args})"))])
txt += str(table) + '\n'
self.info(txt)
|
_filename is not used
Args:
_filename(string)
|
juraj-google-style
|
def _compute_latents(self, g_values: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
x = torch.repeat_interleave(torch.unsqueeze(g_values, dim=-2), self.watermarking_depth, axis=-2)
x = torch.tril(x, diagonal=-1)
logits = (self.delta[..., None, :] @ x.type(self.delta.dtype)[..., None]).squeeze() + self.beta
p_two_unique_tokens = torch.sigmoid(logits)
p_one_unique_token = 1 - p_two_unique_tokens
return (p_one_unique_token, p_two_unique_tokens)
|
Computes the unique token probability distribution given g-values.
Args:
g_values (`torch.Tensor` of shape `(batch_size, seq_len, watermarking_depth)`):
PRF values.
Returns:
p_one_unique_token and p_two_unique_tokens, both of shape
[batch_size, seq_len, watermarking_depth]. p_one_unique_token[i,t,l]
gives the probability of there being one unique token in a tournament
match on layer l, on timestep t, for batch item i.
p_one_unique_token[i,t,l] + p_two_unique_token[i,t,l] = 1.
|
github-repos
|
class PipelineDataFormat:
SUPPORTED_FORMATS = ['json', 'csv', 'pipe']
def __init__(self, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite: bool=False):
self.output_path = output_path
self.input_path = input_path
self.column = column.split(',') if column is not None else ['']
self.is_multi_columns = len(self.column) > 1
if self.is_multi_columns:
self.column = [tuple(c.split('=')) if '=' in c else (c, c) for c in self.column]
if output_path is not None and (not overwrite):
if exists(abspath(self.output_path)):
raise OSError(f'{self.output_path} already exists on disk')
if input_path is not None:
if not exists(abspath(self.input_path)):
raise OSError(f"{self.input_path} doesn't exist on disk")
@abstractmethod
def __iter__(self):
raise NotImplementedError()
@abstractmethod
def save(self, data: Union[dict, List[dict]]):
raise NotImplementedError()
def save_binary(self, data: Union[dict, List[dict]]) -> str:
path, _ = os.path.splitext(self.output_path)
binary_path = os.path.extsep.join((path, 'pickle'))
with open(binary_path, 'wb+') as f_output:
pickle.dump(data, f_output)
return binary_path
@staticmethod
def from_str(format: str, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False) -> 'PipelineDataFormat':
if format == 'json':
return JsonPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
elif format == 'csv':
return CsvPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
elif format == 'pipe':
return PipedPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
else:
raise KeyError(f'Unknown reader {format} (Available reader are json/csv/pipe)')
|
Base class for all the pipeline supported data format both for reading and writing. Supported data formats
currently includes:
- JSON
- CSV
- stdin/stdout (pipe)
`PipelineDataFormat` also includes some utilities to work with multi-columns like mapping from datasets columns to
pipelines keyword arguments through the `dataset_kwarg_1=dataset_column_1` format.
Args:
output_path (`str`): Where to save the outgoing data.
input_path (`str`): Where to look for the input data.
column (`str`): The column to read.
overwrite (`bool`, *optional*, defaults to `False`):
Whether or not to overwrite the `output_path`.
|
github-repos
|
def get_remaining_width(sample_string, max_terminal_width=None):
if (max_terminal_width is not None):
available_width = min(terminal_width(), max_terminal_width)
else:
available_width = terminal_width()
return (available_width - len(sample_string))
|
Returns the number of characters available if sample string were to be printed in the terminal.
Positional arguments:
sample_string -- gets the length of this string.
Keyword arguments:
max_terminal_width -- limit the overall width of everything to these many characters.
Returns:
Integer.
|
codesearchnet
|
def __init__(self, property_type=TableFeaturePropType.OFPTFPT_INSTRUCTIONS,
instruction_ids=None):
super().__init__(property_type=property_type)
self.instruction_ids = instruction_ids if instruction_ids else []
self.update_length()
|
Create an InstructionsProperty with the optional parameters below.
Args:
property_type(|TableFeaturePropType_v0x04|):
Property Type value of this instance.
instruction_ids(|ListOfInstruction_v0x04|):
List of Instruction instances.
|
juraj-google-style
|
def files_comments_delete(self, *, file: str, id: str, **kwargs) -> SlackResponse:
kwargs.update({"file": file, "id": id})
return self.api_call("files.comments.delete", json=kwargs)
|
Deletes an existing comment on a file.
Args:
file (str): The file id. e.g. 'F1234467890'
id (str): The file comment id. e.g. 'Fc1234567890'
|
juraj-google-style
|
def start(backdate=None):
if f.s.cum:
raise StartError("Already have stamps, can't start again (must reset).")
if (f.t.subdvsn_awaiting or f.t.par_subdvsn_awaiting):
raise StartError("Already have subdivisions, can't start again (must reset).")
if f.t.stopped:
raise StoppedError('Timer already stopped (must open new or reset).')
t = timer()
if (backdate is None):
t_start = t
else:
if (f.t is f.root):
raise BackdateError('Cannot backdate start of root timer.')
if (not isinstance(backdate, float)):
raise TypeError('Backdate must be type float.')
if (backdate > t):
raise BackdateError('Cannot backdate to future time.')
if (backdate < f.tm1.last_t):
raise BackdateError('Cannot backdate start to time previous to latest stamp in parent timer.')
t_start = backdate
f.t.paused = False
f.t.tmp_total = 0.0
f.t.start_t = t_start
f.t.last_t = t_start
return t
|
Mark the start of timing, overwriting the automatic start data written on
import, or the automatic start at the beginning of a subdivision.
Notes:
Backdating: For subdivisions only. Backdate time must be in the past
but more recent than the latest stamp in the parent timer.
Args:
backdate (float, optional): time to use for start instead of current.
Returns:
float: The current time.
Raises:
BackdateError: If given backdate time is out of range or used in root timer.
StartError: If the timer is not in a pristine state (if any stamps or
subdivisions, must reset instead).
StoppedError: If the timer is already stopped (must reset instead).
TypeError: If given backdate value is not type float.
|
codesearchnet
|
def to_dict(self):
dictionary = dict()
for (local_name, attribute) in self._attributes.items():
remote_name = attribute.remote_name
if hasattr(self, local_name):
value = getattr(self, local_name)
if isinstance(value, NURESTObject):
value = value.to_dict()
if (isinstance(value, list) and (len(value) > 0) and isinstance(value[0], NURESTObject)):
tmp = list()
for obj in value:
tmp.append(obj.to_dict())
value = tmp
dictionary[remote_name] = value
else:
pass
return dictionary
|
Converts the current object into a Dictionary using all exposed ReST attributes.
Returns:
dict: the dictionary containing all the exposed ReST attributes and their values.
Example::
>>> print entity.to_dict()
{"name": "my entity", "description": "Hello World", "ID": "xxxx-xxx-xxxx-xxx", ...}
|
codesearchnet
|
def get_template_name(env, pipeline_type):
pipeline_base = 'pipeline/pipeline'
template_name_format = '{pipeline_base}'
if env.startswith('prod'):
template_name_format = template_name_format + '_{env}'
else:
template_name_format = template_name_format + '_stages'
if pipeline_type != 'ec2':
template_name_format = template_name_format + '_{pipeline_type}'
template_name_format = template_name_format + '.json.j2'
template_name = template_name_format.format(pipeline_base=pipeline_base, env=env, pipeline_type=pipeline_type)
return template_name
|
Generates the correct template name based on pipeline type
Args:
env (str): environment to generate templates for
pipeline_type (str): Type of pipeline like ec2 or lambda
Returns:
str: Name of template
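Two examples traced directly from the logic above:
```python
get_template_name('prod', 'ec2')    # 'pipeline/pipeline_prod.json.j2'
get_template_name('dev', 'lambda')  # 'pipeline/pipeline_stages_lambda.json.j2'
```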
|
juraj-google-style
|
def absolute(x):
if any_symbolic_tensors((x,)):
return Absolute().symbolic_call(x)
return backend.numpy.absolute(x)
|
Compute the absolute value element-wise.
`keras.ops.abs` is a shorthand for this function.
Args:
x: Input tensor.
Returns:
An array containing the absolute value of each element in `x`.
Example:
>>> x = keras.ops.convert_to_tensor([-1.2, 1.2])
>>> keras.ops.absolute(x)
array([1.2, 1.2], dtype=float32)
|
github-repos
|
def export_dae(filename, cutout, level=0):
if ".dae" not in filename:
filename = filename + ".dae"
vs, fs = mcubes.marching_cubes(cutout, level)
mcubes.export_mesh(vs, fs, filename, "ndioexport")
|
Converts a dense annotation to a DAE, using Marching Cubes (PyMCubes).
Arguments:
filename (str): The filename to write out to
cutout (numpy.ndarray): The dense annotation
level (int): The level at which to run mcubes
Returns:
None. The mesh is written to `filename`.
|
juraj-google-style
|
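A small end-to-end sketch, assuming PyMCubes (plus its PyCollada dependency for DAE export) is installed; the volume is a synthetic sphere rather than real annotation data.
import numpy as np

# Dense volume whose zero level set is a sphere of radius 25.
X, Y, Z = np.mgrid[:100, :100, :100]
cutout = 25 ** 2 - ((X - 50) ** 2 + (Y - 50) ** 2 + (Z - 50) ** 2)

export_dae("sphere", cutout, level=0)  # ".dae" is appended, writing sphere.dae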
def from_vision_qformer_text_configs(cls, vision_config: InstructBlipVisionConfig, qformer_config: InstructBlipQFormerConfig, text_config: PretrainedConfig, **kwargs):
return cls(vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs)
|
Instantiate a [`InstructBlipConfig`] (or a derived class) from a InstructBLIP vision model, Q-Former and
language model configurations.
Returns:
[`InstructBlipConfig`]: An instance of a configuration object
|
github-repos
|
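A usage sketch with default sub-configurations, assuming the Hugging Face `transformers` classes named in the signature; a real configuration would mirror a released InstructBLIP checkpoint.
from transformers import (
    InstructBlipConfig,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    OPTConfig,
)

vision_config = InstructBlipVisionConfig()
qformer_config = InstructBlipQFormerConfig()
text_config = OPTConfig()  # any text-model config can stand in here

config = InstructBlipConfig.from_vision_qformer_text_configs(
    vision_config, qformer_config, text_config
)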
def transpose(x, axes=None):
if any_symbolic_tensors((x,)):
return Transpose(axes=axes).symbolic_call(x)
return backend.numpy.transpose(x, axes=axes)
|
Returns a tensor with `axes` transposed.
Args:
x: Input tensor.
axes: Sequence of integers. Permutation of the dimensions of `x`.
By default, the order of the axes is reversed.
Returns:
`x` with its axes permuted.
|
github-repos
|
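A short example in the same style as the `absolute` docstring above; shapes are noted in comments.
import keras

x = keras.ops.convert_to_tensor([[1, 2, 3], [4, 5, 6]])  # shape (2, 3)
keras.ops.transpose(x)               # shape (3, 2); axes reversed by default
keras.ops.transpose(x, axes=(1, 0))  # same permutation spelled out explicitly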
async def train(state, tf_records):
model_path = os.path.join(fsdb.models_dir(), state.train_model_name)
await run(
'python3', 'train.py', *tf_records,
'--flagfile={}'.format(os.path.join(FLAGS.flags_dir, 'train.flags')),
'--work_dir={}'.format(fsdb.working_dir()),
'--export_path={}'.format(model_path),
'--training_seed={}'.format(state.seed),
'--freeze=true')
elapsed = time.time() - state.start_time
timestamps_path = os.path.join(fsdb.models_dir(), 'train_times.txt')
with gfile.Open(timestamps_path, 'a') as f:
print('{:.3f} {}'.format(elapsed, state.train_model_name), file=f)
|
Run training and write a new model to the fsdb models_dir.
Args:
state: the RL loop State instance.
tf_records: a list of paths to TensorFlow records to train on.
|
juraj-google-style
|
def __init__(self, num_steps=None, last_step=None, steps_per_run=1):
if num_steps is None and last_step is None:
raise ValueError('One of num_steps or last_step must be specified.')
if num_steps is not None and last_step is not None:
raise ValueError('Only one of num_steps or last_step can be specified.')
if steps_per_run is None or steps_per_run < 1:
raise ValueError('steps_per_run should be greater than 0')
self._num_steps = num_steps
self._last_step = last_step
self._steps_per_run_initial_value = steps_per_run
|
Initializes a `MultiStepStopAtStepHook`.
This hook requests stop after either a number of steps have been
executed or a last step has been reached. Only one of the two options can be
specified.
If `num_steps` is specified, it indicates the number of steps to execute
after `begin()` is called. If instead `last_step` is specified, it
indicates the last step we want to execute, as passed to the `after_run()`
call.
In Estimator, the user provided computation, the model_fn, is wrapped
inside a tf.while_loop for peak performance. The steps_per_run variable
determines the number of iterations of the loop before returning to the CPU.
Args:
num_steps: Number of steps to execute.
last_step: Step after which to stop.
steps_per_run: Number of steps executed per run call.
Raises:
ValueError: If one of the arguments is invalid.
|
github-repos
|
def _audience_condition_deserializer(obj_dict):
return [obj_dict.get('name'), obj_dict.get('value'), obj_dict.get('type'), obj_dict.get('match')]
|
Deserializer defining how dict objects need to be decoded for audience conditions.
Args:
obj_dict: Dict representing one audience condition.
Returns:
List consisting of condition key with corresponding value, type and match.
|
codesearchnet
|
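Because the deserializer is a pure function of one dict, a single hypothetical audience condition illustrates the mapping.
condition = {
    'name': 'device_type',
    'value': 'iphone',
    'type': 'custom_attribute',
    'match': 'exact',
}
_audience_condition_deserializer(condition)
# -> ['device_type', 'iphone', 'custom_attribute', 'exact']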
def has_sample(self, md5):
sample = self.get_sample(md5)
return True if sample else False
|
Checks if data store has this sample.
Args:
md5: The md5 digest of the required sample.
Returns:
True if sample with this md5 is present, else False.
|
juraj-google-style
|
def load_ems(self, modules_paths: List[str]):
all_em_lst = []
if modules_paths:
for modules_path in modules_paths:
em_lst = []
try:
for file_name in os.listdir(modules_path):
if (file_name.startswith('em_') and file_name.endswith('.py')):
sys.path.append(modules_path)
this_module = importlib.import_module(file_name[:(- 3)])
for em in self.classes_in_module(this_module):
em_lst.append(em(self))
except Exception:
self.log(('Error when loading etk modules from ' + modules_path), 'error')
raise NotGetETKModuleError('Wrong file path for ETK modules')
all_em_lst += em_lst
try:
all_em_lst = self.topological_sort(all_em_lst)
except Exception:
self.log('Topological sort for ETK modules fails', 'error')
raise NotGetETKModuleError('Topological sort for ETK modules fails')
return all_em_lst
|
Load all extraction modules from the given paths.
Args:
modules_paths (List[str]): directories to scan for `em_*.py` extraction module files.
Returns:
List of instantiated extraction modules, topologically sorted by dependency.
|
codesearchnet
|
def add_streamer(self, streamer):
if ((self._max_streamers is not None) and (len(self.streamers) >= self._max_streamers)):
raise ResourceUsageError('Maximum number of streamers exceeded', max_streamers=self._max_streamers)
streamer.link_to_storage(self.sensor_log)
streamer.index = len(self.streamers)
self.streamers.append(streamer)
|
Add a streamer to this sensor graph.
Args:
streamer (DataStreamer): The streamer we want to add
|
codesearchnet
|
def _check_pattern_list(patterns, key, default=None):
if (not patterns):
return default
if isinstance(patterns, basestring):
return [patterns]
if isinstance(patterns, list):
if all((isinstance(p, basestring) for p in patterns)):
return patterns
raise ValueError("Invalid file patterns in key '{}': must be a string or list of strings".format(key))
|
Validates file search patterns from user configuration.
Acceptable input is a string (which will be converted to a singleton list),
a list of strings, or anything falsy (such as None or an empty dictionary).
Empty or unset input will be converted to a default.
Args:
patterns: input from user configuration (YAML).
key (str): name of the configuration key the input came from,
used for error display purposes.
Keyword Args:
default: value to return in case the input is empty or unset.
Returns:
list[str]: validated list of patterns
Raises:
ValueError: if the input is unacceptable.
|
codesearchnet
|
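Illustrative calls covering the accepted and rejected inputs; note that `basestring` above implies Python 2, so the equivalent check on Python 3 would use `str`.
_check_pattern_list('*.py', 'include')               # -> ['*.py']
_check_pattern_list(['*.py', '*.txt'], 'include')    # -> ['*.py', '*.txt']
_check_pattern_list(None, 'include', default=['*'])  # -> ['*']
_check_pattern_list({'bad': 'input'}, 'include')     # raises ValueError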
def get_project_name(project_id, projects):
for project in projects:
if (project_id == project.id):
return project.name
|
Retrieves the project name for a given project id.
Args:
project_id: project id
projects: List of projects
Returns: Project name, or None if there is no match
|
codesearchnet
|
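A tiny self-contained example using hypothetical project records that expose `id` and `name` attributes, which is all the lookup needs.
from collections import namedtuple

Project = namedtuple('Project', ['id', 'name'])
projects = [Project(1, 'alpha'), Project(2, 'beta')]

get_project_name(2, projects)   # -> 'beta'
get_project_name(99, projects)  # -> None (no explicit return on a miss)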
def flash(self, partition, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK):
return self._simple_command('flash', arg=partition, info_cb=info_cb,
timeout_ms=timeout_ms)
|
Flashes the last downloaded file to the given partition.
Args:
partition: Partition to flash.
timeout_ms: Optional timeout in milliseconds to wait for it to finish.
info_cb: See Download. Usually no messages.
Returns:
Response to a download request, normally nothing.
|
juraj-google-style
|
def sync_to_numpy_or_python_type(tensors):
if isinstance(tensors, coordinator_lib.RemoteValue):
return tensors.fetch()
def _to_single_numpy_or_python_type(t):
if isinstance(t, tensor_lib.Tensor):
x = t.numpy()
return x.item() if np.ndim(x) == 0 else x
return t
return nest.map_structure(_to_single_numpy_or_python_type, tensors)
|
Syncs and converts a structure of `Tensor`s to `NumPy` arrays or Python scalar types.
For each tensor, it calls `tensor.numpy()`. If the result is a scalar value,
it converts it to a Python type, such as a float or int, by calling
`result.item()`.
Numpy scalars are converted, as Python types are often more convenient to deal
with. This is especially useful for bfloat16 Numpy scalars, which don't
support as many operations as other Numpy values.
Async strategies (such as `TPUStrategy` and `ParameterServerStrategy`) are forced to sync during this process.
Args:
tensors: A structure of tensors.
Returns:
`tensors`, but scalar tensors are converted to Python types and non-scalar
tensors are converted to Numpy arrays.
|
github-repos
|
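An illustrative call to the helper above, assuming eager TensorFlow tensors; the function itself is a Keras internal rather than a public API.
import tensorflow as tf

tensors = {'loss': tf.constant(0.25), 'logits': tf.constant([0.1, 0.9])}
sync_to_numpy_or_python_type(tensors)
# -> {'loss': 0.25, 'logits': array([0.1, 0.9], dtype=float32)}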
def get_file_download(self, resources):
api_name = 'virustotal-file-download'
api_endpoint = 'file/download'
return self._extract_all_responses(resources, api_endpoint, api_name)
|
Retrieves a file from its md5, sha1, and/or sha2 hash.
Args:
resources: list of string hashes.
Returns:
a file download
|
juraj-google-style
|
def execute_command(self, command):
self.runner.info_log("Executing command: %s" % command)
process = Popen(
command,
stdout=open(os.devnull, 'w'),
stderr=open('runner.log', 'a'),
)
return process
|
Execute a command
Args:
command (str)
Returns:
process (object)
|
juraj-google-style
|
def get_relative_imports(module_file: Union[str, os.PathLike]) -> list[str]:
with open(module_file, encoding='utf-8') as f:
content = f.read()
relative_imports = re.findall('^\\s*import\\s+\\.(\\S+)\\s*$', content, flags=re.MULTILINE)
relative_imports += re.findall('^\\s*from\\s+\\.(\\S+)\\s+import', content, flags=re.MULTILINE)
return list(set(relative_imports))
|
Get the list of modules that are relatively imported in a module file.
Args:
module_file (`str` or `os.PathLike`): The module file to inspect.
Returns:
`list[str]`: The list of relative imports in the module.
|
github-repos
|
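A self-contained demonstration: write a hypothetical module file to disk and inspect it; only the relative imports are reported, so the plain `import os` is ignored.
import tempfile

source = (
    "import os\n"
    "from .configuration_utils import MyConfig\n"
    "from .modeling_utils import MyModel\n"
)
with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as tmp:
    tmp.write(source)

sorted(get_relative_imports(tmp.name))  # -> ['configuration_utils', 'modeling_utils']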
def emit(self, record):
try:
message = self.format(record)
log_record = LogRecord(record.levelno, record.name, os.path.basename(record.pathname), record.lineno, int((record.created * 1000)), message)
self._test_record.add_log_record(log_record)
self._notify_update()
except Exception:
self.handleError(record)
|
Save a logging.LogRecord to our test record.
Logs carry useful metadata such as the logger name and level information.
We capture this in a structured format in the test record to enable
filtering by client applications.
Args:
record: A logging.LogRecord to record.
|
codesearchnet
|
def run(self, copy_to_current_on_exit=False, site_property=None):
scratch = tempfile.gettempdir()
with ScratchDir(scratch, copy_to_current_on_exit=copy_to_current_on_exit) as scratch_dir:
self._write_input(input_dir=scratch_dir)
packmol_input = open(os.path.join(scratch_dir, self.input_file), 'r')
p = Popen(self.packmol_bin, stdin=packmol_input, stdout=PIPE, stderr=PIPE)
(stdout, stderr) = p.communicate()
output_file = os.path.join(scratch_dir, self.control_params["output"])
if os.path.isfile(output_file):
packed_mol = BabelMolAdaptor.from_file(output_file,
self.control_params["filetype"])
packed_mol = packed_mol.pymatgen_mol
print("packed molecule written to {}".format(
self.control_params["output"]))
if site_property:
packed_mol = self.restore_site_properties(site_property=site_property, filename=output_file)
return packed_mol
else:
print("Packmol execution failed")
print(stdout, stderr)
return None
|
Write the input file to the scratch directory, run packmol and return
the packed molecule.
Args:
copy_to_current_on_exit (bool): Whether or not to copy the packmol
input/output files from the scratch directory to the current
directory.
site_property (str): if set then the specified site property
for the final packed molecule will be restored.
Returns:
Molecule object
|
juraj-google-style
|
def check_with_golden(filename):
path_to_file = PATH_TO_DIR + '/data/' + filename
if os.path.isfile(path_to_file) and os.path.isfile(CUDA_CC_GOLDEN_DIR):
with open(path_to_file, 'r') as f_new:
with open(CUDA_CC_GOLDEN_DIR, 'r') as f_golden:
diff = difflib.unified_diff(f_new.readlines(), f_golden.readlines(), fromfile=path_to_file, tofile=CUDA_CC_GOLDEN_DIR)
diff_list = []
for line in diff:
diff_list.append(line)
if diff_list:
print('WARNING: difference(s) found between new csv and golden csv.')
print(diff_list)
else:
print('No difference found between new csv and golden csv.')
|
Checks the newly created CUDA compute capability file with the golden.
If differences are found, then it prints a list of all mismatches as
a `WARNING`.
Golden file must reside in `golden/` directory.
Args:
filename: String that is the name of the newly created file.
|
github-repos
|
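The comparison above boils down to `difflib.unified_diff` over the two files' lines; a minimal sketch with hypothetical in-memory CSV rows shows the warning path.
import difflib

new_csv = ['device,compute capability\n', 'GTX 1080,6.1\n']
golden_csv = ['device,compute capability\n', 'GTX 1080,6.1\n', 'V100,7.0\n']

diff_list = list(difflib.unified_diff(new_csv, golden_csv,
                                      fromfile='new.csv', tofile='golden.csv'))
if diff_list:
    print('WARNING: difference(s) found between new csv and golden csv.')
    print(diff_list)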
def ParseActivityLogUncompressedRow(self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
event_data = ChromeExtensionActivityEventData()
event_data.action_type = self._GetRowValue(query_hash, row, 'action_type')
event_data.activity_id = self._GetRowValue(query_hash, row, 'activity_id')
event_data.api_name = self._GetRowValue(query_hash, row, 'api_name')
event_data.arg_url = self._GetRowValue(query_hash, row, 'arg_url')
event_data.args = self._GetRowValue(query_hash, row, 'args')
event_data.extension_id = self._GetRowValue(query_hash, row, 'extension_id')
event_data.other = self._GetRowValue(query_hash, row, 'other')
event_data.page_title = self._GetRowValue(query_hash, row, 'page_title')
event_data.page_url = self._GetRowValue(query_hash, row, 'page_url')
event_data.query = query
timestamp = self._GetRowValue(query_hash, row, 'time')
date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_UNKNOWN)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses an activity log row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
|
codesearchnet
|