code (string, lengths 20 to 4.93k) | docstring (string, lengths 33 to 1.27k) | source (3 classes)
---|---|---|
def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, token_type_ids=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, token_type_ids=token_type_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = text_outputs[1]
text_features = self.text_projection(pooled_output)
return text_features
|
Returns:
text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
applying the projection layer to the pooled output of [`AltCLIPTextModel`].
Examples:
```python
>>> from transformers import AutoProcessor, AltCLIPModel
>>> model = AltCLIPModel.from_pretrained("BAAI/AltCLIP")
>>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP")
>>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
>>> text_features = model.get_text_features(**inputs)
```
|
github-repos
|
def ChangePassword(self, password_old, password_new):
if not self.ValidatePassword(password_old):
return False
if isinstance(password_new, str):
password_new = password_new.encode('utf-8')
password_key = hashlib.sha256(password_new)
self.SaveStoredData("PasswordHash", password_key)
self.SaveStoredData("MasterKey", AES.new(self._master_key, AES.MODE_CBC, self._iv))
return True
|
Change the password used to protect the private key.
Args:
password_old (str): the current password used to encrypt the private key.
password_new (str): the new password to be used to encrypt the private key.
Returns:
bool: whether the password has been changed
|
juraj-google-style
|
def update_location_centroid(point, cluster, max_distance, min_samples):
cluster.append(point)
points = [p.gen2arr() for p in cluster]
eps = estimate_meters_to_deg(max_distance, precision=6)
p_cluster = DBSCAN(eps=eps, min_samples=min_samples)
p_cluster.fit(points)
clusters = {}
for (i, label) in enumerate(p_cluster.labels_):
if (label in clusters.keys()):
clusters[label].append(points[i])
else:
clusters[label] = [points[i]]
centroids = []
biggest_centroid_l = (- float('inf'))
biggest_centroid = None
for (label, n_cluster) in clusters.items():
centroid = compute_centroid(n_cluster)
centroids.append(centroid)
if ((label >= 0) and (len(n_cluster) >= biggest_centroid_l)):
biggest_centroid_l = len(n_cluster)
biggest_centroid = centroid
if (biggest_centroid is None):
biggest_centroid = compute_centroid(points)
return (biggest_centroid, cluster)
|
Updates the centroid of a location cluster with another point
Args:
point (:obj:`Point`): Point to add to the cluster
cluster (:obj:`list` of :obj:`Point`): Location cluster
max_distance (float): Max neighbour distance
min_samples (int): Minimum number of samples
Returns:
(:obj:`Point`, :obj:`list` of :obj:`Point`): Tuple with the location centroid
and new point cluster (given cluster + given point)
|
codesearchnet
|
def write_file(self, filename):
with open(filename, "w") as f:
f.write(self.__str__())
|
Write the PWSCF input file.
Args:
filename (str): The string filename to output to.
|
juraj-google-style
|
def is_compatible_with(self, spec_or_value):
if not isinstance(spec_or_value, TypeSpec):
spec_or_value = type_spec_from_value(spec_or_value)
if type(self) is not type(spec_or_value):
return False
return self.__is_compatible(self._serialize(), spec_or_value._serialize())
|
Returns true if `spec_or_value` is compatible with this TypeSpec.
Prefer using "is_subtype_of" and "most_specific_common_supertype" wherever
possible.
Args:
spec_or_value: A TypeSpec or TypeSpec associated value to compare against.
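Example (a minimal sketch, assuming TensorFlow's public `tf.TensorSpec`, which implements this `TypeSpec` interface):
```python
>>> import tensorflow as tf
>>> spec = tf.TensorSpec(shape=[None, 3], dtype=tf.float32)
>>> spec.is_compatible_with(tf.constant([[1.0, 2.0, 3.0]]))
True
>>> spec.is_compatible_with(tf.constant([1.0, 2.0, 3.0]))
False
```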
|
github-repos
|
def get_config(self):
raise NotImplementedError(str(self) + ' does not implement get_config()')
|
Returns the config of the regularizer.
A regularizer config is a Python dictionary (serializable)
containing all configuration parameters of the regularizer.
The same regularizer can be reinstantiated later
(without any saved state) from this configuration.
This method is optional if you are just training and executing models,
exporting to and from SavedModels, or using weight checkpoints.
This method is required for saving and loading models to HDF5 formats,
Keras model cloning, some visualization utilities,
and exporting models to and from JSON.
Returns:
Python dictionary.
|
github-repos
|
def ws_db004(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `ws_db004`'.format(value))
self._ws_db004 = value
|
Corresponds to IDD Field `ws_db004`
Mean wind speed coincident with 0.4% dry-bulb temperature
Args:
value (float): value for IDD Field `ws_db004`
Unit: m/s
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def _DetectStaticBatchSize(node_def):
shapes = node_def.attr['_output_shapes'].list.shape
batch_size = set((list(s.dim)[0].size if len(s.dim) >= 2 else None for s in shapes))
if len(batch_size) == 1 and list(batch_size)[0] >= 1:
return list(batch_size)[0]
return None
|
Returns the static batch size of an operation or None.
It is incorrect to use the output shapes to find the batch size of an
operation, as the segmenter actually uses the input shapes. However, it is
a simplification and works for most of the cases for the test purposes.
Args:
node_def: `tf.NodeDef`. The target node for analysis.
Returns:
If all the outputs of the node have the same static batch size, returns
the int value for the batch size. Otherwise returns None.
|
github-repos
|
def convert(data, in_format, out_format, name=None, pretty=False):
dumps = (json.dumps if pretty else json.compress)
if ((not has_ob) and (in_format == 'json') and (out_format == 'json')):
return dumps((json.loads(data) if is_string(data) else data))
elif (not has_ob):
raise ImportError('Chemical file format conversion requires pybel.')
if (in_format == 'json'):
mol = json_to_pybel((json.loads(data) if is_string(data) else data))
elif (in_format == 'pybel'):
mol = data
else:
mol = pybel.readstring(in_format, data)
if (not mol.OBMol.HasNonZeroCoords()):
mol.make3D()
if ((in_format == 'mmcif') and hasattr(mol, 'unitcell')):
mol.unitcell.FillUnitCell(mol.OBMol)
mol.OBMol.ConnectTheDots()
mol.OBMol.PerceiveBondOrders()
mol.OBMol.Center()
if (out_format == 'pybel'):
return mol
elif (out_format == 'object'):
return pybel_to_json(mol, name)
elif (out_format == 'json'):
return dumps(pybel_to_json(mol, name))
else:
return mol.write(out_format)
|
Converts between two inputted chemical formats.
Args:
data: A string representing the chemical file to be converted. If the
`in_format` is "json", this can also be a Python object
in_format: The format of the `data` string. Can be "json" or any format
recognized by Open Babel
out_format: The format to convert to. Can be "json" or any format
recognized by Open Babel
name: (Optional) If `out_format` is "json", will save the specified
value in a "name" property
pretty: (Optional) If True and `out_format` is "json", will pretty-
print the output for human readability
Returns:
A string representing the inputted `data` in the specified `out_format`
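Example (a hedged sketch; requires Open Babel's `pybel`, and the SMILES string and name are only illustrative):
```python
>>> ethanol_json = convert("CCO", in_format="smi", out_format="json", name="ethanol")
>>> mol = convert(ethanol_json, in_format="json", out_format="pybel")  # back to a pybel Molecule
```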
|
codesearchnet
|
def airborne_position(msg0, msg1, t0, t1):
mb0 = common.hex2bin(msg0)[32:]
mb1 = common.hex2bin(msg1)[32:]
cprlat_even = (common.bin2int(mb0[22:39]) / 131072.0)
cprlon_even = (common.bin2int(mb0[39:56]) / 131072.0)
cprlat_odd = (common.bin2int(mb1[22:39]) / 131072.0)
cprlon_odd = (common.bin2int(mb1[39:56]) / 131072.0)
air_d_lat_even = (360.0 / 60)
air_d_lat_odd = (360.0 / 59)
j = common.floor((((59 * cprlat_even) - (60 * cprlat_odd)) + 0.5))
lat_even = float((air_d_lat_even * ((j % 60) + cprlat_even)))
lat_odd = float((air_d_lat_odd * ((j % 59) + cprlat_odd)))
if (lat_even >= 270):
lat_even = (lat_even - 360)
if (lat_odd >= 270):
lat_odd = (lat_odd - 360)
if (common.cprNL(lat_even) != common.cprNL(lat_odd)):
return None
if (t0 > t1):
lat = lat_even
nl = common.cprNL(lat)
ni = max((common.cprNL(lat) - 0), 1)
m = common.floor((((cprlon_even * (nl - 1)) - (cprlon_odd * nl)) + 0.5))
lon = ((360.0 / ni) * ((m % ni) + cprlon_even))
else:
lat = lat_odd
nl = common.cprNL(lat)
ni = max((common.cprNL(lat) - 1), 1)
m = common.floor((((cprlon_even * (nl - 1)) - (cprlon_odd * nl)) + 0.5))
lon = ((360.0 / ni) * ((m % ni) + cprlon_odd))
if (lon > 180):
lon = (lon - 360)
return (round(lat, 5), round(lon, 5))
|
Decode airborne position from a pair of even and odd position messages.
Args:
msg0 (string): even message (28-character hexadecimal string)
msg1 (string): odd message (28-character hexadecimal string)
t0 (int): timestamp for the even message
t1 (int): timestamp for the odd message
Returns:
(float, float): (latitude, longitude) of the aircraft
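Example (a minimal sketch; the even/odd message pair and timestamps are illustrative placeholders for two frames received a few seconds apart):
```python
>>> msg_even = "8D40621D58C382D690C8AC2863A7"
>>> msg_odd = "8D40621D58C386435CC412692AD6"
>>> airborne_position(msg_even, msg_odd, t0=1457996402, t1=1457996400)
# -> (latitude, longitude) rounded to 5 decimals, or None if the two frames disagree
```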
|
codesearchnet
|
def getitem_row_array(self, key):
key = list(key)
def getitem(df, internal_indices=[]):
return df.iloc[internal_indices]
result = self.data.apply_func_to_select_indices(1, getitem, key, keep_remaining=False)
new_index = self.index[key]
return self.__constructor__(result, new_index, self.columns, self._dtype_cache)
|
Get row data for target labels.
Args:
key: Target numeric indices by which to retrieve data.
Returns:
A new QueryCompiler.
|
codesearchnet
|
def datetimeobj_epoch(value):
return datetime.datetime.utcfromtimestamp(int(value)).replace(tzinfo=TZ_GMT)
|
Convert timestamp string to a datetime object.
Timestamp strings like '1383470155' can be converted by this
function.
Args:
value: A timestamp string as seconds since epoch.
Returns:
A datetime object.
Raises:
ValueError: If timestamp is invalid.
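Example (a minimal sketch; `TZ_GMT` is the module-level GMT tzinfo the implementation attaches):
```python
>>> dt = datetimeobj_epoch("1383470155")
>>> dt.tzinfo is TZ_GMT
True
```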
|
juraj-google-style
|
def get_sns_subscriptions(app_name, env, region):
session = boto3.Session(profile_name=env, region_name=region)
sns_client = session.client('sns')
lambda_alias_arn = get_lambda_alias_arn(app=app_name, account=env, region=region)
lambda_subscriptions = []
subscriptions = sns_client.list_subscriptions()
for subscription in subscriptions['Subscriptions']:
if ((subscription['Protocol'] == 'lambda') and (subscription['Endpoint'] == lambda_alias_arn)):
lambda_subscriptions.append(subscription['SubscriptionArn'])
if (not lambda_subscriptions):
LOG.debug('SNS subscription for function %s not found', lambda_alias_arn)
return lambda_subscriptions
|
List SNS lambda subscriptions.
Returns:
list: List of Lambda subscribed SNS ARNs.
|
codesearchnet
|
def _instantiate(class_, type_, __value, *args, **kwargs):
try:
return class_(__value, *args, **kwargs)
except TypeError:
try:
return type_(__value, *args, **kwargs)
except Exception:
return __value
|
Instantiate the object if possible.
Args:
class_: The class to instantiate.
type_: If the class is uninstantiable, attempt to cast to this base
type.
__value: The value to return if the class and type are
uninstantiable.
*args: The positional arguments to pass to the class.
**kwargs: The keyword arguments to pass to the class.
Returns:
The class or base type instantiated using the arguments. If it is
not possible to instantiate either, returns __value.
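Example (a hedged sketch; `PositiveInt` is a hypothetical class whose constructor rejects the value):
```python
>>> class PositiveInt(int):
...     def __new__(cls, value):
...         raise TypeError('uninstantiable')
>>> _instantiate(PositiveInt, int, '5')    # class fails, base type succeeds
5
>>> _instantiate(PositiveInt, int, 'abc')  # both fail, the value is returned unchanged
'abc'
```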
|
codesearchnet
|
def is_tensor_final(self, tensor_name):
tensor = self._name_to_tensor(tensor_name)
return (tensor in self._final_tensors)
|
Whether a tensor is a final output of the computation.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a boolean indicating whether the tensor was a final output.
|
codesearchnet
|
def parse_conf(self, keys=[]):
confs = self.app.config.get('WAFFLE_CONFS', {})
if not keys:
keys = confs.keys()
result = {}
for key in keys:
if key.startswith('WAFFLE_'):
continue
if key not in confs.keys():
continue
stored_conf = self.configstore.get(key)
if not stored_conf:
value = confs[key].get('default', '')
stored_conf = self.configstore.put(key, util.serialize(value))
self.configstore.commit()
else:
value = util.deserialize(stored_conf.get_value())
result[stored_conf.get_key()] = value
return result
|
Parse configuration values from the database.
The extension must have been previously initialized.
If a key is not found in the database, it will be created with the
default value specified.
Arguments:
keys (list[str]): list of keys to parse. If the list is empty, then
all the keys known to the application will be used.
Returns:
dict of the parsed config values.
|
juraj-google-style
|
def set_features(self, partition=1):
if len(self.json) < partition + 1:
raise ValueError('Not enough dates for the specified partition size: {0}. Try a smaller partition.'.format(partition))
data = []
for offset in range(len(self.json) - partition):
json = self.json[offset : offset + partition]
data.append(eval_features(json))
return pd.DataFrame(data=data, dtype=np.float32)
|
Parses market data JSON for technical analysis indicators
Args:
partition: Int of how many dates to take into consideration
when evaluating technical analysis indicators.
Returns:
Pandas DataFrame instance with columns as numpy.float32 features.
|
juraj-google-style
|
def call(self, decision_points: List[pg.geno.DecisionPoint], global_state: Optional[pg.geno.AttributeDict]=None, step: int=0) -> List[pg.geno.DecisionPoint]:
|
Implementation of filtering logic. Subclass to override.
Args:
decision_points: A list of decision points as candidates for filtering.
global_state: An optional keyword argument as the global state.
step: An optional keyword argument as current step of evolution.
Returns:
A list of decision points that should be kept.
|
github-repos
|
def normalize(input_tensor, output_tensor):
image_dims = utils.get_img_shape(input_tensor)[1:]
return (output_tensor / np.prod(image_dims))
|
Normalizes the `output_tensor` with respect to `input_tensor` dimensions.
This makes regularizer weight factor more or less uniform across various input image dimensions.
Args:
input_tensor: A tensor of shape: `(samples, channels, image_dims...)` if `image_data_format=
channels_first` or `(samples, image_dims..., channels)` if `image_data_format=channels_last`.
output_tensor: The tensor to normalize.
Returns:
The normalized tensor.
|
codesearchnet
|
def ef_plugin(service_name):
def class_rebuilder(cls):
class EFPlugin(cls):
def __init__(self, context, clients):
self.service = service_name
self.context = context
self.clients = clients
self.oInstance = cls()
def __getattribute__(self, s):
try:
x = super(EFPlugin, self).__getattribute__(s)
except AttributeError:
pass
else:
return x
return self.oInstance.__getattribute__(s)
return EFPlugin
return class_rebuilder
|
Decorator for ef plugin classes. Any wrapped classes should contain a run() method which executes the plugin code.
Args:
service_name (str): The name of the service being extended.
Example:
@ef_plugin('ef-generate')
class NewRelicPlugin(object):
def run(self):
exec_code()
|
juraj-google-style
|
def wc(filename, contents, parsed=None, is_jekyll=False):
if is_jekyll:
fmt = 'jekyll'
else:
fmt = 'md/txt'
body = (parsed.strip() if parsed else contents.strip())
words = re.sub('\\s+', ' ', body, re.MULTILINE)
for punctuation in INTERSTITIAL_PUNCTUATION:
words = re.sub(punctuation, ' ', words)
punct = re.compile('[^\\w\\s]', re.U)
words = punct.sub('', words)
real_characters = re.sub('\\s', '', words)
paragraphs = [(1 if (len(x) == 0) else 0) for x in contents.strip().splitlines()]
for (index, paragraph) in enumerate(paragraphs):
if ((paragraph == 1) and (paragraphs[(index + 1)] == 1)):
paragraphs[index] = 0
return {'counts': {'file': filename, 'type': fmt, 'paragraphs': (sum(paragraphs) + 1), 'words': len(re.split('\\s+', words)), 'characters_real': len(real_characters), 'characters_total': len(words)}}
|
Count the words, characters, and paragraphs in a string.
Args:
contents: the original string to count
filename (optional): the filename as provided to the CLI
parsed (optional): a parsed string, expected to be plaintext only
is_jekyll: whether the original contents were from a Jekyll file
Returns:
An object containing the various counts
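Example (a minimal sketch; the Markdown contents are made up):
```python
>>> contents = "# Title\n\nFirst paragraph of prose.\n\nSecond paragraph."
>>> wc("post.md", contents)
# -> {'counts': {'file': 'post.md', 'type': 'md/txt', 'paragraphs': ..., 'words': ..., ...}}
```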
|
codesearchnet
|
def fn(x: int, y: int):
pass
|
Test function
Args:
x: The first input
y: The second input. This is a longer description
that spans multiple lines with indentation and stuff.
Returns:
God knows what
|
github-repos
|
def cancel(self):
if (not self.id):
raise WorkflowError('Workflow is not running. Cannot cancel.')
if self.batch_values:
self.workflow.batch_workflow_cancel(self.id)
else:
self.workflow.cancel(self.id)
|
Cancel a running workflow.
Args:
None
Returns:
None
|
codesearchnet
|
def filter_lines(lines, filter_regex, groups=None):
pattern = re.compile(filter_regex)
for line in lines:
match = pattern.search(line)
if match:
if (groups is None):
(yield line)
elif (len(groups) == 1):
(yield match.group(groups[0]))
else:
matched_groups = match.groupdict()
(yield tuple((matched_groups.get(group) for group in groups)))
|
Filters out the lines not matching the pattern.
Args:
lines (list[string]): lines to filter.
filter_regex (string): regular expression used to select matching lines.
groups (list[string], optional): names of regex groups to yield instead of the whole line.
Returns: the matching lines (or the extracted groups), yielded lazily.
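Example (a minimal sketch with made-up log lines):
```python
>>> lines = ["ERROR disk full", "INFO all good", "ERROR timeout"]
>>> list(filter_lines(lines, r"ERROR (?P<msg>.+)", groups=["msg"]))
['disk full', 'timeout']
```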
|
codesearchnet
|
async def report_winner(self, winner: Participant, scores_csv: str):
(await self._report(scores_csv, winner._id))
|
report scores and give a winner
|methcoro|
Args:
winner: :class:Participant instance
scores_csv: Comma separated set/game scores with player 1 score first (e.g. "1-3,3-0,3-2")
Raises:
ValueError: scores_csv has a wrong format
APIException
|
codesearchnet
|
def load_op_library(library_filename):
lib_handle = py_tf.TF_LoadLibrary(library_filename)
try:
wrappers = _pywrap_python_op_gen.GetPythonWrappers(py_tf.TF_GetOpList(lib_handle))
finally:
py_tf.TF_DeleteLibraryHandle(lib_handle)
module_name = hashlib.sha1(wrappers).hexdigest()
if module_name in sys.modules:
return sys.modules[module_name]
module_spec = importlib.machinery.ModuleSpec(module_name, None)
module = importlib.util.module_from_spec(module_spec)
exec(wrappers, module.__dict__)
setattr(module, '_IS_TENSORFLOW_PLUGIN', True)
sys.modules[module_name] = module
return module
|
Loads a TensorFlow plugin, containing custom ops and kernels.
Pass "library_filename" to a platform-specific mechanism for dynamically
loading a library. The rules for determining the exact location of the
library are platform-specific and are not documented here. When the
library is loaded, ops and kernels registered in the library via the
`REGISTER_*` macros are made available in the TensorFlow process. Note
that ops with the same name as an existing op are rejected and not
registered with the process.
Args:
library_filename: Path to the plugin.
Relative or absolute filesystem path to a dynamic library file.
Returns:
A python module containing the Python wrappers for Ops defined in
the plugin.
Raises:
RuntimeError: when unable to load the library or get the python wrappers.
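Example (a hedged sketch; the shared-object path and the `zero_out` op are hypothetical, in the spirit of the TensorFlow custom-op guide):
```python
>>> import tensorflow as tf
>>> zero_out_module = tf.load_op_library('./zero_out.so')  # path is illustrative
>>> zero_out_module.zero_out([[1, 2], [3, 4]])
```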
|
github-repos
|
def assert_files_same(path1, path2):
difflines = compare_files(path1, path2)
assert (len(difflines) == 0), ''.join((['\n'] + difflines))
|
Asserts that two files are the same and returns delta using
-, ?, + format if not
Args:
path1 (str): Path to first file
path2 (str): Path to second file
Returns:
None
|
codesearchnet
|
def languages(self, **kwargs):
path = '/projects/%s/languages' % self.get_id()
return self.manager.gitlab.http_get(path, **kwargs)
|
Get languages used in the project with percentage value.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the server failed to perform the request
|
juraj-google-style
|
def get_kerberos_ticket(username, password):
cache = "/tmp/ion-%s" % uuid.uuid4()
logger.debug("Setting KRB5CCNAME to 'FILE:{}'".format(cache))
os.environ["KRB5CCNAME"] = "FILE:" + cache
try:
realm = settings.CSL_REALM
kinit = pexpect.spawnu("/usr/bin/kinit {}@{}".format(username, realm), timeout=settings.KINIT_TIMEOUT)
kinit.expect(":")
kinit.sendline(password)
returned = kinit.expect([pexpect.EOF, "password:"])
if returned == 1:
logger.debug("Password for {}@{} expired, needs reset".format(username, realm))
return "reset"
kinit.close()
exitstatus = kinit.exitstatus
except pexpect.TIMEOUT:
KerberosAuthenticationBackend.kinit_timeout_handle(username, realm)
exitstatus = 1
if exitstatus != 0:
try:
realm = settings.AD_REALM
kinit = pexpect.spawnu("/usr/bin/kinit {}@{}".format(username, realm), timeout=settings.KINIT_TIMEOUT)
kinit.expect(":")
kinit.sendline(password)
returned = kinit.expect([pexpect.EOF, "password:"])
if returned == 1:
return False
kinit.close()
exitstatus = kinit.exitstatus
except pexpect.TIMEOUT:
KerberosAuthenticationBackend.kinit_timeout_handle(username, realm)
exitstatus = 1
if "KRB5CCNAME" in os.environ:
subprocess.check_call(['kdestroy', '-c', os.environ["KRB5CCNAME"]])
del os.environ["KRB5CCNAME"]
if exitstatus == 0:
logger.debug("Kerberos authorized {}@{}".format(username, realm))
return True
else:
logger.debug("Kerberos failed to authorize {}".format(username))
return False
|
Attempts to create a Kerberos ticket for a user.
Args:
username
The username.
password
The password.
Returns:
Boolean indicating success or failure of ticket creation
|
juraj-google-style
|
def cancel_signature_request(self, signature_request_id):
request = self._get_request()
request.post(url=(self.SIGNATURE_REQUEST_CANCEL_URL + signature_request_id), get_json=False)
|
Cancels a SignatureRequest
Cancels a SignatureRequest. After canceling, no one will be able to sign
or access the SignatureRequest or its documents. Only the requester can
cancel and only before everyone has signed.
Args:
signature_request_id (str): The id of the signature request to cancel
Returns:
None
|
codesearchnet
|
def _PrepareAttributeContainer(self, attribute_container):
attribute_values_hash = hash(attribute_container.GetAttributeValuesString())
identifier = identifiers.FakeIdentifier(attribute_values_hash)
attribute_container.SetIdentifier(identifier)
return copy.deepcopy(attribute_container)
|
Prepares an attribute container for storage.
Args:
attribute_container (AttributeContainer): attribute container.
Returns:
AttributeContainer: copy of the attribute container to store in
the fake storage.
|
codesearchnet
|
def window_partition(hidden_state, window_size):
batch_size, height, width, num_channels = hidden_state.shape
pad_height = (window_size - height % window_size) % window_size
pad_width = (window_size - width % window_size) % window_size
hidden_state = nn.functional.pad(hidden_state, (0, 0, 0, pad_width, 0, pad_height))
padded_height, padded_width = (height + pad_height, width + pad_width)
hidden_state = hidden_state.view(batch_size, padded_height // window_size, window_size, padded_width // window_size, window_size, num_channels)
windows = hidden_state.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)
return (windows, (padded_height, padded_width))
|
Partition into non-overlapping windows with padding if needed.
Args:
hidden_state (`torch.Tensor`):
Input tokens with [batch_size, height, width, num_channels].
window_size (`int`):
Window size.
Returns:
`tuple(torch.FloatTensor)` comprising various elements:
- windows: windows after partition with [batch_size * num_windows, window_size, window_size, num_channels].
- (padded_height, padded_width): padded height and width before partition
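Example (a minimal sketch assuming the corrected `view` call above and a channels-last PyTorch tensor):
```python
>>> import torch
>>> hidden_state = torch.randn(2, 15, 15, 96)  # (batch_size, height, width, num_channels)
>>> windows, (ph, pw) = window_partition(hidden_state, window_size=7)
>>> windows.shape, (ph, pw)
(torch.Size([18, 7, 7, 96]), (21, 21))
```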
|
github-repos
|
def escape_meta(self, string, pos):
if pos > 0 and string[pos - 1] == "\\":
string = string[:pos - 1] + string[pos:]
else:
warnings.warn("Un-escaped meta-character: '{0}' (Escape"
" it with a '\\')".format(string[pos]),
Warning)
pos += 1
meta = self.meta.search(string, pos)
return string, meta
|
Checks if a meta character is escaped or else warns about it.
If the meta character has an escape character ('\') preceding it,
the meta character is escaped. If it does not, a warning is emitted
that the user should escape it.
Arguments:
string (str): The relevant string in which the character was found.
pos (int): The index of the meta character within the string.
Returns:
The possibly escaped string and the next meta match.
|
juraj-google-style
|
def save_state(self, out_path):
state = self.dump_state()
state = _clean_intenum(state)
with open(out_path, 'w') as outfile:
json.dump(state, outfile, indent=4)
|
Save the current state of this emulated object to a file.
Args:
out_path (str): The path to save the dumped state of this emulated
object.
|
codesearchnet
|
def build_java_worker_command(java_worker_options, redis_address, plasma_store_name, raylet_name, redis_password, temp_dir):
assert (java_worker_options is not None)
command = 'java '.format(java_worker_options)
if (redis_address is not None):
command += '-Dray.redis.address={} '.format(redis_address)
if (plasma_store_name is not None):
command += '-Dray.object-store.socket-name={} '.format(plasma_store_name)
if (raylet_name is not None):
command += '-Dray.raylet.socket-name={} '.format(raylet_name)
if (redis_password is not None):
command += '-Dray.redis.password={} '.format(redis_password)
command += '-Dray.home={} '.format(RAY_HOME)
command += '-Dray.log-dir={} '.format(os.path.join(temp_dir, 'sockets'))
if java_worker_options:
command += (java_worker_options + ' ')
command += 'org.ray.runtime.runner.worker.DefaultWorker'
return command
|
This method assembles the command used to start a Java worker.
Args:
java_worker_options (str): The command options for Java worker.
redis_address (str): Redis address of GCS.
plasma_store_name (str): The name of the plasma store socket to connect
to.
raylet_name (str): The name of the raylet socket to create.
redis_password (str): The password of connect to redis.
temp_dir (str): The path of the temporary directory Ray will use.
Returns:
The command string for starting Java worker.
|
codesearchnet
|
def completion(self, device, folder):
return self.get(
'completion',
params={'folder': folder, 'device': device}
).get('completion', None)
|
Returns the completion percentage (0 to 100) for a given device
and folder.
Args:
device (str): The Syncthing device the folder is syncing to.
folder (str): The folder that is being synced.
Returns:
int
|
juraj-google-style
|
def _PrintEventLabelsCounter(self, event_labels_counter, session_identifier=None):
if (not event_labels_counter):
return
title = 'Event tags generated per label'
if session_identifier:
title = '{0:s}: {1:s}'.format(title, session_identifier)
table_view = views.ViewsFactory.GetTableView(self._views_format_type, column_names=['Label', 'Number of event tags'], title=title)
for (key, value) in sorted(event_labels_counter.items()):
if (key == 'total'):
continue
table_view.AddRow([key, value])
try:
total = event_labels_counter['total']
except KeyError:
total = 'N/A'
table_view.AddRow(['Total', total])
table_view.Write(self._output_writer)
|
Prints the event labels counter.
Args:
event_labels_counter (collections.Counter): number of event tags per
label.
session_identifier (Optional[str]): session identifier.
|
codesearchnet
|
def is_commit_id_equal(self, dest, name):
if (not name):
return False
return (self.get_revision(dest) == name)
|
Return whether the current commit hash equals the given name.
Args:
dest: the repository directory.
name: a string name.
|
codesearchnet
|
def size(self, url):
return self.metadata(url).size_in_bytes
|
Fetches file size for a URL.
Returns:
int size of path according to the FileSystem.
Raises:
``BeamIOError``: if url doesn't exist.
|
github-repos
|
def msgconvert(email):
log.debug("Started converting Outlook email")
temph, temp = tempfile.mkstemp(prefix="outlook_")
command = ["msgconvert", "--outfile", temp, email]
try:
if six.PY2:
with open(os.devnull, "w") as devnull:
out = subprocess.Popen(
command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=devnull)
elif six.PY3:
out = subprocess.Popen(
command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
except OSError:
message = "To use this function you must install 'msgconvert' tool"
log.exception(message)
raise MailParserOSError(message)
else:
stdoutdata, _ = out.communicate()
return temp, stdoutdata.decode("utf-8").strip()
finally:
os.close(temph)
|
Executes the msgconvert tool to convert an Outlook msg mail
into eml format.
Args:
email (string): file path of Outlook msg mail
Returns:
tuple with file path of mail converted and
standard output data (unicode Python 2, str Python 3)
|
juraj-google-style
|
def disaggregate_humidity(data_daily, method='equal', temp=None, a0=None, a1=None, kr=None, month_hour_precip_mean=None, preserve_daily_mean=False):
assert (method in ('equal', 'minimal', 'dewpoint_regression', 'min_max', 'linear_dewpoint_variation', 'month_hour_precip_mean')), 'Invalid option'
if (method == 'equal'):
hum_disagg = melodist.distribute_equally(data_daily.hum)
elif (method in ('minimal', 'dewpoint_regression', 'linear_dewpoint_variation')):
if (method == 'minimal'):
a0 = 0
a1 = 1
assert ((a0 is not None) and (a1 is not None)), 'a0 and a1 must be specified'
tdew_daily = (a0 + (a1 * data_daily.tmin))
tdew = melodist.distribute_equally(tdew_daily)
if (method == 'linear_dewpoint_variation'):
assert (kr is not None), 'kr must be specified'
assert (kr in (6, 12)), 'kr must be 6 or 12'
tdew_delta = (0.5 * np.sin(((((temp.index.hour + 1) * np.pi) / kr) - ((3.0 * np.pi) / 4.0))))
tdew_nextday = tdew.shift((- 24))
tdew_nextday.iloc[(- 24):] = tdew.iloc[(- 24):]
tdew += (((temp.index.hour / 24.0) * (tdew_nextday - tdew)) + tdew_delta)
sat_vap_press_tdew = util.vapor_pressure(tdew, 100)
sat_vap_press_t = util.vapor_pressure(temp, 100)
hum_disagg = pd.Series(index=temp.index, data=((100 * sat_vap_press_tdew) / sat_vap_press_t))
elif (method == 'min_max'):
assert (('hum_min' in data_daily.columns) and ('hum_max' in data_daily.columns)), 'Minimum and maximum humidity must be present in data frame'
hmin = melodist.distribute_equally(data_daily.hum_min)
hmax = melodist.distribute_equally(data_daily.hum_max)
tmin = melodist.distribute_equally(data_daily.tmin)
tmax = melodist.distribute_equally(data_daily.tmax)
hum_disagg = (hmax + (((temp - tmin) / (tmax - tmin)) * (hmin - hmax)))
elif (method == 'month_hour_precip_mean'):
assert (month_hour_precip_mean is not None)
precip_equal = melodist.distribute_equally(data_daily.precip)
hum_disagg = pd.Series(index=precip_equal.index)
locs = list(zip(hum_disagg.index.month, hum_disagg.index.hour, (precip_equal > 0)))
hum_disagg[:] = month_hour_precip_mean.loc[locs].values
if preserve_daily_mean:
daily_mean_df = pd.DataFrame(data=dict(obs=data_daily.hum, disagg=hum_disagg.resample('D').mean()))
bias = melodist.util.distribute_equally((daily_mean_df.disagg - daily_mean_df.obs))
bias = bias.fillna(0)
hum_disagg -= bias
return hum_disagg.clip(0, 100)
|
general function for humidity disaggregation
Args:
data_daily: daily values
method: keyword specifying the disaggregation method to be used
temp: hourly temperature time series (necessary for some methods)
kr: parameter for linear_dewpoint_variation method (6 or 12)
month_hour_precip_mean: [month, hour, precip(y/n)] categorical mean values
preserve_daily_mean: if True, correct the daily mean values of the disaggregated
data with the observed daily means.
Returns:
Disaggregated hourly values of relative humidity.
|
codesearchnet
|
def register_instances(name, instances, region=None, key=None, keyid=None, profile=None):
if (isinstance(instances, six.string_types) or isinstance(instances, six.text_type)):
instances = [instances]
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
registered_instances = conn.register_instances(name, instances)
except boto.exception.BotoServerError as error:
log.warning(error)
return False
registered_instance_ids = [instance.id for instance in registered_instances]
register_failures = set(instances).difference(set(registered_instance_ids))
if register_failures:
log.warning('Instance(s): %s not registered with ELB %s.', list(register_failures), name)
register_result = False
else:
register_result = True
return register_result
|
Register instances with an ELB. `instances` is either a string
instance ID or a list of string instance IDs.
Returns:
- ``True``: instance(s) registered successfully
- ``False``: instance(s) failed to be registered
CLI example:
.. code-block:: bash
salt myminion boto_elb.register_instances myelb instance_id
salt myminion boto_elb.register_instances myelb "[instance_id,instance_id]"
|
codesearchnet
|
def _functions(self) -> list[StructuredFunctionWrapper]:
return []
|
Returns a list of functions associated with this dataset.
Returns:
A list of `StructuredFunctionWrapper` objects.
|
github-repos
|
def orient_directed_graph(self, data, graph):
warnings.warn('The algorithm is ran on the skeleton of the given graph.')
return self.orient_undirected_graph(data, nx.Graph(graph))
|
Run the algorithm on a directed_graph.
Args:
data (pandas.DataFrame): DataFrame containing the data
graph (networkx.DiGraph): Skeleton of the graph to orient
Returns:
networkx.DiGraph: Solution on the given skeleton.
.. warning::
The algorithm is run on the skeleton of the given graph.
|
codesearchnet
|
def get_airport_metars(self, iata, page=1, limit=100):
url = AIRPORT_DATA_BASE.format(iata, str(self.AUTH_TOKEN), page, limit)
w = self._fr24.get_airport_weather(url)
return w['metar']
|
Retrieve the metar data at the current time
Given the IATA code of an airport, this method returns the metar information.
Args:
iata (str): The IATA code for an airport, e.g. HYD
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
The metar data for the airport
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_airport_metars('HYD')
|
codesearchnet
|
def HandleAccounts(self, result):
self.logger.debug('Checking for changes to user accounts.')
configured_users = self.utils.GetConfiguredUsers()
enable_oslogin = self._GetEnableOsLoginValue(result)
enable_two_factor = self._GetEnableTwoFactorValue(result)
if enable_oslogin:
desired_users = {}
self.oslogin.UpdateOsLogin(True, two_factor_desired=enable_two_factor)
else:
desired_users = self._GetAccountsData(result)
self.oslogin.UpdateOsLogin(False)
remove_users = sorted(set(configured_users) - set(desired_users.keys()))
self._UpdateUsers(desired_users)
self._RemoveUsers(remove_users)
self.utils.SetConfiguredUsers(desired_users.keys())
|
Called when there are changes to the contents of the metadata server.
Args:
result: json, the deserialized contents of the metadata server.
|
juraj-google-style
|
def pull_full_properties(self):
full_properties = self.manager.session.get(self._uri)
self._properties = dict(full_properties)
self._properties_timestamp = int(time.time())
self._full_properties = True
|
Retrieve the full set of resource properties and cache them in this
object.
Authorization requirements:
* Object-access permission to this resource.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
|
codesearchnet
|
def list_files(d, extension=None):
if os.path.isdir(d):
expanded_dir = os.path.expanduser(d)
files = sorted(glob.glob(expanded_dir + '/*'))
else:
files = [d, ]
if extension is not None:
if type(extension) in STR_TYPES:
extension = [extension, ]
files = [f for f in files if any([f.split('.')[-1] in extension,
f.split('.')[-1].upper() in extension,
f.split('.')[-1].lower() in extension])]
return files
|
Lists files in a given directory.
Args:
d (str): Path to a directory.
extension (str): If supplied, only files that contain the
specified extension will be returned. Default is ``None``,
which returns all files in ``d``.
Returns:
list: A sorted list of file paths.
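Example (a hedged sketch; the directory path is made up):
```python
>>> list_files('/data/sequencing_runs', extension=['fastq', 'fq'])
# -> sorted list of paths ending in .fastq or .fq
```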
|
juraj-google-style
|
def search_next(self, obj):
if (('meta' in obj) and ('next' in obj['meta']) and (obj['meta']['next'] != None)):
uri = (self.api_url % obj['meta']['next'])
(header, content) = self._http_uri_request(uri)
resp = json.loads(content)
if (not self._is_http_response_ok(header)):
error = resp.get('error_message', 'Unknown Error')
raise HttpException(header.status, header.reason, error)
return resp
return {}
|
Takes the dictionary that is returned by 'search' or 'search_next' function and gets the next batch of results
Args:
obj: dictionary returned by the 'search' or 'search_next' function
Returns:
A dictionary with a data returned by the server
Raises:
HttpException with the error message from the server
|
codesearchnet
|
def make_elb_json(self):
env = self.env
region = self.region
elb_settings = self.properties['elb']
LOG.debug('Block ELB Settings:\n%s', pformat(elb_settings))
health_settings = elb_settings['health']
elb_subnet_purpose = elb_settings.get('subnet_purpose', 'internal')
region_subnets = get_subnets(target='elb', purpose=elb_subnet_purpose, env=env, region=region)
region_subnets.pop('subnet_ids', None)
if (elb_subnet_purpose == 'internal'):
is_internal = 'true'
else:
is_internal = 'false'
target = elb_settings.get('target', 'HTTP:80/health')
health = splay_health(target)
listeners = format_listeners(elb_settings=elb_settings, env=self.env, region=region)
idle_timeout = elb_settings.get('idle_timeout', None)
access_log = elb_settings.get('access_log', {})
connection_draining_timeout = elb_settings.get('connection_draining_timeout', None)
security_groups = DEFAULT_ELB_SECURITYGROUPS[env]
security_groups.append(self.app)
security_groups.extend(self.properties['security_group']['elb_extras'])
security_groups = remove_duplicate_sg(security_groups)
template_kwargs = {'access_log': json.dumps(access_log), 'app_name': self.app, 'availability_zones': json.dumps(region_subnets), 'connection_draining_timeout': json.dumps(connection_draining_timeout), 'env': env, 'hc_string': target, 'health_interval': health_settings['interval'], 'health_path': health.path, 'health_port': health.port, 'health_protocol': health.proto, 'health_timeout': health_settings['timeout'], 'healthy_threshold': health_settings['threshold'], 'idle_timeout': json.dumps(idle_timeout), 'isInternal': is_internal, 'listeners': json.dumps(listeners), 'region_zones': json.dumps(region_subnets[region]), 'region': region, 'security_groups': json.dumps(security_groups), 'subnet_type': elb_subnet_purpose, 'unhealthy_threshold': health_settings['unhealthy_threshold'], 'vpc_id': get_vpc_id(env, region)}
rendered_template = get_template(template_file='infrastructure/elb_data.json.j2', **template_kwargs)
return rendered_template
|
Render the JSON template with arguments.
Returns:
str: Rendered ELB template.
|
codesearchnet
|
def get_coding_intervals(self, build='37', genes=None):
intervals = {}
if (not genes):
genes = self.all_genes(build=build)
LOG.info('Building interval trees...')
for (i, hgnc_obj) in enumerate(genes):
chrom = hgnc_obj['chromosome']
start = max((hgnc_obj['start'] - 5000), 1)
end = (hgnc_obj['end'] + 5000)
if (chrom not in intervals):
intervals[chrom] = intervaltree.IntervalTree()
intervals[chrom].addi(start, end, i)
continue
res = intervals[chrom].search(start, end)
if (not res):
intervals[chrom].addi(start, end, i)
continue
for interval in res:
if (interval.begin < start):
start = interval.begin
if (interval.end > end):
end = interval.end
intervals[chrom].remove(interval)
intervals[chrom].addi(start, end, i)
return intervals
|
Return a dictionary with chromosomes as keys and interval trees as values
Each interval represents a coding region of overlapping genes.
Args:
build(str): The genome build
genes(iterable(scout.models.HgncGene)):
Returns:
intervals(dict): A dictionary with chromosomes as keys and overlapping genomic intervals as values
|
codesearchnet
|
def target(self, value):
if value == self._defaults['target'] and 'target' in self._values:
del self._values['target']
else:
self._values['target'] = value
|
The target property.
Args:
value (string). the property value.
|
juraj-google-style
|
def get_cache_index_key(resource):
if isinstance(resource, APIResource):
(attr, attr_value) = list(resource.get_cache_index_keys().items())[0]
key = (type(resource), attr, attr_value)
else:
key = tuple(resource)
if (len(key) != 3):
raise TypeError('Cache key must be tuple of (class, key, value), got `{!r}` instead'.format(key))
if (not issubclass(key[0], APIResource)):
raise TypeError('First value of cache key must be a subclass of APIResource, got `{!r}` instead'.format(key[0]))
return key
|
Return a usable cache lookup key for an already initialized resource
Args:
resource (APIResource|tuple): APIResource instance or 3-length tuple key returned from this function
Raises:
TypeError: If resource is not an APIResource instance or acceptable 3-length tuple cache key
|
codesearchnet
|
def GetFileObject(self, data_stream_name=''):
if (not data_stream_name and
not self._fsntfs_file_entry.has_default_data_stream()):
return None
path_spec = copy.deepcopy(self.path_spec)
if data_stream_name:
setattr(path_spec, 'data_stream', data_stream_name)
return resolver.Resolver.OpenFileObject(
path_spec, resolver_context=self._resolver_context)
|
Retrieves the file-like object.
Args:
data_stream_name (Optional[str]): data stream name, where an empty
string represents the default data stream.
Returns:
NTFSFileIO: file-like object or None.
|
juraj-google-style
|
def _partitions_list(N):
if (N < _NUM_PRECOMPUTED_PARTITION_LISTS):
return list(_partition_lists[N])
else:
raise ValueError('Partition lists not yet available for system with {} nodes or more'.format(_NUM_PRECOMPUTED_PARTITION_LISTS))
|
Return a list of partitions of the |N| binary nodes.
Args:
N (int): The number of nodes under consideration.
Returns:
list[list]: A list of lists, where each inner list is the set of
micro-elements corresponding to a macro-element.
Example:
>>> _partitions_list(3)
[[[0, 1], [2]], [[0, 2], [1]], [[0], [1, 2]], [[0], [1], [2]]]
|
codesearchnet
|
def categorize(self, categories, default=None):
return dim(self, categorize, categories=categories, default=default)
|
Replaces discrete values with supplied categories
Maps discrete values in the input array onto a fixed set of
categories, defined either as a list or a dictionary.
Args:
categories: List or dict of categories to map inputs to
default: Default value to assign if value not in categories
|
codesearchnet
|
def _restore_resources(resources):
resources = deepcopy(resources)
for resource in resources:
schema = resource['schema']
for fk in schema.get('foreignKeys', []):
_, name = _restore_path(fk['reference']['resource'])
fk['reference']['resource'] = name
return resources
|
Restore schemas that were made compatible with storage schemas.
Handles foreign-key related operations.
Args:
resources (list): resources from storage
Returns:
list: restored resources
|
juraj-google-style
|
def remove_service(self, service):
url = self._url('/services/{0}', service)
resp = self._delete(url)
self._raise_for_status(resp)
return True
|
Stop and remove a service.
Args:
service (str): Service name or ID
Returns:
``True`` if successful.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
codesearchnet
|
def summary_writer_initializer_op():
if context.executing_eagerly():
raise RuntimeError('tf.contrib.summary.summary_writer_initializer_op is only supported in graph mode.')
return ops.get_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME)
|
Graph-mode only. Returns the list of ops to create all summary writers.
Returns:
The initializer ops.
Raises:
RuntimeError: If in Eager mode.
|
github-repos
|
def repeat(count, max_consecutive_error=None):
if count <= 1:
raise ValueError(f'The `count` for `repeat` must be larger than 1, got "{count}".')
if max_consecutive_error is not None and max_consecutive_error > count:
raise ValueError(f'The `max_consecutive_error` ({max_consecutive_error}) for `repeat` must be smaller than `count` ({count}).')
def _outer_decorator(func):
setattr(func, ATTR_REPEAT_CNT, count)
setattr(func, ATTR_MAX_CONSEC_ERROR, max_consecutive_error)
@functools.wraps(func)
def _wrapper(*args):
func(*args)
return _wrapper
return _outer_decorator
|
Decorator for repeating a test case multiple times.
The BaseTestClass will execute the test cases annotated with this decorator
the specified number of times.
This decorator only stores the information needed for the repeat. It does not
execute the repeat.
Args:
count: int, the total number of times to execute the decorated test case.
max_consecutive_error: int, the maximum number of consecutively failed
iterations allowed. If reached, the remaining iterations is abandoned.
By default this is not enabled.
Returns:
The wrapped test function.
Raises:
ValueError, if the user input is invalid.
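Example (a hedged sketch of a Mobly-style test class; the class and test names are assumptions):
```python
from mobly import base_test

class WifiScanTest(base_test.BaseTestClass):

    @repeat(count=5, max_consecutive_error=2)
    def test_scan_returns_results(self):
        # Run up to 5 times; abandoned after 2 consecutive failures.
        ...
```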
|
github-repos
|
def seek_to_end(self, *partitions):
if not all([isinstance(p, TopicPartition) for p in partitions]):
raise TypeError('partitions must be TopicPartition namedtuples')
if not partitions:
partitions = self._subscription.assigned_partitions()
assert partitions, 'No partitions are currently assigned'
else:
for p in partitions:
assert p in self._subscription.assigned_partitions(), 'Unassigned partition'
for tp in partitions:
log.debug("Seeking to end of partition %s", tp)
self._subscription.need_offset_reset(tp, OffsetResetStrategy.LATEST)
|
Seek to the most recent available offset for partitions.
Arguments:
*partitions: Optionally provide specific TopicPartitions, otherwise
default to all assigned partitions.
Raises:
AssertionError: If any partition is not currently assigned, or if
no partitions are assigned.
|
juraj-google-style
|
def MessageReceived(self, m):
if m.Command == 'verack':
if self.incoming_client:
if self.expect_verack_next:
self.expect_verack_next = False
else:
self.HandleVerack()
elif m.Command == 'version':
self.HandleVersion(m.Payload)
elif m.Command == 'getaddr':
self.SendPeerInfo()
elif m.Command == 'getdata':
self.HandleGetDataMessageReceived(m.Payload)
elif m.Command == 'getblocks':
self.HandleGetBlocksMessageReceived(m.Payload)
elif m.Command == 'inv':
self.HandleInvMessage(m.Payload)
elif m.Command == 'block':
self.HandleBlockReceived(m.Payload)
elif m.Command == 'getheaders':
self.HandleGetHeadersMessageReceived(m.Payload)
elif m.Command == 'headers':
self.HandleBlockHeadersReceived(m.Payload)
elif m.Command == 'addr':
self.HandlePeerInfoReceived(m.Payload)
else:
logger.debug(f"{self.prefix} Command not implemented: {m.Command}")
|
Process a message.
Args:
m (neo.Network.Message):
|
juraj-google-style
|
def __init__(self, value=KeyFormatTypeEnum.RAW):
super(KeyFormatType, self).__init__(
KeyFormatTypeEnum, value, Tags.KEY_FORMAT_TYPE)
|
Construct a KeyFormatType object.
Args:
value (KeyFormatType): A KeyFormatType enumeration value,
(e.g., KeyFormatType.PKCS_1). Optional, default to
KeyFormatType.RAW.
|
juraj-google-style
|
def _tokenize_table(self, table=None):
tokenized_rows = []
tokenized_row = []
for column in table:
if self.strip_column_names:
tokenized_row.append(self.tokenize(''))
else:
tokenized_row.append(self.tokenize(column))
tokenized_rows.append(tokenized_row)
for idx, row in table.iterrows():
tokenized_row = []
for cell in row:
tokenized_row.append(self.tokenize(cell))
tokenized_rows.append(tokenized_row)
token_coordinates = []
for row_index, row in enumerate(tokenized_rows):
for column_index, cell in enumerate(row):
for token_index, _ in enumerate(cell):
token_coordinates.append(TokenCoordinates(row_index=row_index, column_index=column_index, token_index=token_index))
return TokenizedTable(rows=tokenized_rows, selected_tokens=token_coordinates)
|
Tokenizes column headers and cell texts of a table.
Args:
table (`pd.DataFrame`):
Table.
Returns:
`TokenizedTable`: TokenizedTable object.
|
github-repos
|
def attach_template(self, _template, _key, **unbound_var_values):
if _key in unbound_var_values:
raise ValueError('%s specified twice.' % _key)
unbound_var_values[_key] = self
return _DeferredLayer(self.bookkeeper,
_template.as_layer().construct,
[],
unbound_var_values,
scope=self._scope,
defaults=self._defaults,
partial_context=self._partial_context)
|
Attaches the template, with this layer supplied as the value for `_key`.
Note: names were chosen to avoid conflicts.
Args:
_template: The template to construct.
_key: The key that this layer should replace.
**unbound_var_values: The values for the unbound_vars.
Returns:
A new layer with operation applied.
Raises:
ValueError: If _key is specified twice or there is a problem computing the
template.
|
juraj-google-style
|
def load_pip_addons(_globals):
for package_name in known_pip_addons:
_, username = package_username(package_name)
try:
load_addon(username, package_name.replace('-', '_'), _globals)
except ImportError:
pass
|
Load all known fabsetup addons which are installed as pypi pip-packages.
Args:
_globals(dict): the globals() namespace of the fabric script.
Return: None
|
juraj-google-style
|
def stop(self, timeout_s=None):
self.stopped.set()
if self.thread:
self.thread.join(timeout_s)
return not self.thread.isAlive()
else:
return True
|
Stops the interval.
If a timeout is provided and stop returns False then the thread is
effectively abandoned in whatever state it was in (presumably dead-locked).
Args:
timeout_s: The time in seconds to wait on the thread to finish. By
default it's forever.
Returns:
False if a timeout was provided and we timed out.
|
juraj-google-style
|
def compute_writer_results(results):
if (not results):
return
(sources, targets, delayeds) = split_results(results)
if targets:
delayeds.append(da.store(sources, targets, compute=False))
if delayeds:
da.compute(delayeds)
if targets:
for target in targets:
if hasattr(target, 'close'):
target.close()
|
Compute all the given dask graphs `results` so that the files are
saved.
Args:
results (iterable): Iterable of dask graphs resulting from calls to
`scn.save_datasets(..., compute=False)`
|
codesearchnet
|
def __init__(self, mapping, record=None):
super().__init__()
self.mapping = mapping
self.record = record
|
Initialize this visitor.
Args:
mapping: A dictionary, mapping strings to node instances. Any NamedType or
ClassType with a name in this dictionary will be replaced with the
corresponding value.
record: Optional. A set. If given, this records which entries in the map
were used.
|
github-repos
|
def __send_smtp_email(self, recipients, subject, html_body, text_body):
smtp = smtplib.SMTP(
dbconfig.get('smtp_server', NS_EMAIL, 'localhost'),
dbconfig.get('smtp_port', NS_EMAIL, 25)
)
source_arn = dbconfig.get('source_arn', NS_EMAIL)
return_arn = dbconfig.get('return_path_arn', NS_EMAIL)
from_arn = dbconfig.get('from_arn', NS_EMAIL)
msg = MIMEMultipart('alternative')
if source_arn and from_arn and return_arn:
msg['X-SES-SOURCE-ARN'] = source_arn
msg['X-SES-FROM-ARN'] = from_arn
msg['X-SES-RETURN-PATH-ARN'] = return_arn
msg['Subject'] = subject
msg['To'] = ','.join(recipients)
msg['From'] = self.sender
if html_body:
html_part = MIMEText(html_body, 'html')
msg.attach(html_part)
if text_body:
text_part = MIMEText(text_body, 'plain')
msg.attach(text_part)
if dbconfig.get('smtp_tls', NS_EMAIL, False):
smtp.starttls()
username = dbconfig.get('smtp_username', NS_EMAIL)
password = dbconfig.get('smtp_password', NS_EMAIL)
if username and password:
smtp.login(username, password)
smtp.sendmail(self.sender, recipients, msg.as_string())
smtp.quit()
|
Send an email using SMTP
Args:
recipients (`list` of `str`): List of recipient email addresses
subject (str): Subject of the email
html_body (str): HTML body of the email
text_body (str): Text body of the email
Returns:
`None`
|
juraj-google-style
|
def parse_arguments(
argv: Optional[Sequence[str]] = None) -> argparse.Namespace:
parser = argparse.ArgumentParser(
description='Git credential helper using pass as the data source.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'-m', '--mapping',
type=argparse.FileType('r'),
metavar='MAPPING_FILE',
default=None,
help='A mapping file to be used, specifying how hosts '
'map to pass entries. Overrides the default mapping files from '
'XDG config locations, usually: {config_file}'.format(
config_file=DEFAULT_CONFIG_FILE))
parser.add_argument(
'-l', '--logging',
action='store_true',
default=False,
help='Print debug messages on stderr. '
'Might include sensitive information')
parser.add_argument(
'action',
type=str,
metavar='ACTION',
help='Action to preform as specified in the git credential API')
args = parser.parse_args(argv)
return args
|
Parse the command line arguments.
Args:
argv:
If not ``None``, use the provided command line arguments for
parsing. Otherwise, extract them automatically.
Returns:
The argparse object representing the parsed arguments.
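Example (a minimal sketch; `'get'` is one of the actions defined by the git credential API referenced in the help text):
```python
>>> args = parse_arguments(['--logging', 'get'])
>>> args.action, args.logging
('get', True)
```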
|
juraj-google-style
|
def get_report_zip(results):
def add_subdir(root_path, subdir):
subdir_path = os.path.join(root_path, subdir)
for (subdir_root, subdir_dirs, subdir_files) in os.walk(subdir_path):
for subdir_file in subdir_files:
subdir_file_path = os.path.join(root_path, subdir, subdir_file)
if os.path.isfile(subdir_file_path):
rel_path = os.path.relpath(subdir_root, subdir_file_path)
subdir_arc_name = os.path.join(rel_path, subdir_file)
zip_file.write(subdir_file_path, subdir_arc_name)
for subdir in subdir_dirs:
add_subdir(subdir_path, subdir)
storage = BytesIO()
tmp_dir = tempfile.mkdtemp()
try:
save_output(results, tmp_dir)
with zipfile.ZipFile(storage, 'w', zipfile.ZIP_DEFLATED) as zip_file:
for (root, dirs, files) in os.walk(tmp_dir):
for file in files:
file_path = os.path.join(root, file)
if os.path.isfile(file_path):
arcname = os.path.join(os.path.relpath(root, tmp_dir), file)
zip_file.write(file_path, arcname)
for directory in dirs:
dir_path = os.path.join(root, directory)
if os.path.isdir(dir_path):
zip_file.write(dir_path, directory)
add_subdir(root, directory)
finally:
shutil.rmtree(tmp_dir)
return storage.getvalue()
|
Creates a zip file of parsed report output
Args:
results (OrderedDict): The parsed results
Returns:
bytes: zip file bytes
|
codesearchnet
|
def post_pipeline(self, pipeline):
if isinstance(pipeline, str):
pipeline_str = pipeline
else:
pipeline_str = json.dumps(pipeline)
pipeline_json = json.loads(pipeline_str)
name = '{0} (onetime-{1})'.format(pipeline_json['name'], self.environments[0])
pipeline_json['name'] = name
pipeline_id = super().compare_with_existing(onetime=True)
if pipeline_id:
pipeline_json['id'] = pipeline_id
else:
del pipeline_json['id']
for trigger in pipeline_json['triggers']:
trigger['enabled'] = False
self.log.debug('Manual Pipeline JSON:\n%s', pipeline_json)
super().post_pipeline(pipeline_json)
|
Send Pipeline JSON to Spinnaker.
Args:
pipeline (dict, str): New Pipeline to create.
|
juraj-google-style
|
def dump(self):
walker = self.dump_walker
if (walker is not None):
walker = walker.dump()
state = {'storage': self.storage.dump(), 'dump_walker': walker, 'next_id': self.next_id}
return state
|
Serialize the state of this subsystem into a dict.
Returns:
dict: The serialized state
|
codesearchnet
|
def Logger(name, debug=False, facility=None):
logger = logging.getLogger(name)
logger.handlers = []
logger.addHandler(logging.NullHandler())
logger.propagate = False
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter((name + ': %(levelname)s %(message)s'))
if debug:
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
if facility:
syslog_handler = logging.handlers.SysLogHandler(address=constants.SYSLOG_SOCKET, facility=facility)
syslog_handler.setLevel(logging.INFO)
syslog_handler.setFormatter(formatter)
logger.addHandler(syslog_handler)
return logger
|
Get a logging object with handlers for sending logs to SysLog.
Args:
name: string, the name of the logger which will be added to log entries.
debug: bool, True if debug output should write to the console.
facility: int, an encoding of the SysLog handler's facility and priority.
Returns:
logging object, an object for logging entries.
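Example (a minimal sketch; the logger name is illustrative and syslog is left disabled):
```python
>>> log = Logger('my-daemon', debug=True)
>>> log.info('Starting up.')
my-daemon: INFO Starting up.
```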
|
codesearchnet
|
def zip_(*structures, **kwargs):
flatten = kwargs.pop('flatten', False)
assert (not kwargs), 'zip() got unexpected keyword arguments.'
return map((lambda *x: (x if (len(x) > 1) else x[0])), *structures, flatten=flatten)
|
Combine corresponding elements in multiple nested structure to tuples.
The nested structures can consist of any combination of lists, tuples, and
dicts. All provided structures must have the same nesting.
Args:
*structures: Nested structures.
flatten: Whether to flatten the resulting structure into a tuple. Keys of
dictionaries will be discarded.
Returns:
Nested structure.
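Example (a minimal sketch; the results shown assume the module's nested ``map`` helper rebuilds the input containers):
```python
>>> zip_([1, [2, 3]], [4, [5, 6]])                 # -> [(1, 4), [(2, 5), (3, 6)]]
>>> zip_([1, [2, 3]], [4, [5, 6]], flatten=True)   # -> ((1, 4), (2, 5), (3, 6))
```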
|
codesearchnet
|
def Process(self, parser_mediator, root_item=None, **kwargs):
super(DocumentSummaryInformationOLECFPlugin, self).Process(parser_mediator, **kwargs)
if (not root_item):
raise ValueError('Root item not set.')
(root_creation_time, root_modification_time) = self._GetTimestamps(root_item)
for item_name in self.REQUIRED_ITEMS:
item = root_item.get_sub_item_by_name(item_name)
if (not item):
continue
summary_information = OLECFDocumentSummaryInformation(item)
event_data = summary_information.GetEventData(data_type='olecf:document_summary_info')
event_data.name = 'Document Summary Information'
if root_creation_time:
date_time = dfdatetime_filetime.Filetime(timestamp=root_creation_time)
event = OLECFDocumentSummaryInformationEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
if root_modification_time:
date_time = dfdatetime_filetime.Filetime(timestamp=root_modification_time)
event = OLECFDocumentSummaryInformationEvent(date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses a document summary information OLECF item.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
root_item (Optional[pyolecf.item]): root item of the OLECF file.
Raises:
ValueError: If the root item is not set.
|
codesearchnet
|
def parse_conservation(variant, info_key):
raw_score = variant.INFO.get(info_key)
conservations = []
if raw_score:
if isinstance(raw_score, numbers.Number):
raw_score = (raw_score,)
for score in raw_score:
if score >= CONSERVATION[info_key]['conserved_min']:
conservations.append('Conserved')
else:
conservations.append('NotConserved')
return conservations
|
Get the conservation prediction
Args:
variant: A parsed variant exposing an ``INFO`` mapping
info_key(str): Key of the conservation score in ``variant.INFO`` and in the ``CONSERVATION`` thresholds
Returns:
conservations(list): List of conservation terms ('Conserved' or 'NotConserved')
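Example (a hypothetical sketch; the ``GERP`` key and its threshold are assumptions, not from the source):
```python
>>> from types import SimpleNamespace
>>> variant = SimpleNamespace(INFO={'GERP': (4.2, 1.0)})
>>> parse_conservation(variant, 'GERP')   # assuming CONSERVATION['GERP']['conserved_min'] == 2
['Conserved', 'NotConserved']
```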
|
juraj-google-style
|
def get_coordination_sphere(self, index_of_atom, n_sphere=1, give_only_index=False, only_surface=True, exclude=None, use_lookup=None):
if (use_lookup is None):
use_lookup = settings['defaults']['use_lookup']
exclude = (set() if (exclude is None) else exclude)
bond_dict = self.get_bonds(use_lookup=use_lookup)
i = index_of_atom
if (n_sphere != 0):
visited = (set([i]) | exclude)
try:
tmp_bond_dict = {j: (bond_dict[j] - visited) for j in bond_dict[i]}
except KeyError:
tmp_bond_dict = {}
n = 0
while (tmp_bond_dict and ((n + 1) < n_sphere)):
new_tmp_bond_dict = {}
for i in tmp_bond_dict:
if (i in visited):
continue
visited.add(i)
for j in tmp_bond_dict[i]:
new_tmp_bond_dict[j] = (bond_dict[j] - visited)
tmp_bond_dict = new_tmp_bond_dict
n += 1
if only_surface:
index_out = set(tmp_bond_dict.keys())
else:
index_out = (visited | set(tmp_bond_dict.keys()))
else:
index_out = {i}
if give_only_index:
return (index_out - exclude)
else:
return self.loc[(index_out - exclude)]
|
Return a Cartesian of atoms in the n-th coordination sphere.
Connected means that a path along covalent bonds exists.
Args:
index_of_atom (int):
give_only_index (bool): If ``True`` a set of indices is
returned. Otherwise a new Cartesian instance.
n_sphere (int): Determines the number of the coordination sphere.
only_surface (bool): Return only the surface of the coordination
sphere.
exclude (set): A set of indices that should be ignored
for the path finding.
use_lookup (bool): Use a lookup variable for
:meth:`~chemcoord.Cartesian.get_bonds`. The default is
specified in ``settings['defaults']['use_lookup']``
Returns:
A set of indices or a new Cartesian instance.
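Example (a hypothetical sketch; ``molecule`` is assumed to be a loaded Cartesian with an atom at index 0):
```python
>>> second_shell = molecule.get_coordination_sphere(0, n_sphere=2, give_only_index=True)
>>> sorted(second_shell)   # indices of atoms exactly two bonds away from atom 0
```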
|
codesearchnet
|
def cosine(w, A=1, phi=0, offset=0):
from math import cos
def f(i):
return A * cos(w*i + phi) + offset
return partial(force, sequence=_advance(f))
|
Return a driver function that can advance a sequence of cosine values.
.. code-block:: none
value = A * cos(w*i + phi) + offset
Args:
w (float) : a frequency for the cosine driver
A (float) : an amplitude for the cosine driver
phi (float) : a phase offset to start the cosine driver with
offset (float) : a global offset to add to the driver values
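Example (a sketch assuming Bokeh-style usage, where the returned driver decorates a periodic callback):
```python
@cosine(w=0.1, A=2, offset=1)
def update(value):
    # on the i-th invocation, value == 2 * cos(0.1 * i) + 1
    source.data = dict(y=[value])   # `source` is a hypothetical ColumnDataSource
```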
|
juraj-google-style
|
def plot(self, ax=None, return_fig=False, **kwargs):
if ax is None:
fig = plt.figure(figsize=(2, 10))
ax = fig.add_subplot(111)
return_ax = False
else:
return_ax = True
hypertime = np.linspace(self.start, self.stop, 10 * (self.size - 1) + 1)
hyperamp = np.interp(hypertime, self.basis, self)
ax.plot(hyperamp, hypertime, 'k')
ax.fill_betweenx(hypertime, hyperamp, 0, hyperamp > 0.0, facecolor='k', lw=0)
ax.invert_yaxis()
ax.set_title(self.name)
if return_ax:
return ax
elif return_fig:
return fig
else:
return None
|
Plot a synthetic.
Args:
ax (ax): A matplotlib axis.
legend (Legend): For now, only here to match API for other plot
methods.
return_fig (bool): whether to return the matplotlib figure.
Default False.
Returns:
ax. If you passed in an ax, otherwise None.
|
juraj-google-style
|
def median(x, axis=None, keepdims=False):
if any_symbolic_tensors((x,)):
return Median(axis=axis, keepdims=keepdims).symbolic_call(x)
return backend.numpy.median(x, axis=axis, keepdims=keepdims)
|
Compute the median along the specified axis.
Args:
x: Input tensor.
axis: Axis or axes along which the medians are computed. Defaults to
`axis=None` which is to compute the median(s) along a flattened
version of the array.
keepdims: If this is set to `True`, the axes which are reduced
are left in the result as dimensions with size one.
Returns:
The output tensor.
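Example (a minimal sketch; assumes this op is exposed as ``keras.ops.median``):
```python
>>> from keras import ops
>>> x = ops.convert_to_tensor([[1.0, 7.0, 3.0], [4.0, 2.0, 6.0]])
>>> ops.median(x, axis=1)   # -> [3., 4.]
```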
|
github-repos
|
def _caching_device(rnn_cell):
if context.executing_eagerly():
return None
if not getattr(rnn_cell, '_enable_caching_device', False):
return None
if control_flow_util.IsInWhileLoop(ops.get_default_graph()):
logging.warning('Variable read device caching has been disabled because the RNN is in tf.while_loop loop context, which will cause reading stalled value in forward path. This could slow down the training due to duplicated variable reads. Please consider updating your code to remove tf.while_loop if possible.')
return None
if rnn_cell._dtype_policy.compute_dtype != rnn_cell._dtype_policy.variable_dtype:
logging.warning("Variable read device caching has been disabled since it doesn't work with the mixed precision API. This is likely to cause a slowdown for RNN training due to duplicated read of variable for each timestep, which will be significant in a multi remote worker setting. Please consider disabling mixed precision API if the performance has been affected.")
return None
return lambda op: op.device
|
Returns the caching device for the RNN variable.
This is useful for distributed training, when the variable is not located on the
same device as the training worker. By enabling the device cache, the
worker reads the variable once and caches it locally, rather than reading it
from the remote device at every time step.
Note that this is assuming the variable that cell needs for each time step is
having the same value in the forward path, and only gets updated in the
backprop. It is true for all the default cells (SimpleRNN, GRU, LSTM). If the
cell body relies on any variable that gets updated every time step, then the
caching device will cause it to read a stale value.
Args:
rnn_cell: the rnn cell instance.
|
github-repos
|
def limit_replace(self, accountID, orderID, **kwargs):
return self.replace(accountID, orderID, order=LimitOrderRequest(**kwargs))
|
Shortcut to replace a pending Limit Order in an Account
Args:
accountID : The ID of the Account
orderID : The ID of the Limit Order to replace
kwargs : The arguments to create a LimitOrderRequest
Returns:
v20.response.Response containing the results from submitting
the request
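Example (a hypothetical sketch; the account ID, order ID and order fields are placeholders):
```python
>>> response = api.order.limit_replace(
...     '101-004-1234567-001', '42',
...     instrument='EUR_USD', units=100, price=1.2500)
>>> response.status
```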
|
codesearchnet
|
def p40baro(msg):
d = hex2bin(data(msg))
if d[26] == '0':
return None
p = bin2int(d[27:39]) * 0.1 + 800
return p
|
Barometric pressure setting
Args:
msg (String): 28-character (112-bit) hexadecimal message (BDS 4,0) string
Returns:
float: pressure in millibar
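Example (a hypothetical sketch; the message below is a placeholder, not a verified BDS 4,0 frame):
```python
>>> p40baro('A000029C85E42F313000007047D3')   # selected pressure in millibar, or None if the field is not set
```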
|
juraj-google-style
|
def __init__(self, client_id='', service_account_email='', service_account_key='',
widget_url='', cookie_name='gtoken', http=None, project_id=''):
self.client_id = client_id
self.widget_url = widget_url
self.cookie_name = cookie_name
self.project_id = project_id
self.rpc_helper = rpchelper.RpcHelper(service_account_email,
service_account_key,
GitkitClient.GOOGLE_API_BASE,
http)
self.config_data_cached = None
if not self.client_id:
self.client_id = self.GetClientId()
|
Initializes the Gitkit client library.
Args:
client_id: string, developer's Google oauth2 web client id.
service_account_email: string, Google service account email.
service_account_key: string, Google service account private key.
widget_url: string, Gitkit widget URL.
cookie_name: string, Gitkit cookie name.
http: Http, http client which support cache.
project_id: string, developer console's project id.
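Example (a hypothetical sketch; every credential value below is a placeholder):
```python
client = GitkitClient(
    client_id='1234567890.apps.googleusercontent.com',
    service_account_email='svc@my-project.iam.gserviceaccount.com',
    service_account_key=open('privatekey.pem', 'rb').read(),
    widget_url='https://example.com/gitkit',
    project_id='my-project')
```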
|
juraj-google-style
|
def CheckNextIncludeOrder(self, header_type):
error_message = ('Found %s after %s' %
(self._TYPE_NAMES[header_type],
self._SECTION_NAMES[self._section]))
last_section = self._section
if header_type == _C_SYS_HEADER:
if self._section <= self._C_SECTION:
self._section = self._C_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _CPP_SYS_HEADER:
if self._section <= self._CPP_SECTION:
self._section = self._CPP_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _LIKELY_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
self._section = self._OTHER_H_SECTION
elif header_type == _POSSIBLE_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
self._section = self._OTHER_H_SECTION
else:
assert header_type == _OTHER_HEADER
self._section = self._OTHER_H_SECTION
if last_section != self._section:
self._last_header = ''
return ''
|
Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
the next include.
Args:
header_type: One of the _XXX_HEADER constants defined above.
Returns:
The empty string if the header is in the right order, or an
error message describing what's wrong.
|
juraj-google-style
|
def visit_Import(self, node):
new_aliases = []
import_updated = False
import_renames = getattr(self._api_change_spec, 'import_renames', {})
max_submodule_depth = getattr(self._api_change_spec, 'max_submodule_depth', 1)
inserts_after_imports = getattr(self._api_change_spec, 'inserts_after_imports', {})
for import_alias in node.names:
all_import_components = import_alias.name.split('.')
found_update = False
for i in reversed(list(range(1, max_submodule_depth + 1))):
import_component = all_import_components[0]
for j in range(1, min(i, len(all_import_components))):
import_component += '.' + all_import_components[j]
import_rename_spec = import_renames.get(import_component, None)
if not import_rename_spec or excluded_from_module_rename(import_alias.name, import_rename_spec):
continue
new_name = import_rename_spec.new_name + import_alias.name[len(import_component):]
new_asname = import_alias.asname
if not new_asname and '.' not in import_alias.name:
new_asname = import_alias.name
new_alias = ast.alias(name=new_name, asname=new_asname)
new_aliases.append(new_alias)
import_updated = True
found_update = True
full_import = (import_alias.name, import_alias.asname)
insert_offset = 1
for line_to_insert in inserts_after_imports.get(full_import, []):
assert self._stack[-1] is node
parent = self._stack[-2]
new_line_node = pasta.parse(line_to_insert)
ast.copy_location(new_line_node, node)
parent.body.insert(parent.body.index(node) + insert_offset, new_line_node)
insert_offset += 1
old_suffix = pasta.base.formatting.get(node, 'suffix')
if old_suffix is None:
old_suffix = os.linesep
if os.linesep not in old_suffix:
pasta.base.formatting.set(node, 'suffix', old_suffix + os.linesep)
pasta.base.formatting.set(new_line_node, 'prefix', pasta.base.formatting.get(node, 'prefix'))
pasta.base.formatting.set(new_line_node, 'suffix', os.linesep)
self.add_log(INFO, node.lineno, node.col_offset, 'Adding `%s` after import of %s' % (new_line_node, import_alias.name))
if found_update:
break
if not found_update:
new_aliases.append(import_alias)
if import_updated:
assert self._stack[-1] is node
parent = self._stack[-2]
new_node = ast.Import(new_aliases)
ast.copy_location(new_node, node)
pasta.ast_utils.replace_child(parent, node, new_node)
self.add_log(INFO, node.lineno, node.col_offset, 'Changed import from %r to %r.' % (pasta.dump(node), pasta.dump(new_node)))
self.generic_visit(node)
|
Handle visiting an import node in the AST.
Args:
node: Current Node
|
github-repos
|
def clear_config(clear_constants=False):
_set_config_is_locked(False)
_CONFIG.clear()
_SINGLETONS.clear()
if clear_constants:
_CONSTANTS.clear()
else:
saved_constants = _CONSTANTS.copy()
_CONSTANTS.clear()
for name, value in six.iteritems(saved_constants):
constant(name, value)
_IMPORTED_MODULES.clear()
_OPERATIVE_CONFIG.clear()
|
Clears the global configuration.
This clears any parameter values set by `bind_parameter` or `parse_config`, as
well as the set of dynamically imported modules. It does not remove any
configurable functions or classes from the registry of configurables.
Args:
clear_constants: Whether to clear constants created by `constant`. Defaults
to False.
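Example (a minimal sketch; assumes the conventional ``gin`` import alias and a registered configurable named ``train`` with a ``learning_rate`` parameter):
```python
>>> import gin
>>> gin.bind_parameter('train.learning_rate', 0.01)
>>> gin.clear_config()                      # removes the binding above
>>> gin.clear_config(clear_constants=True)  # additionally removes values made with gin.constant(...)
```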
|
juraj-google-style
|
def get_feature_data(self, ids=None, features=None, dense=True):
result = self.data
if (ids is not None):
result = result.ix[ids]
if (features is not None):
result = result.ix[:, features]
return (result.to_dense() if dense else result)
|
Slices and returns a subset of feature data.
Args:
ids (list, array): A list or 1D numpy array of study ids to
return rows for. If None, returns data for all studies
(i.e., all rows in array).
features (list, array): A list or 1D numpy array of named features
to return. If None, returns data for all features (i.e., all
columns in array).
dense (bool): Optional boolean. When True (default), convert the
result to a dense array before returning. When False, keep as
sparse matrix. Note that if ids is not None, the returned array
will always be dense.
Returns:
A pandas DataFrame with study IDs in rows and features in columns.
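Example (a hypothetical sketch; ``dataset``, the study ids and the feature names are placeholders):
```python
>>> df = dataset.get_feature_data(ids=['study_001', 'study_002'], features=['emotion', 'memory'])
>>> df.shape   # -> (2, 2)
```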
|
codesearchnet
|
def fill_treewidget(self, tree, parameters):
tree.clear()
assert isinstance(parameters, (dict, Parameter))
for key, value in parameters.items():
if isinstance(value, Parameter):
B26QTreeItem(tree, key, value, parameters.valid_values[key], parameters.info[key])
else:
B26QTreeItem(tree, key, value, type(value), '')
|
Fills a QTreeWidget with nested parameters. (In the future, replace QTreeWidget with QTreeView and call fill_treeview.)
Args:
tree: QtWidgets.QTreeWidget
parameters: dictionary or Parameter object
Returns:
None
|
juraj-google-style
|
def shift_time(start_time, mins) -> str:
s_time = pd.Timestamp(start_time)
e_time = s_time + np.sign(mins) * pd.Timedelta(f'00:{abs(mins)}:00')
return e_time.strftime('%H:%M')
|
Shift start time by mins
Args:
start_time: start time in terms of HH:MM string
mins: number of minutes (+ / -)
Returns:
end time in terms of HH:MM string
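Example (derived directly from the implementation above):
```python
>>> shift_time('09:30', 45)
'10:15'
>>> shift_time('09:30', -15)
'09:15'
```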
|
juraj-google-style
|
def __init__(self, client_id, client_secret, refresh_token,
manager_account_id, dev_token):
credentials = GoogleRefreshTokenClient(client_id, client_secret,
refresh_token)
self.client = AdWordsClient(dev_token, credentials, self._USER_AGENT,
client_customer_id=manager_account_id,
cache=ZeepServiceProxy.NO_CACHE)
|
Initializes an APIHandler.
Args:
client_id: The client customer id retrieved from the Developers Console.
client_secret: The client secret retrieved from the Developers Console.
refresh_token: The refresh token retrieved with generate_refresh_token.py.
manager_account_id: The AdWords manager account Id.
dev_token: The AdWords Developer Token.
|
juraj-google-style
|
def remove_extra_presentations(self, resource, timeout=-1):
uri = self.URI + "/repair"
custom_headers = {'Accept-Language': 'en_US'}
return self._client.create(resource, uri=uri, timeout=timeout, custom_headers=custom_headers)
|
Removes extra presentations from a specified server profile.
Args:
resource (dict):
Object to create
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Associated storage attachment resource.
|
juraj-google-style
|
def startDrag(self, index):
if not index.isValid():
return
dataFrame = self.model().dataFrame()
dfindex = dataFrame.iloc[[index.row()]].index
columnName = dataFrame.columns[index.column()]
dtype = dataFrame[columnName].dtype
value = dataFrame[columnName][dfindex]
mimePayload = PandasCellPayload(
dfindex,
columnName,
value,
dtype,
hex(id(self.model()))
)
mimeData = MimeData()
mimeData.setData(mimePayload)
drag = QtGui.QDrag(self)
drag.setMimeData(mimeData)
pixmap = QtGui.QPixmap(":/icons/insert-table.png")
drag.setHotSpot(QtCore.QPoint(pixmap.width()/3, pixmap.height()/3))
drag.setPixmap(pixmap)
result = drag.start(Qt.MoveAction)
|
Start a drag operation with a PandasCellPayload on the given index.
Args:
index (QModelIndex): model index you want to start the drag operation.
|
juraj-google-style
|
def inspect_virtual(self, stream_id):
stream = DataStream.FromEncoded(stream_id)
if stream.buffered:
return [pack_error(ControllerSubsystem.SENSOR_LOG, SensorLogError.VIRTUAL_STREAM_NOT_FOUND), 0]
try:
reading = self.storage.inspect_last(stream, only_allocated=True)
return [Error.NO_ERROR, reading.value]
except StreamEmptyError:
return [Error.NO_ERROR, 0]
except UnresolvedIdentifierError:
return [pack_error(ControllerSubsystem.SENSOR_LOG, SensorLogError.VIRTUAL_STREAM_NOT_FOUND), 0]
|
Inspect the last value written into a virtual stream.
Args:
stream_id (int): The virtual stream we want to inspect.
Returns:
(int, int): An error code and the stream value.
|
juraj-google-style
|
def open_jsonl(path: str, mode: str='r', **kwargs) -> pg_io.Sequence:
return pg_io.open_sequence(path, mode, serializer=to_json_str, deserializer=from_json_str, **kwargs)
|
Open a JSONL file for reading or writing.
Example::
with pg.open_jsonl('my_file.jsonl', 'w') as f:
f.add(1)
f.add('foo')
f.add(dict(x=1))
with pg.open_jsonl('my_file.jsonl', 'r') as f:
for value in f:
print(value)
Args:
path: The path to the file.
mode: The mode of the file.
**kwargs: Additional keyword arguments that will be passed to
``pg_io.open_sequence``.
Returns:
A sequence for PyGlove objects.
|
github-repos
|
def CheckSupportedFormat(cls, path, check_readable_only=False):
try:
connection = sqlite3.connect(path, detect_types=(sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES))
cursor = connection.cursor()
query = 'SELECT * FROM metadata'
cursor.execute(query)
metadata_values = {row[0]: row[1] for row in cursor.fetchall()}
cls._CheckStorageMetadata(metadata_values, check_readable_only=check_readable_only)
connection.close()
result = True
except (IOError, sqlite3.DatabaseError):
result = False
return result
|
Checks if the storage file format is supported.
Args:
path (str): path to the storage file.
check_readable_only (Optional[bool]): whether the store should only be
checked to see if it can be read. If False, the store will be checked
to see if it can be read and written to.
Returns:
bool: True if the format is supported.
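Example (a hypothetical sketch; the class and path names are placeholders for however this classmethod is exposed):
```python
>>> if SQLiteStorageFile.CheckSupportedFormat('/tmp/timeline.plaso'):
...     print('storage file can be read and written')
```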
|
codesearchnet
|
def install_time(self):
time1970 = self.__mod_time1970
try:
(date_string, item_type) = win32api.RegQueryValueEx(self.__reg_uninstall_handle, 'InstallDate')
except pywintypes.error as exc:
if (exc.winerror == winerror.ERROR_FILE_NOT_FOUND):
return time1970
else:
raise
if (item_type == win32con.REG_SZ):
try:
date_object = datetime.datetime.strptime(date_string, '%Y%m%d')
time1970 = time.mktime(date_object.timetuple())
except ValueError:
pass
return time1970
|
Return the install time, or provide an estimate of install time.
Installers, or even self-upgrading software, should update the date
held in the ``InstallDate`` field when they change versions. Some installers
do not set ``InstallDate`` at all, so we fall back to the last modified time of the
registry key.
Returns:
int: Seconds since 1970 UTC.
|
codesearchnet
|
def to_set(self):
if self.closed():
raise ValueError('Attempt to call to_set() on a closed Queryable.')
if isinstance(self._iterable, set):
return self._iterable
s = set()
for item in self:
if (item in s):
raise ValueError('Duplicate item value {0} in sequence during to_set()'.format(repr(item)))
s.add(item)
return s
|
Convert the source sequence to a set.
Note: This method uses immediate execution.
Raises:
ValueError: If duplicate items are in the source sequence.
ValueError: If the Queryable is closed().
|
codesearchnet
|