code (string, lengths 20–4.93k) | docstring (string, lengths 33–1.27k) | source (3 classes) |
---|---|---|
def sync_model(self, comment='', compact_central=False,
release_borrowed=True, release_workset=True,
save_local=False):
self._add_entry(templates.FILE_SYNC_START)
if compact_central:
self._add_entry(templates.FILE_SYNC_COMPACT)
if release_borrowed:
self._add_entry(templates.FILE_SYNC_RELEASE_BORROWED)
if release_workset:
self._add_entry(templates.FILE_SYNC_RELEASE_USERWORKSETS)
if save_local:
self._add_entry(templates.FILE_SYNC_RELEASE_SAVELOCAL)
self._add_entry(templates.FILE_SYNC_COMMENT_OK
.format(sync_comment=comment)) | Append a sync model entry to the journal.
This instructs Revit to sync the currently open workshared model.
Args:
comment (str): comment to be provided for the sync step
compact_central (bool): if True compacts the central file
release_borrowed (bool): if True releases the borrowed elements
release_workset (bool): if True releases the borrowed worksets
save_local (bool): if True saves the local file as well | juraj-google-style |
def add_node(self, node_name):
graph = self.graph
if node_name in graph:
raise KeyError('node %s already exists' % node_name)
graph[node_name] = set() | Add a node if it does not exist yet, or error out.
Args:
node_name (str): The unique name of the node to add.
Raises:
KeyError: Raised if a node with the same name already exists in the
graph | juraj-google-style |
def get_by_alias(self, alias):
if (alias not in self._aliases):
raise DataInvalidAlias('A dataset with alias {} does not exist'.format(alias))
return self.get_by_index(self._aliases[alias]) | Return a dataset by its alias.
Args:
alias (str): The alias of the dataset that should be returned.
Raises:
DataInvalidAlias: If the alias does not represent a valid dataset. | codesearchnet |
def allreduce_ring_single_shard(xs, devices, reduction_fn_string="SUM"):
n = len(xs)
binary_reduction = mtf.binary_reduction_fn(reduction_fn_string)
assert len(devices) == n, "devices must be a list of length len(xs)"
if n == 1:
return xs
result = [None] * n
if n % 2 == 0:
left_center = n // 2 - 1
right_center = left_center + 1
else:
left_center = n // 2
right_center = left_center
left_sum = xs[0]
for i in xrange(1, left_center + 1):
with tf.device(devices[i]):
left_sum = binary_reduction(left_sum, xs[i])
right_sum = xs[n-1]
for i in reversed(xrange(left_center + 1, n - 1)):
with tf.device(devices[i]):
right_sum = binary_reduction(xs[i], right_sum)
with tf.device(devices[left_center]):
result[left_center] = binary_reduction(left_sum, right_sum)
if n % 2 == 0:
with tf.device(devices[right_center]):
result[right_center] = binary_reduction(left_sum, right_sum)
for i in reversed(xrange(left_center)):
with tf.device(devices[i]):
result[i] = tf.identity(result[i + 1])
for i in xrange(right_center + 1, n):
with tf.device(devices[i]):
result[i] = tf.identity(result[i - 1])
return result | Compute the reduction of all Tensors and put the result everywhere.
Performance-optimized for a ring of devices.
Args:
xs: a list of n tf.Tensors
devices: a list of strings
reduction_fn_string: "SUM" or "MAX"
Returns:
a list of n Tensors
Raises:
ValueError: if devices is not a list of n strings | juraj-google-style |
def format_auth_params(params):
parts = []
for (key, value) in params.items():
if value:
parts.append('{}="{}"'.format(key, value))
return ', '.join(parts) | Generate the format expected by HTTP Headers from parameters.
Args:
params (dict): {key: value} to convert to key=value
Returns:
A formatted header string. | codesearchnet |
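For illustration, a quick call to the helper above (assuming it is in scope); the parameter names and values are made up, and empty values are dropped by the `if value:` check:
```
from collections import OrderedDict

params = OrderedDict([('username', 'alice'), ('realm', 'example.org'), ('opaque', '')])
header_value = format_auth_params(params)
print(header_value)  # username="alice", realm="example.org"
```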
def calculate_columns(sequence):
columns = {}
for row in sequence:
for key in row.keys():
if key not in columns:
columns[key] = len(key)
value_length = len(str(row[key]))
if value_length > columns[key]:
columns[key] = value_length
return columns | Find all column names and the maximum column widths.
Args:
sequence (list[dict]): rows whose keys are the column names and whose values are the cell contents.
Returns:
dict: column names (key) and widths (value). | juraj-google-style |
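A small usage sketch of `calculate_columns` (assuming the function above is in scope); the rows below are invented. Each column width is the larger of the header length and the longest rendered value:
```
rows = [
    {'name': 'alpha', 'count': 3},
    {'name': 'a-much-longer-name', 'count': 12345},
]
print(calculate_columns(rows))  # {'name': 18, 'count': 5}
```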
def intersection(self, other, recursive=True):
if not isinstance(other, composite):
raise AssertionError('Cannot intersect composite and {} types'.format(type(other)))
if self.meta_type != other.meta_type:
return composite({})
if self.meta_type == 'list':
keep = []
for item in self._list:
if item in other._list:
if recursive and isinstance(item, composite):
keep.extend(item.intersection(other.index(item), recursive=True))
else:
keep.append(item)
return composite(keep)
elif self.meta_type == 'dict':
keep = {}
for key in self._dict:
item = self._dict[key]
if key in other._dict:
if recursive and \
isinstance(item, composite) and \
isinstance(other.get(key), composite):
keep[key] = item.intersection(other.get(key), recursive=True)
elif item == other[key]:
keep[key] = item
return composite(keep)
return | Recursively compute intersection of data. For dictionaries, items
for specific keys will be reduced to unique items. For lists, items
will be reduced to unique items. This method is meant to be analogous
to set.intersection for composite objects.
Args:
other (composite): Other composite object to intersect with.
recursive (bool): Whether or not to perform the operation recursively,
for all nested composite objects. | juraj-google-style |
def image_transform(X, function, reshape_before=False, reshape_after=False,
width=None, height=None, **kwargs):
if isinstance(function, str):
function = import_object(function)
elif not callable(function):
raise ValueError("function must be a str or a callable")
flat_image = len(X[0].shape) == 1
if reshape_before and flat_image:
if not (width and height):
side_length = math.sqrt(X.shape[1])
if side_length.is_integer():
side_length = int(side_length)
width = side_length
height = side_length
else:
raise ValueError("Image sizes must be given for non-square images")
else:
reshape_before = False
new_X = []
for image in X:
if reshape_before:
image = image.reshape((width, height))
features = function(
image,
**kwargs
)
if reshape_after:
features = np.reshape(features, X.shape[1])
new_X.append(features)
return np.array(new_X) | Apply a function image by image.
Args:
X: numpy array of images (either flattened 1d arrays or 2d images).
function: the function to apply, given as a callable or as the full python path (str) to import.
reshape_before: whether 1d array needs to be reshaped to a 2d image
reshape_after: whether the returned values need to be reshaped back to a 1d array
width: image width used to rebuild the 2d images. Required if the image is not square.
height: image height used to rebuild the 2d images. Required if the image is not square. | juraj-google-style |
def adapt(self, d, x):
self.update_memory_x(x)
m_d, m_x = self.read_memory()
y = np.dot(self.w, x-m_x) + m_d
e = d - y
nu = self.mu / (self.eps + np.dot(x-m_x, x-m_x))
dw = nu * e * (x-m_x)
self.w += dw
self.update_memory_d(d) | Adapt weights according one desired value and its input.
Args:
* `d` : desired value (float)
* `x` : input array (1-dimensional array) | juraj-google-style |
def dot_product(t1, t2, keep_dims=False, name=None, reduction_dim=None):
with tf.name_scope(name, 'dot', [t1, t2]) as scope:
t1 = tf.convert_to_tensor(t1, name='t1')
t2 = tf.convert_to_tensor(t2, name='t2')
mul = tf.multiply(t1, t2)
if not reduction_dim:
reduction_dim = _last_index(mul, 1)
return tf.reduce_sum(mul, reduction_dim, name=scope, keep_dims=keep_dims) | Computes the dot product of t1 and t2.
Args:
t1: A rank 2 tensor.
t2: A tensor that is the same size as t1.
keep_dims: If true, reduction does not change the rank of the input.
name: Optional name for this op.
reduction_dim: The dimension to reduce, by default choose the last one
and if no shape is specified guess 1.
Returns:
The dot product. | juraj-google-style |
def finalize_options(self):
self.cwd = os.path.abspath(os.path.dirname(__file__))
self.build_dirs = [
os.path.join(self.cwd, 'build'),
os.path.join(self.cwd, 'htmlcov'),
os.path.join(self.cwd, 'dist'),
os.path.join(self.cwd, 'pylink_square.egg-info')
]
self.build_artifacts = ['.pyc', '.o', '.elf', '.bin'] | Populate the attributes.
Args:
self (CleanCommand): the ``CleanCommand`` instance
Returns:
``None`` | juraj-google-style |
def ParseByteStream(
self, parser_mediator, byte_stream, parent_path_segments=None,
codepage='cp1252'):
if parent_path_segments and isinstance(parent_path_segments, list):
self._path_segments = list(parent_path_segments)
else:
self._path_segments = []
shell_item_list = pyfwsi.item_list()
parser_mediator.AppendToParserChain(self)
try:
shell_item_list.copy_from_byte_stream(
byte_stream, ascii_codepage=codepage)
for shell_item in iter(shell_item_list.items):
self._ParseShellItem(parser_mediator, shell_item)
finally:
parser_mediator.PopFromParserChain() | Parses the shell items from the byte stream.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
byte_stream (bytes): shell items data.
parent_path_segments (Optional[list[str]]): parent shell item path
segments.
codepage (Optional[str]): byte stream codepage. | juraj-google-style |
def _get_num_audio_features(self, audio_lengths: Sequence[int]) -> Sequence[int]:
hop_length = self.melspec_kwargs['hop_length']
effective_window_size = self.projector_window_size // self.projector_downsample_rate
projector_lengths = []
for raw_length in audio_lengths:
mel_length = raw_length // hop_length + 1
encoder_length = mel_length // 2
nblocks = math.ceil(encoder_length / self.projector_window_size)
projector_length = nblocks * effective_window_size
projector_lengths.append(projector_length)
return projector_lengths | Gets the (variable length) number of features (i.e., projector output) for the sequences
being considered.
Args:
audio_lengths (`Sequence[int]`):
Sequence of one or more raw audio lengths. | github-repos |
def __init__(self, num_packs=1):
if num_packs < 0:
raise ValueError('HierarchicalCopy requires num_packs >= 0, but {} is specified'.format(num_packs))
super(HierarchicalCopyAllReduce, self).__init__(all_reduce_alg='hierarchical_copy', num_packs=num_packs) | Initializes the object.
Args:
num_packs: a non-negative integer. The number of packs to split values
into. If zero, no packing will be done.
Raises:
ValueError if `num_packs` is negative. | github-repos |
def RunOnce(self):
start_time = time.time()
processed = 0
queue_manager = queue_manager_lib.QueueManager(token=self.token)
for queue in self.queues:
queue_manager.FreezeTimestamp()
fetch_messages_start = time.time()
notifications = queue_manager.GetNotifications(queue)
stats_collector_instance.Get().RecordEvent('worker_time_to_retrieve_notifications', (time.time() - fetch_messages_start))
stuck_flows = []
for n in notifications:
if n.in_progress:
stuck_flows.append(n)
if stuck_flows:
self.ProcessStuckFlows(stuck_flows, queue_manager)
notifications_available = []
for notification in notifications:
if (notification.session_id not in self.queued_flows):
notifications_available.append(notification)
try:
processed += self.ProcessMessages(notifications_available, queue_manager, (self.RUN_ONCE_MAX_SECONDS - (time.time() - start_time)))
except Exception as e:
logging.error('Error processing message %s. %s.', e, traceback.format_exc())
stats_collector_instance.Get().IncrementCounter('grr_worker_exceptions')
if flags.FLAGS.pdb_post_mortem:
pdb.post_mortem()
queue_manager.UnfreezeTimestamp()
if ((time.time() - start_time) > self.RUN_ONCE_MAX_SECONDS):
return processed
return processed | Processes one set of messages from Task Scheduler.
The worker processes new jobs from the task master. For each job
we retrieve the session from the Task Scheduler.
Returns:
Total number of messages processed by this call. | codesearchnet |
def get_supervisor(func: types.AnyFunction) -> types.Supervisor:
if (not callable(func)):
raise TypeError('func is not callable')
if asyncio.iscoroutinefunction(func):
supervisor = _async_supervisor
else:
supervisor = _sync_supervisor
return functools.partial(supervisor, func) | Get the appropriate supervisor to use and pre-apply the function.
Args:
func: A function. | codesearchnet |
def ch_stop_time(self, *channels: List[Channel]) -> int:
return self.timeslots.ch_stop_time(*channels) | Return maximum stop time for supplied channels.
Args:
*channels: Supplied channels | codesearchnet |
def send_post(self, mri, method_name, **params):
q = Queue()
request = Post(
path=[mri, method_name],
parameters=params)
request.set_callback(q.put)
IOLoopHelper.call(self._send_request, request)
response = q.get()
if isinstance(response, Error):
raise response.message
else:
return response.value | Abstract method to dispatch a Post to the server
Args:
mri (str): The mri of the Block
method_name (str): The name of the Method within the Block
params: The parameters to send
Returns:
The return results from the server | juraj-google-style |
def __init__(self, **kwargs):
try:
arguments = Adapter(CollectorUpdate.schema_complete().validate(kwargs))
self.matrix = arguments.matrix
self.stage = arguments.stage
self.timestamp = arguments.timestamp
self.status = arguments.status
self.information = arguments.information.data
except SchemaError as exception:
Logger.get_logger(__name__).error(exception)
raise RuntimeError(str(exception)) | Initializing and validating fields.
Args:
kwargs (dict): application command line options.
Raises:
RuntimeError: when validation of parameters has failed. | juraj-google-style |
def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool=False, **kwargs):
use_auth_token = kwargs.pop('use_auth_token', None)
if use_auth_token is not None:
warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)
if kwargs.get('token', None) is not None:
raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.')
kwargs['token'] = use_auth_token
if os.path.isfile(save_directory):
raise AssertionError(f'Provided path ({save_directory}) should be a directory, not a file')
os.makedirs(save_directory, exist_ok=True)
if push_to_hub:
commit_message = kwargs.pop('commit_message', None)
repo_id = kwargs.pop('repo_id', save_directory.split(os.path.sep)[-1])
repo_id = self._create_repo(repo_id, **kwargs)
files_timestamps = self._get_files_timestamps(save_directory)
if self._auto_class is not None:
custom_object_save(self, save_directory, config=self)
output_feature_extractor_file = os.path.join(save_directory, FEATURE_EXTRACTOR_NAME)
self.to_json_file(output_feature_extractor_file)
logger.info(f'Feature extractor saved in {output_feature_extractor_file}')
if push_to_hub:
self._upload_modified_files(save_directory, repo_id, files_timestamps, commit_message=commit_message, token=kwargs.get('token'))
return [output_feature_extractor_file] | Save a feature_extractor object to the directory `save_directory`, so that it can be re-loaded using the
[`~feature_extraction_utils.FeatureExtractionMixin.from_pretrained`] class method.
Args:
save_directory (`str` or `os.PathLike`):
Directory where the feature extractor JSON file will be saved (will be created if it does not exist).
push_to_hub (`bool`, *optional*, defaults to `False`):
Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
namespace).
kwargs (`Dict[str, Any]`, *optional*):
Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. | github-repos |
def ParseFileObject(self, parser_mediator, file_object):
file_offset = 0
file_size = file_object.get_size()
record_map = self._GetDataTypeMap('pls_recall_record')
while file_offset < file_size:
try:
pls_record, record_data_size = self._ReadStructureFromFileObject(
file_object, file_offset, record_map)
except (ValueError, errors.ParseError) as exception:
if file_offset == 0:
raise errors.UnableToParseFile('Unable to parse first record.')
parser_mediator.ProduceExtractionWarning((
'unable to parse record at offset: 0x{0:08x} with error: '
'{1!s}').format(file_offset, exception))
break
if file_offset == 0 and not self._VerifyRecord(pls_record):
raise errors.UnableToParseFile('Verification of first record failed.')
event_data = PlsRecallEventData()
event_data.database_name = pls_record.database_name.rstrip('\x00')
event_data.sequence_number = pls_record.sequence_number
event_data.offset = file_offset
event_data.query = pls_record.query.rstrip('\x00')
event_data.username = pls_record.username.rstrip('\x00')
date_time = dfdatetime_delphi_date_time.DelphiDateTime(
timestamp=pls_record.last_written_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
file_offset += record_data_size | Parses a PLSRecall.dat file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed. | juraj-google-style |
def install(self, path, dry_run=False, overrides=None):
repo = package_repository_manager.get_repository(path)
resource = repo.install_variant(self.resource, dry_run=dry_run, overrides=overrides)
if (resource is None):
return None
elif (resource is self.resource):
return self
else:
return Variant(resource) | Install this variant into another package repository.
If the package already exists, this variant will be correctly merged
into the package. If the variant already exists in this package, the
existing variant is returned.
Args:
path (str): Path to destination package repository.
dry_run (bool): If True, do not actually install the variant. In this
mode, a `Variant` instance is only returned if the equivalent
variant already exists in this repository; otherwise, None is
returned.
overrides (dict): Use this to change or add attributes to the
installed variant.
Returns:
`Variant` object - the (existing or newly created) variant in the
specified repository. If `dry_run` is True, None may be returned. | codesearchnet |
def enumerate_dataset(start=0):
def _apply_fn(dataset):
return dataset.enumerate(start)
return _apply_fn | A transformation that enumerates the elements of a dataset.
It is similar to python's `enumerate`.
For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
a = { 1, 2, 3 }
b = { (7, 8), (9, 10) }
# The nested structure of the `datasets` argument determines the
# structure of elements in the resulting dataset.
a.apply(tf.data.experimental.enumerate_dataset(start=5))
=> { (5, 1), (6, 2), (7, 3) }
b.apply(tf.data.experimental.enumerate_dataset())
=> { (0, (7, 8)), (1, (9, 10)) }
```
Args:
start: A `tf.int64` scalar `tf.Tensor`, representing the start value for
enumeration.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`. | github-repos |
def compute_token_logits(sequence_output, temperature, output_weights, output_bias):
logits = (torch.einsum('bsj,j->bs', sequence_output, output_weights) + output_bias) / temperature
return logits | Computes logits per token
Args:
sequence_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model.
temperature (`float`):
Temperature for the Bernoulli distribution.
output_weights (`torch.FloatTensor` of shape `(hidden_size,)`):
Weights of the linear layer for cell selection.
output_bias (`torch.FloatTensor` of shape `()`):
Bias of the linear layer for cell selection
Returns:
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Logits per token. | github-repos |
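A quick shape check of the einsum contraction above with random tensors (sizes are made up, and the function is assumed to be in scope): `bsj,j->bs` collapses the hidden dimension, leaving one logit per token.
```
import torch

batch_size, seq_len, hidden_size = 2, 4, 8
sequence_output = torch.randn(batch_size, seq_len, hidden_size)
output_weights = torch.randn(hidden_size)
output_bias = torch.tensor(0.0)

logits = compute_token_logits(sequence_output, 1.0, output_weights, output_bias)
print(logits.shape)  # torch.Size([2, 4])
```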
def construct_analogy_test_set(test_examples, dictionary, ignore_missing=False):
test = []
for example in test_examples:
try:
test.append([dictionary[word] for word in example])
except KeyError:
if ignore_missing:
pass
else:
raise
try:
test = np.array(test, dtype=np.int32)
except ValueError as e:
raise ValueError('Each row of the test set should contain '
'4 integer word ids', e)
return test | Construct the analogy test set by mapping the words to their
word vector ids.
Arguments:
- test_examples: iterable of 4-word iterables
- dictionary: a mapping from words to ids
- boolean ignore_missing: if True, words in the test set
that are not in the dictionary
will be dropped.
Returns:
- a N by 4 numpy matrix. | juraj-google-style |
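A toy example (assuming the function above is in scope): the second analogy is silently dropped because 'paris' is missing from the dictionary and `ignore_missing` is True.
```
dictionary = {'man': 0, 'woman': 1, 'king': 2, 'queen': 3, 'france': 4}
examples = [
    ['man', 'woman', 'king', 'queen'],
    ['paris', 'france', 'london', 'england'],  # 'paris' not in the dictionary
]
test = construct_analogy_test_set(examples, dictionary, ignore_missing=True)
print(test)  # [[0 1 2 3]] -- a 1 x 4 int32 matrix
```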
def is17(msg):
if allzeros(msg):
return False
d = hex2bin(data(msg))
if bin2int(d[28:56]) != 0:
return False
caps = cap17(msg)
if 'BDS20' not in caps:
return False
return True | Check if a message is likely to be BDS code 1,7
Args:
msg (String): 28 bytes hexadecimal message string
Returns:
bool: True or False | juraj-google-style |
def __init__(self, layer, named_tensors=None, scope='tf-layer', summary_labels=(), **kwargs):
self.layer_spec = layer
self.layer = util.get_object(obj=layer, predefined_objects=TFLayer.tf_layers, kwargs=kwargs)
self.first_scope = None
super(TFLayer, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels) | Creates a new layer instance of a TensorFlow layer.
Args:
layer: The name of the TensorFlow layer to wrap, e.g. 'dense'.
**kwargs: Additional arguments passed on to the TensorFlow layer constructor. | juraj-google-style |
def parse(cls, op):
for event in cls:
if event.value == int(op):
return event
return None | Gets the enum for the op code
Args:
op: value of the op code (will be casted to int)
Returns:
The enum that matches the op code | juraj-google-style |
def get_external_command_output(command: str) -> bytes:
args = shlex.split(command)
ret = subprocess.check_output(args)
return ret | Takes a command-line command, executes it, and returns its ``stdout``
output.
Args:
command: command string
Returns:
output from the command as ``bytes`` | codesearchnet |
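A minimal run of the helper above (assuming it is in scope) on a POSIX system where `echo` is available:
```
output = get_external_command_output('echo hello')
print(output)  # b'hello\n'
```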
def reply(self, reply_comment):
payload = (('{ "Comment": "' + reply_comment) + '"}')
endpoint = (('https:
self._make_api_call('post', endpoint, data=payload) | Reply to the Message.
Notes:
HTML can be inserted in the string and will be interpreted properly by Outlook.
Args:
reply_comment: String message to send with email. | codesearchnet |
def create(self, data=None, uri=None, timeout=-1, custom_headers=None, force=False):
if not data:
data = {}
default_values = self._get_default_values()
data = self._helper.update_resource_fields(data, default_values)
logger.debug('Create (uri = %s, resource = %s)' % (uri, str(data)))
resource_data = self._helper.create(data, uri, timeout, custom_headers, force)
new_resource = self.new(self._connection, resource_data)
return new_resource | Makes a POST request to create a resource when a request body is required.
Args:
data: Additional fields can be passed to create the resource.
uri: Resource uri
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
custom_headers: Allows set specific HTTP headers.
Returns:
Created resource. | juraj-google-style |
def deserialize(doc_xml, pyxb_binding=None):
pyxb_binding = (pyxb_binding or d1_common.types.dataoneTypes)
try:
return pyxb_binding.CreateFromDocument(doc_xml)
except pyxb.ValidationError as e:
raise ValueError('Unable to deserialize XML to PyXB. error="{}" xml="{}"'.format(e.details(), doc_xml))
except (pyxb.PyXBException, xml.sax.SAXParseException, Exception) as e:
raise ValueError('Unable to deserialize XML to PyXB. error="{}" xml="{}"'.format(str(e), doc_xml)) | Deserialize DataONE XML types to PyXB.
Args:
doc_xml: UTF-8 encoded ``bytes``
pyxb_binding: PyXB binding object. If not specified, the correct one should be
selected automatically.
Returns:
PyXB object
See Also:
``deserialize_d1_exception()`` for deserializing DataONE Exception types. | codesearchnet |
def write_object_proto(var, proto, options):
if options.experimental_variable_policy._expand_distributed_variables():
for var in var.values:
var_proto = proto.variable.experimental_distributed_variable_components.add()
var_proto.name = var.name.split(':')[0]
var_proto.device = var.device | Update a SavedObject proto for the caller.
If a DistributedVariable object supports this method, it will be called when
saving with a pre-built `SavedObject` proto representing the object, plus an
instance of `SaveOptions`. This method is then free to modify that proto
instance.
`DistributedVariable` with `AUTO` or `ON_WRITE` synchronization optionally
write out information about their components to the
`experimental_distributed_variable_components` field of a
`SavedVariable` (depending on the `SaveOptions` variable policy).
Args:
var: The DistributedVariable object.
proto: A pre-built `SavedObject` proto for this object. It is assumed this
will be a `SavedVariable` instance.
options: A `SaveOptions` instance. | github-repos |
def create_new_username(ip, devicetype=None, timeout=_DEFAULT_TIMEOUT):
res = Resource(_api_url(ip), timeout)
prompt = "Press the Bridge button, then press Return: "
if sys.version_info.major == 2:
_ = raw_input(prompt)
else:
_ = input(prompt)
if devicetype is None:
devicetype = "qhue
response = res(devicetype=devicetype, http_method="post")
return response[0]["success"]["username"] | Interactive helper function to generate a new anonymous username.
Args:
ip: ip address of the bridge
devicetype (optional): devicetype to register with the bridge. If
unprovided, generates a device type based on the local hostname.
timeout (optional, default=5): request timeout in seconds
Raises:
QhueException if something went wrong with username generation (for
example, if the bridge button wasn't pressed). | juraj-google-style |
def remove_vcf_info(keyword, variant_line=None, variant_dict=None):
logger.debug("Removing variant information {0}".format(keyword))
fixed_variant = None
def get_new_info_string(info_string, keyword):
new_info_list = []
splitted_info_string = info_string.split(';')
for info in splitted_info_string:
splitted_info_entry = info.split('=')
if splitted_info_entry[0] != keyword:
new_info_list.append(info)
new_info_string = ';'.join(new_info_list)
return new_info_string
if variant_line:
logger.debug("Removing information from a variant line")
splitted_variant = variant_line.rstrip('\n').split('\t')
old_info = splitted_variant[7]
if old_info == '.':
new_info_string = '.'
else:
new_info_string = get_new_info_string(old_info, keyword)
splitted_variant[7] = new_info_string
fixed_variant = '\t'.join(splitted_variant)
elif variant_dict:
logger.debug("Removing information to a variant dict")
old_info = variant_dict['INFO']
if old_info == '.':
variant_dict['INFO'] = old_info
else:
new_info_string = get_new_info_string(old_info, keyword)
variant_dict['INFO'] = new_info_string
fixed_variant = variant_dict
return fixed_variant | Remove the information of a info field of a vcf variant line or a
variant dict.
Arguments:
variant_line (str): A vcf formatted variant line
variant_dict (dict): A variant dictionary
keyword (str): The info field key
Returns:
variant_line (str): The variant line (or dict) with the info entry removed | juraj-google-style |
def FromTimestampToHttp(self, ts):
ts = time.gmtime(ts)
return time.strftime('%a, %d %b %Y %H:%M:%S GMT', ts) | Converts internal nss_cache timestamp to HTTP timestamp.
Args:
ts: number of seconds since epoch
Returns:
HTTP format timestamp string | github-repos |
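The conversion itself is plain standard-library code, so an equivalent standalone snippet (epoch timestamp chosen for illustration) looks like this:
```
import time

ts = 0  # seconds since epoch
print(time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(ts)))
# Thu, 01 Jan 1970 00:00:00 GMT
```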
def make_simulated_env_fn(**env_kwargs):
def env_fn(in_graph):
class_ = SimulatedBatchEnv if in_graph else SimulatedBatchGymEnv
return class_(**env_kwargs)
return env_fn | Returns a function creating a simulated env, in or out of graph.
Args:
**env_kwargs: kwargs to pass to the simulated env constructor.
Returns:
Function in_graph -> env. | juraj-google-style |
def sudo_remove_dirtree(dir_name):
try:
subprocess.check_output(['sudo', 'rm', '-rf', dir_name])
except subprocess.CalledProcessError as e:
raise WorkerError('Cant remove directory {0}'.format(dir_name), e) | Removes directory tree as a superuser.
Args:
dir_name: name of the directory to remove.
This function is necessary to clean up directories created from inside a
Docker container, since they are usually written as root and thus have to be
removed as root. | codesearchnet |
def lookup_descriptor(self, definition_name):
try:
return self.__descriptors[definition_name]
except KeyError:
pass
if self.__descriptor_loader:
definition = self.__descriptor_loader(definition_name)
self.__descriptors[definition_name] = definition
return definition
else:
raise messages.DefinitionNotFoundError(('Could not find definition for %s' % definition_name)) | Lookup descriptor by name.
Get descriptor from library by name. If the descriptor is not found, this will
attempt to find it via the descriptor loader, if provided.
Args:
definition_name: Definition name to find.
Returns:
Descriptor that describes definition name.
Raises:
DefinitionNotFoundError if no descriptor exists for definition name. | codesearchnet |
def filter(self, limit=None, to=None, category=None):
if (category and (not to)):
msg_slice = itertools.islice((x for x in self.store if (x[2] == category)), limit)
elif (to and (not category)):
to = JID.fromstr(to)
msg_slice = itertools.islice((x for x in self.store if _agent_in_msg(to, x[1])), limit)
elif (to and category):
to = JID.fromstr(to)
msg_slice = itertools.islice((x for x in self.store if (_agent_in_msg(to, x[1]) and (x[2] == category))), limit)
else:
msg_slice = self.all(limit=limit)
return msg_slice
return list(msg_slice)[::(- 1)] | Returns the events that match the filters
Args:
limit (int, optional): the max length of the events to return (Default value = None)
to (str, optional): only events that have been sent or received by 'to' (Default value = None)
category (str, optional): only events belonging to the category (Default value = None)
Returns:
list: a list of filtered events | codesearchnet |
def _add_input_deps(self, executor, args, kwargs):
if (executor == 'data_manager'):
return (args, kwargs)
inputs = kwargs.get('inputs', [])
for (idx, f) in enumerate(inputs):
if (isinstance(f, File) and f.is_remote()):
inputs[idx] = self.data_manager.stage_in(f, executor)
for (kwarg, f) in kwargs.items():
if (isinstance(f, File) and f.is_remote()):
kwargs[kwarg] = self.data_manager.stage_in(f, executor)
newargs = list(args)
for (idx, f) in enumerate(newargs):
if (isinstance(f, File) and f.is_remote()):
newargs[idx] = self.data_manager.stage_in(f, executor)
return (tuple(newargs), kwargs) | Look for inputs of the app that are remote files. Submit stage_in
apps for such files and replace the file objects in the inputs list with
corresponding DataFuture objects.
Args:
- executor (str) : executor where the app is going to be launched
- args (List) : Positional args to app function
- kwargs (Dict) : Kwargs to app function | codesearchnet |
def __init__(self, config, channel=None):
self.channel = channel
if not _kubernetes_enabled:
raise OptionalModuleMissing(['kubernetes'],
"Kubernetes provider requires kubernetes module and config.")
self.kube_client = client.ExtensionsV1beta1Api()
self.config = config
self.sitename = self.config['site']
self.namespace = self.config['execution']['namespace']
self.image = self.config['execution']['image']
self.init_blocks = self.config["execution"]["block"]["initBlocks"]
self.min_blocks = self.config["execution"]["block"]["minBlocks"]
self.max_blocks = self.config["execution"]["block"]["maxBlocks"]
self.user_id = None
self.group_id = None
self.run_as_non_root = None
if 'security' in self.config['execution']:
self.user_id = self.config["execution"]['security']["user_id"]
self.group_id = self.config["execution"]['security']["group_id"]
self.run_as_non_root = self.config["execution"]['security']["run_as_non_root"]
self.secret = None
if 'secret' in self.config['execution']:
self.secret = self.config['execution']['secret']
self.resources = {} | Initialize the Kubernetes execution provider class
Args:
- Config (dict): Dictionary with all the config options.
KWargs :
- channel (channel object) : default=None A channel object | juraj-google-style |
def convert_tokens_to_string(self, tokens: List[str]) -> str:
raise NotImplementedError | Converts a sequence of tokens in a single string. The most simple way to do it is `" ".join(tokens)` but we
often want to remove sub-word tokenization artifacts at the same time.
Args:
tokens (`List[str]`): The token to join in a string.
Returns:
`str`: The joined tokens. | github-repos |
def relu(x, alpha=0.0, max_value=None, threshold=0):
dtype = getattr(x, 'dtype', floatx())
if alpha != 0.0:
if max_value is None and threshold == 0:
return nn.leaky_relu(x, alpha=alpha)
if threshold != 0:
negative_part = nn.relu(-x + threshold)
else:
negative_part = nn.relu(-x)
clip_max = max_value is not None
if threshold != 0:
x = x * math_ops.cast(math_ops.greater(x, threshold), dtype=dtype)
elif max_value == 6:
x = nn.relu6(x)
clip_max = False
else:
x = nn.relu(x)
if clip_max:
max_value = _constant_to_tensor(max_value, x.dtype.base_dtype)
zero = _constant_to_tensor(0, x.dtype.base_dtype)
x = clip_ops.clip_by_value(x, zero, max_value)
if alpha != 0.0:
alpha = _to_tensor(alpha, x.dtype.base_dtype)
x -= alpha * negative_part
return x | Rectified linear unit.
With default values, it returns element-wise `max(x, 0)`.
Otherwise, it follows:
`f(x) = max_value` for `x >= max_value`,
`f(x) = x` for `threshold <= x < max_value`,
`f(x) = alpha * (x - threshold)` otherwise.
Args:
x: A tensor or variable.
alpha: A scalar, slope of negative section (default=`0.`).
max_value: float. Saturation threshold.
threshold: float. Threshold value for thresholded activation.
Returns:
A tensor. | github-repos |
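For reference, a few calls through the public `tf.keras.backend.relu` wrapper in TF 2.x (input values are made up), covering the plain, leaky, and clipped/thresholded branches above:
```
import tensorflow as tf

x = tf.constant([-3.0, -1.0, 0.5, 4.0, 10.0])

print(tf.keras.backend.relu(x).numpy())                                # [ 0.   0.   0.5  4.  10. ]
print(tf.keras.backend.relu(x, alpha=0.1).numpy())                     # [-0.3 -0.1  0.5  4.  10. ]
print(tf.keras.backend.relu(x, max_value=6.0, threshold=1.0).numpy())  # [0. 0. 0. 4. 6.]
```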
def generate_calculus_integrate_sample(vlist, ops, min_depth, max_depth, functions):
var_index = random.randrange(len(vlist))
var = vlist[var_index]
consts = (vlist[:var_index] + vlist[(var_index + 1):])
depth = random.randrange(min_depth, (max_depth + 1))
expr = random_expr_with_required_var(depth, var, consts, ops)
expr_str = str(expr)
sample = ((var + ':') + expr_str)
target = format_sympy_expr(sympy.integrate(expr_str, sympy.Symbol(var)), functions=functions)
return (sample, target) | Randomly generate a symbolic integral dataset sample.
Given an input expression, produce the indefinite integral.
Args:
vlist: Variable list. List of chars that can be used in the expression.
ops: List of ExprOp instances. The allowed operators for the expression.
min_depth: Expression trees will not have a smaller depth than this. 0 means
there is just a variable. 1 means there is one operation.
max_depth: Expression trees will not have a larger depth than this. To make
all trees have the same depth, set this equal to `min_depth`.
functions: Defines special functions. A dict mapping human readable string
names, like "log", "exp", "sin", "cos", etc., to single chars. Each
function gets a unique token, like "L" for "log".
Returns:
sample: String representation of the input. Will be of the form
'var:expression'.
target: String representation of the solution. | codesearchnet |
def WaitUntilNoFlowsToProcess(self, timeout=None):
t = self.flow_handler_thread
if (not t):
return
start_time = time.time()
while True:
with self.lock:
if ((not t.isAlive()) or ((not self._GetFlowRequestsReadyForProcessing()) and (not self.flow_handler_num_being_processed))):
return
time.sleep(0.2)
if (timeout and ((time.time() - start_time) > timeout)):
raise TimeOutWhileWaitingForFlowsToBeProcessedError("Flow processing didn't finish in time.") | Waits until flow processing thread is done processing flows.
Args:
timeout: If specified, is a max number of seconds to spend waiting.
Raises:
TimeOutWhileWaitingForFlowsToBeProcessedError: if timeout is reached. | codesearchnet |
def handle_duplications(file_path):
logging.info('Handling duplications for "%s"', file_path)
f = open_strings_file(file_path, "r+")
header_comment_key_value_tuples = extract_header_comment_key_value_tuples_from_file(f)
file_elements = []
section_file_elements = []
keys_to_objects = {}
duplicates_found = []
for header_comment, comments, key, value in header_comment_key_value_tuples:
if len(header_comment) > 0:
for elem in sorted(section_file_elements, key=lambda x: x.comments[0]):
file_elements.append(elem)
section_file_elements = []
file_elements.append(Comment(header_comment))
if key in keys_to_objects:
keys_to_objects[key].add_comments(comments)
duplicates_found.append(key)
else:
loc_obj = LocalizationEntry(comments, key, value)
keys_to_objects[key] = loc_obj
section_file_elements.append(loc_obj)
for elem in sorted(section_file_elements, key=lambda x: x.comments[0]):
file_elements.append(elem)
f.seek(0)
for element in file_elements:
f.write(unicode(element))
f.write(u"\n")
f.truncate()
f.close()
logging.info("Omitted %d duplicates (%s)" % (len(duplicates_found), ",".join(duplicates_found)))
logging.info('Finished handling duplications for "%s"', file_path) | Omits the duplications in the strings files.
Keys that appear more than once will be joined into one appearance and the omission will be documented.
Args:
file_path (str): The path to the strings file. | juraj-google-style |
def hexstr(text):
text = text.strip().lower()
if text.startswith(('0x', '0X')):
text = text[2:]
if (not text):
raise s_exc.BadTypeValu(valu=text, name='hexstr', mesg='No string left after stripping')
try:
s_common.uhex(text)
except (binascii.Error, ValueError) as e:
raise s_exc.BadTypeValu(valu=text, name='hexstr', mesg=str(e))
return text | Ensure a string is valid hex.
Args:
text (str): String to normalize.
Examples:
Norm a few strings:
hexstr('0xff00')
hexstr('ff00')
Notes:
Will accept strings prefixed by '0x' or '0X' and remove them.
Returns:
str: Normalized hex string. | codesearchnet |
def __gt__(self, other: 'TensorFluent') -> 'TensorFluent':
return self._binary_op(self, other, tf.greater, tf.float32) | Returns a TensorFluent for the greater-than relational operator.
Args:
self: The first operand.
other: The second operand. | juraj-google-style |
def sg_arg_def(**kwargs):
for (k, v) in kwargs.items():
if ((type(v) is tuple) or (type(v) is list)):
(v, c) = (v[0], v[1])
else:
c = k
if (type(v) is str):
tf.app.flags.DEFINE_string(k, v, c)
elif (type(v) is int):
tf.app.flags.DEFINE_integer(k, v, c)
elif (type(v) is float):
tf.app.flags.DEFINE_float(k, v, c)
elif (type(v) is bool):
tf.app.flags.DEFINE_bool(k, v, c) | r"""Defines command line options
Args:
**kwargs:
key: A name for the option.
value : Default value or a tuple of (default value, description).
Returns:
None
For example,
```
# Either of the following two lines will define `--n_epoch` command line argument and set its default value as 1.
tf.sg_arg_def(n_epoch=1)
tf.sg_arg_def(n_epoch=(1, 'total number of epochs'))
``` | codesearchnet |
def _GetStatus(self, two_factor=False):
params = ['status']
if two_factor:
params += ['--twofactor']
retcode = self._RunOsLoginControl(params)
if (retcode is None):
if self.oslogin_installed:
self.logger.warning('OS Login not installed.')
self.oslogin_installed = False
return None
self.oslogin_installed = True
if (not os.path.exists(constants.OSLOGIN_NSS_CACHE)):
return False
return (not retcode) | Check whether OS Login is installed.
Args:
two_factor: bool, True if two factor should be enabled.
Returns:
bool, True if OS Login is installed. | codesearchnet |
def get_avatar(from_header, size=64, default='retro'):
params = OrderedDict([('s', size), ('d', default)])
query = parse.urlencode(params)
address = email.utils.parseaddr(from_header)[1]
value_hash = sha256(address.encode('utf-8')).hexdigest()
return 'https: | Get the avatar URL from the email's From header.
Args:
from_header (str): The email's From header. May contain the sender's full name.
Returns:
str: The URL to that sender's avatar. | codesearchnet |
def __init__(self, file_system, path_spec):
super(Directory, self).__init__()
self._entries = None
self._file_system = file_system
self.path_spec = path_spec | Initializes a directory.
Args:
file_system (FileSystem): file system.
path_spec (PathSpec): path specification. | juraj-google-style |
def _get_current_ids(self, source=True, meta=True, spectra=True, spectra_annotation=True):
c = self.c
if source:
c.execute('SELECT max(id) FROM library_spectra_source')
last_id_origin = c.fetchone()[0]
if last_id_origin:
self.current_id_origin = (last_id_origin + 1)
else:
self.current_id_origin = 1
if meta:
c.execute('SELECT max(id) FROM library_spectra_meta')
last_id_meta = c.fetchone()[0]
if last_id_meta:
self.current_id_meta = (last_id_meta + 1)
else:
self.current_id_meta = 1
if spectra:
c.execute('SELECT max(id) FROM library_spectra')
last_id_spectra = c.fetchone()[0]
if last_id_spectra:
self.current_id_spectra = (last_id_spectra + 1)
else:
self.current_id_spectra = 1
if spectra_annotation:
c.execute('SELECT max(id) FROM library_spectra_annotation')
last_id_spectra_annotation = c.fetchone()[0]
if last_id_spectra_annotation:
self.current_id_spectra_annotation = (last_id_spectra_annotation + 1)
else:
self.current_id_spectra_annotation = 1 | Get the current id for each table in the database
Args:
source (boolean): get the id for the table "library_spectra_source" will update self.current_id_origin
meta (boolean): get the id for the table "library_spectra_meta" will update self.current_id_meta
spectra (boolean): get the id for the table "library_spectra" will update self.current_id_spectra
spectra_annotation (boolean): get the id for the table "library_spectra_annotation" will update
self.current_id_spectra_annotation | codesearchnet |
def _inter_df_op_handler(self, func, other, **kwargs):
axis = kwargs.get("axis", 0)
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if isinstance(other, type(self)):
return self._inter_manager_operations(
other, "outer", lambda x, y: func(x, y, **kwargs)
)
else:
return self._scalar_operations(
axis, other, lambda df: func(df, other, **kwargs)
) | Helper method for inter-manager and scalar operations.
Args:
func: The function to use on the Manager/scalar.
other: The other Manager/scalar.
Returns:
New DataManager with new data and index. | juraj-google-style |
def process_user_input(self):
user_input = self.screen.input()
try:
indexes = self.__parse_range_list(user_input)
indexes[:] = [(x - 1) for x in indexes if (0 < x < (len(self.items) + 1))]
for index in indexes:
self.current_option = index
self.select()
except Exception as e:
return | This overrides the method in ConsoleMenu to allow for comma-delimited and range inputs.
Examples:
All of the following inputs would have the same result:
* 1,2,3,4
* 1-4
* 1-2,3-4
* 1 - 4
* 1, 2, 3, 4
Raises:
ValueError: If the input cannot be correctly parsed. | codesearchnet |
def StoreRequestsAndResponses(self, new_requests=None, new_responses=None, requests_to_delete=None):
to_write = {}
if (new_requests is not None):
for (request, timestamp) in new_requests:
subject = request.session_id.Add('state')
queue = to_write.setdefault(subject, {})
queue.setdefault((self.FLOW_REQUEST_TEMPLATE % request.id), []).append((request.SerializeToString(), timestamp))
if (new_responses is not None):
for (response, timestamp) in new_responses:
if (response.type == rdf_flows.GrrMessage.Type.STATUS):
subject = response.session_id.Add('state')
attribute = (self.FLOW_STATUS_TEMPLATE % response.request_id)
to_write.setdefault(subject, {}).setdefault(attribute, []).append((response.SerializeToString(), timestamp))
subject = self.GetFlowResponseSubject(response.session_id, response.request_id)
attribute = (self.FLOW_RESPONSE_TEMPLATE % (response.request_id, response.response_id))
to_write.setdefault(subject, {}).setdefault(attribute, []).append((response.SerializeToString(), timestamp))
to_delete = {}
if (requests_to_delete is not None):
for request in requests_to_delete:
queue = to_delete.setdefault(request.session_id.Add('state'), [])
queue.append((self.FLOW_REQUEST_TEMPLATE % request.id))
queue.append((self.FLOW_STATUS_TEMPLATE % request.id))
for subject in (set(to_write) | set(to_delete)):
self.MultiSet(subject, to_write.get(subject, {}), to_delete=to_delete.get(subject, []), sync=True) | Stores new flow requests and responses to the data store.
Args:
new_requests: A list of tuples (request, timestamp) to store in the data
store.
new_responses: A list of tuples (response, timestamp) to store in the data
store.
requests_to_delete: A list of requests that should be deleted from the
data store. | codesearchnet |
def report_status_to_github(self, state: str, description: str, context: str, target_url: Optional[str]=None):
if (state not in ['error', 'failure', 'pending', 'success']):
raise ValueError('Unrecognized state: {!r}'.format(state))
if ((self.repository is None) or (self.repository.access_token is None)):
return
print(repr(('report_status', context, state, description, target_url)), file=sys.stderr)
payload = {'state': state, 'description': description, 'context': context}
if (target_url is not None):
payload['target_url'] = target_url
url = 'https:
response = requests.post(url, json=payload)
if (response.status_code != 201):
raise IOError('Request failed. Code: {}. Content: {}.'.format(response.status_code, response.content)) | Sets a commit status indicator on github.
If not running from a pull request (i.e. repository is None), then this
just prints to stderr.
Args:
state: The state of the status indicator.
Must be 'error', 'failure', 'pending', or 'success'.
description: A summary of why the state is what it is,
e.g. '5 lint errors' or 'tests passed!'.
context: The name of the status indicator, e.g. 'pytest' or 'lint'.
target_url: Optional location where additional details about the
status can be found, e.g. an online test results page.
Raises:
ValueError: Not one of the allowed states.
IOError: The HTTP post request failed, or the response didn't have
a 201 code indicating success in the expected way. | codesearchnet |
def detect_extracellular_compartment(model):
extracellular_key = Counter()
for reaction in model.reactions:
equation = reaction.equation
if equation is None:
continue
if len(equation.compounds) == 1:
compound, _ = equation.compounds[0]
compartment = compound.compartment
extracellular_key[compartment] += 1
if len(extracellular_key) == 0:
return None
else:
best_key, _ = extracellular_key.most_common(1)[0]
logger.info('{} is extracellular compartment'.format(best_key))
return best_key | Detect the identifier for equations with extracellular compartments.
Args:
model: :class:`NativeModel`. | juraj-google-style |
def swd_read32(self, offset):
value = self._dll.JLINK_SWD_GetU32(offset)
return ctypes.c_uint32(value).value | Gets a unit of ``32`` bits from the input buffer.
Args:
self (JLink): the ``JLink`` instance
offset (int): the offset (in bits) from which to start reading
Returns:
The integer read from the input buffer. | codesearchnet |
def from_non_deterministic_state(cls, alg=None):
if config.is_op_determinism_enabled():
raise RuntimeError('"from_non_deterministic_state" cannot be called when determinism is enabled.')
if alg is None:
alg = DEFAULT_ALGORITHM
alg = random_ops_util.convert_alg_to_int(alg)
state = non_deterministic_ints(shape=[_get_state_size(alg)], dtype=SEED_TYPE)
return cls(state=state, alg=alg) | Creates a generator by non-deterministically initializing its state.
The source of the non-determinism will be platform- and time-dependent.
Args:
alg: (optional) the RNG algorithm. If None, it will be auto-selected. See
`__init__` for its possible values.
Returns:
The new generator. | github-repos |
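Typical usage of the class method above (shape chosen arbitrarily); every run draws from a differently seeded generator:
```
import tensorflow as tf

g = tf.random.Generator.from_non_deterministic_state()
print(g.normal(shape=[2, 3]))  # a different 2x3 sample on each run
```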
def reqExecutions(
self, execFilter: ExecutionFilter = None) -> List[Fill]:
return self._run(self.reqExecutionsAsync(execFilter)) | It is recommended to use :meth:`.fills` or
:meth:`.executions` instead.
Request and return a list of fills.
This method is blocking.
Args:
execFilter: If specified, return executions that match the filter. | juraj-google-style |
def evaluate(self, verbose=True, decode=True, passes=None, num_threads=1,
apply_experimental_transforms=False):
if isinstance(self.expr, WeldObject):
return self.expr.evaluate(
to_weld_type(
self.weld_type,
self.dim),
verbose,
decode,
passes=passes,
num_threads=num_threads,
apply_experimental_transforms=apply_experimental_transforms)
return self.expr | Summary
Args:
verbose (bool, optional): Description
decode (bool, optional): Description
Returns:
TYPE: Description | juraj-google-style |
def get_hosted_zone_by_name(client, zone_name):
p = client.get_paginator("list_hosted_zones")
for i in p.paginate():
for zone in i["HostedZones"]:
if zone["Name"] == zone_name:
return parse_zone_id(zone["Id"])
return None | Get the zone id of an existing zone by name.
Args:
client (:class:`botocore.client.Route53`): The connection used to
interact with Route53's API.
zone_name (string): The name of the DNS hosted zone to create.
Returns:
string: The Id of the Hosted Zone. | juraj-google-style |
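A hedged usage sketch (assuming the function above and its `parse_zone_id` helper are importable, and that AWS credentials are configured); note that hosted zone names in Route 53 carry a trailing dot:
```
import boto3

client = boto3.client('route53')
zone_id = get_hosted_zone_by_name(client, 'example.com.')  # trailing dot
print(zone_id)  # the zone id string, or None if no such zone exists
```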
def mtf_range(mesh, dim, dtype, name=None):
dim = convert_to_dimension(dim)
with tf.variable_scope(name, default_name="range"):
if dtype == tf.bfloat16:
tf_range = tf.cast(tf.range(dim.size), tf.bfloat16)
else:
tf_range = tf.range(dim.size, dtype=dtype)
return import_tf_tensor(mesh, tf_range, shape=Shape([dim])) | Create a 1d mesh tensor with a range from [0, dim.size).
Call externally as mtf.range()
Args:
mesh: a Mesh
dim: a Dimension
dtype: a tf.DType
name: an optional string
Returns:
a Tensor | juraj-google-style |
def create_xml_dom_element(doc, name, value):
s = str_or_unicode(value)
if six.PY2 and not isinstance(s, unicode):
s = s.decode('utf-8', 'ignore')
if isinstance(value, bool):
s = s.lower()
s = _ILLEGAL_XML_CHARS_REGEX.sub(u'', s)
e = doc.createElement(name)
e.appendChild(doc.createTextNode(s))
return e | Returns an XML DOM element with name and text value.
Args:
doc: minidom.Document, the DOM document it should create nodes from.
name: str, the tag of XML element.
value: object, whose string representation will be used
as the value of the XML element. Illegal or highly discouraged xml 1.0
characters are stripped.
Returns:
An instance of minidom.Element. | juraj-google-style |
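For reference, a standalone minidom snippet showing the kind of element the helper above builds (tag and value are invented; the helper additionally lowercases booleans and strips illegal XML characters):
```
from xml.dom import minidom

doc = minidom.Document()
element = doc.createElement('useGpu')
element.appendChild(doc.createTextNode('false'))
print(element.toxml())  # <useGpu>false</useGpu>
```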
def _do_pass(self, pass_, dag, options):
if not options["ignore_requires"]:
for required_pass in pass_.requires:
dag = self._do_pass(required_pass, dag, options)
if pass_ not in self.valid_passes:
if pass_.is_transformation_pass:
pass_.property_set = self.fenced_property_set
new_dag = pass_.run(dag)
if not isinstance(new_dag, DAGCircuit):
raise TranspilerError("Transformation passes should return a transformed dag."
"The pass %s is returning a %s" % (type(pass_).__name__,
type(new_dag)))
dag = new_dag
elif pass_.is_analysis_pass:
pass_.property_set = self.property_set
pass_.run(FencedDAGCircuit(dag))
else:
raise TranspilerError("I dont know how to handle this type of pass")
self._update_valid_passes(pass_, options['ignore_preserves'])
return dag | Do a pass and its "requires".
Args:
pass_ (BasePass): Pass to do.
dag (DAGCircuit): The dag on which the pass is ran.
options (dict): PassManager options.
Returns:
DAGCircuit: The transformed dag in case of a transformation pass.
The same input dag in case of an analysis pass.
Raises:
TranspilerError: If the pass is not a proper pass instance. | juraj-google-style |
def get_request_feature(self, name):
if ('[]' in name):
return (self.request.query_params.getlist(name) if (name in self.features) else None)
elif ('{}' in name):
return (self._extract_object_params(name) if (name in self.features) else {})
else:
return (self.request.query_params.get(name) if (name in self.features) else None) | Parses the request for a particular feature.
Arguments:
name: A feature name.
Returns:
A feature parsed from the URL if the feature is supported, or None. | codesearchnet |
def resize_annotation(annotation: Dict[str, Any], orig_size: Tuple[int, int], target_size: Tuple[int, int], threshold: float=0.5, resample: PILImageResampling=PILImageResampling.NEAREST):
ratios = tuple((float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size)))
ratio_height, ratio_width = ratios
new_annotation = {}
new_annotation['size'] = target_size
for key, value in annotation.items():
if key == 'boxes':
boxes = value
scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32)
new_annotation['boxes'] = scaled_boxes
elif key == 'area':
area = value
scaled_area = area * (ratio_width * ratio_height)
new_annotation['area'] = scaled_area
elif key == 'masks':
masks = value[:, None]
masks = np.array([resize(mask, target_size, resample=resample) for mask in masks])
masks = masks.astype(np.float32)
masks = masks[:, 0] > threshold
new_annotation['masks'] = masks
elif key == 'size':
new_annotation['size'] = target_size
else:
new_annotation[key] = value
return new_annotation | Resizes an annotation to a target size.
Args:
annotation (`Dict[str, Any]`):
The annotation dictionary.
orig_size (`Tuple[int, int]`):
The original size of the input image.
target_size (`Tuple[int, int]`):
The target size of the image, as returned by the preprocessing `resize` step.
threshold (`float`, *optional*, defaults to 0.5):
The threshold used to binarize the segmentation masks.
resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`):
The resampling filter to use when resizing the masks. | github-repos |
def select_with_condition(self, condition, key=None):
condition = Condition.as_condition(condition)
new_confs = []
for conf in self:
obj = (conf if (key is None) else AttrDict(conf[key]))
add_it = condition(obj=obj)
if add_it:
new_confs.append(conf)
self._confs = new_confs | Remove all the configurations that do not satisfy the given condition.
Args:
condition: dict or :class:`Condition` object with operators expressed with a Mongodb-like syntax
key: Selects the sub-dictionary on which condition is applied, e.g. key="vars"
if we have to filter the configurations depending on the values in vars | codesearchnet |
def Decrypt(self, encrypted_data):
index_split = -(len(encrypted_data) % AES.block_size)
if index_split:
remaining_encrypted_data = encrypted_data[index_split:]
encrypted_data = encrypted_data[:index_split]
else:
remaining_encrypted_data = b''
decrypted_data = self._aes_cipher.decrypt(encrypted_data)
return decrypted_data, remaining_encrypted_data | Decrypts the encrypted data.
Args:
encrypted_data (bytes): encrypted data.
Returns:
tuple[bytes, bytes]: decrypted data and remaining encrypted data. | juraj-google-style |
def get_trial(self) -> Trial:
raise NotImplementedError | Gets current Trial.
Returns:
An up-to-date `Trial` object. A distributed tuning backend should make
sure the return value is up-to-date not only locally, but among different
workers. | github-repos |
def get_tick(self, name):
name_map = {'fast': config_fast_tick_secs, 'user1': config_tick1_secs, 'user2': config_tick2_secs}
config = name_map.get(name)
if (config is None):
raise ArgumentError('Unknown tick requested', name=name)
slot = SlotIdentifier.FromString('controller')
try:
var = self.get_config(slot, config)
return var[1]
except ArgumentError:
return 0 | Check the config variables to see if there is a configurable tick.
Sensor Graph has a built-in 10 second tick that is sent every 10
seconds to allow for triggering timed events. There is a second
'user' tick that is generated internally by the sensorgraph compiler
and used for fast operations and finally there are several field
configurable ticks that can be used for setting up configurable
timers.
This is done by setting a config variable on the controller with the
desired tick interval, which is then interpreted by this function.
The appropriate config_id to use is listed in `known_constants.py`
Returns:
int: 0 if the tick is disabled, otherwise the number of seconds
between each tick | codesearchnet |
def _count_eventually_passing_retries(self):
count = 0
for record in self.passed:
r = record
while r.parent is not None and r.parent[1] == TestParentType.RETRY:
count += 1
r = r.parent[0]
return count | Counts the number of retry iterations that eventually passed.
If a test is retried and eventually passed, all the associated non-passing
iterations should not be considered when devising the final state of the
test run.
Returns:
Int, the number that should be subtracted from the result altering error
counts. | github-repos |
def split_vector_ctype(ctype):
if not is_vector_ctype(ctype):
raise ValueError('The given ctype is not a vector type.')
for vector_length in [2, 3, 4, 8, 16]:
if ctype.endswith(str(vector_length)):
vector_str_len = len(str(vector_length))
return ctype[:-vector_str_len], int(ctype[-vector_str_len:]) | Split a vector ctype into a raw ctype and the vector length.
If the given ctype is not a vector type, we raise an error. I
Args:
ctype (str): the ctype to possibly split into a raw ctype and the vector length
Returns:
tuple: the raw ctype and the vector length | juraj-google-style |
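Assuming the function above (and its `is_vector_ctype` helper) are in scope, splitting works like this:
```
print(split_vector_ctype('float4'))    # ('float', 4)
print(split_vector_ctype('double16'))  # ('double', 16)
```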
def add_region_feature(self, start_resnum, end_resnum, feat_type=None, feat_id=None, qualifiers=None):
if self.feature_file:
raise ValueError('Feature file associated with sequence, please remove file association to append additional features.')
if (not feat_type):
feat_type = 'Manually added protein sequence region feature'
newfeat = SeqFeature(location=FeatureLocation((start_resnum - 1), end_resnum), type=feat_type, id=feat_id, qualifiers=qualifiers)
self.features.append(newfeat) | Add a feature to the features list describing a region of the protein sequence.
Args:
start_resnum (int): Start residue number of the protein sequence feature
end_resnum (int): End residue number of the protein sequence feature
feat_type (str, optional): Optional description of the feature type (e.g. 'binding domain')
feat_id (str, optional): Optional ID of the feature (e.g. 'TM1')
qualifiers (dict, optional): Optional dictionary of qualifiers describing the feature | codesearchnet
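A hedged standalone Biopython example building the same kind of region feature outside the class; the residue numbers and labels are invented for illustration.
```python
# Assumes Biopython is installed; numbers and labels are placeholders.
from Bio.SeqFeature import SeqFeature, FeatureLocation

start_resnum, end_resnum = 10, 35                             # 1-based residue numbers
feat = SeqFeature(
    location=FeatureLocation(start_resnum - 1, end_resnum),   # 0-based start, end-exclusive
    type='Manually added protein sequence region feature',
    id='TM1',
    qualifiers={'note': 'binding domain'},
)
print(feat.location)                                          # [9:35]
```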
def update(self, **kwargs):
to_remove = []
for key, value in kwargs.items():
if hasattr(self, key):
setattr(self, key, value)
to_remove.append(key)
self.validate()
unused_kwargs = {key: value for key, value in kwargs.items() if key not in to_remove}
return unused_kwargs | Updates attributes of this class instance with attributes from `kwargs` if they match existing attributes,
returning all the unused kwargs.
Args:
kwargs (`Dict[str, Any]`):
Dictionary of attributes to tentatively update this class.
Returns:
`Dict[str, Any]`: Dictionary containing all the key-value pairs that were not used to update the instance. | github-repos |
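A minimal, self-contained illustration of the update-and-return-unused pattern above; DummyConfig is a hypothetical stand-in for a real configuration class.
```python
# Hypothetical config object demonstrating the same pattern as update() above.
class DummyConfig:
    def __init__(self):
        self.lr = 0.1
        self.epochs = 3

    def validate(self):
        assert self.epochs > 0

    def update(self, **kwargs):
        used = [k for k in kwargs if hasattr(self, k)]
        for k in used:
            setattr(self, k, kwargs[k])
        self.validate()
        return {k: v for k, v in kwargs.items() if k not in used}

cfg = DummyConfig()
unused = cfg.update(lr=0.01, batch_size=32)
print(cfg.lr, unused)   # 0.01 {'batch_size': 32}
```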
def load_imgs(filenames, masker, nan_to_num=True):
if isinstance(filenames, string_types):
filenames = [filenames]
data = np.zeros((masker.n_vox_in_mask, len(filenames)))
for (i, f) in enumerate(filenames):
data[:, i] = masker.mask(f, nan_to_num)
return data | Load multiple images from file into an ndarray.
Args:
filenames: A single filename or list of filenames pointing to valid
images.
masker: A Masker instance.
nan_to_num: Optional boolean indicating whether to convert NaNs to zero.
Returns:
An m x n 2D numpy array, where m = number of voxels in mask and
n = number of images passed. | codesearchnet |
def is_coord_subset_pbc(subset, superset, atol=1e-8, mask=None):
c1 = np.array(subset, dtype=np.float64)
c2 = np.array(superset, dtype=np.float64)
if mask is not None:
m = np.array(mask, dtype=np.int_)
else:
m = np.zeros((len(subset), len(superset)), dtype=np.int_)
atol = np.zeros(3, dtype=np.float64) + atol
return cuc.is_coord_subset_pbc(c1, c2, atol, m) | Tests if all fractional coords in subset are contained in superset.
Args:
subset, superset: List of fractional coords
atol (float or size 3 array): Tolerance for matching
mask (boolean array): Mask of matches that are not allowed.
i.e. if mask[1,2] == True, then subset[1] cannot be matched
to superset[2]
Returns:
True if all of subset is in superset. | juraj-google-style |
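A pure-NumPy sketch of the periodic subset test (the real function delegates to a compiled coord_cython utility and also supports a match mask); the coordinates and tolerance are illustrative.
```python
# Sketch without the compiled helper or the mask argument; values are made up.
import numpy as np

def is_coord_subset_pbc_sketch(subset, superset, atol=1e-8):
    c1 = np.asarray(subset, dtype=np.float64)
    c2 = np.asarray(superset, dtype=np.float64)
    diff = c1[:, None, :] - c2[None, :, :]
    diff -= np.round(diff)                      # wrap into [-0.5, 0.5) so 0.999 matches 0.001
    matches = np.all(np.abs(diff) < atol, axis=-1)
    return bool(matches.any(axis=-1).all())

print(is_coord_subset_pbc_sketch([[0.999, 0.5, 0.0]],
                                 [[0.001, 0.5, 0.0], [0.25, 0.25, 0.25]],
                                 atol=0.01))    # True under periodic boundary conditions
```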
def _node_def(from_node_def, export_scope, unbound_inputs, clear_devices=False):
node_def = copy.deepcopy(from_node_def)
for i, v in enumerate(node_def.input):
if export_scope and (not node_def.input[i].lstrip('^').startswith(export_scope)):
node_def.input[i] = re.sub('([\\^]|^)(.*)', '\\1' + _UNBOUND_INPUT_PREFIX + '\\2', compat.as_str(v))
unbound_inputs.append(node_def.input[i])
else:
node_def.input[i] = ops.strip_name_scope(v, export_scope)
node_def.name = compat.as_bytes(ops.strip_name_scope(from_node_def.name, export_scope))
for k, v in from_node_def.attr.items():
if k == '_class':
new_s = [compat.as_bytes(ops.strip_name_scope(s, export_scope)) for s in v.list.s if not export_scope or compat.as_str(s).split('@')[1].startswith(export_scope)]
node_def.attr[k].CopyFrom(attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(s=new_s)))
elif node_def.op in ('Enter', 'RefEnter') and k == 'frame_name':
if not export_scope or compat.as_str(v.s).startswith(export_scope):
new_s = compat.as_bytes(ops.strip_name_scope(v.s, export_scope))
node_def.attr[k].CopyFrom(attr_value_pb2.AttrValue(s=new_s))
else:
node_def.attr[k].CopyFrom(v)
if clear_devices:
node_def.device = ''
return node_def | Create a `NodeDef` proto with export_scope stripped.
Args:
from_node_def: A `node_def_pb2.NodeDef` protocol buffer.
export_scope: A `string` representing the name scope to remove.
unbound_inputs: An array of unbound input names if they exist.
clear_devices: Boolean which controls whether to clear device information
from node_def. Default false.
Returns:
A `node_def_pb2.NodeDef` protocol buffer. | github-repos |
async def async_fetch(url: str, **kwargs) -> Selector:
kwargs.setdefault('headers', DEFAULT_HEADERS)
async with aiohttp.ClientSession(**kwargs) as ses:
async with ses.get(url, **kwargs) as res:
html = await res.text()
tree = Selector(text=html)
return tree | Do the fetch in an async style.
Args:
url (str): The url of the site.
Returns:
Selector: allows you to select parts of HTML text using CSS or XPath expressions. | juraj-google-style |
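A hedged usage sketch driving async_fetch from synchronous code; the URL and CSS selector are placeholders and network access is required.
```python
# Assumes aiohttp/parsel are installed and async_fetch() above is importable.
import asyncio

tree = asyncio.run(async_fetch('https://example.com'))
print(tree.css('title::text').get())   # e.g. 'Example Domain'
```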
def file_name_increase(file_name, file_location):
add_one = 1
file_name_temp = file_name
while verify_file_exists(file_name_temp, file_location):
try:
name, file_extension = file_name.split('.')
file_name_temp = '%s-%i.%s' % (name, add_one, file_extension)
except Exception as e:
LOGGER.critical('Function file_name_increase Error {error} ignoring any errors'.format(error=e))
name = file_name
file_name_temp = '%s-%i' % (name, add_one)
add_one += 1
file_name = file_name_temp
return file_name | Increment a filename by appending a number until it no longer exists.
Args:
file_name: The name of file to check
file_location: The location of the file, derived from the os module
Returns: A filename that does not already exist in file_location. | juraj-google-style
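A self-contained variant for illustration: the same incrementing scheme, but with os.path.exists standing in for the verify_file_exists helper and os.path.splitext handling multi-dot names.
```python
# Sketch only; the real function checks existence through verify_file_exists().
import os

def file_name_increase_sketch(file_name, file_location):
    candidate = file_name
    counter = 1
    while os.path.exists(os.path.join(file_location, candidate)):
        name, ext = os.path.splitext(file_name)
        candidate = '%s-%i%s' % (name, counter, ext)
        counter += 1
    return candidate

print(file_name_increase_sketch('report.txt', '/tmp'))  # 'report.txt', or 'report-1.txt' if taken
```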
def add_outgoing_edge(self, edge):
self._outgoing_edges.append(edge) | Adds an outgoing edge to the Convertible's list of edges.
Args:
edge: The outgoing edge (its source should be 'self'). | github-repos |
def copy_foreign_keys(self, event):
event_keys = set(event._meta.fields.keys())
obj_keys = self._meta.fields.keys()
matching_keys = event_keys.intersection(obj_keys)
for key in matching_keys:
if (key == 'created_by'):
continue
if (not isinstance(self._meta.fields[key], peewee.ForeignKeyField)):
continue
setattr(event, key, getattr(self, key))
possible_key = self.__class__.__name__.lower()
if ((possible_key in event_keys) and (event.code != 'AUDIT_DELETE')):
setattr(event, possible_key, self) | Copies possible foreign key values from the object into the Event,
skipping common keys like modified and created.
Args:
event (Event): The Event instance to copy the FKs into
obj (fleaker.db.Model): The object to pull the values from | codesearchnet |
def validate_dataset_input(x, y, sample_weight, validation_split=None):
if y is not None:
raise ValueError('You passed a dataset or dataset iterator (%s) as input `x` to your model. In that case, you should not specify a target (`y`) argument, since the dataset or dataset iterator generates both input data and target data. Received: %s' % (x, y))
if sample_weight is not None:
raise ValueError('`sample_weight` argument is not supported when input `x` is a dataset or a dataset iterator. Instead, you can provide sample_weight as the third element of your dataset, i.e. (inputs, targets, sample_weight). Received: x=%s, sample_weight=%s' % (x, sample_weight))
if validation_split is not None and validation_split != 0.0:
raise ValueError('`validation_split` argument is not supported when input `x` is a dataset or a dataset iterator. Received: x=%s, validation_split=%f' % (x, validation_split)) | Validates user input arguments when a dataset iterator is passed.
Args:
x: Input data. A `tf.data` dataset or iterator.
y: Target data. It could be either Numpy array(s) or TensorFlow tensor(s).
Expected to be `None` when `x` is a dataset iterator.
sample_weight: An optional sample-weight array passed by the user to weight
the importance of each sample in `x`. Expected to be `None` when `x` is a
dataset iterator
validation_split: Float between 0 and 1. Fraction of the training data to be
used as validation data. Expected to be `None` when `x` is a dataset
iterator.
Raises:
ValueError: if argument `y` or `sample_weight` or `validation_split` are
provided by user. | github-repos |
def get_utxoset_merkle_root(self):
utxoset = backend.query.get_unspent_outputs(self.connection)
hashes = [sha3_256('{}{}'.format(utxo['transaction_id'], utxo['output_index']).encode()).digest() for utxo in utxoset]
return merkleroot(sorted(hashes)) | Returns the merkle root of the utxoset. This implies that
the utxoset is first put into a merkle tree.
For now, the merkle tree and its root will be computed each
time. This obviously is not efficient and a better approach
that limits the repetition of the same computation when
unnecesary should be sought. For instance, future optimizations
could simply re-compute the branches of the tree that were
affected by a change.
The transaction hash (id) and output index should be sufficient
to uniquely identify a utxo, and consequently only that
information from a utxo record is needed to compute the merkle
root. Hence, each node of the merkle tree should contain the
tuple (txid, output_index).
.. important:: The leaves of the tree will need to be sorted in
some kind of lexicographical order.
Returns:
str: Merkle root in hexadecimal form. | codesearchnet |
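An illustrative sketch of the leaf hashing above plus a toy pairwise Merkle fold using hashlib; the utxo records are invented and the real merkleroot() helper may pad or order leaves differently.
```python
# Toy example only; BigchainDB's merkleroot() is the authoritative implementation.
from hashlib import sha3_256

utxoset = [
    {'transaction_id': 'a1', 'output_index': 0},
    {'transaction_id': 'a1', 'output_index': 1},
    {'transaction_id': 'b2', 'output_index': 0},
]
leaves = sorted(
    sha3_256('{}{}'.format(u['transaction_id'], u['output_index']).encode()).digest()
    for u in utxoset
)

def toy_merkleroot(nodes):
    if len(nodes) == 1:
        return nodes[0].hex()
    if len(nodes) % 2:                      # duplicate the last node on odd levels
        nodes = nodes + [nodes[-1]]
    parents = [sha3_256(nodes[i] + nodes[i + 1]).digest()
               for i in range(0, len(nodes), 2)]
    return toy_merkleroot(parents)

print(toy_merkleroot(leaves))
```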
def encode(self, inputs: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, freeze_feature_encoder: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.return_dict
if attention_mask is None:
attention_mask = jnp.ones_like(inputs, dtype='i4')
rngs = {}
if dropout_rng is not None:
rngs['dropout'] = dropout_rng
def _encoder_forward(module, inputs, attention_mask, **kwargs):
encode_module = module._get_encoder_module()
return encode_module(inputs, attention_mask, **kwargs)
outputs = self.module.apply({'params': params or self.params}, inputs=jnp.array(inputs, dtype='f4'), attention_mask=jnp.array(attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, freeze_feature_encoder=freeze_feature_encoder, rngs=rngs, method=_encoder_forward)
if return_dict:
outputs = FlaxBaseModelOutput(last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
return outputs | Returns:
Example:
```python
>>> from transformers import FlaxSpeechEncoderDecoderModel
>>> # initialize a wav2vec2-2-bart from pretrained wav2vec2 and bart models. Note that the cross-attention layers will be randomly initialized
>>> model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
... "facebook/wav2vec2-large-lv60", "facebook/bart-large"
... )
>>> inputs = jnp.ones((2, 5000), dtype=jnp.float32)
>>> encoder_outputs = model.encode(inputs)
``` | github-repos |
def to_event_set(pipe: beam.PCollection[Dict[str, Any]], schema: Schema, timestamp_key: str='timestamp', format: DictEventSetFormatChoices=DictEventSetFormat.GROUPED_BY_INDEX) -> BeamEventSet:
num_features = len(schema.features)
if format == DictEventSetFormat.GROUPED_BY_INDEX:
if num_features != 0:
return partition_by_feature_idx(pipe | 'Parse dict' >> beam.FlatMap(_event_set_dict_to_event_set, schema, timestamp_key), num_features=num_features, reshuffle=True)
else:
return _reshuffle_item_in_tuples((pipe | 'Parse dict' >> beam.Map(_event_set_dict_to_event_set_no_features, schema, timestamp_key),))
elif format == DictEventSetFormat.SINGLE_EVENTS:
indexed = pipe | 'Parse and index' >> beam.Map(_parse_and_index, schema, timestamp_key) | 'Aggregate' >> beam.GroupByKey()
if num_features != 0:
return partition_by_feature_idx(indexed | 'Merge by timestamps' >> beam.ParDo(_MergeTimestamps(schema.features)), num_features=num_features, reshuffle=True)
else:
return _reshuffle_item_in_tuples((indexed | 'Merge by timestamps' >> beam.Map(_merge_timestamps_no_features),))
else:
raise ValueError(f'Unknown format {format}') | Converts a PCollection of key:value to a Beam EventSet.
This method is compatible with the output of `from_csv_raw` and the
Official Beam IO connectors.
When importing data from csv files, use `from_csv` to convert csv files
directly into EventSets.
Unlike Temporian in-process EventSet import method (
[tp.event_set][temporian.event_set]), this method (`tpb.to_event_set`)
requires timestamps to be numerical values.
Args:
pipe: Beam pipe of key values.
schema: Schema of the data. Note: The schema of a Temporian node is
available with `node.schema`.
timestamp_key: Key containing the timestamps.
format: Format of the events inside the received dictionary. See
[DictEventSetFormat][temporian.io.format.DictEventSetFormat] for
more.
Returns:
Beam EventSet. | github-repos |
def assert_style(self, styles, **kwargs):
query = StyleQuery(styles, **kwargs)
@self.synchronize(wait=query.wait)
def assert_style():
if not query.resolves_for(self):
raise ExpectationNotMet(query.failure_message)
return True
return assert_style() | Asserts that an element has the specified CSS styles. ::
element.assert_style({"color": "rgb(0,0,255)", "font-size": re.compile(r"px")})
Args:
styles (Dict[str, str | RegexObject]): The expected styles.
Returns:
True
Raises:
ExpectationNotMet: The element doesn't have the specified styles. | juraj-google-style |
def Insert(self, request, global_params=None):
config = self.GetMethodConfig('Insert')
return self._RunMethod(config, request, global_params=global_params) | Creates a new routine in the dataset.
Args:
request: (BigqueryRoutinesInsertRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Routine) The response message. | github-repos |
def __init__(self, shape_id=None, lat=None, lon=None, seq=None, dist=None,
field_dict=None):
self._schedule = None
if field_dict:
if isinstance(field_dict, self.__class__):
for k, v in field_dict.iteritems():
self.__dict__[k] = v
else:
self.__dict__.update(field_dict)
else:
self.shape_id = shape_id
self.shape_pt_lat = lat
self.shape_pt_lon = lon
self.shape_pt_sequence = seq
self.shape_dist_traveled = dist | Initialize a new ShapePoint object.
Args:
field_dict: A dictionary mapping attribute name to unicode string | juraj-google-style |
def AddAdGroup(self, client_customer_id, campaign_id, name, status):
self.client.SetClientCustomerId(client_customer_id)
ad_group_service = self.client.GetService('AdGroupService')
operations = [{'operator': 'ADD', 'operand': {'campaignId': campaign_id, 'name': name, 'status': status}}]
ad_group_service.mutate(operations) | Create a new ad group.
Args:
client_customer_id: str Client Customer Id used to create the AdGroup.
campaign_id: str Id of the campaign to use.
name: str Name to assign to the AdGroup.
status: str Status to assign to the AdGroup when it is created. | codesearchnet |
def learn_q(self, predicted_q_arr, real_q_arr):
self.__predicted_q_arr_list.append(predicted_q_arr)
while len(self.__predicted_q_arr_list) > self.__seq_len:
self.__predicted_q_arr_list = self.__predicted_q_arr_list[1:]
while len(self.__predicted_q_arr_list) < self.__seq_len:
self.__predicted_q_arr_list.append(self.__predicted_q_arr_list[-1])
predicted_q_arr = np.array(self.__predicted_q_arr_list)
predicted_q_arr = predicted_q_arr.transpose((1, 0, 2))
self.__real_q_arr_list.append(real_q_arr)
while len(self.__real_q_arr_list) > self.__seq_len:
self.__real_q_arr_list = self.__real_q_arr_list[1:]
while len(self.__real_q_arr_list) < self.__seq_len:
self.__real_q_arr_list.append(self.__real_q_arr_list[-1])
real_q_arr = np.array(self.__real_q_arr_list)
real_q_arr = real_q_arr.transpose((1, 0, 2))
loss = self.__computable_loss.compute_loss(predicted_q_arr, real_q_arr)
delta_arr = self.__computable_loss.compute_delta(predicted_q_arr, real_q_arr)
delta_arr, lstm_output_grads_list = self.__lstm_model.output_back_propagate(
predicted_q_arr,
delta_arr
)
delta_arr, _, lstm_hidden_grads_list = self.__lstm_model.hidden_back_propagate(
delta_arr[:, -1]
)
lstm_grads_list = lstm_output_grads_list
lstm_grads_list.extend(lstm_hidden_grads_list)
self.__lstm_model.optimize(lstm_grads_list, self.__learning_rate, 1)
self.__loss_list.append(loss) | Learn Q-Values.
Args:
predicted_q_arr: `np.ndarray` of predicted Q-Values.
real_q_arr: `np.ndarray` of real Q-Values. | juraj-google-style |
def parse_conservations(variant):
conservations = {}
conservations['gerp'] = parse_conservation(
variant,
'dbNSFP_GERP___RS'
)
conservations['phast'] = parse_conservation(
variant,
'dbNSFP_phastCons100way_vertebrate'
)
conservations['phylop'] = parse_conservation(
variant,
'dbNSFP_phyloP100way_vertebrate'
)
return conservations | Parse the conservation predictors
Args:
variant(dict): A variant dictionary
Returns:
conservations(dict): A dictionary with the conservations | juraj-google-style |
def __init__(self, tensors=None, values=None, tol=1e-5):
self._tensor_list = tensors or []
self._value_list = values or []
if not len(self._tensor_list) == len(self._value_list):
raise ValueError("TensorMapping must be initialized with tensors"
"and values of equivalent length")
self.tol = tol | Initialize a TensorMapping
Args:
tensors ([Tensor]): list of tensors
values (list): list of values to be associated with tensors
tol (float): an absolute tolerance for getting and setting
items in the mapping | juraj-google-style |
def login(self, broker_name, account_cookie, account=None):
res = False
if account is None:
if account_cookie not in self.session.keys():
self.session[account_cookie] = QA_Account(
account_cookie=account_cookie,
broker=broker_name
)
if self.sync_account(broker_name, account_cookie):
res = True
if self.if_start_orderthreading and res:
self.order_handler.subscribe(
self.session[account_cookie],
self.broker[broker_name]
)
else:
if account_cookie not in self.session.keys():
account.broker = broker_name
self.session[account_cookie] = account
if self.sync_account(broker_name, account_cookie):
res = True
if self.if_start_orderthreading and res:
self.order_handler.subscribe(
account,
self.broker[broker_name]
)
if res:
return res
else:
try:
self.session.pop(account_cookie)
except:
pass
return False | login: log in to the trading front-end.
2018-07-02: in live trading, the asset state must be synchronized after logging in to the trading front-end.
Arguments:
broker_name {[type]} -- [description]
account_cookie {[type]} -- [description]
Keyword Arguments:
account {[type]} -- [description] (default: {None})
Returns:
[type] -- [description] | juraj-google-style |
def save(self, **kwargs):
updated_data = self._get_updated_data()
if (not updated_data):
return
obj_id = self.get_id()
server_data = self.manager.update(obj_id, updated_data, **kwargs)
if (server_data is not None):
self._update_attrs(server_data) | Save the changes made to the object to the server.
The object is updated to match what the server returns.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabUpdateError: If the server cannot perform the request | codesearchnet |
def insert_taxon_in_new_fasta_file(self, aln):
new_seq_records = []
for seq_record in SeqIO.parse(aln, 'fasta'):
new_seq_record_id = '[{0}] {1}'.format(self.taxon_for_codon_usage, seq_record.id)
new_seq_record = SeqRecord(seq_record.seq, id=new_seq_record_id)
new_seq_records.append(new_seq_record)
base_filename = os.path.splitext(aln)
new_filename = '{0}_modified{1}'.format(base_filename[0], base_filename[1])
SeqIO.write(new_seq_records, new_filename, 'fasta')
return new_filename | primer4clades infers the codon usage table from the taxon names in the
sequences.
These names need to be enclosed by square brackets and be
present in the description of the FASTA sequence; their position is not
important. This method inserts the taxon name into the description of each
record in a new FASTA file.
Returns:
Filename of modified FASTA file that includes the name of the taxon. | codesearchnet |
def check_oneof(**kwargs):
if (not kwargs):
return None
not_nones = [val for val in kwargs.values() if (val is not None)]
if (len(not_nones) > 1):
raise ValueError('Only one of {fields} should be set.'.format(fields=', '.join(sorted(kwargs.keys())))) | Raise ValueError if more than one keyword argument is not none.
Args:
kwargs (dict): The keyword arguments sent to the function.
Returns: None
Raises:
ValueError: If more than one entry in kwargs is not none. | codesearchnet |
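A usage sketch for check_oneof as defined above; the keyword names are arbitrary.
```python
# Assumes check_oneof() from above is in scope.
check_oneof(page_token=None, page_size=10)          # only one value set: no error
try:
    check_oneof(page_token='abc', filter_='x > 1')  # two values set
except ValueError as exc:
    print(exc)   # Only one of filter_, page_token should be set.
```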
def get_additional_charge_by_identifier(self, recurring_billing_id):
fmt = 'recurringBillItems/{}'.format(recurring_billing_id)
return self.client._get(self.url + fmt, headers=self.get_headers()) | Query extra charge information of an invoice from its identifier.
Args:
recurring_billing_id: Identifier of the additional charge.
Returns: The API response with the additional charge information. | juraj-google-style