code | docstring | source
---|---|---|
def take_profit(self, accountID, **kwargs):
return self.create(
accountID,
order=TakeProfitOrderRequest(**kwargs)
)
|
Shortcut to create a Take Profit Order in an Account
Args:
accountID : The ID of the Account
kwargs : The arguments to create a TakeProfitOrderRequest
Returns:
v20.response.Response containing the results from submitting
the request
|
juraj-google-style
|
def run(self, shell=False, ignore_errors=False, stdin=False, check_output=False):
previous_directory = os.getcwd()
os.chdir(self.directory)
try:
kwargs = {'stderr': sys.stderr, 'stdin': (sys.stdin if stdin else None), 'env': self.env_vars, 'shell': shell}
if check_output:
return subprocess.check_output(self.command, **kwargs).decode('utf8')
else:
kwargs['stdout'] = sys.stdout
return subprocess.check_call(self.command, **kwargs)
    except subprocess.CalledProcessError:
        if not ignore_errors:
            raise
    finally:
        # Restore the caller's working directory even if the command
        # returned early or raised.
        os.chdir(previous_directory)
|
Run subcommand.
Args:
shell (Optional[bool]): Run command using shell (default False)
ignore_errors (Optional[bool]): If the command has a non-zero return code, don't raise an exception (default False)
stdin (Optional[bool]): Plug input from stdin when running command (default False)
check_output (Optional[bool]): Return command output as string (default False)
Returns:
String if check_output is True, else None.
Raises:
subprocess.CalledProcessError when the command has an error, unless ignore_errors is True.
|
codesearchnet
|
def sg_input(shape=None, dtype=sg_floatx, name=None):
if shape is None:
return tf.placeholder(dtype, shape=None, name=name)
else:
if not isinstance(shape, (list, tuple)):
shape = [shape]
return tf.placeholder(dtype, shape=[None] + list(shape), name=name)
|
r"""Creates a placeholder.
Args:
shape: A tuple/list of integers. If an integer is given, it will be converted to a list.
dtype: A data type. Default is float32.
name: A name for the placeholder.
Returns:
A wrapped placeholder `Tensor`.
|
juraj-google-style
|
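A hedged usage sketch for `sg_input` (not part of the dataset row above): it assumes a TF1-style graph is being built, since `tf.placeholder` is unavailable under eager execution, and the shapes shown are purely illustrative.
```python
# Illustrative calls only; shapes are made up and a TF1 graph context is assumed.
x = sg_input(shape=(28, 28))  # placeholder of shape [None, 28, 28]
v = sg_input(784)             # a scalar shape is wrapped into a list -> [None, 784]
y = sg_input()                # placeholder with a fully unspecified shape
```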
def sample(self, n):
total = bq.Query(('select count(*) from %s' % self._get_source())).execute().result()[0].values()[0]
if (n > total):
raise ValueError('sample larger than population')
sampling = bq.Sampling.random(percent=((n * 100.0) / float(total)))
if (self._query is not None):
source = self._query
else:
source = ('SELECT * FROM `%s`' % self._table)
sample = bq.Query(source).execute(sampling=sampling).result()
df = sample.to_dataframe()
return df
|
Samples data into a Pandas DataFrame. Note that it calls BigQuery so it will
incur cost.
Args:
n: number of rows to sample. Note that the number of rows returned is approximate.
Returns:
A dataframe containing sampled data.
Raises:
ValueError: If n is larger than the number of rows in the table.
|
codesearchnet
|
def save_attributes_to_hdf5_group(group, name, data):
HDF5_OBJECT_HEADER_LIMIT = 64512
bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
if bad_attributes:
raise RuntimeError(f'The following attributes cannot be saved to HDF5 file because they are larger than {HDF5_OBJECT_HEADER_LIMIT} bytes: {bad_attributes}')
data_npy = np.asarray(data)
num_chunks = 1
chunked_data = np.array_split(data_npy, num_chunks)
while any((x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data)):
num_chunks += 1
chunked_data = np.array_split(data_npy, num_chunks)
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(chunked_data):
group.attrs['%s%d' % (name, chunk_id)] = chunk_data
else:
group.attrs[name] = data
|
Saves attributes (data) of the specified name into the HDF5 group.
This method works around an inherent limitation of HDF5 files, which cannot store
attributes larger than HDF5_OBJECT_HEADER_LIMIT bytes.
Args:
group: A pointer to a HDF5 group.
name: A name of the attributes to save.
data: Attributes data to store.
Raises:
RuntimeError: If any single attribute is too large to be saved.
Copied from Keras to Transformers to avoid versioning issues.
|
github-repos
|
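A minimal sketch of calling `save_attributes_to_hdf5_group`, assuming `h5py` is available and the function above is in scope; the file name and attribute values are illustrative.
```python
import h5py

# Hypothetical file name and attribute data, for illustration only.
with h5py.File('attrs_demo.h5', 'w') as f:
    group = f.create_group('model_weights')
    names = [('dense_%d' % i).encode('utf8') for i in range(10)]
    # Small payload: stored as a single 'layer_names' attribute (num_chunks == 1).
    save_attributes_to_hdf5_group(group, 'layer_names', names)
    print(list(group.attrs))  # ['layer_names']
```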
def _check_dims_and_partition_or_replicate_on_host(self, tensor, dims):
self._check_input_partition_dims(tensor, dims)
return partition_or_replicate_on_host(tensor, dims)
|
Checks dims and partitions or replicates the input tensor.
The ops inside this function are placed on the host side.
Args:
tensor: The input tensor which will be partitioned or replicated.
dims: A list of integers describing how to partition the input tensor.
Returns:
An iterator of `Tensor`s or a list of partitioned tensors.
|
github-repos
|
def ask_when_work_is_populated(self, work):
work.read_all_from_datastore()
if work.work:
        print('Work is already written to datastore.\n'
              'If you continue these data will be overwritten and '
              'possibly corrupted.')
inp = input_str('Do you want to continue? '
'(type "yes" without quotes to confirm): ')
return inp == 'yes'
else:
return True
|
When work is already populated, asks whether we should continue.
This method prints warning message that work is populated and asks
whether user wants to continue or not.
Args:
work: instance of WorkPiecesBase
Returns:
True if we should continue and populate datastore, False if we should stop
|
juraj-google-style
|
def summary(self, stdout=True, plot=False):
if stdout:
print('Collinearity summary:')
print(pd.concat([self.results['Eigenvalues'], self.results['ConditionIndices'], self.results['VIFs'], self.results['CorrelationMatrix']], axis=1))
print('Outlier summary:')
print(self.results['RowMahalanobisDistances'])
print(self.results['ColumnMahalanobisDistances'])
print('Validity summary:')
print(self.results['Variances'])
if plot:
verify_dependencies('seaborn')
for (key, result) in self.results.items():
if (key == 'CorrelationMatrix'):
ax = plt.axes()
sns.heatmap(result, cmap='Blues', ax=ax)
ax.set_title(key)
                plt.show()
else:
result.plot(kind='bar', title=key)
plt.show()
|
Displays diagnostics to the user
Args:
stdout (bool): print results to the console
plot (bool): use Seaborn to plot results
|
codesearchnet
|
def _check_audience(payload_dict, audience):
if (audience is None):
return
audience_in_payload = payload_dict.get('aud')
if (audience_in_payload is None):
raise AppIdentityError('No aud field in token: {0}'.format(payload_dict))
if (audience_in_payload != audience):
raise AppIdentityError('Wrong recipient, {0} != {1}: {2}'.format(audience_in_payload, audience, payload_dict))
|
Checks audience field from a JWT payload.
Does nothing if the passed in ``audience`` is null.
Args:
payload_dict: dict, A dictionary containing a JWT payload.
audience: string or NoneType, an audience to check for in
the JWT payload.
Raises:
AppIdentityError: If there is no ``'aud'`` field in the payload
dictionary but there is an ``audience`` to check.
AppIdentityError: If the ``'aud'`` field in the payload dictionary
does not match the ``audience``.
|
codesearchnet
|
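A hedged example of the three `_check_audience` outcomes; `AppIdentityError` is assumed to be the oauth2client exception referenced above, and the payload values are made up.
```python
from oauth2client.crypt import AppIdentityError  # assumed import path

payload = {'aud': 'my-client-id', 'iss': 'accounts.example.com'}
_check_audience(payload, None)            # audience is None: check is skipped
_check_audience(payload, 'my-client-id')  # matching audience: returns silently
try:
    _check_audience(payload, 'other-client-id')
except AppIdentityError as exc:
    print(exc)  # Wrong recipient, my-client-id != other-client-id: {...}
```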
def resolve(phrases: typing.List[str], html: str, separator: str='\u200b') -> str:
resolver = HTMLChunkResolver(phrases, separator)
resolver.feed(html)
result = '<span style="%s">%s</span>' % (PARENT_CSS_STYLE, resolver.output)
return result
|
Wraps phrases in the HTML string with non-breaking markup.
Args:
phrases (List[str]): The phrases included in the HTML string.
html (str): The HTML string to resolve.
separator (str, optional): The separator string.
Returns:
The HTML string with phrases wrapped in non-breaking markup.
|
github-repos
|
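A hypothetical call to `resolve`; the phrase list and HTML snippet are illustrative, and the exact output depends on `PARENT_CSS_STYLE` and `HTMLChunkResolver` defined elsewhere in the module.
```python
html = '<p>今日は良い天気です。</p>'
phrases = ['今日は', '良い', '天気です。']
wrapped = resolve(phrases, html)
# Roughly: '<span style="...">...</span>' with U+200B separators inserted
# between the phrases inside the original markup.
print(wrapped)
```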
def save_weights_to_hdf5_group(f, model):
from keras.src import __version__ as keras_version
save_attributes_to_hdf5_group(f, 'layer_names', [layer.name.encode('utf8') for layer in model.layers])
f.attrs['backend'] = backend.backend().encode('utf8')
f.attrs['keras_version'] = str(keras_version).encode('utf8')
for layer in sorted(model.layers, key=lambda x: x.name):
g = f.create_group(layer.name)
weights = _legacy_weights(layer)
save_subset_weights_to_hdf5_group(g, weights)
weights = list((v for v in model._trainable_variables + model._non_trainable_variables if v in model.weights))
g = f.create_group('top_level_model_weights')
save_subset_weights_to_hdf5_group(g, weights)
|
Saves the weights of a model's layers to an HDF5 group.
Args:
f: HDF5 group.
model: Model instance.
|
github-repos
|
def format_auth_params(params):
parts = []
for (key, value) in params.items():
if value:
parts.append('{}="{}"'.format(key, value))
return ", ".join(parts)
|
Generate the format expected by HTTP Headers from parameters.
Args:
params (dict): {key: value} to convert to key=value
Returns:
A formatted header string.
|
juraj-google-style
|
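A small hedged example of `format_auth_params`; the parameter names are made up, and note that falsy values are silently dropped.
```python
params = {'realm': 'example.org', 'nonce': 'abc123', 'opaque': ''}
print(format_auth_params(params))
# realm="example.org", nonce="abc123"   (the empty 'opaque' entry is skipped)
```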
def imshow(img, win_name='', wait_time=0):
cv2.imshow(win_name, imread(img))
cv2.waitKey(wait_time)
|
Show an image.
Args:
img (str or ndarray): The image to be displayed.
win_name (str): The window name.
wait_time (int): Value of waitKey param.
|
juraj-google-style
|
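A hedged usage sketch for `imshow`, assuming the module-level `imread` accepts both file paths and arrays (as in mmcv); the file path is illustrative.
```python
import numpy as np

imshow('demo.jpg', win_name='preview', wait_time=1000)  # display for ~1 second

img = np.zeros((100, 100, 3), dtype=np.uint8)
imshow(img)  # wait_time=0 blocks until a key is pressed
```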
def unpack(self, buff, offset=0):
super().unpack(buff, offset)
if not self.is_valid():
raise UnpackException("Unsupported protocols in ARP packet")
|
Unpack a binary struct into this object's attributes.
Return the values instead of the lib's basic types.
Check if the protocols involved are Ethernet and IPv4. Other protocols
are currently not supported.
Args:
buff (bytes): Binary buffer.
offset (int): Where to begin unpacking.
Raises:
:exc:`~.exceptions.UnpackException`: If unpack fails.
|
juraj-google-style
|
def load_own_variables(self, store):
all_vars = self._trainable_variables + self._non_trainable_variables
if len(store.keys()) != len(all_vars):
if len(all_vars) == 0 and (not self.built):
raise ValueError(f"Layer '{self.name}' was never built and thus it doesn't have any variables. However the weights file lists {len(store.keys())} variables for this layer.\nIn most cases, this error indicates that either:\n\n1. The layer is owned by a parent layer that implements a `build()` method, but calling the parent's `build()` method did NOT create the state of the child layer '{self.name}'. A `build()` method must create ALL state for the layer, including the state of any children layers.\n\n2. You need to implement the `def build_from_config(self, config)` method on layer '{self.name}', to specify how to rebuild it during loading. In this case, you might also want to implement the method that generates the build config at saving time, `def get_build_config(self)`. The method `build_from_config()` is meant to create the state of the layer (i.e. its variables) upon deserialization.")
raise ValueError(f"Layer '{self.name}' expected {len(all_vars)} variables, but received {len(store.keys())} variables during loading. Expected: {[v.name for v in all_vars]}")
for i, v in enumerate(all_vars):
v.assign(store[f'{i}'])
|
Loads the state of the layer.
You can override this method to take full control of how the state of
the layer is loaded upon calling `keras.models.load_model()`.
Args:
store: Dict from which the state of the model will be loaded.
|
github-repos
|
def ts_to_dt(jwt_dict):
d = jwt_dict.copy()
for k, v in [v[:2] for v in CLAIM_LIST if v[2]]:
if k in jwt_dict:
d[k] = d1_common.date_time.dt_from_ts(jwt_dict[k])
return d
|
Convert timestamps in JWT to datetime objects.
Args:
jwt_dict: dict
JWT with some keys containing timestamps.
Returns:
dict: Copy of input dict where timestamps have been replaced with
datetime.datetime() objects.
|
juraj-google-style
|
def dispatch_event(self, event: "Event") -> None:
if event.target is None:
event.set_target(self)
listeners: dict[types.MethodType, bool] = self._registered_listeners.get(event.type)
if listeners is None:
return
for listener in listeners:
listener(event)
|
Dispatches the given event.
It is the duty of this method to set the target of the dispatched event by calling
`event.set_target(self)`.
Args:
event (Event): The event to dispatch. Must not be `None`.
Raises:
TypeError: If the event is `None` or its type is incorrect.
|
juraj-google-style
|
def has_full_stack(self, value):
if value == self._defaults['hasFullStack'] and 'hasFullStack' in self._values:
del self._values['hasFullStack']
else:
self._values['hasFullStack'] = value
|
The has_full_stack property.
Args:
value (bool): The property value.
|
juraj-google-style
|
def identify_triggers(cfg, sources, sinks, lattice, nosec_lines):
assignment_nodes = filter_cfg_nodes(cfg, AssignmentNode)
tainted_nodes = filter_cfg_nodes(cfg, TaintedNode)
tainted_trigger_nodes = [TriggerNode(Source('Framework function URL parameter'), cfg_node=node) for node in tainted_nodes]
sources_in_file = find_triggers(assignment_nodes, sources, nosec_lines)
sources_in_file.extend(tainted_trigger_nodes)
find_secondary_sources(assignment_nodes, sources_in_file, lattice)
sinks_in_file = find_triggers(cfg.nodes, sinks, nosec_lines)
sanitiser_node_dict = build_sanitiser_node_dict(cfg, sinks_in_file)
return Triggers(sources_in_file, sinks_in_file, sanitiser_node_dict)
|
Identify sources, sinks and sanitisers in a CFG.
Args:
cfg(CFG): CFG to find sources, sinks and sanitisers in.
sources(tuple): list of sources, a source is a (source, sanitiser) tuple.
sinks(tuple): list of sinks, a sink is a (sink, sanitiser) tuple.
nosec_lines(set): lines with # nosec whitelisting
Returns:
Triggers tuple with sink and source nodes and a sanitiser node dict.
|
codesearchnet
|
def take_bug_report(self,
test_name,
begin_time,
timeout=300,
destination=None):
new_br = True
try:
stdout = self.adb.shell('bugreportz -v').decode('utf-8')
if 'not found' in stdout:
new_br = False
except adb.AdbError:
new_br = False
if destination:
br_path = utils.abs_path(destination)
else:
br_path = os.path.join(self.log_path, 'BugReports')
utils.create_dir(br_path)
base_name = ',%s,%s.txt' % (begin_time, self._normalized_serial)
if new_br:
base_name = base_name.replace('.txt', '.zip')
test_name_len = utils.MAX_FILENAME_LEN - len(base_name)
out_name = test_name[:test_name_len] + base_name
full_out_path = os.path.join(br_path, out_name.replace(' ', r'\ '))
self.wait_for_boot_completion()
self.log.info('Taking bugreport for %s.', test_name)
if new_br:
out = self.adb.shell('bugreportz', timeout=timeout).decode('utf-8')
if not out.startswith('OK'):
raise DeviceError(self, 'Failed to take bugreport: %s' % out)
br_out_path = out.split(':')[1].strip()
self.adb.pull([br_out_path, full_out_path])
else:
self.adb.bugreport(
' > "%s"' % full_out_path, shell=True, timeout=timeout)
self.log.info('Bugreport for %s taken at %s.', test_name,
full_out_path)
|
Takes a bug report on the device and stores it in a file.
Args:
test_name: Name of the test method that triggered this bug report.
begin_time: Timestamp of when the test started.
timeout: float, the number of seconds to wait for bugreport to
complete, default is 5min.
destination: string, path to the directory where the bugreport
should be saved.
|
juraj-google-style
|
def DeserializeExclusiveData(self, reader):
    if self.Version != 0:
raise Exception('Invalid format')
self.PublicKey = ECDSA.Deserialize_Secp256r1(reader)
|
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
Raises:
Exception: If the version read is incorrect.
|
juraj-google-style
|
def insert_tile(self, tile_info):
for i, tile in enumerate(self.registered_tiles):
if tile.slot == tile_info.slot:
self.registered_tiles[i] = tile_info
return
self.registered_tiles.append(tile_info)
|
Add or replace an entry in the tile cache.
Args:
tile_info (TileInfo): The newly registered tile.
|
juraj-google-style
|
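A sketch of the replace-or-append behaviour of `insert_tile`, using a hypothetical `TileInfo` stand-in (only the `slot` field matters here) and a hypothetical `cache` instance of the owning class.
```python
from collections import namedtuple

TileInfo = namedtuple('TileInfo', ['slot', 'name'])  # stand-in for illustration

cache.insert_tile(TileInfo(slot=1, name='accelerometer'))
cache.insert_tile(TileInfo(slot=1, name='accelerometer_v2'))  # replaces slot 1 entry
cache.insert_tile(TileInfo(slot=2, name='gpio'))              # appended as a new slot
```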
def event_shape(self):
return tensor_shape.as_shape(self._event_shape())
|
Shape of a single sample from a single batch as a `TensorShape`.
May be partially defined or unknown.
Returns:
event_shape: `TensorShape`, possibly unknown.
|
github-repos
|
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(QueryRequestPayload, self).read(input_buffer, kmip_version=kmip_version)
local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
query_functions = []
while self.is_tag_next(enums.Tags.QUERY_FUNCTION, local_buffer):
query_function = primitives.Enumeration(enums.QueryFunction, tag=enums.Tags.QUERY_FUNCTION)
query_function.read(local_buffer, kmip_version=kmip_version)
query_functions.append(query_function)
if query_functions:
self._query_functions = query_functions
else:
raise exceptions.InvalidKmipEncoding('The Query request payload encoding is missing the query functions.')
self.is_oversized(local_buffer)
|
Read the data encoding the QueryRequestPayload object and decode it
into its constituent parts.
Args:
input_buffer (Stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
InvalidKmipEncoding: Raised if the query functions are missing
from the encoded payload.
|
codesearchnet
|
def to(self, new_unit):
return self.__class__(
np.array(self) * self.unit.get_conversion_factor(new_unit),
unit_type=self.unit_type, unit=new_unit)
|
Conversion to a new_unit.
Args:
new_unit:
New unit type.
Returns:
An ArrayWithFloatWithUnit object in the new units.
Example usage:
>>> e = EnergyArray([1, 1.1], "Ha")
>>> e.to("eV")
array([ 27.21138386, 29.93252225]) eV
|
juraj-google-style
|
def merge(metric_kind, prior, latest):
(prior_type, _) = _detect_value(prior)
(latest_type, _) = _detect_value(latest)
if (prior_type != latest_type):
_logger.warn(u'Metric values are not compatible: %s, %s', prior, latest)
raise ValueError(u'Incompatible delta metric values')
if (prior_type is None):
_logger.warn(u'Bad metric values, types not known for : %s, %s', prior, latest)
raise ValueError(u'Unsupported delta metric types')
if (metric_kind == MetricKind.DELTA):
return _merge_delta_metric(prior, latest)
else:
return _merge_cumulative_or_gauge_metrics(prior, latest)
|
Merges `prior` and `latest`
Args:
metric_kind (:class:`MetricKind`): indicates the kind of metrics
being merged
prior (:class:`MetricValue`): a prior instance of the metric
latest (:class:`MetricValue`): the latest instance of the metric
|
codesearchnet
|
def parse_dtype_info(flags):
if flags.dtype in (i[0] for i in DTYPE_MAP.values()):
return
try:
flags.dtype, default_loss_scale = DTYPE_MAP[flags.dtype]
except KeyError:
raise ValueError("Invalid dtype: {}".format(flags.dtype))
flags.loss_scale = flags.loss_scale or default_loss_scale
|
Convert dtype string to tf dtype, and set loss_scale default as needed.
Args:
flags: namespace object returned by arg parser.
Raises:
ValueError: If an invalid dtype is provided.
|
juraj-google-style
|
def _parse_graph(self):
if self.exists:
self.rdf.graph = self.repo.api.parse_rdf_payload(self.rdf.data, self.headers)
else:
self.rdf.graph = rdflib.Graph()
self.rdf.namespace_manager = rdflib.namespace.NamespaceManager(self.rdf.graph)
for ns_prefix, ns_uri in self.rdf.prefixes.__dict__.items():
self.rdf.namespace_manager.bind(ns_prefix, ns_uri, override=False)
for ns_prefix, ns_uri in self.rdf.graph.namespaces():
setattr(self.rdf.prefixes, ns_prefix, rdflib.Namespace(ns_uri))
setattr(self.rdf.uris, rdflib.Namespace(ns_uri), ns_prefix)
self.rdf._orig_graph = copy.deepcopy(self.rdf.graph)
self.parse_object_like_triples()
|
use Content-Type from headers to determine parsing method
Args:
None
Return:
None: sets self.rdf by parsing data from GET request, or setting a blank graph if the resource does not yet exist
|
juraj-google-style
|
def GetParserObjects(cls, parser_filter_expression=None):
(includes, excludes) = cls._GetParserFilters(parser_filter_expression)
parser_objects = {}
for (parser_name, parser_class) in iter(cls._parser_classes.items()):
if ((not includes) and (parser_name in excludes)):
continue
if (includes and (parser_name not in includes)):
continue
parser_object = parser_class()
if parser_class.SupportsPlugins():
plugin_includes = None
if (parser_name in includes):
plugin_includes = includes[parser_name]
parser_object.EnablePlugins(plugin_includes)
parser_objects[parser_name] = parser_object
return parser_objects
|
Retrieves the parser objects.
Args:
parser_filter_expression (Optional[str]): parser filter expression,
where None represents all parsers and plugins.
Returns:
dict[str, BaseParser]: parsers per name.
|
codesearchnet
|
def to_dlpack(tf_tensor):
return pywrap_tfe.TFE_ToDlpackCapsule(tf_tensor)
|
Returns the dlpack capsule representing the tensor.
This operation ensures the underlying data memory is ready when it returns.
```python
a = tf.constant([1, 10])
dlcapsule = tf.experimental.dlpack.to_dlpack(a)
# dlcapsule represents the dlpack data structure
```
Args:
tf_tensor: Tensorflow eager tensor, to be converted to dlpack capsule.
Returns:
A PyCapsule named as dltensor, which shares the underlying memory to other
framework. This PyCapsule can be consumed only once.
|
github-repos
|
def _ContinueReportCompilation(self):
analyzer_alive = self._analyzer.is_alive()
hash_queue_has_tasks = (self.hash_queue.unfinished_tasks > 0)
analysis_queue = (not self.hash_analysis_queue.empty())
return ((analyzer_alive and hash_queue_has_tasks) or analysis_queue)
|
Determines if the plugin should continue trying to compile the report.
Returns:
bool: True if the plugin should continue, False otherwise.
|
codesearchnet
|
def docx_text_from_xml(xml: str, config: TextProcessingConfig) -> str:
root = ElementTree.fromstring(xml)
return docx_text_from_xml_node(root, 0, config)
|
Converts an XML tree of a DOCX file to string contents.
Args:
xml: raw XML text
config: :class:`TextProcessingConfig` control object
Returns:
contents as a string
|
codesearchnet
|
def ParseRow(header,
row):
precondition.AssertDictType(row, Text, Text)
result = rdf_osquery.OsqueryRow()
for column in header.columns:
result.values.append(row[column.name])
return result
|
Parses a single row of osquery output.
Args:
header: A parsed header describing the row format.
row: A row in a "parsed JSON" representation.
Returns:
A parsed `rdf_osquery.OsqueryRow` instance.
|
juraj-google-style
|
class SamplePatchTSMixerRegressionOutput(ModelOutput):
sequences: Optional[torch.FloatTensor] = None
|
Base class for time series model's predictions outputs that contains the sampled values from the chosen
distribution.
Args:
sequences (`torch.FloatTensor` of shape `(batch_size, num_samples, num_targets)`):
Sampled values from the chosen distribution.
|
github-repos
|
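An illustrative construction of the output dataclass; the tensor shape is made up and follows the `(batch_size, num_samples, num_targets)` convention from the docstring.
```python
import torch

out = SamplePatchTSMixerRegressionOutput(sequences=torch.randn(8, 100, 2))
print(out.sequences.shape)  # torch.Size([8, 100, 2])
```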
def __init__(self, pattern):
super(Interpolator, self).__init__()
self._pattern = pattern
if isinstance(pattern, bytes):
var_regex = re.compile(self._VAR_PLACEHOLDER_PATTERN.encode("ascii"))
scope_regex = re.compile(self._SCOPE_PLACEHOLDER_PATTERN.encode("ascii"))
decoder = lambda _: _.decode("ascii")
elif isinstance(pattern, Text):
var_regex = re.compile(self._VAR_PLACEHOLDER_PATTERN)
scope_regex = re.compile(self._SCOPE_PLACEHOLDER_PATTERN)
decoder = lambda _: _
else:
raise TypeError("Unexpected pattern type '{}'".format(type(pattern)))
self._vars = set()
for matches in var_regex.finditer(pattern):
var = matches.group("var")
self._vars.add(decoder(var))
self._scopes = dict()
for matches in scope_regex.finditer(pattern):
scope = matches.group("scope")
var = matches.group("var")
self._scopes.setdefault(decoder(scope), set()).add(decoder(var))
self._var_bindings = collections.defaultdict(lambda: [])
self._scope_bindings = collections.defaultdict(lambda: [])
|
Initializes the interpolator.
Args:
pattern: A string (either of unicode or byte characters) with placeholders
to format.
|
juraj-google-style
|
def default(self, name, action, seqno):
return self.configure(('default route-map %s %s %s' % (name, action, seqno)))
|
Defaults the routemap on the node
Note:
This method will attempt to default the routemap from the nodes
operational config. Since routemaps do not exist by default,
the default action is essentially a negation and the result will
be the removal of the routemap clause.
If the routemap does not exist then this
method will not perform any changes but still return True
Args:
name (string): The full name of the routemap.
action (string): The action to take for this routemap clause.
seqno (integer): The sequence number for the routemap clause.
Returns:
True if the routemap could be deleted otherwise False (see Node)
|
codesearchnet
|
async def updateCronJob(self, iden, query):
cron = self.cell.agenda.appts.get(iden)
if cron is None:
raise s_exc.NoSuchIden()
self._trig_auth_check(cron.useriden)
await self.cell.agenda.mod(iden, query)
|
Change an existing cron job's query
Args:
iden (bytes): The iden of the cron job to be changed
query (str): The new query for the cron job
|
juraj-google-style
|
def __init__(self, name, formatter=None):
if formatter is not None:
name = formatter(name)
self._tag_data = {'name': name}
self._valid = True
if not name:
self._valid = False
|
Initialize Class Properties.
Args:
name (str): The value for this tag.
formatter (method, optional): A method that take a tag value and returns a
formatted tag.
|
juraj-google-style
|
def set_item(target, i, x):
if isinstance(target, tensor_array_ops.TensorArray):
return _tf_tensorarray_set_item(target, i, x)
elif tensor_util.is_tf_type(target):
if target.dtype == dtypes.variant:
return _tf_tensor_list_set_item(target, i, x)
else:
return _tf_tensor_set_item(target, i, x)
else:
return _py_set_item(target, i, x)
|
The slice write operator (i.e. __setitem__).
Note: it is unspecified whether target will be mutated or not. In general,
if target is mutable (like Python lists), it will be mutated.
Args:
target: An entity that supports setitem semantics.
i: Index to modify.
x: The new element value.
Returns:
Same as target, after the update was performed.
Raises:
ValueError: if target is not of a supported type.
|
github-repos
|
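A hedged sketch of the plain-Python branch of `set_item` (the `_py_set_item` fallback), assuming it follows the documented contract of mutating and returning the target; the TensorFlow branches are omitted.
```python
target = [10, 20, 30]
result = set_item(target, 1, 99)  # plain list: dispatched to _py_set_item
print(result)                     # [10, 99, 30]
print(result is target)           # True -- mutable targets are updated in place
```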
def ami_lookup(region='us-east-1', name='tomcat8'):
if AMI_JSON_URL:
ami_dict = _get_ami_dict(AMI_JSON_URL)
ami_id = ami_dict[region][name]
elif GITLAB_TOKEN:
warn_user('Use AMI_JSON_URL feature instead.')
ami_contents = _get_ami_file(region=region)
ami_dict = json.loads(ami_contents)
ami_id = ami_dict[name]
else:
ami_id = name
LOG.info('Using AMI: %s', ami_id)
return ami_id
|
Look up AMI ID.
Use _name_ to find AMI ID. If no ami_base_url or gitlab_token is provided,
_name_ is returned as the ami id.
Args:
region (str): AWS Region to find AMI ID.
name (str): Simple AMI base name to lookup.
Returns:
str: AMI ID for _name_ in _region_.
|
juraj-google-style
|
def define_saver(exclude=None):
variables = []
exclude = exclude or []
exclude = [re.compile(regex) for regex in exclude]
for variable in tf.global_variables():
if any(regex.match(variable.name) for regex in exclude):
continue
variables.append(variable)
saver = tf.train.Saver(variables, keep_checkpoint_every_n_hours=5)
return saver
|
Create a saver for the variables we want to checkpoint.
Args:
exclude: List of regexes to match variable names to exclude.
Returns:
Saver object.
|
juraj-google-style
|
def set_query(self, value):
if (isinstance(value, basestring) or (value is None)):
self._content['query'] = value
elif hasattr(value, 'keys'):
self._content['query'] = query.terms_from_dict(value)
else:
        raise TypeError('Query must be a string or dict. Got: {} instead!'.format(type(value)))
|
Convert a dict form of query in a string of needed and store the query string.
Args:
value -- A query string or a dict with query xpaths as keys and text or
nested query dicts as values.
|
codesearchnet
|
def lookup(self, keys, name=None):
if keys.dtype.base_dtype != self._key_dtype:
raise TypeError(f'Dtype of argument `keys` must be {self._key_dtype}, received: {keys.dtype}')
values = keys
if isinstance(keys, (sparse_tensor.SparseTensor, internal.RaggedTensor)):
values = keys.values
if self._table and self._table.key_dtype.base_dtype == dtypes.int64:
values = math_ops.cast(values, dtypes.int64)
if self._num_oov_buckets == 0:
ids = self._table.lookup(values, name=name)
else:
with ops.name_scope(name, '%s_Lookup' % self.name):
str_to_hash_bucket = self._get_string_to_hash_bucket_fn(self._hasher_spec)
buckets = str_to_hash_bucket(_as_string(values), num_buckets=self._num_oov_buckets, name='hash_bucket')
if self._table:
ids = self._table.lookup(values)
buckets = math_ops.add(buckets, self._table.size())
is_id_non_default = math_ops.not_equal(ids, self._table.default_value)
ids = array_ops.where_v2(is_id_non_default, ids, buckets)
else:
ids = buckets
if isinstance(keys, sparse_tensor.SparseTensor):
return sparse_tensor.SparseTensor(keys.indices, ids, keys.dense_shape)
elif isinstance(keys, internal.RaggedTensor):
return keys.with_values(ids)
return ids
|
Looks up `keys` in the table, outputs the corresponding values.
It assigns out-of-vocabulary keys to buckets based in their hashes.
Args:
keys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`.
name: Optional name for the op.
Returns:
A `SparseTensor` if keys are sparse, a `RaggedTensor` if keys are ragged,
otherwise a dense `Tensor`.
Raises:
TypeError: when `keys` doesn't match the table key data type.
|
github-repos
|
def parse_json(self, values_json):
values_map = json.loads(values_json)
return self.override_from_dict(values_map)
|
Override existing hyperparameter values, parsing new values from a json object.
Args:
values_json: String containing a json object of name:value pairs.
Returns:
The `HParams` instance.
Raises:
KeyError: If a hyperparameter in `values_json` doesn't exist.
ValueError: If `values_json` cannot be parsed.
|
codesearchnet
|
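A hypothetical `HParams` round trip, assuming the tf.contrib.training-style constructor that takes name/value keyword arguments; the hyperparameter names are illustrative.
```python
hparams = HParams(learning_rate=0.1, num_layers=2)
hparams.parse_json('{"learning_rate": 0.01}')
print(hparams.learning_rate)  # 0.01
print(hparams.num_layers)     # 2 (unchanged)
```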
def variants(self, case_id, skip=0, count=1000, filters=None):
filters = (filters or {})
logger.debug('Looking for variants in {0}'.format(case_id))
limit = (count + skip)
gemini_query = (filters.get('gemini_query') or 'SELECT * from variants v')
any_filter = False
if filters.get('frequency'):
frequency = filters['frequency']
extra_info = '(v.max_aaf_all < {0} or v.max_aaf_all is Null)'.format(frequency)
gemini_query = self.build_gemini_query(gemini_query, extra_info)
if filters.get('cadd'):
cadd_score = filters['cadd']
extra_info = '(v.cadd_scaled > {0})'.format(cadd_score)
gemini_query = self.build_gemini_query(gemini_query, extra_info)
if filters.get('gene_ids'):
gene_list = [gene_id.strip() for gene_id in filters['gene_ids']]
gene_string = 'v.gene in ('
for (index, gene_id) in enumerate(gene_list):
if (index == 0):
gene_string += "'{0}'".format(gene_id)
else:
gene_string += ", '{0}'".format(gene_id)
gene_string += ')'
gemini_query = self.build_gemini_query(gemini_query, gene_string)
if filters.get('range'):
chrom = filters['range']['chromosome']
if (not chrom.startswith('chr')):
chrom = 'chr{0}'.format(chrom)
range_string = "v.chrom = '{0}' AND ((v.start BETWEEN {1} AND {2}) OR (v.end BETWEEN {1} AND {2}))".format(chrom, filters['range']['start'], filters['range']['end'])
gemini_query = self.build_gemini_query(gemini_query, range_string)
filtered_variants = self._variants(case_id=case_id, gemini_query=gemini_query)
if filters.get('consequence'):
consequences = set(filters['consequence'])
filtered_variants = (variant for variant in filtered_variants if set(variant.consequences).intersection(consequences))
if filters.get('impact_severities'):
severities = set([severity.strip() for severity in filters['impact_severities']])
new_filtered_variants = []
filtered_variants = (variant for variant in filtered_variants if set([variant.impact_severity]).intersection(severities))
if filters.get('sv_len'):
sv_len = int(filters['sv_len'])
filtered_variants = (variant for variant in filtered_variants if (variant.sv_len >= sv_len))
variants = []
for (index, variant_obj) in enumerate(filtered_variants):
if (index >= skip):
if (index < limit):
variants.append(variant_obj)
else:
break
return Results(variants, len(variants))
|
Return count variants for a case.
This function needs to have different behaviours based on what is asked
for. It should always try to give minimal information back to improve
on speed. For example, if consequences are not asked for we will not
build all transcripts. If not sv variants we will not build sv
coordinates.
So the minimal case is to just show what is asked for in the variants
interface.
Args:
case_id (str): A gemini db
skip (int): Skip first variants
count (int): The number of variants to return
filters (dict): A dictionary with filters. Currently this will
look like: {
gene_list: [] (list of hgnc ids),
frequency: None (float),
cadd: None (float),
consequence: [] (list of consequences),
impact_severities: [] (list of consequences),
genetic_models [] (list of genetic models)
}
Returns:
puzzle.constants.Results : Named tuple with variants and
nr_of_variants
|
codesearchnet
|
def compile_state_cpfs(self, scope: Dict[(str, TensorFluent)], batch_size: Optional[int]=None, noise: Optional[Noise]=None) -> List[CPFPair]:
next_state_fluents = []
with self.graph.as_default():
with tf.name_scope('state_cpfs'):
for cpf in self.rddl.domain.state_cpfs:
cpf_noise = (noise.get(cpf.name, None) if (noise is not None) else None)
name_scope = utils.identifier(cpf.name)
with tf.name_scope(name_scope):
t = self._compile_expression(cpf.expr, scope, batch_size, cpf_noise)
next_state_fluents.append((cpf.name, t))
key = (lambda f: self.rddl.domain.next_state_fluent_ordering.index(f[0]))
next_state_fluents = sorted(next_state_fluents, key=key)
return next_state_fluents
|
Compiles the next state fluent CPFs given the current `state` and `action` scope.
Args:
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation.
batch_size (Optional[int]): The batch size.
Returns:
A list of state fluent CPFs compiled to :obj:`rddl2tf.fluent.TensorFluent`.
|
codesearchnet
|
def PushEvent(self, timestamp, event_data):
heap_values = (timestamp, event_data)
heapq.heappush(self._heap, heap_values)
self.data_size += len(event_data)
|
Pushes a serialized event onto the heap.
Args:
timestamp (int): event timestamp, which contains the number of
micro seconds since January 1, 1970, 00:00:00 UTC.
event_data (bytes): serialized event.
|
codesearchnet
|
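A standalone sketch of the heap invariant `PushEvent` relies on: `heapq` keeps the smallest `(timestamp, event_data)` tuple at the front, so the earliest event is popped first. The timestamps and payloads are made up.
```python
import heapq

heap = []
for timestamp, event_data in [(200, b'event-b'), (100, b'event-a')]:
    heapq.heappush(heap, (timestamp, event_data))

print(heapq.heappop(heap))  # (100, b'event-a') -- earliest timestamp first
```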
def convert_version_to_int(version):
version = version.split('-')[0]
version_segments = version.split('.')
if len(version_segments) == 2:
version_segments.append('0')
for seg in version_segments:
if not seg.isdigit():
return None
version_str = ''.join(['%03d' % int(seg) for seg in version_segments])
return int(version_str)
|
Convert a version number to an integer that can be used to compare.
Version strings of the form X.YZ and X.Y.Z-xxxxx are supported. The
'xxxxx' part, for instance 'homebrew' on OS/X, is ignored.
Args:
version: a version to be converted
Returns:
An integer if converted successfully, otherwise return None.
|
github-repos
|
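Worked examples for `convert_version_to_int`; the expected values follow directly from the zero-padding logic above.
```python
print(convert_version_to_int('1.11.0'))            # 1011000
print(convert_version_to_int('2.4'))               # 2004000 (missing patch treated as 0)
print(convert_version_to_int('10.13.6-homebrew'))  # 10013006 ('-homebrew' suffix ignored)
print(convert_version_to_int('not.a.version'))     # None
```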
def __version_capture_slp(self, pkg_id, version_binary, version_display, display_name):
if (self.__pkg_obj and hasattr(self.__pkg_obj, 'version_capture')):
(version_str, src, version_user_str) = self.__pkg_obj.version_capture(pkg_id, version_binary, version_display, display_name)
if ((src != 'use-default') and version_str and src):
return (version_str, src, version_user_str)
elif (src != 'use-default'):
raise ValueError("version capture within object '{0}' failed for pkg id: '{1}' it returned '{2}' '{3}' '{4}'".format(six.text_type(self.__pkg_obj), pkg_id, version_str, src, version_user_str))
if (version_display and (re.match('\\d+', version_display, flags=(re.IGNORECASE + re.UNICODE)) is not None)):
version_str = version_display
src = 'display-version'
elif (version_binary and (re.match('\\d+', version_binary, flags=(re.IGNORECASE + re.UNICODE)) is not None)):
version_str = version_binary
src = 'version-binary'
else:
src = 'none'
version_str = '0.0.0.0.0'
return (version_str, src, version_str)
|
This returns the version and where the version string came from, based on instructions
under ``version_capture``, if ``version_capture`` is missing, it defaults to
value of display-version.
Args:
pkg_id (str): The package id of the software/component.
version_binary (str): The binary version string.
version_display (str): The display version string.
display_name (str): The display name of the software/component.
Returns:
tuple: The version string, where it came from ('display-version', 'version-binary', 'none', or a value from the package object), and the user-facing version string.
|
codesearchnet
|
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
line = clean_lines.elided[linenum]
fncall = line
for pattern in (r'\bif\s*\((.*)\)\s*{',
r'\bfor\s*\((.*)\)\s*{',
r'\bwhile\s*\((.*)\)\s*[{;]',
r'\bswitch\s*\((.*)\)\s*{'):
match = Search(pattern, line)
if match:
fncall = match.group(1)
break
if (
not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
fncall) and
not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
if Search(r'\w\s*\(\s(?!\s*\\$)', fncall):
error(filename, linenum, 'whitespace/parens', 4,
'Extra space after ( in function call')
elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Extra space after (')
if (Search(r'\w\s+\(', fncall) and
not Search(r'_{0,2}asm_{0,2}\s+_{0,2}volatile_{0,2}\s+\(', fncall) and
      not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and
not Search(r'\bcase\s+\(', fncall)):
if Search(r'\boperator_*\b', line):
error(filename, linenum, 'whitespace/parens', 0,
'Extra space before ( in function call')
else:
error(filename, linenum, 'whitespace/parens', 4,
'Extra space before ( in function call')
if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
if Search(r'^\s+\)', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Closing ) should be moved to the previous line')
else:
error(filename, linenum, 'whitespace/parens', 2,
'Extra space before )')
|
Checks for the correctness of various spacing around function calls.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
|
juraj-google-style
|
def CheckForNewlineAtEOF(filename, lines, error):
if len(lines) < 3 or lines[-2]:
error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
'Could not find a newline character at the end of the file.')
|
Logs an error if there is no newline char at the end of the file.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
|
juraj-google-style
|
def in_range(self, ver, req):
if req.exclude is not None:
for v in ver:
if v in req.exclude:
return False
include_checked = False
if req.include is not None:
for v in ver:
if v in req.include:
return True
include_checked = True
if req.range != [None, None]:
min_v = req.range[0]
max_v = req.range[1]
ver = ver[0]
lg = _compare_versions(min_v, ver)['larger']
sm = _compare_versions(ver, max_v)['smaller']
if lg in [ver, 'equal'] and sm in [ver, 'equal', 'inf']:
return True
else:
err_msg = '[Error] Version is outside of supported range. '
err_msg += '(config = %s, ' % str(req.config)
err_msg += 'version = %s, ' % str(ver)
err_msg += 'supported range = %s)' % str(req.range)
logging.warning(err_msg)
self.warning_msg.append(err_msg)
return False
else:
err_msg = ''
if include_checked:
err_msg = '[Error] Version is outside of supported range. '
else:
err_msg = '[Error] Missing specification. '
err_msg += '(config = %s, ' % str(req.config)
err_msg += 'version = %s, ' % str(ver)
err_msg += 'supported range = %s)' % str(req.range)
logging.warning(err_msg)
self.warning_msg.append(err_msg)
return False
|
Checks if a version satisfies a version and/or compatibility requirement.
Args:
ver: List whose first item is a config version that needs to be checked
for support status and version compatibility.
e.g. ver = [`1.0`]
req: `_Reqs` class instance that represents a configuration version and
compatibility specifications.
Returns:
Boolean output of checking if version `ver` meets the requirement
stored in `req` (or a `_Reqs` requirements class instance).
|
github-repos
|
def get_attribute_id(self, attribute_key):
attribute = self.attribute_key_map.get(attribute_key)
has_reserved_prefix = attribute_key.startswith(RESERVED_ATTRIBUTE_PREFIX)
if attribute:
if has_reserved_prefix:
self.logger.warning(('Attribute %s unexpectedly has reserved prefix %s; using attribute ID '
'instead of reserved attribute name.' % (attribute_key, RESERVED_ATTRIBUTE_PREFIX)))
return attribute.id
if has_reserved_prefix:
return attribute_key
self.logger.error('Attribute "%s" is not in datafile.' % attribute_key)
self.error_handler.handle_error(exceptions.InvalidAttributeException(enums.Errors.INVALID_ATTRIBUTE_ERROR))
return None
|
Get attribute ID for the provided attribute key.
Args:
attribute_key: Attribute key for which attribute is to be fetched.
Returns:
Attribute ID corresponding to the provided attribute key.
|
juraj-google-style
|
def num_parameters(self, only_trainable: bool=False) -> int:
if only_trainable:
return int(sum((np.prod(w.shape.as_list()) for w in self.trainable_variables)))
else:
return self.count_params()
|
Get the number of (optionally, trainable) parameters in the model.
Args:
only_trainable (`bool`, *optional*, defaults to `False`):
Whether or not to return only the number of trainable parameters
Returns:
`int`: The number of parameters.
|
github-repos
|
def make_trace_api(client):
generated = trace_service_client.TraceServiceClient(
credentials=client._credentials, client_info=_CLIENT_INFO
)
return _TraceAPI(generated, client)
|
Create an instance of the gapic Trace API.
Args:
client (~google.cloud.trace.client.Client): The client that holds
configuration details.
Returns:
A :class:`~google.cloud.trace._gapic._TraceAPI` instance with the
proper configurations.
|
juraj-google-style
|
def skip_summary():
replica_context = distribute_lib.get_replica_context()
if not replica_context:
return False
replica_id = replica_context.replica_id_in_sync_group
if isinstance(replica_id, tensor.Tensor):
replica_id = tensor_util.constant_value(replica_id)
return replica_id and replica_id > 0
|
Determines if summary should be skipped.
If using multiple replicas in distributed strategy, skip summaries on all
replicas except the first one (replica_id=0).
Returns:
True if the summary is skipped; False otherwise.
|
github-repos
|
async def leader(self):
response = (await self._api.get('/v1/status/leader'))
if (response.status == 200):
return response.body
|
Returns the current Raft leader
Returns:
str: address of leader such as ``10.1.10.12:8300``
|
codesearchnet
|
def add_rel(self, source_node_id, target_node_id, rel):
n1_ref = self.graph_db.get_indexed_node('Node', 'node_id', source_node_id)
n2_ref = self.graph_db.get_indexed_node('Node', 'node_id', target_node_id)
if not n1_ref or not n2_ref:
        print('Cannot add relationship between unfound nodes: %s --> %s' % (source_node_id, target_node_id))
return
path = neo4j.Path(n1_ref, rel, n2_ref)
path.get_or_create(self.graph_db)
|
Add a relationship between nodes.
Args:
source_node_id: Node Id for the source node.
target_node_id: Node Id for the target node.
rel: Name of the relationship 'contains'
|
juraj-google-style
|
def get_absorbing_atom_symbol_index(absorbing_atom, structure):
if isinstance(absorbing_atom, str):
return (absorbing_atom, structure.indices_from_symbol(absorbing_atom)[0])
elif isinstance(absorbing_atom, int):
return (str(structure[absorbing_atom].specie), absorbing_atom)
else:
raise ValueError('absorbing_atom must be either specie symbol or site index')
|
Return the absorbing atom symbol and site index in the given structure.
Args:
absorbing_atom (str/int): symbol or site index
structure (Structure)
Returns:
str, int: symbol and site index
|
codesearchnet
|
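A hedged pymatgen example; the rock-salt style structure below is made up, and the import path assumes a recent pymatgen release.
```python
from pymatgen.core import Lattice, Structure  # import path assumed

structure = Structure(Lattice.cubic(5.64), ['Na', 'Cl'],
                      [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
print(get_absorbing_atom_symbol_index('Cl', structure))  # ('Cl', 1)
print(get_absorbing_atom_symbol_index(0, structure))     # ('Na', 0)
```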
def segment(self, text):
files = {'text': text}
(res, status_code) = self.post(self.segmentation_service, files=files)
if (status_code != 200):
logger.debug('Segmentation failed.')
return (self.decode(res), status_code)
|
Call the segmenter in order to split text in sentences.
Args:
text (str): Text to be segmented.
Returns:
dict, int: A dict containing a list of dicts with the offsets of
each sentence; an integer representing the response code.
|
codesearchnet
|
def controller_factory(cls, passes, options, **partial_controller):
if (None in partial_controller.values()):
raise TranspilerError('The controller needs a condition.')
if partial_controller:
for registered_controller in cls.registered_controllers.keys():
if (registered_controller in partial_controller):
return cls.registered_controllers[registered_controller](passes, options, **partial_controller)
raise TranspilerError(('The controllers for %s are not registered' % partial_controller))
else:
return FlowControllerLinear(passes, options)
|
Constructs a flow controller based on the partially evaluated controller arguments.
Args:
passes (list[BasePass]): passes to add to the flow controller.
options (dict): PassManager options.
**partial_controller (dict): Partially evaluated controller arguments in the form
`{name:partial}`
Raises:
TranspilerError: When partial_controller is not well-formed.
Returns:
FlowController: A FlowController instance.
|
codesearchnet
|
def lin_moma2(self, objective, wt_obj):
reactions = set(self._adjustment_reactions())
z_diff = self._z_diff
v = self._v
v_wt = self._v_wt
with self.constraints() as constr:
for f_reaction in reactions:
constr.add((z_diff[f_reaction] >= (v_wt[f_reaction] - v[f_reaction])), ((v_wt[f_reaction] - v[f_reaction]) >= (- z_diff[f_reaction])))
self._prob.set_objective(z_diff.sum(reactions))
constr.add((self._v_wt[objective] >= wt_obj))
self._solve(lp.ObjectiveSense.Minimize)
|
Find the smallest redistribution vector using a linear objective.
The change in flux distribution is minimized by minimizing the sum
of the absolute values of the differences of wild type FBA solution
and the knockout strain flux solution.
Creates the constraint that we select the optimal flux vector that
is closest to the wildtype. This might still return an arbitrary flux
vector that maximizes the objective function.
Args:
objective: Objective reaction for the model.
wt_obj: The flux value for your wild type objective reactions.
Can either use an expiremental value or on determined by FBA
by using :meth:`.get_fba_obj_flux(objective)`.
|
codesearchnet
|
def _combine_sparse_successor(parent_indices, parent_shape, child_indices, child_values, child_shape, name=None):
with ops.name_scope(name, 'CombineSparseSuccessor', [parent_indices, parent_shape, child_indices, child_values, child_shape]):
(indices, values, shape) = ops_module.combine_sparse_successor(parent_indices, parent_shape, child_indices, child_values, child_shape)
return tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
|
Combines two string `SparseTensor`s, where second `SparseTensor` is the result of expanding
first `SparseTensor`'s values.
Args:
parent_indices: 2D int64 `Tensor` with parent `SparseTensor` indices
parent_shape: 1D int64 `Tensor` with parent `SparseTensor` dense_shape
child_indices: 2D int64 `Tensor` with child `SparseTensor` indices
child_values: 1D int64 `Tensor` with child `SparseTensor` values
child_shape: 1D int64 `Tensor` with child `SparseTensor` dense_shape
name: A name for the operation (optional).
Returns:
`SparseTensor` with an additional dimension of size 1 added.
|
codesearchnet
|
def circuits_to_qobj(circuits, qobj_header=None, qobj_id=None, backend_name=None, config=None, shots=None, max_credits=None, basis_gates=None, coupling_map=None, seed=None, memory=None):
warnings.warn('circuits_to_qobj is deprecated and will be removed in Qiskit Terra 0.9. Use qiskit.compiler.assemble() to serialize circuits into a qobj.', DeprecationWarning)
qobj_header = (qobj_header or QobjHeader())
if backend_name:
qobj_header.backend_name = backend_name
if basis_gates:
warnings.warn('basis_gates was unused and will be removed.', DeprecationWarning)
if coupling_map:
warnings.warn('coupling_map was unused and will be removed.', DeprecationWarning)
qobj = assemble(experiments=circuits, qobj_id=qobj_id, qobj_header=qobj_header, shots=shots, memory=memory, max_credits=max_credits, seed_simulator=seed, config=config)
return qobj
|
Convert a list of circuits into a qobj.
Args:
circuits (list[QuantumCircuits] or QuantumCircuit): circuits to compile
qobj_header (QobjHeader): header to pass to the results
qobj_id (int): TODO: delete after qiskit-terra 0.8
backend_name (str): TODO: delete after qiskit-terra 0.8
config (dict): TODO: delete after qiskit-terra 0.8
shots (int): TODO: delete after qiskit-terra 0.8
max_credits (int): TODO: delete after qiskit-terra 0.8
basis_gates (str): TODO: delete after qiskit-terra 0.8
coupling_map (list): TODO: delete after qiskit-terra 0.8
seed (int): TODO: delete after qiskit-terra 0.8
memory (bool): TODO: delete after qiskit-terra 0.8
Returns:
Qobj: the Qobj to be run on the backends
|
codesearchnet
|
def _take_screenshot(self, screenshot=False, name_prefix='unknown'):
if isinstance(screenshot, bool):
if not screenshot:
return
return self._save_screenshot(name_prefix=name_prefix)
if isinstance(screenshot, Image.Image):
return self._save_screenshot(screen=screenshot, name_prefix=name_prefix)
raise TypeError("invalid type for func _take_screenshot: "+ type(screenshot))
|
This is different from _save_screenshot.
The return value maybe None or the screenshot path
Args:
screenshot: bool or PIL image
|
juraj-google-style
|
def require_representation(self, req):
try:
type_, subtype, _ = parse_mime_type(req.content_type)
content_type = '/'.join((type_, subtype))
except:
raise falcon.HTTPUnsupportedMediaType(
description="Invalid Content-Type header: {}".format(
req.content_type
)
)
if content_type == 'application/json':
body = req.stream.read()
return json.loads(body.decode('utf-8'))
else:
raise falcon.HTTPUnsupportedMediaType(
description="only JSON supported, got: {}".format(content_type)
)
|
Require raw representation dictionary from falcon request object.
This does not perform any field parsing or validation but only uses
allowed content-encoding handler to decode content body.
Note:
Currently only JSON is allowed as content type.
Args:
req (falcon.Request): request object
Returns:
dict: raw dictionary of representation supplied in request body
|
juraj-google-style
|
def execute_before(self, sensor_graph, scope_stack):
sensor_graph.add_constant(self.stream, 0)
new_scope = GatedClockScope(sensor_graph, scope_stack, (self.stream, self.trigger))
scope_stack.append(new_scope)
|
Execute statement before children are executed.
Args:
sensor_graph (SensorGraph): The sensor graph that we are building or
modifying
scope_stack (list(Scope)): A stack of nested scopes that may influence
how this statement allocates clocks or other stream resources.
|
juraj-google-style
|
def SendReply(self, response, tag=None):
if not isinstance(response, rdfvalue.RDFValue):
raise ValueError("SendReply can only send RDFValues")
if self.rdf_flow.parent_flow_id:
response = rdf_flow_objects.FlowResponse(
client_id=self.rdf_flow.client_id,
request_id=self.rdf_flow.parent_request_id,
response_id=self.GetNextResponseId(),
payload=response,
flow_id=self.rdf_flow.parent_flow_id,
tag=tag)
self.flow_responses.append(response)
else:
reply = rdf_flow_objects.FlowResult(
client_id=self.rdf_flow.client_id,
flow_id=self.rdf_flow.flow_id,
hunt_id=self.rdf_flow.parent_hunt_id,
payload=response,
tag=tag)
self.replies_to_write.append(reply)
self.replies_to_process.append(reply)
self.rdf_flow.num_replies_sent += 1
|
Allows this flow to send a message to its parent flow.
If this flow does not have a parent, the message is ignored.
Args:
response: An RDFValue() instance to be sent to the parent.
tag: If specified, tag the result with this tag.
Raises:
ValueError: If responses is not of the correct type.
|
juraj-google-style
|
def is_stateful(self) -> bool:
return True
|
Indicates whether this ThresholdFn is stateful.
Returns:
bool: Always True for `QuantileThreshold` as it is stateful.
|
github-repos
|
def read_video_opencv(video_path: str, sample_indices_fn: Callable, **kwargs):
requires_backends(read_video_opencv, ['cv2'])
import cv2
video = cv2.VideoCapture(video_path)
total_num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
video_fps = video.get(cv2.CAP_PROP_FPS)
duration = total_num_frames / video_fps if video_fps else 0
metadata = VideoMetadata(total_num_frames=int(total_num_frames), fps=float(video_fps), duration=float(duration), video_backend='opencv')
indices = sample_indices_fn(metadata=metadata, **kwargs)
index = 0
frames = []
while video.isOpened():
success, frame = video.read()
if not success:
break
if index in indices:
height, width, channel = frame.shape
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frames.append(frame[0:height, 0:width, 0:channel])
if success:
index += 1
if index >= total_num_frames:
break
video.release()
metadata.frames_indices = indices
return (np.stack(frames), metadata)
|
Decode a video using the OpenCV backend.
Args:
video_path (`str`):
Path to the video file.
sample_indices_fn (`Callable`):
A callable function that will return indices at which the video should be sampled. If the video has to be loaded
using a different sampling technique than provided by the `num_frames` or `fps` arguments, one should provide their own `sample_indices_fn`.
If not provided, simple uniform sampling with fps is performed.
Example:
def sample_indices_fn(metadata, **kwargs):
return np.linspace(0, metadata.total_num_frames - 1, num_frames, dtype=int)
Returns:
Tuple[`np.array`, `VideoMetadata`]: A tuple containing:
- Numpy array of frames in RGB (shape: [num_frames, height, width, 3]).
- `VideoMetadata` object.
|
github-repos
|
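A hedged call to `read_video_opencv`; the video path is illustrative and the custom `sample_8_frames` sampler is a hypothetical helper matching the `sample_indices_fn(metadata=..., **kwargs)` calling convention used above.
```python
import numpy as np

def sample_8_frames(metadata, **kwargs):
    # Uniformly pick 8 frame indices across the whole clip.
    return np.linspace(0, metadata.total_num_frames - 1, 8, dtype=int)

frames, metadata = read_video_opencv('clip.mp4', sample_8_frames)
print(frames.shape)  # (8, height, width, 3), assuming the clip has >= 8 frames
```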
def parse_flags_with_usage(args):
try:
return FLAGS(args)
except flags.Error as error:
sys.stderr.write(('FATAL Flags parsing error: %s\n' % error))
sys.stderr.write('Pass --helpshort or --helpfull to see help on flags.\n')
sys.exit(1)
|
Tries to parse the flags, print usage, and exit if unparseable.
Args:
args: [str], a non-empty list of the command line arguments including
program name.
Returns:
[str], a non-empty list of remaining command line arguments after parsing
flags, including program name.
|
codesearchnet
|
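A minimal absl-flags sketch for `parse_flags_with_usage`; the `--mode` flag is defined here purely for illustration.
```python
from absl import flags

flags.DEFINE_string('mode', 'train', 'Run mode (illustrative flag).')

remaining = parse_flags_with_usage(['prog', '--mode=eval', 'extra_arg'])
print(remaining)         # ['prog', 'extra_arg'] -- program name plus unparsed args
print(flags.FLAGS.mode)  # 'eval'
```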
def hist_axis_func(axis_type: enum.Enum) -> Callable[([Hist], Axis)]:
def axis_func(hist: Hist) -> Axis:
' Retrieve the axis associated with the ``HistAxisRange`` object for a given hist.\n\n Args:\n hist: Histogram from which the selected axis should be retrieved.\n axis_type: Enumeration corresponding to the axis to be restricted. The numerical\n value of the enum should be axis number (for a THnBase).\n Returns:\n ROOT.TAxis: The axis associated with the ``HistAxisRange`` object.\n '
try:
hist_axis_type = axis_type.value
except AttributeError:
hist_axis_type = axis_type
if (hasattr(hist, 'ProjectionND') and hasattr(hist, 'Projection')):
return hist.GetAxis(hist_axis_type)
else:
axis_function_map = {TH1AxisType.x_axis.value: hist.GetXaxis, TH1AxisType.y_axis.value: hist.GetYaxis, TH1AxisType.z_axis.value: hist.GetZaxis}
return_func = axis_function_map[hist_axis_type]
return return_func()
return axis_func
|
Wrapper to retrieve the axis of a given histogram.
This can be convenient outside of just projections, so it's made available in the API.
Args:
axis_type: The type of axis to retrieve.
Returns:
Callable to retrieve the specified axis when given a hist.
|
codesearchnet
|
def window(self, begin, end=None):
if (self._name_parts.decorator != ''):
raise Exception('Cannot use window() on an already decorated table')
start = Table._convert_decorator_time(begin)
if (end is None):
if isinstance(begin, datetime.timedelta):
end = datetime.timedelta(0)
else:
end = datetime.datetime.utcnow()
stop = Table._convert_decorator_time(end)
if ((start > 0 >= stop) or (stop > 0 >= start)):
raise Exception(('window: Between arguments must both be absolute or relative: %s, %s' % (str(begin), str(end))))
if (start > stop):
raise Exception(('window: Between arguments: begin must be before end: %s, %s' % (str(begin), str(end))))
return Table(('%s@%s-%s' % (self._full_name, str(start), str(stop))), context=self._context)
|
Return a new Table limited to the rows added to this Table during the specified time range.
Args:
begin: the start time of the window. This can be a Python datetime (absolute) or timedelta
(relative to current time). The result must be after the table was created and no more
than seven days in the past.
Note that using a relative value will provide a varying snapshot, not a fixed
snapshot; any queries issued against such a Table will be done against a snapshot
that has an age relative to the execution time of the query.
end: the end time of the snapshot; if None, then the current time is used. The types and
interpretation of values is as for start.
Returns:
A new Table object referencing the window.
Raises:
An exception if this Table is already decorated, or if the time specified is invalid.
|
codesearchnet
|
def to_proto(self, export_scope=None):
raise NotImplementedError
|
Converts a `Variable` to a `VariableDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `VariableDef` protocol buffer, or `None` if the `Variable` is not
in the specified name scope.
|
github-repos
|
def bounded_uniform(cls, lowest, highest, weight_interval=None):
if (weight_interval is None):
weights = [(lowest, 1), (highest, 1)]
else:
i = lowest
weights = []
while (i < highest):
weights.append((i, 1))
i += weight_interval
weights.append((highest, 1))
return cls(weights)
|
Initialize with a uniform distribution between two values.
If no ``weight_interval`` is passed, this weight distribution
will just consist of ``[(lowest, 1), (highest, 1)]``. If specified,
weights (still with uniform weight distribution) will be added every
``weight_interval``. Use this if you intend to modify the weights
in any complex way after initialization.
Args:
lowest (float or int):
highest (float or int):
weight_interval (int):
Returns:
SoftFloat: A newly constructed instance.
|
codesearchnet
|
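A hedged usage sketch of bounded_uniform, assuming SoftFloat stores the constructed weight list unchanged:
sf = SoftFloat.bounded_uniform(0.0, 10.0)
# -> weights [(0.0, 1), (10.0, 1)]
sf = SoftFloat.bounded_uniform(0.0, 10.0, weight_interval=2)
# -> weights [(0.0, 1), (2.0, 1), (4.0, 1), (6.0, 1), (8.0, 1), (10.0, 1)]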
def __user_location(__pkg: str, type_) -> str:
if ALLOW_DARWIN and sys.platform == 'darwin':
user_dir = '~/Library/{}'.format(__LOCATIONS[type_][0])
else:
user_dir = getenv('XDG_{}_HOME'.format(type_.upper()),
path.sep.join([getenv('HOME', ''),
__LOCATIONS[type_][1]]))
return path.expanduser(path.sep.join([user_dir, __pkg]))
|
Utility function to look up XDG basedir locations
Args:
__pkg: Package name
type_: Location type
|
juraj-google-style
|
def __get_conn(self, flag_force_new=False, filename=None):
flag_open_new = (flag_force_new or (not self._conn_is_open()))
if flag_open_new:
if (filename is None):
filename = self.filename
conn = self._get_conn(filename)
self._conn = conn
else:
conn = self._conn
return conn
|
Returns a connection to the database. Reuses the existing connection unless flag_force_new is set or no connection is currently open.
Args:
flag_force_new:
filename:
Returns: sqlite3.Connection object
**Note** this is a private method because you can get a connection to any file, so it has to
be used in the right moment
|
codesearchnet
|
def save_target_classes_for_batch(self, filename, image_batches, batch_id):
images = image_batches.data[batch_id]['images']
with open(filename, 'w') as f:
for (image_id, image_val) in iteritems(images):
target_class = self.get_target_class(image_val['dataset_image_id'])
f.write('{0}.png,{1}\n'.format(image_id, target_class))
|
Saves file with target class for given dataset batch.
Args:
filename: output filename
image_batches: instance of ImageBatchesBase with dataset batches
batch_id: dataset batch ID
|
codesearchnet
|
def matmul(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:
out = math_ops.matmul(input_tensor, self.filters, name='sample/matmul')
if bias_fn is not None:
out = bias_fn(out, self.bias)
if activation_fn is not None:
out = activation_fn(out)
return {'output': out}
|
Performs a matrix multiplication.
Depending on self.bias_fn and self.activation_fn, it may add a bias
term or apply the activation function.
Args:
input_tensor: Input tensor to matmul with the filter.
Returns:
A map of: output key -> output result.
|
github-repos
|
def diff(self, container):
return self._result(self._get(self._url('/containers/{0}/changes', container)), True)
|
Inspect changes on a container's filesystem.
Args:
container (str): The container to diff
Returns:
(str)
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
codesearchnet
|
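A hedged usage sketch with docker-py's low-level APIClient, which exposes this diff() method; the socket path and container name are illustrative:
import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')
changes = client.diff('my_container')
# Each entry describes one filesystem change, e.g. {'Path': '/tmp/foo', 'Kind': 1}.
for change in changes:
    print(change)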
def __init__(self, indices, num_segments, batch_dims=0):
self.indices = torch.as_tensor(indices, device=indices.device)
self.num_segments = torch.as_tensor(num_segments, device=indices.device)
self.batch_dims = batch_dims
|
Creates an index
Args:
indices (`torch.LongTensor`, same shape as a *values* Tensor to which the indices refer):
Tensor containing the indices.
num_segments (`torch.LongTensor`):
Scalar tensor, the number of segments. All elements in a batched segmented tensor must have the same
number of segments (although many segments can be empty).
batch_dims (`int`, *optional*, defaults to 0):
The number of batch dimensions. The first *batch_dims* dimensions of a SegmentedTensor are treated as
batch dimensions. Segments in different batch elements are always distinct even if they have the same
index.
|
github-repos
|
def read_tree_newick(newick):
if (not isinstance(newick, str)):
try:
newick = str(newick)
except:
raise TypeError('newick must be a str')
if newick.lower().endswith('.gz'):
f = gopen(expanduser(newick))
ts = f.read().decode().strip()
f.close()
elif isfile(expanduser(newick)):
f = open(expanduser(newick))
ts = f.read().strip()
f.close()
else:
ts = newick.strip()
lines = ts.splitlines()
if (len(lines) != 1):
return [read_tree_newick(l) for l in lines]
try:
t = Tree()
t.is_rooted = ts.startswith('[&R]')
if (ts[0] == '['):
ts = ']'.join(ts.split(']')[1:]).strip()
ts = ts.replace(', ', ',')
n = t.root
i = 0
while (i < len(ts)):
if (ts[i] == ';'):
if ((i != (len(ts) - 1)) or (n != t.root)):
raise RuntimeError(INVALID_NEWICK)
elif (ts[i] == '('):
c = Node()
n.add_child(c)
n = c
elif (ts[i] == ')'):
n = n.parent
elif (ts[i] == ','):
n = n.parent
c = Node()
n.add_child(c)
n = c
elif (ts[i] == ':'):
i += 1
ls = ''
while ((ts[i] != ',') and (ts[i] != ')') and (ts[i] != ';')):
ls += ts[i]
i += 1
n.edge_length = float(ls)
i -= 1
else:
label = ''
while ((ts[i] != ':') and (ts[i] != ',') and (ts[i] != ';') and (ts[i] != ')')):
label += ts[i]
i += 1
i -= 1
n.label = label
i += 1
except Exception as e:
raise RuntimeError(('Failed to parse string as Newick: %s' % ts))
return t
|
Read a tree from a Newick string or file
Args:
``newick`` (``str``): Either a Newick string or the path to a Newick file (plain-text or gzipped)
Returns:
``Tree``: The tree represented by ``newick``. If the Newick file has multiple trees (one per line), a ``list`` of ``Tree`` objects will be returned
|
codesearchnet
|
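A usage sketch for read_tree_newick, parsing a Newick string directly (the same call also accepts a plain-text or gzipped file path):
t = read_tree_newick('((A:1,B:2):0.5,C:3);')
# The parser fills in labels and edge lengths as it walks the string;
# here the root has two children and the leaves are labelled A, B and C.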
def parse_query(self, query, index, stop_current, shuffle):
if index is not None and len(self.queue) > 0:
if index < 0 or index >= len(self.queue):
if len(self.queue) == 1:
self.statuslog.error("Play index must be 1 (1 song in queue)")
return
else:
self.statuslog.error("Play index must be between 1 and {}".format(len(self.queue)))
return
try:
yt_videos = api_music.parse_query(query, self.statuslog)
if shuffle:
random.shuffle(yt_videos)
if len(yt_videos) == 0:
self.statuslog.error("No results for: {}".format(query))
return
if index is None:
self.queue = self.queue + yt_videos
else:
if len(self.queue) > 0:
self.queue = self.queue[:index] + yt_videos + self.queue[index:]
else:
self.queue = yt_videos
self.update_queue()
if stop_current:
if self.streamer:
self.streamer.stop()
except Exception as e:
logger.exception(e)
|
Parses a query and adds it to the queue
Args:
query (str): Either a search term or a link
index (int): The index to enqueue at (None for end)
stop_current (bool): Whether to stop the current song after the songs are queued
shuffle (bool): Whether to shuffle the added songs
|
juraj-google-style
|
def clamp(value, maximum=None):
value = max(value, 0)
if maximum is not None:
return min(value, maximum)
else:
return value
|
Clamp numeric values to be non-negative and, optionally, less than a
given maximum.
Args:
value (float) :
A number to clamp.
maximum (float, optional) :
A maximum bound to clamp to. If None, there is no upper bound,
and values are only clamped to be non-negative. (default: None)
Returns:
float
|
juraj-google-style
|
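Quick worked examples of the clamp behaviour described above:
clamp(-3.2)             # -> 0   (negative values are raised to zero)
clamp(7.5)              # -> 7.5 (no upper bound supplied)
clamp(7.5, maximum=5)   # -> 5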
def _AsTensorList(x, p):
if not isinstance(x, (list, _basetuple)):
x = [x]
l = []
for v in x:
if isinstance(v, ops.Operation):
v = with_dependencies([v], p)
v = ops.convert_to_tensor_or_composite(v)
if isinstance(v, tensor_lib.Tensor):
l.append(array_ops.identity(v))
else:
l.append(indexed_slices.IndexedSlices(array_ops.identity(v.values), array_ops.identity(v.indices)))
return l
|
Return x as a list of Tensors or IndexedSlices.
For entries of `x` that are Operations, this returns an Identity of `p`
with a dependency on the operation.
Args:
x: A Tensor/IndexedSlices/Operation or a list or tuple of them.
p: A Tensor to return for entries in `x` that are Operations.
Returns:
A list of Tensors or IndexedSlices.
|
github-repos
|
def Append(self, value, timestamp):
timestamp = self._NormalizeTime(timestamp)
if self.data and timestamp < self.data[-1][1]:
raise RuntimeError("Next timestamp must be larger.")
self.data.append([value, timestamp])
|
Adds value at timestamp.
Values must be added in order of increasing timestamp.
Args:
value: An observed value.
timestamp: The timestamp at which value was observed.
Raises:
RuntimeError: If timestamp is smaller than the previous timestamp.
|
juraj-google-style
|
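A hedged sketch of the Append contract, assuming a hypothetical owning series object whose data list starts empty and whose _NormalizeTime leaves already-normalized timestamps unchanged:
series.Append(1.0, timestamp=100)
series.Append(2.0, timestamp=150)
series.Append(0.5, timestamp=120)  # raises RuntimeError: timestamps must increase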
def SaveGDAL(filename, rda):
if type(rda) is not rdarray:
raise Exception("A richdem.rdarray or numpy.ndarray is required!")
if not GDAL_AVAILABLE:
raise Exception("richdem.SaveGDAL() requires GDAL.")
driver = gdal.GetDriverByName('GTiff')
data_type = gdal.GDT_Float32
data_set = driver.Create(filename, xsize=rda.shape[1], ysize=rda.shape[0], bands=1, eType=data_type)
data_set.SetGeoTransform(rda.geotransform)
data_set.SetProjection(rda.projection)
band = data_set.GetRasterBand(1)
band.SetNoDataValue(rda.no_data)
band.WriteArray(np.array(rda))
for k,v in rda.metadata.items():
data_set.SetMetadataItem(str(k),str(v))
|
Save a GDAL file.
Saves a RichDEM array to a data file in GeoTIFF format.
If you need to do something more complicated, look at the source of this
function.
Args:
filename (str): Name of the raster file to be created
rda (rdarray): Data to save.
Returns:
No Return
|
juraj-google-style
|
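A hedged usage sketch for SaveGDAL, assuming the richdem package's LoadGDAL and FillDepressions helpers and illustrative file names:
import richdem as rd

dem = rd.LoadGDAL('input_dem.tif')   # returns an rdarray with geotransform/projection set
rd.FillDepressions(dem, in_place=True)
SaveGDAL('filled_dem.tif', dem)      # writes a single-band Float32 GeoTIFF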
def terminate(self, end):
if self.terminated:
raise TdlError('Cannot terminate a closed list.')
if (end == LIST_TYPE):
self.terminated = False
elif (end == EMPTY_LIST_TYPE):
if self._last_path:
self[self._last_path] = None
else:
self._avm = None
self.terminated = True
elif self._last_path:
self[self._last_path] = end
self.terminated = True
else:
raise TdlError('Empty list must be {} or {}'.format(LIST_TYPE, EMPTY_LIST_TYPE))
|
Set the value of the tail of the list.
Adding values via :meth:`append` places them on the `FIRST`
feature of some level of the feature structure (e.g.,
`REST.FIRST`), while :meth:`terminate` places them on the
final `REST` feature (e.g., `REST.REST`). If *end* is a
:class:`Conjunction` or :class:`Term`, it is typically a
:class:`Coreference`, otherwise *end* is set to
`tdl.EMPTY_LIST_TYPE` or `tdl.LIST_TYPE`. This method does
not necessarily close the list; if *end* is `tdl.LIST_TYPE`,
the list is left open, otherwise it is closed.
Args:
end (str, :class:`Conjunction`, :class:`Term`): value to
use as the end of the list.
|
codesearchnet
|
def __init__(self, mh_map: dict[str, ModelHandler]):
self._max_models = None
self._mh_map: dict[str, ModelHandler] = mh_map
self._key_to_last_update: dict[str, str] = defaultdict(str)
self._tag_map: dict[str, str] = OrderedDict()
self._proxy_map: dict[str, multi_process_shared.MultiProcessShared] = {}
|
Args:
mh_map: A map from keys to model handlers which can be used to load a
model.
|
github-repos
|
def match_opcodes(opcode_traces, lineno, op_match_list):
out = []
for trace in opcode_traces[lineno]:
for match_op, match_symbol in op_match_list:
if trace.op == match_op and match_symbol in [None, trace.symbol]:
out.append((trace.op, trace.symbol, trace.types))
return out
|
Get all opcodes matching op_match_list on a given line.
Args:
opcode_traces: traces
lineno: line number to get ops from.
op_match_list: [(opcode_name, symbol|None), ...]; None matches any symbol.
Returns:
A list of matching opcodes.
|
github-repos
|
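A hedged usage sketch for match_opcodes, assuming opcode_traces maps line numbers to trace objects carrying .op, .symbol and .types attributes:
matches = match_opcodes(
    opcode_traces, 12,
    [('LOAD_ATTR', None), ('CALL_FUNCTION', 'foo')])  # None matches any symbol
for op, symbol, types in matches:
    print(op, symbol, types)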
def __tomo_linear_inv(freqs, ops, weights=None, trace=None):
if (weights is not None):
W = np.array(weights)
if (W.ndim == 1):
W = np.diag(W)
S = np.array([vectorize(m).conj() for m in ops]).reshape(len(ops), ops[0].size)
if (weights is not None):
S = np.dot(W, S)
v = np.array(freqs)
if (weights is not None):
v = np.dot(W, freqs)
Sdg = S.T.conj()
inv = np.linalg.pinv(np.dot(Sdg, S))
ret = devectorize(np.dot(inv, np.dot(Sdg, v)))
if (trace is not None):
ret = ((trace * ret) / np.trace(ret))
return ret
|
Reconstruct a matrix through linear inversion.
Args:
freqs (list[float]): list of observed frequencies.
ops (list[np.array]): list of corresponding projectors.
weights (list[float] or array_like):
weights to be used for weighted fitting.
trace (float or None): trace of returned operator.
Returns:
numpy.array: A numpy array of the reconstructed operator.
|
codesearchnet
|
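A hedged self-check for the linear inversion, runnable from inside the defining module, assuming vectorize/devectorize are plain element-stacking helpers so that Tr(E·rho) equals vectorize(E).conj() dotted with vectorize(rho); with a complete Hermitian operator basis and exact frequencies the inversion recovers the input matrix up to numerical error:
import numpy as np

I2 = np.eye(2)
X = np.array([[0, 1], [1, 0]])
Y = np.array([[0, -1j], [1j, 0]])
Z = np.diag([1.0, -1.0])
ops = [I2, X, Y, Z]                                   # complete Hermitian basis
rho_true = np.array([[0.75, 0.25], [0.25, 0.25]])
freqs = [np.trace(op @ rho_true).real for op in ops]  # ideal "measured" values
rho_est = __tomo_linear_inv(freqs, ops)               # approximately rho_true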
def output_sector_csv(self,csv_path,file_dict_key,out_path):
csv_file = csv_path + "{0}_{1}_{2}_{3}.csv".format(
file_dict_key,
self.ensemble_name,
self.member,
self.run_date.strftime(self.date_format))
if exists(csv_file):
csv_data = pd.read_csv(csv_file)
if self.inds is None:
lon_obj = csv_data.loc[:,"Centroid_Lon"]
lat_obj = csv_data.loc[:,"Centroid_Lat"]
self.inds = np.where((self.ne_lat>=lat_obj)&(self.sw_lat<=lat_obj)\
&(self.ne_lon>=lon_obj)&(self.sw_lon<=lon_obj))[0]
if np.shape(self.inds)[0] > 0:
csv_data = csv_data.reindex(np.array(self.inds))
sector_csv_filename = out_path + "{0}_{1}_{2}_{3}.csv".format(
file_dict_key,
self.ensemble_name,
self.member,
self.run_date.strftime(self.date_format))
print("Output sector csv file " + sector_csv_filename)
csv_data.to_csv(sector_csv_filename,
na_rep="nan",
float_format="%0.5f",
index=False)
os.chmod(sector_csv_filename, 0o666)
else:
print('No {0} {1} sector data found'.format(self.member,
self.run_date.strftime("%Y%m%d")))
else:
print('No {0} {1} csv file found'.format(self.member,
self.run_date.strftime("%Y%m%d")))
return
|
Segment forecast tracks to only output data contained within a
region in the CONUS, as defined by the mapfile.
Args:
csv_path(str): Path to the full CONUS csv file.
file_dict_key(str): Dictionary key for the csv files,
currently either 'track_step' or 'track_total'
out_path (str): Path to output new segmented csv files.
Returns:
Segmented forecast tracks in a csv file.
|
juraj-google-style
|
def verify_fully_used_iterator(self, ds_fn, num_outputs, sparse_tensors=False, assert_items_equal=False):
self.verify_run_with_breaks(ds_fn, [num_outputs], num_outputs, sparse_tensors=sparse_tensors, assert_items_equal=assert_items_equal)
|
Verifies that saving and restoring a fully used iterator works.
Note that this only checks saving and restoring an iterator from which
`num_outputs` items have been produced but does not check for an
exhausted iterator, i.e., one from which an OutOfRange error has been
returned.
Args:
ds_fn: 0-argument function that returns a Dataset.
num_outputs: Total number of outputs expected from this Dataset.
sparse_tensors: Whether dataset is built from SparseTensor(s).
assert_items_equal: Tests the output has the expected elements regardless
of order.
Raises:
AssertionError if test fails.
|
github-repos
|
def get_cpu_vendor(cls, family, arch='x86'):
props = cls.get_cpu_props(family, arch)
vendor = 'generic'
try:
vendor = props.xpath('vendor/@name')[0]
except IndexError:
pass
return vendor
|
Get the CPU vendor; if the vendor is not available, 'generic' is returned.
Args:
family(str): CPU family
arch(str): CPU arch
Returns:
str: CPU vendor if found otherwise 'generic'
|
juraj-google-style
|
def concat(self, second_iterable):
if self.closed():
raise ValueError('Attempt to call concat() on a closed Queryable.')
if (not is_iterable(second_iterable)):
raise TypeError('Cannot compute concat() with second_iterable of non-iterable {0}'.format(str(type(second_iterable))[7:(- 1)]))
return self._create(itertools.chain(self, second_iterable))
|
Concatenates two sequences.
Note: This method uses deferred execution.
Args:
second_iterable: The sequence to concatenate on to the sequence.
Returns:
A Queryable over the concatenated sequences.
Raises:
ValueError: If the Queryable is closed().
TypeError: If second_iterable is not in fact iterable.
|
codesearchnet
|
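A hedged usage sketch for concat(), assuming the asq-style query() initiator that wraps a plain iterable in a Queryable:
from asq import query  # assumed entry point

result = query([1, 2, 3]).concat([4, 5]).to_list()
# -> [1, 2, 3, 4, 5]; evaluation is deferred until to_list() is called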
def Process(self, parser_mediator, zip_file, archive_members):
if (not self.REQUIRED_PATHS):
raise ValueError('REQUIRED_PATHS not specified')
if (not set(archive_members).issuperset(self.REQUIRED_PATHS)):
raise errors.WrongCompoundZIPPlugin(self.NAME)
logger.debug('Compound ZIP Plugin used: {0:s}'.format(self.NAME))
self.InspectZipFile(parser_mediator, zip_file)
|
Determines if this is the correct plugin; if so proceed with processing.
This method checks if the zip file contains the paths specified in
REQUIRED_PATHS. If all paths are present, the plugin logic processing
continues in InspectZipFile.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
zip_file (zipfile.ZipFile): the zip file. It should not be closed in
this method, but will be closed by the parser logic in czip.py.
archive_members (list[str]): file paths in the archive.
Raises:
UnableToParseFile: when the file cannot be parsed.
ValueError: if a subclass has not specified REQUIRED_PATHS.
|
codesearchnet
|
def get_config_file(basename):
locations = [
os.path.join(os.curdir, basename),
os.path.join(
os.path.expanduser("~"),
".config",
"scriptabit",
basename),
resource_filename(
Requirement.parse("scriptabit"),
os.path.join('scriptabit', basename))
]
for location in locations:
if os.path.isfile(location):
return location
|
Looks for a configuration file in 3 locations:
- the current directory
- the user config directory (~/.config/scriptabit)
- the version installed with the package (using setuptools resource API)
Args:
basename (str): The base filename.
Returns:
str: The full path to the configuration file.
|
juraj-google-style
|
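A usage sketch for get_config_file, resolving a configuration file through the three documented locations; note the function implicitly returns None when no candidate exists (the file name here is hypothetical):
cfg_path = get_config_file('scriptabit.cfg')
if cfg_path is not None:
    print('loading configuration from', cfg_path)
else:
    print('no configuration file found; falling back to defaults')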
def DisjoinCalendars(self, cutoff):
def TruncatePeriod(service_period, start, end):
service_period.start_date = max(service_period.start_date, start)
service_period.end_date = min(service_period.end_date, end)
dates_to_delete = []
for k in service_period.date_exceptions:
if (k < start) or (k > end):
dates_to_delete.append(k)
for k in dates_to_delete:
del service_period.date_exceptions[k]
year = int(cutoff[:4])
month = int(cutoff[4:6])
day = int(cutoff[6:8])
cutoff_date = datetime.date(year, month, day)
one_day_delta = datetime.timedelta(days=1)
before = (cutoff_date - one_day_delta).strftime('%Y%m%d')
for a in self.feed_merger.a_schedule.GetServicePeriodList():
TruncatePeriod(a, 0, before)
for b in self.feed_merger.b_schedule.GetServicePeriodList():
TruncatePeriod(b, cutoff, '9'*8)
|
Forces the old and new calendars to be disjoint about a cutoff date.
This truncates the service periods of the old schedule so that service
stops one day before the given cutoff date and truncates the new schedule
so that service only begins on the cutoff date.
Args:
cutoff: The cutoff date as a string in YYYYMMDD format. The timezone
is the same as used in the calendar.txt file.
|
juraj-google-style
|
def _parse_format_pages_isbn(html_chunk):
ppi = get_first_content(
html_chunk.find("div", {"class": "price-overflow"})
)
if not ppi:
return None, None, None
ppi = [x for x in ppi.split("<br />") if x.strip()][0]
isbn = dhtmlparser.parseString(ppi)
isbn = isbn.find("b")
isbn = isbn[0].getContent() if isbn else None
pages = None
book_format = None
details = ppi.split("|")
if len(details) >= 2:
book_format = details[0].strip()
pages = details[1].strip()
return book_format, pages, isbn
|
Parse format, number of pages and ISBN.
Args:
html_chunk (obj): HTMLElement containing slice of the page with details.
Returns:
tuple: (format, pages, isbn), all as string.
|
juraj-google-style
|
def table(name=None, mode='create', use_cache=True, priority='interactive', allow_large_results=False):
output = QueryOutput()
output._output_type = 'table'
output._table_name = name
output._table_mode = mode
output._use_cache = use_cache
output._priority = priority
output._allow_large_results = allow_large_results
return output
|
Construct a query output object where the result is a table
Args:
name: the result table name as a string or TableName; if None (the default), then a
temporary table will be used.
mode: one of 'create', 'overwrite' or 'append'. If 'create' (the default), the request
will fail if the table exists.
use_cache: whether to use past query results or ignore cache. Has no effect if destination is
specified (default True).
priority: one of 'batch' or 'interactive' (default). 'interactive' jobs should be scheduled
to run quickly but are subject to rate limits; 'batch' jobs could be delayed by as much
as three hours but are not rate-limited.
allow_large_results: whether to allow large results; i.e. compressed data over 100MB. This is
slower and requires a name to be specified (default False).
|
codesearchnet
|
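A hedged usage sketch for table(), assuming the datalab-style BigQuery API that consumes this QueryOutput object; the table name is illustrative:
output = table('mydataset.results', mode='overwrite', allow_large_results=True)
# The returned QueryOutput is then handed to the query-execution call that
# accepts an output specification for where results should be written.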