code stringlengths 20–4.93k | docstring stringlengths 33–1.27k | source stringclasses 3 values |
---|---|---|
def ensure_model_downloaded(repo_id: Optional[str]=None, revision: Optional[str]=None, local_dir: Optional[str]=None) -> str:
if local_dir is not None:
if os.path.exists(local_dir):
print(f'Using provided local directory: {local_dir}')
else:
os.makedirs(local_dir, exist_ok=True)
print(f'Created local directory: {local_dir}')
if repo_id is None:
raise ValueError('Either repo_id or local_dir must be provided')
print(f'Ensuring {repo_id} (revision: {revision or "latest"}) is downloaded...')
try:
download_dir = snapshot_download(repo_id, revision=revision, local_files_only=True, local_dir=local_dir)
print(f'Found model files locally at {download_dir}')
return download_dir
except Exception:
print(f'Downloading model files for {repo_id}...')
download_dir = snapshot_download(repo_id, revision=revision, local_files_only=False, local_dir=local_dir)
print(f'Downloaded model files to {download_dir}')
return download_dir
|
Ensures model files are downloaded locally, downloads them if not.
Returns path to local files.
Args:
repo_id: The Hugging Face model repo ID (required if local_dir not provided)
revision: Optional git revision to use
local_dir: Optional local directory path where model files should be stored/found
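For example, a minimal usage sketch (the repo ID and directory below are placeholders):
```python
# Download (or reuse a cached copy of) a Hugging Face model repo.
model_path = ensure_model_downloaded(repo_id="org/model-name")

# Pin a revision and keep the files in an explicit directory.
model_path = ensure_model_downloaded(
    repo_id="org/model-name", revision="main", local_dir="./weights"
)
```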
|
github-repos
|
def variables(self):
def deref(weak_v):
v = weak_v()
if v is None:
raise AssertionError('Called a function referencing variables which have been deleted. This likely means that function-local variables were created and not referenced elsewhere in the program. This is generally a mistake; consider storing variables in an object attribute on first call.')
return v
return tuple((deref(v) for v in self._weak_variables))
|
A sequence of variables accessed by this FuncGraph.
Note that functions keep only weak references to variables. Calling the
function after a variable it accesses has been deleted is an error.
Returns:
Sequence of variables for this func graph.
|
github-repos
|
def match(self, message: Message) -> bool:
if self.template:
return self.template.match(message)
return True
|
Matches a message with the behaviour's template
Args:
message(spade.message.Message): the message to match with
Returns:
bool: whether the message matches or not
|
juraj-google-style
|
def _aggregate_additional_loss(self, loss):
if not backend.is_float_dtype(loss.dtype):
loss = ops.cast(loss, dtype=backend.floatx())
return ops.sum(loss)
|
Aggregates losses from `add_loss`, regularizers and sublayers.
Args:
loss: A tensor representing the additional loss to aggregate.
Returns:
A tensor representing the summed loss, cast to the `floatx()` if
necessary.
|
github-repos
|
def get_observation_coordinates(self, x, y, hdulist_index):
return self.hdulist[hdulist_index].converter.get_inverse_converter().convert((x, y))
|
Retrieves the location of a point using the coordinate system of
the original observation, i.e. the original image before any
cutouts were done.
Returns:
(x, y) in the original image coordinate system.
@param x: x-pixel location in the cutout frame of reference
@param y: y-pixel location in the cutout frame of reference
@param hdulist_index: index of the hdu in hdulist that the given x/y corresponds to.
|
codesearchnet
|
def ReadFromFile(self, artifacts_reader, filename):
for artifact_definition in artifacts_reader.ReadFile(filename):
self.RegisterDefinition(artifact_definition)
|
Reads artifact definitions into the registry from a file.
Args:
artifacts_reader (ArtifactsReader): an artifacts reader.
filename (str): name of the file to read from.
|
juraj-google-style
|
class MeanMetricWrapper(Mean):
def __init__(self, fn, name=None, dtype=None, **kwargs):
super(MeanMetricWrapper, self).__init__(name=name, dtype=dtype)
self._fn = fn
self._fn_kwargs = kwargs
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = math_ops.cast(y_true, self._dtype)
y_pred = math_ops.cast(y_pred, self._dtype)
[y_true, y_pred], sample_weight = metrics_utils.ragged_assert_compatible_and_get_flat_values([y_true, y_pred], sample_weight)
y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true)
ag_fn = autograph.tf_convert(self._fn, ag_ctx.control_status_ctx())
matches = ag_fn(y_true, y_pred, **self._fn_kwargs)
return super(MeanMetricWrapper, self).update_state(matches, sample_weight=sample_weight)
def get_config(self):
config = {}
if type(self) is MeanMetricWrapper:
config['fn'] = self._fn
for k, v in self._fn_kwargs.items():
config[k] = backend.eval(v) if is_tensor_or_variable(v) else v
base_config = super(MeanMetricWrapper, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
fn = config.pop('fn', None)
if cls is MeanMetricWrapper:
return cls(get(fn), **config)
return super(MeanMetricWrapper, cls).from_config(config)
|
Wraps a stateless metric function with the Mean metric.
You could use this class to quickly build a mean metric from a function. The
function needs to have the signature `fn(y_true, y_pred)` and return a
per-sample loss array. `MeanMetricWrapper.result()` will return
the average metric value across all samples seen so far.
For example:
```python
def accuracy(y_true, y_pred):
return tf.cast(tf.math.equal(y_true, y_pred), tf.float32)
accuracy_metric = tf.keras.metrics.MeanMetricWrapper(fn=accuracy)
keras_model.compile(..., metrics=accuracy_metric)
```
Args:
fn: The metric function to wrap, with signature `fn(y_true, y_pred,
**kwargs)`.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
**kwargs: Keyword arguments to pass on to `fn`.
|
github-repos
|
def start_engine(self, **kwargs):
self.current = WFCurrent(**kwargs)
self.wf_state = {'in_external': False, 'finished': False}
if (not self.current.new_token):
self.wf_state = self.current.wf_cache.get(self.wf_state)
self.current.workflow_name = self.wf_state['name']
if ('subject' in self.wf_state):
self.current.input['id'] = self.wf_state['subject']
self.current.task_data['object_id'] = self.wf_state['subject']
self.check_for_authentication()
self.check_for_permission()
self.workflow = self.load_or_create_workflow()
if ('form' in self.current.input):
form = self.current.input['form']
if ('form_name' in form):
self.current.task_data[form['form_name']] = form
start_init_values = (self.workflow_spec.wf_properties.get('init', 'False') == 'True')
if start_init_values:
WFInit = get_object_from_path(settings.WF_INITIAL_VALUES)()
WFInit.assign_wf_initial_values(self.current)
log_msg = ('\n\n::::::::::: ENGINE STARTED :::::::::::\n\tWF: %s (Possible) TASK:%s\n\tCMD:%s\n\tSUBCMD:%s' % (self.workflow.name, self.workflow.get_tasks(Task.READY), self.current.input.get('cmd'), self.current.input.get('subcmd')))
log.debug(log_msg)
sys._zops_wf_state_log = log_msg
self.current.workflow = self.workflow
|
Initializes the workflow with given request, response objects and diagram name.
Args:
session:
input:
workflow_name (str): Name of workflow diagram without ".bpmn" suffix.
File must be placed under one of configured :py:attr:`~zengine.settings.WORKFLOW_PACKAGES_PATHS`
|
codesearchnet
|
def from_df(cls, data, entities=None, source='contrast'):
variables = []
for col in data.columns:
_data = pd.DataFrame(data[col].values, columns=['amplitude'])
if entities is not None:
_data = pd.concat([_data, entities], axis=1, sort=True)
variables.append(SimpleVariable(name=col, data=_data, source=source))
return BIDSVariableCollection(variables)
|
Create a Collection from a pandas DataFrame.
Args:
data (DataFrame): The DataFrame to convert to a Collection. Each
column will be converted to a SimpleVariable.
entities (DataFrame): An optional second DataFrame containing
entity information.
source (str): The value to set as the source for all Variables.
Returns:
A BIDSVariableCollection.
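A minimal sketch of building a collection from a DataFrame (column names are illustrative; assumes the classmethod lives on ``BIDSVariableCollection``):
```python
import pandas as pd

df = pd.DataFrame({"cond_a": [1.0, 0.0, 1.0], "cond_b": [0.5, 0.5, 0.0]})
collection = BIDSVariableCollection.from_df(df, source="contrast")
# Each column ("cond_a", "cond_b") becomes a SimpleVariable in the collection.
```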
|
juraj-google-style
|
def _representative_structure_setter(self, structprop, keep_chain, clean=True, keep_chemicals=None, out_suffix='_clean', outdir=None, force_rerun=False):
if (not outdir):
outdir = self.structure_dir
if (not outdir):
raise ValueError('Output directory must be specified')
new_id = 'REP-{}'.format(structprop.id)
if self.structures.has_id(new_id):
if force_rerun:
existing = self.structures.get_by_id(new_id)
self.structures.remove(existing)
if clean:
final_pdb = structprop.clean_structure(outdir=outdir, out_suffix=out_suffix, keep_chemicals=keep_chemicals, keep_chains=keep_chain, force_rerun=force_rerun)
log.debug('{}: cleaned structure and saved new file at {}'.format(structprop.id, final_pdb))
else:
final_pdb = structprop.structure_path
self.representative_structure = StructProp(ident=new_id, chains=keep_chain, mapped_chains=keep_chain, structure_path=final_pdb, file_type='pdb')
self.representative_chain = keep_chain
self.representative_structure.update(structprop.get_dict_with_chain(chain=keep_chain), only_keys=self.__representative_structure_attributes, overwrite=True)
self.representative_structure.original_structure_id = structprop.id
self.representative_structure.parse_structure()
self.structures.append(self.representative_structure)
|
Set the representative structure by 1) cleaning it and 2) copying over attributes of the original structure.
The structure is copied because the chains stored may change, and cleaning it makes a new PDB file.
Args:
structprop (StructProp): StructProp object to set as representative
keep_chain (str): Chain ID to keep
clean (bool): If the PDB file should be cleaned (see ssbio.structure.utils.cleanpdb)
keep_chemicals (str, list): Keep specified chemical names
out_suffix (str): Suffix to append to clean PDB file
outdir (str): Path to output directory
Returns:
StructProp: representative structure
|
codesearchnet
|
def group(self, group_type, name, **kwargs):
group_obj = Group(group_type, name, **kwargs)
return self._group(group_obj)
|
Add Group data to Batch object.
Args:
group_type (str): The ThreatConnect defined Group type.
name (str): The name for this Group.
date_added (str, kwargs): The date timestamp the Indicator was created.
xid (str, kwargs): The external id for this Group.
Returns:
obj: An instance of Group.
|
codesearchnet
|
def set_computer_name(name):
if six.PY2:
name = _to_unicode(name)
if windll.kernel32.SetComputerNameExW(win32con.ComputerNamePhysicalDnsHostname, name):
ret = {'Computer Name': {'Current': get_computer_name()}}
pending = get_pending_computer_name()
if (pending not in (None, False)):
ret['Computer Name']['Pending'] = pending
return ret
return False
|
Set the Windows computer name
Args:
name (str):
The new name to give the computer. Requires a reboot to take effect.
Returns:
dict:
Returns a dictionary containing the old and new names if successful.
``False`` if not.
CLI Example:
.. code-block:: bash
salt 'minion-id' system.set_computer_name 'DavesComputer'
|
codesearchnet
|
def get_partstudio_tessellatededges(self, did, wid, eid):
return self._api.request('get', '/api/partstudios/d/' + did + '/w/' + wid + '/e/' + eid + '/tessellatededges')
|
Gets the tessellation of the edges of all parts in a part studio.
Args:
- did (str): Document ID
- wid (str): Workspace ID
- eid (str): Element ID
Returns:
- requests.Response: Onshape response data
|
codesearchnet
|
def configure_callbacks(callbacks, model, do_validation=False, batch_size=None, epochs=None, steps_per_epoch=None, samples=None, verbose=1, count_mode='steps', mode=ModeKeys.TRAIN):
if isinstance(callbacks, CallbackList):
return callbacks
if not callbacks:
callbacks = []
if mode == ModeKeys.TRAIN:
model.history = History()
callbacks = [BaseLogger()] + (callbacks or []) + [model.history]
if verbose:
callbacks.append(ProgbarLogger(count_mode))
callback_list = CallbackList(callbacks)
callback_model = model._get_callback_model()
callback_list.set_model(callback_model)
set_callback_parameters(callback_list, model, do_validation=do_validation, batch_size=batch_size, epochs=epochs, steps_per_epoch=steps_per_epoch, samples=samples, verbose=verbose, mode=mode)
callback_list.model.stop_training = False
return callback_list
|
Configures callbacks for use in various training loops.
Args:
callbacks: List of Callbacks.
model: Model being trained.
do_validation: Whether or not validation loop will be run.
batch_size: Number of samples per batch.
epochs: Number of epochs to train.
steps_per_epoch: Number of batches to run per training epoch.
samples: Number of training samples.
verbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger.
count_mode: One of 'steps' or 'samples'. Per-batch or per-sample count.
mode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT.
Which loop mode to configure callbacks for.
Returns:
Instance of CallbackList used to control all Callbacks.
|
github-repos
|
def DownloadPqlResultToCsv(self, pql_query, file_handle, values=None):
pql_writer = csv.writer(file_handle, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
self._PageThroughPqlSet(pql_query, pql_writer.writerow, values)
|
Downloads the results of a PQL query to CSV.
Args:
pql_query: str a statement filter to apply (the query should not include
the limit or the offset)
file_handle: file the file object to write to.
values: [optional] A dict of python objects or a list of raw SOAP values
to bind to the pql_query.
|
codesearchnet
|
def add(self, timestamp, information):
try:
item = Schema(CollectorStage.schema_event_items()).validate({'timestamp': timestamp, 'information': information})
self.events.append(item)
except SchemaError as exception:
Logger.get_logger(__name__).error(exception)
raise RuntimeError(str(exception))
|
Add event information.
Args:
timestamp (int): event timestamp.
information (dict): event information.
Raises:
RuntimeError: when validation of parameters has failed.
|
codesearchnet
|
def _close_open_file(self, file_des):
self.open_files[file_des] = None
heapq.heappush(self._free_fd_heap, file_des)
|
Remove file object with given descriptor from the list
of open files.
Sets the entry in open_files to None.
Args:
file_des: Descriptor of file object to be removed from
open files list.
|
juraj-google-style
|
@contextlib.contextmanager
def tee_log(tee_file: TextIO, loglevel: int) -> None:
handler = get_monochrome_handler(stream=tee_file)
handler.setLevel(loglevel)
rootlogger = logging.getLogger()
rootlogger.addHandler(handler)
with TeeContextManager(tee_file, capture_stdout=True):
with TeeContextManager(tee_file, capture_stderr=True):
try:
yield
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value,
exc_traceback)
log.critical("\n" + "".join(lines))
raise
|
Context manager to add a file output stream to our logging system.
Args:
tee_file: file-like object to write to
loglevel: log level (e.g. ``logging.DEBUG``) to use for this stream
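A usage sketch, assuming the function is entered with ``with`` as a context manager as described above:
```python
import logging

with open("run.log", "w") as tee_file:
    with tee_log(tee_file, loglevel=logging.INFO):
        logging.getLogger(__name__).info("written to the console and to run.log")
        print("stdout is captured as well")
```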
|
juraj-google-style
|
def transform_column_source_data(data, buffers=None, cols=None):
to_transform = (set(data) if (cols is None) else set(cols))
data_copy = {}
for key in to_transform:
if (pd and isinstance(data[key], (pd.Series, pd.Index))):
data_copy[key] = transform_series(data[key], buffers=buffers)
elif isinstance(data[key], np.ndarray):
data_copy[key] = transform_array(data[key], buffers=buffers)
else:
data_copy[key] = traverse_data(data[key], buffers=buffers)
return data_copy
|
Transform ``ColumnSourceData`` data to a serialized format
Args:
data (dict) : the mapping of names to data columns to transform
buffers (set, optional) :
If binary buffers are desired, the buffers parameter may be
provided, and any columns that may be sent as binary buffers
will be added to the set. If None, then only base64 encoding
will be used (default: None)
**This is an "out" parameter**. The values it contains will be
modified in-place.
cols (list[str], optional) :
Optional list of subset of columns to transform. If None, all
columns will be transformed (default: None)
Returns:
JSON compatible dict
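A small sketch of typical input (assumes numpy is available):
```python
import numpy as np

data = {"x": np.arange(5), "y": [10, 20, 30, 40, 50]}
serialized = transform_column_source_data(data, cols=["x"])  # transform only column "x"
```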
|
codesearchnet
|
def set_position_p(self, pvalue):
pvalue_msb = int(pvalue) >> 8
pvalue_lsb = int(pvalue) & 0xff
data = []
data.append(0x0B)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(POSITION_KP_RAM)
data.append(BYTE2)
data.append(pvalue_lsb)
data.append(pvalue_msb)
send_data(data)
|
Set the P gain of the position PID
Args:
pvalue (int): P value
|
juraj-google-style
|
def inversion(origin=(0, 0, 0)):
mat = -np.eye(4)
mat[3, 3] = 1
mat[0:3, 3] = 2 * np.array(origin)
return SymmOp(mat)
|
Inversion symmetry operation about axis.
Args:
origin (3x1 array): Origin of the inversion operation. Defaults
to [0, 0, 0].
Returns:
SymmOp representing an inversion operation about the origin.
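A short usage sketch (assuming pymatgen's ``SymmOp.operate``); an inversion maps a point p to 2*origin - p:
```python
inv = inversion()
inv.operate([1.0, 2.0, 3.0])          # -> array([-1., -2., -3.])

inv_shifted = inversion(origin=(0, 0, 1))
inv_shifted.operate([0.0, 0.0, 0.0])  # -> array([0., 0., 2.])
```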
|
codesearchnet
|
def transform_to_mods_mono(marc_xml, uuid, url):
marc_xml = _read_content_or_path(marc_xml)
transformed = xslt_transformation(
marc_xml,
_absolute_template_path("MARC21slim2MODS3-4-NDK.xsl")
)
return _apply_postprocessing(
marc_xml=marc_xml,
xml=transformed,
func=mods_postprocessor.postprocess_monograph,
uuid=uuid,
url=url,
)
|
Convert `marc_xml` to MODS data format.
Args:
marc_xml (str): Filename or XML string. Don't use ``\\n`` in case of
filename.
uuid (str): UUID string giving the package ID.
url (str): URL of the publication (public or not).
Returns:
list: Collection of transformed xml strings.
|
juraj-google-style
|
def get_residue_annotations(self, start_resnum, end_resnum=None):
if not end_resnum:
end_resnum = start_resnum
f = SeqFeature(FeatureLocation(start_resnum - 1, end_resnum))
return f.extract(self).letter_annotations
|
Retrieve letter annotations for a residue or a range of residues
Args:
start_resnum (int): Residue number
end_resnum (int): Optional residue number, specify if a range is desired
Returns:
dict: Letter annotations for this residue or residues
|
juraj-google-style
|
def predict(parameters, X):
A2, cache = forward_propagation(X, parameters)
predictions = np.array([1 if (i > 0.5) else 0 for i in A2[0]])
return predictions
|
Using the learned parameters, predicts a class for each example in X
Arguments:
parameters -- python dictionary containing your parameters
X -- input data of size (n_x, m)
Returns:
predictions -- vector of predictions of our model (red: 0 / blue: 1)
|
juraj-google-style
|
def check_destinations(destinations):
if isinstance(destinations, (resource_variable_ops.BaseResourceVariable, tensor_lib.Tensor)):
return bool(destinations.device)
return bool(destinations)
|
Checks whether `destinations` is not empty.
Args:
destinations: a `DistributedValues`, variable, or string object.
Returns:
Boolean which is True if `destinations` is not empty.
|
github-repos
|
def post_content(url, headers={}, post_data={}, decoded=True, **kwargs):
if kwargs.get('post_data_raw'):
logging.debug('post_content: %s\npost_data_raw: %s' % (url, kwargs['post_data_raw']))
else:
logging.debug('post_content: %s\npost_data: %s' % (url, post_data))
req = request.Request(url, headers=headers)
if cookies:
cookies.add_cookie_header(req)
req.headers.update(req.unredirected_hdrs)
if kwargs.get('post_data_raw'):
post_data_enc = bytes(kwargs['post_data_raw'], 'utf-8')
else:
post_data_enc = bytes(parse.urlencode(post_data), 'utf-8')
response = urlopen_with_retry(req, data=post_data_enc)
data = response.read()
content_encoding = response.getheader('Content-Encoding')
if content_encoding == 'gzip':
data = ungzip(data)
elif content_encoding == 'deflate':
data = undeflate(data)
if decoded:
charset = match1(
response.getheader('Content-Type'), r'charset=([\w-]+)'
)
if charset is not None:
data = data.decode(charset)
else:
data = data.decode('utf-8')
return data
|
Post the content of a URL by sending an HTTP POST request.
Args:
url: A URL.
headers: Request headers used by the client.
post_data: Dict of form data to send in the request body (URL-encoded unless post_data_raw is given).
decoded: Whether to decode the response body using UTF-8 or the charset specified in Content-Type.
Returns:
The content as a string.
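A minimal usage sketch (URL and form fields are placeholders):
```python
html = post_content(
    "https://example.com/api/login",
    headers={"User-Agent": "Mozilla/5.0"},
    post_data={"username": "alice", "password": "secret"},
)
```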
|
juraj-google-style
|
def add_event(self, event_):
conv_event = self._wrap_event(event_)
if (conv_event.id_ not in self._events_dict):
self._events.append(conv_event)
self._events_dict[conv_event.id_] = conv_event
else:
logger.info('Conversation %s ignoring duplicate event %s', self.id_, conv_event.id_)
return None
return conv_event
|
Add an event to the conversation.
This method is used by :class:`.ConversationList` to maintain this
instance.
Args:
event_: ``Event`` message.
Returns:
:class:`.ConversationEvent` representing the event.
|
codesearchnet
|
def _kl_uniform_uniform(a, b, name=None):
with tf.name_scope(name or "kl_uniform_uniform"):
final_batch_shape = distribution_util.get_broadcast_shape(
a.low, b.low, a.high, b.high)
dtype = dtype_util.common_dtype(
[a.low, a.high, b.low, b.high], tf.float32)
return tf.where((b.low <= a.low) & (a.high <= b.high),
tf.math.log(b.high - b.low) - tf.math.log(a.high - a.low),
tf.broadcast_to(
dtype_util.as_numpy_dtype(dtype)(np.inf),
final_batch_shape))
|
Calculate the batched KL divergence KL(a || b) with a and b Uniform.
Note that the KL divergence is infinite if the support of `a` is not a subset
of the support of `b`.
Args:
a: instance of a Uniform distribution object.
b: instance of a Uniform distribution object.
name: (optional) Name to use for created operations.
default is "kl_uniform_uniform".
Returns:
Batchwise KL(a || b)
|
juraj-google-style
|
def security_label_pivot(self, security_label_resource):
resource = self.copy()
resource._request_uri = '{}/{}'.format(security_label_resource.request_uri, resource._request_uri)
return resource
|
Pivot point on security labels for this resource.
This method will return all *resources* (group, indicators, task,
victims, etc) for this resource that have the provided security
label applied.
**Example Endpoints URI's**
+--------------+----------------------------------------------------------------------+
| HTTP Method | API Endpoint URI's |
+==============+======================================================================+
| GET | /v2/securityLabels/{resourceId}/groups/{resourceType} |
+--------------+----------------------------------------------------------------------+
| GET | /v2/securityLabels/{resourceId}/groups/{resourceType}/{uniqueId} |
+--------------+----------------------------------------------------------------------+
| GET | /v2/securityLabels/{resourceId}/indicators/{resourceType} |
+--------------+----------------------------------------------------------------------+
| GET | /v2/securityLabels/{resourceId}/indicators/{resourceType}/{uniqueId} |
+--------------+----------------------------------------------------------------------+
Args:
security_label_resource: The security label resource to pivot on (identified by security label name).
|
codesearchnet
|
def GetEnvironmentVariable(self, name):
name = name.upper()
return self._environment_variables.get(name, None)
|
Retrieves an environment variable.
Args:
name (str): name of the environment variable.
Returns:
EnvironmentVariableArtifact: environment variable artifact or None
if there was no value set for the given name.
|
codesearchnet
|
def process_api_config_response(self, config_json):
with self._config_lock:
self._add_discovery_config()
for config in config_json.get('items', []):
lookup_key = config.get('name', ''), config.get('version', '')
self._configs[lookup_key] = config
for config in self._configs.itervalues():
name = config.get('name', '')
api_version = config.get('api_version', '')
path_version = config.get('path_version', '')
sorted_methods = self._get_sorted_methods(config.get('methods', {}))
for method_name, method in sorted_methods:
self._save_rest_method(method_name, name, path_version, method)
|
Parses a JSON API config and registers methods for dispatch.
Side effects:
Parses method name, etc. for all methods and updates the indexing
data structures with the information.
Args:
config_json: A dict, the JSON body of the getApiConfigs response.
|
juraj-google-style
|
def split_input(cls, mapper_spec):
reader_spec = cls.get_params(mapper_spec, allow_old=False)
bucket = reader_spec[cls.BUCKET_NAME_PARAM]
filenames = reader_spec[cls.OBJECT_NAMES_PARAM]
delimiter = reader_spec.get(cls.DELIMITER_PARAM)
account_id = reader_spec.get(cls._ACCOUNT_ID_PARAM)
buffer_size = reader_spec.get(cls.BUFFER_SIZE_PARAM)
fail_on_missing_input = reader_spec.get(cls.FAIL_ON_MISSING_INPUT)
all_filenames = []
for filename in filenames:
if filename.endswith("*"):
all_filenames.extend(
[file_stat.filename for file_stat in cloudstorage.listbucket(
"/" + bucket + "/" + filename[:-1], delimiter=delimiter,
_account_id=account_id)])
else:
all_filenames.append("/%s/%s" % (bucket, filename))
readers = []
for shard in range(0, mapper_spec.shard_count):
shard_filenames = all_filenames[shard::mapper_spec.shard_count]
if shard_filenames:
reader = cls(
shard_filenames, buffer_size=buffer_size, _account_id=account_id,
delimiter=delimiter)
reader._fail_on_missing_input = fail_on_missing_input
readers.append(reader)
return readers
|
Returns a list of input readers.
An equal number of input files are assigned to each shard (+/- 1). If there
are fewer files than shards, fewer than the requested number of shards will
be used. Input files are currently never split (although for some formats
could be and may be split in a future implementation).
Args:
mapper_spec: an instance of model.MapperSpec.
Returns:
A list of InputReaders. None when no input data can be found.
|
juraj-google-style
|
def any(x, axis=None, keepdims=False):
x = math_ops.cast(x, dtypes_module.bool)
return math_ops.reduce_any(x, axis, keepdims)
|
Bitwise reduction (logical OR).
Args:
x: Tensor or variable.
axis: axis along which to perform the reduction.
keepdims: whether to drop or broadcast the reduction axes.
Returns:
A bool tensor with the reduction result.
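A small sketch, assuming TensorFlow is imported and this backend function is in scope (it shadows the builtin ``any`` here):
```python
import tensorflow as tf

x = tf.constant([[0, 1, 0], [0, 0, 0]])
any(x, axis=1)  # -> boolean tensor [True, False]
```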
|
github-repos
|
def distance(self, other):
return np.linalg.norm(other.coords - self.coords)
|
Get distance between two sites.
Args:
other: Other site.
Returns:
Distance (float)
|
juraj-google-style
|
def write_version_and_dims(version, dims, f):
f.write(('#' + version + '\n'))
f.write((dims[0] + '\t' + dims[1] + '\t' + dims[2] + '\t' + dims[3] + '\n'))
|
Write first two lines of gct file.
Args:
version (string): 1.3 by default
dims (list of strings): length = 4
f (file handle): handle of output file
Returns:
nothing
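For example, writing the header of a GCT file for a 10x5 data matrix with 3 row-metadata and 4 column-metadata fields (a sketch with illustrative dims):
```python
with open("example.gct", "w") as f:
    write_version_and_dims("1.3", ["10", "5", "3", "4"], f)
# example.gct now starts with:
# #1.3
# 10\t5\t3\t4   (tab-separated)
```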
|
codesearchnet
|
async def get_entry(config, url):
previous = config.cache.get(
'entry', url,
schema_version=SCHEMA_VERSION) if config.cache else None
headers = previous.caching if previous else None
request = await utils.retry_get(config, url, headers=headers)
if not request or not request.success:
LOGGER.error("Could not get entry %s: %d", url,
request.status if request else -1)
return None, previous, False
if request.cached:
return previous, previous, False
current = Entry(request)
if config.cache:
config.cache.set('entry', url, current)
return current, previous, (not previous
or previous.digest != current.digest
or previous.status != current.status)
|
Given an entry URL, return the entry
Arguments:
config -- the configuration
url -- the URL of the entry
Returns: 3-tuple of (current, previous, updated)
|
juraj-google-style
|
def _check_multiple_access_to_resources(self, captured_resources, exclusive_resource_access):
for sg in ops.get_collection(CRITICAL_SECTION_EXECUTIONS):
if self._is_self_handle(sg.handle):
continue
if not (exclusive_resource_access or sg.exclusive_resource_access):
continue
resource_intersection = captured_resources.intersection(sg.resources)
if resource_intersection:
raise ValueError(f"This execution would access resources: {list(resource_intersection)}. Either this lock (CriticalSection: {self._handle}) or lock '{sg}' (CriticalSection: {sg.handle}) requested exclusive resource access of this resource. Did you mean to call execute with keyword argument exclusive_resource_access=False?")
|
Raise if captured_resources are accessed by another CriticalSection.
Args:
captured_resources: Set of tensors of type resource.
exclusive_resource_access: Whether this execution requires exclusive
resource access.
Raises:
ValueError: If any tensors in `captured_resources` are also accessed
by another `CriticalSection`, and at least one of them requires
exclusive resource access.
|
github-repos
|
def get_size(self, value=None):
if value is None:
value = self._value
if hasattr(value, 'get_size'):
return value.get_size()
return len(self.pack(value))
|
Return the size in bytes.
Args:
value (bytes): In structs, the user can assign other value instead
of this class' instance. Here, in such cases, ``self`` is a
class attribute of the struct.
Returns:
int: The address size in bytes.
|
juraj-google-style
|
def RunScripts(self, script_dict):
metadata_types = ['%s-script-url', '%s-script']
metadata_keys = [key % self.script_type for key in metadata_types]
metadata_keys = [key for key in metadata_keys if script_dict.get(key)]
if not metadata_keys:
self.logger.info('No %s scripts found in metadata.', self.script_type)
for metadata_key in metadata_keys:
metadata_script = script_dict.get(metadata_key)
self._MakeExecutable(metadata_script)
self._RunScript(metadata_key, metadata_script)
|
Run the metadata scripts; execute a URL script first if one is provided.
Args:
script_dict: a dictionary mapping metadata keys to script files.
|
juraj-google-style
|
def array_to_int_csv(array_data):
flattened_array = array_data.flatten()
array_as_strings = [item.astype(int).astype(str) for item in flattened_array]
return ','.join(array_as_strings)
|
Converts all elements in a numerical array to a comma-separated string.
Args:
array_data: Numerical array to convert.
Returns:
String containing array values as integers, separated by commas.
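For example:
```python
import numpy as np

# Values are truncated to integers before joining.
array_to_int_csv(np.array([[1.2, 2.7], [3.0, 4.9]]))  # -> '1,2,3,4'
```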
|
github-repos
|
def run(in_file_nose, out_dir_unitth):
suites = Converter.read_nose(in_file_nose)
Converter.write_unitth(suites, out_dir_unitth)
|
Convert nose-style test reports to UnitTH-style test reports by splitting modules into separate XML files
Args:
in_file_nose (:obj:`str`): path to nose-style test report
out_dir_unitth (:obj:`str`): path to directory in which to save UnitTH-style test reports
|
codesearchnet
|
def __init__(self, *args, allow_partial: bool=False, sealed: Optional[bool]=None, root_path: Optional[utils.KeyPath]=None, explicit_init: bool=False, **kwargs):
if sealed is None:
sealed = not self.__class__.allow_symbolic_mutation
if not isinstance(allow_partial, bool):
raise TypeError(f"Expect bool type for argument 'allow_partial' in symbolic.Object.__init__ but encountered {allow_partial}.")
super().__init__(allow_partial=allow_partial, accessor_writable=self.__class__.allow_symbolic_assignment, sealed=sealed, root_path=root_path, init_super=not explicit_init)
_, unmatched_keys = self.__class__.__schema__.resolve(list(kwargs.keys()))
if unmatched_keys:
arg_phrase = utils.auto_plural(len(unmatched_keys), 'argument')
keys_str = utils.comma_delimited_str(unmatched_keys)
raise TypeError(f'{self.__class__.__name__}.__init__() got unexpected keyword {arg_phrase}: {keys_str}')
field_args = {}
init_arg_names = self.__class__.init_arg_list
if args:
if not self.__class__.__schema__.fields:
raise TypeError(f'{self.__class__.__name__}() takes no arguments.')
elif init_arg_names and init_arg_names[-1].startswith('*'):
vararg_name = init_arg_names[-1][1:]
vararg_field = self.__class__.__schema__.get_field(vararg_name)
assert vararg_field is not None
num_named_args = len(init_arg_names) - 1
field_args[vararg_name] = list(args[num_named_args:])
args = args[:num_named_args]
elif len(args) > len(init_arg_names):
arg_phrase = utils.auto_plural(len(init_arg_names), 'argument')
was_phrase = utils.auto_plural(len(args), 'was', 'were')
raise TypeError(f'{self.__class__.__name__}.__init__() takes {len(init_arg_names)} positional {arg_phrase} but {len(args)} {was_phrase} given.')
for i, arg_value in enumerate(args):
arg_name = init_arg_names[i]
field_args[arg_name] = arg_value
for k, v in kwargs.items():
if k in field_args:
values_str = utils.comma_delimited_str([field_args[k], v])
raise TypeError(f"{self.__class__.__name__}.__init__() got multiple values for argument '{k}': {values_str}.")
field_args[k] = v
if not base.accepts_partial(self):
missing_args = []
for field in self.__class__.__schema__.fields.values():
if not field.value.has_default and isinstance(field.key, pg_typing.ConstStrKey) and (field.key not in field_args):
missing_args.append(str(field.key))
if missing_args:
arg_phrase = utils.auto_plural(len(missing_args), 'argument')
keys_str = utils.comma_delimited_str(missing_args)
raise TypeError(f'{self.__class__.__name__}.__init__() missing {len(missing_args)} required {arg_phrase}: {keys_str}.')
self._set_raw_attr('_sym_attributes', pg_dict.Dict(field_args, value_spec=self.__class__.sym_fields, allow_partial=allow_partial, sealed=sealed, accessor_writable=True, root_path=root_path, as_object_attributes_container=True))
self._sym_attributes.sym_setparent(self)
self._on_init()
self.seal(sealed)
|
Create an Object instance.
Args:
*args: positional arguments.
allow_partial: If True, the object can be partial.
sealed: If True, seal the object from future modification (unless under
a `pg.seal(False)` context manager). If False, treat the object as
unsealed. If None, it's determined by `cls.allow_symbolic_mutation`.
root_path: The symbolic path for current object. By default it's None,
which indicates that newly constructed object does not have a parent.
explicit_init: Should set to `True` when `__init__` is called via
`pg.Object.__init__` instead of `super().__init__`.
**kwargs: key/value arguments that align with the schema. All required
keys in the schema must be specified, and values should be acceptable
according to their value spec.
Raises:
KeyError: When required key(s) are missing.
ValueError: When value(s) are not acceptable by their value spec.
|
github-repos
|
async def get_word(self, term: str) -> 'asyncurban.word.Word':
resp = await self._get(term=term)
return Word(resp['list'][0])
|
Gets the first matching word available.
Args:
term: The word to be defined.
Returns:
The closest matching :class:`Word` from UrbanDictionary.
Raises:
UrbanConnectionError: If the response status isn't ``200``.
WordNotFoundError: If the response doesn't contain data (i.e. no word found).
|
juraj-google-style
|
def configure(self, argv=('',), **kwargs):
parser = argparse_flags.ArgumentParser(prog='tensorboard', description='TensorBoard is a suite of web applications for inspecting and understanding your TensorFlow runs and graphs. https://github.com/tensorflow/tensorboard')
for loader in self.plugin_loaders:
loader.define_flags(parser)
arg0 = (argv[0] if argv else '')
flags = parser.parse_args(argv[1:])
self.cache_key = manager.cache_key(working_directory=os.getcwd(), arguments=argv[1:], configure_kwargs=kwargs)
if (absl_flags and arg0):
for flag in set(absl_flags.FLAGS.get_key_flags_for_module(arg0)):
if hasattr(flags, flag.name):
raise ValueError(('Conflicting Abseil flag: %s' % flag.name))
setattr(flags, flag.name, flag.value)
for (k, v) in kwargs.items():
if (not hasattr(flags, k)):
raise ValueError(('Unknown TensorBoard flag: %s' % k))
setattr(flags, k, v)
for loader in self.plugin_loaders:
loader.fix_flags(flags)
self.flags = flags
return [arg0]
|
Configures TensorBoard behavior via flags.
This method will populate the "flags" property with an argparse.Namespace
representing flag values parsed from the provided argv list, overridden by
explicit flags from remaining keyword arguments.
Args:
argv: Can be set to CLI args equivalent to sys.argv; the first arg is
taken to be the name of the path being executed.
kwargs: Additional arguments will override what was parsed from
argv. They must be passed as Python data structures, e.g.
`foo=1` rather than `foo="1"`.
Returns:
Either argv[:1] if argv was non-empty, or [''] otherwise, as a mechanism
for absl.app.run() compatibility.
Raises:
ValueError: If flag values are invalid.
|
codesearchnet
|
def split_strings(string, separators):
logger = logging.getLogger('extract_vcf.split_strings')
logger.debug("splitting string '{0}' with separators {1}".format(
string, separators
))
results = []
def recursion(recursive_string, separators, i=1):
if i == len(separators):
for value in recursive_string.split(separators[i-1]):
logger.debug("Adding {0} to results".format(value))
results.append(value)
else:
for value in recursive_string.split(separators[i-1]):
recursion(value, separators, i+1)
if len(separators) > 0:
recursion(string, separators)
else:
results = [string]
return results
|
Split a string with arbitrary number of separators.
Return a list with the split values
Arguments:
string (str): ex. "a:1|2,b:2"
separators (list): ex. [',',':','|']
Returns:
results (list) : ex. ['a','1','2','b','2']
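A worked example matching the arguments above:
```python
split_strings("a:1|2,b:2", [",", ":", "|"])
# -> ['a', '1', '2', 'b', '2']
```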
|
juraj-google-style
|
async def starttls(self, context=None):
if (not self.use_aioopenssl):
raise BadImplementationError('This connection does not use aioopenssl')
import aioopenssl
import OpenSSL
(await self.ehlo_or_helo_if_needed())
if ('starttls' not in self.esmtp_extensions):
raise SMTPCommandNotSupportedError('STARTTLS')
(code, message) = (await self.do_cmd('STARTTLS', success=(220,)))
if (context is None):
context = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_2_METHOD)
(await self.transport.starttls(ssl_context=context))
self.last_ehlo_response = (None, None)
self.last_helo_response = (None, None)
self.supports_esmtp = False
self.esmtp_extensions = {}
self.auth_mechanisms = []
return (code, message)
|
Upgrades the connection to the SMTP server into TLS mode.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
If the server supports SSL/TLS, this will encrypt the rest of the SMTP
session.
Raises:
SMTPCommandNotSupportedError: If the server does not support STARTTLS.
SMTPCommandFailedError: If the STARTTLS command fails
BadImplementationError: If the connection does not use aioopenssl.
Args:
context (:obj:`OpenSSL.SSL.Context`): SSL context
Returns:
(int, message): A (code, message) 2-tuple containing the server
response.
|
codesearchnet
|
def get_chain(self, name, table='filter'):
return [r for r in self.rules if ((r['table'] == table) and (r['chain'] == name))]
|
Get the list of rules for a particular chain. Chain order is kept intact.
Args:
name (str): chain name, e.g. ``
table (str): table name, defaults to ``filter``
Returns:
list: rules
|
codesearchnet
|
def set_server_def_retries(self, retries):
self._set_server_def_retries = retries
|
Set the number of retries to use when calling SetServerDef.
In cases where many servers run in high-preemption environments, jobs could
be preempted during startup and initial connection via SetServerDef. Retries
allow for more robust connection in these environments.
Args:
retries: int specifying the number of connection retries before failing.
Retries follow an exponential backoff waiting period with min value 1ms,
max value 10s, and exponent 1.3.
|
github-repos
|
def set_as_default(self, step=None):
self.as_default(step).__enter__()
|
Enables this summary writer for the current thread.
For convenience, if `step` is not None, this function also sets a default
value for the `step` parameter used in summary-writing functions elsewhere
in the API so that it need not be explicitly passed in every such
invocation. The value can be a constant or a variable.
Note: when setting `step` in a @tf.function, the step value will be
captured at the time the function is traced, so changes to the step outside
the function will not be reflected inside the function unless using
a `tf.Variable` step.
Args:
step: An `int64`-castable default step value, or `None`. When not `None`,
the current step is modified to the given value. When `None`, the
current step is not modified.
|
github-repos
|
def pad_tensor(tensor, length, padding_index=DEFAULT_PADDING_INDEX):
n_padding = length - tensor.shape[0]
assert n_padding >= 0
if n_padding == 0:
return tensor
padding = tensor.new(n_padding, *tensor.shape[1:]).fill_(padding_index)
return torch.cat((tensor, padding), dim=0)
|
Pad a ``tensor`` to ``length`` with ``padding_index``.
Args:
tensor (torch.Tensor [n, ...]): Tensor to pad.
length (int): Pad the ``tensor`` up to ``length``.
padding_index (int, optional): Index to pad tensor with.
Returns:
(torch.Tensor [length, ...]) Padded Tensor.
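A small sketch (assumes ``DEFAULT_PADDING_INDEX`` is 0):
```python
import torch

pad_tensor(torch.LongTensor([1, 2, 3]), length=5)
# -> tensor([1, 2, 3, 0, 0])
```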
|
juraj-google-style
|
def discrete_bottleneck(self, x):
x_reshaped = self.slice_hidden(x)
x_means_hot = []
x_means = 0
loss = 0
(x_means_hot, x_means, q_loss, e_loss) = self.embedding_lookup(x_reshaped, self.means)
if self.hparams.ema:
tf.logging.info('Using EMA with beta = {}'.format(self.hparams.beta))
updated_ema_count = moving_averages.assign_moving_average(self.ema_count, tf.reduce_sum(tf.reshape(x_means_hot, shape=[(- 1), self.hparams.num_blocks, self.hparams.block_v_size]), axis=0), self.hparams.decay, zero_debias=False)
dw = tf.matmul(tf.transpose(x_means_hot, perm=[1, 2, 0]), tf.transpose(x_reshaped, perm=[1, 0, 2]))
updated_ema_means = moving_averages.assign_moving_average(self.ema_means, dw, self.hparams.decay, zero_debias=False)
n = tf.reduce_sum(updated_ema_count, axis=(- 1), keep_dims=True)
updated_ema_count = (((updated_ema_count + self.hparams.epsilon) / (n + ((2 ** self.hparams.z_size) * self.hparams.epsilon))) * n)
updated_ema_means = (updated_ema_means / tf.expand_dims(updated_ema_count, axis=(- 1)))
with tf.control_dependencies([e_loss]):
update_means = tf.assign(self.means, updated_ema_means)
with tf.control_dependencies([update_means]):
loss += (self.hparams.beta * e_loss)
else:
loss += (q_loss + (self.hparams.beta * e_loss))
x_means_idx = tf.argmax(x_means_hot, axis=(- 1))
num_bits = int(self.hparams.z_size // self.hparams.num_blocks)
x_means_bits = self.int_to_bit(x_means_idx, num_bits=num_bits, base=2)
x_discrete = self.bit_to_int(tf.to_int32(x_means_bits), num_bits=self.hparams.z_size, base=2)
shape_x = common_layers.shape_list(x)
shape_discrete = shape_x[:(- 1)]
x_discrete = tf.reshape(x_discrete, shape_discrete)
x_means = tf.reshape(x_means, shape=shape_x)
h1 = (x + tf.stop_gradient((x_means - x)))
h2 = tf.layers.dense(tf.nn.relu(h1), self.hparams.filter_size, name='vch2')
res = tf.layers.dense(tf.nn.relu(h2), self.hparams.hidden_size, name='vcfin')
embed_fn = partial(self.embed)
return {'dense': res, 'discrete': x_discrete, 'loss': loss, 'embed': embed_fn}
|
Discretization bottleneck for latent variables.
Args:
x: Input to the discretization bottleneck.
Returns:
Embedding to pass to the decoder, discrete latent, loss, and the
embedding function.
Raises:
ValueError: If projection_tensors is None for reshape_method project,
or ema_count or ema_means is None if we are using ema, or unknown args.
|
codesearchnet
|
def load_weights_from_hdf5_group_by_name(f, model, skip_mismatch=False):
if 'keras_version' in f.attrs:
original_keras_version = f.attrs['keras_version']
if hasattr(original_keras_version, 'decode'):
original_keras_version = original_keras_version.decode('utf8')
else:
original_keras_version = '1'
if 'backend' in f.attrs:
original_backend = f.attrs['backend']
if hasattr(original_backend, 'decode'):
original_backend = original_backend.decode('utf8')
else:
original_backend = None
layer_names = load_attributes_from_hdf5_group(f, 'layer_names')
index = {}
for layer in model.layers:
if layer.name:
index.setdefault(layer.name, []).append(layer)
for k, name in enumerate(layer_names):
g = f[name]
weight_values = load_subset_weights_from_hdf5_group(g)
for layer in index.get(name, []):
symbolic_weights = _legacy_weights(layer)
if len(weight_values) != len(symbolic_weights):
if skip_mismatch:
warnings.warn(f'Skipping loading of weights for layer #{k} (named {layer.name}) due to mismatch in number of weights. Layer expects {len(symbolic_weights)} weight(s). Received {len(weight_values)} saved weight(s)', stacklevel=2)
continue
raise ValueError(f'Weight count mismatch for layer #{k} (named {layer.name}). Layer expects {len(symbolic_weights)} weight(s). Received {len(weight_values)} saved weight(s)')
_set_weights(layer, symbolic_weights, weight_values, skip_mismatch=skip_mismatch, name=f'layer #{k} (named {layer.name})')
if 'top_level_model_weights' in f:
symbolic_weights = model._trainable_variables + model._non_trainable_variables
weight_values = load_subset_weights_from_hdf5_group(f['top_level_model_weights'])
if len(weight_values) != len(symbolic_weights):
if skip_mismatch:
warnings.warn(f'Skipping loading top-level weights for model due to mismatch in number of weights. Model expects {len(symbolic_weights)} top-level weight(s). Received {len(weight_values)} saved top-level weight(s)', stacklevel=2)
else:
raise ValueError(f'Weight count mismatch for top-level weights of model. Model expects {len(symbolic_weights)} top-level weight(s). Received {len(weight_values)} saved top-level weight(s)')
else:
_set_weights(model, symbolic_weights, weight_values, skip_mismatch=skip_mismatch, name='top-level model')
|
Implements name-based weight loading (instead of topological loading).
Layers that have no matching name are skipped.
Args:
f: A pointer to a HDF5 group.
model: Model instance.
skip_mismatch: Boolean, whether to skip loading of layers
where there is a mismatch in the number of weights,
or a mismatch in the shape of the weights.
Raises:
ValueError: in case of mismatch between provided layers
and weights file and skip_mismatch=False.
|
github-repos
|
def write(self, file_des, contents):
file_handle = self.filesystem.get_open_file(file_des)
if isinstance(file_handle, FakeDirWrapper):
self.filesystem.raise_os_error(errno.EBADF, file_handle.file_path)
if isinstance(file_handle, FakePipeWrapper):
return file_handle.write(contents)
file_handle.raw_io = True
file_handle._sync_io()
file_handle.update_flush_pos()
file_handle.write(contents)
file_handle.flush()
return len(contents)
|
Write string to file descriptor, returns number of bytes written.
Args:
file_des: An integer file descriptor for the file object requested.
contents: String of bytes to write to file.
Returns:
Number of bytes written.
Raises:
OSError: bad file descriptor.
TypeError: if file descriptor is not an integer.
|
juraj-google-style
|
def check_oversized_pickle(pickled, name, obj_type, worker):
length = len(pickled)
if length <= ray_constants.PICKLE_OBJECT_WARNING_SIZE:
return
warning_message = (
"Warning: The {} {} has size {} when pickled. "
"It will be stored in Redis, which could cause memory issues. "
"This may mean that its definition uses a large array or other object."
).format(obj_type, name, length)
push_error_to_driver(
worker,
ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR,
warning_message,
driver_id=worker.task_driver_id)
|
Send a warning message if the pickled object is too large.
Args:
pickled: the pickled object.
name: name of the pickled object.
obj_type: type of the pickled object, can be 'function',
'remote function', 'actor', or 'object'.
worker: the worker used to send warning message.
|
juraj-google-style
|
def __init__(self, img_input, p=6.):
super(LPNorm, self).__init__()
if p < 1:
raise ValueError('p value should range between [1, inf)')
self.name = "L-{} Norm Loss".format(p)
self.p = p
self.img = img_input
|
Builds an L-p norm function. This regularizer encourages the intensity of pixels to stay bounded.
i.e., prevents pixels from taking on very large values.
Args:
img_input: 4D image input tensor to the model of shape: `(samples, channels, rows, cols)`
if data_format='channels_first' or `(samples, rows, cols, channels)` if data_format='channels_last'.
p: The pth norm to use. If p = float('inf'), infinity-norm will be used.
|
juraj-google-style
|
def handle_error(self, code, message_values=None, raise_error=True):
try:
if message_values is None:
message_values = []
message = self.error_codes.message(code).format(*message_values)
self.log.error('Error code: {}, {}'.format(code, message))
except AttributeError:
self.log.error('Incorrect error code provided ({}).'.format(code))
raise RuntimeError(1000, 'Generic Failure, see logs for more details.')
except IndexError:
self.log.error(
'Incorrect message values provided for error code {} ({}).'.format(
code, message_values
)
)
raise RuntimeError(1000, 'Generic Failure, see logs for more details.')
if raise_error:
raise RuntimeError(code, message)
|
Raise RuntimeError
Args:
code (integer): The error code from API or SDK.
message_values (list): The values used to format the error message.
raise_error (bool): If True (default), raise a RuntimeError after logging.
|
juraj-google-style
|
def AddBudget(self, client_customer_id, micro_amount):
self.client.SetClientCustomerId(client_customer_id)
budget_service = self.client.GetService('BudgetService')
operations = [{'operator': 'ADD', 'operand': {'name': 'Budget #%s' % uuid.uuid4(), 'amount': {'microAmount': micro_amount}}}]
return budget_service.mutate(operations)['value'][0]['budgetId']
|
Create a new Budget with the given microAmount.
Args:
client_customer_id: str Client Customer Id used to create Budget.
micro_amount: str The budget represented in micros.
Returns:
str BudgetId of the newly created Budget.
|
codesearchnet
|
def AsParameterType(type: ContractParameterType, item: StackItem):
if type == ContractParameterType.Integer:
return ContractParameter(type, value=item.GetBigInteger())
elif type == ContractParameterType.Boolean:
return ContractParameter(type, value=item.GetBoolean())
elif type == ContractParameterType.Array:
output = [ContractParameter.ToParameter(subitem) for subitem in item.GetArray()]
return ContractParameter(type, value=output)
elif type == ContractParameterType.String:
return ContractParameter(type, value=item.GetString())
elif type == ContractParameterType.InteropInterface:
return ContractParameter(type, value=item.GetInterface())
else:
return ContractParameter(type, value=item.GetByteArray())
|
Convert a StackItem to a ContractParameter object of a specified ContractParameterType
Args:
type (neo.SmartContract.ContractParameterType): The ContractParameterType to convert to
item (neo.VM.InteropService.StackItem): The item to convert to a ContractParameter object
Returns:
neo.SmartContract.ContractParameter: The converted ContractParameter object.
|
juraj-google-style
|
def expand(self):
return self.element_wise((lambda o: (o.expand() if isinstance(o, QuantumExpression) else o)))
|
Expand each matrix element distributively.
Returns:
Matrix: Expanded matrix.
|
codesearchnet
|
def quad_genz_keister_22(order):
order = sorted(GENZ_KEISTER_22.keys())[order]
(abscissas, weights) = GENZ_KEISTER_22[order]
abscissas = numpy.array(abscissas)
weights = numpy.array(weights)
weights /= numpy.sum(weights)
abscissas *= numpy.sqrt(2)
return (abscissas, weights)
|
Hermite Genz-Keister 22 rule.
Args:
order (int):
The quadrature order. Must be in the interval (0, 8).
Returns:
(:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]):
Abscissas and weights
Examples:
>>> abscissas, weights = quad_genz_keister_22(1)
>>> print(numpy.around(abscissas, 4))
[-1.7321 0. 1.7321]
>>> print(numpy.around(weights, 4))
[0.1667 0.6667 0.1667]
|
codesearchnet
|
def rename_nodes(self, renaming_map):
if (not isinstance(renaming_map, dict)):
raise TypeError('renaming_map must be a dict')
for node in self.traverse_preorder():
if (node.label in renaming_map):
node.label = renaming_map[node.label]
|
Rename nodes in this ``Tree``
Args:
``renaming_map`` (``dict``): A dictionary mapping old labels (keys) to new labels (values)
|
codesearchnet
|
def _run_check(self):
cmd = shlex.split(self.config['check_cmd'])
self.log.info('running %s', ' '.join(cmd))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
start_time = time.time()
try:
(outs, errs) = proc.communicate(timeout=self.config['check_timeout'])
except subprocess.TimeoutExpired:
self.log.error('check timed out')
if (proc.poll() is None):
try:
proc.kill()
except PermissionError:
self.log.warning('failed to kill check due to inadequate access rights, check could be running under another user (root) via sudo')
return False
else:
msg = 'check duration {t:.3f}ms'.format(t=((time.time() - start_time) * 1000))
self.log.info(msg)
if (proc.returncode != 0):
self.log.info('stderr from the check %s', errs)
self.log.info('stdout from the check %s', outs)
return (proc.returncode == 0)
|
Execute a check command.
Returns:
True if the exit code of the command is 0 otherwise False.
|
codesearchnet
|
def __learn_labels(self, labels):
if self.feature_length > 0:
result = list(self.labels.classes_)
else:
result = []
for label in labels:
result.append(label)
self.labels.fit(result)
|
Learns new labels, this method is intended for internal use
Args:
labels (:obj:`list` of :obj:`str`): Labels to learn
|
juraj-google-style
|
def data_period_name_or_description(self, value=None):
if value is not None:
try:
value = str(value)
except ValueError:
raise ValueError(
'value {} need to be of type str '
'for field `data_period_name_or_description`'.format(value))
if ',' in value:
raise ValueError('value should not contain a comma '
'for field `data_period_name_or_description`')
self._data_period_name_or_description = value
|
Corresponds to IDD Field `data_period_name_or_description`
Args:
value (str): value for IDD Field `data_period_name_or_description`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def _write_handle(self, conn, handle, ack, value, timeout=1.0):
conn_handle = conn
char_handle = handle
def write_handle_acked(event):
if ((event.command_class == 4) and (event.command == 1)):
(conn, _, char) = unpack('<BHH', event.payload)
return ((conn_handle == conn) and (char_handle == char))
data_len = len(value)
if (data_len > 20):
return (False, {'reason': 'Data too long to write'})
payload = struct.pack(('<BHB%ds' % data_len), conn_handle, char_handle, data_len, value)
try:
if ack:
response = self._send_command(4, 5, payload)
else:
response = self._send_command(4, 6, payload)
except InternalTimeoutError:
return (False, {'reason': 'Timeout waiting for response to command in _write_handle'})
(_, result) = unpack('<BH', response.payload)
if (result != 0):
return (False, {'reason': 'Error writing to handle', 'error_code': result})
if ack:
events = self._wait_process_events(timeout, (lambda x: False), write_handle_acked)
if (len(events) == 0):
return (False, {'reason': 'Timeout waiting for acknowledge on write'})
(_, result, _) = unpack('<BHH', events[0].payload)
if (result != 0):
return (False, {'reason': 'Error received during write to handle', 'error_code': result})
return (True, None)
|
Write to a BLE device characteristic by its handle
Args:
conn (int): The connection handle for the device we should write to
handle (int): The characteristic handle we should write to
ack (bool): Should this be an acknowledged write or an unacknowledged one
timeout (float): How long to wait before failing
value (bytearray): The value that we should write
|
codesearchnet
|
def GaussianBlur(X, ksize_width, ksize_height, sigma_x, sigma_y):
return image_transform(X, cv2.GaussianBlur, ksize=(ksize_width, ksize_height), sigmaX=sigma_x, sigmaY=sigma_y)
|
Apply Gaussian blur to the given data.
Args:
X: data to blur
ksize_width, ksize_height: Gaussian kernel width and height
sigma_x, sigma_y: Gaussian kernel standard deviation in the X and Y directions
|
codesearchnet
|
def add_real_file(self, source_path, read_only=True, target_path=None):
target_path = (target_path or source_path)
source_path = make_string_path(source_path)
target_path = self.make_string_path(target_path)
real_stat = os.stat(source_path)
fake_file = self.create_file_internally(target_path, read_from_real_fs=True)
fake_file.stat_result.set_from_stat_result(real_stat)
if read_only:
fake_file.st_mode &= 261924
fake_file.file_path = source_path
self.change_disk_usage(fake_file.size, fake_file.name, fake_file.st_dev)
return fake_file
|
Create `target_path`, including all the parent directories along the
way, for an existing real file. The contents of the real file are read
only on demand.
Args:
source_path: Path to an existing file in the real file system
read_only: If `True` (the default), writing to the fake file
raises an exception. Otherwise, writing to the file changes
the fake file only.
target_path: If given, the path of the target file in the fake
filesystem, otherwise it is equal to `source_path`.
Returns:
the newly created FakeFile object.
Raises:
OSError: if the file does not exist in the real file system.
IOError: if the file already exists in the fake file system.
.. note:: On most systems, accessing the fake file's contents may
update both the real and fake files' `atime` (access time).
In this particular case, `add_real_file()` violates the rule
that `pyfakefs` must not modify the real file system.
|
codesearchnet
|
def WriteStackFrameWithId(self, stack_frame_with_id):
debug_event = debug_event_pb2.DebugEvent(stack_frame_with_id=stack_frame_with_id)
self._EnsureTimestampAdded(debug_event)
_pywrap_debug_events_writer.WriteStackFrameWithId(self._dump_root, debug_event)
|
Write a StackFrameWithId proto with the writer.
Args:
stack_frame_with_id: A StackFrameWithId proto, describing the content of a
stack frame involved in the execution of the debugged TensorFlow
program.
|
github-repos
|
def get_img_shape(img):
if isinstance(img, np.ndarray):
shape = img.shape
else:
shape = K.int_shape(img)
if (K.image_data_format() == 'channels_last'):
shape = list(shape)
shape.insert(1, shape[(- 1)])
shape = tuple(shape[:(- 1)])
return shape
|
Returns image shape in a backend agnostic manner.
Args:
img: An image tensor of shape: `(channels, image_dims...)` if data_format='channels_first' or
`(image_dims..., channels)` if data_format='channels_last'.
Returns:
Tuple containing image shape information in `(samples, channels, image_dims...)` order.
|
codesearchnet
|
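A small illustrative call to `get_img_shape`, assuming a channels_last Keras backend; the shape values are only examples.

import numpy as np

# A batch of one RGB image in channels_last layout: (samples, rows, cols, channels).
img = np.zeros((1, 224, 224, 3))

# With a channels_last backend this reports (1, 3, 224, 224),
# i.e. (samples, channels, image_dims...).
print(get_img_shape(img))
|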
def _check(self, check, radl):
if (check[0] == float):
if ((not isinstance(self.value, int)) and (not isinstance(self.value, float))):
raise RADLParseException(('Invalid type; expected %s' % check[0]), line=self.line)
elif (check[0] == str):
if ((not isinstance(self.value, str)) and (not isinstance(self.value, unicode))):
raise RADLParseException(('Invalid type; expected %s' % check[0]), line=self.line)
elif (not isinstance(self.value, check[0])):
raise RADLParseException(('Invalid type; expected %s' % check[0]), line=self.line)
if ((isinstance(self.value, str) or isinstance(self.value, unicode)) and (self.prop.find('version') == (- 1))):
if (self.operator != '='):
raise RADLParseException("Invalid operator; expected '='", line=self.line)
elif (isinstance(self.value, int) or isinstance(self.value, float) or (self.prop.find('version') >= 0)):
if (self.operator not in ['=', '<=', '>=', '>', '<']):
raise RADLParseException(("Invalid operator; expected '=', '<=', " + "'>=', '>' or '<'"), line=self.line)
elif isinstance(self.value, Features):
if (self.operator != 'contains'):
raise RADLParseException("Invalid operator; expected 'contains'", line=self.line)
if isinstance(check[1], list):
if (self.value.upper() not in check[1]):
raise RADLParseException(('Invalid value; expected one of %s' % check[1]), line=self.line)
elif callable(check[1]):
if (not check[1](self, radl)):
raise RADLParseException(("Invalid value in property '%s'" % self.prop), line=self.line)
if ((len(check) < 3) or (check[2] is None)):
if self.unit:
raise RADLParseException('Invalid unit; expected none', line=self.line)
elif ((len(check) > 2) and check[2]):
if (self.unit.upper() not in check[2]):
raise RADLParseException(('Invalid unit; expected one of %s' % check[2]), line=self.line)
return True
|
Check type, operator and unit in a feature.
Args:
- check(tuple):
- check[0]: expected type of the feature value.
- check[1]: can be a list of possible values, a function to test the value, or None.
- check[2] (optional): can be a list of possible units; if None or not set, the only
valid unit is none.
- radl: second argument passed when calling check[1].
|
codesearchnet
|
def run(argv=None, save_main_session=True, test_pipeline=None) -> PipelineResult:
known_args, pipeline_args = parse_known_args(argv)
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = save_main_session
model_handler = VLLMCompletionsModelHandler(model_name=known_args.model)
input_examples = COMPLETION_EXAMPLES
if known_args.chat:
model_handler = VLLMChatModelHandler(model_name=known_args.model, chat_template_path=known_args.chat_template)
input_examples = CHAT_EXAMPLES
pipeline = test_pipeline
if not test_pipeline:
pipeline = beam.Pipeline(options=pipeline_options)
examples = pipeline | 'Create examples' >> beam.Create(input_examples)
predictions = examples | 'RunInference' >> RunInference(model_handler)
process_output = predictions | 'Process Predictions' >> beam.ParDo(PostProcessor())
_ = process_output | 'WriteOutput' >> beam.io.WriteToText(known_args.output, shard_name_template='', append_trailing_newlines=True)
result = pipeline.run()
result.wait_until_finish()
return result
|
Args:
argv: Command line arguments defined for this example.
save_main_session: Used for internal testing.
test_pipeline: Used for internal testing.
|
github-repos
|
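A hedged sketch of invoking `run` from a driver script. The `--model` and `--output` flag names are inferred from `known_args` above and are therefore assumptions, as is the model name; real deployments would add runner-specific pipeline options.

# Hypothetical local invocation on the Beam DirectRunner.
result = run(argv=[
    '--model=facebook/opt-125m',           # assumed flag, mirrors known_args.model
    '--output=/tmp/vllm_predictions.txt',  # assumed flag, mirrors known_args.output
    '--runner=DirectRunner',
])
print(result.state)
|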
def get_accepted_features(features, proposed_feature):
def eq(feature):
'Features are equal if they have the same source\n\n At least in this implementation...\n '
return (feature.source == proposed_feature.source)
result = lfilter(complement(eq), features)
if ((len(features) - len(result)) == 1):
return result
elif (len(result) == len(features)):
raise BalletError("Did not find match for proposed feature within 'contrib'")
else:
raise BalletError('Unexpected condition (n_features={}, n_result={})'.format(len(features), len(result)))
|
Deselect candidate features from list of all features
Args:
features (List[Feature]): collection of all features in the ballet
project: both accepted features and candidate ones that have not
been accepted
proposed_feature (Feature): candidate feature that has not been
accepted
Returns:
List[Feature]: list of features with the proposed feature not in it.
Raises:
ballet.exc.BalletError: Could not deselect exactly the proposed
feature.
|
codesearchnet
|
def due_date(self, due_date):
if not self.can_update():
self._tcex.handle_error(910, [self.type])
due_date = self._utils.format_datetime(due_date, date_format='%Y-%m-%dT%H:%M:%SZ')
self._data['dueDate'] = due_date
request = {'dueDate': due_date}
return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request)
|
Sets the task due_date
Args:
due_date: Converted to %Y-%m-%dT%H:%M:%SZ date format
|
juraj-google-style
|
def delete_endpoint_config(self, endpoint_config_name):
LOGGER.info('Deleting endpoint configuration with name: {}'.format(endpoint_config_name))
self.sagemaker_client.delete_endpoint_config(EndpointConfigName=endpoint_config_name)
|
Delete an Amazon SageMaker endpoint configuration.
Args:
endpoint_config_name (str): Name of the Amazon SageMaker endpoint configuration to delete.
|
codesearchnet
|
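A short usage sketch, assuming this method is reached through a SageMaker `Session` object with AWS credentials already configured; the configuration name is hypothetical.

import sagemaker

session = sagemaker.Session()
session.delete_endpoint_config('my-endpoint-config')  # hypothetical name
|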
def export_to_tf_tensor(self, x):
mesh_impl = self.mesh_impl(x)
return mesh_impl.export_to_tf_tensor(
x, self.tensors[x].to_laid_out_tensor())
|
Turn a Tensor into a tf.Tensor.
Args:
x: Tensor.
Returns:
tf.Tensor.
|
juraj-google-style
|
def get_psd(self, omega):
w = np.asarray(omega)
(alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag) = self.coefficients
p = get_psd_value(alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag, w.flatten())
return p.reshape(w.shape)
|
Compute the PSD of the term for an array of angular frequencies
Args:
omega (array[...]): An array of frequencies where the PSD should
be evaluated.
Returns:
The value of the PSD for each ``omega``. This will have the same
shape as ``omega``.
|
codesearchnet
|
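A hedged example of evaluating the PSD on a frequency grid, assuming a celerite-style term; the specific term class and its parameters are assumptions.

import numpy as np
import celerite.terms

# Hypothetical damped-oscillator term; any term exposing get_psd would do.
term = celerite.terms.SHOTerm(log_S0=0.0, log_Q=0.0, log_omega0=0.0)

omega = np.linspace(0.1, 10.0, 500)
psd = term.get_psd(omega)  # same shape as omega
|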
def check_errors(self, is_global=False):
errors = (self.global_errors if is_global else self.errors)
if errors:
print('dfTimewolf encountered one or more errors:')
for (error, critical) in errors:
print('{0:s} {1:s}'.format(('CRITICAL: ' if critical else ''), error))
if critical:
print('Critical error found. Aborting.')
sys.exit((- 1))
|
Checks for errors and exits if any of them are critical.
Args:
is_global: If True, check the global_errors attribute. If False, check the
errors attribute.
|
codesearchnet
|
def _ParseExtensionsString(self, extensions_string):
if (not extensions_string):
return
extensions_string = extensions_string.lower()
extensions = [extension.strip() for extension in extensions_string.split(',')]
file_entry_filter = file_entry_filters.ExtensionsFileEntryFilter(extensions)
self._filter_collection.AddFilter(file_entry_filter)
|
Parses the extensions string.
Args:
extensions_string (str): comma separated extensions to filter.
|
codesearchnet
|
def reinit_nested_vars(variables, indices=None):
if isinstance(variables, (tuple, list)):
return tf.group(*[reinit_nested_vars(variable, indices) for variable in variables])
if (indices is None):
return variables.assign(tf.zeros_like(variables))
else:
zeros = tf.zeros(([tf.shape(indices)[0]] + variables.shape[1:].as_list()))
return tf.scatter_update(variables, indices, zeros)
|
Reset all variables in a nested tuple to zeros.
Args:
variables: Nested tuple or list of variables.
indices: Batch indices to reset, defaults to all.
Returns:
Operation.
|
codesearchnet
|
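A minimal TF1-style sketch (graph mode assumed, since the helper uses `tf.scatter_update`); the variable shapes are illustrative.

import tensorflow as tf  # TF 1.x graph mode assumed

variables = (tf.Variable(tf.ones([4, 3])), tf.Variable(tf.ones([4])))

reset_all = reinit_nested_vars(variables)                                # zero everything
reset_some = reinit_nested_vars(variables, indices=tf.constant([0, 2]))  # zero rows 0 and 2

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(reset_some)
|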
def get_uuid_string(low=None, high=None, **x):
if low is None or high is None:
return None
x = ''.join([parse_part(low), parse_part(high)])
return '-'.join([x[:8], x[8:12], x[12:16], x[16:20], x[20:32]])
|
This method parses a UUID protobuf message type from its component
'high' and 'low' longs into a standard formatted UUID string
Args:
low: the 'low' long of the UUID protobuf message type
high: the 'high' long of the UUID protobuf message type
Returns:
str: UUID formatted string
|
juraj-google-style
|
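A hedged call sketch for `get_uuid_string`; the 64-bit values are arbitrary, and the exact output depends on the module's `parse_part` helper, which is not shown here.

# Arbitrary example longs; real values come from a UUID protobuf message.
uuid_str = get_uuid_string(low=0x1122334455667788, high=0x99AABBCCDDEEFF00)
# Expected to be grouped as 8-4-4-4-12 hex characters.
print(uuid_str)
|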
def convert_positional_argument(self, index, arg_value):
if self._has_self:
if (index == 0):
return arg_value
index -= 1
arg_name = self.arg_names[index]
return self.convert_argument(arg_name, arg_value)
|
Convert and validate a positional argument.
Args:
index (int): The positional index of the argument
arg_value (object): The value to convert and validate
Returns:
object: The converted value.
|
codesearchnet
|
def search(cls, five9, filters):
return cls._name_search(five9.configuration.getWebConnectors, filters)
|
Search for a record on the remote and return the results.
Args:
five9 (five9.Five9): The authenticated Five9 remote.
filters (dict): A dictionary of search parameters, keyed by the
name of the field to search. This should conform to the
schema defined in :func:`five9.Five9.create_criteria`.
Returns:
list[BaseModel]: A list of records representing the result.
|
juraj-google-style
|
def get_vm_extension(access_token, subscription_id, resource_group, vm_name, extension_name):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Compute/virtualMachines/', vm_name,
'/extensions/', extension_name,
'?api-version=', COMP_API])
return do_get(endpoint, access_token)
|
Get details about a VM extension.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vm_name (str): Name of the virtual machine.
extension_name (str): VM extension name.
Returns:
HTTP response. JSON body of VM extension properties.
|
juraj-google-style
|
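An illustrative call, assuming a valid Azure bearer token has been obtained elsewhere; every identifier below is a placeholder.

access_token = '<bearer-token>'  # placeholder
response = get_vm_extension(
    access_token,
    subscription_id='00000000-0000-0000-0000-000000000000',
    resource_group='my-resource-group',
    vm_name='my-vm',
    extension_name='CustomScriptExtension',
)
print(response)  # JSON body with the extension properties
|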
def load(self, steps_dir=None, step_file=None, step_list=None):
self._closed()
self.steps_library.load(steps_dir=steps_dir, step_file=step_file, step_list=step_list)
|
Load CWL steps into the WorkflowGenerator's steps library.
Adds steps (command line tools and workflows) to the
``WorkflowGenerator``'s steps library. These steps can be used to
create workflows.
Args:
steps_dir (str): path to directory containing CWL files. All CWL in
the directory are loaded.
step_file (str): path to a file containing a CWL step that will be
added to the steps library.
step_list (list): list of CWL steps to be added to the steps library.
|
codesearchnet
|
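A hedged sketch of loading steps into a scriptcwl-style `WorkflowGenerator`; the import path and the CWL paths are assumptions.

from scriptcwl import WorkflowGenerator  # assumed import path

with WorkflowGenerator() as wf:
    wf.load(steps_dir='cwl/steps/')          # load every CWL file in a directory
    wf.load(step_file='cwl/tools/echo.cwl')  # or add a single step
|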
def b_fit_score(self, x, y):
x = np.reshape(minmax_scale(x), ((- 1), 1))
y = np.reshape(minmax_scale(y), ((- 1), 1))
poly = PolynomialFeatures(degree=self.degree)
poly_x = poly.fit_transform(x)
poly_x[:, 1] = 0
poly_x[:, 2] = 0
regressor = LinearRegression()
regressor.fit(poly_x, y)
y_predict = regressor.predict(poly_x)
error = mean_squared_error(y_predict, y)
return error
|
Compute the RECI fit score
Args:
x (numpy.ndarray): Variable 1
y (numpy.ndarray): Variable 2
Returns:
float: RECI fit score
|
codesearchnet
|
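A hedged example of scoring both causal directions with `b_fit_score`, instantiating the surrounding class as in CDT's pairwise RECI usage; the import path and constructor defaults are assumptions, and the data is synthetic.

import numpy as np
from cdt.causality.pairwise import RECI  # assumed location of the class

rng = np.random.RandomState(0)
x = rng.uniform(size=300)
y = x ** 2 + 0.05 * rng.normal(size=300)  # simple nonlinear dependence

model = RECI()
err_xy = model.b_fit_score(x, y)  # fit error when regressing y on x
err_yx = model.b_fit_score(y, x)  # and the reverse direction
print(err_xy, err_yx)
|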
def _tokens_to_subtoken_ids(self, tokens):
ret = []
for token in tokens:
ret.extend(self._token_to_subtoken_ids(token))
return ret
|
Converts a list of tokens to a list of subtoken ids.
Args:
tokens: a list of strings.
Returns:
a list of integers in the range [0, vocab_size)
|
juraj-google-style
|
def isomorphic(q, g, check_varprops=True):
qdg = _make_digraph(q, check_varprops)
gdg = _make_digraph(g, check_varprops)
def nem(qd, gd):
return qd.get('sig') == gd.get('sig')
return nx.is_isomorphic(qdg, gdg, node_match=nem, edge_match=nem)
|
Return `True` if Xmrs objects *q* and *g* are isomorphic.
Isomorphicity compares the predicates of an Xmrs, the variable
properties of their predications (if `check_varprops=True`),
constant arguments, and the argument structure between
predications. Node IDs and Lnk values are ignored.
Args:
q: the left Xmrs to compare
g: the right Xmrs to compare
check_varprops: if `True`, make sure variable properties are
equal for mapped predications
|
juraj-google-style
|
def _tensor_product(t1, t2):
return tf.matmul(tf.expand_dims(t1, axis=-1), tf.expand_dims(t2, axis=-2))
|
Computes the outer product of two possibly batched vectors.
Args:
t1: A `tf.Tensor` of shape `[..., n]`.
t2: A `tf.Tensor` of shape `[..., m]`.
Returns:
A tensor of shape `[..., n, m]` with matching batch dimensions, let's call
it `r`, whose components are:
```None
r[..., i, j] = t1[..., i] * t2[..., j]
```
|
juraj-google-style
|
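A quick check of the batched outer product, assuming TF2 eager execution.

import tensorflow as tf

t1 = tf.constant([[1.0, 2.0, 3.0]])  # shape [1, 3]
t2 = tf.constant([[10.0, 20.0]])     # shape [1, 2]

r = _tensor_product(t1, t2)          # shape [1, 3, 2]
# r[0, i, j] == t1[0, i] * t2[0, j]
print(r.numpy())
|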
def exit_code(self) -> Any:
if (self._done() or self.is_killed()):
return self._exit_code
else:
raise BehaviourNotFinishedException
|
Returns the exit_code of the behaviour.
It only works when the behaviour is done or killed,
otherwise it raises an exception.
Returns:
object: the exit code of the behaviour
|
codesearchnet
|
def create(self, name, redirect_uri=None):
data = dict(name=name)
if redirect_uri:
data['redirect_uri'] = redirect_uri
auth_request_resource = self.resource.create(data)
return (auth_request_resource.attributes['metadata']['device_token'], auth_request_resource.attributes['mfa_uri'])
|
Create a new Device object.
Devices tie Users and Applications together. For your Application to
access and act on behalf of a User, the User must authorize a Device
created by your Application.
This function will return a `device_token` which you must store and use
after the Device is approved in
`client.authenticate_device(api_token, device_token)`
The second value returned is an `mfa_uri` which is the location the User
must visit to approve the new device. After this function completes,
you should launch a new browser tab or webview with this value as the
location. After the User approves the Device, they will be redirected to
the redirect_uri you specify in this call.
Args:
name (str): Human-readable name for the device
(e.g. "Suzanne's iPhone")
redirect_uri (str, optional): A URI to which to redirect the User after
they approve the new Device.
Returns: A tuple of (device_token, mfa_uri)
|
codesearchnet
|
def is_instance(state, inst, not_instance_msg=None):
state.assert_is(['object_assignments'], 'is_instance', ['check_object'])
sol_name = state.solution_parts.get('name')
stu_name = state.student_parts.get('name')
if (not_instance_msg is None):
not_instance_msg = 'Is it a {{inst.__name__}}?'
if (not isInstanceInProcess(sol_name, inst, state.solution_process)):
raise InstructorError(('`is_instance()` noticed that `%s` is not a `%s` in the solution process.' % (sol_name, inst.__name__)))
_msg = state.build_message(not_instance_msg, {'inst': inst})
feedback = Feedback(_msg, state)
state.do_test(InstanceProcessTest(stu_name, inst, state.student_process, feedback))
return state
|
Check whether an object is an instance of a certain class.
``is_instance()`` can currently only be used when chained from ``check_object()``, the function that is
used to 'zoom in' on the object of interest.
Args:
inst (class): The class that the object should have.
not_instance_msg (str): When specified, this overrides the automatically generated message in case
the object does not have the expected class.
state (State): The state that is passed in through the SCT chain (don't specify this).
:Example:
Student code and solution code::
import numpy as np
arr = np.array([1, 2, 3, 4, 5])
SCT::
# Verify the class of arr
import numpy
Ex().check_object('arr').is_instance(numpy.ndarray)
|
codesearchnet
|
def sync_job_info(self, job_name):
job_path = os.path.join(self._logdir, job_name)
if job_name not in self._monitored_jobs:
self._create_job_info(job_path)
self._monitored_jobs.add(job_name)
else:
self._update_job_info(job_path)
expr_dirs = filter(lambda d: os.path.isdir(os.path.join(job_path, d)),
os.listdir(job_path))
for expr_dir_name in expr_dirs:
self.sync_trial_info(job_path, expr_dir_name)
self._update_job_info(job_path)
|
Load information of the job with the given job name.
1. Traverse each experiment sub-directory and sync information
for each trial.
2. Create or update the job information, together with the job
meta file.
Args:
job_name (str): name of the Tune experiment
|
juraj-google-style
|
def get_pipeline_yaml(file):
tag_representers = [PyString, SicString]
yaml_loader = get_yaml_parser_safe()
for representer in tag_representers:
yaml_loader.register_class(representer)
pipeline_definition = yaml_loader.load(file)
return pipeline_definition
|
Return pipeline yaml from open file object.
Use specific custom representers to model the custom pypyr pipeline yaml
format, to load in special literal types like py and sic strings.
If looking to extend the pypyr pipeline syntax with special types, add
these to the tag_representers list.
Args:
file: open file-like object.
Returns:
dict-like representation of loaded yaml.
|
juraj-google-style
|
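A hedged sketch of loading a pypyr pipeline definition from disk; the file path and the 'steps' key are hypothetical.

# Hypothetical pipeline file on disk.
with open('pipelines/my-pipeline.yaml', encoding='utf-8') as f:
    pipeline_definition = get_pipeline_yaml(f)

print(pipeline_definition['steps'])  # assumes the pipeline declares a 'steps' key
|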
def trim(self, len_):
other = Version(None)
other.tokens = self.tokens[:len_]
other.seps = self.seps[:len_ - 1]
return other
|
Return a copy of the version, possibly with less tokens.
Args:
len_ (int): New version length. If >= current length, an
unchanged copy of the version is returned.
|
juraj-google-style
|
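An illustrative use of `trim`, assuming a rez-style `Version` constructed from a version string; the import path is an assumption.

from rez.vendor.version.version import Version  # assumed import path

v = Version("1.2.3")
print(v.trim(2))  # "1.2"
print(v.trim(5))  # unchanged copy: "1.2.3"
|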
def parse(self, data):
self.binding_var_count = 0
self.segment_count = 0
segments = self.parser.parse(data)
path_wildcard = False
for segment in segments:
if segment.kind == _TERMINAL and segment.literal == '**':
if path_wildcard:
raise ValidationException(
'validation error: path template cannot contain more '
'than one path wildcard')
path_wildcard = True
return segments
|
Returns a list of path template segments parsed from data.
Args:
data: A path template string.
Returns:
A list of _Segment.
|
juraj-google-style
|
def crawl(self,
feeder_kwargs=None,
parser_kwargs=None,
downloader_kwargs=None):
self.signal.reset()
self.logger.info('start crawling...')
feeder_kwargs = {} if feeder_kwargs is None else feeder_kwargs
parser_kwargs = {} if parser_kwargs is None else parser_kwargs
downloader_kwargs = {} if downloader_kwargs is None else downloader_kwargs
self.logger.info('starting %d feeder threads...',
self.feeder.thread_num)
self.feeder.start(**feeder_kwargs)
self.logger.info('starting %d parser threads...',
self.parser.thread_num)
self.parser.start(**parser_kwargs)
self.logger.info('starting %d downloader threads...',
self.downloader.thread_num)
self.downloader.start(**downloader_kwargs)
while True:
if not self.feeder.is_alive():
self.signal.set(feeder_exited=True)
if not self.parser.is_alive():
self.signal.set(parser_exited=True)
if not self.downloader.is_alive():
break
time.sleep(1)
if not self.feeder.in_queue.empty():
self.feeder.clear_buffer()
if not self.parser.in_queue.empty():
self.parser.clear_buffer()
if not self.downloader.in_queue.empty():
self.downloader.clear_buffer(True)
self.logger.info('Crawling task done!')
|
Start crawling
This method will start the feeder, parser and downloader and wait
until all threads exit.
Args:
feeder_kwargs (dict, optional): Arguments to be passed to ``feeder.start()``
parser_kwargs (dict, optional): Arguments to be passed to ``parser.start()``
downloader_kwargs (dict, optional): Arguments to be passed to
``downloader.start()``
|
juraj-google-style
|
def get(self, id_or_url, default=None):
if ('/' in id_or_url):
id = urls.SheetUrl.from_string(id_or_url).id
else:
id = id_or_url
try:
return self[id]
except KeyError:
return default
|
Fetch and return the spreadsheet with the given id or url.
Args:
id_or_url (str): unique alphanumeric id or URL of the spreadsheet
Returns:
New SpreadSheet instance or given default if none is found
Raises:
ValueError: if an URL is given from which no id could be extracted
|
codesearchnet
|
def getField(self, fld_name):
result = ""
if fld_name in self.m_req:
result = self.m_req[fld_name][MeterData.StringValue]
else:
ekm_log("Requested nonexistent field: " + fld_name)
return result
|
Return :class:`~ekmmeters.Field` content, scaled and formatted.
Args:
fld_name (str): A :class:`~ekmmeters.Field` value which is on your meter.
Returns:
str: String value (scaled if numeric) for the field.
|
juraj-google-style
|
def GetParserAndPluginNames(cls, parser_filter_expression=None):
parser_and_plugin_names = []
for (parser_name, parser_class) in cls.GetParsers(parser_filter_expression=parser_filter_expression):
parser_and_plugin_names.append(parser_name)
if parser_class.SupportsPlugins():
for (plugin_name, _) in parser_class.GetPlugins():
parser_and_plugin_names.append('{0:s}/{1:s}'.format(parser_name, plugin_name))
return parser_and_plugin_names
|
Retrieves the parser and parser plugin names.
Args:
parser_filter_expression (Optional[str]): parser filter expression,
where None represents all parsers and plugins.
Returns:
list[str]: parser and parser plugin names.
|
codesearchnet
|
def __init__(self, entry_type, tag_name, tag_kind, **kwargs):
super(TagProcessorWithEntryTypeAndFindByNamePlusKind,
self).__init__(**kwargs)
self.entry_type = entry_type
self.reference_tag_name = tag_name
self.reference_tag_kind = tag_kind
|
Initializer.
Args:
entry_type: A string that should be returned by get_entry_type()
for all (matching) tags.
tag_name: The unicode string name that matching tags should have.
tag_kind: The unicode string "kind" attribute that matching tags
should have.
|
juraj-google-style
|