code (string, lengths 20-4.93k) | docstring (string, lengths 33-1.27k) | source (string, 3 classes)
---|---|---|
async def get_mailbox(self, name: str, selected: SelectedMailbox = None) \
-> Tuple[MailboxInterface, Optional[SelectedMailbox]]:
...
|
Retrieves a :class:`~pymap.interfaces.mailbox.MailboxInterface`
object corresponding to an existing mailbox owned by the user. Raises
an exception if the mailbox does not yet exist.
Args:
name: The name of the mailbox.
selected: If applicable, the currently selected mailbox.
Raises:
:class:`~pymap.exceptions.MailboxNotFound`
|
juraj-google-style
|
def _GetApprovals(self,
approval_type,
offset,
count,
filter_func=None,
token=None):
approvals_base_urn = aff4.ROOT_URN.Add("users").Add(
token.username).Add("approvals").Add(approval_type)
all_children = aff4.FACTORY.RecursiveMultiListChildren([approvals_base_urn])
approvals_urns = []
for subject, children in all_children:
if children:
continue
approvals_urns.append(subject)
approvals_urns.sort(key=lambda x: x.age, reverse=True)
approvals = list(
aff4.FACTORY.MultiOpen(
approvals_urns,
mode="r",
aff4_type=aff4_security.Approval,
age=aff4.ALL_TIMES,
token=token))
approvals_by_urn = {}
for approval in approvals:
approvals_by_urn[approval.symlink_urn or approval.urn] = approval
cur_offset = 0
sorted_approvals = []
for approval_urn in approvals_urns:
try:
approval = approvals_by_urn[approval_urn]
except KeyError:
continue
if filter_func is not None and not filter_func(approval):
continue
cur_offset += 1
if cur_offset <= offset:
continue
if count and len(sorted_approvals) >= count:
break
sorted_approvals.append(approval)
subjects_urns = [a.Get(a.Schema.SUBJECT) for a in approvals]
subjects_by_urn = {}
for subject in aff4.FACTORY.MultiOpen(subjects_urns, mode="r", token=token):
subjects_by_urn[subject.urn] = subject
return sorted_approvals, subjects_by_urn
|
Gets all approvals for a given user and approval type.
Args:
approval_type: The type of approvals to get.
offset: The starting index within the collection.
count: The number of items to return.
filter_func: A predicate function, returning True if a specific approval
should be included in the result and False otherwise.
token: The token identifying the user.
Returns:
A tuple of (approvals, subjects_by_urn): the filtered, offset- and count-limited
list of approvals of the given type, and a dict mapping each approval's subject
URN to its opened object.
|
juraj-google-style
|
def get_avro_schema_from_table_schema(schema):
dict_table_schema = get_dict_table_schema(schema)
return bigquery_avro_tools.get_record_schema_from_dict_table_schema('root', dict_table_schema)
|
Transform the table schema into an Avro schema.
Args:
schema (str, dict, ~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableSchema):
The TableSchema to convert to Avro schema. This can either be a dict or
string or in the TableSchema format.
Returns:
Dict[str, Any]: An Avro schema, which can be used by fastavro.
|
github-repos
|
def zip_file(self, app_path, app_name, tmp_path):
zip_file = os.path.join(app_path, self.args.outdir, app_name)
zip_file_zip = '{}.zip'.format(zip_file)
zip_file_tcx = '{}.tcx'.format(zip_file)
shutil.make_archive(zip_file, 'zip', tmp_path, app_name)
shutil.move(zip_file_zip, zip_file_tcx)
self._app_packages.append(zip_file_tcx)
self.package_data['package'].append({'action': 'App Package:', 'output': zip_file_tcx})
|
Zip the App with tcex extension.
Args:
app_path (str): The path of the current project.
app_name (str): The name of the App.
tmp_path (str): The temp output path for the zip.
|
codesearchnet
|
def extend(self, table, keys=None):
if keys:
for k in keys:
if k not in self._Header():
raise IndexError("Unknown key: '%s'", k)
extend_with = []
for column in table.header:
if column not in self.header:
extend_with.append(column)
if not extend_with:
return
for column in extend_with:
self.AddColumn(column)
if not keys:
for row1, row2 in zip(self, table):
for column in extend_with:
row1[column] = row2[column]
return
for row1 in self:
for row2 in table:
for k in keys:
if row1[k] != row2[k]:
break
else:
for column in extend_with:
row1[column] = row2[column]
break
|
Extends all rows in the texttable.
The rows are extended with the new columns from the table.
Args:
table: A texttable, the table to extend this table by.
keys: A set, the set of columns to use as the key. If None, the
row index is used.
Raises:
IndexError: If key is not a valid column name.
|
juraj-google-style
|
def write_journal(self, journal_file_path):
with open(journal_file_path, "w") as jrn_file:
jrn_file.write(self._journal_contents)
|
Write the constructed journal in to the provided file.
Args:
journal_file_path (str): full path to output journal file
|
juraj-google-style
|
def import_aliases(alias_source):
alias_table = get_alias_table()
if is_url(alias_source):
alias_source = retrieve_file_from_url(alias_source)
alias_table.read(alias_source)
os.remove(alias_source)
else:
alias_table.read(alias_source)
_commit_change(alias_table)
|
Import aliases from a file or a URL.
Args:
alias_source: The source of the aliases. It can be a file path or a URL.
|
codesearchnet
|
def _prepare_feed_values(model, inputs, targets, sample_weights, mode):
if model._distribution_strategy:
if isinstance(inputs, (data_types.DatasetV1, data_types.DatasetV2)):
inputs = distributed_training_utils_v1.get_iterator(inputs, model._distribution_strategy)
def get_distributed_inputs():
return distributed_training_utils_v1._prepare_feed_values(model, inputs, targets, sample_weights, mode)
if context.executing_eagerly():
return get_distributed_inputs
else:
return get_distributed_inputs()
if isinstance(inputs, (data_types.DatasetV1, data_types.DatasetV2, iterator_ops.Iterator)):
inputs, targets, sample_weights = model._standardize_user_data(inputs, extract_tensors_from_dataset=True)
inputs = training_utils_v1.ModelInputs(inputs).as_list()
targets = list(targets or [])
sample_weights = list(sample_weights or [])
ins = inputs + targets + sample_weights
if mode == ModeKeys.TRAIN and (not isinstance(backend.symbolic_learning_phase(), int)):
ins += [True]
return ins
|
Prepare feed values to the model execution function.
Args:
model: Model to prepare feed values for.
inputs: List or dict of model inputs.
targets: Optional list of model targets.
sample_weights: Optional list of sample weight arrays.
mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.
Returns:
Feed values for the model in the given mode.
|
github-repos
|
def xray_driver_removed_handler(self, unused_channel, data):
gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(
data, 0)
driver_data = gcs_entries.Entries(0)
message = ray.gcs_utils.DriverTableData.GetRootAsDriverTableData(
driver_data, 0)
driver_id = message.DriverId()
logger.info("Monitor: "
"XRay Driver {} has been removed.".format(
binary_to_hex(driver_id)))
self._xray_clean_up_entries_for_driver(driver_id)
|
Handle a notification that a driver has been removed.
Args:
unused_channel: The message channel.
data: The message data.
|
juraj-google-style
|
def accept_alert(self, text=None, wait=None):
wait = (wait or capybara.default_max_wait_time)
with self.driver.accept_modal('alert', text=text, wait=wait):
(yield)
|
Execute the wrapped code, accepting an alert.
Args:
text (str | RegexObject, optional): Text to match against the text in the modal.
wait (int | float, optional): Maximum time to wait for the modal to appear after
executing the wrapped code.
Raises:
ModalNotFound: If a modal dialog hasn't been found.
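Example (a minimal sketch; assumes this method is exposed on a capybara-py session object, and the button name and text are illustrative):
>>> with session.accept_alert('Are you sure?'):
...     session.click_button('Delete')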
|
codesearchnet
|
def check_for_missing_options(config):
for section_name, section in config:
for option_name, option in section:
if option.required and option.value is None:
raise exc.MissingRequiredOption(
"Option {0} in namespace {1} is required.".format(
option_name,
section_name,
)
)
return config
|
Iter over a config and raise if a required option is still not set.
Args:
config (confpy.core.config.Configuration): The configuration object
to validate.
Raises:
MissingRequiredOption: If any required options are not set in the
configuration object.
Required options with default values are considered set and will not cause
this function to raise.
|
juraj-google-style
|
def condensed(network, state):
result = []
covered_nodes = set()
for c in reversed(sorted(complexes(network, state))):
if (not any(((n in covered_nodes) for n in c.subsystem.node_indices))):
result.append(c)
covered_nodes = (covered_nodes | set(c.subsystem.node_indices))
return result
|
Return a list of maximal non-overlapping complexes.
Args:
network (Network): The |Network| of interest.
state (tuple[int]): The state of the network (a binary tuple).
Returns:
list[SystemIrreducibilityAnalysis]: A list of |SIA| for non-overlapping
complexes with maximal |big_phi| values.
|
codesearchnet
|
def issubset(self, other):
other = self._cast_to_frameset(other)
if other is NotImplemented:
return NotImplemented
return self.items <= other.items
|
Check if the contents of `self` are a subset of the contents of `other`.
Args:
other (:class:`FrameSet`): The frame set to compare against.
Returns:
bool: True if `self` is a subset of `other`.
:class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet`
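Example (a minimal sketch, assuming `FrameSet` accepts a frame-range string as in Fileseq):
>>> FrameSet('1-5').issubset(FrameSet('1-10'))
True
>>> FrameSet('1-20').issubset(FrameSet('1-10'))
False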
|
juraj-google-style
|
def turtle_to_texture(turtle_program, turn_amount=DEFAULT_TURN, initial_angle=DEFAULT_INITIAL_ANGLE, resolution=1):
generator = branching_turtle_generator(turtle_program, turn_amount, initial_angle, resolution)
return texture_from_generator(generator)
|
Makes a texture from a turtle program.
Args:
turtle_program (str): a string representing the turtle program; see the
docstring of `branching_turtle_generator` for more details
turn_amount (float): amount to turn in degrees
initial_angle (float): initial orientation of the turtle
resolution (int): if provided, interpolation amount for visible lines
Returns:
texture: A texture.
|
codesearchnet
|
def load(self, steps_dir=None, step_file=None, step_list=None):
self._closed()
self.steps_library.load(steps_dir=steps_dir, step_file=step_file,
step_list=step_list)
|
Load CWL steps into the WorkflowGenerator's steps library.
Adds steps (command line tools and workflows) to the
``WorkflowGenerator``'s steps library. These steps can be used to
create workflows.
Args:
steps_dir (str): path to directory containing CWL files. All CWL in
the directory are loaded.
step_file (str): path to a file containing a CWL step that will be
added to the steps library.
|
juraj-google-style
|
def search(*, include_disabled=True, account_ids=None, account_type_id=None, properties=None, return_query=False):
qry = db.Account.order_by(desc(Account.enabled), Account.account_type_id, Account.account_name)
if (not include_disabled):
qry = qry.filter((Account.enabled == 1))
if account_ids:
if (type(account_ids) not in (list, tuple)):
account_ids = [account_ids]
qry = qry.filter(Account.account_id.in_(account_ids))
if account_type_id:
qry = qry.filter((Account.account_type_id == account_type_id))
if properties:
for (prop_name, value) in properties.items():
alias = aliased(AccountProperty)
qry = qry.join(alias, (Account.account_id == alias.account_id))
if (type(value) == list):
where_clause = []
for item in value:
where_clause.append((alias.value == item))
qry = qry.filter(and_((alias.name == prop_name), or_(*where_clause)).self_group())
else:
qry = qry.filter(and_((alias.name == prop_name), (alias.value == value)).self_group())
if return_query:
return qry
total = qry.count()
return (total, list(map(BaseAccount.get_typed_account, qry.all())))
|
Search for accounts based on the provided filters.
Args:
include_disabled (`bool`): Include disabled accounts (default: True)
account_ids: (`list` of `int`): List of account IDs
account_type_id (`int`): Account Type ID to limit results to
properties (`dict`): A `dict` containing property name and value pairs. Values can be either a str or a list
of strings, in which case a boolean OR search is performed on the values
return_query (`bool`): Returns the query object prior to adding the limit and offset functions. Allows for
sub-classes to amend the search feature with extra conditions. The calling function must handle pagination
on its own
Returns:
`list` of `Account`, `sqlalchemy.orm.Query`
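Example (a minimal call sketch; the property names and values are hypothetical):
>>> total, accounts = search(include_disabled=False,
...                          properties={'environment': ['prod', 'staging']})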
|
codesearchnet
|
def raw_filter(self, filters):
return SearchResult(self, self._api.get(self._href, **{"filter[]": filters}))
|
Sends all filters to the API.
Nothing fancy, just a wrapper. Any advanced functionality shall be implemented as another method.
Args:
filters: List of filters (strings)
Returns: :py:class:`SearchResult`
|
juraj-google-style
|
def verify_response(response, status_code, content_type=None):
status = int(response.status.split(' ', 1)[0])
if status != status_code:
return False
if content_type is None:
return True
for header, value in response.headers:
if header.lower() == 'content-type':
return value == content_type
return False
|
Verifies that a response has the expected status and content type.
Args:
response: The ResponseTuple to be checked.
status_code: An int, the HTTP status code to be compared with response
status.
content_type: A string with the acceptable Content-Type header value.
None allows any content type.
Returns:
True if both status_code and content_type match, else False.
|
juraj-google-style
|
def roll50(msg):
d = hex2bin(data(msg))
if (d[0] == '0'):
return None
sign = int(d[1])
value = bin2int(d[2:11])
if sign:
value = (value - 512)
angle = ((value * 45.0) / 256.0)
return round(angle, 1)
|
Roll angle, BDS 5,0 message
Args:
msg (String): 28 bytes hexadecimal message (BDS50) string
Returns:
float: angle in degrees,
negative->left wing down, positive->right wing down
|
codesearchnet
|
def _read_wrappers(self, name):
io_attr = getattr(self._io, name)
def read_wrapper(*args, **kwargs):
"Wrap all read calls to the stream object.\n\n We do this to track the read pointer separate from the write\n pointer. Anything that wants to read from the stream object\n while we're in append mode goes through this.\n\n Args:\n *args: pass through args\n **kwargs: pass through kwargs\n Returns:\n Wrapped stream object method\n "
self._io.seek(self._read_seek, self._read_whence)
ret_value = io_attr(*args, **kwargs)
self._read_seek = self._io.tell()
self._read_whence = 0
self._io.seek(0, 2)
return ret_value
return read_wrapper
|
Wrap a stream attribute in a read wrapper.
Returns a read_wrapper which tracks our own read pointer since the
stream object has no concept of a different read and write pointer.
Args:
name: The name of the attribute to wrap. Should be a read call.
Returns:
The read_wrapper function.
|
codesearchnet
|
def get_if_not_set(self, addresses):
with self._lock:
results = []
for add in addresses:
results.append(self._get_if_not_set(add))
return results
|
Returns the value at an address if it was an input to the txn but
never set. It returns None if that address was never set in the
merkle database, or if the address is not within the context.
Args:
addresses (list of str): The full 70 character addresses.
Returns:
(list): bytes at that address but not set within the context
|
codesearchnet
|
def __init__(self, arm_id_list):
[self.__beta_dist_dict.setdefault(key, BetaDist()) for key in arm_id_list]
|
Initialization.
Args:
arm_id_list: List of arm master IDs.
|
juraj-google-style
|
def daemon_mode(self, args, options):
cws = ControlWebSocket(self, args, options)
cws.start()
if 'cmdsock' in args and args['cmdsock']:
lcs = LocalControlSocket(self, args, options)
lcs.start()
lcs.join()
cws.join()
|
Open a ControlWebSocket to the SushiBar server and listen for remote commands.
Args:
args (dict): chef command line arguments
options (dict): additional compatibility mode options given on command line
|
juraj-google-style
|
def wwpn_alloc(self):
wwpn_int = self._wwpn_pool.alloc()
wwpn = ('AFFEAFFE0000' + '{:04X}'.format(wwpn_int))
return wwpn
|
Allocates a WWPN unique to this partition, in the range of
0xAFFEAFFE00008000 to 0xAFFEAFFE0000FFFF.
Returns:
string: The WWPN as 16 hexadecimal digits in upper case.
Raises:
ValueError: No more WWPNs available in that range.
|
codesearchnet
|
def parameterize(
self,
country: Optional[str] = "South Sudan",
state: Optional[str] = None,
year: Optional[int] = None,
month: Optional[int] = None,
unit: Optional[str] = None,
fallback_aggaxes: List[str] = ["year", "month"],
aggfunc: Callable = np.mean,
):
valid_axes = ("country", "state", "year", "month")
if any(map(lambda axis: axis not in valid_axes, fallback_aggaxes)):
raise ValueError(
"All elements of the fallback_aggaxes set must be one of the "
f"following: {valid_axes}"
)
for n in self.nodes(data=True):
for indicator in n[1]["indicators"].values():
indicator.mean, indicator.unit = get_indicator_value(
indicator,
country,
state,
year,
month,
unit,
fallback_aggaxes,
aggfunc,
)
indicator.stdev = 0.1 * abs(indicator.mean)
|
Parameterize the analysis graph.
Args:
country
year
month
fallback_aggaxes:
An iterable of strings denoting the axes upon which to perform
fallback aggregation if the desired constraints cannot be met.
aggfunc: The function that will be called to perform the
aggregation if there are multiple matches.
|
juraj-google-style
|
def _aspect_preserving_resize(image, smallest_side):
smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
shape = tf.shape(image)
height = shape[0]
width = shape[1]
new_height, new_width = _smallest_size_at_least(height, width, smallest_side)
image = tf.expand_dims(image, 0)
resized_image = tf.image.resize_images(
image, size=[new_height, new_width], method=tf.image.ResizeMethod.BICUBIC)
resized_image = tf.squeeze(resized_image)
resized_image.set_shape([None, None, 3])
return resized_image
|
Resize images preserving the original aspect ratio.
Args:
image: A 3-D image `Tensor`.
smallest_side: A python integer or scalar `Tensor` indicating the size of
the smallest side after resize.
Returns:
resized_image: A 3-D tensor containing the resized image.
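Example (a minimal sketch, assuming TF 1.x APIs to match the code above; the shapes are illustrative):
>>> import tensorflow as tf
>>> image = tf.random_uniform([480, 640, 3])  # height 480, width 640
>>> resized = _aspect_preserving_resize(image, 256)  # smallest side becomes 256, width scales to ~341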
|
juraj-google-style
|
def wait_for_job(self, job, poll=5):
desc = _wait_until_training_done(lambda last_desc: _train_done(self.sagemaker_client, job, last_desc),
None, poll)
self._check_job_status(job, desc, 'TrainingJobStatus')
return desc
|
Wait for an Amazon SageMaker training job to complete.
Args:
job (str): Name of the training job to wait for.
poll (int): Polling interval in seconds (default: 5).
Returns:
(dict): Return value from the ``DescribeTrainingJob`` API.
Raises:
ValueError: If the training job fails.
|
juraj-google-style
|
def addSingleTraitTerm(self,K=None,is_noise=False,normalize=True,Ks=None):
assert self.P == 1, 'Incompatible number of traits'
assert K is not None or is_noise, 'Specify covariance structure'
if is_noise:
assert self.noisPos is None, 'noise term already exists'
K = SP.eye(self.Nt)
self.noisPos = self.n_terms
else:
assert K.shape[0]==self.Nt, 'Incompatible shape'
assert K.shape[1]==self.Nt, 'Incompatible shape'
if Ks is not None:
assert Ks.shape[0]==self.N, 'Incompatible shape'
if normalize:
Norm = 1/K.diagonal().mean()
K *= Norm
if Ks is not None: Ks *= Norm
self.vd.addTerm(limix.CSingleTraitTerm(K))
if Ks is not None: self.setKstar(self.n_terms,Ks)
self.n_terms+=1
self.gp = None
self.init = False
self.fast = False
self.optimum = None
self.cache['Sigma'] = None
self.cache['Hessian'] = None
self.cache['Lparams'] = None
self.cache['paramsST']= None
|
Add a random effects term for single-trait models (no trait-trait covariance matrix).
Args:
K: NxN sample covariance matrix
is_noise: bool labeling the noise term (noise term has K=eye)
normalize: if True, K and Ks are scaled such that K.diagonal().mean()==1
Ks: NxN test cross covariance for predictions
|
juraj-google-style
|
def freeze(script_path, target_dir='frozen', **kw):
cmds = []
freeze_start_time = time.time()
logging.debug(('/\\%s%s Output%s/\\' % (('-' * 10), 'Pyinstaller', ('-' * 10))))
orig_dir = os.path.abspath('.')
script_path = os.path.abspath(script_path)
try:
os.chdir(target_dir)
cmds += _freeze_config()
pyinst_path = ('%s/thirdparty/pyinstaller' % __path__[0])
cur_cmd = ('python -O %s/pyinstaller.py %s --skip-configure' % (pyinst_path, script_path))
cmds.append(cur_cmd)
if _run(cur_cmd):
_freeze_config(force=True)
cur_cmd = ('python -O %s/pyinstaller.py %s' % (pyinst_path, script_path))
_run(cur_cmd)
finally:
os.chdir(orig_dir)
logging.debug(('\\/%s%s Output%s\\/' % (('-' * 10), 'Pyinstaller', ('-' * 10))))
logging.info(('Pyinstaller took [%f] seconds' % (time.time() - freeze_start_time)))
return cmds
|
Wraps pyinstaller and provides an easy to use interface
Args:
script_path: Absolute path to python script to be frozen.
Returns:
List of freeze commands ran
Raises:
subprocess.CalledProcessError: Freeze error.
OSError: Freeze not found.
|
codesearchnet
|
def list(self, **kwargs):
resp = self.client.api.secrets(**kwargs)
return [self.prepare_model(obj) for obj in resp]
|
List secrets. Similar to the ``docker secret ls`` command.
Args:
filters (dict): Server-side list filtering options.
Returns:
(list of :py:class:`Secret`): The secrets.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
juraj-google-style
|
def get_point(self, *position):
array = _ffi.new(self._arrayType, position)
if self._useOctaves:
return ((self._noiseFunc(self._noise, array, self._octaves) + 1) * 0.5)
return ((self._noiseFunc(self._noise, array) + 1) * 0.5)
|
Return the noise value of a specific position.
Example usage: value = noise.getPoint(x, y, z)
Args:
position (Tuple[float, ...]): The point to sample at.
Returns:
float: The noise value at position.
This will be a floating point in the 0.0-1.0 range.
|
codesearchnet
|
def select_files(self, what='o'):
choices = collections.OrderedDict([('i', self.input_file), ('o', self.output_file), ('f', self.files_file), ('j', self.job_file), ('l', self.log_file), ('e', self.stderr_file), ('q', self.qout_file)])
if (what == 'all'):
return [getattr(v, 'path') for v in choices.values()]
selected = []
for c in what:
try:
selected.append(getattr(choices[c], 'path'))
except KeyError:
logger.warning(('Wrong keyword %s' % c))
return selected
|
Helper function used to select the files of a task.
Args:
what: string with the list of characters selecting the file type
Possible choices:
i ==> input_file,
o ==> output_file,
f ==> files_file,
j ==> job_file,
l ==> log_file,
e ==> stderr_file,
q ==> qout_file,
all ==> all files.
|
codesearchnet
|
def probe_services(self, handle, conn_id, callback):
self._command_task.async_command(['_probe_services', handle], callback,
{'connection_id': conn_id, 'handle': handle})
|
Given a connected device, probe for its GATT services and characteristics
Args:
handle (int): a handle to the connection on the BLED112 dongle
conn_id (int): a unique identifier for this connection on the DeviceManager
that owns this adapter.
callback (callable): Callback to be called when this procedure finishes
|
juraj-google-style
|
def make_grid(tensor, nrow=8, padding=2, pad_value=0):
if (not (isinstance(tensor, np.ndarray) or (isinstance(tensor, list) and all((isinstance(t, np.ndarray) for t in tensor))))):
raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))
if isinstance(tensor, list):
tensor = np.stack(tensor, 0)
if (tensor.ndim == 2):
tensor = tensor.reshape((1, tensor.shape[0], tensor.shape[1]))
if (tensor.ndim == 3):
if (tensor.shape[0] == 1):
tensor = np.concatenate((tensor, tensor, tensor), 0)
tensor = tensor.reshape((1, tensor.shape[0], tensor.shape[1], tensor.shape[2]))
if ((tensor.ndim == 4) and (tensor.shape[1] == 1)):
tensor = np.concatenate((tensor, tensor, tensor), 1)
if (tensor.shape[0] == 1):
return np.squeeze(tensor)
nmaps = tensor.shape[0]
xmaps = min(nrow, nmaps)
ymaps = int(math.ceil((float(nmaps) / xmaps)))
(height, width) = (int((tensor.shape[2] + padding)), int((tensor.shape[3] + padding)))
grid = (np.ones((3, ((height * ymaps) + padding), ((width * xmaps) + padding))) * pad_value)
k = 0
for y in range(ymaps):
for x in range(xmaps):
if (k >= nmaps):
break
grid[:, (y * height) + padding:(y + 1) * height, (x * width) + padding:(x + 1) * width] = tensor[k]
k = (k + 1)
return grid
|
Make a grid of images, via numpy.
Args:
tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)
or a list of images all of the same size.
nrow (int, optional): Number of images displayed in each row of the grid.
The Final grid size is (B / nrow, nrow). Default is 8.
padding (int, optional): amount of padding. Default is 2.
pad_value (float, optional): Value for the padded pixels.
|
codesearchnet
|
def bipartition(seq):
return [(tuple(seq[i] for i in part0_idx),
tuple(seq[j] for j in part1_idx))
for part0_idx, part1_idx in bipartition_indices(len(seq))]
|
Return a list of bipartitions for a sequence.
Args:
seq (Iterable): The sequence to partition.
Returns:
list[tuple[tuple]]: A list of tuples containing each of the two
partitions.
Example:
>>> bipartition((1,2,3))
[((), (1, 2, 3)), ((1,), (2, 3)), ((2,), (1, 3)), ((1, 2), (3,))]
|
juraj-google-style
|
def optimize(self, sensor_graph, model):
passes = self._order_pases(self._known_passes.keys())
for opt_name in passes:
rerun = True
pass_instance = self._known_passes[opt_name][0]()
while rerun:
rerun = pass_instance.run(sensor_graph, model=model)
|
Optimize a sensor graph by running optimization passes.
The passes are run one at a time and modify the sensor graph
for future passes.
Args:
sensor_graph (SensorGraph): The graph to be optimized
model (DeviceModel): The device that we are optimizing
for, that OptimizationPass objects are free to use
to guide their optimizations.
|
juraj-google-style
|
def definition_name(cls):
outer_definition_name = cls.outer_definition_name()
if (outer_definition_name is None):
return six.text_type(cls.__name__)
return (u'%s.%s' % (outer_definition_name, cls.__name__))
|
Helper method for creating definition name.
Names will be generated to include the classes package name,
scope (if the class is nested in another definition) and class
name.
By default, the package name for a definition is derived from
its module name. However, this value can be overriden by
placing a 'package' attribute in the module that contains the
definition class. For example:
package = 'some.alternate.package'
class MyMessage(Message):
...
>>> MyMessage.definition_name()
some.alternate.package.MyMessage
Returns:
Dot-separated fully qualified name of definition.
|
codesearchnet
|
def _initialize_mtf_dimension_name_to_size_gcd(self, mtf_graph):
mtf_dimension_name_to_size_gcd = {}
for mtf_operation in mtf_graph.operations:
for mtf_tensor in mtf_operation.outputs:
for mtf_dimension in mtf_tensor.shape.dims:
mtf_dimension_name_to_size_gcd[mtf_dimension.name] = fractions.gcd(mtf_dimension_name_to_size_gcd.get(mtf_dimension.name, mtf_dimension.size), mtf_dimension.size)
return mtf_dimension_name_to_size_gcd
|
Initializer for self._mtf_dimension_name_to_size_gcd.
Args:
mtf_graph: an mtf.Graph.
Returns:
A {string: int}, mapping the name of an MTF dimension to the greatest
common divisor of all the sizes it has. All these sizes being evenly
divisible by some x is equivalent to the GCD being divisible by x.
|
codesearchnet
|
def _cast_indexed_slice_indices(a, b):
if isinstance(a, indexed_slices.IndexedSlices) and isinstance(b, indexed_slices.IndexedSlices) and (a.indices.dtype != b.indices.dtype):
a._indices = math_ops.cast(a.indices, dtypes.int64)
b._indices = math_ops.cast(b.indices, dtypes.int64)
|
Cast IndexedSlice.indices from int32 to int64 where necessary.
If `a` and `b` are both IndexedSlices, and their indices have different
dtypes, then cast both their dtypes to `int64` (modifies `a` and `b`
in-place). Otherwise, does nothing.
Args:
a: A value, which may be an IndexedSlices.
b: A value, which may be an IndexedSlices.
|
github-repos
|
def translate_pname(self, pname: PrefName, mid: ModuleId) -> QualName:
loc, nid = self.resolve_pname(pname, mid)
return (loc, self.namespace(nid))
|
Translate a prefixed name to a qualified name.
Args:
pname: Name with an optional prefix.
mid: Identifier of the module in which `pname` appears.
Raises:
ModuleNotRegistered: If `mid` is not registered in the data model.
UnknownPrefix: If the prefix specified in `pname` is not declared.
|
juraj-google-style
|
def get(self, field_paths=None, transaction=None):
if isinstance(field_paths, six.string_types):
raise ValueError("'field_paths' must be a sequence of paths, not a string.")
if (field_paths is not None):
mask = common_pb2.DocumentMask(field_paths=sorted(field_paths))
else:
mask = None
firestore_api = self._client._firestore_api
try:
document_pb = firestore_api.get_document(self._document_path, mask=mask, transaction=_helpers.get_transaction_id(transaction), metadata=self._client._rpc_metadata)
except exceptions.NotFound:
data = None
exists = False
create_time = None
update_time = None
else:
data = _helpers.decode_dict(document_pb.fields, self._client)
exists = True
create_time = document_pb.create_time
update_time = document_pb.update_time
return DocumentSnapshot(reference=self, data=data, exists=exists, read_time=None, create_time=create_time, update_time=update_time)
|
Retrieve a snapshot of the current document.
See :meth:`~.firestore_v1beta1.client.Client.field_path` for
more information on **field paths**.
If a ``transaction`` is used and it already has write operations
added, this method cannot be used (i.e. read-after-write is not
allowed).
Args:
field_paths (Optional[Iterable[str, ...]]): An iterable of field
paths (``.``-delimited list of field names) to use as a
projection of document fields in the returned results. If
no value is provided, all fields will be returned.
transaction (Optional[~.firestore_v1beta1.transaction.\
Transaction]): An existing transaction that this reference
will be retrieved in.
Returns:
~.firestore_v1beta1.document.DocumentSnapshot: A snapshot of
the current document. If the document does not exist at
the time of `snapshot`, the snapshot `reference`, `data`,
`update_time`, and `create_time` attributes will all be
`None` and `exists` will be `False`.
|
codesearchnet
|
def _is_callable(self, node, obj):
val = obj.data
if isinstance(val, abstract.AMBIGUOUS_OR_EMPTY):
return (node, None)
if isinstance(val, abstract.Class):
return (node, True)
node, ret = self.ctx.attribute_handler.get_attribute(node, val, '__call__', valself=obj)
return (node, ret is not None)
|
Check if the object is callable.
Args:
node: The given node.
obj: A BaseValue, the arg of a callable() call.
Returns:
(node, result) where result = True if the object is callable,
False if it is not, and None if it is ambiguous.
|
github-repos
|
def sg_accuracy(tensor, opt):
assert opt.target is not None, 'target is mandatory.'
opt += tf.sg_opt(k=1)
out = tf.identity(tf.equal(tensor.sg_argmax(), tf.cast(opt.target, tf.int64)).sg_float(), name='acc')
return out
|
r"""Returns accuracy of predictions.
Args:
tensor: A `Tensor`. Probability distributions or unscaled prediction scores.
opt:
target: A 'Tensor`. Labels.
Returns:
A `Tensor` of the same shape as `tensor`. Each value will be 1 if correct else 0.
For example,
```
tensor = [[20.1, 18, -4.2], [0.04, 21.1, 31.3]]
target = [[0, 1]]
tensor.sg_accuracy(target=target) => [[ 1. 0.]]
```
|
juraj-google-style
|
def run_with_time_limit(self, cmd, time_limit=SUBMISSION_TIME_LIMIT):
if time_limit < 0:
return self.run_without_time_limit(cmd)
container_name = str(uuid.uuid4())
cmd = [DOCKER_BINARY, 'run', DOCKER_NVIDIA_RUNTIME,
'--detach', '--name', container_name] + cmd
logging.info('Docker command: %s', ' '.join(cmd))
logging.info('Time limit %d seconds', time_limit)
retval = subprocess.call(cmd)
start_time = time.time()
elapsed_time_sec = 0
while is_docker_still_running(container_name):
elapsed_time_sec = int(time.time() - start_time)
if elapsed_time_sec < time_limit:
time.sleep(1)
else:
kill_docker_container(container_name)
logging.warning('Submission was killed because run out of time')
logging.info('Elapsed time of submission: %d', elapsed_time_sec)
logging.info('Docker retval: %d', retval)
if retval != 0:
logging.warning('Docker returned non-zero retval: %d', retval)
raise WorkerError('Docker returned non-zero retval ' + str(retval))
return elapsed_time_sec
|
Runs docker command and enforces time limit.
Args:
cmd: list with the command line arguments which are passed to docker
binary after run
time_limit: time limit, in seconds. Negative value means no limit.
Returns:
how long it took to run submission in seconds
Raises:
WorkerError: if error occurred during execution of the submission
|
juraj-google-style
|
def __init__(self, video, quality=None, download_dir=None, templates=None) -> None:
self.video = video
self.quality = quality or DEFAULT_OPTIONS['quality']
self.download_dir = download_dir or DEFAULT_OPTIONS['download_dir']
self.templates = templates or DEFAULT_OPTIONS['templates']
if self.quality not in ('worst', 'best'):
raise WrongQualityError
|
Create a VideoDownloader for a given video.
Args:
video (Video): Video object.
quality (str): Quality of the video (best/worst). Audio quality defaults to best.
download_dir (str): Destination directory for the downloaded video.
templates (dict): Dictionary of templates needed to generate a download path.
|
juraj-google-style
|
def build_subresource_uri(self, resource_id_or_uri=None, subresource_id_or_uri=None, subresource_path=''):
if (subresource_id_or_uri and ('/' in subresource_id_or_uri)):
return subresource_id_or_uri
else:
if (not resource_id_or_uri):
raise exceptions.HPOneViewValueError(RESOURCE_ID_OR_URI_REQUIRED)
resource_uri = self.build_uri(resource_id_or_uri)
uri = '{}/{}/{}'.format(resource_uri, subresource_path, str((subresource_id_or_uri or '')))
uri = uri.replace('//', '/')  # collapse duplicate slashes in the built URI
if uri.endswith('/'):
uri = uri[:(- 1)]
return uri
|
Helps to build a URI with resource path and its sub resource path.
Args:
resource_id_or_uri: ID/URI of the main resource.
subresource_id_or_uri: ID/URI of the sub resource.
subresource_path: Sub resource path to be added with the URI.
Returns:
The built URI.
|
codesearchnet
|
def config_tab(backend):
status = backend.status().to_dict()
config = backend.configuration().to_dict()
config_dict = {**status, **config}
upper_list = ['n_qubits', 'operational',
'status_msg', 'pending_jobs',
'basis_gates', 'local', 'simulator']
lower_list = list(set(config_dict.keys()).difference(upper_list))
lower_list.remove('gates')
upper_str = "<table>"
upper_str +=
footer = "</table>"
upper_str += "<tr><th>Property</th><th>Value</th></tr>"
for key in upper_list:
upper_str += "<tr><td><font style='font-weight:bold'>%s</font></td><td>%s</td></tr>" % (
key, config_dict[key])
upper_str += footer
upper_table = widgets.HTML(
value=upper_str, layout=widgets.Layout(width='100%', grid_area='left'))
image_widget = widgets.Output(
layout=widgets.Layout(display='flex-inline', grid_area='right',
padding='10px 10px 10px 10px',
width='auto', max_height='300px',
align_items='center'))
if not config['simulator']:
with image_widget:
gate_map = plot_gate_map(backend)
display(gate_map)
plt.close(gate_map)
lower_str = "<table>"
lower_str +=
lower_str += "<tr><th></th><th></th></tr>"
for key in lower_list:
if key != 'name':
lower_str += "<tr><td>%s</td><td>%s</td></tr>" % (
key, config_dict[key])
lower_str += footer
lower_table = widgets.HTML(value=lower_str,
layout=widgets.Layout(
width='auto',
grid_area='bottom'))
grid = widgets.GridBox(children=[upper_table, image_widget, lower_table],
layout=widgets.Layout(
grid_template_rows='auto auto',
grid_template_columns='25% 25% 25% 25%',
grid_template_areas=,
grid_gap='0px 0px'))
return grid
|
The backend configuration widget.
Args:
backend (IBMQbackend): The backend.
Returns:
grid: A GridBox widget.
|
juraj-google-style
|
def report_progress(stream=None):
if stream is None:
stream = sys.stderr
for reporter in _reporters:
reporter(stream)
|
Report progress from any currently installed reporters.
Args:
stream: The text stream (default: sys.stderr) to which
progress will be reported.
|
juraj-google-style
|
def get_raw_data_feature_spec(self, input_types: dict[str, type]) -> dict[str, tf.io.VarLenFeature]:
raw_data_feature_spec = {}
for key, value in input_types.items():
raw_data_feature_spec[key] = self._get_raw_data_feature_spec_per_column(typ=value, col_name=key)
return raw_data_feature_spec
|
Return a raw data feature spec to be used with
tft_beam.AnalyzeAndTransformDataset.
Args:
input_types: A dictionary of column names and types.
Returns:
A dictionary mapping each column name to a tf.io.VarLenFeature.
|
github-repos
|
def get_tensor(self):
return load_tensor_from_event_file(self.file_path)
|
Get tensor from the dump (`Event`) file.
Returns:
The tensor loaded from the dump (`Event`) file.
|
github-repos
|
def WriteValuesToJSONFile(self, state, values):
value_counters = {}
max_post_size = config.CONFIG["BigQuery.max_file_post_size"]
for value in values:
class_name = value.__class__.__name__
output_tracker, created = self._GetTempOutputFileHandles(class_name)
value_counters[class_name] = value_counters.get(class_name, -1) + 1
if not value_counters[class_name] % max_post_size:
output_tracker.gzip_filehandle.flush()
if os.path.getsize(output_tracker.gzip_filehandle.name) > max_post_size:
self.Flush(state)
value_counters[class_name] = 0
output_tracker, created = self._GetTempOutputFileHandles(class_name)
if not output_tracker.schema:
output_tracker.schema = self.RDFValueToBigQuerySchema(value)
if created:
self._WriteJSONValue(output_tracker.gzip_filehandle, value)
else:
self._WriteJSONValue(
output_tracker.gzip_filehandle, value, delimiter="\n")
for output_tracker in itervalues(self.temp_output_trackers):
output_tracker.gzip_filehandle.flush()
|
Write newline separated JSON dicts for each value.
We write each dict separately so we don't have to hold all of the output
streams in memory. We open and close the JSON array manually with [].
Args:
state: rdf_protodict.AttributedDict with the plugin's state.
values: RDF values to export.
|
juraj-google-style
|
def add_rel(self, source_id, target_id, rel):
self.neo_db.add_rel(source_id, target_id, rel)
|
Add a relationship: source, target must already exist (see add_node)
'rel' is the name of the relationship 'contains' or whatever.
Args:
source_id: the unique node_id of the source
target_id: the unique node_id of the target
rel: name of the relationship
Returns:
Nothing
|
juraj-google-style
|
def convert_to_scl(spec, scl_options):
scl_options['skip_functions'] = scl_options['skip_functions'].split(',')
scl_options['meta_spec'] = None
convertor = SclConvertor(options=scl_options)
return str(convertor.convert(spec))
|
Convert spec into SCL-style spec file using `spec2scl`.
Args:
spec: (str) a spec file
scl_options: (dict) SCL options provided
Returns:
A converted spec file
|
codesearchnet
|
def signed_to_twos_comp(val: int, n_bits: int) -> int:
assert ((n_bits % 8) == 0), 'Must specify a whole number of bytes'
n_bytes = (n_bits // 8)
b = val.to_bytes(n_bytes, byteorder=sys.byteorder, signed=True)
return int.from_bytes(b, byteorder=sys.byteorder, signed=False)
|
Convert a signed integer to its "two's complement" representation.
Args:
val: signed integer
n_bits: number of bits (which must reflect a whole number of bytes)
Returns:
unsigned integer: two's complement version
|
codesearchnet
|
def _validate_alias_file_path(alias_file_path):
if not os.path.exists(alias_file_path):
raise CLIError(ALIAS_FILE_NOT_FOUND_ERROR)
if os.path.isdir(alias_file_path):
raise CLIError(ALIAS_FILE_DIR_ERROR.format(alias_file_path))
|
Make sure the alias file path is neither non-existent nor a directory.
Args:
alias_file_path: The alias file path to import aliases from.
|
juraj-google-style
|
def _InitializeURL(self, upload_url, current_content_length):
if (current_content_length != 0):
return upload_url
headers = {'Content-Type': 'application/xml', 'Content-Length': 0, 'x-goog-resumable': 'start'}
req = urllib2.Request(upload_url, data={}, headers=headers)
resp = self._url_opener.open(req)
return resp.headers['location']
|
Ensures that the URL used to upload operations is properly initialized.
Args:
upload_url: a string url.
current_content_length: an integer identifying the current content length
of data uploaded to the Batch Job.
Returns:
An initialized string URL, or the provided string URL if the URL has
already been initialized.
|
codesearchnet
|
def add_dataset(self, task_name, dataset=None, *, aliases=None):
self._datasets.append(dataset if dataset is not None else TaskData())
last_index = len(self._datasets) - 1
self._aliases[task_name] = last_index
if aliases is not None:
for alias in aliases:
self._aliases[alias] = last_index
if len(self._datasets) == 1:
self._default_index = 0
|
Add a new dataset to the MultiTaskData.
Args:
task_name (str): The name of the task from which the dataset was received.
dataset (TaskData): The dataset that should be added.
aliases (list): A list of aliases that should be registered with the dataset.
|
juraj-google-style
|
def restore_component(self, component_name, save_path):
component = self.get_component(component_name=component_name)
self._validate_savable(component=component, component_name=component_name)
component.restore(sess=self.session, save_path=save_path)
|
Restores a component's parameters from a save location.
Args:
component_name: The component to restore.
save_path: The save location.
|
codesearchnet
|
def from_string(cls, s):
log.debug("Parsing email from string")
message = email.message_from_string(s)
return cls(message)
|
Init a new object from a string.
Args:
s (string): raw email
Returns:
Instance of MailParser
|
juraj-google-style
|
def _copy_assets(src_path: str, dst_path: str) -> None:
for assets_dir_name in [_ASSETS_DIR, _ASSETS_EXTRA_DIR]:
src_assets_path = file_io.join(src_path, assets_dir_name)
if not file_io.file_exists_v2(src_assets_path):
continue
dst_assets_path = file_io.join(dst_path, assets_dir_name)
file_io.create_dir_v2(dst_assets_path)
for curr_dir, _, files in file_io.walk_v2(src_assets_path):
for asset_file_name in files:
src_asset_file = file_io.join(curr_dir, asset_file_name)
curr_dst_dir = curr_dir.replace(src_assets_path, dst_assets_path)
dst_asset_file = file_io.join(curr_dst_dir, asset_file_name)
file_io.copy_v2(src_asset_file, dst_asset_file)
logging.info('Copied asset file: %s -> %s', src_asset_file, dst_asset_file)
|
Copies the assets directory of the saved model.
Clones the contents of the assets/ directory from the source saved model
directory to the destination saved model directory. Nothing will be copied if
there are no assets directory in the source directory.
Args:
src_path: Source saved model directory.
dst_path: Destination saved model directory. This directory must exist.
|
github-repos
|
def _LinearMapByteStream(
self, byte_stream, byte_offset=0, context=None, **unused_kwargs):
members_data_size = self._data_type_definition.GetByteSize()
self._CheckByteStreamSize(byte_stream, byte_offset, members_data_size)
try:
struct_tuple = self._operation.ReadFrom(byte_stream[byte_offset:])
struct_values = []
for attribute_index, value in enumerate(struct_tuple):
data_type_map = self._data_type_maps[attribute_index]
member_definition = self._data_type_definition.members[attribute_index]
value = data_type_map.MapValue(value)
supported_values = getattr(member_definition, 'values', None)
if supported_values and value not in supported_values:
raise errors.MappingError(
'Value: {0!s} not in supported values: {1:s}'.format(
value, ', '.join([
'{0!s}'.format(value) for value in supported_values])))
struct_values.append(value)
mapped_value = self._structure_values_class(*struct_values)
except Exception as exception:
error_string = (
'Unable to read: {0:s} from byte stream at offset: {1:d} '
'with error: {2!s}').format(
self._data_type_definition.name, byte_offset, exception)
raise errors.MappingError(error_string)
if context:
context.byte_size = members_data_size
return mapped_value
|
Maps a data type sequence on a byte stream.
Args:
byte_stream (bytes): byte stream.
byte_offset (Optional[int]): offset into the byte stream where to start.
context (Optional[DataTypeMapContext]): data type map context.
Returns:
object: mapped value.
Raises:
MappingError: if the data type definition cannot be mapped on
the byte stream.
|
juraj-google-style
|
def plot_histogram(self, freq=None, figsize=(15, 5), title=None, bins=20, **kwargs):
if (title is None):
title = self._get_default_plot_title(self.name, freq, 'Return Histogram')
ser = self._get_series(freq).to_returns().dropna()
plt.figure(figsize=figsize)
ax = ser.hist(bins=bins, figsize=figsize, normed=True, **kwargs)
ax.set_title(title)
plt.axvline(0, linewidth=4)
return ser.plot(kind='kde')
|
Plots a histogram of returns given a return frequency.
Args:
* freq (str): Data frequency used for display purposes.
This will dictate the type of returns
(daily returns, monthly, ...)
Refer to pandas docs for valid period strings.
* figsize ((x,y)): figure size
* title (str): Title if default not appropriate
* bins (int): number of bins for the histogram
* kwargs: passed to pandas' hist method
|
codesearchnet
|
def _remove_jmp_to_get_anext_and_merge(blocks: list[Block], processed_blocks: set[Block]) -> list[Block]:
op_to_block = {}
merge_list = []
for block_idx, block in enumerate(blocks):
for code in block.code:
op_to_block[code] = block_idx
for block_idx, block in enumerate(blocks):
for code in block.code:
if code.end_async_for_target:
merge_list.append((block_idx, op_to_block[code.end_async_for_target]))
map_target = {}
for block_idx, block_idx_to_merge in merge_list:
jump_back_op = blocks[block_idx].code.pop()
blocks[block_idx].code.extend(blocks[block_idx_to_merge].code)
map_target[jump_back_op] = blocks[block_idx_to_merge].code[0]
if block_idx_to_merge < len(blocks) - 1:
blocks[block_idx].connect_outgoing(blocks[block_idx_to_merge + 1])
processed_blocks.add(blocks[block_idx])
to_delete = sorted({to_idx for _, to_idx in merge_list}, reverse=True)
for block_idx in to_delete:
del blocks[block_idx]
for block in blocks:
replace_op = map_target.get(block.code[-1].target, None)
if replace_op:
block.code[-1].target = replace_op
return blocks
|
Remove JUMP_BACKWARD instructions to GET_ANEXT instructions.
And also merge the block that contains the END_ASYNC_FOR which is part of the
same loop of the GET_ANEXT and JUMP_BACKWARD construct, to the JUMP_BACKWARD
instruction. This is to ignore the JUMP_BACKWARD because in pytype's eyes it's
useless (as it'll jump back to block that it already executed), and also
this is the way to make pytype run the code of END_ASYNC_FOR and whatever
comes afterwards.
Args:
blocks: A list of Block instances.
Returns:
A list of Block instances after the removal and merge.
|
github-repos
|
def HumanReadableType(self):
if isinstance(self.service_type, py2to3.STRING_TYPES):
return self.service_type
return human_readable_service_enums.SERVICE_ENUMS['Type'].get(self.service_type, '{0:d}'.format(self.service_type))
|
Return a human readable string describing the type value.
Returns:
str: human readable description of the type value.
|
codesearchnet
|
def __contains__(self, item):
if item not in self._contains_cache:
self._contains_cache[item] = self._contains(item)
return self._contains_cache[item]
|
Get result of _contains, cache it and return it.
Args:
item (Package/Module): a package or module.
Returns:
bool: True if self contains item, False otherwise.
|
juraj-google-style
|
def build_attachment(self, text, target, attachment, thread):
attachment = {'as_user': True, 'text': text, 'channel': target, 'attachments': [{'fallback': text, 'image_url': attachment}]}
if thread:
attachment['thread_ts'] = thread
return attachment
|
Builds a slack attachment.
Args:
text (str): Message text; also used as the attachment fallback.
target (str): Channel to post the attachment to.
attachment (str): URL of the image to attach.
thread (str): Timestamp of the thread to reply in, if any.
Returns:
attachment (dict): attachment data.
codesearchnet
|
def AddContract(self, contract):
if not contract.PublicKeyHash.ToBytes() in self._keys.keys():
raise Exception('Invalid operation - public key mismatch')
self._contracts[contract.ScriptHash.ToBytes()] = contract
if contract.ScriptHash in self._watch_only:
self._watch_only.remove(contract.ScriptHash)
|
Add a contract to the wallet.
Args:
contract (Contract): a contract of type neo.SmartContract.Contract.
Raises:
Exception: Invalid operation - public key mismatch.
|
juraj-google-style
|
def download_artifacts_from_gcs(bucket_name, prefix, local_path):
client = Client()
bucket = client.get_bucket(bucket_name)
blobs = [blob.name for blob in bucket.list_blobs(prefix=prefix)]
_ = transfer_manager.download_many_to_path(bucket, blobs, destination_directory=local_path)
|
Downloads artifacts from GCS to the local file system.
Args:
bucket_name: The name of the GCS bucket to download from.
prefix: Prefix of GCS objects to download.
local_path: The local path to download the folder to.
|
github-repos
|
class Flatten(PTransform):
def __init__(self, **kwargs):
super().__init__()
self.pipeline = kwargs.pop('pipeline', None)
if kwargs:
raise ValueError('Unexpected keyword arguments: %s' % list(kwargs))
def _extract_input_pvalues(self, pvalueish):
try:
pvalueish = tuple(pvalueish)
except TypeError:
raise ValueError('Input to Flatten must be an iterable. Got a value of type %s instead.' % type(pvalueish))
return (pvalueish, pvalueish)
def expand(self, pcolls):
windowing = self.get_windowing(pcolls)
for pcoll in pcolls:
self._check_pcollection(pcoll)
if pcoll.windowing != windowing:
_LOGGER.warning('All input pcollections must have the same window. Windowing for flatten set to %s, windowing of pcoll %s set to %s', windowing, pcoll, pcoll.windowing)
is_bounded = all((pcoll.is_bounded for pcoll in pcolls))
return pvalue.PCollection(self.pipeline, is_bounded=is_bounded)
def infer_output_type(self, input_type):
return input_type
def to_runner_api_parameter(self, context):
return (common_urns.primitives.FLATTEN.urn, None)
@staticmethod
def from_runner_api_parameter(unused_ptransform, unused_parameter, unused_context):
return Flatten()
|
Merges several PCollections into a single PCollection.
Copies all elements in 0 or more PCollections into a single output
PCollection. If there are no input PCollections, the resulting PCollection
will be empty (but see also kwargs below).
Args:
**kwargs: Accepts a single named argument "pipeline", which specifies the
pipeline that "owns" this PTransform. Ordinarily Flatten can obtain this
information from one of the input PCollections, but if there are none (or
if there's a chance there may be none), this argument is the only way to
provide pipeline information and should be considered mandatory.
|
github-repos
|
def from_file(cls, vert, frag, **kwargs):
vert_program = open(vert).read()
frag_program = open(frag).read()
return cls(vert=vert_program, frag=frag_program, **kwargs)
|
Reads the shader programs, given the vert and frag filenames
Arguments:
- vert (str): The filename of the vertex shader program (ex: 'vertshader.vert')
- frag (str): The filename of the fragment shader program (ex: 'fragshader.frag')
Returns:
- shader (Shader): The Shader using these files.
|
codesearchnet
|
def export_warnings(self, export_file):
warn_filepath = op.dirname(export_file)
warn_filename = op.splitext(op.basename(export_file))[0]
self._add_entry(templates.EXPORT_WARNINGS
.format(warnings_export_path=warn_filepath,
warnings_export_file=warn_filename))
|
Append an export warnings entry to the journal.
This instructs Revit to export warnings from the opened model.
Currently Revit will stop journal execution if the model does not
have any warnings and the export warnings UI button is disabled.
Args:
export_file (str): full path of the ouput html file
|
juraj-google-style
|
def InventoryReceived(self, inventory):
if (inventory.Hash.ToBytes() in self._MissedBlocks):
self._MissedBlocks.remove(inventory.Hash.ToBytes())
if isinstance(inventory, MinerTransaction):
return False
if (type(inventory) is Block):
if (BC.Default() is None):
return False
if BC.Default().ContainsBlock(inventory.Index):
return False
if (not BC.Default().AddBlock(inventory)):
return False
elif (not inventory.Verify(self.MemPool.values())):
return False
|
Process a received inventory.
Args:
inventory (neo.Network.Inventory): expect a Block type.
Returns:
bool: True if processed and verified. False otherwise.
|
codesearchnet
|
def clean(deltox=False):
basedir = dirname(__file__)
print(cyan('delete temp files and dirs for packaging'))
local(flo(
'rm -rf '
'{basedir}/.eggs/ '
'{basedir}/utlz.egg-info/ '
'{basedir}/dist '
'{basedir}/README '
'{basedir}/build/ '
))
print(cyan('\ndelete temp files and dirs for editing'))
local(flo(
'rm -rf '
'{basedir}/.cache '
'{basedir}/.ropeproject '
))
print(cyan('\ndelete bytecode compiled versions of the python src'))
local(flo('find {basedir}/utlz {basedir}/tests ') +
'\( -name \*pyc -o -name \*.pyo -o -name __pycache__ '
'-o -name \*.so -o -name \*.o -o -name \*.c \) '
'-prune '
'-exec rm -rf {} +')
if deltox:
print(cyan('\ndelete tox virual environments'))
local(flo('cd {basedir} && rm -rf .tox/'))
|
Delete temporary files not under version control.
Args:
deltox: If True, delete virtual environments used by tox
|
juraj-google-style
|
def transform(self, col):
out = pd.DataFrame(index=col.index)
out[self.col_name] = col.apply(self.get_val, axis=1)
if self.subtype == 'int':
out[self.col_name] = out[self.col_name].astype(int)
return out
|
Prepare the transformer to convert data and return the processed table.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
pandas.DataFrame
|
juraj-google-style
|
def serialize(loss):
return serialization_lib.serialize_keras_object(loss)
|
Serializes loss function or `Loss` instance.
Args:
loss: A Keras `Loss` instance or a loss function.
Returns:
Loss configuration dictionary.
|
github-repos
|
def _send_unary_request(self, request):
if request.ack_ids:
self._client.acknowledge(subscription=self._subscription, ack_ids=list(request.ack_ids))
if request.modify_deadline_ack_ids:
deadline_to_ack_ids = collections.defaultdict(list)
for (n, ack_id) in enumerate(request.modify_deadline_ack_ids):
deadline = request.modify_deadline_seconds[n]
deadline_to_ack_ids[deadline].append(ack_id)
for (deadline, ack_ids) in six.iteritems(deadline_to_ack_ids):
self._client.modify_ack_deadline(subscription=self._subscription, ack_ids=ack_ids, ack_deadline_seconds=deadline)
_LOGGER.debug('Sent request(s) over unary RPC.')
|
Send a request using a separate unary request instead of over the
stream.
Args:
request (types.StreamingPullRequest): The stream request to be
mapped into unary requests.
|
codesearchnet
|
def isplaybook(obj):
return isinstance(obj, Iterable) and (not isinstance(obj, string_types) and not isinstance(obj, Mapping))
|
Inspects the object and returns if it is a playbook
Args:
obj (object): The object to be inspected by this function
Returns:
boolean: True if the object is a list and False if it is not
|
juraj-google-style
|
def _GenerateAssertion(self):
now = int(time.time())
payload = {'aud': RpcHelper.TOKEN_ENDPOINT, 'scope': 'https:
return crypt.make_signed_jwt(crypt.Signer.from_string(self.service_account_key), payload)
|
Generates the signed assertion that will be used in the request.
Returns:
string, signed Json Web Token (JWT) assertion.
|
codesearchnet
|
def transition_complete(self, pipeline_key):
def txn():
pipeline_record = db.get(pipeline_key)
if (pipeline_record is None):
logging.warning('Tried to mark pipeline ID "%s" as complete but it does not exist.', pipeline_key.name())
raise db.Rollback()
if (pipeline_record.status not in (_PipelineRecord.WAITING, _PipelineRecord.RUN)):
logging.warning('Tried to mark pipeline ID "%s" as complete, found bad state: %s', pipeline_key.name(), pipeline_record.status)
raise db.Rollback()
pipeline_record.status = _PipelineRecord.DONE
pipeline_record.finalized_time = self._gettime()
pipeline_record.put()
db.run_in_transaction(txn)
|
Marks the given pipeline as complete.
Does nothing if the pipeline is no longer in a state that can be completed.
Args:
pipeline_key: db.Key of the _PipelineRecord that has completed.
|
codesearchnet
|
def from_image(cls, filename, start, stop, legend, source='Image', col_offset=0.1, row_offset=2, tolerance=0):
rgb = utils.loglike_from_image(filename, col_offset)
loglike = np.array([utils.rgb_to_hex(t) for t in rgb])
(tops, hexes) = utils.tops_from_loglike(loglike, offset=row_offset)
nonconsecutive = np.append(np.diff(tops), 2)
tops = tops[(nonconsecutive > 1)]
hexes = hexes[(nonconsecutive > 1)]
hexes_reduced = list(set(hexes))
components = [legend.get_component(h, tolerance=tolerance) for h in hexes_reduced]
values = [hexes_reduced.index(i) for i in hexes]
basis = np.linspace(start, stop, loglike.size)
list_of_Intervals = cls.__intervals_from_tops(tops, values, basis, components)
return cls(list_of_Intervals, source='Image')
|
Read an image and generate Striplog.
Args:
filename (str): An image file, preferably high-res PNG.
start (float or int): The depth at the top of the image.
stop (float or int): The depth at the bottom of the image.
legend (Legend): A legend to look up the components in.
source (str): A source for the data. Default: 'Image'.
col_offset (Number): The proportion of the way across the image
from which to extract the pixel column. Default: 0.1 (ie 10%).
row_offset (int): The number of pixels to skip at the top of
each change in colour. Default: 2.
tolerance (float): The Euclidean distance between hex colours,
which has a maximum (black to white) of 441.67 in base 10.
Default: 0.
Returns:
Striplog: The ``striplog`` object.
|
codesearchnet
|
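A hedged usage sketch with the striplog package this appears to come from; the image and legend file names below are placeholders:
from striplog import Legend, Striplog

legend = Legend.from_csv(filename='legend.csv')   # placeholder legend file
strip = Striplog.from_image('strip.png',          # placeholder high-res PNG
                            start=100.0, stop=600.0, legend=legend)
print(len(strip))                                 # number of intervals detected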
def file(self, path):
with open(path, 'r') as f:
self.body(str(f.read()))
|
Reads the body to match from a disk file.
Arguments:
path (str): relative or absolute path to file to read from.
Returns:
self: current Mock instance.
|
juraj-google-style
|
def get_random_value(length=10, character_sets=[string.ascii_uppercase, string.ascii_lowercase]):
return "".join(random.choice("".join(character_sets)) for i in range(length))
|
Get a random string with the given length.
Args:
length (int): The length of the string to return.
character_sets (list(str)): The character sets to use.
Returns:
str: The random string.
|
juraj-google-style
|
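Usage is straightforward; the calls below illustrate the default character sets and a custom one (note that `random.choice` is not suitable for security-sensitive tokens):
import string

token = get_random_value()                                        # 10 mixed-case letters
digits_only = get_random_value(length=6, character_sets=[string.digits])
print(token, digits_only)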
def update_environmental_configuration(self, configuration, timeout=(- 1)):
uri = '{}/environmentalConfiguration'.format(self.data['uri'])
return self._helper.do_put(uri, configuration, timeout, None)
|
Sets the calibrated max power of an unmanaged or unsupported enclosure.
Args:
configuration (dict): Environmental configuration settings to apply (for example, the calibrated maximum power).
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
Settings that describe the environmental configuration.
|
codesearchnet
|
def update_video_image(edx_video_id, course_id, image_data, file_name):
try:
course_video = CourseVideo.objects.select_related('video').get(course_id=course_id, video__edx_video_id=edx_video_id)
except ObjectDoesNotExist:
error_message = u'VAL: CourseVideo not found for edx_video_id: {0} and course_id: {1}'.format(edx_video_id, course_id)
raise ValVideoNotFoundError(error_message)
(video_image, _) = VideoImage.create_or_update(course_video, file_name, image_data)
return video_image.image_url()
|
Update video image for an existing video.
NOTE: If `image_data` is None then `file_name` value will be used as it is, otherwise
a new file name is constructed based on uuid and extension from `file_name` value.
`image_data` will be None in case of course re-run and export.
Arguments:
edx_video_id (str): ID of the video whose image is being updated.
course_id (str): ID of the course the video belongs to.
image_data (InMemoryUploadedFile): Image data to be saved for a course video.
file_name (str): Name of the image file; used as-is when `image_data` is None.
Returns:
course video image url
Raises:
Raises ValVideoNotFoundError if the CourseVideo cannot be retrieved.
|
codesearchnet
|
def from_params(cls, params):
key_fn = (lambda x: id(x[1].owner))
streams = []
for (_, group) in groupby(sorted(params.items(), key=key_fn), key_fn):
group = list(group)
inst = [p.owner for (_, p) in group][0]
if (not isinstance(inst, param.Parameterized)):
continue
names = [p.name for (_, p) in group]
rename = {p.name: n for (n, p) in group}
streams.append(cls(inst, names, rename=rename))
return streams
|
Returns Params streams given a dictionary of parameters
Args:
params (dict): Dictionary of parameters
Returns:
List of Params streams
|
codesearchnet
|
def predict(self, a, b):
a = np.array(a).reshape((-1, 1))
b = np.array(b).reshape((-1, 1))
return sp.kendalltau(a, b)[0]
|
Compute the test statistic
Args:
a (array-like): Variable 1
b (array-like): Variable 2
Returns:
float: test statistic
|
juraj-google-style
|
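A standalone check of the statistic, assuming `sp` is `scipy.stats` as the call suggests; since the wrapping class is not shown, the sketch calls `kendalltau` directly:
import numpy as np
from scipy import stats

a = np.array([1, 2, 3, 4, 5])
b = np.array([2, 1, 4, 3, 5])
tau, p_value = stats.kendalltau(a, b)
print(tau)   # 0.6 -- 8 concordant pairs vs 2 discordant out of 10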
def GetCloudPath(self, resource_id, cache, database):
cloud_path = cache.GetResults('cloud_path')
if not cloud_path:
results = database.Query(self.CLOUD_PATH_CACHE_QUERY)
cache.CacheQueryResults(
results, 'cloud_path', 'resource_id', ('filename', 'parent'))
cloud_path = cache.GetResults('cloud_path')
if resource_id == 'folder:root':
return '/'
paths = []
parent_path, parent_id = cloud_path.get(resource_id, ['', ''])
while parent_path:
if parent_path == 'folder:root':
break
paths.append(parent_path)
parent_path, parent_id = cloud_path.get(parent_id, ['', ''])
if not paths:
return '/'
paths.reverse()
return '/{0:s}/'.format('/'.join(paths))
|
Return cloud path given a resource id.
Args:
resource_id (str): resource identifier for the file.
cache (SQLiteCache): cache.
database (SQLiteDatabase): database.
Returns:
str: full path to the resource value.
|
juraj-google-style
|
def __init__(self, final_ops, final_ops_feed_dict=None):
self._final_ops = final_ops
self._final_ops_feed_dict = final_ops_feed_dict
self._final_ops_values = None
|
Initializes `FinalOpHook` with ops to run at the end of the session.
Args:
final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names
to `Tensors`.
final_ops_feed_dict: A feed dictionary to use when running
`final_ops`.
|
github-repos
|
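A hedged sketch of how such a hook is used, assuming the snippet belongs to TensorFlow 1.x's `FinalOpsHook` (which exposes the collected values through a `final_ops_values` property):
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

metric = tf.constant(0.25)   # stand-in for a real evaluation metric
hook = tf.train.FinalOpsHook(final_ops={'metric': metric})

with tf.train.MonitoredSession(hooks=[hook]) as session:
    session.run(metric)

print(hook.final_ops_values)  # {'metric': 0.25}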
def diff(self, a_ref, target=None, b_ref=None):
result = {}
diff_dct = self.scm.get_diff_trees(a_ref, b_ref=b_ref)
result[DIFF_A_REF] = diff_dct[DIFF_A_REF]
result[DIFF_B_REF] = diff_dct[DIFF_B_REF]
if diff_dct[DIFF_EQUAL]:
result[DIFF_EQUAL] = True
return result
result[DIFF_LIST] = []
diff_outs = _get_diff_outs(self, diff_dct)
if target is None:
result[DIFF_LIST] = [
_diff_royal(self, path, diff_outs[path]) for path in diff_outs
]
elif target in diff_outs:
result[DIFF_LIST] = [_diff_royal(self, target, diff_outs[target])]
else:
msg = "Have not found file/directory '{}' in the commits"
raise FileNotInCommitError(msg.format(target))
return result
|
Generates a diff between two git references.
Args:
a_ref (str): first git tag or reference.
target (str, optional): file or directory to restrict the diff to.
b_ref (str, optional): second git tag or reference.
Returns:
dict: diff information, containing the compared references plus either an equality flag or a list of per-path changes.
Raises:
FileNotInCommitError: if `target` is not found in the compared commits.
|
juraj-google-style
|
def cast_to_type(obj, out_type):
in_type = type(obj)
if (out_type is in_type):
return obj
else:
return out_type(obj)
|
Cast obj to out_type if it's not out_type already.
If the obj happens to be out_type already, it just returns obj as is.
Args:
obj: input object
out_type: type.
Returns:
obj cast to out_type. Usual python conversion / casting rules apply.
|
codesearchnet
|
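The behaviour is just Python's normal constructor-based conversion; a few calls make the short-circuit visible:
print(cast_to_type('42', int))   # 42   -- str converted to int
print(cast_to_type(3.0, float))  # 3.0  -- already a float, returned as-is
print(cast_to_type(7, str))      # '7'  -- int converted to str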
def NewOutputModule(cls, name, output_mediator):
output_class = cls.GetOutputClass(name)
return output_class(output_mediator)
|
Creates a new output module object for the specified output format.
Args:
name (str): name of the output module.
output_mediator (OutputMediator): output mediator.
Returns:
OutputModule: output module.
Raises:
KeyError: if there is no output class found with the supplied name.
ValueError: if name is not a string.
|
codesearchnet
|
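A hedged usage sketch of the factory; the `OutputManager` class name, the 'json' format name, and `output_mediator` are assumptions rather than details confirmed by the snippet:
try:
    output_module = OutputManager.NewOutputModule('json', output_mediator)  # hypothetical names
except KeyError:
    print('No output class is registered under that name.')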
def is60(msg):
if allzeros(msg):
return False
d = hex2bin(data(msg))
if wrongstatus(d, 1, 2, 12):
return False
if wrongstatus(d, 13, 14, 23):
return False
if wrongstatus(d, 24, 25, 34):
return False
if wrongstatus(d, 35, 36, 45):
return False
if wrongstatus(d, 46, 47, 56):
return False
ias = ias60(msg)
if ((ias is not None) and (ias > 500)):
return False
mach = mach60(msg)
if ((mach is not None) and (mach > 1)):
return False
vr_baro = vr60baro(msg)
if ((vr_baro is not None) and (abs(vr_baro) > 6000)):
return False
vr_ins = vr60ins(msg)
if ((vr_ins is not None) and (abs(vr_ins) > 6000)):
return False
return True
|
Check if a message is likely to be BDS code 6,0
Args:
msg (str): 28-character hexadecimal message string
Returns:
bool: True or False
|
codesearchnet
|
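A hedged usage sketch; the helper functions (`allzeros`, `data`, `wrongstatus`, `ias60`, ...) are assumed to come from the same BDS decoding module, and the message below is a placeholder rather than a known BDS 6,0 frame:
msg = '8DA05F219B00A1000000005FA131'   # placeholder 28-character hex message

if is60(msg):
    print('Message plausibly carries BDS 6,0 (heading and speed) data.')
else:
    print('Message fails the BDS 6,0 consistency checks.')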
def register_write(self, reg_index, value):
res = self._dll.JLINKARM_WriteReg(reg_index, value)
if res != 0:
raise errors.JLinkException('Error writing to register %d' % reg_index)
return value
|
Writes into an ARM register.
Note:
The data is not immediately written, but is cached before being
transferred to the CPU on CPU start.
Args:
self (JLink): the ``JLink`` instance
reg_index (int): the ARM register to write to
value (int): the value to write to the register
Returns:
The value written to the ARM register.
Raises:
JLinkException: on write error.
|
juraj-google-style
|
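A hedged usage sketch with the pylink package, which ships this method; the device name is a placeholder:
import pylink

jlink = pylink.JLink()
jlink.open()                     # optionally pass serial_no=...
jlink.connect('STM32F407VE')     # placeholder device name
jlink.register_write(0, 0x1)     # write 0x1 into R0; value is cached until CPU start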
def plot_legend(ax, no_legend=True, legend_arg=None):
legend_arg = dict_if_none(legend_arg)
if not no_legend:
ax.legend(**legend_arg)
|
Function that defines the legend options
of a matplotlib plot.
Args:
ax: matplotlib axes
no_legend (bool): If True, no legend is drawn; if False, the legend is added.
legend_arg (dict): Additional keyword arguments for the matplotlib ax.legend() call.
|
juraj-google-style
|
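A short matplotlib example; `dict_if_none` is assumed to simply replace `None` with an empty dict:
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4], label='quadratic')

# no_legend=False actually draws the legend; legend_arg is forwarded to ax.legend().
plot_legend(ax, no_legend=False, legend_arg={'loc': 'upper left'})
plt.show()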
def spawn_agent(self, agent_definition, location):
self._should_write_to_command_buffer = True
self._add_agents(agent_definition)
command_to_send = SpawnAgentCommand(location, agent_definition.name, agent_definition.type)
self._commands.add_command(command_to_send)
|
Queues a spawn agent command. It will be applied when `tick` or `step` is called next.
The agent won't be able to be used until the next frame.
Args:
agent_definition (:obj:`AgentDefinition`): The definition of the agent to spawn.
location (np.ndarray or list): The position to spawn the agent in the world, in XYZ coordinates (in meters).
|
juraj-google-style
|
def run_inference(self, batch: Sequence[tf.Tensor], model: tf.Module, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:
inference_args = {} if not inference_args else inference_args
return self._inference_fn(model, batch, inference_args, self._model_uri)
|
Runs inferences on a batch of tf.Tensor and returns an Iterable of
Tensor Predictions.
This method stacks the list of Tensors in a vectorized format to optimize
the inference call.
Args:
batch: A sequence of Tensors. These Tensors should be batchable, as this
method will call `tf.stack()` and pass in batched Tensors with
dimensions (batch_size, n_features, etc.) into the model's predict()
function.
model: A Tensorflow model.
inference_args: Non-batchable arguments required as inputs to the model's
forward() function. Unlike Tensors in `batch`, these parameters will
not be dynamically batched
Returns:
An Iterable of type PredictionResult.
|
github-repos
|
def translate_to_histogram(self, histogram):
first_bucket_offset = 0
last_bucket_offset = 0
for index in range(0, self.MAX_BUCKET_SIZE):
if self.buckets[index] != 0:
first_bucket_offset = index
break
for index in range(self.MAX_BUCKET_SIZE - 1, -1, -1):
if self.buckets[index] != 0:
last_bucket_offset = index
break
histogram.firstBucketOffset = first_bucket_offset
histogram.bucketCounts = self.buckets[first_bucket_offset:last_bucket_offset + 1]
|
Translate buckets into Histogram.
Args:
histogram: apache_beam.runners.dataflow.internal.clients.dataflow.Histogram
Ideally, only call this function when reporting a counter to the
Dataflow service.
|
github-repos
|
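The trimming logic can be exercised without the Dataflow client types; this standalone sketch reproduces the first/last non-zero bucket scan on a plain list:
buckets = [0, 0, 3, 5, 0, 2, 0, 0]

first = next((i for i, count in enumerate(buckets) if count), 0)
last = next((i for i in range(len(buckets) - 1, -1, -1) if buckets[i]), 0)

first_bucket_offset = first                 # 2
bucket_counts = buckets[first:last + 1]     # [3, 5, 0, 2]
print(first_bucket_offset, bucket_counts)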
def forward(self, hidden_states: torch.Tensor, pos_emb: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[torch.Tensor]=False):
if self.macaron_style:
residual = hidden_states
if self.normalize_before:
hidden_states = self.ff_macaron_layer_norm(hidden_states)
hidden_states = residual + self.ff_scale * self.dropout(self.feed_forward_macaron(hidden_states))
if not self.normalize_before:
hidden_states = self.ff_macaron_layer_norm(hidden_states)
residual = hidden_states
if self.normalize_before:
hidden_states = self.self_attn_layer_norm(hidden_states)
attention_output, attention_scores = self.self_attn(hidden_states, attention_mask=attention_mask, pos_emb=pos_emb, output_attentions=output_attentions)
if self.concat_after:
x_concat = torch.cat((hidden_states, attention_output), dim=-1)
hidden_states = self.concat_linear(x_concat)
hidden_states = residual + hidden_states
else:
hidden_states = self.dropout(attention_output)
hidden_states = residual + hidden_states
if not self.normalize_before:
hidden_states = self.self_attn_layer_norm(hidden_states)
if self.use_cnn_module:
residual = hidden_states
if self.normalize_before:
hidden_states = self.conv_layer_norm(hidden_states)
hidden_states = self.conv_module(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = residual + hidden_states
if not self.normalize_before:
hidden_states = self.conv_layer_norm(hidden_states)
residual = hidden_states
if self.normalize_before:
hidden_states = self.ff_layer_norm(hidden_states)
hidden_states = self.feed_forward(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = residual + self.ff_scale * hidden_states
if not self.normalize_before:
hidden_states = self.ff_layer_norm(hidden_states)
if self.conv_module is not None:
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attention_scores,)
return outputs
|
Compute encoded features.
Args:
hidden_states (`torch.Tensor` of shape `(batch, time, size)`): Input tensor.
pos_emb (`torch.Tensor` of shape `(1, time, size)`): Positional embeddings tensor.
attention_mask (`torch.Tensor` of shape `(batch, time)`): Attention mask tensor for the input.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
Returns:
`tuple(torch.Tensor)`: the output hidden states of shape `(batch, time, size)`, plus the attention scores when `output_attentions=True`.
|
github-repos
|
def cancel(self, accountID, orderSpecifier, **kwargs):
request = Request('PUT', '/v3/accounts/{accountID}/orders/{orderSpecifier}/cancel')
request.set_path_param('accountID', accountID)
request.set_path_param('orderSpecifier', orderSpecifier)
response = self.ctx.request(request)
if (response.content_type is None):
return response
if (not response.content_type.startswith('application/json')):
return response
jbody = json.loads(response.raw_body)
parsed_body = {}
if (str(response.status) == '200'):
if (jbody.get('orderCancelTransaction') is not None):
parsed_body['orderCancelTransaction'] = self.ctx.transaction.OrderCancelTransaction.from_dict(jbody['orderCancelTransaction'], self.ctx)
if (jbody.get('relatedTransactionIDs') is not None):
parsed_body['relatedTransactionIDs'] = jbody.get('relatedTransactionIDs')
if (jbody.get('lastTransactionID') is not None):
parsed_body['lastTransactionID'] = jbody.get('lastTransactionID')
elif (str(response.status) == '401'):
if (jbody.get('errorCode') is not None):
parsed_body['errorCode'] = jbody.get('errorCode')
if (jbody.get('errorMessage') is not None):
parsed_body['errorMessage'] = jbody.get('errorMessage')
elif (str(response.status) == '404'):
if (jbody.get('orderCancelRejectTransaction') is not None):
parsed_body['orderCancelRejectTransaction'] = self.ctx.transaction.OrderCancelRejectTransaction.from_dict(jbody['orderCancelRejectTransaction'], self.ctx)
if (jbody.get('relatedTransactionIDs') is not None):
parsed_body['relatedTransactionIDs'] = jbody.get('relatedTransactionIDs')
if (jbody.get('lastTransactionID') is not None):
parsed_body['lastTransactionID'] = jbody.get('lastTransactionID')
if (jbody.get('errorCode') is not None):
parsed_body['errorCode'] = jbody.get('errorCode')
if (jbody.get('errorMessage') is not None):
parsed_body['errorMessage'] = jbody.get('errorMessage')
elif (str(response.status) == '405'):
if (jbody.get('errorCode') is not None):
parsed_body['errorCode'] = jbody.get('errorCode')
if (jbody.get('errorMessage') is not None):
parsed_body['errorMessage'] = jbody.get('errorMessage')
else:
parsed_body = jbody
response.body = parsed_body
return response
|
Cancel a pending Order in an Account
Args:
accountID:
Account Identifier
orderSpecifier:
The Order Specifier
Returns:
v20.response.Response containing the results from submitting the
request
|
codesearchnet
|
def CopyToPath(self):
number_of_path_segments = len(self._path_segments)
if (number_of_path_segments == 0):
return None
strings = [self._path_segments[0]]
number_of_path_segments -= 1
for path_segment in self._path_segments[1:]:
if (path_segment.endswith('\\') and (number_of_path_segments > 1)):
path_segment = path_segment[:(- 1)]
if ((path_segment.startswith('<') and path_segment.endswith('>')) or (len(strings) == 1)):
strings.append(' {0:s}'.format(path_segment))
elif path_segment.startswith('\\'):
strings.append('{0:s}'.format(path_segment))
else:
strings.append('\\{0:s}'.format(path_segment))
number_of_path_segments -= 1
return ''.join(strings)
|
Copies the shell items to a path.
Returns:
str: converted shell item list path or None.
|
codesearchnet
|