code | docstring | source
---|---|---|
def _preprocess_movie_lens(ratings_df):
ratings_df["data"] = 1.0
num_timestamps = ratings_df[["userId", "timestamp"]].groupby(
"userId").nunique()
last_user_timestamp = ratings_df[["userId", "timestamp"]].groupby(
"userId").max()
ratings_df["numberOfTimestamps"] = ratings_df["userId"].apply(
lambda x: num_timestamps["timestamp"][x])
ratings_df["lastTimestamp"] = ratings_df["userId"].apply(
lambda x: last_user_timestamp["timestamp"][x])
ratings_df = ratings_df[ratings_df["numberOfTimestamps"] > 2]
ratings_df = _create_row_col_indices(ratings_df)
train_ratings_df = ratings_df[
ratings_df["timestamp"] < ratings_df["lastTimestamp"]]
test_ratings_df = ratings_df[
ratings_df["timestamp"] == ratings_df["lastTimestamp"]]
    return ratings_df, train_ratings_df, test_ratings_df | Separate the ratings dataframe into train and test sets.
Filters out users with two or fewer distinct timestamps. Creates a train set
and a test set. The test set contains the last interactions of users with
more than two distinct timestamps.
Args:
ratings_df: pandas dataframe with columns 'userId', 'movieId', 'rating',
'timestamp'.
Returns:
tuple of dataframes (filtered_ratings, train_ratings, test_ratings). | juraj-google-style |
def write_fixed_str(self, value, length):
towrite = value.encode('utf-8')
slen = len(towrite)
if slen > length:
raise SDKException(ErrorCode.param_err('string longer than fixed length: %s' % length))
self.write_bytes(towrite)
diff = length - slen
while diff > 0:
self.write_byte(0)
        diff -= 1 | Write a fixed-length string value to the stream, padding with zero bytes up to the given length.
Args:
value (str): value to write to the stream.
length (int): length of the string to write. | juraj-google-style |
def fetch_layout(self, dtensor: Any) -> layout_lib.Layout:
if not context.executing_eagerly():
raise RuntimeError('`fetch_layout` must be called eagerly.')
if _pywrap_utils.IsVariable(dtensor):
dtensor = dtensor.read_value()
try:
layout_string = _pywrap_dtensor_device.FetchLayout(context.context()._handle, dtensor, self._device_info)
except core._NotOkStatusException as e:
raise core._status_to_exception(e) from None
if layout_string is None:
return None
return layout_lib.Layout.from_string(layout_string) | Fetches the layout of the DTensor.
Args:
dtensor: The DTensor whose layout is to be fetched.
Returns:
The `Layout` of this DTensor.
Raises:
RuntimeError: When not called eagerly. | github-repos |
def symmetric_linear_quantization_params(num_bits, saturation_min, saturation_max, per_channel=False):
with torch.no_grad():
n = 2 ** (num_bits - 1) - 1
if per_channel:
scale, _ = torch.max(torch.stack([saturation_min.abs(), saturation_max.abs()], dim=1), dim=1)
scale = torch.clamp(scale, min=1e-08) / n
else:
scale = max(saturation_min.abs(), saturation_max.abs())
scale = torch.clamp(scale, min=1e-08) / n
return scale | Compute the scaling factor with the given quantization range for symmetric quantization.
Args:
saturation_min (`torch.Tensor`):
Lower bound for quantization range.
saturation_max (`torch.Tensor`):
Upper bound for quantization range.
per_channel (`bool`, *optional*, defaults to `False`):
Whether or not to use channel-wise quantization.
Returns:
`torch.Tensor`: Scaling factor that linearly quantizes the given range between *saturation_min* and
*saturation_max*. | github-repos |
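A minimal usage sketch for the function above; the tensor values and bit width are illustrative, not taken from the source:
import torch

# Hypothetical calibration range for one tensor.
saturation_min = torch.tensor(-0.8)
saturation_max = torch.tensor(1.2)
# 8-bit symmetric quantization: n = 2**(8 - 1) - 1 = 127, so
# scale = max(|-0.8|, |1.2|) / 127 ~ 0.00945
scale = symmetric_linear_quantization_params(8, saturation_min, saturation_max)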
def generate(store, report_format, path):
success = False
if report_format in ['html']:
rendered_content = {
'html': generate_html
}[report_format](store)
if not os.path.isdir(path):
os.makedirs(path)
if rendered_content is not None:
with open(os.path.join(path, 'pipeline.' + report_format), 'w') as handle:
handle.write(rendered_content)
success = True
else:
Logger.get_logger(__name__).error("Unknown report format %s", report_format)
return success | Generate file in defined format representing the report of pipeline(s).
Args:
store (Store): report data.
report_format (str): currently "html" is supported only.
path (str): path where to write the report to. Missing sub folders will be created. | juraj-google-style |
class PatchTSTPatchify(nn.Module):
def __init__(self, config: PatchTSTConfig):
super().__init__()
self.sequence_length = config.context_length
self.patch_length = config.patch_length
self.patch_stride = config.patch_stride
if self.sequence_length <= self.patch_length:
raise ValueError(f'Sequence length ({self.sequence_length}) has to be greater than the patch length ({self.patch_length})')
        self.num_patches = (max(self.sequence_length, self.patch_length) - self.patch_length) // self.patch_stride + 1
new_sequence_length = self.patch_length + self.patch_stride * (self.num_patches - 1)
self.sequence_start = self.sequence_length - new_sequence_length
def forward(self, past_values: torch.Tensor):
sequence_length = past_values.shape[-2]
if sequence_length != self.sequence_length:
raise ValueError(f"Input sequence length ({sequence_length}) doesn't match model configuration ({self.sequence_length}).")
output = past_values[:, self.sequence_start:, :]
output = output.unfold(dimension=-2, size=self.patch_length, step=self.patch_stride)
output = output.transpose(-2, -3).contiguous()
return output | A class to patchify the time series sequence into different patches
Returns:
`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)` | github-repos |
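A hedged usage sketch for the patchifier above, assuming `PatchTSTPatchify` and `PatchTSTConfig` (with the `context_length`, `patch_length`, and `patch_stride` arguments) are importable in scope; the concrete numbers are illustrative and the output shape follows from the code:
import torch
from transformers import PatchTSTConfig

config = PatchTSTConfig(context_length=32, patch_length=8, patch_stride=8)
patchify = PatchTSTPatchify(config)
past_values = torch.randn(2, 32, 3)   # (batch_size, sequence_length, num_channels)
patches = patchify(past_values)       # (2, 3, 4, 8): 4 patches of length 8 per channel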
def ContainsKey(self, public_key):
return self.ContainsKeyHash(Crypto.ToScriptHash(public_key.encode_point(True), unhex=True)) | Test if the wallet contains the supplied public key.
Args:
public_key (ecdsa.Curve.point): a public key to test for its existence, e.g. KeyPair.PublicKey
Returns:
bool: True if exists, False otherwise. | juraj-google-style |
def load_image(image: Union[str, 'PIL.Image.Image'], timeout: Optional[float]=None) -> 'PIL.Image.Image':
requires_backends(load_image, ['vision'])
if isinstance(image, str):
        if image.startswith('http://') or image.startswith('https://'):
image = PIL.Image.open(BytesIO(requests.get(image, timeout=timeout).content))
elif os.path.isfile(image):
image = PIL.Image.open(image)
else:
if image.startswith('data:image/'):
image = image.split(',')[1]
try:
b64 = base64.decodebytes(image.encode())
image = PIL.Image.open(BytesIO(b64))
except Exception as e:
                raise ValueError(f'Incorrect image source. Must be a valid URL starting with `http://` or `https://`, a valid path to an image file, or a base64 encoded string. Got {image}. Failed with {e}')
elif isinstance(image, PIL.Image.Image):
image = image
else:
raise TypeError('Incorrect format used for image. Should be an url linking to an image, a base64 string, a local path, or a PIL image.')
image = PIL.ImageOps.exif_transpose(image)
image = image.convert('RGB')
return image | Loads `image` to a PIL Image.
Args:
image (`str` or `PIL.Image.Image`):
The image to convert to the PIL Image format.
timeout (`float`, *optional*):
The timeout value in seconds for the URL request.
Returns:
`PIL.Image.Image`: A PIL Image. | github-repos |
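A short usage sketch; the URL and path below are placeholders, not from the source:
img = load_image("https://example.com/cat.png", timeout=5.0)   # fetched over HTTP
img = load_image("/tmp/cat.png")                                # opened from a local file
img = load_image(img)                                           # PIL images pass through, converted to RGB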
def summary_computed(self, sess, summary, global_step=None):
if not self._summary_writer:
raise RuntimeError('Writing a summary requires a summary writer.')
if global_step is None and self.global_step is not None:
global_step = training_util.global_step(sess, self.global_step)
self._summary_writer.add_summary(summary, global_step) | Indicate that a summary was computed.
Args:
sess: A `Session` object.
summary: A Summary proto, or a string holding a serialized summary proto.
global_step: Int. global step this summary is associated with. If `None`,
it will try to fetch the current step.
Raises:
TypeError: if 'summary' is not a Summary proto or a string.
RuntimeError: if the Supervisor was created without a `logdir`. | github-repos |
def create_position_ids_from_input_ids(input_ids, padding_idx):
mask = (input_ids != padding_idx).astype('i4')
if mask.ndim > 2:
mask = mask.reshape((-1, mask.shape[-1]))
incremental_indices = jnp.cumsum(mask, axis=1).astype('i4') * mask
incremental_indices = incremental_indices.reshape(input_ids.shape)
else:
incremental_indices = jnp.cumsum(mask, axis=1).astype('i4') * mask
return incremental_indices.astype('i4') + padding_idx | Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
input_ids: jnp.ndarray
padding_idx: int
Returns: jnp.ndarray | github-repos |
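A worked example of the position-id computation above; the token ids are chosen for illustration:
import jax.numpy as jnp

input_ids = jnp.array([[5, 7, 1, 1]])            # 1 is the padding index here
create_position_ids_from_input_ids(input_ids, padding_idx=1)
# -> [[2, 3, 1, 1]]: non-padding tokens count up from padding_idx + 1,
#    padding positions stay at padding_idx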
def matrices_compliance(dsm, complete_mediation_matrix):
matrix = dsm.data
rows_dep_matrix = len(matrix)
cols_dep_matrix = len(matrix[0])
rows_med_matrix = len(complete_mediation_matrix)
cols_med_matrix = len(complete_mediation_matrix[0])
if (rows_dep_matrix != rows_med_matrix or
cols_dep_matrix != cols_med_matrix):
raise DesignStructureMatrixError(
'Matrices are NOT compliant '
'(number of rows/columns not equal)')
discrepancy_found = False
message = []
for i in range(0, rows_dep_matrix):
for j in range(0, cols_dep_matrix):
if ((complete_mediation_matrix[i][j] == 0 and
matrix[i][j] > 0) or
(complete_mediation_matrix[i][j] == 1 and
matrix[i][j] < 1)):
discrepancy_found = True
message.append(
'Untolerated dependency at %s:%s (%s:%s): '
'%s instead of %s' % (
i, j, dsm.entities[i], dsm.entities[j],
matrix[i][j], complete_mediation_matrix[i][j]))
message = '\n'.join(message)
return not discrepancy_found, message | Check if matrix and its mediation matrix are compliant.
Args:
dsm (:class:`DesignStructureMatrix`): the DSM to check.
complete_mediation_matrix (list of list of int): 2-dim array
Returns:
bool: True if compliant, else False | juraj-google-style |
def __init__(
self,
network_retries: int = DEFAULT_NETWORK_RETRIES,
network_timeout: int = DEFAULT_NETWORK_TIMEOUT
) -> None:
self._plugins_repository = PluginsRepository()
SslConnection.set_global_network_settings(network_retries, network_timeout) | Create a scanner for running scanning commands synchronously.
Args:
network_retries: How many times SSLyze should retry a connection that timed out.
network_timeout: The time until an ongoing connection times out. | juraj-google-style |
def run(self):
import behave.__main__ as behave
for d in self.firmware_dirs:
original_dir = os.getcwd()
os.chdir(d)
output = ''
try:
output = subprocess.check_output('make', shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
if output:
sys.stdout.write('Captured Output:%s%s%s' % (os.linesep, output, os.linesep))
os.chdir(original_dir)
raise e
os.chdir(original_dir)
return behave.main([self.features_dir]) | Runs the command.
Args:
self (BDDTestCommand): the ``BDDTestCommand`` instance
Returns:
``True`` on success, otherwise ``False``.
Raises:
ValueError: if a build fails | juraj-google-style |
def add_status_parser(subparsers, parent_parser):
parser = subparsers.add_parser(
'status',
help='Displays information about validator status',
description="Provides a subcommand to show a validator\'s status")
grand_parsers = parser.add_subparsers(title='subcommands',
dest='subcommand')
grand_parsers.required = True
add_status_show_parser(grand_parsers, parent_parser) | Adds argument parser for the status command
Args:
subparsers: Add parsers to this subparser object
parent_parser: The parent argparse.ArgumentParser object | juraj-google-style |
def parse_fatcat(fatcat_xml):
fatcat_results = {}
with open(fatcat_xml, 'r') as f:
soup = BeautifulSoup(f, 'lxml')
if soup.find('block'):
fatcat_results['tm_score'] = float(soup.find('afpchain')['tmscore'])
return fatcat_results | Parse a FATCAT XML result file.
Args:
fatcat_xml (str): Path to FATCAT XML result file
Returns:
dict: Parsed information from the output
Todo:
- Only returning TM-score at the moment | juraj-google-style |
def _log_progress(self, bytes_downloaded):
self._total_bytes_downloaded += bytes_downloaded
now = time.time()
if (self._interactive_mode() or ((now - self._last_progress_msg_print_time) > 15)):
self._print_download_progress_msg(('Downloading %s: %s' % (self._url, tf_utils.bytes_to_readable_str(self._total_bytes_downloaded, True))))
self._last_progress_msg_print_time = now | Logs progress information about ongoing module download.
Args:
bytes_downloaded: Number of bytes downloaded. | codesearchnet |
def InsertData(self, table_id, fd, schema, job_id):
configuration = {
"schema": {
"fields": schema
},
"destinationTable": {
"projectId": self.project_id,
"tableId": table_id,
"datasetId": self.dataset_id
},
"sourceFormat": "NEWLINE_DELIMITED_JSON",
}
body = {
"configuration": {
"load": configuration
},
"jobReference": {
"projectId": self.project_id,
"jobId": job_id
}
}
mediafile = http.MediaFileUpload(
fd.name, mimetype="application/octet-stream")
job = self.service.jobs().insert(
projectId=self.project_id, body=body, media_body=mediafile)
try:
response = job.execute()
return response
except errors.HttpError as e:
if self.GetDataset(self.dataset_id):
logging.exception("Error with job: %s", job_id)
else:
logging.info("Attempting to create dataset: %s", self.dataset_id)
self.CreateDataset()
return self.RetryUpload(job, job_id, e) | Insert data into a bigquery table.
If the table specified doesn't exist, it will be created with the specified
schema.
Args:
table_id: string table id
fd: open file descriptor containing the newline separated JSON
schema: BigQuery schema dict
job_id: string job id
Returns:
API response object on success, None on failure | juraj-google-style |
def _obtain_sampled_health_pills(self, run, node_names):
runs_to_tags_to_content = self._event_multiplexer.PluginRunToTagToContent(
constants.DEBUGGER_PLUGIN_NAME)
if run not in runs_to_tags_to_content:
return {}
tags_to_content = runs_to_tags_to_content[run]
mapping = {}
for node_name in node_names:
if node_name not in tags_to_content:
continue
health_pills = []
for tensor_event in self._event_multiplexer.Tensors(run, node_name):
json_string = tags_to_content[node_name]
try:
content_object = json.loads(tf.compat.as_text(json_string))
device_name = content_object['device']
output_slot = content_object['outputSlot']
health_pills.append(
self._tensor_proto_to_health_pill(tensor_event, node_name,
device_name, output_slot))
except (KeyError, ValueError) as e:
logger.error('Could not determine device from JSON string '
'%r: %r', json_string, e)
mapping[node_name] = health_pills
return mapping | Obtains the health pills for a run sampled by the event multiplexer.
This is much faster than the alternative path of reading health pills from
disk.
Args:
run: The run to fetch health pills for.
node_names: A list of node names for which to retrieve health pills.
Returns:
A dictionary mapping from node name to a list of
event_accumulator.HealthPillEvents. | juraj-google-style |
def buid(valu=None):
if (valu is None):
return os.urandom(32)
byts = s_msgpack.en(valu)
return hashlib.sha256(byts).digest() | A binary GUID like sequence of 32 bytes.
Args:
valu (object): Optional, if provided, the hash of the msgpack
encoded form of the object is returned. This can be used to
create stable buids.
Notes:
By default, this returns a random 32 byte value.
Returns:
bytes: A 32 byte value. | codesearchnet |
def __setstate__(self, state):
self._api = state['api']
self._path = state['path']
self.name = api_utils._unquote_filename(self._path)
self._buffer_size = state['buffer_size']
self._max_request_size = state['request_size']
self._etag = state['etag']
self._file_size = state['size']
self._offset = state['offset']
self._buffer = _Buffer()
self.closed = state['closed']
self._buffer_future = None
if self._remaining() and not self.closed:
self._request_next_buffer() | Restore state as part of deserialization/unpickling.
Args:
state: the dictionary from a __getstate__ call
Along with restoring the state, pre-fetch the next read buffer. | juraj-google-style |
def upload(cls, file_obj, store=None):
if (store is None):
store = 'auto'
elif store:
store = '1'
else:
store = '0'
data = {'UPLOADCARE_STORE': store}
files = uploading_request('POST', 'base/', data=data, files={'file': file_obj})
file_ = cls(files['file'])
return file_ | Uploads a file and returns ``File`` instance.
Args:
- file_obj: file object to upload to
- store (Optional[bool]): Should the file be automatically stored
upon upload. Defaults to None.
- False - do not store file
- True - store file (can result in error if autostore
is disabled for project)
- None - use project settings
Returns:
``File`` instance | codesearchnet |
def add(self, pattern_txt):
self.patterns[len(pattern_txt)] = pattern_txt
low = 0
high = len(pattern_txt) - 1
while not pattern_txt[low]:
low += 1
while not pattern_txt[high]:
high -= 1
min_pattern = pattern_txt[low:high + 1]
self.min_patterns[len(min_pattern)] = min_pattern | Add a pattern to the list.
Args:
pattern_txt (str list): the pattern, as a list of lines. | juraj-google-style |
def config_get(config, *path, default=None):
o = object()
result = get_in(config, path, default=o)
if (result is not o):
return result
else:
return default | Get a configuration option following a path through the config
Example usage:
>>> config_get(config,
'problem', 'problem_type_details', 'scorer',
default='accuracy')
Args:
config (dict): config dict
*path (list[str]): List of config sections and options to follow.
default (default=None): A default value to return in the case that
the option does not exist. | codesearchnet |
def list_locations(access_token, subscription_id):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/locations?api-version=', BASE_API])
return do_get(endpoint, access_token) | List available locations for a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON list of locations. | juraj-google-style |
def _handle_response(self, response, valid_status_codes, resource):
if response.status_code not in valid_status_codes:
raise InvalidStatusCodeError(
status_code=response.status_code,
expected_status_codes=valid_status_codes
)
if response.content:
data = response.json()
if isinstance(data, list):
return [resource(**x) for x in data]
else:
key = getattr(resource.Meta, 'pagination_key', None)
if isinstance(data.get(key), list):
return [resource(**x) for x in data.get(key)]
else:
return [resource(**data)]
return [] | Handles Response objects
Args:
response: An HTTP response object
valid_status_codes: A list or tuple of valid status codes
resource: The resource class to build from this response
returns:
resources: A list of Resource instances | juraj-google-style |
def dismiss_prompt(self, text=None, wait=None):
with self.driver.dismiss_modal('prompt', text=text, wait=wait):
(yield) | Execute the wrapped code, dismissing a prompt.
Args:
text (str | RegexObject, optional): Text to match against the text in the modal.
wait (int | float, optional): Maximum time to wait for the modal to appear after
executing the wrapped code.
Raises:
ModalNotFound: If a modal dialog hasn't been found. | codesearchnet |
def set_forced_variation(self, experiment_key, user_id, variation_key):
experiment = self.get_experiment_from_key(experiment_key)
if (not experiment):
return False
experiment_id = experiment.id
if (variation_key is None):
if (user_id in self.forced_variation_map):
experiment_to_variation_map = self.forced_variation_map.get(user_id)
if (experiment_id in experiment_to_variation_map):
del self.forced_variation_map[user_id][experiment_id]
self.logger.debug(('Variation mapped to experiment "%s" has been removed for user "%s".' % (experiment_key, user_id)))
else:
self.logger.debug(('Nothing to remove. Variation mapped to experiment "%s" for user "%s" does not exist.' % (experiment_key, user_id)))
else:
self.logger.debug(('Nothing to remove. User "%s" does not exist in the forced variation map.' % user_id))
return True
if (not validator.is_non_empty_string(variation_key)):
self.logger.debug('Variation key is invalid.')
return False
forced_variation = self.get_variation_from_key(experiment_key, variation_key)
if (not forced_variation):
return False
variation_id = forced_variation.id
if (user_id not in self.forced_variation_map):
self.forced_variation_map[user_id] = {experiment_id: variation_id}
else:
self.forced_variation_map[user_id][experiment_id] = variation_id
self.logger.debug(('Set variation "%s" for experiment "%s" and user "%s" in the forced variation map.' % (variation_id, experiment_id, user_id)))
return True | Sets users to a map of experiments to forced variations.
Args:
experiment_key: Key for experiment.
user_id: The user ID.
variation_key: Key for variation. If None, then clear the existing experiment-to-variation mapping.
Returns:
A boolean value that indicates if the set completed successfully. | codesearchnet |
def table_delete(self, table_name):
url = Api._ENDPOINT + (Api._TABLES_PATH % table_name)
return datalab.utils.Http.request(url, method='DELETE', credentials=self._credentials,
raw_response=True) | Issues a request to delete a table.
Args:
table_name: the name of the table as a tuple of components.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | juraj-google-style |
def GetEventFormatter(self, event):
data_type = getattr(event, 'data_type', None)
if not data_type:
return None
return formatters_manager.FormattersManager.GetFormatterObject(
event.data_type) | Retrieves the event formatter for a specific event type.
Args:
event (EventObject): event.
Returns:
EventFormatter: event formatter or None. | juraj-google-style |
def debug(msg: str, *args, **kwargs) -> None:
_DEFAULT_LOGGER.debug(msg, *args, **kwargs) | Logs debug message.
Args:
msg: Message with possible format string.
*args: Values for variables in the format string.
**kwargs: Keyword arguments for the logger. | github-repos |
def _fire_event(self, event_name, *event_args, **event_kwargs):
if (event_name in self._allowed_events):
self._logger.debug('firing handlers for event %s ', event_name)
for (func, args, kwargs) in self._event_handlers[event_name]:
kwargs.update(event_kwargs)
func(self, *(event_args + args), **kwargs) | Execute all the handlers associated with given event.
This method executes all handlers associated with the event
`event_name`. Optional positional and keyword arguments can be used to
pass arguments to **all** handlers added with this event. These
arguments update those passed using :meth:`~ignite.engine.Engine.add_event_handler`.
Args:
event_name: event for which the handlers should be executed. Valid
events are from :class:`~ignite.engine.Events` or any `event_name` added by
:meth:`~ignite.engine.Engine.register_events`.
*event_args: optional args to be passed to all handlers.
**event_kwargs: optional keyword args to be passed to all handlers. | codesearchnet |
def from_object(cls, obj):
return cls(
obj.get('sessionId', None),
obj.get('status', 0),
obj.get('value', None)
) | The factory method to create WebDriverResult from JSON Object.
Args:
obj(dict): The JSON Object returned by server. | juraj-google-style |
class Owlv2Encoder(nn.Module):
def __init__(self, config: Owlv2Config):
super().__init__()
self.layers = nn.ModuleList([Owlv2EncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for encoder_layer in self.layers:
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions)
else:
layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) | Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`Owlv2EncoderLayer`].
Args:
config: Owlv2Config | github-repos |
def _checkResponseByteCount(payload):
POSITION_FOR_GIVEN_NUMBER = 0
NUMBER_OF_BYTES_TO_SKIP = 1
_checkString(payload, minlength=1, description='payload')
givenNumberOfDatabytes = ord(payload[POSITION_FOR_GIVEN_NUMBER])
countedNumberOfDatabytes = (len(payload) - NUMBER_OF_BYTES_TO_SKIP)
if (givenNumberOfDatabytes != countedNumberOfDatabytes):
errortemplate = ('Wrong given number of bytes in the response: {0}, but counted is {1} as data payload length is {2}.' + ' The data payload is: {3!r}')
errortext = errortemplate.format(givenNumberOfDatabytes, countedNumberOfDatabytes, len(payload), payload)
raise ValueError(errortext) | Check that the number of bytes as given in the response is correct.
The first byte in the payload indicates the length of the payload (first byte not counted).
Args:
payload (string): The payload
Raises:
TypeError, ValueError | codesearchnet |
def _apply_shadow_vars(avg_grads):
ps_var_grads = []
for (grad, var) in avg_grads:
assert var.name.startswith('tower'), var.name
my_name = '/'.join(var.name.split('/')[1:])
my_name = get_op_tensor_name(my_name)[0]
new_v = tf.get_variable(my_name, dtype=var.dtype.base_dtype, initializer=var.initial_value, trainable=True)
ps_var_grads.append((grad, new_v))
return ps_var_grads | Create shadow variables on PS, and replace variables in avg_grads
by these shadow variables.
Args:
avg_grads: list of (grad, var) tuples | codesearchnet |
def run_cuda_only(func: _F) -> _F:
if tf_inspect.isclass(func):
raise ValueError('`run_cuda_only` only supports test methods.')
def decorated(self: 'TensorFlowTestCase', *args, **kwargs):
if not is_gpu_available(cuda_only=True):
self.skipTest('Test requires CUDA GPU')
return func(self, *args, **kwargs)
return decorated | Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped.
Args:
func: function to be annotated.
Returns:
Returns a function that will conditionally skip the decorated test method. | github-repos |
def _head(self, client_kwargs):
with _handle_client_error():
if 'Key' in client_kwargs:
header = self.client.head_object(**client_kwargs)
else:
header = self.client.head_bucket(**client_kwargs)
for key in ('AcceptRanges', 'ResponseMetadata'):
header.pop(key, None)
return header | Returns object or bucket HTTP header.
Args:
client_kwargs (dict): Client arguments.
Returns:
dict: HTTP header. | juraj-google-style |
def constant(cls,
value: Value,
dtype: tf.DType = tf.float32) -> 'TensorFluent':
t = tf.constant(value, dtype=dtype)
scope = []
batch = False
return TensorFluent(t, scope, batch=batch) | Returns a constant `value` TensorFluent with given `dtype`.
Args:
value: The constant value.
dtype: The output's data type.
Returns:
A constant TensorFluent. | juraj-google-style |
def __init__(self,
flush_size_chars=_FILE_POOL_FLUSH_SIZE,
ctx=None,
exclusive=False):
self._flush_size = flush_size_chars
self._buffer = []
self._size = 0
self._ctx = ctx
self._exclusive = exclusive | Constructor.
Any classes that subclass this will need to implement the _write() function.
Args:
flush_size_chars: buffer flush threshold as int.
ctx: mapreduce context as context.Context.
exclusive: a boolean flag indicating if the pool has an exclusive
access to the file. If it is True, then it's possible to write
bigger chunks of data. | juraj-google-style |
def global_pool_1d(inputs, pooling_type='MAX', mask=None):
with tf.name_scope('global_pool', values=[inputs]):
if (mask is not None):
mask = tf.expand_dims(mask, axis=2)
inputs = tf.multiply(inputs, mask)
if (pooling_type == 'MAX'):
output = tf.reduce_max(inputs, axis=1)
elif (pooling_type == 'AVR'):
if (mask is not None):
output = tf.reduce_sum(inputs, axis=1)
num_elems = tf.reduce_sum(mask, axis=1, keepdims=True)
output = tf.div(output, tf.maximum(num_elems, 1))
else:
output = tf.reduce_mean(inputs, axis=1)
return output | Pool elements across the last dimension.
Useful to convert a list of vectors into a single vector so as
to get a representation of a set.
Args:
inputs: A tensor of shape [batch_size, sequence_length, input_dims]
containing the sequences of input vectors.
pooling_type: the pooling type to use, MAX or AVR
mask: A tensor of shape [batch_size, sequence_length] containing a
mask for the inputs with 1's for existing elements, and 0's elsewhere.
Returns:
A tensor of shape [batch_size, input_dims] containing the sequences of
transformed vectors. | codesearchnet |
def __init__(self, while_definition):
logger.debug("starting")
if isinstance(while_definition, dict):
self.error_on_max = while_definition.get('errorOnMax', False)
self.max = while_definition.get('max', None)
self.sleep = while_definition.get('sleep', 0)
self.stop = while_definition.get('stop', None)
if self.stop is None and self.max is None:
logger.error(f"while decorator missing both max and stop.")
raise PipelineDefinitionError("the while decorator must have "
"either max or stop, or both. "
"But not neither. Note that "
"setting stop: False with no "
"max is an infinite loop. If "
"an infinite loop is really "
"what you want, set stop: False")
else:
logger.error(f"while decorator definition incorrect.")
raise PipelineDefinitionError("while decorator must be a dict "
"(i.e a map) type.")
logger.debug("done") | Initialize the class. No duh, huh.
You can happily expect the initializer to initialize all
member attributes.
Args:
while_definition: dict. This is the actual while definition as it
exists in the pipeline yaml. | juraj-google-style |
def sub(x1, x2, output_shape=None, name=None):
output_shape = convert_to_shape(output_shape)
if (not isinstance(x2, Tensor)):
return ScalarAddOperation(x1, (- x2)).outputs[0]
with tf.name_scope(name, default_name='sub'):
(x1, x2) = binary_arguments_to_tensors(x1, x2)
return add(x1, negative(x2), output_shape=output_shape) | Binary subtraction with broadcsting.
Args:
x1: a Tensor
x2: a Tensor
output_shape: an optional Shape
name: an optional string
Returns:
a Tensor | codesearchnet |
def node_recipients(self, node_name, is_control=False, device_name=None):
if not self._debug_graphs:
raise LookupError('Node recipients are not loaded from partition graphs yet.')
device_name = self._infer_device_name(device_name, node_name)
debug_graph = self._debug_graphs[device_name]
if is_control:
return debug_graph.node_ctrl_recipients[node_name]
else:
return debug_graph.node_recipients[node_name] | Get recipient of the given node's output according to partition graphs.
Args:
node_name: (`str`) name of the node.
is_control: (`bool`) whether control outputs, rather than non-control
outputs, are to be returned.
device_name: (`str`) name of the device. If there is only one device or if
node_name exists on only one device, this argument is optional.
Returns:
(`list` of `str`) all inputs to the node, as a list of node names.
Raises:
LookupError: If node inputs and control inputs have not been loaded
from partition graphs yet. | github-repos |
def region(self, start=0, end=None):
if (end is None):
end = len(self.sequence)
return '>{}\n{}'.format(self.id, self.sequence[start:end]) | Returns a region of ``Sequence.sequence``, in FASTA format.
If called without kwargs, the entire sequence will be returned.
Args:
start (int): Start position of the region to be returned. Default
is 0.
end (int): End position of the region to be returned. Negative values
will function as they do when slicing strings.
Returns:
str: A region of ``Sequence.sequence``, in FASTA format | codesearchnet |
def is_symmetric(self, symprec=0.1):
sg = SpacegroupAnalyzer(self, symprec=symprec)
return sg.is_laue() | Checks if slab is symmetric, i.e., contains inversion symmetry.
Args:
symprec (float): Symmetry precision used for SpaceGroup analyzer.
Returns:
(bool) Whether slab contains inversion symmetry. | codesearchnet |
def get_pyxb_binding_by_api_version(api_major, api_minor=0):
try:
return VERSION_TO_BINDING_DICT[(api_major, api_minor)]
except KeyError:
raise ValueError('Unknown DataONE API version: {}.{}'.format(api_major, api_minor)) | Map DataONE API version tag to PyXB binding.
Given a DataONE API major version number, return PyXB binding that can
serialize and deserialize DataONE XML docs of that version.
Args:
api_major, api_minor: str or int
DataONE API major and minor version numbers.
- If ``api_major`` is an integer, it is combined with ``api_minor`` to form an
exact version.
- If ``api_major`` is a string of ``v1`` or ``v2``, ``api_minor`` is ignored
and the latest PyXB bindingavailable for the ``api_major`` version is
returned.
Returns:
PyXB binding: E.g., ``d1_common.types.dataoneTypes_v1_1``. | codesearchnet |
def attrname_to_colname_dict(cls) -> Dict[(str, str)]:
attr_col = {}
for (attrname, column) in gen_columns(cls):
attr_col[attrname] = column.name
return attr_col | Asks an SQLAlchemy class how its attribute names correspond to database
column names.
Args:
cls: SQLAlchemy ORM class
Returns:
a dictionary mapping attribute names to database column names | codesearchnet |
def _BuildFindSpecsFromArtifact(self, definition, environment_variables):
find_specs = []
for source in definition.sources:
if (source.type_indicator == artifact_types.TYPE_INDICATOR_FILE):
for path_entry in set(source.paths):
specifications = self._BuildFindSpecsFromFileSourcePath(path_entry, source.separator, environment_variables, self._knowledge_base.user_accounts)
find_specs.extend(specifications)
self.file_system_artifact_names.add(definition.name)
elif (source.type_indicator == artifact_types.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY):
for key_path in set(source.keys):
if ArtifactDefinitionsFilterHelper.CheckKeyCompatibility(key_path):
specifications = self._BuildFindSpecsFromRegistrySourceKey(key_path)
find_specs.extend(specifications)
self.registry_artifact_names.add(definition.name)
elif (source.type_indicator == artifact_types.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE):
key_paths = {key_value['key'] for key_value in source.key_value_pairs}
key_paths_string = ', '.join(key_paths)
logger.warning('Windows Registry values are not supported, extracting keys: "{0!s}"'.format(key_paths_string))
for key_path in key_paths:
if ArtifactDefinitionsFilterHelper.CheckKeyCompatibility(key_path):
specifications = self._BuildFindSpecsFromRegistrySourceKey(key_path)
find_specs.extend(specifications)
self.registry_artifact_names.add(definition.name)
elif (source.type_indicator == artifact_types.TYPE_INDICATOR_ARTIFACT_GROUP):
for name in source.names:
specifications = self._BuildFindSpecsFromGroupName(name, environment_variables)
find_specs.extend(specifications)
else:
logger.warning('Unsupported artifact definition source type: "{0:s}"'.format(source.type_indicator))
return find_specs | Builds find specifications from an artifact definition.
Args:
definition (artifacts.ArtifactDefinition): artifact definition.
environment_variables (list[EnvironmentVariableArtifact]):
environment variables.
Returns:
list[dfvfs.FindSpec|dfwinreg.FindSpec]: dfVFS or dfWinReg find
specifications. | codesearchnet |
def __init__(self, column_names=None, column_sizes=None, title=None):
super(CLITabularTableView, self).__init__(
column_names=column_names, title=title)
self._column_sizes = column_sizes or [] | Initializes a command line table view.
Args:
column_names (Optional[list[str]]): column names.
column_sizes (Optional[list[int]]): minimum column sizes, in number of
characters. If a column name or row value is larger than the
minimum column size the column will be enlarged. Note that the
minimum columns size will be rounded up to the number of spaces
of the next tab.
title (Optional[str]): title. | juraj-google-style |
def _decorate_block(self, start, end):
color = self._get_scope_highlight_color()
draw_order = DRAW_ORDERS.get('codefolding')
d = TextDecoration(self.editor.document(), start_line=start, end_line=(end + 1), draw_order=draw_order)
d.set_background(color)
d.set_full_width(True, clear=False)
self.editor.decorations.add(d)
self._scope_decos.append(d) | Create a decoration and add it to the editor.
Args:
start (int): start line of the decoration
end (int): end line of the decoration
def match1(text, *patterns):
if len(patterns) == 1:
pattern = patterns[0]
match = re.search(pattern, text)
if match:
return match.group(1)
else:
return None
else:
ret = []
for pattern in patterns:
match = re.search(pattern, text)
if match:
ret.append(match.group(1))
return ret | Scans through a string for substrings matched some patterns (first-subgroups only).
Args:
text: A string to be scanned.
patterns: Arbitrary number of regex patterns.
Returns:
When only one pattern is given, returns a string (None if no match found).
When more than one pattern is given, returns a list of strings ([] if no match found). | juraj-google-style |
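A quick illustration of both return modes; the example strings are hypothetical:
match1('price: 42 USD', r'price: (\d+)')         # -> '42'
match1('a=1 b=2', r'a=(\d)', r'b=(\d)')          # -> ['1', '2']
match1('no digits here', r'(\d+)')               # -> None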
def post(self, request):
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) | Save the provided data using the class' serializer.
Args:
request:
The request being made.
Returns:
An ``APIResponse`` instance. If the request was successful
the response will have a 200 status code and contain the
serializer's data. Otherwise a 400 status code and the
request's errors will be returned. | juraj-google-style |
def _parse_grad_debug_op_name(op_name):
name_items = op_name.split('/')
assert len(name_items) > 1
assert name_items[-1].startswith(_GRADIENT_DEBUG_TAG)
grad_debugger_uuid = name_items[-1][len(_GRADIENT_DEBUG_TAG):]
if '_' in grad_debugger_uuid:
grad_debugger_uuid = grad_debugger_uuid[:grad_debugger_uuid.index('_')]
orig_tensor_slot = int(name_items[-2][name_items[-2].rfind('_') + 1:])
orig_base_op_name = name_items[-2][:name_items[-2].rfind('_')]
orig_tensor_name = '/'.join(name_items[:-2] + [orig_base_op_name]) + ':%d' % orig_tensor_slot
return (grad_debugger_uuid, orig_tensor_name) | Parse the name of a debug gradient op.
Args:
op_name: the name of the debug gradient op.
Returns:
1) The UUID of the GradientsDebugger that created the debug gradient op.
2) Name of the original tensor whose gradient is debugged by the debug
gradient op. | github-repos |
def output_key_name(self, input_key: str, output_hist: Hist, projection_name: str, **kwargs) -> str:
return projection_name | Returns the key under which the output object should be stored.
Note:
This function is just a basic placeholder which returns the projection name
and likely should be overridden.
Args:
input_key: Key of the input hist in the input dict
output_hist: The output histogram
projection_name: Projection name for the output histogram
kwargs: Projection information dict combined with additional arguments passed to
the projection function.
Returns:
Key under which the output object should be stored. By default, it returns the
projection name. | codesearchnet |
def train_validation_split(arrays, validation_split):
flat_arrays = tree.flatten(arrays)
unsplitable = [type(t) for t in flat_arrays if not can_slice_array(t)]
if unsplitable:
raise ValueError(f'Argument `validation_split` is only supported for tensors or NumPy arrays.Found incompatible type in the input: {unsplitable}')
if all((t is None for t in flat_arrays)):
return (arrays, arrays)
first_non_none = None
for t in flat_arrays:
if t is not None:
first_non_none = t
break
batch_dim = int(first_non_none.shape[0])
split_at = int(math.floor(batch_dim * (1.0 - validation_split)))
if split_at == 0 or split_at == batch_dim:
raise ValueError(f'Training data contains {batch_dim} samples, which is not sufficient to split it into a validation and training set as specified by `validation_split={validation_split}`. Either provide more data, or a different value for the `validation_split` argument.')
def _split(t, start, end):
if t is None:
return t
return t[start:end]
sliceables = convert_to_sliceable(arrays)
train_arrays = tree.map_structure(lambda x: _split(x, start=0, end=split_at), sliceables)
val_arrays = tree.map_structure(lambda x: _split(x, start=split_at, end=batch_dim), sliceables)
return (train_arrays, val_arrays) | Split arrays into train and validation subsets in deterministic order.
The last part of data will become validation data.
Args:
arrays: Tensors to split. Allowed inputs are arbitrarily nested
structures of Tensors and NumPy arrays.
validation_split: Float between 0 and 1. The proportion of the dataset
to include in the validation split. The rest of the dataset will be
included in the training split.
Returns:
`(train_arrays, validation_arrays)` | github-repos |
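A minimal sketch of the split, assuming this helper is importable in scope; the array sizes are illustrative:
import numpy as np

x = np.arange(10).reshape(5, 2)
y = np.arange(5)
(x_train, y_train), (x_val, y_val) = train_validation_split((x, y), validation_split=0.2)
# split_at = floor(5 * 0.8) = 4, so 4 samples go to training and 1 to validation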
def from_value(cls, ion_type, value, annotations=()):
if (value is None):
value = IonPyNull()
else:
(args, kwargs) = cls._to_constructor_args(value)
value = cls(*args, **kwargs)
value.ion_event = None
value.ion_type = ion_type
value.ion_annotations = annotations
return value | Constructs a value as a copy with an associated Ion type and annotations.
Args:
ion_type (IonType): The associated Ion type.
value (Any): The value to construct from, generally of type ``cls``.
annotations (Sequence[unicode]): The sequence Unicode strings decorating this value. | codesearchnet |
def generate_csr(private_key_bytes, subject_name, fqdn_list):
return (
cryptography.x509.CertificateSigningRequestBuilder()
.subject_name(subject_name)
.add_extension(
extension=cryptography.x509.SubjectAlternativeName(
[cryptography.x509.DNSName(v) for v in fqdn_list]
),
critical=False,
)
.sign(
private_key=private_key_bytes,
algorithm=cryptography.hazmat.primitives.hashes.SHA256(),
backend=cryptography.hazmat.backends.default_backend(),
)
) | Generate a Certificate Signing Request (CSR).
Args:
private_key_bytes: bytes
Private key with which the CSR will be signed.
subject_name: str
Certificate Subject Name
fqdn_list:
List of Fully Qualified Domain Names (FQDN) and/or IP addresses for which
this certificate will provide authentication.
E.g.: ['my.membernode.org', '1.2.3.4'] | juraj-google-style |
def parents(self, sourcepath, recursive=True):
return self._get_recursive_dependancies(
self._PARENTS_MAP,
sourcepath,
recursive=True
) | Recursively find all parents that import the given source path.
Args:
sourcepath (str): Source file path to search for.
Keyword Arguments:
recursive (bool): Switch to enable recursive finding (if True).
Defaults to True.
Returns:
set: Set of found parent paths. | juraj-google-style |
def __parameter_default(self, final_subfield):
if final_subfield.default:
if isinstance(final_subfield, messages.EnumField):
return final_subfield.default.name
else:
return final_subfield.default | Returns default value of final subfield if it has one.
If this subfield comes from a field list returned from __field_to_subfields,
none of the fields in the subfield list can have a default except the final
one since they all must be message fields.
Args:
final_subfield: A simple field from the end of a subfield list.
Returns:
The default value of the subfield, if any exists, with the exception of an
enum field, which will have its value cast to a string. | codesearchnet |
def tagged(pode, tag):
    if tag.startswith('#'):
tag = tag[1:]
return pode[1]['tags'].get(tag) is not None | Check if a packed node has a given tag.
Args:
pode (tuple): A packed node.
tag (str): The tag to check.
Examples:
Check if a node is tagged with "woot" and dostuff if it is.
if s_node.tagged(node,'woot'):
dostuff()
Notes:
If the tag starts with `#`, this is removed prior to checking.
Returns:
bool: True if the tag is present. False otherwise. | juraj-google-style |
def destroy_sg(app='', env='', region='', **_):
vpc = get_vpc_id(account=env, region=region)
url = '{api}/securityGroups/{env}/{region}/{app}'.format(api=API_URL, env=env, region=region, app=app)
payload = {'vpcId': vpc}
security_group = requests.get(url, params=payload, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
if (not security_group):
LOG.info('Nothing to delete.')
else:
LOG.info('Found Security Group in %(region)s: %(name)s', security_group)
destroy_request = get_template('destroy/destroy_sg.json.j2', app=app, env=env, region=region, vpc=vpc)
wait_for_task(destroy_request)
return True | Destroy Security Group.
Args:
app (str): Spinnaker Application name.
env (str): Deployment environment.
region (str): Region name, e.g. us-east-1.
Returns:
True upon successful completion. | codesearchnet |
def parse_requirements(file_):
modules = []
delim = ["<", ">", "=", "!", "~"]
try:
f = open_func(file_, "r")
except OSError:
logging.error("Failed on file: {}".format(file_))
raise
else:
data = [x.strip() for x in f.readlines() if x != "\n"]
finally:
f.close()
data = [x for x in data if x[0].isalpha()]
for x in data:
if not any([y in x for y in delim]):
modules.append({"name": x, "version": None})
for y in x:
if y in delim:
module = x.split(y)
module_name = module[0]
module_version = module[-1].replace("=", "")
module = {"name": module_name, "version": module_version}
if module not in modules:
modules.append(module)
break
return modules | Parse a requirements formatted file.
Traverse a string until a delimiter is detected, then split at said
delimiter, get module name by element index, create a dict consisting of
module:version, and add dict to list of parsed modules.
Args:
file_: File to parse.
Raises:
OSError: If there are any issues accessing the file.
Returns:
list: Parsed module entries, each a dict of the form {"name": ..., "version": ...}. | juraj-google-style |
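For illustration, a hypothetical requirements file and the structure this parser would return for it:
# requirements.txt:
#   requests>=2.20
#   six
parse_requirements('requirements.txt')
# -> [{'name': 'requests', 'version': '2.20'}, {'name': 'six', 'version': None}]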
def encode_mezzanine_asset(access_token, processor_id, asset_id, output_assetname, json_profile):
path = '/Jobs'
endpoint = ''.join([ams_rest_endpoint, path])
assets_path = ''.join(['/Assets', "('", asset_id, "')"])
assets_path_encoded = urllib.parse.quote(assets_path, safe='')
endpoint_assets = ''.join([ams_rest_endpoint, assets_path_encoded])
body = (((((((((('{ \t\t"Name":"' + output_assetname) + '", \t\t"InputMediaAssets":[{ \t \t\t"__metadata":{ \t \t\t\t"uri":"') + endpoint_assets) + '" \t \t\t} \t \t}], \t\t"Tasks":[{ \t \t\t"Configuration":\'') + json_profile) + '\', \t \t\t"MediaProcessorId":"') + processor_id) + '", \t \t\t"TaskBody":"<?xml version=\\"1.0\\" encoding=\\"utf-16\\"?><taskBody><inputAsset>JobInputAsset(0)</inputAsset><outputAsset assetCreationOptions=\\"0\\" assetName=\\"') + output_assetname) + '\\">JobOutputAsset(0)</outputAsset></taskBody>" \t\t}] \t}')
return do_ams_post(endpoint, path, body, access_token) | Get Media Service Encode Mezanine Asset.
Args:
access_token (str): A valid Azure authentication token.
processor_id (str): A Media Service Processor ID.
asset_id (str): A Media Service Asset ID.
output_assetname (str): A Media Service Asset Name.
json_profile (str): A Media Service JSON Profile.
Returns:
HTTP response. JSON body. | codesearchnet |
def to_env_var(env_var: str, value) -> str:
val = to_yaml(value)
ret_val = ('%s=%s' % (env_var, escape_yaml(val)))
return ret_val | Create an environment variable from a name and a value.
This generates a shell-compatible representation of an
environment variable that is assigned a YAML representation of
a value.
Args:
env_var (str): Name of the environment variable.
value (Any): A value we convert from. | codesearchnet |
def collapse_repeated(labels, seq_length, name=None):
with ops.name_scope(name, 'collapse_repeated_labels', [labels, seq_length]):
labels = ops.convert_to_tensor(labels, name='labels')
seq_length = ops.convert_to_tensor(seq_length, name='seq_length')
label_mask = array_ops.concat([array_ops.ones_like(labels[:, :1], dtypes.bool), math_ops.not_equal(labels[:, 1:], labels[:, :-1])], axis=1)
maxlen = _get_dim(labels, 1)
seq_mask = array_ops.sequence_mask(seq_length, maxlen=maxlen)
label_mask = math_ops.logical_and(label_mask, seq_mask)
new_seq_len = math_ops.reduce_sum(math_ops.cast(label_mask, dtypes.int32), axis=1)
new_maxlen = math_ops.reduce_max(new_seq_len)
idx_mask = array_ops.sequence_mask(new_seq_len, maxlen=new_maxlen)
flat_labels = array_ops.reshape(labels, [-1])
flat_label_mask = array_ops.reshape(label_mask, [-1])
flat_idx_mask = array_ops.reshape(idx_mask, [-1])
idx = math_ops.range(_get_dim(flat_idx_mask, 0))
flat = array_ops.scatter_nd(indices=array_ops.expand_dims(array_ops.boolean_mask(idx, flat_idx_mask), axis=1), updates=array_ops.boolean_mask(flat_labels, flat_label_mask), shape=array_ops.shape(flat_idx_mask))
batch_size = _get_dim(labels, 0)
new_shape = [batch_size, new_maxlen]
return (array_ops.reshape(flat, new_shape), math_ops.cast(new_seq_len, seq_length.dtype)) | Merge repeated labels into single labels.
Args:
labels: Tensor of shape [batch, max value in seq_length]
seq_length: Tensor of shape [batch], sequence length of each batch element.
name: A name for this `Op`. Defaults to "collapse_repeated_labels".
Returns:
A tuple `(collapsed_labels, new_seq_length)` where
collapsed_labels: Tensor of shape [batch, max_seq_length] with repeated
labels collapsed and padded to max_seq_length, eg:
`[[A, A, B, B, A], [A, B, C, D, E]] => [[A, B, A, 0, 0], [A, B, C, D, E]]`
new_seq_length: int tensor of shape [batch] with new sequence lengths. | github-repos |
def all_distances(coords1, coords2):
c1 = np.array(coords1)
c2 = np.array(coords2)
    z = (c1[:, None, :] - c2[None, :, :]) ** 2
return (np.sum(z, axis=(- 1)) ** 0.5) | Returns the distances between two lists of coordinates
Args:
coords1: First set of cartesian coordinates.
coords2: Second set of cartesian coordinates.
Returns:
2d array of cartesian distances. E.g the distance between
coords1[i] and coords2[j] is distances[i,j] | codesearchnet |
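A small worked example of the broadcasting above; the coordinates are made up:
import numpy as np

a = np.array([[0.0, 0.0, 0.0]])
b = np.array([[3.0, 4.0, 0.0], [1.0, 0.0, 0.0]])
all_distances(a, b)   # -> [[5.0, 1.0]]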
def execute(command, cwd=os.path.curdir, **options):
process = subprocess.Popen(shlex.split(command), cwd=cwd, **options)
(stdout, stderr) = process.communicate()
return (process, stdout, stderr) | Run the system command with optional options.
Args:
* command: system command.
* cwd: current working directory.
* options: additional options passed directly to :func:`subprocess.Popen`.
Returns:
Opened process, standard output & error. | codesearchnet |
def inference(self, observed_arr):
_ = self.__lstm_model.inference(observed_arr)
return self.__lstm_model.get_feature_points() | Draws samples from the `fake` distribution.
Args:
observed_arr: `np.ndarray` of observed data points.
Returns:
`np.ndarray` of inferred feature points. | juraj-google-style |
def jaccard_sims(feature_list):
sim_info_list = []
for feature_info in feature_list:
md5_source = feature_info['md5']
features_source = feature_info['features']
for feature_info in feature_list:
md5_target = feature_info['md5']
features_target = feature_info['features']
if (md5_source == md5_target):
continue
sim = jaccard_sim(features_source, features_target)
if (sim > 0.5):
sim_info_list.append({'source': md5_source, 'target': md5_target, 'sim': sim})
return sim_info_list | Compute Jaccard similarities between all the observations in the feature list.
Args:
feature_list: a list of dictionaries, each having structure as
{ 'md5' : String, 'features': list of Strings }
Returns:
list of dictionaries with structure as
{'source': md5 String, 'target': md5 String, 'sim': Jaccard similarity Number} | codesearchnet |
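A hedged sketch of the expected input/output shape, assuming the `jaccard_sim` helper computes intersection-over-union of the feature lists; md5s and features are invented:
feature_list = [
    {'md5': 'aaa', 'features': ['x', 'y', 'z']},
    {'md5': 'bbb', 'features': ['x', 'y', 'z', 'q']},
]
jaccard_sims(feature_list)
# -> [{'source': 'aaa', 'target': 'bbb', 'sim': 0.75},
#     {'source': 'bbb', 'target': 'aaa', 'sim': 0.75}]
# pairs at or below the 0.5 threshold are dropped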
def genCaCert(self, name, signas=None, outp=None, save=True):
(pkey, cert) = self._genBasePkeyCert(name)
ext0 = crypto.X509Extension(b'basicConstraints', False, b'CA:TRUE')
cert.add_extensions([ext0])
if (signas is not None):
self.signCertAs(cert, signas)
else:
self.selfSignCert(cert, pkey)
if save:
keypath = self._savePkeyTo(pkey, 'cas', ('%s.key' % name))
if (outp is not None):
outp.printf(('key saved: %s' % (keypath,)))
crtpath = self._saveCertTo(cert, 'cas', ('%s.crt' % name))
if (outp is not None):
outp.printf(('cert saved: %s' % (crtpath,)))
return (pkey, cert) | Generates a CA keypair.
Args:
name (str): The name of the CA keypair.
signas (str): The CA keypair to sign the new CA with.
outp (synapse.lib.output.Output): The output buffer.
Examples:
Make a CA named "myca":
mycakey, mycacert = cdir.genCaCert('myca')
Returns:
((OpenSSL.crypto.PKey, OpenSSL.crypto.X509)): Tuple containing the private key and certificate objects. | codesearchnet |
def get_output_at(self, node_index):
return self._get_node_attribute_at_index(node_index, 'output_tensors', 'output') | Retrieves the output tensor(s) of a layer at a given node.
Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first output node of the layer.
Returns:
A tensor (or list of tensors if the layer has multiple outputs).
Raises:
RuntimeError: If called in Eager mode. | github-repos |
def assert_same_rank(self, other):
other = as_shape(other)
if ((self.ndims is not None) and (other.ndims is not None)):
if (self.ndims != other.ndims):
raise ValueError(('Shapes %s and %s must have the same rank' % (self, other))) | Raises an exception if `self` and `other` do not have convertible ranks.
Args:
other: Another `TensorShape`.
Raises:
ValueError: If `self` and `other` do not represent shapes with the
same rank. | codesearchnet |
def get_xml_request(self):
def wrap_xml_content(xml_content):
' Wrap XML content string in the correct CPS request envelope.'
fields = ['<?xml version="1.0" encoding="utf-8"?>\n', '<cps:request xmlns:cps="www.clusterpoint.com">\n', '<cps:storage>', self.connection._storage, '</cps:storage>\n']
if self.timestamp:
fields += []
if self.request_id:
fields += ['<cps:request_id>', str(self.request_id), '</cps:request_id>\n']
if self.connection.reply_charset:
fields += []
if self.connection.application:
fields += ['<cps:application>', self.connection.application, '</cps:application>\n']
fields += ['<cps:command>', self._command, '</cps:command>\n', '<cps:user>', self.connection._user, '</cps:user>\n', '<cps:password>', self.connection._password, '</cps:password>\n', '<cps:account>', self.connection._account, '</cps:account>\n']
if self.timeout:
fields += ['<cps:timeout>', str(self.timeout), '</cps:timeout>\n']
if self.type:
fields += ['<cps:type>', self.type, '</cps:type>\n']
if xml_content:
fields += ['<cps:content>\n', xml_content, '\n</cps:content>\n']
else:
fields += '<cps:content/>\n'
fields += '</cps:request>\n'
xml_request = ''.join(fields)
return xml_request
xml_content = []
if self._documents:
xml_content += self._documents
for (key, value) in self._nested_content.items():
if value:
xml_content += ((['<{0}>'.format(key)] + ['<{0}>{1}</{0}>'.format(sub_key, sub_value) for (sub_key, sub_value) in value if sub_value]) + ['</{0}>'.format(key)])
for (key, value) in self._content.items():
if (not isinstance(value, list)):
value = [value]
xml_content += ['<{0}>{1}</{0}>'.format(key, item) for item in value if item]
xml_content = '\n'.join(xml_content)
return wrap_xml_content(xml_content) | Make xml request string from stored request information.
Returns:
A properly formatted XML request string containing all set request fields,
wrapped in the connection's envelope. | codesearchnet |
def ParseApplicationUsageRow(self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
application_name = self._GetRowValue(query_hash, row, 'event')
usage = 'Application {0:s}'.format(application_name)
event_data = MacOSApplicationUsageEventData()
event_data.application = self._GetRowValue(query_hash, row, 'app_path')
event_data.app_version = self._GetRowValue(query_hash, row, 'app_version')
event_data.bundle_id = self._GetRowValue(query_hash, row, 'bundle_id')
event_data.count = self._GetRowValue(query_hash, row, 'number_times')
event_data.query = query
timestamp = self._GetRowValue(query_hash, row, 'last_time')
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, usage)
parser_mediator.ProduceEventWithEventData(event, event_data) | Parses an application usage row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row. | codesearchnet |
def _build_ds_from_instruction(instruction, ds_from_file_fn):
examples_ds = ds_from_file_fn(instruction['filepath'])
mask_ds = _build_mask_ds(mask_offset=instruction['mask_offset'], mask=instruction['mask'])
ds = tf.data.Dataset.zip((examples_ds, mask_ds))
ds = ds.filter((lambda example, mask: mask))
ds = ds.map((lambda example, mask: example))
return ds | Map an instruction to a real dataset for one particular shard.
Args:
instruction: A `dict` of `tf.Tensor` containing the instruction to load
the particular shard (filename, mask,...)
ds_from_file_fn: `fct`, function which returns the dataset associated with
the filename
Returns:
dataset: `tf.data.Dataset`, The shard loaded from the instruction | codesearchnet |
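The zip/filter/map pattern above can be exercised on its own; a minimal sketch with dummy data (standard tf.data only — the mask dataset below stands in for `_build_mask_ds`, which is not shown here):

import tensorflow as tf

examples_ds = tf.data.Dataset.range(6)                       # 0..5
mask_ds = tf.data.Dataset.from_tensor_slices(
    [True, False, True, True, False, True])                  # stand-in for _build_mask_ds
ds = tf.data.Dataset.zip((examples_ds, mask_ds))
ds = ds.filter(lambda example, mask: mask)                   # keep masked-in examples
ds = ds.map(lambda example, mask: example)                   # drop the mask component
print(list(ds.as_numpy_iterator()))                          # [0, 2, 3, 5]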
def from_dict(cls, config_dict, return_unused_kwargs=False, **kwargs):
config = cls(**config_dict)
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
setattr(config, key, value)
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
if return_unused_kwargs:
return (config, kwargs)
else:
return config | Instantiates a [`QuantizationConfigMixin`] from a Python dictionary of parameters.
Args:
config_dict (`Dict[str, Any]`):
Dictionary that will be used to instantiate the configuration object.
return_unused_kwargs (`bool`,*optional*, defaults to `False`):
Whether or not to return a list of unused keyword arguments. Used for `from_pretrained` method in
`PreTrainedModel`.
kwargs (`Dict[str, Any]`):
Additional parameters from which to initialize the configuration object.
Returns:
[`QuantizationConfigMixin`]: The configuration object instantiated from those parameters. | github-repos |
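A usage sketch of `from_dict`, assuming transformers' `BitsAndBytesConfig` (a `QuantizationConfigMixin` subclass); the extra kwarg is consumed because it matches an existing attribute:

from transformers import BitsAndBytesConfig

config_dict = {"load_in_8bit": True}
config, unused = BitsAndBytesConfig.from_dict(
    config_dict, return_unused_kwargs=True, llm_int8_threshold=4.0)
print(config.load_in_8bit, config.llm_int8_threshold, unused)  # True 4.0 {}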
def device_id_to_slug(did):
try:
device_slug = IOTileDeviceSlug(did, allow_64bits=False)
except ValueError:
raise ArgumentError("Unable to recognize {} as a device id".format(did))
return str(device_slug) | Converts a device id into a correct device slug.
Args:
did (long) : A device id
did (string) : A device slug in the form of XXXX, XXXX-XXXX-XXXX, d--XXXX, d--XXXX-XXXX-XXXX-XXXX
Returns:
str: The device slug in the d--XXXX-XXXX-XXXX-XXXX format
Raises:
ArgumentError: if the ID is not in the [1, 16**12] range, or if not a valid string | juraj-google-style |
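A usage sketch calling the function defined above; it requires the iotile-core dependencies (`IOTileDeviceSlug`, `ArgumentError`), and the zero-padded output in the comment is an assumption based on the format described in the docstring:

print(device_id_to_slug(0x1234))     # expected: 'd--0000-0000-0000-1234'
print(device_id_to_slug('d--1234'))  # short slugs are normalized to the same long form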
def connection_made(self, transport):
self.transport = transport
self.responders = [self.make_responder(self)]
try:
good_func = callable(self.responders[0].on_data)
except AttributeError:
good_func = False
if not good_func:
err_str = "Provided responder MUST implement an 'on_data' method"
raise TypeError(err_str)
log_info = (id(self), self.remote_hostname, self.remote_port)
log.info("{:d} connection from {}:{}", *log_info) | (asyncio.Protocol member)
Called when there is a new socket connection.
This creates a new responder (as determined by the member
'responder_type') and stores in a list.
Incoming data from this connection will always call on_data
to the last element of this list.
Args:
transport (asyncio.Transport): The Transport handling the
socket communication | juraj-google-style |
def storage(line, cell=None):
parser = datalab.utils.commands.CommandParser(prog='storage', description=(
'Execute various storage-related operations. Use "%storage <command> -h" for help on a '
'specific command.'))
copy_parser = parser.subcommand('copy',
'Copy one or more GCS objects to a different location.')
copy_parser.add_argument('-s', '--source', help='The name of the object(s) to copy', nargs='+')
copy_parser.add_argument('-d', '--destination', required=True,
help='The copy destination. For multiple source items this must be a '
'bucket.')
copy_parser.set_defaults(func=_storage_copy)
create_parser = parser.subcommand('create', 'Create one or more GCS buckets.')
create_parser.add_argument('-p', '--project', help='The project associated with the objects')
create_parser.add_argument('-b', '--bucket', help='The name of the bucket(s) to create',
nargs='+')
create_parser.set_defaults(func=_storage_create)
delete_parser = parser.subcommand('delete', 'Delete one or more GCS buckets or objects.')
delete_parser.add_argument('-b', '--bucket', nargs='*',
help='The name of the bucket(s) to remove')
delete_parser.add_argument('-o', '--object', nargs='*',
help='The name of the object(s) to remove')
delete_parser.set_defaults(func=_storage_delete)
list_parser = parser.subcommand('list', 'List buckets in a project, or contents of a bucket.')
list_parser.add_argument('-p', '--project', help='The project associated with the objects')
group = list_parser.add_mutually_exclusive_group()
group.add_argument('-o', '--object',
help='The name of the objects(s) to list; can include wildchars',
nargs='?')
group.add_argument('-b', '--bucket',
help='The name of the buckets(s) to list; can include wildchars',
nargs='?')
list_parser.set_defaults(func=_storage_list)
read_parser = parser.subcommand('read',
'Read the contents of a storage object into a Python variable.')
read_parser.add_argument('-o', '--object', help='The name of the object to read',
required=True)
read_parser.add_argument('-v', '--variable', required=True,
help='The name of the Python variable to set')
read_parser.set_defaults(func=_storage_read)
view_parser = parser.subcommand('view', 'View the contents of a storage object.')
view_parser.add_argument('-n', '--head', type=int, default=20,
help='The number of initial lines to view')
view_parser.add_argument('-t', '--tail', type=int, default=20,
help='The number of lines from end to view')
view_parser.add_argument('-o', '--object', help='The name of the object to view',
required=True)
view_parser.set_defaults(func=_storage_view)
write_parser = parser.subcommand('write',
'Write the value of a Python variable to a storage object.')
write_parser.add_argument('-v', '--variable', help='The name of the source Python variable',
required=True)
write_parser.add_argument('-o', '--object', required=True,
help='The name of the destination GCS object to write')
write_parser.add_argument('-c', '--content_type', help='MIME type', default='text/plain')
write_parser.set_defaults(func=_storage_write)
return datalab.utils.commands.handle_magic_line(line, cell, parser) | Implements the storage cell magic for ipython notebooks.
Args:
line: the contents of the storage line.
cell: the optional contents of the cell body.
Returns:
The results of executing the cell. | juraj-google-style |
def _parse_logline_timestamp(t):
date, time = t.split(' ')
month, day = date.split('-')
h, m, s = time.split(':')
s, ms = s.split('.')
return (month, day, h, m, s, ms) | Parses a logline timestamp into a tuple.
Args:
t: Timestamp in logline format.
Returns:
An iterable of date and time elements in the order of month, day, hour,
minute, second, microsecond. | juraj-google-style |
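The parser above is pure string splitting, so it can be exercised directly:

month, day, h, m, s, ms = _parse_logline_timestamp('01-02 21:34:38.362')
print(month, day, h, m, s, ms)  # 01 02 21 34 38 362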
def _ParseHTTPHeaders(self, header_data, offset, display_name):
header_string = header_data.decode('ascii', errors='replace')
try:
http_header_start = header_string.index('request-method')
except ValueError:
logger.debug('No request method in header: "{0:s}"'.format(header_string))
return None, None
http_headers = header_string[http_header_start::]
header_parts = http_headers.split('\x00')
request_method = header_parts[1]
if request_method not in self._REQUEST_METHODS:
logger.debug((
'[{0:s}] {1:s}:{2:d}: Unknown HTTP method \'{3:s}\'. Response '
'headers: \'{4:s}\'').format(
self.NAME, display_name, offset, request_method, header_string))
try:
response_head_start = http_headers.index('response-head')
except ValueError:
logger.debug('No response head in header: "{0:s}"'.format(header_string))
return request_method, None
response_head = http_headers[response_head_start::]
response_head_parts = response_head.split('\x00')
response_head_text = response_head_parts[1]
response_head_text_parts = response_head_text.split('\r\n')
response_code = response_head_text_parts[0]
if not response_code.startswith('HTTP'):
logger.debug((
'[{0:s}] {1:s}:{2:d}: Could not determine HTTP response code. '
'Response headers: \'{3:s}\'.').format(
self.NAME, display_name, offset, header_string))
return request_method, response_code | Extract relevant information from HTTP header.
Args:
header_data (bytes): HTTP header data.
offset (int): offset of the cache record, relative to the start of
the Firefox cache file.
display_name (str): display name of the Firefox cache file.
Returns:
tuple: containing:
str: HTTP request method or None if the value cannot be extracted.
str: HTTP response code or None if the value cannot be extracted. | juraj-google-style |
def before_request(self, request, method, url, headers):
parts = urllib.parse.urlsplit(url)
audience = urllib.parse.urlunsplit((parts.scheme, parts.netloc, parts.path, '', ''))
token = self._get_jwt_for_audience(audience)
self.apply(headers, token=token) | Performs credential-specific before request logic.
Args:
request (Any): Unused. JWT credentials do not need to make an
HTTP request to refresh.
method (str): The request's HTTP method.
url (str): The request's URI. This is used as the audience claim
when generating the JWT.
headers (Mapping): The request's headers. | codesearchnet |
def list_workflow_outputs(self):
workflow_outputs = []
for task in self.tasks:
for output_port_name in task.outputs._portnames:
if task.outputs.__getattribute__(output_port_name).persist:
workflow_outputs.append(task.name + ':' + output_port_name)
return workflow_outputs | Get a list of outputs from the workflow that are saved to S3. To get resolved locations call workflow status.
Args:
None
Returns:
list | juraj-google-style |
def __content_type_matches(self, content_type, available_content_types):
if content_type is None:
return False
if content_type in available_content_types:
return True
for available_content_type in available_content_types:
if available_content_type in content_type:
return True
return False | Check if the given content type matches one of the available content types.
Args:
content_type (str): The given content type.
available_content_types (list(str)): All the available content types.
Returns:
bool: True if a match was found, False otherwise. | juraj-google-style |
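A standalone sketch of the same matching rule (exact hit first, then a substring check so parameters such as charset are tolerated); the function name here is illustrative, not part of the original class:

def content_type_matches(content_type, available_content_types):
    if content_type is None:
        return False
    if content_type in available_content_types:
        return True
    # substring match, e.g. 'application/json' inside 'application/json; charset=utf-8'
    return any(available in content_type for available in available_content_types)

print(content_type_matches('application/json; charset=utf-8', ['application/json']))  # True
print(content_type_matches('text/html', ['application/json']))                        # False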
def set_synchronous_execution(enable):
if enable is None:
context.context().execution_mode = None
elif enable:
context.context().execution_mode = context.SYNC
else:
context.context().execution_mode = context.ASYNC | Specifies whether operations are executed synchronously or asynchronously.
TensorFlow can execute operations synchronously or asynchronously. If
asynchronous execution is enabled, operations may return "non-ready" handles.
When `enable` is set to None, an appropriate value will be picked
automatically. The value picked may change between TensorFlow releases.
Args:
enable: Whether operations should be dispatched synchronously.
Valid values:
- None: sets the system default.
- True: executes each operation synchronously.
- False: executes each operation asynchronously. | github-repos |
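The public entry point for this is `tf.config.experimental.set_synchronous_execution`; a brief sketch:

import tensorflow as tf

tf.config.experimental.set_synchronous_execution(False)  # dispatch eager ops asynchronously
# ... run eager ops; results may be "non-ready" handles until forced ...
tf.config.experimental.set_synchronous_execution(None)   # restore the system default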
def contract_low_support(self, threshold):
if not isinstance(threshold, float) and not isinstance(threshold, int):
raise TypeError("threshold must be float or int")
to_contract = list()
for node in self.traverse_preorder():
try:
if float(str(node)) < threshold:
to_contract.append(node)
except ValueError:
pass
for node in to_contract:
node.contract() | Contract internal nodes labeled by a number (e.g. branch support) below ``threshold``
Args:
``threshold`` (``float``): The support threshold to use when contracting nodes | juraj-google-style |
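A usage sketch, assuming the treeswift package this method belongs to; the internal node labels in the Newick string act as support values:

from treeswift import read_tree_newick

tree = read_tree_newick('((A:1,B:1)95:1,(C:1,D:1)40:1);')
tree.contract_low_support(50)   # contracts the internal node labeled 40
print(tree.newick())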
def coinbase_withdraw(self, amount, currency, coinbase_account_id):
params = {'amount': amount, 'currency': currency, 'coinbase_account_id': coinbase_account_id}
return self._send_message('post', '/withdrawals/coinbase-account', data=json.dumps(params)) | Withdraw funds to a coinbase account.
You can move funds between your Coinbase accounts and your cbpro
trading accounts within your daily limits. Moving funds between
Coinbase and cbpro is instant and free.
See AuthenticatedClient.get_coinbase_accounts() to receive
information regarding your coinbase_accounts.
Args:
amount (Decimal): The amount to withdraw.
currency (str): The type of currency (eg. 'BTC')
coinbase_account_id (str): ID of the coinbase account.
Returns:
dict: Information about the deposit. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
} | codesearchnet |
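A usage sketch, assuming the cbpro client class this method belongs to; the key, secret, and passphrase strings are placeholders:

import cbpro

client = cbpro.AuthenticatedClient('API_KEY', 'API_SECRET', 'PASSPHRASE')
accounts = client.get_coinbase_accounts()          # look up the coinbase_account_id to use
result = client.coinbase_withdraw(amount='10.00',
                                  currency='BTC',
                                  coinbase_account_id=accounts[0]['id'])
print(result)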
def _srvmgr(cmd, return_json=False):
if isinstance(cmd, list):
cmd = ' '.join(cmd)
if return_json:
cmd = 'ConvertTo-Json -Compress -Depth 4 -InputObject @({0})' \
''.format(cmd)
cmd = 'Import-Module WebAdministration; {0}'.format(cmd)
ret = __salt__['cmd.run_all'](cmd, shell='powershell', python_shell=True)
if ret['retcode'] != 0:
msg = 'Unable to execute command: {0}\nError: {1}' \
''.format(cmd, ret['stderr'])
log.error(msg)
return ret | Execute a powershell command from the WebAdministration PS module.
Args:
cmd (list): The command to execute in a list
return_json (bool): True formats the return in JSON, False just returns
the output of the command.
Returns:
str: The output from the command | juraj-google-style |
def to_json_string(self):
return json.dumps(self.__dict__, indent=2) + '\n' | Serializes this instance to a JSON formatted string.
Returns:
str: JSON formatted string representing the configuration instance. | github-repos |
def are_equivalent_xml(a_xml, b_xml):
return are_equivalent_pyxb(
d1_common.xml.deserialize(a_xml), d1_common.xml.deserialize(b_xml)
) | Check if two ReplicationPolicy XML docs are semantically equivalent.
The ReplicationPolicy XML docs are normalized before comparison.
Args:
a_xml, b_xml: ReplicationPolicy XML docs to compare
Returns:
bool: ``True`` if the resulting policies for the two objects are semantically
equivalent. | juraj-google-style |
def array2bytes(arr, bytes_type=bytes):
bio = io.BytesIO()
np.save(bio, arr, allow_pickle=False)
return bytes_type(bio.getvalue()) | Wraps NumPy's save function to return bytes.
We use :func:`numpy.save` rather than :meth:`numpy.ndarray.tobytes` because
it encodes endianness and order.
Args:
arr (:obj:`numpy.ndarray`):
Array to be saved.
bytes_type (class, optional, default=bytes):
This class will be used to wrap the bytes objects in the
serialization if `use_bytes` is true. Useful for when using
Python 2 and using BSON encoding, which will not accept the raw
`bytes` type, so `bson.Binary` can be used instead.
Returns:
bytes_type | codesearchnet |
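Because the payload is produced by `numpy.save`, it round-trips through `numpy.load`; a quick sketch:

import io
import numpy as np

arr = np.arange(6, dtype=np.int32).reshape(2, 3)
payload = array2bytes(arr)
restored = np.load(io.BytesIO(payload), allow_pickle=False)
print(np.array_equal(arr, restored))  # True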
def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs) -> PretrainedConfig:
logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config')
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs) | Instantiate a [`SpeechEncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model
configuration and decoder model configuration.
Returns:
[`SpeechEncoderDecoderConfig`]: An instance of a configuration object | github-repos |
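A usage sketch with transformers' Wav2Vec2 and BERT configs, the usual encoder/decoder pairing for this class:

from transformers import BertConfig, SpeechEncoderDecoderConfig, Wav2Vec2Config

config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(
    Wav2Vec2Config(), BertConfig())
print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True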
def _find_max_under_constraint(self, constrained, dependent, predicate):
feasible = array_ops.where_v2(predicate(constrained, self.value))
feasible_exists = math_ops.greater(array_ops.size(feasible), 0)
max_dependent = math_ops.reduce_max(array_ops.gather(dependent, feasible))
return array_ops.where_v2(feasible_exists, max_dependent, 0.0) | Returns the maximum of dependent_statistic that satisfies the constraint.
Args:
constrained: Over these values the constraint
is specified. A rank-1 tensor.
dependent: From these values the maximum that satiesfies the
constraint is selected. Values in this tensor and in
`constrained` are linked by having the same threshold at each
position, hence this tensor must have the same shape.
predicate: A binary boolean functor to be applied to arguments
`constrained` and `self.value`, e.g. `tf.greater`.
Returns the maximal dependent value, or 0.0 if no value satisfies the constraint. | github-repos
def get_environmental_configuration(self, id_or_uri):
uri = self._client.build_uri(id_or_uri) + "/environmentalConfiguration"
return self._client.get(uri) | Returns a description of the environmental configuration (supported feature set, calibrated minimum & maximum
power, location & dimensions, ...) of the resource.
Args:
id_or_uri:
Can be either the Unmanaged Device id or the uri
Returns:
dict:
EnvironmentalConfiguration | juraj-google-style |
def intern(self, text):
if self.table_type.is_shared:
raise TypeError('Cannot intern on shared symbol table')
if (not isinstance(text, six.text_type)):
raise TypeError(('Cannot intern non-Unicode sequence into symbol table: %r' % text))
token = self.get(text)
if (token is None):
token = self.__add_text(text)
return token | Interns the given Unicode sequence into the symbol table.
Note:
This operation is only valid on local symbol tables.
Args:
text (unicode): The target to intern.
Returns:
SymbolToken: The mapped symbol token which may already exist in the table. | codesearchnet |
def import_event(tensor, name=None):
return gen_summary_ops.import_event(_summary_state.writer._resource, tensor, name=name) | Writes a `tf.compat.v1.Event` binary proto.
This can be used to import existing event logs into a new summary writer sink.
Please note that this is lower level than the other summary functions and
will ignore the `tf.summary.should_record_summaries` setting.
Args:
tensor: A `tf.Tensor` of type `string` containing a serialized
`tf.compat.v1.Event` proto.
name: A name for the operation (optional).
Returns:
The created `tf.Operation`. | github-repos |
def FindHeader(self, header):
for section_list in self.include_list:
for f in section_list:
if (f[0] == header):
return f[1]
return (- 1) | Check if a header has already been included.
Args:
header: header to check.
Returns:
Line number of previous occurrence, or -1 if the header has not
been seen before. | codesearchnet |
def hr_dp020(self, value=None):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float for field `hr_dp020`'.format(value))
self._hr_dp020 = value | Corresponds to IDD Field `hr_dp020`
humidity ratio corresponding to
Dew-point temperature corresponding to 2.0% annual cumulative frequency of occurrence
calculated at the standard atmospheric pressure at elevation of station
Args:
value (float): value for IDD Field `hr_dp020`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value | codesearchnet |
def _all_sum_grad(op, grad):
if op.get_attr('reduction') != b'sum':
raise LookupError('No gradient defined for NcclAllReduce except for reduction="sum".')
_check_device(grad, expected=op.device)
num_devices = op.get_attr('num_devices')
shared_name = op.get_attr('shared_name') + b'_grad'
with ops.device(op.device):
return gen_nccl_ops.nccl_all_reduce(input=grad, reduction='sum', num_devices=num_devices, shared_name=shared_name) | The gradients for `all_sum`.
Args:
op: The `all_sum` `Operation` that we are differentiating.
grad: Gradient with respect to the output of the `all_sum` op.
Returns:
The gradient with respect to the output of `all_sum`.
Raises:
LookupError: If `reduction` is not `sum`. | github-repos |
def entanglement_of_formation(state, d0, d1=None):
state = np.array(state)
if d1 is None:
d1 = int(len(state) / d0)
if state.ndim == 2 and len(state) == 4 and d0 == 2 and d1 == 2:
return __eof_qubit(state)
elif state.ndim == 1:
if d0 < d1:
tr = [1]
else:
tr = [0]
state = partial_trace(state, tr, dimensions=[d0, d1])
return entropy(state)
else:
print('Input must be a state-vector or 2-qubit density matrix.')
return None | Compute the entanglement of formation of quantum state.
The input quantum state must be either a bipartite state vector, or a
2-qubit density matrix.
Args:
state (array_like): (N) array_like or (4,4) array_like, a
bipartite quantum state.
d0 (int): the dimension of the first subsystem.
d1 (int or None): the dimension of the second subsystem.
Returns:
float: The entanglement of formation. | juraj-google-style |
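A usage sketch calling the function above on a Bell state vector (it relies on the `partial_trace` and `entropy` helpers from the same module, not shown here); a maximally entangled two-qubit pure state carries one ebit:

import numpy as np

bell = np.array([1, 0, 0, 1]) / np.sqrt(2)
print(entanglement_of_formation(bell, d0=2))  # expected: 1.0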