Dataset columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes).
def create_app(*, debug=False, threads=1, bigchaindb_factory=None):
    if not bigchaindb_factory:
        bigchaindb_factory = BigchainDB
    app = Flask(__name__)
    app.wsgi_app = StripContentTypeMiddleware(app.wsgi_app)
    CORS(app)
    app.debug = debug
    app.config['bigchain_pool'] = utils.pool(bigchaindb_factory, size=threads)
    add_routes(app)
    return app
Return an instance of the Flask application. Args: debug (bool): a flag to activate the debug mode for the app (default: False). threads (int): number of threads to use Return: an instance of the Flask application.
juraj-google-style
def softmax_classifier(input_, num_classes, labels=None, loss_weight=None,
                       per_example_weights=None, weights=None,
                       bias=tf.zeros_initializer(),
                       parameter_modifier=parameters.identity, name=PROVIDED):
    full = input_.fully_connected(num_classes, activation_fn=None, name=name,
                                  weights=weights, bias=bias,
                                  parameter_modifier=parameter_modifier)
    return full.softmax(labels=labels, loss_weight=loss_weight,
                        per_example_weights=per_example_weights, name=name)
Creates a fully-connected linear layer followed by a softmax. This returns `(softmax, loss)` where `loss` is the cross entropy loss. Args: input_: A rank 2 Tensor or a Pretty Tensor holding the activation before the logits (penultimate layer). num_classes: The number of classes. labels: The target labels to learn as a float tensor. Use None to not include a training loss. loss_weight: A scalar multiplier for the loss. per_example_weights: A Tensor with a weight per example. weights: The initializer for the weights (see `fully_connected`). bias: The initializer for the bias (see `fully_connected`). parameter_modifier: A modifier for the parameters that compute the logits. name: The optional name. Returns: A named tuple holding: softmax: The result of this layer with softmax normalization. loss: The cross entropy loss. Raises: ValueError: If the datatype is wrong.
codesearchnet
def initialize(self):
    if eager_context.executing_eagerly():
        self._iterator = self._dataset.make_one_shot_iterator()
        return []
    else:
        return [self._iterator.initializer]
Initialize underlying iterators. Returns: A list of any initializer ops that should be run.
github-repos
def bespoke_md5(self, md5): r = requests.post('http: self._output(r.text)
Performs Bespoke MD5 lookup on an MD5. Args: md5 - A hash.
juraj-google-style
def upload(self, file_path, golden_image_info):
    uri = '{0}?name={1}&description={2}'.format(
        self.URI,
        quote(golden_image_info.get('name', '')),
        quote(golden_image_info.get('description', '')))
    return self._client.upload(file_path, uri)
Adds a Golden Image resource from the file that is uploaded from a local drive. Only the .zip format file can be used for the upload. Args: file_path (str): File name to upload. golden_image_info (dict): Golden Image information. Returns: dict: Golden Image.
codesearchnet
def alias_inplace_sub(x, i, v): return _inplace_helper(x, i, v, gen_array_ops.inplace_sub)
Applies an inplace sub on input x at index i with value v. Aliases x. If i is None, x and v must be the same shape. Computes x -= v; If i is a scalar, x has a rank 1 higher than v's. Computes x[i, :] -= v; Otherwise, x and v must have the same rank. Computes x[i, :] -= v; Args: x: A Tensor. i: None, a scalar or a vector. v: A Tensor. Returns: Returns x.
github-repos
def __init__(self, credential_data=None, credential_type=None, path_spec=None):
    super(CredentialConfiguration, self).__init__()
    self.credential_data = credential_data
    self.credential_type = credential_type
    self.path_spec = path_spec
Initializes a credential configuration object. Args: credential_data (Optional[bytes]): credential data. credential_type (Optional[str]): credential type. path_spec (Optional[dfvfs.PathSpec]): path specification.
juraj-google-style
def __init__(self, key_dtype, value_dtype, default_value, name='SimpleHashTable'):
    super(SimpleHashTable, self).__init__()
    self._default_value = tf.convert_to_tensor(default_value, dtype=value_dtype)
    self._value_shape = self._default_value.get_shape()
    self._key_dtype = key_dtype
    self._value_dtype = value_dtype
    self._name = name
    self._resource_handle = self._create_resource()
Creates an empty `SimpleHashTable` object. Creates a table, the type of its keys and values are specified by key_dtype and value_dtype, respectively. Args: key_dtype: the type of the key tensors. value_dtype: the type of the value tensors. default_value: The value to use if a key is missing in the table. name: A name for the operation (optional). Returns: A `SimpleHashTable` object.
github-repos
def _make_columnar(self, x):
    if tensorshape_util.rank(x.shape) is not None:
        if tensorshape_util.rank(x.shape) == 1:
            x = x[tf.newaxis, :]
        return x
    shape = tf.shape(input=x)
    maybe_expanded_shape = tf.concat([
        shape[:-1],
        distribution_util.pick_vector(
            tf.equal(tf.rank(x), 1), [1], np.array([], dtype=np.int32)),
        shape[-1:],
    ], 0)
    return tf.reshape(x, maybe_expanded_shape)
Ensures non-scalar input has at least one column. Example: If `x = [1, 2, 3]` then the output is `[[1], [2], [3]]`. If `x = [[1, 2, 3], [4, 5, 6]]` then the output is unchanged. If `x = 1` then the output is unchanged. Args: x: `Tensor`. Returns: columnar_x: `Tensor` with at least two dimensions.
codesearchnet
def rapidfire(self, max_nlaunch=-1, max_loops=1, sleep_time=5):
    num_launched, do_exit, launched = 0, False, []
    for count in range(max_loops):
        if do_exit:
            break
        if count > 0:
            time.sleep(sleep_time)
        tasks = self.fetch_tasks_to_run()
        if any(task in launched for task in tasks):
            logger.critical("numtasks %d already in launched list:\n%s"
                            % (len(tasks), launched))
        tasks = [t for t in tasks if t not in launched]
        if not tasks:
            continue
        for task in tasks:
            fired = task.start()
            if fired:
                launched.append(task)
                num_launched += 1
            if num_launched >= max_nlaunch > 0:
                logger.info('num_launched >= max_nlaunch, going back to sleep')
                do_exit = True
                break
    self.flow.pickle_dump()
    return num_launched
Keeps submitting `Tasks` until we are out of jobs or no job is ready to run. Args: max_nlaunch: Maximum number of launches. default: no limit. max_loops: Maximum number of loops sleep_time: seconds to sleep between rapidfire loop iterations Returns: The number of tasks launched.
juraj-google-style
def create_sas_locator(access_token, asset_id, accesspolicy_id):
    path = '/Locators'
    endpoint = ''.join([ams_rest_endpoint, path])
    body = '{ \
        "AccessPolicyId":"' + accesspolicy_id + '", \
        "AssetId":"' + asset_id + '", \
        "Type":1 \
    }'
    return do_ams_post(endpoint, path, body, access_token)
Create Media Service SAS Locator. Args: access_token (str): A valid Azure authentication token. asset_id (str): Media Service Asset ID. accesspolicy_id (str): Media Service Access Policy ID. Returns: HTTP response. JSON body.
juraj-google-style
def max_intensity(item_a, time_a, item_b, time_b, max_value):
    intensity_a = item_a.max_intensity(time_a)
    intensity_b = item_b.max_intensity(time_b)
    diff = np.sqrt((intensity_a - intensity_b) ** 2)
    return np.minimum(diff, max_value) / float(max_value)
RMS difference in maximum intensity Args: item_a: STObject from the first set in ObjectMatcher time_a: Time integer being evaluated item_b: STObject from the second set in ObjectMatcher time_b: Time integer being evaluated max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1.
codesearchnet
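A quick numeric sketch of the scaling in the record above, using made-up intensity values (30 and 20) and max_value 20 rather than anything from the source:

import numpy as np

diff = np.sqrt((30 - 20) ** 2)             # 10.0, the RMS difference of two scalars
print(np.minimum(diff, 20) / float(20))    # 0.5: capped at max_value, then normalized to [0, 1]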
def SignMessage(self, message, script_hash):
    keypair = self.GetKeyByScriptHash(script_hash)
    prikey = bytes(keypair.PrivateKey)
    res = Crypto.Default().Sign(message, prikey)
    return (res, keypair.PublicKey)
Sign a message with a specified script_hash. Args: message (str): a hex encoded message to sign script_hash (UInt160): a bytearray (len 20). Returns: str: the signed message
codesearchnet
def from_backbone_configs(cls, backbone_config: PretrainedConfig, **kwargs): return cls(backbone_config=backbone_config, **kwargs)
Instantiate a [`RTDetrConfig`] (or a derived class) from a pre-trained backbone model configuration and DETR model configuration. Args: backbone_config ([`PretrainedConfig`]): The backbone configuration. Returns: [`RTDetrConfig`]: An instance of a configuration object
github-repos
async def send_rpc(self, conn_id, address, rpc_id, payload, timeout): self._ensure_connection(conn_id, True) dev = self._get_property(conn_id, 'device') try: res = dev.call_rpc(address, rpc_id, bytes(payload)) if inspect.iscoroutine(res): return await res else: return res except (RPCInvalidIDError, RPCNotFoundError, TileNotFoundError, RPCErrorCode, BusyRPCResponse): raise except Exception: self._logger.exception("Exception inside rpc %d:0x%04X, payload=%s", address, rpc_id, payload) raise
Asynchronously send an RPC to this IOTile device Args: conn_id (int): A unique identifier that will refer to this connection address (int): the address of the tile that we wish to send the RPC to rpc_id (int): the 16-bit id of the RPC we want to call payload (bytearray): the payload of the command timeout (float): the number of seconds to wait for the RPC to execute
juraj-google-style
def _load_dataset_clipping(self, dataset_dir, epsilon):
    self.dataset_max_clip = {}
    self.dataset_min_clip = {}
    self._dataset_image_count = 0
    for fname in os.listdir(dataset_dir):
        if not fname.endswith('.png'):
            continue
        image_id = fname[:-4]
        image = np.array(
            Image.open(os.path.join(dataset_dir, fname)).convert('RGB'))
        image = image.astype('int32')
        self._dataset_image_count += 1
        self.dataset_max_clip[image_id] = np.clip(image + epsilon, 0, 255).astype('uint8')
        self.dataset_min_clip[image_id] = np.clip(image - epsilon, 0, 255).astype('uint8')
Helper method which loads dataset and determines clipping range. Args: dataset_dir: location of the dataset. epsilon: maximum allowed size of adversarial perturbation.
codesearchnet
def read(self, key, array=False, embedded=True): self.tcex.log.debug('read variable {}'.format(key)) data = key if (key is not None): key = key.strip() key_type = self.variable_type(key) if re.match(self._variable_match, key): if (key_type in self.read_data_types): if (key_type in ['Binary', 'BinaryArray']): data = self.read_data_types[key_type](key) else: data = self.read_data_types[key_type](key, embedded) else: data = self.read_raw(key) else: if (key_type == 'String'): data = re.sub('(?<!\\\\)\\\\s', ' ', data) data = re.sub('\\\\\\\\s', '\\s', data) if embedded: data = self.read_embedded(data, key_type) if (array and (not isinstance(data, list))): if (data is not None): data = [data] else: data = [] return data
Read method of CRUD operation for working with KeyValue DB. This method will automatically check to see if a single variable is passed or if "mixed" data is passed and return the results from the DB. It will also automatically determine the variable type to read. Args: key (string): The variable to read from the DB. array (boolean): Convert string/dict to Array/List before returning. embedded (boolean): Resolve embedded variables. Returns: (any): Results retrieved from DB
codesearchnet
def make_encoder(activation, latent_size, base_depth): conv = functools.partial( tf.keras.layers.Conv2D, padding="SAME", activation=activation) encoder_net = tf.keras.Sequential([ conv(base_depth, 5, 1), conv(base_depth, 5, 2), conv(2 * base_depth, 5, 1), conv(2 * base_depth, 5, 2), conv(4 * latent_size, 7, padding="VALID"), tf.keras.layers.Flatten(), tf.keras.layers.Dense(2 * latent_size, activation=None), ]) def encoder(images): images = 2 * tf.cast(images, dtype=tf.float32) - 1 net = encoder_net(images) return tfd.MultivariateNormalDiag( loc=net[..., :latent_size], scale_diag=tf.nn.softplus(net[..., latent_size:] + _softplus_inverse(1.0)), name="code") return encoder
Creates the encoder function. Args: activation: Activation function in hidden layers. latent_size: The dimensionality of the encoding. base_depth: The lowest depth for a layer. Returns: encoder: A `callable` mapping a `Tensor` of images to a `tfd.Distribution` instance over encodings.
juraj-google-style
def __init__(self, workdir, prefix):
    self._workdir = workdir
    self._prefix = prefix
    self._pprefix = SDKWrapper(weakref.proxy(self._prefix))
__init__ Args: workdir(:class:`~lago.workdir.Workdir`): The environment workdir. prefix(:class:`~lago.prefix.Prefix`): The environment Prefix. Returns: None
juraj-google-style
def choices_validator(choices):
    def validator(value):
        if value not in choices:
            raise ValidationError(
                "{} is not in {}".format(value, list(choices))
            )
    return validator
Return validator function that will check if ``value in choices``. Args: choices (list, set, tuple): allowed choices for the new validator
juraj-google-style
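A brief usage sketch of the validator factory above; `ValidationError` comes from the record's own code, while the field values are invented for illustration:

color_validator = choices_validator({'red', 'green', 'blue'})
color_validator('red')       # passes silently
try:
    color_validator('pink')  # raises ValidationError: "pink is not in [...]"
except ValidationError as exc:
    print(exc)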
def get_samples_live(self, sensor_id, last=None): url = "https: headers = self.__gen_headers() headers["Content-Type"] = "application/json" params = { "sensorId": sensor_id } if last: params["last"] = last url = self.__append_url_params(url, params) r = requests.get(url, headers=headers) return r.json()
Get recent samples, one sample per second for up to the last 2 minutes. Args: sensor_id (string): hexadecimal id of the sensor to query, e.g. ``0x0013A20040B65FAD`` last (string): starting range, as ISO8601 timestamp Returns: list: dictionary objects containing sample data
juraj-google-style
def copy(self, source_file_names, destination_file_names): err_msg = 'source_file_names and destination_file_names should be equal in length' assert len(source_file_names) == len(destination_file_names), err_msg def _copy_path(source, destination): if not destination.startswith(GCSFileSystem.GCS_PREFIX): raise ValueError('Destination %r must be GCS path.' % destination) if source.endswith('/'): self._gcsIO().copytree(source, destination) else: self._gcsIO().copy(source, destination) exceptions = {} for source, destination in zip(source_file_names, destination_file_names): try: _copy_path(source, destination) except Exception as e: exceptions[source, destination] = e if exceptions: raise BeamIOError('Copy operation failed', exceptions)
Recursively copy the file tree from the source to the destination Args: source_file_names: list of source file objects that needs to be copied destination_file_names: list of destination of the new object Raises: ``BeamIOError``: if any of the copy operations fail
github-repos
def _list_node_dumps(self, node_name): lines = [] font_attr_segs = {} watch_keys = self._debug_dump.debug_watch_keys(node_name) dump_count = 0 for watch_key in watch_keys: debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key) for datum in debug_tensor_data: line = ' Slot %d @ %s @ %.3f ms' % (datum.output_slot, datum.debug_op, (datum.timestamp - self._debug_dump.t0) / 1000.0) lines.append(line) command = 'pt %s:%d -n %d' % (node_name, datum.output_slot, dump_count) font_attr_segs[len(lines) - 1] = [(2, len(line), debugger_cli_common.MenuItem(None, command))] dump_count += 1 output = debugger_cli_common.RichTextLines(lines, font_attr_segs=font_attr_segs) output_with_header = debugger_cli_common.RichTextLines(['%d dumped tensor(s):' % dump_count, '']) output_with_header.extend(output) return output_with_header
List dumped tensor data from a node. Args: node_name: Name of the node of which the attributes are to be listed. Returns: A RichTextLines object.
github-repos
def add_string_pairs_from_text_field_element(xib_file, results, text_field, special_ui_components_prefix): text_field_entry_comment = extract_element_internationalized_comment(text_field) if (text_field_entry_comment is None): return if (text_field.hasAttribute('usesAttributedText') and (text_field.attributes['usesAttributedText'].value == 'YES')): add_string_pairs_from_attributed_ui_element(results, text_field, text_field_entry_comment) else: try: text_field_entry_key = text_field.attributes['text'].value results.append((text_field_entry_key, (text_field_entry_comment + ' default text value'))) except KeyError: pass try: text_field_entry_key = text_field.attributes['placeholder'].value results.append((text_field_entry_key, (text_field_entry_comment + ' placeholder text value'))) except KeyError: pass warn_if_element_not_of_class(text_field, 'TextField', special_ui_components_prefix)
Adds string pairs from a textfield element. Args: xib_file (str): Path to the xib file. results (list): The list to add the results to. text_field(element): The textfield element from the xib, to extract the string pairs from. special_ui_components_prefix (str): If not None, extraction will not warn about internationalized UI components with this class prefix.
codesearchnet
def draw_text(img, text, position=(10, 10), font='FreeSans.ttf', font_size=14, color=(0, 0, 0)):
    _check_pil()
    font_files = _find_font_file(font)
    if len(font_files) == 0:
        logger.warn("Failed to lookup font '{}', falling back to default".format(font))
        font = ImageFont.load_default()
    else:
        font = ImageFont.truetype(font_files[0], font_size)
    img = Image.fromarray(img)
    draw = ImageDraw.Draw(img)
    draw.text(position, text, fill=color, font=font)
    return np.asarray(img)
Draws text over the image. Requires PIL. Args: img: The image to use. text: The text string to overlay. position: The text (x, y) position. (Default value = (10, 10)) font: The ttf or open type font to use. (Default value = 'FreeSans.ttf') font_size: The text font size. (Default value = 14) color: The (r, g, b) values for text color. (Default value = (0, 0, 0)) Returns: Image overlayed with text.
codesearchnet
def pymmh3_hash128(key: Union[bytes, bytearray],
                   seed: int = 0,
                   x64arch: bool = True) -> int:
    if x64arch:
        return pymmh3_hash128_x64(key, seed)
    else:
        return pymmh3_hash128_x86(key, seed)
Implements 128bit murmur3 hash, as per ``pymmh3``. Args: key: data to hash seed: seed x64arch: is a 64-bit architecture available? Returns: integer hash
juraj-google-style
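A minimal call sketch for the dispatcher above, assuming the x64/x86 helpers it delegates to are importable alongside it; the input bytes are arbitrary:

digest = pymmh3_hash128(b'example data', seed=0, x64arch=True)
print(hex(digest))  # 128-bit murmur3 hash as a hex-formatted integer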
def save(self, sess, save_path, timestep=None):
    if self._saver is None:
        raise TensorForceError("register_saver_ops should be called before save")
    return self._saver.save(
        sess=sess,
        save_path=save_path,
        global_step=timestep,
        write_meta_graph=False,
        write_state=True,
    )
Saves this component's managed variables. Args: sess: The session for which to save the managed variables. save_path: The path to save data to. timestep: Optional, the timestep to append to the file name. Returns: Checkpoint path where the model was saved.
juraj-google-style
async def datacenters(self):
    response = await self._api.get('/v1/coordinate/datacenters')
    return {data['Datacenter']: data for data in response.body}
Queries for WAN coordinates of Consul servers Returns: Mapping: WAN network coordinates for all Consul servers, organized by DCs. It returns a body like this:: { "dc1": { "Datacenter": "dc1", "Coordinates": [ { "Node": "agent-one", "Coord": { "Adjustment": 0, "Error": 1.5, "Height": 0, "Vec": [0,0,0,0,0,0,0,0] } } ] } } This endpoint serves data out of the server's local Serf data about the WAN, so its results may vary as requests are handled by different servers in the cluster. Also, it does not support blocking queries or any consistency modes.
codesearchnet
def try_evaluate_constant(tensor):
    with tensor.graph._c_graph.get() as c_graph:
        return c_api.TF_TryEvaluateConstant_wrapper(c_graph, tensor._as_tf_output())
Evaluates a symbolic tensor as a constant. Args: tensor: a symbolic Tensor. Returns: ndarray if the evaluation succeeds, or None if it fails.
github-repos
def read(cls, data): if isinstance(data, pd.DataFrame): return cls((json.loads( to_json_stat(data, output='dict', version='2.0'), object_pairs_hook=OrderedDict))) elif isinstance(data, OrderedDict): return cls(data) elif (isinstance(data, basestring) and data.startswith(("http: "ftp: return cls(request(data)) elif isinstance(data, basestring): try: json_dict = json.loads(data, object_pairs_hook=OrderedDict) return cls(json_dict) except ValueError: raise else: try: json_dict = json.load(data, object_pairs_hook=OrderedDict) return cls(json_dict) except ValueError: raise
Reads data from URL, Dataframe, JSON string, JSON file or OrderedDict. Args: data: can be a Pandas Dataframe, a JSON file, a JSON string, an OrderedDict or a URL pointing to a JSONstat file. Returns: An object of class Dataset populated with data.
juraj-google-style
def exists_evaluator(self, index):
    attr_name = self.condition_data[index][0]
    return self.attributes.get(attr_name) is not None
Evaluate the given exists match condition for the user attributes. Args: index: Index of the condition to be evaluated. Returns: Boolean: True if the user attributes have a non-null value for the given condition, otherwise False.
codesearchnet
def site_occupation_statistics(self):
    if self.time == 0.0:
        return None
    occupation_stats = {label: 0.0 for label in self.site_labels}
    for site in self.sites:
        occupation_stats[site.label] += site.time_occupied
    for label in self.site_labels:
        occupation_stats[label] /= self.time
    return occupation_stats
Average site occupation for each site type Args: None Returns: (Dict(Str:Float)): Dictionary of occupation statistics, e.g.:: { 'A' : 2.5, 'B' : 25.3 }
juraj-google-style
def get_flat_tensor_shapes(element_spec): return [spec.shape for spec in get_flat_tensor_specs(element_spec)]
Returns a list of `tf.TensorShape`s for the element tensor representation. Args: element_spec: A nested structure of `tf.TypeSpec` objects representing the element type specification. Returns: A list of `tf.TensorShape`s for the element tensor representation.
github-repos
def get_execution_info(self, driver_id, function_descriptor): if self._worker.load_code_from_local: driver_id = ray.DriverID.nil() if (not function_descriptor.is_actor_method()): self._load_function_from_local(driver_id, function_descriptor) else: with profiling.profile('wait_for_function'): self._wait_for_function(function_descriptor, driver_id) try: function_id = function_descriptor.function_id info = self._function_execution_info[driver_id][function_id] except KeyError as e: message = ('Error occurs in get_execution_info: driver_id: %s, function_descriptor: %s. Message: %s' % (driver_id, function_descriptor, e)) raise KeyError(message) return info
Get the FunctionExecutionInfo of a remote function. Args: driver_id: ID of the driver that the function belongs to. function_descriptor: The FunctionDescriptor of the function to get. Returns: A FunctionExecutionInfo object.
codesearchnet
def l1_regression_loss(y, target, name=None):
    with tf.name_scope(name, 'l1_regression', [y, target]) as scope:
        y = tf.convert_to_tensor(y, name='y')
        target = tf.convert_to_tensor(target, name='target')
        return reduce_batch_sum(tf.abs(y - target), name=scope)
Calculates the sum of absolute errors between y and target. Args: y: the calculated values. target: the desired values. name: the name for this op, defaults to l1_regression Returns: A tensorflow op.
juraj-google-style
def _BuildFindSpecsFromGroupName(self, group_name, environment_variables):
    definition = self._artifacts_registry.GetDefinitionByName(group_name)
    if not definition:
        return None
    return self._BuildFindSpecsFromArtifact(definition, environment_variables)
Builds find specifications from an artifact group name. Args: group_name (str): artifact group name. environment_variables (list[str]): environment variable attributes used to dynamically populate environment variables in file and registry artifacts. Returns: list[dfwinreg.FindSpec|dfvfs.FindSpec]: find specifications or None if no artifact with the given name can be retrieved.
juraj-google-style
def __init__(self, channel): self.GetModel = channel.unary_unary( "/google.cloud.bigquery.v2.ModelService/GetModel", request_serializer=google_dot_cloud_dot_bigquery__v2_dot_proto_dot_model__pb2.GetModelRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_bigquery__v2_dot_proto_dot_model__pb2.Model.FromString, ) self.ListModels = channel.unary_unary( "/google.cloud.bigquery.v2.ModelService/ListModels", request_serializer=google_dot_cloud_dot_bigquery__v2_dot_proto_dot_model__pb2.ListModelsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_bigquery__v2_dot_proto_dot_model__pb2.ListModelsResponse.FromString, ) self.PatchModel = channel.unary_unary( "/google.cloud.bigquery.v2.ModelService/PatchModel", request_serializer=google_dot_cloud_dot_bigquery__v2_dot_proto_dot_model__pb2.PatchModelRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_bigquery__v2_dot_proto_dot_model__pb2.Model.FromString, ) self.DeleteModel = channel.unary_unary( "/google.cloud.bigquery.v2.ModelService/DeleteModel", request_serializer=google_dot_cloud_dot_bigquery__v2_dot_proto_dot_model__pb2.DeleteModelRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def _extract_defaults(self, defaults_var: 'cfg.Variable') -> 'tuple[cfg.Variable, ...] | None': if all((isinstance(d, _instances.Tuple) for d in defaults_var.data)): return max((d.pyval for d in defaults_var.data), key=len) else: if not (all((isinstance(d, (_instance_base.Instance, _singletons.Unknown, _singletons.Unsolvable)) for d in defaults_var.data)) and all((d.full_name == 'builtins.tuple' for d in defaults_var.data if isinstance(d, _instance_base.Instance)))): self.ctx.errorlog.bad_function_defaults(self.ctx.vm.frames, self.name) return None
Extracts defaults from a Variable, used by set_function_defaults. Args: defaults_var: Variable containing potential default values. Returns: A tuple of default values, if one could be extracted, or None otherwise.
github-repos
def convert_seeded_answers(answers):
    converted = {}
    for index, answer in enumerate(answers):
        converted.setdefault(answer['answer'], {})
        converted[answer['answer']]['seeded' + str(index)] = answer['rationale']
    return converted
Convert seeded answers into the format that can be merged into student answers. Args: answers (list): seeded answers Returns: dict: seeded answers with student answers format: { 0: { 'seeded0': 'rationaleA' } 1: { 'seeded1': 'rationaleB' } }
juraj-google-style
def parse_datetime(__string: str) -> datetime.datetime:
    if not __string:
        datetime_ = datetime.datetime.now(datetime.timezone.utc)
    else:
        datetime_ = ciso8601.parse_datetime(__string)
    if datetime_.tzinfo is None:
        datetime_ = datetime_.replace(tzinfo=datetime.timezone.utc)
    return datetime_
Parse ISO-8601 datetime string. Args: __string: Datetime string to parse Returns: Parsed datetime object
juraj-google-style
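An illustrative use of the parser above; the timestamp string is made up for the example:

dt = parse_datetime('2020-01-02T03:04:05')
print(dt.tzinfo)          # UTC is attached when the string carries no offset
now = parse_datetime('')  # empty input falls back to the current UTC time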
class Multimodal2VisionEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layers = nn.ModuleList([Multimodal2VisionEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False @can_return_tuple def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> BaseModelOutput: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions) else: layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`Multimodal2VisionEncoderLayer`]. Args: config: Multimodal2VisionConfig
github-repos
def claim(self, file_readers):
    unclaimed_readers = []
    vcf_readers = []
    for file_reader in file_readers:
        if self._is_mutect_vcf(file_reader):
            vcf_reader = vcf.VcfReader(file_reader)
            vcf_readers.append(_MutectVcfReader(vcf_reader))
        else:
            unclaimed_readers.append(file_reader)
    return (unclaimed_readers, vcf_readers)
Recognizes and claims MuTect VCFs from the set of all input VCFs. Each defined caller has a chance to evaluate and claim all the incoming files as something that it can process. Args: file_readers: the collection of currently unclaimed files Returns: A tuple of unclaimed readers and MuTectVcfReaders.
juraj-google-style
def add_graph(self, y, x_label=None, y_label='', title='', x_run=None, y_run=None, svg_size_px=None, key_position='bottom right'): if (x_run is None): x_run = self.default_x_run if (y_run is None): y_run = self.default_y_run if (svg_size_px is None): svg_size_px = self.default_svg_size_px for panel in self.panels: x_run = self._load_x_run(x_run) y_run = self._load_y_run(y_run) svg_size_px = self._load_svg_size_px(svg_size_px) panel.add_graph(y=y, x_run=x_run, y_run=y_run, svg_size_px=svg_size_px, y_label=y_label, x_label=(x_label if (x_label is not None) else self.default_x_label), title=title, key_position=key_position)
Add a new graph to the overlap report. Args: y (str): Value plotted on y-axis. x_label (str): Label on x-axis. y_label (str): Label on y-axis. title (str): Title of the plot. x_run ((float,float)): x-range. y_run ((int,int)): y-rang. svg_size_px ((int,int): Size of SVG image in pixels. key_position (str): GnuPlot position of the legend.
codesearchnet
def get_nltk_builder(languages): all_stemmers = [] all_stopwords_filters = [] all_word_characters = set() for language in languages: if (language == 'en'): all_stemmers.append(lunr.stemmer.stemmer) all_stopwords_filters.append(stop_word_filter) all_word_characters.update({'\\w'}) else: (stopwords, word_characters) = _get_stopwords_and_word_characters(language) all_stemmers.append(Pipeline.registered_functions['stemmer-{}'.format(language)]) all_stopwords_filters.append(generate_stop_word_filter(stopwords, language=language)) all_word_characters.update(word_characters) builder = Builder() multi_trimmer = generate_trimmer(''.join(sorted(all_word_characters))) Pipeline.register_function(multi_trimmer, 'lunr-multi-trimmer-{}'.format('-'.join(languages))) builder.pipeline.reset() for fn in chain([multi_trimmer], all_stopwords_filters, all_stemmers): builder.pipeline.add(fn) for fn in all_stemmers: builder.search_pipeline.add(fn) return builder
Returns a builder with stemmers for all languages added to it. Args: languages (list): A list of supported languages.
codesearchnet
def get_structures(self, chemsys_formula_id, final=True):
    prop = "final_structure" if final else "initial_structure"
    data = self.get_data(chemsys_formula_id, prop=prop)
    return [d[prop] for d in data]
Get a list of Structures corresponding to a chemical system, formula, or materials_id. Args: chemsys_formula_id (str): A chemical system (e.g., Li-Fe-O), or formula (e.g., Fe2O3) or materials_id (e.g., mp-1234). final (bool): Whether to get the final structure, or the initial (pre-relaxation) structure. Defaults to True. Returns: List of Structure objects.
juraj-google-style
class JetMoeMoE(nn.Module): def __init__(self, config: JetMoeConfig): super(JetMoeMoE, self).__init__() self.input_size = config.hidden_size self.hidden_size = config.intermediate_size self.activation = ACT2FN[config.activation_function] self.bias = torch.nn.Parameter(torch.empty(self.input_size)) self.input_linear = JetMoeParallelExperts(config.num_local_experts, self.input_size, self.hidden_size * 2) self.output_linear = JetMoeParallelExperts(config.num_local_experts, self.hidden_size, self.input_size) self.router = JetMoeTopKGating(input_size=self.input_size, num_experts=config.num_local_experts, top_k=config.num_experts_per_tok) def forward(self, layer_input): bsz, length, emb_size = layer_input.size() layer_input = layer_input.reshape(-1, emb_size) _, batch_index, batch_gates, expert_size, router_logits = self.router(layer_input) expert_inputs = layer_input[batch_index] hidden_states = self.input_linear(expert_inputs, expert_size) chunked_hidden_states = hidden_states.chunk(2, dim=-1) hidden_states = self.activation(chunked_hidden_states[0]) * chunked_hidden_states[1] expert_outputs = self.output_linear(hidden_states, expert_size) expert_outputs = expert_outputs * batch_gates[:, None] zeros = torch.zeros((bsz * length, self.input_size), dtype=expert_outputs.dtype, device=expert_outputs.device) layer_output = zeros.index_add(0, batch_index, expert_outputs) layer_output = layer_output.view(bsz, length, self.input_size) layer_output = layer_output + self.bias return (layer_output, router_logits)
A Sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts. Args: config: Configuration object with model hyperparameters.
github-repos
def get_outputs_filtered(self, owner, spent=None):
    outputs = self.fastquery.get_outputs_by_public_key(owner)
    if spent is None:
        return outputs
    elif spent is True:
        return self.fastquery.filter_unspent_outputs(outputs)
    elif spent is False:
        return self.fastquery.filter_spent_outputs(outputs)
Get a list of output links filtered on some criteria Args: owner (str): base58 encoded public_key. spent (bool): If ``True`` return only the spent outputs. If ``False`` return only unspent outputs. If spent is not specified (``None``) return all outputs. Returns: :obj:`list` of TransactionLink: list of ``txid`` s and ``output`` s pointing to another transaction's condition
codesearchnet
def _ensure_proper_types(struct, encoding, force_types): if (force_types is None): return struct res = None if isinstance(struct, (dict, collections.OrderedDict)): res = type(struct)() for (k, v) in struct.items(): res[_ensure_proper_types(k, encoding, force_types)] = _ensure_proper_types(v, encoding, force_types) elif isinstance(struct, list): res = [] for i in struct: res.append(_ensure_proper_types(i, encoding, force_types)) elif isinstance(struct, six.binary_type): res = struct.decode(encoding) elif isinstance(struct, (six.text_type, type(None), type(True), six.integer_types, float)): res = struct elif isinstance(struct, datetime.datetime): res = struct else: raise AnyMarkupError('internal error - unexpected type {0} in parsed markup'.format(type(struct))) if (force_types and isinstance(res, six.text_type)): res = _recognize_basic_types(res) elif (not (force_types or isinstance(res, (dict, collections.OrderedDict, list, six.text_type)))): res = six.text_type(res) return res
A convenience function that recursively makes sure the given structure contains proper types according to value of `force_types`. Args: struct: a structure to check and fix encoding: encoding to use on found bytestrings force_types: if `True`, integers, floats, booleans and none/null are recognized and returned as proper types instead of strings; if `False`, everything is converted to strings if `None`, unmodified `struct` is returned Returns: a fully decoded copy of given structure
codesearchnet
def combine_samples(self, md5_list, filename, type_tag):
    total_bytes = ""
    for md5 in md5_list:
        total_bytes += self.get_sample(md5)['sample']['raw_bytes']
        self.remove_sample(md5)
    return self.store_sample(total_bytes, filename, type_tag)
Combine samples together. This has various use cases; the most significant is when a set of sample 'chunks' was uploaded and we now combine them into one sample. Args: md5_list: The list of md5s to combine, order matters! filename: name of the file (used purely as meta data not for lookup) type_tag: ('exe','pcap','pdf','json','swf', or ...) Returns: the computed md5 of the combined samples
juraj-google-style
def pprint_value_string(self, value):
    unit = '' if self.unit is None else ' ' + bytes_to_unicode(self.unit)
    value = self.pprint_value(value)
    return title_format.format(name=bytes_to_unicode(self.label), val=value, unit=unit)
Pretty print the dimension value and unit. Args: value: Dimension value to format Returns: Formatted dimension value string with unit
juraj-google-style
def layer_preprocess(layer_input, hparams, layer_collection=None): assert ('a' not in hparams.layer_preprocess_sequence), 'No residual connections allowed in hparams.layer_preprocess_sequence' assert ('z' not in hparams.layer_preprocess_sequence), 'No residual connections allowed in hparams.layer_preprocess_sequence' return layer_prepostprocess(None, layer_input, sequence=hparams.layer_preprocess_sequence, dropout_rate=hparams.layer_prepostprocess_dropout, norm_type=hparams.norm_type, depth=None, epsilon=hparams.norm_epsilon, dropout_broadcast_dims=comma_separated_string_to_integer_list(getattr(hparams, 'layer_prepostprocess_dropout_broadcast_dims', '')), default_name='layer_prepostprocess', layer_collection=layer_collection)
Apply layer preprocessing. See layer_prepostprocess() for details. A hyperparameters object is passed for convenience. The hyperparameters that may be used are: layer_preprocess_sequence layer_prepostprocess_dropout norm_type hidden_size norm_epsilon Args: layer_input: a Tensor hparams: a hyperparameters object. layer_collection: A tensorflow_kfac.LayerCollection. Only used by the KFAC optimizer. Default is None. Returns: a Tensor
codesearchnet
def os_volumes(self):
    if not self.__os_volumes:
        self.__os_volumes = OsVolumes(self.__connection)
    return self.__os_volumes
Gets the OS Volumes API client. Returns: OsVolumes:
codesearchnet
def _finish_parsing(self, instrumentation_block):
    formatter = _InstrumentationBlockFormatter(instrumentation_block)
    return formatter.has_completed_result_block_format(
        self.DEFAULT_INSTRUMENTATION_ERROR_MESSAGE)
Finishes parsing the instrumentation result block for the final instrumentation run status. Args: instrumentation_block: _InstrumentationBlock, the instrumentation result block for the instrumentation run. Potentially, this could actually be a method block if the instrumentation output is malformed. Returns: A boolean indicating whether the instrumentation run completed with all the tests passing. Raises: signals.TestError: Error raised if the instrumentation failed to complete with either a pass or fail status.
github-repos
def delete_container_instance_group(access_token, subscription_id, resource_group,
                                    container_group_name):
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/resourcegroups/', resource_group,
                        '/providers/Microsoft.ContainerInstance/ContainerGroups/',
                        container_group_name,
                        '?api-version=', CONTAINER_API])
    return do_delete(endpoint, access_token)
Delete a container group from a resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. container_group_name (str): Name of container instance group. Returns: HTTP response.
juraj-google-style
def _GetSignatureMatchParserNames(self, file_object): parser_names = [] scan_state = pysigscan.scan_state() self._file_scanner.scan_file_object(scan_state, file_object) for scan_result in iter(scan_state.scan_results): format_specification = ( self._formats_with_signatures.GetSpecificationBySignature( scan_result.identifier)) if format_specification.identifier not in parser_names: parser_names.append(format_specification.identifier) return parser_names
Determines if a file-like object matches one of the known signatures. Args: file_object (file): file-like object whose contents will be checked for known signatures. Returns: list[str]: parser names for which the contents of the file-like object matches their known signatures.
juraj-google-style
def _call_api(self, method, params=None):
    url = self.url.format(method=method)
    if not params:
        params = {'token': self.token}
    else:
        params['token'] = self.token
    logger.debug('Send request to %s', url)
    response = requests.get(url, params=params).json()
    if self.verify:
        if not response['ok']:
            msg = 'For {url} API returned this bad response {response}'
            raise Exception(msg.format(url=url, response=response))
    return response
Low-level method to call the Slack API. Args: method: {str} method name to call params: {dict} GET parameters The token will always be added
codesearchnet
def GetUnicodeString(value):
    if isinstance(value, list):
        value = [GetUnicodeString(item) for item in value]
        return ''.join(value)

    if isinstance(value, py2to3.INTEGER_TYPES):
        value = '{0:d}'.format(value)

    if not isinstance(value, py2to3.UNICODE_TYPE):
        return codecs.decode(value, 'utf8', 'ignore')
    return value
Attempts to convert the argument to a Unicode string. Args: value (list|int|bytes|str): value to convert. Returns: str: string representation of the argument.
juraj-google-style
def create_alias(alias_name, alias_command):
    alias_name, alias_command = alias_name.strip(), alias_command.strip()
    alias_table = get_alias_table()
    if alias_name not in alias_table.sections():
        alias_table.add_section(alias_name)
    alias_table.set(alias_name, 'command', alias_command)
    _commit_change(alias_table)
Create an alias. Args: alias_name: The name of the alias. alias_command: The command that the alias points to.
juraj-google-style
def lint_fileset(*dirnames, **kwargs):
    try:
        rc_filename = kwargs['rc_filename']
        description = kwargs['description']
        if len(kwargs) != 2:
            raise KeyError
    except KeyError:
        raise KeyError(_LINT_FILESET_MSG)

    pylint_shell_command = ['pylint', '--rcfile', rc_filename]
    pylint_shell_command.extend(dirnames)
    status_code = subprocess.call(pylint_shell_command)
    if status_code != 0:
        error_message = _ERROR_TEMPLATE.format(description, status_code)
        print(error_message, file=sys.stderr)
        sys.exit(status_code)
Lints a group of files using a given rcfile. Keyword arguments are * ``rc_filename`` (``str``): The name of the Pylint config RC file. * ``description`` (``str``): A description of the files and configuration currently being run. Args: dirnames (tuple): Directories to run Pylint in. kwargs: The keyword arguments. The only keyword arguments are ``rc_filename`` and ``description`` and both are required. Raises: KeyError: If the wrong keyword arguments are used.
codesearchnet
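A hypothetical invocation of the helper above; the directory names and RC file name are placeholders, not values from the source:

lint_fileset(
    'mypackage', 'tests',
    rc_filename='pylintrc_default',
    description='Library and test code',
)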
def get_plugin(cls, name: str) -> Type[ConnectionPlugin]: if name not in cls.available: raise ConnectionPluginNotRegistered( f"Connection {name!r} is not registered" ) return cls.available[name]
Fetches the connection plugin by name if already registered Args: name: name of the connection plugin Raises: :obj:`nornir.core.exceptions.ConnectionPluginNotRegistered`
juraj-google-style
def descriptors(package):
    from os import path
    dpath = _descriptor_path(package)
    if path.isfile(dpath):
        import json
        with open(dpath) as f:
            jdb = json.load(f)
        return jdb
    else:
        return None
Returns a dictionary of descriptors deserialized from JSON for the specified package. Args: package (str): name of the python package to get settings for.
juraj-google-style
def import_demonstrations(self, demonstrations): if isinstance(demonstrations, dict): if self.unique_state: demonstrations['states'] = dict(state=demonstrations['states']) if self.unique_action: demonstrations['actions'] = dict(action=demonstrations['actions']) self.model.import_demo_experience(**demonstrations) else: if self.unique_state: states = dict(state=list()) else: states = {name: list() for name in demonstrations[0]['states']} internals = {name: list() for name in demonstrations[0]['internals']} if self.unique_action: actions = dict(action=list()) else: actions = {name: list() for name in demonstrations[0]['actions']} terminal = list() reward = list() for demonstration in demonstrations: if self.unique_state: states['state'].append(demonstration['states']) else: for (name, state) in states.items(): state.append(demonstration['states'][name]) for (name, internal) in internals.items(): internal.append(demonstration['internals'][name]) if self.unique_action: actions['action'].append(demonstration['actions']) else: for (name, action) in actions.items(): action.append(demonstration['actions'][name]) terminal.append(demonstration['terminal']) reward.append(demonstration['reward']) self.model.import_demo_experience(states=states, internals=internals, actions=actions, terminal=terminal, reward=reward)
Imports demonstrations, i.e. expert observations. Note that for large numbers of observations, set_demonstrations is more appropriate, which directly sets memory contents to an array an expects a different layout. Args: demonstrations: List of observation dicts
codesearchnet
def _build_endpoint(self, endpoint_name):
    endpoint_relative = settings.get('asmaster_endpoints', endpoint_name)
    return '%s%s' % (self.host, endpoint_relative)
Generate an endpoint url from a setting name. Args: endpoint_name(str): setting name for the endpoint to build Returns: (str) url endpoint
juraj-google-style
def ExpandWindowsUserEnvironmentVariables(data_string, knowledge_base, sid=None, username=None): win_environ_regex = re.compile('%([^%]+?)%') components = [] offset = 0 for match in win_environ_regex.finditer(data_string): components.append(data_string[offset:match.start()]) kb_user = knowledge_base.GetUser(sid=sid, username=username) kb_value = None if kb_user: kb_value = getattr(kb_user, match.group(1).lower(), None) if (isinstance(kb_value, string_types) and kb_value): components.append(kb_value) else: components.append(('%%%s%%' % match.group(1))) offset = match.end() components.append(data_string[offset:]) return ''.join(components)
r"""Take a string and expand windows user environment variables based. Args: data_string: A string, e.g. "%TEMP%\\LogFiles" knowledge_base: A knowledgebase object. sid: A Windows SID for a user to expand for. username: A Windows user name to expand for. Returns: A string with available environment variables expanded.
codesearchnet
def set_element_dt(self, el_name, dt, tz=None, el_idx=0):
    dt = d1_common.date_time.cast_naive_datetime_to_tz(dt, tz)
    self.get_element_by_name(el_name, el_idx).text = dt.isoformat()
Set the text of the selected element to an ISO8601 formatted datetime. Args: el_name : str Name of element to update. dt : datetime.datetime Date and time to set tz : datetime.tzinfo Timezone to set - Without a timezone, other contextual information is required in order to determine the exact represented time. - If dt has timezone: The ``tz`` parameter is ignored. - If dt is naive (without timezone): The timezone is set to ``tz``. - ``tz=None``: Prevent naive dt from being set to a timezone. Without a timezone, other contextual information is required in order to determine the exact represented time. - ``tz=d1_common.date_time.UTC()``: Set naive dt to UTC. el_idx : int Index of element to use in the event that there are multiple sibling elements with the same name.
codesearchnet
def traverse_ancestors(self, include_self=True):
    if not isinstance(include_self, bool):
        raise TypeError("include_self must be a bool")
    if include_self:
        c = self
    else:
        c = self.parent
    while c is not None:
        yield c
        c = c.parent
Traverse over the ancestors of this ``Node`` Args: ``include_self`` (``bool``): ``True`` to include self in the traversal, otherwise ``False``
juraj-google-style
def get_json(filename):
    check_if_this_file_exist(filename)
    filename = os.path.abspath(filename)
    s = command_line(['exiftool', '-G', '-j', '-sort', filename])
    if s:
        s = s.decode('utf-8').rstrip('\r\n')
        return json.loads(s)
    else:
        return s
Return a JSON value of the EXIF data. Takes a filename and returns a JSON object. Arguments: filename {string} -- your filename Returns: [JSON] -- a JSON object
codesearchnet
def AnalyzeClient(self, client): keywords = set(["."]) def TryAppend(prefix, keyword): precondition.AssertType(prefix, Text) precondition.AssertType(keyword, Text) if keyword: keyword_string = self._NormalizeKeyword(keyword) keywords.add(keyword_string) if prefix: keywords.add(prefix + ":" + keyword_string) def TryAppendPrefixes(prefix, keyword, delimiter): TryAppend(prefix, keyword) segments = keyword.split(delimiter) for i in range(1, len(segments)): TryAppend(prefix, delimiter.join(segments[0:i])) return len(segments) def TryAppendIP(ip): TryAppend("ip", ip) if TryAppendPrefixes("ip", Text(ip), ".") == 4: return TryAppendPrefixes("ip", Text(ip), ":") def TryAppendMac(mac): TryAppend("mac", mac) if len(mac) == 12: TryAppend("mac", ":".join([mac[i:i + 2] for i in range(0, 12, 2)])) TryAppend("host", client.knowledge_base.fqdn) host = client.knowledge_base.fqdn.split(".", 1)[0] TryAppendPrefixes("host", host, "-") TryAppendPrefixes("host", client.knowledge_base.fqdn, ".") TryAppend("", client.knowledge_base.os) TryAppend("", client.Uname()) TryAppend("", client.os_release) TryAppend("", client.os_version) TryAppend("", client.kernel) TryAppend("", client.arch) kb = client.knowledge_base if kb: for user in kb.users: TryAppend("user", user.username) TryAppend("", user.full_name) if user.full_name: for name in user.full_name.split(): TryAppend("", name.strip("\"'()")) for ip in client.GetIPAddresses(): TryAppendIP(ip) for mac in client.GetMacAddresses(): TryAppendMac(mac) client_info = client.startup_info.client_info if client_info: TryAppend("client", client_info.client_name) TryAppend("client", Text(client_info.client_version)) if client_info.labels: for label in client_info.labels: TryAppend("label", label) return keywords
Finds the client_id and keywords for a client. Args: client: A Client object record to find keywords for. Returns: A list of keywords related to client.
juraj-google-style
def batch_norm_relu(inputs, is_training, relu=True, init_zero=False, data_format='channels_first'): if init_zero: gamma_initializer = tf.zeros_initializer() else: gamma_initializer = tf.ones_initializer() if (data_format == 'channels_first'): axis = 1 else: axis = 3 inputs = layers().BatchNormalization(axis=axis, momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON, center=True, scale=True, fused=True, gamma_initializer=gamma_initializer)(inputs, training=is_training) if relu: inputs = tf.nn.relu(inputs) return inputs
Performs a batch normalization followed by a ReLU. Args: inputs: `Tensor` of shape `[batch, channels, ...]`. is_training: `bool` for whether the model is training. relu: `bool` if False, omits the ReLU operation. init_zero: `bool` if True, initializes scale parameter of batch normalization with 0 instead of 1 (default). data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last" for `[batch, height, width, channels]`. Returns: A normalized `Tensor` with the same `data_format`.
codesearchnet
def qn_to_qubo(expr): try: import sympy except ImportError: raise ImportError("This function requires sympy. Please install it.") assert type(expr) == sympy.Add to_i = lambda s: int(str(s)[1:]) max_i = max(map(to_i, expr.free_symbols)) + 1 qubo = [[0.] * max_i for _ in range(max_i)] for arg in expr.args: syms = arg.free_symbols assert len(syms) <= 2 if len(syms) == 2: assert type(arg) == sympy.Mul i, j = list(map(to_i, syms)) if i > j: i, j = j, i if i == j: if len(arg.args) == 2: qubo[i][i] = float(arg.args[0]) elif len(arg.args) == 1: qubo[i][i] = 1.0 else: raise ValueError(f"Too many args! arg.args = {arg.args}") continue if len(arg.args) == 3: qubo[i][j] = float(arg.args[0]) elif len(arg.args) == 2: qubo[i][j] if len(syms) == 1: if len(arg.args) == 2: assert type(arg) == sympy.Mul i = to_i(next(iter(syms))) qubo[i][i] = float(arg.args[0]) elif len(arg.args) == 1: qubo[i][i] = 1.0 else: raise ValueError(f"Too many args! arg.args = {arg.args}") return qubo
Convert Sympy's expr to QUBO. Args: expr: Sympy's quadratic expression with variable `q0`, `q1`, ... Returns: [[float]]: Returns QUBO matrix.
juraj-google-style
def _SparseReorderGrad(op: ops.Operation, unused_output_indices_grad, output_values_grad): input_indices = op.inputs[0] input_shape = op.inputs[2] num_entries = array_ops.shape(input_indices)[0] entry_indices = math_ops.range(num_entries) sp_unordered = sparse_tensor.SparseTensor(input_indices, entry_indices, input_shape) sp_ordered = sparse_ops.sparse_reorder(sp_unordered) inverted_permutation = array_ops.invert_permutation(sp_ordered.values) return (None, array_ops.gather(output_values_grad, inverted_permutation), None)
Gradients for the SparseReorder op. Args: op: the SparseReorder op unused_output_indices_grad: the incoming gradients of the output indices output_values_grad: the incoming gradients of the output values Returns: Gradient for each of the 3 input tensors: (input_indices, input_values, input_shape) The gradients for input_indices and input_shape is None.
github-repos
def PmfProbLess(pmf1, pmf2):
    total = 0.0
    for v1, p1 in pmf1.Items():
        for v2, p2 in pmf2.Items():
            if v1 < v2:
                total += p1 * p2
    return total
Probability that a value from pmf1 is less than a value from pmf2. Args: pmf1: Pmf object pmf2: Pmf object Returns: float probability
codesearchnet
def minimum(x1, x2, output_shape=None, name=None): output_shape = convert_to_shape(output_shape) with tf.name_scope(name, default_name="minimum"): x1, x2 = binary_arguments_to_tensors(x1, x2) return MinMaxOperation( tf.minimum, x1, x2, output_shape=_infer_binary_broadcast_shape( x1.shape, x2.shape, output_shape)).outputs[0]
Binary minimum with broadcasting. Args: x1: a Tensor x2: a Tensor output_shape: an optional Shape name: an optional string Returns: a Tensor
juraj-google-style
def _create_moving_sequence(image, pad_lefts, total_padding): with tf.name_scope("moving_sequence"): def get_padded_image(args): pad_left, = args pad_right = total_padding - pad_left padding = tf.stack([pad_left, pad_right], axis=-1) z = tf.zeros((1, 2), dtype=pad_left.dtype) padding = tf.concat([padding, z], axis=0) return tf.pad(image, padding) padded_images = tf.map_fn( get_padded_image, [pad_lefts], dtype=tf.uint8, infer_shape=False, back_prop=False) return padded_images
Create a moving image sequence from the given image and left padding values. Args: image: [in_h, in_w, n_channels] uint8 array pad_lefts: [sequence_length, 2] int32 array of left padding values total_padding: tensor of padding values, (pad_h, pad_w) Returns: [sequence_length, out_h, out_w, n_channels] uint8 image sequence, where out_h = in_h + pad_h, out_w = in_w + pad_w
juraj-google-style
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1): if axis != -1 and axis != len(output.shape) - 1: raise ValueError(f'Only axis=-1 is currently supported. Received: axis={axis}') output, from_logits = _get_logits(output, from_logits, 'Softmax', 'sparse_categorical_crossentropy') target = tf.convert_to_tensor(target) target = tf.cast(target, dtype='int64') output = tf.convert_to_tensor(output) if len(target.shape) == len(output.shape) and target.shape[-1] == 1: target = tf.squeeze(target, axis=-1) if len(output.shape) < 1: raise ValueError(f'Argument `output` must be at least rank 1. Received: output.shape={output.shape}') if len(target.shape) != len(output.shape[:-1]): raise ValueError(f'Argument `output` must have rank (ndim) `target.ndim - 1`. Received: target.shape={target.shape}, output.shape={output.shape}') for e1, e2 in zip(target.shape, output.shape[:-1]): if e1 is not None and e2 is not None and (e1 != e2): raise ValueError(f'Arguments `target` and `output` must have the same shape up until the last dimension: target.shape={target.shape}, output.shape={output.shape}') if not from_logits: output = tf.clip_by_value(output, backend.epsilon(), 1 - backend.epsilon()) output = tf.math.log(output) result = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output) return result
Categorical crossentropy with integer targets. Args: target: An integer tensor. output: A tensor resulting from a softmax (unless `from_logits` is True, in which case `output` is expected to be the logits). from_logits: Boolean, whether `output` is the result of a softmax, or is a tensor of logits. axis: Int specifying the channels axis. `axis=-1` corresponds to data format `channels_last`, and `axis=1` corresponds to data format `channels_first`. Returns: Output tensor.
github-repos
def WriteSourceFile(self, source_file):
    debug_event = debug_event_pb2.DebugEvent(source_file=source_file)
    self._EnsureTimestampAdded(debug_event)
    _pywrap_debug_events_writer.WriteSourceFile(self._dump_root, debug_event)
Write a SourceFile proto with the writer. Args: source_file: A SourceFile proto, describing the content of a source file involved in the execution of the debugged TensorFlow program.
github-repos
def DeleteSnapshots(self, request, global_params=None):
    config = self.GetMethodConfig('DeleteSnapshots')
    return self._RunMethod(config, request, global_params=global_params)
Deletes a snapshot. Args: request: (DataflowProjectsDeleteSnapshotsRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (DeleteSnapshotResponse) The response message.
github-repos
def Getattr(self, path, fh=None):
    del fh
    if not path:
        raise fuse.FuseOSError(errno.ENOENT)
    if path != self.root:
        full_path = self.root.Add(path)
    else:
        full_path = path
    fd = aff4.FACTORY.Open(full_path, token=self.token)
    if full_path == "/":
        return self.MakePartialStat(fd)
    aff4_stat = fd.Get(fd.Schema.STAT)
    if aff4_stat:
        return aff4_stat.AsDict()
    if fd.Get(fd.Schema.LAST) is None:
        raise fuse.FuseOSError(errno.ENOENT)
    return self.MakePartialStat(fd)
Performs a stat on a file or directory. Args: path: The path to stat. fh: A file handler. Not used. Returns: A dictionary mapping st_ names to their values. Raises: FuseOSError: When a path is supplied that grr doesn't know about, ie an invalid file path. ValueError: If an empty path is passed. (The empty string, when passed to self.root.Add, returns a path for aff4:/, the root directory, which is not the behaviour we want.)
juraj-google-style
def stddev(self, name='stddev'): with self._name_scope(name): try: return self._stddev() except NotImplementedError as original_exception: try: return math_ops.sqrt(self._variance()) except NotImplementedError: raise original_exception
Standard deviation. Standard deviation is defined as, ```none stddev = E[(X - E[X])**2]**0.5 ``` where `X` is the random variable associated with this distribution, `E` denotes expectation, and `stddev.shape = batch_shape + event_shape`. Args: name: Python `str` prepended to names of ops created by this function. Returns: stddev: Floating-point `Tensor` with shape identical to `batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
github-repos
def _check_validity(cls, text): if ((not text[0].lstrip().startswith('1 ')) or (not text[1].lstrip().startswith('2 '))): raise ValueError('Line number check failed') for line in text: line = line.strip() if (str(cls._checksum(line)) != line[(- 1)]): raise ValueError('Checksum validation failed')
Check the validity of a TLE.

Args:
    text (tuple of str)
Raises:
    ValueError
codesearchnet
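The standard TLE checksum rule, which the class's _checksum presumably implements (an assumption), sums the digits of each line's first 68 characters, counts each '-' as 1, and takes the result modulo 10. A small stand-alone sketch of the same validation:

def tle_checksum(line):
    # Digits contribute their value, each '-' counts as 1, everything else as 0.
    total = 0
    for char in line[:68]:
        if char.isdigit():
            total += int(char)
        elif char == '-':
            total += 1
    return total % 10

def is_valid_tle(line1, line2):
    # Mirrors _check_validity: line-number prefixes plus trailing checksum digits.
    if not line1.lstrip().startswith('1 ') or not line2.lstrip().startswith('2 '):
        return False
    return all(str(tle_checksum(l.strip())) == l.strip()[-1] for l in (line1, line2))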
def remove_plugin(self, name, force=False): url = self._url('/plugins/{0}', name) res = self._delete(url, params={'force': force}) self._raise_for_status(res) return True
Remove an installed plugin. Args: name (string): Name of the plugin to remove. The ``:latest`` tag is optional, and is the default if omitted. force (bool): Disable the plugin before removing. This may result in issues if the plugin is in use by a container. Returns: ``True`` if successful
codesearchnet
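Typical usage through docker-py's low-level API client; the plugin name below is only an illustrative placeholder.

import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')
# Force-remove a plugin even if a container still references it
# ('vieux/sshfs' is just an example plugin name).
client.remove_plugin('vieux/sshfs:latest', force=True)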
def create_xml_dom_element(doc, name, value): s = str_or_unicode(value) if (six.PY2 and (not isinstance(s, unicode))): s = s.decode('utf-8', 'ignore') if isinstance(value, bool): s = s.lower() s = _ILLEGAL_XML_CHARS_REGEX.sub(u'', s) e = doc.createElement(name) e.appendChild(doc.createTextNode(s)) return e
Returns an XML DOM element with name and text value. Args: doc: minidom.Document, the DOM document it should create nodes from. name: str, the tag of XML element. value: object, whose string representation will be used as the value of the XML element. Illegal or highly discouraged xml 1.0 characters are stripped. Returns: An instance of minidom.Element.
codesearchnet
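Example usage with the standard-library minidom, calling the helper defined above; note how boolean values are lower-cased.

from xml.dom import minidom

doc = minidom.Document()
element = create_xml_dom_element(doc, 'use_gpu', True)
print(element.toxml())  # <use_gpu>true</use_gpu>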
def _create_dag_op(self, name, params, qargs): if name == "u0": op_class = U0Gate elif name == "u1": op_class = U1Gate elif name == "u2": op_class = U2Gate elif name == "u3": op_class = U3Gate elif name == "x": op_class = XGate elif name == "y": op_class = YGate elif name == "z": op_class = ZGate elif name == "t": op_class = TGate elif name == "tdg": op_class = TdgGate elif name == "s": op_class = SGate elif name == "sdg": op_class = SdgGate elif name == "swap": op_class = SwapGate elif name == "rx": op_class = RXGate elif name == "ry": op_class = RYGate elif name == "rz": op_class = RZGate elif name == "rzz": op_class = RZZGate elif name == "id": op_class = IdGate elif name == "h": op_class = HGate elif name == "cx": op_class = CnotGate elif name == "cy": op_class = CyGate elif name == "cz": op_class = CzGate elif name == "ch": op_class = CHGate elif name == "crz": op_class = CrzGate elif name == "cu1": op_class = Cu1Gate elif name == "cu3": op_class = Cu3Gate elif name == "ccx": op_class = ToffoliGate elif name == "cswap": op_class = FredkinGate else: raise QiskitError("unknown operation for ast node name %s" % name) op = op_class(*params) self.dag.apply_operation_back(op, qargs, [], condition=self.condition)
Create a DAG node out of a parsed AST op node. Args: name (str): operation name to apply to the dag. params (list): op parameters qargs (list(QuantumRegister, int)): qubits to attach to Raises: QiskitError: if encountering a non-basis opaque gate
juraj-google-style
async def send_script(self, conn_id, data): self._ensure_connection(conn_id, True) connection_string = self._get_property(conn_id, "connection_string") msg = dict(connection_string=connection_string, fragment_count=1, fragment_index=0, script=base64.b64encode(data)) await self._send_command(OPERATIONS.SEND_SCRIPT, msg, COMMANDS.SendScriptResponse)
Send a script to this IOTile device.

Args:
    conn_id (int): A unique identifier that will refer to this connection
    data (bytes): the script to send to the device
juraj-google-style
def broadcast(tensor): _check_device(tensor) with ops.device(tensor.device): return gen_nccl_ops.nccl_broadcast(input=tensor, shape=tensor.shape)
Returns a tensor that can be efficiently transferred to other devices.

Args:
  tensor: The tensor to send; must be assigned to a GPU device.

Returns:
  A tensor with the value of `tensor`, which can be used as input to
  ops on other GPU devices.
github-repos
def create_variable(self, feature_column, name, shape, dtype=None, trainable=True, use_resource=True, initializer=None): if name in self._cols_to_vars_map[feature_column]: raise ValueError('Variable already exists.') with trackable.no_manual_dependency_tracking_scope(self._layer): var = self._layer.add_weight(name=name, shape=shape, dtype=dtype, initializer=initializer, trainable=self._trainable and trainable, use_resource=use_resource, getter=variable_scope.get_variable) if isinstance(var, variables.PartitionedVariable): for v in var: part_name = name + '/' + str(v._get_save_slice_info().var_offset[0]) self._layer._track_trackable(v, feature_column.name + '/' + part_name) elif isinstance(var, trackable.Trackable): self._layer._track_trackable(var, feature_column.name + '/' + name) self._cols_to_vars_map[feature_column][name] = var return var
Creates a new variable. Args: feature_column: A `FeatureColumn` object this variable corresponds to. name: variable name. shape: variable shape. dtype: The type of the variable. Defaults to `self.dtype` or `float32`. trainable: Whether this variable is trainable or not. use_resource: If true, we use resource variables. Otherwise we use RefVariable. initializer: initializer instance (callable). Returns: The created variable.
github-repos
def _rpc(self, method, *args): with self._lock: apiid = next(self._counter) data = {'id': apiid, 'method': method, 'params': args} request = json.dumps(data) self._client_send(request) response = self._client_receive() if (not response): raise ProtocolError(self._ad, ProtocolError.NO_RESPONSE_FROM_SERVER) result = json.loads(str(response, encoding='utf8')) if result['error']: raise ApiError(self._ad, result['error']) if (result['id'] != apiid): raise ProtocolError(self._ad, ProtocolError.MISMATCHED_API_ID) if (result.get('callback') is not None): if (self._event_client is None): self._event_client = self._start_event_client() return callback_handler.CallbackHandler(callback_id=result['callback'], event_client=self._event_client, ret_value=result['result'], method_name=method, ad=self._ad) return result['result']
Sends an rpc to the app. Args: method: str, The name of the method to execute. args: any, The args of the method. Returns: The result of the rpc. Raises: ProtocolError: Something went wrong with the protocol. ApiError: The rpc went through, however executed with errors.
codesearchnet
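The wire format _rpc sends is just a JSON-RPC-style object, as the code above shows; for instance (method name, args, and reply values invented for illustration):

import json

apiid = 0
request = json.dumps({'id': apiid, 'method': 'makeToast', 'params': ['hello']})
print(request)  # {"id": 0, "method": "makeToast", "params": ["hello"]}

# A well-formed reply echoes the id and carries either a result or an error:
response = '{"id": 0, "result": true, "error": null, "callback": null}'
print(json.loads(response)['result'])  # True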
def _get_user_command_string(self): sdk_int = int(self._ad.build_info['build_version_sdk']) if sdk_int < 24: return '' return f'--user {self.user_id}'
Gets the appropriate command argument for specifying user IDs. By default, `SnippetClient` operates within the current user. We don't add the `--user {ID}` arg when Android's SDK is below 24, where multi-user support is not well implemented. Returns: String, the command param section to be formatted into the adb commands.
github-repos
def verify_fileobj(fileobj, writable=False): try: data = fileobj.read(0) except Exception: if (not hasattr(fileobj, 'read')): raise ValueError(('%r not a valid file object' % fileobj)) raise ValueError(("Can't read from file object %r" % fileobj)) if (not isinstance(data, bytes)): raise ValueError(('file object %r not opened in binary mode' % fileobj)) if writable: try: fileobj.write(b'') except Exception: if (not hasattr(fileobj, 'write')): raise ValueError(('%r not a valid file object' % fileobj)) raise ValueError(("Can't write to file object %r" % fileobj))
Verifies that the passed fileobj is a file like object which we can use. Args: writable (bool): verify that the file object is writable as well Raises: ValueError: In case the object is not a file object that is readable (or writable if required) or is not opened in bytes mode.
codesearchnet
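Usage sketch: an in-memory bytes buffer passes the check, while a text-mode handle does not.

import io

buf = io.BytesIO(b'abc')
verify_fileobj(buf)                 # OK: readable, binary mode
verify_fileobj(buf, writable=True)  # OK: BytesIO is writable too

text_handle = io.StringIO('abc')
try:
    verify_fileobj(text_handle)
except ValueError as exc:
    print(exc)  # rejected: not opened in binary mode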
def match_bitap(self, text, pattern, loc):
    s = self.match_alphabet(pattern)

    def match_bitapScore(e, x):
        """Compute and return the score for a match with e errors and x location.

        Accesses loc and pattern through being a closure.

        Args:
            e: Number of errors in match.
            x: Location of match.

        Returns:
            Overall score for match (0.0 = good, 1.0 = bad).
        """
        accuracy = (float(e) / len(pattern))
        proximity = abs((loc - x))
        if (not self.Match_Distance):
            return ((proximity and 1.0) or accuracy)
        return (accuracy + (proximity / float(self.Match_Distance)))

    score_threshold = self.Match_Threshold
    best_loc = text.find(pattern, loc)
    if (best_loc != (- 1)):
        score_threshold = min(match_bitapScore(0, best_loc), score_threshold)
    best_loc = text.rfind(pattern, (loc + len(pattern)))
    if (best_loc != (- 1)):
        score_threshold = min(match_bitapScore(0, best_loc), score_threshold)
    matchmask = (1 << (len(pattern) - 1))
    best_loc = (- 1)
    bin_max = (len(pattern) + len(text))
    last_rd = None
    for d in range(len(pattern)):
        # Binary search for the widest match window that still beats the threshold.
        bin_min = 0
        bin_mid = bin_max
        while (bin_min < bin_mid):
            if (match_bitapScore(d, (loc + bin_mid)) <= score_threshold):
                bin_min = bin_mid
            else:
                bin_max = bin_mid
            bin_mid = (((bin_max - bin_min) 
        bin_max = bin_mid
        start = max(1, ((loc - bin_mid) + 1))
        finish = (min((loc + bin_mid), len(text)) + len(pattern))
        rd = ([0] * (finish + 2))
        rd[(finish + 1)] = ((1 << d) - 1)
        for j in range(finish, (start - 1), (- 1)):
            if (len(text) <= (j - 1)):
                charMatch = 0
            else:
                charMatch = s.get(text[(j - 1)], 0)
            if (d == 0):
                rd[j] = (((rd[(j + 1)] << 1) | 1) & charMatch)
            else:
                rd[j] = (((((rd[(j + 1)] << 1) | 1) & charMatch) | (((last_rd[(j + 1)] | last_rd[j]) << 1) | 1)) | last_rd[(j + 1)])
            if (rd[j] & matchmask):
                score = match_bitapScore(d, (j - 1))
                if (score <= score_threshold):
                    score_threshold = score
                    best_loc = (j - 1)
                    if (best_loc > loc):
                        start = max(1, ((2 * loc) - best_loc))
                    else:
                        break
        if (match_bitapScore((d + 1), loc) > score_threshold):
            break
        last_rd = rd
    return best_loc
Locate the best instance of 'pattern' in 'text' near 'loc' using the Bitap algorithm. Args: text: The text to search. pattern: The pattern to search for. loc: The location to search around. Returns: Best match index or -1.
codesearchnet
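In the diff-match-patch library this method is normally reached through match_main, which handles exact matches and location clamping before falling back to the Bitap search; a hedged usage sketch, assuming the standard diff_match_patch Python package:

from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
dmp.Match_Threshold = 0.5   # 0.0 = exact matches only, 1.0 = very permissive
dmp.Match_Distance = 1000   # how far from `loc` a match may drift

# Look for a slightly misspelled pattern near index 5 of the text.
loc = dmp.match_main('the quick brown fox', 'quikc', 5)
print(loc)  # index of the best fuzzy match, or -1 if nothing beats the threshold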
def do_operation_update(self, info, an_op): self.update_op_func(self.metric_name, info, an_op)
Updates an operation using the assigned update_op_func.

Args:
    info (:class:`endpoints_management.control.report_request.Info`): the
        info instance used to perform the update
    an_op: the operation to update
juraj-google-style
def _IsValidUrl(self, url): parsed_url = urlparse.urlparse(url) return (parsed_url.scheme in self._SUPPORTED_URL_SCHEMES)
Checks if a URL is considered valid.

Args:
  url (str): URL to check.

Returns:
  bool: True if the URL is valid.
codesearchnet
def find_copy_constructor(type_): copy_ = type_.constructors((lambda x: is_copy_constructor(x)), recursive=False, allow_empty=True) if copy_: return copy_[0] return None
Returns reference to copy constructor. Args: type_ (declarations.class_t): the class to be searched. Returns: declarations.constructor_t: the copy constructor
codesearchnet
def splitdrive(self, path): path = make_string_path(path) if self.is_windows_fs: if (len(path) >= 2): path = self.normcase(path) sep = self._path_separator(path) if (sys.version_info >= (2, 7, 8)): if ((path[0:2] == (sep * 2)) and (path[2:3] != sep)): sep_index = path.find(sep, 2) if (sep_index == (- 1)): return (path[:0], path) sep_index2 = path.find(sep, (sep_index + 1)) if (sep_index2 == (sep_index + 1)): return (path[:0], path) if (sep_index2 == (- 1)): sep_index2 = len(path) return (path[:sep_index2], path[sep_index2:]) if (path[1:2] == self._matching_string(path, ':')): return (path[:2], path[2:]) return (path[:0], path)
Splits the path into the drive part and the rest of the path.

Taken from Windows specific implementation in Python 3.5
and slightly adapted.

Args:
    path: the full path to be split.

Returns:
    A tuple of the drive part and the rest of the path, or of
    an empty string and the full path if drive letters are
    not supported or no drive is present.
codesearchnet
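The behavior being mimicked is the standard Windows ntpath.splitdrive, which can be called directly to see the expected outputs for drive-letter, UNC, and relative paths:

import ntpath

print(ntpath.splitdrive('C:\\Users\\alice'))         # ('C:', '\\Users\\alice')
print(ntpath.splitdrive('\\\\server\\share\\file'))  # ('\\\\server\\share', '\\file')
print(ntpath.splitdrive('relative\\path'))           # ('', 'relative\\path')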
def get_plot(self, xlim=None, ylim=None, units="thz"): u = freq_units(units) ncolors = max(3, len(self._doses)) ncolors = min(9, ncolors) import palettable colors = palettable.colorbrewer.qualitative.Set1_9.mpl_colors y = None alldensities = [] allfrequencies = [] plt = pretty_plot(12, 8) for key, dos in self._doses.items(): frequencies = dos['frequencies'] * u.factor densities = dos['densities'] if y is None: y = np.zeros(frequencies.shape) if self.stack: y += densities newdens = y.copy() else: newdens = densities allfrequencies.append(frequencies) alldensities.append(newdens) keys = list(self._doses.keys()) keys.reverse() alldensities.reverse() allfrequencies.reverse() allpts = [] for i, (key, frequencies, densities) in enumerate(zip(keys, allfrequencies, alldensities)): allpts.extend(list(zip(frequencies, densities))) if self.stack: plt.fill(frequencies, densities, color=colors[i % ncolors], label=str(key)) else: plt.plot(frequencies, densities, color=colors[i % ncolors], label=str(key), linewidth=3) if xlim: plt.xlim(xlim) if ylim: plt.ylim(ylim) else: xlim = plt.xlim() relevanty = [p[1] for p in allpts if xlim[0] < p[0] < xlim[1]] plt.ylim((min(relevanty), max(relevanty))) ylim = plt.ylim() plt.plot([0, 0], ylim, 'k--', linewidth=2) plt.xlabel(r'$\mathrm{{Frequencies\ ({})}}$'.format(u.label)) plt.ylabel(r'$\mathrm{Density\ of\ states}$') plt.legend() leg = plt.gca().get_legend() ltext = leg.get_texts() plt.setp(ltext, fontsize=30) plt.tight_layout() return plt
Get a matplotlib plot showing the DOS. Args: xlim: Specifies the x-axis limits. Set to None for automatic determination. ylim: Specifies the y-axis limits. units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.
juraj-google-style
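If this method belongs to pymatgen's PhononDosPlotter (an assumption based on the frequency-unit handling), typical usage would look roughly like the following; the toy DOS values are invented for the sketch.

import numpy as np
from pymatgen.phonon.dos import PhononDos
from pymatgen.phonon.plotter import PhononDosPlotter

# Toy DOS: a single Gaussian-like peak over made-up THz frequencies.
freqs = np.linspace(0, 10, 200)
densities = np.exp(-(freqs - 5.0) ** 2)
dos = PhononDos(freqs, densities)

plotter = PhononDosPlotter(stack=False)
plotter.add_dos('toy material', dos)
plt = plotter.get_plot(units='cm-1')  # the method shown above
plt.savefig('phonon_dos.png')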
def UninstallDriver(bundle_name): km = objc.KextManager() cf_bundle_name = km.PyStringToCFString(bundle_name) status = km.iokit.KextManagerUnloadKextWithIdentifier(cf_bundle_name) km.dll.CFRelease(cf_bundle_name) return status
Calls into the IOKit to unload a kext by its name.

Args:
  bundle_name: The bundle identifier of the kernel extension as defined in
    Info.plist field CFBundleIdentifier.

Returns:
  The error code from the library call. objc.OS_SUCCESS if successful.
codesearchnet
def case_report_content(store, institute_obj, case_obj): variant_types = { 'causatives_detailed': 'causatives', 'suspects_detailed': 'suspects', 'classified_detailed': 'acmg_classification', 'tagged_detailed': 'manual_rank', 'dismissed_detailed': 'dismiss_variant', 'commented_detailed': 'is_commented', } data = case_obj for individual in data['individuals']: try: sex = int(individual.get('sex', 0)) except ValueError as err: sex = 0 individual['sex_human'] = SEX_MAP[sex] individual['phenotype_human'] = PHENOTYPE_MAP.get(individual['phenotype']) data['comments'] = store.events(institute_obj, case=case_obj, comments=True) data['manual_rank_options'] = MANUAL_RANK_OPTIONS data['dismissed_options'] = DISMISS_VARIANT_OPTIONS data['genetic_models'] = dict(GENETIC_MODELS) data['report_created_at'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M") evaluated_variants = {} for vt in variant_types: evaluated_variants[vt] = [] for var_type in ['causatives', 'suspects']: vt = '_'.join([var_type, 'detailed']) for var_id in case_obj.get(var_type,[]): variant_obj = store.variant(var_id) if not variant_obj: continue evaluated_variants[vt].append(variant_obj) for var_obj in store.evaluated_variants(case_id=case_obj['_id']): for vt in variant_types: keyword = variant_types[vt] if keyword in var_obj: evaluated_variants[vt].append(var_obj) for var_type in evaluated_variants: decorated_variants = [] for var_obj in evaluated_variants[var_type]: if var_obj['category'] == 'snv': decorated_info = variant_decorator( store=store, institute_obj=institute_obj, case_obj=case_obj, variant_id=None, variant_obj=var_obj, add_case=False, add_other=False, get_overlapping=False ) else: decorated_info = sv_variant( store=store, institute_id=institute_obj['_id'], case_name=case_obj['display_name'], variant_obj=var_obj, add_case=False, get_overlapping=False ) decorated_variants.append(decorated_info['variant']) data[var_type] = decorated_variants return data
Gather contents to be visualized in a case report Args: store(adapter.MongoAdapter) institute_obj(models.Institute) case_obj(models.Case) Returns: data(dict)
juraj-google-style
def convert_to_tensor(x, dtype=None, sparse=None, ragged=None): if any_symbolic_tensors((x,)): return ConvertToTensor(dtype=dtype, sparse=sparse, ragged=ragged)(x) return backend.core.convert_to_tensor(x, dtype=dtype, sparse=sparse, ragged=ragged)
Convert a NumPy array or Python array to a tensor.

Native tensors for the current backend are left unchanged unless the `dtype`,
`sparse` or `ragged` arguments are set.

Args:
    x: A NumPy array, Python array (can be nested) or a backend tensor.
    dtype: The target type. If `None`, the type of `x` is used.
    sparse: Whether to keep sparse tensors. `False` will cause sparse
        tensors to be densified. The default value of `None` means that
        sparse tensors are kept only if the backend supports them.
    ragged: Whether to keep ragged tensors. `False` will cause ragged
        tensors to be densified. The default value of `None` means that
        ragged tensors are kept only if the backend supports them.

Returns:
    A backend tensor of the specified `dtype` and sparseness.

Example:

>>> x = np.array([1, 2, 3])
>>> y = keras.ops.convert_to_tensor(x)
github-repos
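A slightly fuller usage sketch than the docstring example, showing the `dtype` argument; the input values are arbitrary.

import numpy as np
from keras import ops

x = np.array([[1, 2], [3, 4]])
t = ops.convert_to_tensor(x, dtype='float32')  # cast during conversion
print(t.dtype)  # float32, in the active backend's native tensor type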
def _calculate_scores(self, query, key): scores = math_ops.matmul(query, key, transpose_b=True) if self.scale is not None: scores *= self.scale return scores
Calculates attention scores as a query-key dot product. Args: query: Query tensor of shape `[batch_size, Tq, dim]`. key: Key tensor of shape `[batch_size, Tv, dim]`. Returns: Tensor of shape `[batch_size, Tq, Tv]`.
github-repos
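The score computation is just an (optionally scaled) query-key dot product; a NumPy sketch with invented shapes, equivalent to tf.matmul(query, key, transpose_b=True) followed by scaling:

import numpy as np

batch, Tq, Tv, dim = 2, 3, 4, 8
query = np.random.rand(batch, Tq, dim)
key = np.random.rand(batch, Tv, dim)
scale = 1.0 / np.sqrt(dim)  # a common choice; the layer's `scale` may differ

scores = np.einsum('bqd,bvd->bqv', query, key) * scale
print(scores.shape)  # (2, 3, 4) == [batch_size, Tq, Tv]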
def get_scan_stats(self): time_spent = time.time() return (self._scan_event_count, self._v1_scan_count, self._v1_scan_response_count, self._v2_scan_count, self._device_scan_counts.copy(), (time_spent - self._last_reset_time))
Return the scan event statistics for this adapter Returns: int : total scan events int : total v1 scan count int : total v1 scan response count int : total v2 scan count dict : device-specific scan counts float : seconds since last reset
codesearchnet