Columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes)
def AddCredentialOptions(self, argument_group): argument_group.add_argument( '--credential', action='append', default=[], type=str, dest='credentials', metavar='TYPE:DATA', help=( 'Define a credentials that can be used to unlock encrypted ' 'volumes e.g. BitLocker. The credential is defined as type:data ' 'e.g. "password:BDE-test". Supported credential types are: ' '{0:s}. Binary key data is expected to be passed in BASE-16 ' 'encoding (hexadecimal). WARNING credentials passed via command ' 'line arguments can end up in logs, so use this option with ' 'care.').format(', '.join(self._SUPPORTED_CREDENTIAL_TYPES)))
Adds the credential options to the argument group. The credential options are used to unlock encrypted volumes. Args: argument_group (argparse._ArgumentGroup): argparse argument group.
juraj-google-style
def __init__(self, access_token, user_agent, revoke_uri=None): super(AccessTokenCredentials, self).__init__( access_token, None, None, None, None, None, user_agent, revoke_uri=revoke_uri)
Create an instance of OAuth2Credentials. This is one of the few types of Credentials that you should construct directly; Credentials objects are usually instantiated by a Flow. Args: access_token: string, access token. user_agent: string, The HTTP User-Agent to provide for this application. revoke_uri: string, URI for revoke endpoint. Defaults to None; a token can't be revoked if this is None.
juraj-google-style
def subscribe(object_type: str, subscriber: str, callback_handler: Callable = None) -> EventQueue: key = _keys.subscribers(object_type) DB.remove_from_list(key, subscriber) DB.append_to_list(key, subscriber) return EventQueue(object_type, subscriber, callback_handler)
Subscribe to the specified object type. Returns an EventQueue object which can be used to query events associated with the object type for this subscriber. Args: object_type (str): Object type subscriber (str): Subscriber name callback_handler (function, optional): Callback handler function. Returns: EventQueue, event queue object.
juraj-google-style
def download_report_hook(count, block_size, total_size): percent = int((((count * block_size) * 100) / total_size)) print((('\r%d%%' % percent) + ' completed'), end='\r')
Report hook for download progress. Args: count: current block number block_size: block size total_size: total size
codesearchnet
def _recursive_remove_blank_dirs(self, path): path = os.path.abspath(path) if ((path == self.path) or (len(path) <= len(self.path))): return if (not os.path.exists(path)): return self._recursive_remove_blank_dirs(os.path.dirname(path)) if os.listdir(path): return shutil.rmtree(path) return self._recursive_remove_blank_dirs(os.path.dirname(path))
Make sure that blank directories are removed from the storage. Args: path (str): Path which you suspect is blank.
codesearchnet
def trace_model_call(model, input_signature=None): if input_signature is None: if isinstance(model.call, def_function.Function): input_signature = model.call.input_signature if input_signature is None: input_signature = model_input_signature(model) if input_signature is None: raise_model_input_error(model) @def_function.function(input_signature=input_signature, autograph=False) def _wrapped_model(*args): inputs = args[0] if len(input_signature) == 1 else list(args) with keras_deps.get_call_context_function()().enter(model, inputs=inputs, build_graph=False, call_context_args={'training': False}, saving=True): outputs = model(inputs, training=False) return outputs return _wrapped_model
Trace the model call to create a tf.function for exporting a Keras model. Args: model: A Keras model. input_signature: optional, a list of tf.TensorSpec objects specifying the inputs to the model. Returns: A tf.function wrapping the model's call function with input signatures set. Raises: ValueError: if input signature cannot be inferred from the model.
github-repos
def count_variables_by_type(variables=None): if (variables is None): variables = (tf.global_variables() + tf.local_variables()) unique_types = set((v.dtype.base_dtype for v in variables)) results_dict = {} for dtype in unique_types: if (dtype == tf.string): tf.logging.warning('NB: string Variables present. The memory usage for these Variables will not be accurately computed as it depends on the exact strings stored in a particular session.') vars_of_type = [v for v in variables if (v.dtype.base_dtype == dtype)] num_scalars = sum((v.shape.num_elements() for v in vars_of_type)) results_dict[dtype] = {'num_variables': len(vars_of_type), 'num_scalars': num_scalars} return results_dict
Returns a dict mapping dtypes to number of variables and scalars. Args: variables: iterable of `tf.Variable`s, or None. If None is passed, then all global and local variables in the current graph are used. Returns: A dict mapping tf.dtype keys to a dict containing the keys 'num_scalars' and 'num_variables'.
codesearchnet
def restore_walker(self, dumped_state): selector_string = dumped_state.get(u'selector') if (selector_string is None): raise ArgumentError("Invalid stream walker state in restore_walker, missing 'selector' key", state=dumped_state) selector = DataStreamSelector.FromString(selector_string) walker = self.create_walker(selector) walker.restore(dumped_state) return walker
Restore a stream walker that was previously serialized. Since stream walkers need to be tracked in an internal list for notification purposes, we need to be careful with how we restore them to make sure they remain part of the right list. Args: dumped_state (dict): The dumped state of a stream walker from a previous call to StreamWalker.dump() Returns: StreamWalker: The correctly restored StreamWalker subclass.
codesearchnet
def group_id(self, resource_id): if self._name != 'group': self._request_uri = '{}/{}'.format(self._api_uri, resource_id)
Update the request URI to include the Group ID for specific group retrieval. Args: resource_id (string): The group id.
juraj-google-style
def firmware_outdated(self): datefmt = ' %b %d %Y %H:%M:%S' compat_date = self.compatible_firmware_version.split('compiled')[1] compat_date = datetime.datetime.strptime(compat_date, datefmt) fw_date = self.firmware_version.split('compiled')[1] fw_date = datetime.datetime.strptime(fw_date, datefmt) return (compat_date > fw_date)
Returns whether the J-Link's firmware version is older than the one that the DLL is compatible with. Note: This is not the same as calling ``not jlink.firmware_newer()``. Args: self (JLink): the ``JLink`` instance Returns: ``True`` if the J-Link's firmware is older than the one supported by the DLL, otherwise ``False``.
codesearchnet
def setDataFrame(self, dataFrame): if (not isinstance(dataFrame, pandas.core.frame.DataFrame)): raise TypeError('Argument is not of type pandas.core.frame.DataFrame') self.layoutAboutToBeChanged.emit() self._dataFrame = dataFrame self.layoutChanged.emit()
setter function to _dataFrame. Holds all data. Note: It's not implemented with python properties to keep Qt conventions. Raises: TypeError: if dataFrame is not of type pandas.core.frame.DataFrame. Args: dataFrame (pandas.core.frame.DataFrame): assign dataFrame to _dataFrame. Holds all the data displayed.
codesearchnet
def load_transliteration_table(lang="en", version="2"): src_dir = "transliteration{}".format(version) p = locate_resource(src_dir, lang) file_handler = _open(p) return pickle.load(file_handler)
Return a transliteration table for `lang` and of version `version` Args: lang (string): language code. version (string): version of the parameters to be used.
juraj-google-style
def sort_resources(cls, request, resources, fail_enum, header_proto=None): if not request.sorting: return resources value_handlers = cls._get_handler_set(request, fail_enum, header_proto) def sorter(resource_a, resource_b): for handler in value_handlers: val_a, val_b = handler.get_sort_values(resource_a, resource_b) if val_a < val_b: return handler.xform_result(-1) if val_a > val_b: return handler.xform_result(1) return 0 return sorted(resources, key=cmp_to_key(sorter))
Sorts a list of resources based on a list of sort controls Args: request (object): The parsed protobuf request object resources (list of objects): The resources to be sorted fail_enum (int, enum): The enum status to raise with invalid keys header_proto(class): Class to decode a resources header Returns: list: The sorted list of resources
juraj-google-style
def get_longs(): longs = {} fname = pkg_resources.resource_filename(__name__, 'resources/Latitudes-Longitudes.csv') with open(fname, 'rb') as csvfile: reader = csv.reader(csvfile, delimiter=',') for row in reader: word = row[0].lower() word = re.sub(' ', '', word) longs[word] = float(row[2]) return longs
Get a dictionary that maps Backpage city names to their respective longitudes. Returns: dictionary that maps city names (Strings) to longitudes (Floats)
codesearchnet
def add(self, watch_key, tensor_value): if (watch_key not in self._tensor_data): self._tensor_data[watch_key] = _WatchStore(watch_key, mem_bytes_limit=self._watch_mem_bytes_limit) self._tensor_data[watch_key].add(tensor_value)
Add a tensor value. Args: watch_key: A string representing the debugger tensor watch, e.g., 'Dense_1/BiasAdd:0:DebugIdentity'. tensor_value: The value of the tensor as a numpy.ndarray.
codesearchnet
def insert_rows(self, project_id, dataset_id, table_id, rows, insert_ids=None, skip_invalid_rows=False, ignore_unknown_values=False): insert_ids = [str(self.unique_row_id) if not insert_ids else insert_ids[i] for i, _ in enumerate(rows)] rows = [fast_json_loads(fast_json_dumps(r, default=default_encoder)) for r in rows] result, errors = self._insert_all_rows(project_id, dataset_id, table_id, rows, insert_ids, skip_invalid_rows=skip_invalid_rows, ignore_unknown_values=ignore_unknown_values) return (result, errors)
Inserts rows into the specified table. Args: project_id: The project id owning the table. dataset_id: The dataset id owning the table. table_id: The table id. rows: A list of plain Python dictionaries. Each dictionary is a row and each key in it is the name of a field. insert_ids: An optional list of unique row ids used for best-effort de-duplication; if not provided, ids are generated automatically. skip_invalid_rows: If there are rows with insertion errors, whether they should be skipped, and all others should be inserted successfully. ignore_unknown_values: Set this option to true to ignore unknown column names. If the input rows contain columns that are not part of the existing table's schema, those columns are ignored, and the rows are successfully inserted. Returns: A tuple (bool, errors). If first element is False then the second element will be a bigquery.InsertErrorsValueListEntry instance containing specific errors.
github-repos
def __init__(self, warm_start_type, parents): if warm_start_type not in WarmStartTypes: raise ValueError( "Invalid type: {}, valid warm start types are: [{}]".format(warm_start_type, [t for t in WarmStartTypes])) if not parents: raise ValueError("Invalid parents: {}, parents should not be None/empty".format(parents)) self.type = warm_start_type self.parents = set(parents)
Initializes the ``WarmStartConfig`` with the provided ``WarmStartTypes`` and parents. Args: warm_start_type (sagemaker.tuner.WarmStartTypes): This should be one of the supported warm start types in WarmStartType parents (set{str}): Set of parent tuning jobs which will be used to warm start the new tuning job.
juraj-google-style
def setup(self, steps=None, drop_na=False, **kwargs): input_nodes = None selectors = self.model.get('input', {}).copy() selectors.update(kwargs) for (i, b) in enumerate(self.steps): if ((steps is not None) and (i not in steps) and (b.name not in steps)): continue b.setup(input_nodes, drop_na=drop_na, **selectors) input_nodes = b.output_nodes
Set up the sequence of steps for analysis. Args: steps (list): Optional list of steps to set up. Each element must be either an int giving the index of the step in the JSON config block list, or a str giving the (unique) name of the step, as specified in the JSON config. Steps that do not match either index or name will be skipped. drop_na (bool): Boolean indicating whether or not to automatically drop events that have a n/a amplitude when reading in data from event files.
codesearchnet
def add_depth_embedding(x): x_shape = common_layers.shape_list(x) depth = x_shape[-1] num_steps = x_shape[0] shape = [num_steps, 1, 1, depth] depth_embedding = ( tf.get_variable( "depth_embedding", shape, initializer=tf.random_normal_initializer(0, depth**-0.5)) * (depth** 0.5)) x += depth_embedding return x
Add n-dimensional embedding as the depth embedding (timing signal). Adds embeddings to represent the position of the step in the recurrent tower. Args: x: a tensor with shape [max_step, batch, length, depth] Returns: a Tensor the same shape as x.
juraj-google-style
def get_service_state(self, service_id: str) -> str: service = self._client.services.get(service_id) for service_task in service.tasks(): service_state = service_task['DesiredState'] return service_state
Get the state of the service. Only the manager nodes can retrieve service state Args: service_id (str): Service id Returns: str, state of the service
juraj-google-style
def closest_point(a, b, p): ap = [p[0]-a[0], p[1]-a[1]] ab = [b[0]-a[0], b[1]-a[1]] mag = float(ab[0]**2 + ab[1]**2) proj = dot(ap, ab) if mag ==0 : dist = 0 else: dist = proj / mag if dist < 0: return [a[0], a[1]] elif dist > 1: return [b[0], b[1]] else: return [a[0] + ab[0] * dist, a[1] + ab[1] * dist]
Finds closest point in a line segment Args: a ([float, float]): x and y coordinates. Line start b ([float, float]): x and y coordinates. Line end p ([float, float]): x and y coordinates. Point to find in the segment Returns: (float, float): x and y coordinates of the closest point
juraj-google-style
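A minimal usage sketch for the `closest_point` record above (the import path is hypothetical; it assumes the function and its `dot` helper are available in scope):

```python
# Hypothetical module path -- adjust to wherever closest_point actually lives.
from segment_utils import closest_point

# The point (3, 4) projects onto the interior of the segment (0, 0)-(10, 0):
print(closest_point([0, 0], [10, 0], [3, 4]))   # -> [3.0, 0.0]

# A point past the segment end clamps to the nearest endpoint:
print(closest_point([0, 0], [10, 0], [12, 1]))  # -> [10, 0]
```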
def _compute_numeric_jacobian(f, y_size, y_dtype, xs, param, delta): x_shape = xs[param].shape x_dtype = xs[param].dtype x_size = _product(x_shape) * (2 if x_dtype.is_complex else 1) y_size = y_size * (2 if y_dtype.is_complex else 1) x_dtype = x_dtype.real_dtype.as_numpy_dtype y_dtype = y_dtype.real_dtype.as_numpy_dtype xs_dtypes = [x.dtype for x in xs] xs_shapes = [x.shape for x in xs] xs = [numpy_compat.np_asarray(_to_numpy(x)) for x in xs] x = xs[param] scale = numpy_compat.np_asarray(2 * delta, dtype=y_dtype)[()] jacobian = np.zeros((y_size, x_size), dtype=x_dtype) f = _prepare(f, xs_dtypes, xs_shapes) for col in range(x_size): original = x.ravel().view(x_dtype)[col] x.ravel().view(x_dtype)[col] += delta y_pos = _to_numpy(f(*xs)) x.ravel().view(x_dtype)[col] = original x.ravel().view(x_dtype)[col] -= delta y_neg = _to_numpy(f(*xs)) x.ravel().view(x_dtype)[col] = original diff = (y_pos - y_neg) / scale jacobian[:, col] = diff.ravel().view(y_dtype) logging.vlog(1, 'Numeric Jacobian =\n%s', jacobian) return jacobian
Computes the numeric Jacobian for f regarding xs[param]. One can think of the relation among f, xs and y as y = f(xs). Args: f: the function. y_size: the number of elements of the result. y_dtype: the dtype of the result. xs: a list of tensors. param: the index of the target parameter. delta: the amount of perturbation we give to the input. Returns: A 2-d numpy array representing the Jacobian. It has "y_size" rows and "x_size" columns where "x_size" is the number of elements in xs[param] and "y_size" is the number of elements in the result.
github-repos
def register(self, numerics_alert): key = (numerics_alert.device_name, numerics_alert.tensor_name) if (key in self._data): self._data[key].add(numerics_alert) elif (len(self._data) < self._capacity): history = NumericsAlertHistory() history.add(numerics_alert) self._data[key] = history
Register an alerting numeric event. Args: numerics_alert: An instance of `NumericsAlert`.
codesearchnet
def get_urls_for_profiles(edx_video_id, profiles): profiles_to_urls = {profile: None for profile in profiles} try: video_info = get_video_info(edx_video_id) except ValVideoNotFoundError: return profiles_to_urls for encoded_video in video_info["encoded_videos"]: if encoded_video["profile"] in profiles: profiles_to_urls[encoded_video["profile"]] = encoded_video["url"] return profiles_to_urls
Returns a dict mapping profiles to URLs. If the profiles or video is not found, urls will be blank. Args: edx_video_id (str): id of the video profiles (list): list of profiles we want to search for Returns: (dict): A dict containing the profile to url pair
juraj-google-style
def map(self, entity, ext_id, dcm_id): if not entity in self._id_map: self._id_map[entity] = {} self._id_map[entity][ext_id] = dcm_id self._id_map[entity][dcm_id] = ext_id
Maps a CM id and an ext id for an entity. Args: entity: The name of the entity for which the ID relates. ext_id: Placeholder ext id. dcm_id: Real CM id of the object.
github-repos
def sqrt(x): if any_symbolic_tensors((x,)): return Sqrt().symbolic_call(x) x = backend.convert_to_tensor(x) return backend.numpy.sqrt(x)
Return the non-negative square root of a tensor, element-wise. Args: x: Input tensor. Returns: Output tensor, the non-negative square root of `x`.
github-repos
def add_string(self, data): lines = [] while data: match = self._line_end_re.search(data) if (match is None): chunk = data else: chunk = data[:match.end()] data = data[len(chunk):] if (self._buf and self._buf[(- 1)].endswith(b('\r')) and (not chunk.startswith(b('\n')))): lines.append(self._finish_line()) self._buf.append(chunk) if chunk.endswith(b('\n')): lines.append(self._finish_line()) return lines
Process some data splitting it into complete lines and buffering the rest Args: data: A `str` in Python 2 or `bytes` in Python 3 Returns: list of complete lines ending with a carriage return (eg. a progress bar) or a newline.
codesearchnet
def add_oxidation_state_by_site_fraction(structure, oxidation_states): try: for (i, site) in enumerate(structure): new_sp = collections.defaultdict(float) for (j, (el, occu)) in enumerate(get_z_ordered_elmap(site.species)): specie = Specie(el.symbol, oxidation_states[i][j]) new_sp[specie] += occu structure[i] = new_sp return structure except IndexError: raise ValueError('Oxidation state of all sites must be specified in the list.')
Add oxidation states to a structure by fractional site. Args: oxidation_states (list): List of list of oxidation states for each site fraction for each site. E.g., [[2, 4], [3], [-2], [-2], [-2]]
codesearchnet
def patch_f90_compiler(f90_compiler): from numpy.distutils.fcompiler import gnu if os.name != "nt": return if not isinstance(f90_compiler, gnu.Gnu95FCompiler): return f90_compiler.compiler_f77[:] = _remove_fpic(f90_compiler.compiler_f77) f90_compiler.compiler_f90[:] = _remove_fpic(f90_compiler.compiler_f90) c_compiler = f90_compiler.c_compiler if c_compiler.compiler_type != "msvc": raise NotImplementedError( "MSVC is the only supported C compiler on Windows." )
Patch up ``f90_compiler.library_dirs``. Updates flags in ``gfortran`` and ignores other compilers. The only modification is the removal of ``-fPIC`` since it is not used on Windows and the build flags turn warnings into errors. Args: f90_compiler (numpy.distutils.fcompiler.FCompiler): A Fortran compiler instance.
juraj-google-style
def _reduce_helper(input_shape, output_shape, input_tensor_layout, reduction_fn_string="SUM"): reduce_dims_indices = [ i for i, d in enumerate(input_shape.dims) if d not in output_shape.dims] reduced_input_shape = Shape([ d for d in input_shape.dims if d in output_shape.dims]) perm = [reduced_input_shape.dims.index(d) for d in output_shape.dims] def reduce_slice_fn(xslice): ret = xslice if reduce_dims_indices: ret = reduction_fn(reduction_fn_string)(xslice, reduce_dims_indices) if perm != list(xrange(len(perm))): ret = tf.transpose(ret, perm) return ret reduced_mesh_axes = [] for i in reduce_dims_indices: mesh_axis = input_tensor_layout[i] if mesh_axis is not None: reduced_mesh_axes.append(mesh_axis) return reduce_slice_fn, reduced_mesh_axes
Returns slicewise function and reduced mesh dimensions. Args: input_shape: a Shape output_shape: a Shape input_tensor_layout: a TensorLayout reduction_fn_string: "SUM" or "MAX" Returns: reduce_slice_fn: a function from tf.Tensor to tf.Tensor reduced_mesh_axes: a list of integers
juraj-google-style
def __init__(self, settings, room, queue, files): Process.__init__(self) self._room = room self._queue = queue self._files = files self._data = {} self._connection = Connection.create_from_settings(settings) self._reactor = None self._producer = None self._receiver = None
Initialize. Args: settings (dict): Settings used to create a :class:`Connection` instance room (int): Room queue (:class:`multiprocessing.Queue`): Queue to share data between processes files (dict): Dictionary, where key is the field name, and value is the path
juraj-google-style
def ctc(y_true, y_pred): if len(ops.shape(y_true)) != 2: raise ValueError(f'Targets `y_true` are expected to be a tensor of shape `(batch_size, max_length)` in integer format. Received: y_true.shape={ops.shape(y_true)}') if len(ops.shape(y_pred)) != 3: raise ValueError(f'Logits `y_pred` are expected to be a tensor of shape `(batch_size, max_length, num_classes)`. Received: y_pred.shape={ops.shape(y_pred)}') mask_index = 0 batch_length = ops.shape(y_pred)[0] input_length = ops.shape(y_pred)[1] input_length = input_length * ops.ones((batch_length,), dtype='int32') label_length = ops.cast(ops.sum(y_true != mask_index, axis=-1), dtype='int32') return ops.ctc_loss(y_true, y_pred, label_length, input_length, mask_index=mask_index)
CTC (Connectionist Temporal Classification) loss. Args: y_true: A tensor of shape `(batch_size, max_length)` containing the true labels in integer format. `0` always represents the blank/mask index and should not be used for classes. y_pred: A tensor of shape `(batch_size, max_length, num_classes)` containing logits (the output of your model). They should *not* be normalized via softmax.
github-repos
def get_trivial_search_space(): return pg.floatv(0.0, 1.0)
Trivial search space. Each point in the space is a value in [0, 1]. Returns: A tunable value.
github-repos
def remove_child(self, child): if not isinstance(child, Node): raise TypeError("child must be a Node") try: self.children.remove(child); child.parent = None except: raise RuntimeError("Attempting to remove non-existent child")
Remove child from ``Node`` object Args: ``child`` (``Node``): The child to remove
juraj-google-style
def make_tstore_conn(params, **kwargs): log.setLevel(params.get('log_level', __LOG_LEVEL__)) log.debug("\n%s", params) params.update(kwargs) try: vendor = RdfwConnections['triplestore'][params.get('vendor')] except KeyError: vendor = RdfwConnections['triplestore']['blazegraph'] conn = vendor(**params) return conn
Returns a triplestore connection args: attr_name: The name the connection will be assigned in the config manager params: The parameters of the connection kwargs: log_level: logging level to use
juraj-google-style
def _run_between_graph_clients(self, client_fn, cluster_spec, num_gpus, *args, **kwargs): threads = [] for task_type in ['chief', 'worker']: for task_id in range(len(cluster_spec.get(task_type, []))): t = threading.Thread(target=self._run_client, args=(client_fn, task_type, task_id, num_gpus, context.executing_eagerly()) + args, kwargs=kwargs) t.start() threads.append(t) self._coord.join(threads)
Runs several clients for between-graph replication. Args: client_fn: a function that needs to accept `task_type`, `task_id`, `num_gpus`. cluster_spec: a dict specifying jobs in a cluster. num_gpus: number of GPUs per worker. *args: will be passed to `client_fn`. **kwargs: will be passed to `client_fn`.
github-repos
def squash_sequence(input_layer): timesteps = len(input_layer.sequence) if not timesteps: raise ValueError('Empty tensor sequence.') elif timesteps == 1: result = input_layer.sequence[0] else: result = tf.concat(input_layer.sequence, 0) return input_layer.with_tensor(result).with_defaults(unroll=timesteps)
Squashes a sequence into a single Tensor with dim 1 being time*batch. A sequence is an array of Tensors, which is not appropriate for most operations; this squashes them together into a single Tensor. Defaults are assigned such that cleave_sequence requires no args. Args: input_layer: The input layer. Returns: A PrettyTensor containing a single tensor with the first dim containing both time and batch. Raises: ValueError: If the sequence is empty.
juraj-google-style
def _calculateEncodingKey(comparator): encodingName = None for k, v in list(_encodings.items()): if v == comparator: encodingName = k break return encodingName
Gets the first key of all available encodings where the corresponding value matches the comparator. Args: comparator (string): A view name for an encoding. Returns: str: A key for a specific encoding used by python.
juraj-google-style
async def enqueue(content: AsyncIterable[_T], queue: asyncio.Queue[_T | None]) -> None: try: async for part in content: await queue.put(part) finally: await queue.put(None)
Enqueues all content into a queue. When the queue is unbounded, this function will not block. When the queue is bounded, this function will block until the queue has space. Args: content: The content to enqueue. queue: The queue to enqueue to.
github-repos
def VerifyStructure(self, parser_mediator, line): try: structure = self._HEADER.parseString(line) except pyparsing.ParseException: logger.debug('Not a XChat log file') return False (_, month, day, hours, minutes, seconds, year) = structure.date_time month = timelib.MONTH_DICT.get(month.lower(), 0) time_elements_tuple = (year, month, day, hours, minutes, seconds) try: dfdatetime_time_elements.TimeElements(time_elements_tuple=time_elements_tuple) except ValueError: logger.debug('Not a XChat log file, invalid date and time: {0!s}'.format(structure.date_time)) return False return True
Verify that this file is a XChat log file. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. line (str): line from a text file. Returns: bool: True if the line is in the expected format, False if not.
codesearchnet
def get_metrics(pred: jax.Array, actual: jax.Array) -> Result: tp: int = jnp.sum(jnp.logical_and(pred == 1, actual == 1)) tn: int = jnp.sum(jnp.logical_and(pred == 0, actual == 0)) fp: int = jnp.sum(jnp.logical_and(pred == 1, actual == 0)) fn: int = jnp.sum(jnp.logical_and(pred == 0, actual == 1)) accuracy = (tp + tn) / (tp + tn + fp + fn + EPS) precision = tp / (tp + fp + EPS) recall = tp / (tp + fn + EPS) fscore = 2 * precision * recall / (precision + recall + EPS) return Result(tp=tp, tn=tn, fp=fp, fn=fn, accuracy=accuracy, precision=precision, recall=recall, fscore=fscore)
Gets evaluation metrics from the prediction and the actual target. Args: pred (jax.Array): A prediction of the target. actual (jax.Array): The actual target. Returns: result (Result): A result.
github-repos
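A hedged usage sketch for the `get_metrics` record above; it assumes `get_metrics` is importable and that `Result` and `EPS` (defined elsewhere in that codebase) exist, e.g. `EPS` as a small constant such as 1e-8 and `Result` as a named tuple of the eight fields:

```python
import jax.numpy as jnp

pred = jnp.array([1, 0, 1, 1])
actual = jnp.array([1, 1, 1, 0])

result = get_metrics(pred, actual)
# tp=2, tn=0, fp=1, fn=1 -> accuracy=0.5, precision=recall=fscore=2/3 (up to EPS)
```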
def save(self, name=None, output='png', dirc=None): self.render() if dirc: if not os.path.isdir(os.getcwd() + "/" + str(dirc)): os.makedirs(os.getcwd() + "/" + str(dirc)) if name is None: if dirc: self.fig.savefig(os.getcwd() + "/" + str(dirc) + '/bloch_' + str(self.savenum) + '.' + output) else: self.fig.savefig(os.getcwd() + '/bloch_' + str(self.savenum) + '.' + output) else: self.fig.savefig(name) self.savenum += 1 if self.fig: plt.close(self.fig)
Saves Bloch sphere to a file of type ``output`` in directory ``dirc``. Args: name (str): Name of saved image. Must include path and format as well. i.e. '/Users/Paul/Desktop/bloch.png' This overrides the 'output' and 'dirc' arguments. output (str): Format of output image. dirc (str): Directory for output images. Defaults to current working directory.
juraj-google-style
def on_heartbeat(self, message): logger.info("Got a heartbeat") logger.info("Heartbeat message: {}".format(message)) self.heartbeat_thread.update_sequence(message['d']) return
Runs on a heartbeat event from the websocket connection. Args: message (dict): Full message from the Discord websocket connection
juraj-google-style
def log_response(self, response: Response, trim_log_values: bool=False, **kwargs: Any) -> None: return log_(response.text, response_log, 'info', trim=trim_log_values, **kwargs)
Log a response. Note this is different to log_request, in that it takes a Response object, not a string. Args: response: The Response object to log. Note this is different to log_request which takes a string. trim_log_values: Log an abbreviated version of the response.
codesearchnet
def _inv_hessian_control_inputs(inv_hessian): is_positive_definite = tf.reduce_all(input_tensor=tf.math.is_finite(tf.linalg.cholesky(inv_hessian)), axis=[(- 1), (- 2)]) is_symmetric = tf.equal(bfgs_utils.norm((inv_hessian - _batch_transpose(inv_hessian)), dims=2), 0) return [tf.Assert(is_positive_definite, ['Initial inverse Hessian is not positive definite.', inv_hessian]), tf.Assert(is_symmetric, ['Initial inverse Hessian is not symmetric', inv_hessian])]
Computes control inputs to validate a provided inverse Hessian. These ensure that the provided inverse Hessian is positive definite and symmetric. Args: inv_hessian: The starting estimate for the inverse of the Hessian at the initial point. Returns: A list of tf.Assert ops suitable for use with tf.control_dependencies.
codesearchnet
def recover_cfg(self, start=None, end=None, symbols=None, callback=None, arch_mode=None): if (arch_mode is None): arch_mode = self.binary.architecture_mode self._load(arch_mode=arch_mode) start = (start if start else self.binary.entry_point) (cfg, _) = self._recover_cfg(start=start, end=end, symbols=symbols, callback=callback) return cfg
Recover CFG. Args: start (int): Start address. end (int): End address. symbols (dict): Symbol table. callback (function): A callback function which is called after each successfully recovered CFG. arch_mode (int): Architecture mode. Returns: ControlFlowGraph: A CFG.
codesearchnet
def _parse_list(cls, args): argparser = ArgumentParser(prog='cluster list') group = argparser.add_mutually_exclusive_group() group.add_argument('--id', dest='cluster_id', help='show cluster with this id') group.add_argument('--label', dest='label', help='show cluster with this label') group.add_argument('--state', dest='state', action='store', choices=['up', 'down', 'pending', 'terminating'], help='list only clusters in the given state') pagination_group = group.add_argument_group() pagination_group.add_argument('--page', dest='page', action='store', type=int, help='page number') pagination_group.add_argument('--per-page', dest='per_page', action='store', type=int, help='number of clusters to be retrieved per page') arguments = argparser.parse_args(args) return vars(arguments)
Parse command line arguments to construct a dictionary of cluster parameters that can be used to determine which clusters to list. Args: `args`: sequence of arguments Returns: Dictionary that can be used to determine which clusters to list
codesearchnet
def copy_function(func, name=None): code = func.__code__ newname = (name or func.__name__) newcode = CodeType(code.co_argcount, code.co_kwonlyargcount, code.co_nlocals, code.co_stacksize, code.co_flags, code.co_code, code.co_consts, code.co_names, code.co_varnames, code.co_filename, newname, code.co_firstlineno, code.co_lnotab, code.co_freevars, code.co_cellvars) newfunc = FunctionType(newcode, func.__globals__, newname, func.__defaults__, func.__closure__) newfunc.__dict__.update(func.__dict__) return newfunc
Copy a function object with different name. Args: func (function): Function to be copied. name (string, optional): Name of the new function. If not spacified, the same name of `func` will be used. Returns: newfunc (function): New function with different name.
codesearchnet
def remove_father(self, father): self._fathers = [x for x in self._fathers if (x.node_id != father.node_id)]
Remove a father node. Do nothing if the node is not a father. Args: father: the father node to remove
codesearchnet
def get_url(self, url): try: req = requests.get(url, headers={ 'Authorization': 'Token {}'.format(self._user_token) }, verify=False) if req.status_code == 403: raise ValueError("Access Denied") else: return req except requests.exceptions.ConnectionError as e: if str(e) == '403 Client Error: Forbidden': raise ValueError('Access Denied') else: raise e
Get a response object for a given url. Arguments: url (str): The url make a get to token (str): The authentication token Returns: obj: The response object
juraj-google-style
def read(path): if fs.exists(path): with open(path) as infile: components = infile.read().split() pid = int(components[0]) date = datetime.date.fromtimestamp(float(components[1])) return (pid, date) else: return (None, None)
Read the contents of a LockFile. Arguments: path (str): Path to lockfile. Returns: Tuple(int, datetime): The integer PID of the lock owner, and the date the lock was acquired. If the lock is not claimed, both values are None.
codesearchnet
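A short usage sketch for the lockfile `read` record above (assumes `read` and its `fs` dependency are importable from the same module):

```python
pid, acquired = read("/tmp/example.lock")
if pid is None:
    print("lock is not claimed")
else:
    print(f"held by PID {pid} since {acquired}")
```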
def call(self, name, *args, **kwargs): payload = (name, args, kwargs) self._conn.send((self._CALL, payload)) return self._receive
Asynchronously call a method of the external environment. Args: name: Name of the method to call. *args: Positional arguments to forward to the method. **kwargs: Keyword arguments to forward to the method. Returns: Promise object that blocks and provides the return value when called.
codesearchnet
def _generate_api_config_with_root(self, request): actual_root = self._get_actual_root(request) generator = api_config.ApiConfigGenerator() api = request.body_json['api'] version = request.body_json['version'] lookup_key = (api, version) service_factories = self._backend.api_name_version_map.get(lookup_key) if not service_factories: return None service_classes = [service_factory.service_class for service_factory in service_factories] config_dict = generator.get_config_dict( service_classes, hostname=actual_root) for config in config_dict.get('items', []): lookup_key_with_root = ( config.get('name', ''), config.get('version', ''), actual_root) self._config_manager.save_config(lookup_key_with_root, config) return config_dict
Generate an API config with a specific root hostname. This uses the backend object and the ApiConfigGenerator to create an API config specific to the hostname of the incoming request. This allows for flexible API configs for non-standard environments, such as localhost. Args: request: An ApiRequest, the transformed request sent to the Discovery API. Returns: A string representation of the generated API config.
juraj-google-style
def are_equivalent_pyxb(a_pyxb, b_pyxb, ignore_timestamps=False): normalize_in_place(a_pyxb, ignore_timestamps) normalize_in_place(b_pyxb, ignore_timestamps) a_xml = d1_common.xml.serialize_to_xml_str(a_pyxb) b_xml = d1_common.xml.serialize_to_xml_str(b_pyxb) are_equivalent = d1_common.xml.are_equivalent(a_xml, b_xml) if (not are_equivalent): logger.debug('XML documents not equivalent:') logger.debug(d1_common.xml.format_diff_xml(a_xml, b_xml)) return are_equivalent
Determine if SystemMetadata PyXB objects are semantically equivalent. Normalize then compare SystemMetadata PyXB objects for equivalency. Args: a_pyxb, b_pyxb : SystemMetadata PyXB objects to compare reset_timestamps: bool ``True``: Timestamps in the SystemMetadata are set to a standard value so that objects that are compared after normalization register as equivalent if only their timestamps differ. Returns: bool: **True** if SystemMetadata PyXB objects are semantically equivalent. Notes: The SystemMetadata is normalized by removing any redundant information and ordering all sections where there are no semantics associated with the order. The normalized SystemMetadata is intended to be semantically equivalent to the un-normalized one.
codesearchnet
def make_prediction_pipeline(pipeline, args): (predicted_values, errors) = (((pipeline | ('Read CSV Files' >> beam.io.ReadFromText(str(args.predict_data), strip_trailing_newlines=True))) | ('Batch Input' >> beam.ParDo(EmitAsBatchDoFn(args.batch_size)))) | ('Run TF Graph on Batches' >> beam.ParDo(RunGraphDoFn(args.trained_model_dir)).with_outputs('errors', main='main'))) ((predicted_values, errors) | ('Format and Save' >> FormatAndSave(args)))
Builds the prediction pipeline. Reads the csv files, prepends a ',' if the target column is missing, run prediction, and then prints the formated results to a file. Args: pipeline: the pipeline args: command line args
codesearchnet
def initialize_block(self, block_header): state_view = BlockWrapper.state_view_for_block(self._block_cache.block_store.chain_head, self._state_view_factory) settings_view = SettingsView(state_view) self._min_wait_time = settings_view.get_setting('sawtooth.consensus.min_wait_time', self._min_wait_time, int) self._max_wait_time = settings_view.get_setting('sawtooth.consensus.max_wait_time', self._max_wait_time, int) self._valid_block_publishers = settings_view.get_setting('sawtooth.consensus.valid_block_publishers', self._valid_block_publishers, list) block_header.consensus = b'Devmode' self._start_time = time.time() self._wait_time = random.uniform(self._min_wait_time, self._max_wait_time) return True
Do initialization necessary for the consensus to claim a block, this may include initiating voting activates, starting proof of work hash generation, or create a PoET wait timer. Args: block_header (BlockHeader): the BlockHeader to initialize. Returns: True
codesearchnet
def print_error_messages_raylet(task_error_queue, threads_stopped): while True: if threads_stopped.is_set(): return try: error, t = task_error_queue.get(block=False) except queue.Empty: threads_stopped.wait(timeout=0.01) continue while t + UNCAUGHT_ERROR_GRACE_PERIOD > time.time(): threads_stopped.wait(timeout=1) if threads_stopped.is_set(): break if t < last_task_error_raise_time + UNCAUGHT_ERROR_GRACE_PERIOD: logger.debug("Suppressing error from worker: {}".format(error)) else: logger.error( "Possible unhandled error from worker: {}".format(error))
Prints message received in the given output queue. This checks periodically if any un-raised errors occured in the background. Args: task_error_queue (queue.Queue): A queue used to receive errors from the thread that listens to Redis. threads_stopped (threading.Event): A threading event used to signal to the thread that it should exit.
juraj-google-style
def children(self, as_resources=False): children = [o for (s, p, o) in self.rdf.graph.triples((None, self.rdf.prefixes.ldp.contains, None))] if as_resources: logger.debug('retrieving children as resources') children = [self.repo.get_resource(child) for child in children] return children
method to return hierarchical children of this resource Args: as_resources (bool): if True, opens each as appropriate resource type instead of return URI only Returns: (list): list of resources
codesearchnet
def maybe_propagate_compile_time_consts_in_xla(op): if control_flow_util.GraphOrParentsInXlaContext(op.graph): op._set_attr('_xla_propagate_compile_time_consts', attr_value_pb2.AttrValue(b=True))
Tells XLA whether to propagate compile-time consts in the loop body. This is needed to make compile time constants available to ops, for example `max_num_elements` in `EmptyTensorList`, inside the loop body. Ideally this would always be turned on, but that doesn't work with legacy functionalized while_loops. Args: op: A `While` Operation.
github-repos
def list_projects(self, entity=None): query = gql('\n query Models($entity: String!) {\n models(first: 10, entityName: $entity) {\n edges {\n node {\n id\n name\n description\n }\n }\n }\n }\n ') return self._flatten_edges(self.gql(query, variable_values={'entity': (entity or self.settings('entity'))})['models'])
Lists projects in W&B scoped by entity. Args: entity (str, optional): The entity to scope this project to. Returns: [{"id","name","description"}]
codesearchnet
def buckets_get(self, bucket, projection='noAcl'): args = {'projection': projection} url = (Api._ENDPOINT + (Api._BUCKET_PATH % bucket)) return google.datalab.utils.Http.request(url, credentials=self._credentials, args=args)
Issues a request to retrieve information about a bucket. Args: bucket: the name of the bucket. projection: the projection of the bucket information to retrieve. Returns: A parsed bucket information dictionary. Raises: Exception if there is an error performing the operation.
codesearchnet
def parse(self, filename): path = os.path.abspath(filename) if filename.endswith('.xml'): return PawXmlSetup(path) ppdesc = self.read_ppdesc(path) if (ppdesc is None): logger.critical(('Cannot find ppdesc in %s' % path)) return None psp_type = ppdesc.psp_type parsers = {'FHI': NcAbinitHeader.fhi_header, 'GTH': NcAbinitHeader.gth_header, 'TM': NcAbinitHeader.tm_header, 'Teter': NcAbinitHeader.tm_header, 'HGH': NcAbinitHeader.hgh_header, 'HGHK': NcAbinitHeader.hgh_header, 'ONCVPSP': NcAbinitHeader.oncvpsp_header, 'PAW_abinit_text': PawAbinitHeader.paw_header} try: header = parsers[ppdesc.name](path, ppdesc) except Exception: raise self.Error(((path + ':\n') + straceback())) if (psp_type == 'NC'): pseudo = NcAbinitPseudo(path, header) elif (psp_type == 'PAW'): pseudo = PawAbinitPseudo(path, header) else: raise NotImplementedError('psp_type not in [NC, PAW]') return pseudo
Read and parse a pseudopotential file. Main entry point for client code. Returns: pseudopotential object or None if filename is not a valid pseudopotential file.
codesearchnet
class FlaxElectraSequenceSummary(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.summary = identity if hasattr(self.config, 'summary_use_proj') and self.config.summary_use_proj: if hasattr(self.config, 'summary_proj_to_labels') and self.config.summary_proj_to_labels and (self.config.num_labels > 0): num_classes = self.config.num_labels else: num_classes = self.config.hidden_size self.summary = nn.Dense(num_classes, dtype=self.dtype) activation_string = getattr(self.config, 'summary_activation', None) self.activation = ACT2FN[activation_string] if activation_string else lambda x: x self.first_dropout = identity if hasattr(self.config, 'summary_first_dropout') and self.config.summary_first_dropout > 0: self.first_dropout = nn.Dropout(self.config.summary_first_dropout) self.last_dropout = identity if hasattr(self.config, 'summary_last_dropout') and self.config.summary_last_dropout > 0: self.last_dropout = nn.Dropout(self.config.summary_last_dropout) def __call__(self, hidden_states, cls_index=None, deterministic: bool=True): output = hidden_states[:, 0] output = self.first_dropout(output, deterministic=deterministic) output = self.summary(output) output = self.activation(output) output = self.last_dropout(output, deterministic=deterministic) return output
Compute a single vector summary of a sequence hidden states. Args: config ([`PretrainedConfig`]): The config used by the model. Relevant arguments in the config class of the model are (refer to the actual config class of your model for the default values it uses): - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction. - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes (otherwise to `config.hidden_size`). - **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output, another string or `None` will add no activation. - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation. - **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation.
github-repos
def check(self, dsm, independence_factor=5, **kwargs): least_common_mechanism = False message = '' data = dsm.data categories = dsm.categories dsm_size = dsm.size[0] if (not categories): categories = (['appmodule'] * dsm_size) dependent_module_number = [] for j in range(0, dsm_size): dependent_module_number.append(0) for i in range(0, dsm_size): if ((categories[i] != 'framework') and (categories[j] != 'framework') and (data[i][j] > 0)): dependent_module_number[j] += 1 for (index, item) in enumerate(dsm.categories): if ((item == 'broker') or (item == 'applib')): dependent_module_number[index] = 0 if (max(dependent_module_number) <= (dsm_size / independence_factor)): least_common_mechanism = True else: maximum = max(dependent_module_number) message = ('Dependencies to %s (%s) > matrix size (%s) / independence factor (%s) = %s' % (dsm.entities[dependent_module_number.index(maximum)], maximum, dsm_size, independence_factor, (dsm_size / independence_factor))) return (least_common_mechanism, message)
Check least common mechanism. Args: dsm (:class:`DesignStructureMatrix`): the DSM to check. independence_factor (int): if the maximum dependencies for one module is inferior or equal to the DSM size divided by the independence factor, then this criterion is verified. Returns: bool: True if least common mechanism, else False
codesearchnet
def get_success_enrollment_message(cls, users, enrolled_in): enrolled_count = len(users) return ( 'success', ungettext( '{enrolled_count} learner was enrolled in {enrolled_in}.', '{enrolled_count} learners were enrolled in {enrolled_in}.', enrolled_count, ).format( enrolled_count=enrolled_count, enrolled_in=enrolled_in, ) )
Create message for the users who were enrolled in a course or program. Args: users: An iterable of users who were successfully enrolled enrolled_in (str): A string identifier for the course or program the users were enrolled in Returns: tuple: A 2-tuple containing a message type and message text
juraj-google-style
def translate_transcode_config(self, transcode_configs): result = [] REALLY_BIG_INT = 9223372036854775807 try: for video_format in self.get_video_formats(): for transcode_config in transcode_configs: min_width = int(transcode_config.get(FieldMap.TRANSCODE_MIN_WIDTH, 0)) min_height = int(transcode_config.get(FieldMap.TRANSCODE_MIN_HEIGHT, 0)) min_bitrate = int(transcode_config.get(FieldMap.TRANSCODE_MIN_BITRATE, 0)) max_width = int(transcode_config.get(FieldMap.TRANSCODE_MAX_WIDTH, REALLY_BIG_INT)) max_height = int(transcode_config.get(FieldMap.TRANSCODE_MAX_HEIGHT, REALLY_BIG_INT)) max_bitrate = int(transcode_config.get(FieldMap.TRANSCODE_MAX_BITRATE, REALLY_BIG_INT)) file_format = transcode_config.get(FieldMap.TRANSCODE_FORMAT, '') if file_format == 'SOURCE_FILE': if 15 not in result: result.append(15) elif min_width <= video_format['resolution']['width'] and video_format['resolution']['width'] <= max_width and (min_height <= video_format['resolution']['height']) and (video_format['resolution']['height'] <= max_height) and (min_bitrate <= video_format['targetBitRate']) and (video_format['targetBitRate'] <= max_bitrate) and (video_format.get('fileType', '') == file_format): if video_format['id'] not in result: result.append(video_format['id']) except: raise Exception('Error determining file formats for transcode') return result
Given a transcode config, returns the CM transcodes that match the config. Args: transcode_config: The transcode configuration feed item. Returns: All trancode objects from Campaign Manager that match the transcode configuration specified.
github-repos
def construct(self, **bindings): context = _assign_values_to_unbound_vars(self._unbound_vars, bindings) context.update(self._partial_context) return self._construct(context)
Constructs the graph and returns either a tensor or a sequence. Args: **bindings: Arguments for every deferred parameter. Returns: The value that is placed into this.
juraj-google-style
def get_dropbox_folder_location(): host_db_path = os.path.join(os.environ['HOME'], '.dropbox/host.db') try: with open(host_db_path, 'r') as f_hostdb: data = f_hostdb.read().split() except IOError: error('Unable to find your Dropbox install =(') dropbox_home = base64.b64decode(data[1]).decode() return dropbox_home
Try to locate the Dropbox folder. Returns: (str) Full path to the current Dropbox folder
codesearchnet
def __init__(self, project_key=None, run_asyncore_thread=True): self.project_key = project_key self.default_type = OOBTree self._root = None self._connection = None if run_asyncore_thread: _init_zeo() self._open_connection() self._init_zeo_root()
Initialize the object. Args: conf_path (str): See :attr:`conf_path`. project_key (str, default None): See :attr:`project_key`. If not set, the root of the database is used (this may cause performace issues). run_asyncore_thread (bool, default True): Run external asyncore thread, which handles connections to database? Default True.
juraj-google-style
def en020(self, value=None): if value is not None: try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float ' 'for field `en020`'.format(value)) self._en020 = value
Corresponds to IDD Field `en020` mean coincident dry-bulb temperature to Enthalpy corresponding to 2.0% annual cumulative frequency of occurrence Args: value (float): value for IDD Field `en020` Unit: kJ/kg if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def postprocess_monograph(marc_xml, mods, uuid, counter, url): dom = double_linked_dom(mods) if not isinstance(marc_xml, MARCXMLRecord): marc_xml = MARCXMLRecord(marc_xml) add_missing_xml_attributes(dom, counter) fix_invalid_type_parameter(dom) if uuid: add_uuid(dom, uuid) add_marccountry_tag(dom) add_genre(dom) remove_hairs_from_tags(dom) fix_issuance(dom) fix_location_tag(dom) fix_related_item_tag(dom) fix_missing_electronic_locator_tag(dom, url) fix_missing_lang_tags(marc_xml, dom) return dom.prettify()
Fix bugs in `mods` produced by XSLT template. Args: marc_xml (str): Original Aleph record. mods (str): XML string generated by XSLT template. uuid (str): UUID of the package. counter (int): Number of record, is added to XML headers. url (str): URL of the publication (public or not). Returns: str: Updated XML.
juraj-google-style
def CreateProductPartition(client, adgroup_id): ad_group_criterion_service = client.GetService('AdGroupCriterionService', 'v201809') helper = ProductPartitionHelper(adgroup_id) root = helper.CreateSubdivision() new_product_canonical_condition = {'xsi_type': 'ProductCanonicalCondition', 'condition': 'NEW'} used_product_canonical_condition = {'xsi_type': 'ProductCanonicalCondition', 'condition': 'USED'} other_product_canonical_condition = {'xsi_type': 'ProductCanonicalCondition'} helper.CreateUnit(root, new_product_canonical_condition) helper.CreateUnit(root, used_product_canonical_condition) helper.CreateUnit(root, other_product_canonical_condition) result = ad_group_criterion_service.mutate(helper.operations) return result['value']
Creates a ProductPartition tree for the given AdGroup ID. Args: client: an AdWordsClient instance. adgroup_id: a str AdGroup ID. Returns: The ProductPartition tree as a sudsobject.
codesearchnet
def repsep(parser: Union[Parser, Sequence[Input]], separator: Union[Parser, Sequence[Input]]) \ -> RepeatedSeparatedParser: if isinstance(parser, str): parser = lit(parser) if isinstance(separator, str): separator = lit(separator) return RepeatedSeparatedParser(parser, separator)
Match a parser zero or more times separated by another parser. This matches repeated sequences of ``parser`` separated by ``separator``. A list is returned containing the value from each match of ``parser``. The values from ``separator`` are discarded. If there are no matches, an empty list is returned. Args: parser: Parser or literal separator: Parser or literal
juraj-google-style
def coordinate_tensor(shape, axis): if (axis < 0): axis = (tf.size(shape) + axis) r = tf.range(shape[axis]) r_shape = tf.one_hot(axis, tf.size(shape), on_value=(- 1), off_value=1, dtype=tf.int32) return (tf.zeros(shape, dtype=tf.int32) + tf.reshape(r, r_shape))
Return a tensor with given shape containing coordinate along given axis. Args: shape: a Tensor representing the shape of the output Tensor axis: an integer Returns: A tensor with shape shape and type tf.int32, where each elements its coordinate along the given axis.
codesearchnet
def approximate_jacobian(f, variables, delta=0.1): def var_jacobian(var): derivatives = tf.map_fn(lambda x: _five_point_stencil(f, var, x, delta), tf.range(tf.size(var)), fn_output_signature=tf.float32) f_shape = tf.shape(derivatives)[1:] transpose_perm = list(range(1, len(f_shape) + 1)) + [0] transpose_derivatives = tf.transpose(derivatives, transpose_perm) reshape_shape = tf.concat([f_shape, tf.shape(var)], 0) return tf.reshape(transpose_derivatives, reshape_shape) return tf.nest.map_structure(var_jacobian, variables)
Approximates the jacobian of f using five point stencil. Suppose the input function returns a tensor `r` under gradient tape `t`. Then this function returns an approximation to `t.jacobian(r, variables, unconnected_gradients=tf.UnconnectedGradients.ZERO)` Args: f: Callable taking no arguments and returning a `tf.Tensor`. variables: Possibly nested structure of `tf.Variable` in which to differentiate `f`. delta: Size of the fundamental perturbation in the stencil. Returns: The approximate jacobian. Has the same structure as the return from a corresponding call to `tf.GradientTape().jacobian`.
github-repos
def parse_hpo_disease(hpo_line): hpo_line = hpo_line.rstrip().split('\t') hpo_info = {} disease = hpo_line[0].split(':') hpo_info['source'] = disease[0] hpo_info['disease_nr'] = int(disease[1]) hpo_info['hgnc_symbol'] = None hpo_info['hpo_term'] = None if len(hpo_line) >= 3: hpo_info['hgnc_symbol'] = hpo_line[2] if len(hpo_line) >= 4: hpo_info['hpo_term'] = hpo_line[3] return hpo_info
Parse hpo disease line Args: hpo_line(str)
juraj-google-style
def is_subtype_of(self, other: 'TraceType') -> bool:
Returns True if `self` is a subtype of `other`. For example, `tf.function` uses subtyping for dispatch: if `a.is_subtype_of(b)` is True, then an argument of `TraceType` `a` can be used as argument to a `ConcreteFunction` traced with an a `TraceType` `b`. Args: other: A TraceType object to be compared against. Example: ```python class Dimension(TraceType): def __init__(self, value: Optional[int]): self.value = value def is_subtype_of(self, other): # Either the value is the same or other has a generalized value that # can represent any specific ones. return (self.value == other.value) or (other.value is None) ```
github-repos
def SetCampaignTargetingCriteria(client, campaign): campaign_criterion_service = client.GetService('CampaignCriterionService') criteria = [{'xsi_type': 'Location', 'id': 21137}, {'xsi_type': 'Location', 'id': 2484}, {'xsi_type': 'Language', 'id': 1000}, {'xsi_type': 'Language', 'id': 1003}] operations = [{'operator': 'ADD', 'operand': {'campaignId': campaign['id'], 'criterion': criterion}} for criterion in criteria] response = campaign_criterion_service.mutate(operations) if (response and ('value' in response)): for criterion in response['value']: print(('Campaign criteria of type "%s" and id "%s" was added.' % (criterion['criterion']['type'], criterion['criterion']['id'])))
Sets targeting criteria for the given campaign. Args: client: An AdWordsClient instance. campaign: A suds object representing the campaign we wish to attach targeting criteria.
codesearchnet
def setLCD(self, password="00000000"): result = False self.setContext("setLCD") try: self.clearCmdMsg() if len(password) != 8: self.writeCmdMsg("Invalid password length.") self.setContext("") return result if not self.request(): self.writeCmdMsg("Bad read CRC on setting") else: if not self.serialCmdPwdAuth(password): self.writeCmdMsg("Password failure") else: req_table = "" fill_len = 40 - len(self.m_lcd_items) for lcdid in self.m_lcd_items: append_val = binascii.hexlify(str(lcdid).zfill(2)) req_table += append_val for i in range(0, fill_len): append_val = binascii.hexlify(str(0).zfill(2)) req_table += append_val req_str = "015731023030443228" + req_table + "2903" req_str += self.calc_crc16(req_str[2:].decode("hex")) self.m_serial_port.write(req_str.decode("hex")) if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06": self.writeCmdMsg("Success: 06 returned.") result = True self.serialPostEnd() except: ekm_log(traceback.format_exc(sys.exc_info())) self.setContext("") return result
Serial call to set LCD using meter object bufer. Used with :func:`~ekmmeters.V4Meter.addLcdItem`. Args: password (str): Optional password Returns: bool: True on completion and ACK.
juraj-google-style
def API_Iterator(function, kwargs, results=None, limit=None): class API_Iterator_Instance: def __init__(self, function, kwargs, results=None, limit=None): self.function = function self.kwargs = kwargs self.limit = limit self.results = results self.position = 0 self.count = 0 self.iterable = None self.__find_tag__() def __find_tag__(self): if self.results: for tag in iter(self.results.keys()): if isinstance(self.results[tag], list): self.iterable = tag break if self.iterable is None: print('WARNING API RETURNED NO KEYS WITH LISTS:', ', '.join(self.results.keys())) def __iter__(self): return self def __next__(self): return self.next() def next(self): if self.results is None: self.results = API_Retry(self.function(**self.kwargs)) self.__find_tag__() if self.iterable and self.position >= len(self.results[self.iterable]): page_token = self.results.get('nextPageToken', None) if page_token: if 'body' in self.kwargs: self.kwargs['body']['pageToken'] = page_token else: self.kwargs['pageToken'] = page_token self.results = API_Retry(self.function(**self.kwargs)) self.position = 0 else: raise StopIteration if self.iterable and self.position < len(self.results.get(self.iterable, [])): value = self.results[self.iterable][self.position] self.position += 1 if self.limit is not None: self.count += 1 if self.count > self.limit: raise StopIteration return value else: raise StopIteration return iter(API_Iterator_Instance(function, kwargs, results, limit))
See API_Iterator_Instance below for documentation; this is just an iter wrapper.

Returns:
    iter(API_Iterator_Instance(function, kwargs, results))
github-repos
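One way the wrapper might be driven, assuming a google-api-python-client discovery object and that an API_Retry helper is in scope alongside API_Iterator; the Drive query below is purely illustrative.

from googleapiclient.discovery import build

service = build('drive', 'v3')  # credentials/configuration omitted for brevity
for item in API_Iterator(service.files().list, {'pageSize': 100}, limit=250):
    print(item.get('name'))  # iterates across pages via nextPageToken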
def handle_type_error(fn):
    @wraps(fn)
    def handle_type_error_wrapper(*args, **kwargs):
        def any_match(string_list, obj):
            return filter(lambda x: x in obj, string_list)

        try:
            return fn(*args, **kwargs)
        except TypeError as e:
            message = e.__str__()
            str_list = ['takes exactly', 'got an unexpected', 'takes no argument']

            if fn.__name__ in message and any_match(str_list, message):
                raise HTTPError(400, message)

            raise

    return handle_type_error_wrapper
Convert ``TypeError`` to ``bottle.HTTPError`` with ``400`` code and a message
about wrong parameters.

Raises:
    HTTPError: 400 in case too many/too few function parameters were given.
codesearchnet
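A minimal sketch with a hypothetical handler: when the decorated function is called with an unexpected keyword argument, the resulting TypeError is converted into bottle.HTTPError(400, ...) instead of surfacing as a server error.

@handle_type_error
def create_user(name, email):
    return {"name": name, "email": email}

create_user(name="alice", email="a@b.example", age=30)  # -> HTTPError 400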
def vapor_pressure(temp, hum):
    if np.isscalar(hum):
        hum = np.zeros(temp.shape) + hum
    assert temp.shape == hum.shape

    positives = np.array(temp >= 273.15)
    vap_press = np.zeros(temp.shape) * np.nan
    vap_press[positives] = (6.112 * np.exp((17.62 * (temp[positives] - 273.15)) /
                                           (243.12 + (temp[positives] - 273.15))) *
                            hum[positives] / 100.)
    vap_press[~positives] = (6.112 * np.exp((22.46 * (temp[~positives] - 273.15)) /
                                            (272.62 + (temp[~positives] - 273.15))) *
                             hum[~positives] / 100.)
    return vap_press
Calculates vapor pressure from temperature and humidity after Sonntag (1990).

Args:
    temp: temperature values
    hum: humidity value(s). Can be scalar (e.g. for calculating saturation
        vapor pressure).

Returns:
    Vapor pressure in hPa.
juraj-google-style
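A quick numeric check, assuming numpy is imported as np as in the function body: at 20 °C (293.15 K) the saturation vapor pressure is roughly 23.4 hPa, so 50 % relative humidity gives about 11.7 hPa; the second value exercises the over-ice branch below 273.15 K.

import numpy as np

temp = np.array([293.15, 263.15])  # 20 °C and -10 °C in Kelvin
hum = np.array([50.0, 80.0])       # relative humidity in percent
print(vapor_pressure(temp, hum))   # approximately [11.7, 2.1] hPa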
def readline(self, size=None):
    data = EMPTY
    if size == 0:
        return data

    while True:
        if size and len(data) >= size:
            return data

        if not self.buffer:
            self._fetch()
            if not self.buffer:
                return data

        newline_pos = self.buffer.find(LF)
        if size:
            if newline_pos == -1:
                remaining = size - len(data)
                data += self.buffer[:remaining]
                self.buffer = self.buffer[remaining:]
            else:
                remaining = min(size - len(data), newline_pos)
                data += self.buffer[:remaining]
                self.buffer = self.buffer[remaining:]
        else:
            if newline_pos == -1:
                data += self.buffer
                self.buffer = EMPTY
            else:
                data += self.buffer[:newline_pos]
                self.buffer = self.buffer[newline_pos:]
Read a single line from rfile buffer and return it.

Args:
    size (int): minimum amount of data to read

Returns:
    bytes: One line from rfile.
juraj-google-style
def operations_happening_at_same_time_as(
        self,
        scheduled_operation: ScheduledOperation
) -> List[ScheduledOperation]:
    overlaps = self.query(
        time=scheduled_operation.time,
        duration=scheduled_operation.duration)
    return [e for e in overlaps if e != scheduled_operation]
Finds operations happening at the same time as the given operation.

Args:
    scheduled_operation: The operation specifying the time to query.

Returns:
    Scheduled operations that overlap with the given operation.
juraj-google-style
def resize(
    self,
    images: 'torch.Tensor',
    size: SizeDict,
    interpolation: Optional['F.InterpolationMode'] = None,
    size_divisor: Optional[int] = None,
) -> 'torch.Tensor':
    if interpolation is None:
        interpolation = self.resample
    shorter = size.shortest_edge
    longer = int(MAX_LONGER_EDGE / MAX_SHORTER_EDGE * shorter)
    heights = images.shape[-2]
    widths = images.shape[-1]
    if heights < widths:
        new_heights = shorter
        new_widths = widths * (shorter / heights)
    else:
        new_heights = heights * (shorter / widths)
        new_widths = shorter
    if max(new_heights, new_widths) > longer:
        scale = longer / max(new_heights, new_widths)
        new_heights = new_heights * scale
        new_widths = new_widths * scale
    new_heights = int(new_heights + 0.5)
    new_widths = int(new_widths + 0.5)
    if size_divisor is not None:
        # Round height and width down to the nearest multiple of size_divisor.
        new_heights = new_heights // size_divisor * size_divisor
        new_widths = new_widths // size_divisor * size_divisor
    return F.resize(images, [new_heights, new_widths], interpolation=interpolation)
Resize an image or batch of images to specified size.

Args:
    images (`torch.Tensor`):
        Image or batch of images to resize.
    size (`Dict[str, int]`):
        Size dictionary with shortest_edge key.
    interpolation (`F.InterpolationMode`, *optional*):
        Interpolation method to use.
    size_divisor (`int`, *optional*):
        Value to ensure height/width are divisible by.

Returns:
    `torch.Tensor`: Resized image or batch of images.
github-repos
def assert_files_same(path1, path2):
    difflines = compare_files(path1, path2)
    assert len(difflines) == 0, ''.join(['\n'] + difflines)
Asserts that two files are the same and returns delta using -, ?, + format if not.

Args:
    path1 (str): Path to first file
    path2 (str): Path to second file

Returns:
    None
juraj-google-style
def add_done_callback(self, fn):
    if self._result_set:
        _helpers.safe_invoke_callback(fn, self)
        return

    self._done_callbacks.append(fn)

    if self._polling_thread is None:
        self._polling_thread = _helpers.start_daemon_thread(
            target=self._blocking_poll)
Add a callback to be executed when the operation is complete.

If the operation is not already complete, this will start a helper thread
to poll for the status of the operation in the background.

Args:
    fn (Callable[Future]): The callback to execute when the operation is
        complete.
codesearchnet
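A sketch of how a caller might register a callback, assuming `operation` is an existing instance of the surrounding future/polling class (for example a long-running operation returned by a Google Cloud client):

def on_done(future):
    # Runs once the operation completes, possibly on the polling thread.
    print("operation finished:", future)

operation.add_done_callback(on_done)  # `operation` is assumed to exist already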
def check_partition_column(partition_column, cols):
    for k, v in cols.items():
        if k == partition_column:
            if v == "int":
                return
            else:
                raise InvalidPartitionColumn(
                    "partition_column must be int, and not {0}".format(v)
                )
    raise InvalidPartitionColumn(
        "partition_column {0} not found in the query".format(partition_column)
    )
Check partition_column existence and type.

Args:
    partition_column: partition_column name
    cols: dict with column names and python types

Returns:
    None
juraj-google-style
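For example, with a column-to-type mapping like the one the surrounding query-inspection code would produce:

cols = {"id": "int", "name": "str", "created": "datetime"}

check_partition_column("id", cols)       # returns None: "id" is an int column
check_partition_column("name", cols)     # raises InvalidPartitionColumn (not int)
check_partition_column("missing", cols)  # raises InvalidPartitionColumn (not found)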
def setRelay(self, seconds, relay, status, password='00000000'):
    result = False
    self.setContext('setRelay')
    try:
        self.clearCmdMsg()

        if len(password) != 8:
            self.writeCmdMsg('Invalid password length.')
            self.setContext('')
            return result

        if (seconds < 0) or (seconds > 9999):
            self.writeCmdMsg('Relay duration must be between 0 and 9999.')
            self.setContext('')
            return result

        if not self.requestA():
            self.writeCmdMsg('Bad read CRC on setting')
        elif not self.serialCmdPwdAuth(password):
            self.writeCmdMsg('Password failure')
        else:
            req_str = ''
            req_str = ('01573102303038' +
                       binascii.hexlify(str(relay)).zfill(2) +
                       '28' +
                       binascii.hexlify(str(status)).zfill(2) +
                       binascii.hexlify(str(seconds).zfill(4)) +
                       '2903')
            req_str += self.calc_crc16(req_str[2:].decode('hex'))
            self.m_serial_port.write(req_str.decode('hex'))
            if self.m_serial_port.getResponse(self.getContext()).encode('hex') == '06':
                self.writeCmdMsg('Success: 06 returned.')
                result = True
        self.serialPostEnd()
    except:
        ekm_log(traceback.format_exc(sys.exc_info()))

    self.setContext('')
    return result
Serial call to set relay.

Args:
    seconds (int): Seconds to hold, zero is hold forever.
        See :class:`~ekmmeters.RelayInterval`.
    relay (int): Selected relay, see :class:`~ekmmeters.Relay`.
    status (int): Status to set, see :class:`~ekmmeters.RelayState`.
    password (str): Optional password.

Returns:
    bool: True on completion and ACK.
codesearchnet
def app_uninstall(self, package_name, keep_data=False):
    if keep_data:
        return self.run_cmd('uninstall', '-k', package_name)
    else:
        return self.run_cmd('uninstall', package_name)
Uninstall package.

Args:
    - package_name(string): package name ex: com.example.demo
    - keep_data(bool): keep the data and cache directories
juraj-google-style
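Assuming `device` is an instance of the adb wrapper class this method belongs to, a call might look like:

device.app_uninstall('com.example.demo')                  # remove app and its data
device.app_uninstall('com.example.demo', keep_data=True)  # adb uninstall -k, keeps data/cache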
def _delete_example(self, request):
    index = int(request.args.get('index'))
    if index >= len(self.examples):
        return http_util.Respond(request, {'error': 'invalid index provided'},
                                 'application/json', code=400)
    del self.examples[index]
    self.updated_example_indices = set(
        [i if i < index else i - 1 for i in self.updated_example_indices])
    self.generate_sprite([ex.SerializeToString() for ex in self.examples])
    return http_util.Respond(request, {}, 'application/json')
Deletes the specified example.

Args:
    request: A request that should contain 'index'.

Returns:
    An empty response.
codesearchnet
def get_line_count(fname):
    i = 0
    with open(fname) as f:
        for i, l in enumerate(f):
            pass
    return i + 1
Counts the number of lines in a file.

Args:
    fname: string, name of the file.

Returns:
    integer, the number of lines in the file.
codesearchnet
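For instance, counting the lines of a small file written on the fly:

with open('sample.txt', 'w') as f:
    f.write('first\nsecond\nthird\n')

print(get_line_count('sample.txt'))  # 3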
def _init_vocab_from_list(self, vocab_list):

    def token_gen():
        for token in vocab_list:
            if token not in RESERVED_TOKENS:
                yield token

    self._init_vocab(token_gen())
Initialize tokens from a list of tokens.

It is ok if reserved tokens appear in the vocab list. They will be removed.
The set of tokens in vocab_list should be unique.

Args:
    vocab_list: A list of tokens.
codesearchnet
def from_json_file(cls, file_name):
    with open(file_name) as json_data:
        config = json.load(json_data)

    return cls(config)
Construct OneViewClient using a json file.

Args:
    file_name: json full path.

Returns:
    OneViewClient:
juraj-google-style
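Typical use, assuming a JSON file holding the connection settings (the exact keys, such as the appliance address and credentials, follow the library's configuration format):

# config.json is assumed to contain the client configuration for the appliance.
oneview_client = OneViewClient.from_json_file('config.json')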
def LocaltimeToUTC(cls, timestamp, timezone, is_dst=False):
    if timezone and timezone != pytz.UTC:
        datetime_object = (
            datetime.datetime(1970, 1, 1, 0, 0, 0, 0, tzinfo=None) +
            datetime.timedelta(microseconds=timestamp))

        datetime_delta = timezone.utcoffset(datetime_object, is_dst=is_dst)
        seconds_delta = int(datetime_delta.total_seconds())
        timestamp -= seconds_delta * definitions.MICROSECONDS_PER_SECOND

    return timestamp
Converts the timestamp in localtime of the timezone to UTC.

Args:
    timestamp: The timestamp which is an integer containing the number of
        micro seconds since January 1, 1970, 00:00:00 UTC.
    timezone: The timezone (pytz.timezone) object.
    is_dst: A boolean to indicate the timestamp is corrected for daylight
        savings time (DST) only used for the DST transition period.

Returns:
    The timestamp which is an integer containing the number of micro seconds
    since January 1, 1970, 00:00:00 UTC or 0 on error.
codesearchnet
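A sketch of the conversion, assuming the classmethod lives on a timestamp helper class (called `Timestamp` here purely for illustration) and that the input is microseconds since the epoch expressed in local time:

import pytz

local_us = 1470000000 * 1000000                   # microseconds, local wall-clock time
tz = pytz.timezone('Europe/Amsterdam')
utc_us = Timestamp.LocaltimeToUTC(local_us, tz)   # `Timestamp` is a stand-in class name
# CEST is UTC+2 on that date, so utc_us == local_us - 2 * 3600 * 1000000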
def determine_drift(self):
    try:
        response = self._cloud_formation.detect_stack_drift(StackName=self._stack_name)
        drift_request_id = response.get('StackDriftDetectionId', None)
        if drift_request_id:
            logging.info('drift_request_id: %s - polling', drift_request_id)
            drift_calc_done = False
            while not drift_calc_done:
                time.sleep(self.nap_time)
                response = self._cloud_formation.describe_stack_drift_detection_status(
                    StackDriftDetectionId=drift_request_id
                )
                current_state = response.get('DetectionStatus', None)
                logging.info(
                    'describe_stack_drift_detection_status(): {}'.format(current_state)
                )
                drift_calc_done = current_state in CALC_DONE_STATES

            drift_answer = response.get('StackDriftStatus', 'UNKNOWN')
            logging.info('drift of {}: {}'.format(
                self._stack_name,
                drift_answer
            ))
            if drift_answer == 'DRIFTED':
                if self._verbose:
                    self._print_drift_report()
                return False
            else:
                return True
        else:
            logging.warning('drift_request_id is None')
            return False
    except Exception as wtf:
        logging.error(wtf, exc_info=True)
        return False
Determine the drift of the stack.

Args:
    None

Returns:
    Good or Bad; True or False
juraj-google-style
def indexSearch(self, indexes):
    if not self._dataFrame.empty:
        filter0 = self._dataFrame.index == -9999
        for index in indexes:
            filter1 = self._dataFrame.index == index
            filter0 = np.logical_or(filter0, filter1)
        return filter0
    else:
        return []
Filters the data by a list of indexes.

Args:
    indexes (list of int): List of index numbers to return.

Returns:
    list: A list containing all indexes with filtered data. Matches will
        be `True`, the remaining items will be `False`. If the dataFrame
        is empty, an empty list will be returned.
codesearchnet
def get_object(self, object_ids):
    for object_id in object_ids:
        if not isinstance(object_id, ObjectID):
            raise TypeError(
                'Attempting to call `get` on the value {}, which is not an '
                'ray.ObjectID.'.format(object_id))

    plain_object_ids = [
        plasma.ObjectID(object_id.binary()) for object_id in object_ids
    ]
    for i in range(0, len(object_ids), ray._config.worker_fetch_request_size()):
        self.raylet_client.fetch_or_reconstruct(
            object_ids[i:(i + ray._config.worker_fetch_request_size())], True)

    final_results = self.retrieve_and_deserialize(plain_object_ids, 0)
    unready_ids = {
        plain_object_ids[i].binary(): i
        for (i, val) in enumerate(final_results)
        if val is plasma.ObjectNotAvailable
    }

    if len(unready_ids) > 0:
        while len(unready_ids) > 0:
            object_ids_to_fetch = [
                plasma.ObjectID(unready_id) for unready_id in unready_ids.keys()
            ]
            ray_object_ids_to_fetch = [
                ObjectID(unready_id) for unready_id in unready_ids.keys()
            ]
            fetch_request_size = ray._config.worker_fetch_request_size()
            for i in range(0, len(object_ids_to_fetch), fetch_request_size):
                self.raylet_client.fetch_or_reconstruct(
                    ray_object_ids_to_fetch[i:(i + fetch_request_size)],
                    False,
                    self.current_task_id)
            results = self.retrieve_and_deserialize(
                object_ids_to_fetch,
                max([
                    ray._config.get_timeout_milliseconds(),
                    int(0.01 * len(unready_ids))
                ]))
            for (i, val) in enumerate(results):
                if val is not plasma.ObjectNotAvailable:
                    object_id = object_ids_to_fetch[i].binary()
                    index = unready_ids[object_id]
                    final_results[index] = val
                    unready_ids.pop(object_id)

        self.raylet_client.notify_unblocked(self.current_task_id)

    assert len(final_results) == len(object_ids)
    return final_results
Get the value or values in the object store associated with the IDs.

Return the values from the local object store for object_ids. This will
block until all the values for object_ids have been written to the local
object store.

Args:
    object_ids (List[object_id.ObjectID]): A list of the object IDs whose
        values should be retrieved.
codesearchnet
def _read(self, entry):
    start_time = time.time()
    content = self._zip.read(entry.filename)

    ctx = context.get()
    if ctx:
        operation.counters.Increment(COUNTER_IO_READ_BYTES, len(content))(ctx)
        operation.counters.Increment(
            COUNTER_IO_READ_MSEC, int((time.time() - start_time) * 1000))(ctx)

    return content
Read entry content.

Args:
    entry: zip file entry as zipfile.ZipInfo.

Returns:
    Entry content as string.
juraj-google-style
def forward(self, permuted_tokens, tokens_per_expert):
    fc1_output = self.fc1(permuted_tokens, tokens_per_expert)
    projection, gate = torch.chunk(fc1_output, 2, dim=-1)
    fc1_output = nn.functional.silu(projection) * gate
    fc2_output = self.fc2(fc1_output, tokens_per_expert)
    return fc2_output
Forward pass of the Grouped MLP.

Args:
    permuted_tokens (torch.Tensor): Permuted input tokens.
    tokens_per_expert (torch.Tensor): Number of tokens assigned to each expert.

Returns:
    torch.Tensor: Output tensor after passing through the MLP.
github-repos
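The gating in the forward pass above is the SwiGLU pattern: the first projection produces twice the hidden width, which is split into a value and a gate. A self-contained sketch with plain linear layers standing in for the grouped expert layers of the original:

import torch
from torch import nn

hidden, ffn = 16, 32
fc1 = nn.Linear(hidden, 2 * ffn)   # stand-in for the grouped fc1
fc2 = nn.Linear(ffn, hidden)       # stand-in for the grouped fc2

tokens = torch.randn(4, hidden)
projection, gate = torch.chunk(fc1(tokens), 2, dim=-1)
out = fc2(nn.functional.silu(projection) * gate)
print(out.shape)  # torch.Size([4, 16])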