Columns: code — string (lengths 20 to 4.93k); docstring — string (lengths 33 to 1.27k); source — string (3 classes).
def upsert_and_get(self, conflict_target: List, fields: Dict, index_predicate: str=None): return self.get_queryset().upsert_and_get(conflict_target, fields, index_predicate)
Creates a new record or updates the existing one with the specified data and then gets the row. Arguments: conflict_target: Fields to pass into the ON CONFLICT clause. fields: Fields to insert/update. index_predicate: The index predicate to satisfy an arbiter partial index. Returns: The model instance representing the row that was created/updated.
juraj-google-style
def combine_reducers(reducers): final_reducers = {key: reducer for key, reducer in reducers.items() if hasattr(reducer, '__call__')} sanity_error = None try: assert_reducer_sanity(final_reducers) except Exception as e: sanity_error = e def combination(state=None, action=None): if state is None: state = {} if sanity_error: raise sanity_error has_changed = False next_state = {} for key, reducer in final_reducers.items(): previous_state_for_key = state.get(key) next_state_for_key = reducer(previous_state_for_key, action) if next_state_for_key is None: msg = get_undefined_state_error_message(key, action) raise Exception(msg) next_state[key] = next_state_for_key has_changed = (has_changed or next_state_for_key != previous_state_for_key) return next_state if has_changed else state return combination
composition tool for creating reducer trees. Args: reducers: dict with state keys and reducer functions that are responsible for each key Returns: a new, combined reducer function
juraj-google-style
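A minimal usage sketch for the combine_reducers sample above (Python). The reducer names and actions below are hypothetical, and it assumes the module's helpers (assert_reducer_sanity, get_undefined_state_error_message) are available alongside combine_reducers.

def counter(state, action):
    # Each reducer owns one key of the state dict and must never return None.
    state = 0 if state is None else state
    return state + 1 if action == 'increment' else state

def actions_log(state, action):
    state = [] if state is None else state
    return state + [action] if action is not None else state

root = combine_reducers({'counter': counter, 'actions_log': actions_log})
state = root(None, 'increment')   # {'counter': 1, 'actions_log': ['increment']}
state = root(state, 'increment')  # a new dict is returned only when some key changed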
def _compute_cosine_distance(cls, inputs, clusters, inputs_normalized=True): output = [] if not inputs_normalized: with ops.colocate_with(clusters, ignore_existing=True): clusters = nn_impl.l2_normalize(clusters, axis=1) for inp in inputs: with ops.colocate_with(inp, ignore_existing=True): if not inputs_normalized: inp = nn_impl.l2_normalize(inp, axis=1) output.append(1 - math_ops.matmul(inp, clusters, transpose_b=True)) return output
Computes cosine distance between each input and each cluster center. Args: inputs: list of input Tensors. clusters: cluster Tensor. inputs_normalized: if True, assumes that inputs and clusters are normalized and computes the dot product, which is equivalent to the cosine distance. Else it L2-normalizes the inputs first. Returns: list of Tensors, where each element corresponds to each element in inputs. The value is the distance of each row to all the cluster centers.
github-repos
def _VerifyValues(self, pool_func, input_sizes, window, strides, padding, expected): total_size = 1 for s in input_sizes: total_size *= s x = np.arange(1.0, total_size + 1, dtype=np.float32) x = x.reshape(input_sizes) with self.session() as sess, self.test_scope(): inputs = array_ops.placeholder(dtypes.float32) t = pool_func(inputs, ksize=[1] + window + [1], strides=[1] + strides + [1], padding=padding) vals = sess.run(t, {inputs: x}) actual = vals.flatten() self.assertAllClose(expected, actual)
Verifies the output values of the pooling function. Args: pool_func: Function to be called: co.MaxPool, co.AvgPool. input_sizes: Input tensor dimensions. window: Tuple of kernel dims: planes, rows, cols. strides: Tuple of strides for dims: planes, rows, cols. padding: Padding type. expected: An array containing the expected operation outputs.
github-repos
def get_conda_root(): try: conda_root = _import_conda_root() except ImportError: envs_dir = dirname(CONDA_PREFIX) if (basename(envs_dir) == 'envs'): conda_root = dirname(envs_dir) else: conda_root = _conda_root_from_conda_info() return conda_root
Get the PREFIX of the conda installation. Returns: str: the ROOT_PREFIX of the conda installation
codesearchnet
def setViewModel(self, model): if isinstance(model, DataFrameModel): self.enableEditing(False) self.uncheckButton() selectionModel = self.tableView.selectionModel() self.tableView.setModel(model) model.dtypeChanged.connect(self.updateDelegate) model.dataChanged.connect(self.updateDelegates) del selectionModel
Sets the model for the enclosed TableView in this widget. Args: model (DataFrameModel): The model to be displayed by the Table View.
codesearchnet
def _on_connection_open(self, connection): _log.info("Successfully opened connection to %s", connection.params.host) self._channel = connection.channel(on_open_callback=self._on_channel_open)
Callback invoked when the connection is successfully established. Args: connection (pika.connection.SelectConnection): The newly-established connection.
juraj-google-style
def release(self): if (not self.acquired): return False os.close(self.fd) if os.path.exists(self.path): os.remove(self.path) self.acquired = False return True
Cleans up the lockfile if it was acquired. Args: self (JLock): the ``JLock`` instance Returns: ``False`` if the lock was not released or the lock is not acquired, otherwise ``True``.
codesearchnet
def prepare_context(pipeline, context_in_string, context): logger.debug("starting") parsed_context = get_parsed_context( pipeline=pipeline, context_in_string=context_in_string) context.update(parsed_context) logger.debug("done")
Prepare context for pipeline run. Args: pipeline: dict. Dictionary representing the pipeline. context_in_string: string. Argument string used to initialize context. context: pypyr.context.Context. Merge any new context generated from context_in_string into this context instance. Returns: None. The context instance to use for the pipeline run is contained in the context arg, it's not passed back as a function return.
juraj-google-style
def trace(self, data, callback=None): conn_id = self._find_connection(self.conn_string) if conn_id is not None: self.adapter.notify_event_nowait(self.conn_string, 'trace', data) if callback is not None: callback(conn_id is not None)
Queue data for tracing Args: data (bytearray, string): Unstructured data to trace to any connected client. callback (callable): An optional callback that will be called with a bool value of True when this data actually gets traced. If the client disconnects and the data is dropped instead, callback will be called with False.
juraj-google-style
def multilayer_fully_connected(images, labels): images = pt.wrap(images) with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=1e-05): return images.flatten().fully_connected(100).fully_connected(100).softmax_classifier(10, labels)
Creates a multi layer network of fully_connected layers. Each layer is 100 neurons. Please change this to experiment with architectures. Args: images: The input images. labels: The labels as dense one-hot vectors. Returns: A softmax result.
codesearchnet
def set_reconnect_parameters(self, interval, attempts, restore_state=True): self._reconnect_attempts = max(0, attempts) self._reconnect_interval = max(0, interval) self._reconnect_restore_state = restore_state
Sets the behaviour of the automatic reconnect feature. When a connected SK8 is disconnected unexpectedly (in other words not by a user-triggered action), an automatic attempt to reconnect to the device can be made. If successful this will typically resume the connection with an interruption of only a few seconds. This method allows the application to configure some aspects of the automatic reconnect functionality. Args: interval (float): time in seconds between successive attempts to reconnect. Also applies to the delay between the initial disconnection and the first attempt to reconnect. attempts (int): the number of attempts to make to recreate the connection. This can be set to zero in order to disable the reconnection feature. restore_state (bool): if True, the streaming state of the device will also be restored if possible. For example, the IMU configuration will be re-applied after the reconnection attempt succeeds, to return the SK8 to the same state it was in before the disconnection occurred. Returns: None
codesearchnet
def count_weights(scope=None, exclude=None, graph=None): if scope: scope = (scope if scope.endswith('/') else (scope + '/')) graph = (graph or tf.get_default_graph()) vars_ = graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if scope: vars_ = [var for var in vars_ if var.name.startswith(scope)] if exclude: exclude = re.compile(exclude) vars_ = [var for var in vars_ if (not exclude.match(var.name))] shapes = [var.get_shape().as_list() for var in vars_] return int(sum((np.prod(shape) for shape in shapes)))
Count learnable parameters. Args: scope: Restrict the count to a variable scope. exclude: Regex to match variable names to exclude. graph: Operate on a graph other than the current default graph. Returns: Number of learnable parameters as integer.
codesearchnet
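A hedged usage sketch for count_weights, in the same TF1-style graph API that the sample itself uses (variable names here are illustrative):

import tensorflow as tf  # TF 1.x graph mode assumed, matching the sample above

with tf.variable_scope('encoder'):
    tf.get_variable('kernel', shape=[128, 64])
    tf.get_variable('bias', shape=[64])

# Counts only trainable variables under the 'encoder/' scope: 128*64 + 64 = 8256.
print(count_weights(scope='encoder'))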
def _get_resource_view(self, resource_view): if isinstance(resource_view, dict): resource_view = ResourceView(resource_view, configuration=self.configuration) if isinstance(resource_view, ResourceView): return resource_view raise HDXError(('Type %s is not a valid resource view!' % type(resource_view).__name__))
Get resource view id Args: resource_view (Union[ResourceView,Dict]): ResourceView metadata from a ResourceView object or dictionary Returns: ResourceView: ResourceView object
codesearchnet
def from_latents(self, latents: torch.Tensor): quantized_representation = 0 quantized_latents = [] codes = [] codebook_dims_tensor = torch.tensor([0] + [q.codebook_dim for q in self.quantizers]) dims = torch.cumsum(codebook_dims_tensor, dim=0) n_codebooks = np.where(dims <= latents.shape[1])[0].max(axis=0, keepdims=True)[0] for i in range(n_codebooks): hidden_dim_j, hidden_dim_k = (dims[i], dims[i + 1]) quantized_latents_i, codes_i = self.quantizers[i].decode_latents(latents[:, hidden_dim_j:hidden_dim_k, :]) quantized_latents.append(quantized_latents_i) codes.append(codes_i) quantized_representation_i = self.quantizers[i].out_proj(quantized_latents_i) quantized_representation = quantized_representation + quantized_representation_i return (quantized_representation, torch.cat(quantized_latents, dim=1))
Reconstructs the quantized representation from unquantized latents. Args: latents (`torch.Tensor` of shape `(batch_size, total_latent_dimension, time_steps)`): Continuous representation of input after projection. Returns: quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`): Quantized representation of the full-projected space. quantized_latents (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`): Quantized representation of the latent space (continuous representation before quantization).
github-repos
def RemoveTask(self, task): with self._lock: if (task.identifier not in self._tasks_abandoned): raise KeyError('Task {0:s} was not abandoned.'.format(task.identifier)) if (not task.has_retry): raise KeyError('Will not remove a task {0:s} without retry task.'.format(task.identifier)) del self._tasks_abandoned[task.identifier] logger.debug('Removed task {0:s}.'.format(task.identifier))
Removes an abandoned task. Args: task (Task): task. Raises: KeyError: if the task was not abandoned or the task was abandoned and was not retried.
codesearchnet
def _filters_pb(self): num_filters = len(self._field_filters) if (num_filters == 0): return None elif (num_filters == 1): return _filter_pb(self._field_filters[0]) else: composite_filter = query_pb2.StructuredQuery.CompositeFilter(op=enums.StructuredQuery.CompositeFilter.Operator.AND, filters=[_filter_pb(filter_) for filter_ in self._field_filters]) return query_pb2.StructuredQuery.Filter(composite_filter=composite_filter)
Convert all the filters into a single generic Filter protobuf. This may be a lone field filter or unary filter, may be a composite filter or may be :data:`None`. Returns: google.cloud.firestore_v1beta1.types.\ StructuredQuery.Filter: A "generic" filter representing the current query's filters.
codesearchnet
def get_event_report(self, source="log"): ofile = { "output": self.output_file, "log": self.log_file}[source] parser = events.EventsParser() if not ofile.exists: if not self.mpiabort_file.exists: return None else: abort_report = parser.parse(self.mpiabort_file.path) return abort_report try: report = parser.parse(ofile.path) if self.mpiabort_file.exists: logger.critical("Found ABI_MPIABORTFILE!!!!!") abort_report = parser.parse(self.mpiabort_file.path) if len(abort_report) != 1: logger.critical("Found more than one event in ABI_MPIABORTFILE") last_abort_event = abort_report[-1] if report and last_abort_event != report[-1]: report.append(last_abort_event) else: report.append(last_abort_event) return report except Exception as exc: msg = "%s: Exception while parsing ABINIT events:\n %s" % (ofile, str(exc)) self.set_status(self.S_ABICRITICAL, msg=msg) return parser.report_exception(ofile.path, exc)
Analyzes the main logfile of the calculation for possible Errors or Warnings. If the ABINIT abort file is found, the errors found in this file are added to the output report. Args: source: "output" for the main output file, "log" for the log file. Returns: :class:`EventReport` instance or None if the source file does not exist.
juraj-google-style
def ParseFileEntry(self, parser_mediator, file_entry): index_file_parser = ChromeCacheIndexFileParser() file_object = file_entry.GetFileObject() try: index_file_parser.ParseFileObject(parser_mediator, file_object) except (IOError, errors.ParseError) as exception: file_object.close() display_name = parser_mediator.GetDisplayName() raise errors.UnableToParseFile( '[{0:s}] unable to parse index file {1:s} with error: {2!s}'.format( self.NAME, display_name, exception)) try: file_system = file_entry.GetFileSystem() self._ParseIndexTable( parser_mediator, file_system, file_entry, index_file_parser.index_table) finally: file_object.close()
Parses Chrome Cache files. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_entry (dfvfs.FileEntry): file entry. Raises: UnableToParseFile: when the file cannot be parsed.
juraj-google-style
def __init__(self, name=None): rr = gen_io_ops.whole_file_reader_v2(name=name) super(WholeFileReader, self).__init__(rr, supports_serialize=True)
Create a WholeFileReader. Args: name: A name for the operation (optional).
github-repos
def profile_settings_args_install_json(self, ij, required): profile_args = {} for p in ij.get('params') or []: if p.get('required', False) != required and required is not None: continue if p.get('type').lower() == 'boolean': profile_args[p.get('name')] = self._to_bool(p.get('default', False)) elif p.get('type').lower() == 'choice': valid_values = '|'.join(self.expand_valid_values(p.get('validValues', []))) profile_args[p.get('name')] = '[{}]'.format(valid_values) elif p.get('type').lower() == 'multichoice': profile_args[p.get('name')] = p.get('validValues', []) elif p.get('name') in ['api_access_id', 'api_secret_key']: pass else: types = '|'.join(p.get('playbookDataType', [])) if types: profile_args[p.get('name')] = p.get('default', '<{}>'.format(types)) else: profile_args[p.get('name')] = p.get('default', '') return profile_args
Return args based on install.json params. Args: ij (dict): The install.json contents. required (bool): If True only required args will be returned. Returns: dict: Dictionary of required or optional App args.
juraj-google-style
def list_directories_in_directory(full_directory_path): directories = list() for directory_name in __os.listdir(full_directory_path): if __os.path.isdir(__os.path.join(full_directory_path, directory_name)): directories.append(directory_name) return directories
List the directories in a specified directory Args: full_directory_path: The full directory path to check, derived from the os module Returns: A list of directory names
juraj-google-style
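For illustration, a tiny call to the function above (the path and output are hypothetical):

subdirs = list_directories_in_directory('/var/log')
print(subdirs)  # e.g. ['apt', 'journal'] -- immediate subdirectory names only, no files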
def build_ann(N_input=None, N_hidden=2, N_output=1, hidden_layer_type='Linear', verbosity=1): N_input = N_input or 1 N_output = N_output or 1 N_hidden = N_hidden or tuple() if isinstance(N_hidden, (int, float, basestring)): N_hidden = (int(N_hidden),) hidden_layer_type = hidden_layer_type or tuple() hidden_layer_type = tuplify(normalize_layer_type(hidden_layer_type)) if verbosity > 0: print(N_hidden, ' layers of type ', hidden_layer_type) assert(len(N_hidden) == len(hidden_layer_type)) nn = pb.structure.FeedForwardNetwork() nn.addInputModule(pb.structure.BiasUnit(name='bias')) nn.addInputModule(pb.structure.LinearLayer(N_input, name='input')) for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden, hidden_layer_type)): Nhid = int(Nhid) nn.addModule(hidlaytype(Nhid, name=('hidden-{}'.format(i) if i else 'hidden'))) nn.addOutputModule(pb.structure.LinearLayer(N_output, name='output')) nn.addConnection(pb.structure.FullConnection(nn['bias'], nn['hidden'] if N_hidden else nn['output'])) nn.addConnection(pb.structure.FullConnection(nn['input'], nn['hidden'] if N_hidden else nn['output'])) for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden[:-1], hidden_layer_type[:-1])): Nhid = int(Nhid) nn.addConnection(pb.structure.FullConnection(nn[('hidden-{}'.format(i) if i else 'hidden')], nn['hidden-{}'.format(i + 1)])) i = len(N_hidden) - 1 nn.addConnection(pb.structure.FullConnection(nn['hidden-{}'.format(i) if i else 'hidden'], nn['output'])) nn.sortModules() if FAST: try: nn.convertToFastNetwork() except: if verbosity > 0: print('Unable to convert slow PyBrain NN to a fast ARAC network...') if verbosity > 0: print(nn.connections) return nn
Build a neural net with the indicated input, hidden, and output dimensions Arguments: params (dict or PyBrainParams namedtuple): default: {'N_hidden': 6} (this is the only parameter that affects the NN build) Returns: FeedForwardNetwork with N_input + N_hidden + N_output nodes in 3 layers
juraj-google-style
def __init__(self, checkpoint_dir, save_secs=None, save_steps=None, saver=None, checkpoint_basename='model.ckpt', scaffold=None, listeners=None, save_graph_def=True): logging.info('Create CheckpointSaverHook.') if saver is not None and scaffold is not None: raise ValueError('You cannot provide both saver and scaffold.') self._saver = saver self._checkpoint_dir = checkpoint_dir self._save_path = os.path.join(checkpoint_dir, checkpoint_basename) self._scaffold = scaffold self._timer = SecondOrStepTimer(every_secs=save_secs, every_steps=save_steps) self._listeners = listeners or [] self._steps_per_run = 1000000 self._save_graph_def = save_graph_def
Initializes a `CheckpointSaverHook`. Args: checkpoint_dir: `str`, base directory for the checkpoint files. save_secs: `int`, save every N secs. save_steps: `int`, save every N steps. saver: `Saver` object, used for saving. checkpoint_basename: `str`, base name for the checkpoint files. scaffold: `Scaffold`, use to get saver object. listeners: List of `CheckpointSaverListener` subclass instances. Used for callbacks that run immediately before or after this hook saves the checkpoint. save_graph_def: Whether to save the GraphDef and MetaGraphDef to `checkpoint_dir`. The GraphDef is saved after the session is created as `graph.pbtxt`. MetaGraphDefs are saved out for every checkpoint as `model.ckpt-*.meta`. Raises: ValueError: One of `save_steps` or `save_secs` should be set. ValueError: At most one of `saver` or `scaffold` should be set.
github-repos
def _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata): feeds = dict(((t.deref()._as_tf_output(), v) for t, v in feed_dict.items())) fetches = [t._as_tf_output() for t in fetch_list] targets = [op._c_op for op in target_list] def _run_fn(feed_dict, fetch_list, target_list, options, run_metadata): self._extend_graph() return self._call_tf_sessionrun(options, feed_dict, fetch_list, target_list, run_metadata) def _prun_fn(handle, feed_dict, fetch_list): if target_list: raise RuntimeError(f'partial_run() requires empty `target_list`. Received: target_list={target_list} (non-empty)') return self._call_tf_sessionprun(handle, feed_dict, fetch_list) if handle is None: return self._do_call(_run_fn, feeds, fetches, targets, options, run_metadata) else: return self._do_call(_prun_fn, handle, feeds, fetches)
Runs a step based on the given fetches and feeds. Args: handle: a handle for partial_run. None if this is just a call to run(). target_list: A list of operations to be run, but not fetched. fetch_list: A list of tensors to be fetched. feed_dict: A dictionary that maps tensors to numpy ndarrays. options: A (pointer to a) [`RunOptions`] protocol buffer, or None run_metadata: A (pointer to a) [`RunMetadata`] protocol buffer, or None Returns: A list of numpy ndarrays, corresponding to the elements of `fetch_list`. If the ith element of `fetch_list` contains the name of an operation, the first Tensor output of that operation will be returned for that element. Raises: tf.errors.OpError: Or one of its subclasses on error.
github-repos
def GetDataStreamByPathSpec(self, path_spec): file_entry = self.GetFileEntryByPathSpec(path_spec) if (not file_entry): return None data_stream_name = getattr(path_spec, 'data_stream', None) return file_entry.GetDataStream(data_stream_name)
Retrieves a data stream for a path specification. Args: path_spec (PathSpec): a path specification. Returns: DataStream: a data stream or None if not available.
codesearchnet
def create_media_asset(access_token, name, options='0'): path = '/Assets' endpoint = ''.join([ams_rest_endpoint, path]) body = (((('{"Name": "' + name) + '", "Options": "') + str(options)) + '"}') return do_ams_post(endpoint, path, body, access_token)
Create Media Service Asset. Args: access_token (str): A valid Azure authentication token. name (str): Media Service Asset Name. options (str): Media Service Options. Returns: HTTP response. JSON body.
codesearchnet
def _process_v1_graph_mode_tensor(self, op_type, tensor, debug_tensor, tensor_debug_mode): if op_type in ('Placeholder', 'PlaceholderWithDefault'): self._placeholder_to_debug_tensor[tensor] = debug_tensor return tensor elif tensor_debug_mode == debug_event_pb2.TensorDebugMode.FULL_TENSOR and op_type != 'Const': self._tensor_aliases[debug_tensor.name] = tensor.name return debug_tensor else: with self._symbolic_tensor_counter_lock: identity_name = 'tfdbg_identity_%d' % self._symbolic_tensor_counter identity = array_ops.identity(tensor, name=identity_name) identity.op._add_control_input(debug_tensor.op) self._tensor_aliases[identity.name] = tensor.name return identity
For V1 graph mode, determine what tensor to output from callback. Args: op_type: Type of the op that outputs the original symbolic tensor. tensor: The original output symbolic tensor. debug_tensor: The debugger-instrumented tensor. tensor_debug_mode: Debug mode used, a tfdbg TensorDebugMode enum. Returns: A symbolic tensor to be returned by the dumping op_callback.
github-repos
def _get_api_call(self, function_name, *args): api_call = dedent() % { 'api_call': function_name, 'args': ', '.join(args) } script = '\n'.join((api.API_SCRIPT, api_call)) try: return self._browser.execute_async_script(script) except TimeoutException: raise APIError
Runs an api call with javascript-formatted arguments. Args: function_name: The name of the KindleAPI call to run. *args: Javascript-formatted arguments to pass to the API call. Returns: The result of the API call. Raises: APIError: If the API call fails or times out.
juraj-google-style
def __lt__(self, other: 'TensorFluent') -> 'TensorFluent': return self._binary_op(self, other, tf.less, tf.float32)
Returns a TensorFluent for the less-than relational operator. Args: self: The first operand. other: The second operand.
juraj-google-style
def __toString(self, values): for key in values: if not isinstance(values[key], str): values[key] = str(values[key]) return values
Will replace dict values with string values Args: values (dict): Dictionary of values Returns: Updated values dict
juraj-google-style
def get_gdb_response(self, timeout_sec=DEFAULT_GDB_TIMEOUT_SEC, raise_error_on_timeout=True): self.verify_valid_gdb_subprocess() if (timeout_sec < 0): self.logger.warning('timeout_sec was negative, replacing with 0') timeout_sec = 0 if USING_WINDOWS: retval = self._get_responses_windows(timeout_sec) else: retval = self._get_responses_unix(timeout_sec) if ((not retval) and raise_error_on_timeout): raise GdbTimeoutError(('Did not get response from gdb after %s seconds' % timeout_sec)) else: return retval
Get response from GDB, and block while doing so. If GDB does not have any response ready to be read by timeout_sec, an exception is raised. Args: timeout_sec (float): Maximum time to wait for response. Must be >= 0. Will return after raise_error_on_timeout (bool): Whether an exception should be raised if no response was found after timeout_sec Returns: List of parsed GDB responses, returned from gdbmiparser.parse_response, with the additional key 'stream' which is either 'stdout' or 'stderr' Raises: GdbTimeoutError if response is not received within timeout_sec ValueError if select returned unexpected file number NoGdbProcessError if there is no gdb subprocess running
codesearchnet
def splitext(path): parent_path, pathname = split(path) if pathname.startswith(".") and pathname.count(".") == 1: return path, "" if "." not in pathname: return path, "" pathname, ext = pathname.rsplit(".", 1) path = join(parent_path, pathname) return path, "." + ext
Split the extension from the path. Arguments: path (str): A path to split. Returns: (str, str): A tuple containing the path and the extension. Example: >>> splitext('baz.txt') ('baz', '.txt') >>> splitext('foo/bar/baz.txt') ('foo/bar/baz', '.txt') >>> splitext('foo/bar/.foo') ('foo/bar/.foo', '')
juraj-google-style
def _call_api(self, method, params=None): url = self.url.format(method=method) if not params: params = {'token': self.token} else: params['token'] = self.token logger.debug('Send request to %s', url) response = requests.get(url, params=params).json() if self.verify: if not response['ok']: msg = 'For {url} API returned this bad response {response}' raise Exception(msg.format(url=url, response=response)) return response
Low-level method to call the Slack API. Args: method: {str} method name to call params: {dict} GET parameters The token will always be added
juraj-google-style
def _on_queue_declareok(self, frame): _log.info('Successfully declared the %s queue', frame.method.queue) for binding in self._bindings: if (binding['queue'] == frame.method.queue): for key in binding['routing_keys']: _log.info('Asserting %s is bound to %s with the %s key', binding['queue'], binding['exchange'], key) self._channel.queue_bind(callback=None, queue=binding['queue'], exchange=binding['exchange'], routing_key=key) bc_args = dict(queue=frame.method.queue) if (_pika_version < pkg_resources.parse_version('1.0.0b1')): bc_args['consumer_callback'] = self._on_message else: bc_args['on_message_callback'] = self._on_message tag = self._channel.basic_consume(**bc_args) self._consumers[tag] = binding['queue']
Callback invoked when a queue is successfully declared. Args: frame (pika.frame.Method): The message sent from the server.
codesearchnet
def PrepareMatches(self, file_system): if (self._location is not None): self._location_segments = self._SplitPath(self._location, file_system.PATH_SEPARATOR) elif (self._location_regex is not None): path_separator = file_system.PATH_SEPARATOR if (path_separator == '\\'): path_separator = '\\\\' self._location_segments = self._SplitPath(self._location_regex, path_separator) if (self._location_segments is not None): self._number_of_location_segments = len(self._location_segments)
Prepare find specification for matching. Args: file_system (FileSystem): file system.
codesearchnet
def point_line_distance(point, start, end): if (start == end): return distance(point, start) else: un_dist = abs((((end.lat - start.lat) * (start.lon - point.lon)) - ((start.lat - point.lat) * (end.lon - start.lon)))) n_dist = sqrt((((end.lat - start.lat) ** 2) + ((end.lon - start.lon) ** 2))) if (n_dist == 0): return 0 else: return (un_dist / n_dist)
Distance from a point to a line, formed by two points Args: point (:obj:`Point`) start (:obj:`Point`): line point end (:obj:`Point`): line point Returns: float: distance to line, in degrees
codesearchnet
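A worked example for point_line_distance, using a minimal stand-in for the library's Point type (assumed to expose .lat and .lon) and assuming math.sqrt is imported in the sample's module:

from collections import namedtuple

Point = namedtuple('Point', ['lat', 'lon'])  # hypothetical stand-in for the library type

start, end = Point(0.0, 0.0), Point(0.0, 10.0)  # a line running along the lon axis
p = Point(3.0, 5.0)                             # 3 degrees of latitude off that line
print(point_line_distance(p, start, end))       # 3.0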
def dry_bulb_temperature(self, value=99.9): if value is not None: try: value = float(value) except ValueError: raise ValueError( 'value {} need to be of type float ' 'for field `dry_bulb_temperature`'.format(value)) if value <= -70.0: raise ValueError('value need to be greater -70.0 ' 'for field `dry_bulb_temperature`') if value >= 70.0: raise ValueError('value need to be smaller 70.0 ' 'for field `dry_bulb_temperature`') self._dry_bulb_temperature = value
Corresponds to IDD Field `dry_bulb_temperature` Args: value (float): value for IDD Field `dry_bulb_temperature` Unit: C value > -70.0 value < 70.0 Missing value: 99.9 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def value_shape(self): for serialized_tensor in self.object_proto.attributes: if serialized_tensor.name == constants.VARIABLE_VALUE_KEY: return self._checkpoint.shape_map[serialized_tensor.checkpoint_key] return None
The shape of the VARIABLE_VALUE tensor. Returns: A TensorShape object if found, otherwise None.
github-repos
def get_by(self, field, value): return self._client.get_by(field=field, value=value)
Gets all drive enclosures that match the filter. The search is case-insensitive. Args: field: Field name to filter by. value: Value to filter by. Returns: list: A list of drive enclosures.
juraj-google-style
def get_permissions(cls): perms = [] for (kls_name, kls) in cls.registry.items(): for method_name in cls.__dict__.keys(): if method_name.endswith('_view'): perms.append(('%s.%s' % (kls_name, method_name))) return perms
Generates permissions for all CrudView based class methods. Returns: List of Permission objects.
codesearchnet
def remove(self, x): with tf.name_scope("pad_reduce/remove"): x_shape = x.get_shape().as_list() x = tf.gather_nd( x, indices=self.nonpad_ids, ) if not tf.executing_eagerly(): x.set_shape([None] + x_shape[1:]) return x
Remove padding from the given tensor. Args: x (tf.Tensor): of shape [dim_origin,...] Returns: a tensor of shape [dim_compressed,...] with dim_compressed <= dim_origin
juraj-google-style
def create(self, rs_params): repl_id = rs_params.get('id', None) if repl_id is not None and repl_id in self: raise ReplicaSetError( "replica set with id={id} already exists".format(id=repl_id)) repl = ReplicaSet(rs_params) self[repl.repl_id] = repl return repl.repl_id
Create a new replica set. Args: rs_params: replica set configuration. Returns: repl_id, which can be used to look up the replica set.
juraj-google-style
def FindFileContainingSymbol(self, symbol): symbol = _NormalizeFullyQualifiedName(symbol) try: return self._descriptors[symbol].file except KeyError: pass try: return self._enum_descriptors[symbol].file except KeyError: pass try: return self._FindFileContainingSymbolInDb(symbol) except KeyError: pass try: return self._file_desc_by_toplevel_extension[symbol] except KeyError: pass message_name, _, extension_name = symbol.rpartition('.') try: message = self.FindMessageTypeByName(message_name) assert message.extensions_by_name[extension_name] return message.file except KeyError: raise KeyError('Cannot find a file containing %s' % symbol)
Gets the FileDescriptor for the file containing the specified symbol. Args: symbol: The name of the symbol to search for. Returns: A FileDescriptor that contains the specified symbol. Raises: KeyError: if the file cannot be found in the pool.
juraj-google-style
def frombase(path1, path2): if not isparent(path1, path2): raise ValueError("path1 must be a prefix of path2") return path2[len(path1) :]
Get the final path of ``path2`` that isn't in ``path1``. Arguments: path1 (str): A PyFilesytem path. path2 (str): A PyFilesytem path. Returns: str: the final part of ``path2``. Example: >>> frombase('foo/bar/', 'foo/bar/baz/egg') 'baz/egg'
juraj-google-style
def raw_decrypt(self, ciphertext): if (not isinstance(ciphertext, int)): raise TypeError(('Expected ciphertext to be an int, not: %s' % type(ciphertext))) decrypt_to_p = ((self.l_function(powmod(ciphertext, (self.p - 1), self.psquare), self.p) * self.hp) % self.p) decrypt_to_q = ((self.l_function(powmod(ciphertext, (self.q - 1), self.qsquare), self.q) * self.hq) % self.q) return self.crt(decrypt_to_p, decrypt_to_q)
Decrypt raw ciphertext and return raw plaintext. Args: ciphertext (int): (usually from :meth:`EncryptedNumber.ciphertext()`) that is to be Paillier decrypted. Returns: int: Paillier decryption of ciphertext. This is a positive integer < :attr:`public_key.n`. Raises: TypeError: if ciphertext is not an int.
codesearchnet
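The sample follows the usual CRT-accelerated Paillier decryption; as a sketch of the underlying math (standard textbook notation, not quoted from the library), with L(x, n) = (x - 1) / n:

m_p = L(c^{p-1} \bmod p^2,\; p) \cdot h_p \bmod p
m_q = L(c^{q-1} \bmod q^2,\; q) \cdot h_q \bmod q
m = \operatorname{CRT}(m_p, m_q)

which is what decrypt_to_p, decrypt_to_q and self.crt compute above.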
def all(self, data={}, **kwargs): return super(Payment, self).all(data, **kwargs)
Fetch all Payment entities Returns: Dictionary of Payment data
codesearchnet
def CreateCustomizerFeed(client, feed_name): ad_customizer_feed_service = client.GetService('AdCustomizerFeedService', 'v201809') customizer_feed = {'feedName': feed_name, 'feedAttributes': [{'type': 'STRING', 'name': 'Name'}, {'type': 'STRING', 'name': 'Price'}, {'type': 'DATE_TIME', 'name': 'Date'}]} feed_service_operation = {'operator': 'ADD', 'operand': customizer_feed} response = ad_customizer_feed_service.mutate([feed_service_operation]) if (response and ('value' in response)): feed = response['value'][0] feed_data = {'feedId': feed['feedId'], 'nameId': feed['feedAttributes'][0]['id'], 'priceId': feed['feedAttributes'][1]['id'], 'dateId': feed['feedAttributes'][2]['id']} print('Feed with name "%s" and ID %s was added with:\n\tName attribute ID %s and price attribute ID %s and date attribute ID %s' % (feed['feedName'], feed['feedId'], feed_data['nameId'], feed_data['priceId'], feed_data['dateId'])) return feed else: raise errors.GoogleAdsError('No feeds were added')
Creates a new AdCustomizerFeed. Args: client: an AdWordsClient instance. feed_name: the name for the new AdCustomizerFeed. Returns: The new AdCustomizerFeed.
codesearchnet
def __init__(self, iterable=None, raise_on_duplicate=False): self._list = list() self._dict = dict() if iterable: if raise_on_duplicate: self._extend(iterable) else: self._update(iterable)
Create a setlist, initializing from iterable if present. Args: iterable (Iterable): Values to initialize the setlist with. raise_on_duplicate: Raise a ValueError if any duplicate values are present.
juraj-google-style
def get(self, dash_id): data = json.loads(r_db.hmget(config.DASH_CONTENT_KEY, dash_id)[0]) return build_response(dict(data=data, code=200))
Read dashboard content. Args: dash_id: dashboard id. Returns: A dict containing the content of that dashboard, not include the meta info.
juraj-google-style
def _generate(cls, strategy, params): if cls._meta.abstract: raise errors.FactoryError( "Cannot generate instances of abstract factory %(f)s; " "Ensure %(f)s.Meta.model is set and %(f)s.Meta.abstract " "is either not set or False." % dict(f=cls.__name__)) step = builder.StepBuilder(cls._meta, params, strategy) return step.build()
generate the object. Args: params (dict): attributes to use for generating the object strategy: the strategy to use
juraj-google-style
def _RetryRequest(self, timeout=None, **request_args): while True: try: now = time.time() if (not timeout): timeout = config.CONFIG['Client.http_timeout'] result = requests.request(**request_args) result.raise_for_status() if (not result.ok): raise requests.RequestException(response=result) return ((time.time() - now), result) except IOError as e: self.consecutive_connection_errors += 1 if (self.active_base_url is not None): response = getattr(e, 'response', None) if (getattr(response, 'status_code', None) == 406): raise if (self.consecutive_connection_errors >= self.retry_error_limit): logging.info('Too many connection errors to %s, retrying another URL', self.active_base_url) self.active_base_url = None raise e logging.debug('Unable to connect to frontend. Backing off %s seconds.', self.error_poll_min) self.Wait(self.error_poll_min) else: raise e
Retry the request a few times before we determine it failed. Sometimes the frontend becomes loaded and issues a 500 error to throttle the clients. We wait Client.error_poll_min seconds between each attempt to back off the frontend. Note that this does not affect any timing algorithm in the client itself which is controlled by the Timer() class. Args: timeout: Timeout for retry. **request_args: Args to the requests.request call. Returns: a tuple of duration, urllib.request.urlopen response.
codesearchnet
def __init__(self, wmin, hmin, wmax=None, hmax=None, max_aspect_ratio=None): if max_aspect_ratio is None: max_aspect_ratio = 9999999 self._init(locals())
Randomly crop a box of shape (h, w), sampled from [min, max] (both inclusive). If max is None, will use the input image shape. Args: wmin, hmin, wmax, hmax: range to sample shape. max_aspect_ratio (float): the upper bound of ``max(w,h)/min(w,h)``.
juraj-google-style
def get_node_ip_address(address="8.8.8.8:53"): ip_address, port = address.split(":") s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: s.connect((ip_address, int(port))) node_ip_address = s.getsockname()[0] except Exception as e: node_ip_address = "127.0.0.1" if e.errno == 101: try: host_name = socket.getfqdn(socket.gethostname()) node_ip_address = socket.gethostbyname(host_name) except Exception: pass finally: s.close() return node_ip_address
Determine the IP address of the local node. Args: address (str): The IP address and port of any known live service on the network you care about. Returns: The IP address of the current node.
juraj-google-style
def build_graph(device, input_shape, axes, num_layers, mode, scale, train): moment_shape = [] keep_dims = mode == 'py' or mode == 'slow' if keep_dims: for axis in range(len(input_shape)): if axis in axes: moment_shape.append(1) else: moment_shape.append(input_shape[axis]) else: for axis in range(len(input_shape)): if axis not in axes: moment_shape.append(input_shape[axis]) with ops.device('/%s:0' % device): tensor = variables.Variable(random_ops.truncated_normal(input_shape)) for _ in range(num_layers): if train: mean, variance = nn_impl.moments(tensor, axes, keep_dims=keep_dims) else: mean = array_ops.zeros(moment_shape) variance = array_ops.ones(moment_shape) beta = variables.Variable(array_ops.zeros(moment_shape)) gamma = variables.Variable(constant_op.constant(1.0, shape=moment_shape)) if mode == 'py': tensor = batch_norm_py(tensor, mean, variance, beta, gamma, scale) elif mode == 'op': tensor = batch_norm_op(tensor, mean, variance, beta, gamma, scale) elif mode == 'slow': tensor = batch_norm_slow(tensor, mean, variance, beta, gamma, scale) if train: return gradients_impl.gradients([tensor], variables.trainable_variables()) else: return [tensor]
Build a graph containing a sequence of batch normalizations. Args: device: string, the device to run on. input_shape: shape of the input tensor. axes: axes that are to be normalized across. num_layers: number of batch normalization layers in the graph. mode: "op", "py" or "slow" depending on the implementation. scale: scale after normalization. train: if true, also run backprop. Returns: An array of tensors to run()
github-repos
def import_certificate(self, certificate_data, bay_number=None): uri = "{}/https/certificaterequest".format(self.data['uri']) if bay_number: uri += "?bayNumber=%d" % (bay_number) headers = {'Content-Type': 'application/json'} return self._helper.do_put(uri, certificate_data, -1, headers)
Imports a signed server certificate into the enclosure. Args: certificate_data: Dictionary with Signed certificate and type. bay_number: OA to which the signed certificate will be imported. Returns: Enclosure.
juraj-google-style
def apply( self, func, axis=0, broadcast=None, raw=False, reduce=None, result_type=None, convert_dtype=True, args=(), **kwds ): axis = self._get_axis_number(axis) ErrorMessage.non_verified_udf() if isinstance(func, string_types): if axis == 1: kwds["axis"] = axis result = self._string_function(func, *args, **kwds) if isinstance(result, BasePandasDataset): return result._query_compiler return result elif isinstance(func, dict): if axis == 1: raise TypeError( "(\"'dict' object is not callable\", " "'occurred at index {0}'".format(self.index[0]) ) if len(self.columns) != len(set(self.columns)): warnings.warn( "duplicate column names not supported with apply().", FutureWarning, stacklevel=2, ) elif not callable(func) and not is_list_like(func): raise TypeError("{} object is not callable".format(type(func))) query_compiler = self._query_compiler.apply(func, axis, *args, **kwds) return query_compiler
Apply a function along input axis of DataFrame. Args: func: The function to apply axis: The axis over which to apply the func. broadcast: Whether or not to broadcast. raw: Whether or not to convert to a Series. reduce: Whether or not to try to apply reduction procedures. Returns: Series or DataFrame, depending on func.
juraj-google-style
def commutator(A, B=None): if B: return A * B - B * A return SPre(A) - SPost(A)
Commutator of `A` and `B` If ``B != None``, return the commutator :math:`[A,B]`, otherwise return the super-operator :math:`[A,\cdot]`. The super-operator :math:`[A,\cdot]` maps any other operator ``B`` to the commutator :math:`[A, B] = A B - B A`. Args: A: The first operator to form the commutator of. B: The second operator to form the commutator of, or None. Returns: SuperOperator: The linear superoperator :math:`[A,\cdot]`
juraj-google-style
def create_in_hdx(self): self.check_required_fields() if (not self._update_resource_view(log=True)): self._save_to_hdx('create', 'title')
Check if resource view exists in HDX and if so, update it, otherwise create resource view Returns: None
codesearchnet
def setEditorData(self, spinBox, index): if index.isValid(): value = index.model().data(index, QtCore.Qt.EditRole) spinBox.setValue(value)
Sets the data to be displayed and edited by the editor from the data model item specified by the model index. Args: spinBox (BigIntSpinbox): editor widget. index (QModelIndex): model data index.
juraj-google-style
def decode_function_result(self, function_name, data): description = self.function_data[function_name] arguments = decode_abi(description['decode_types'], data) return arguments
Return the function call result decoded. Args: function_name (str): One of the existing functions described in the contract interface. data (bin): The encoded result from calling `function_name`. Return: List[object]: The values returned by the call to `function_name`.
juraj-google-style
def incident(self, name, owner=None, **kwargs): return Incident(self.tcex, name, owner=owner, **kwargs)
Create the Incident TI object. Args: owner: name: **kwargs: Return:
codesearchnet
def _build(self, input_sequence, state): input_shape = input_sequence.get_shape() if (input_shape[0] is None): raise ValueError('Time dimension of input (dim 0) must be staticallyknown.') seq_length = int(input_shape[0]) (forward_state, backward_state) = state output_sequence_f = [] output_sequence_b = [] with tf.name_scope('forward_rnn'): core_state = forward_state for i in six.moves.range(seq_length): (core_output, core_state) = self._forward_core(input_sequence[(i, :)], core_state) output_sequence_f.append((core_output, core_state)) output_sequence_f = nest.map_structure((lambda *vals: tf.stack(vals)), *output_sequence_f) with tf.name_scope('backward_rnn'): core_state = backward_state for i in six.moves.range((seq_length - 1), (- 1), (- 1)): (core_output, core_state) = self._backward_core(input_sequence[(i, :)], core_state) output_sequence_b.append((core_output, core_state)) output_sequence_b = nest.map_structure((lambda *vals: tf.stack(vals)), *output_sequence_b) return {'outputs': {'forward': output_sequence_f[0], 'backward': output_sequence_b[0]}, 'state': {'forward': output_sequence_f[1], 'backward': output_sequence_b[1]}}
Connects the BidirectionalRNN module into the graph. Args: input_sequence: tensor (time, batch, [feature_1, ..]). It must be time_major. state: tuple of states for the forward and backward cores. Returns: A dict with forward/backward states and output sequences: "outputs":{ "forward": ..., "backward": ...}, "state": { "forward": ..., "backward": ...} Raises: ValueError: in case time dimension is not statically known.
codesearchnet
def charspan(cls, start, end): return cls(Lnk.CHARSPAN, (int(start), int(end)))
Create a Lnk object for a character span. Args: start: the initial character position (cfrom) end: the final character position (cto)
codesearchnet
def get_signature_defs(tflite_model): model = tflite_model if not isinstance(tflite_model, bytearray): model = bytearray(tflite_model) serialized_signature_def_map = signature_def_util.GetSignatureDefMap(model) def _deserialize(serialized): signature_def = meta_graph_pb2.SignatureDef() signature_def.ParseFromString(serialized) return signature_def return {k: _deserialize(v) for k, v in serialized_signature_def_map.items()}
Get SignatureDef dict from the Metadata of a TfLite flatbuffer buffer. Args: tflite_model: TFLite model buffer to get the signature_def. Returns: dict containing serving names to SignatureDefs if exists, otherwise, empty dict. Raises: ValueError: tflite_model buffer does not contain a valid TFLite model. DecodeError: SignatureDef cannot be parsed from TfLite SignatureDef metadata.
github-repos
def send_invitation(self, invitation, **kwargs): return self.email_message(invitation.invitee_identifier, self.invitation_subject, self.invitation_body, invitation.invited_by, **kwargs).send()
Sends an invitation message for a specific invitation. This could be overridden to do other things, such as sending a confirmation email to the sender. Args: invitation: Returns:
codesearchnet
def imflip(img, direction='horizontal'): assert (direction in ['horizontal', 'vertical']) if (direction == 'horizontal'): return np.flip(img, axis=1) else: return np.flip(img, axis=0)
Flip an image horizontally or vertically. Args: img (ndarray): Image to be flipped. direction (str): The flip direction, either "horizontal" or "vertical". Returns: ndarray: The flipped image.
codesearchnet
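A small illustration of the axis semantics used by imflip above:

import numpy as np

img = np.array([[1, 2, 3],
                [4, 5, 6]])
imflip(img)                # horizontal: columns reversed -> [[3, 2, 1], [6, 5, 4]]
imflip(img, 'vertical')    # vertical: rows reversed      -> [[4, 5, 6], [1, 2, 3]]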
def stop_app(self, package_name, clear=False): if clear: self.adb_shell(['pm', 'clear', package_name]) else: self.adb_shell(['am', 'force-stop', package_name]) return self
Stop application Args: package_name: string like com.example.app1 clear: bool, remove user data Returns: None
juraj-google-style
def download(self, file: Optional[IO[bytes]]=None, duration_timeout: Optional[float]=None): yield from \ self._current_session.download(file, duration_timeout=duration_timeout)
Download content. Args: file: An optional file object for the document contents. duration_timeout: Maximum time in seconds of which the entire file must be read. Returns: Response: An instance of :class:`.http.request.Response`. See :meth:`WebClient.session` for proper usage of this function. Coroutine.
juraj-google-style
def to_representation(self, instance): if self.id_only(): return instance.pk pk = getattr(instance, 'pk', None) if ((not settings.ENABLE_SERIALIZER_OBJECT_CACHE) or (pk is None)): return self._to_representation(instance) else: if (pk not in self.obj_cache): self.obj_cache[pk] = self._to_representation(instance) return self.obj_cache[pk]
Modified to_representation method. Optionally may cache objects. Arguments: instance: A model instance or data object. Returns: Instance ID if the serializer is meant to represent its ID. Otherwise, a tagged data dict representation.
codesearchnet
def __init__(self, param_specs, non_tensor_params, prefer_static_fields): self._param_specs = param_specs self._non_tensor_params = non_tensor_params self._prefer_static_fields = prefer_static_fields
Initializes a new `_LinearOperatorSpec`. Args: param_specs: Python `dict` of `tf.TypeSpec` instances that describe kwargs to the `LinearOperator`'s constructor that are `Tensor`-like or `CompositeTensor` subclasses. non_tensor_params: Python `dict` containing non-`Tensor` and non- `CompositeTensor` kwargs to the `LinearOperator`'s constructor. prefer_static_fields: Python `tuple` of strings corresponding to the names of `Tensor`-like args to the `LinearOperator`s constructor that may be stored as static values, if known. These are typically shapes, indices, or axis values.
github-repos
def greater(x1, x2): if any_symbolic_tensors((x1, x2)): return Greater().symbolic_call(x1, x2) return backend.numpy.greater(x1, x2)
Return the truth value of `x1 > x2` element-wise. Args: x1: First input tensor. x2: Second input tensor. Returns: Output tensor, element-wise comparison of `x1` and `x2`.
github-repos
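For reference, this function is typically reached through the public keras.ops namespace; a tiny illustrative call (inputs are made up):

from keras import ops

ops.greater([1, 2, 3], [2, 2, 2])  # element-wise -> [False, False, True]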
def _execute(self, command, params=None): if not params: params = {} params['id'] = self._id return self._parent.execute(command, params)
Executes a command against the underlying HTML element. Args: command: The name of the command to _execute as a string. params: A dictionary of named parameters to send with the command. Returns: The command's JSON response loaded into a dictionary object.
juraj-google-style
def getmtime(self, path): try: file_obj = self.filesystem.resolve(path) return file_obj.st_mtime except IOError: self.filesystem.raise_os_error(errno.ENOENT, winerror=3)
Returns the modification time of the fake file. Args: path: the path to fake file. Returns: (int, float) the modification time of the fake file in number of seconds since the epoch. Raises: OSError: if the file does not exist.
juraj-google-style
def get_results(self) -> Iterable[PluginScanResult]: for _ in range(self._get_current_processes_nb()): self._task_queue.put(None) for (hostname, hostname_queue) in self._hostname_queues_dict.items(): for i in range(len(self._processes_dict[hostname])): hostname_queue.put(None) received_task_results = 0 expected_task_results = (self._queued_tasks_nb + self._get_current_processes_nb()) while (received_task_results != expected_task_results): result = self._result_queue.get() self._result_queue.task_done() received_task_results += 1 if (result is None): pass else: (yield result) self._task_queue.join() self._result_queue.join() for hostname_queue in self._hostname_queues_dict.values(): hostname_queue.join() for process_list in self._processes_dict.values(): for process in process_list: process.join()
Return the result of previously queued scan commands; new commands cannot be queued once this is called. Returns: The results of all the scan commands previously queued. Each result will be an instance of the corresponding scan command's PluginScanResult subclass. If there was an unexpected error while running the scan command, it will be a 'PluginRaisedExceptionScanResult' instance instead.
codesearchnet
def after_request(response): response.headers.add('Access-Control-Allow-Origin', '*') response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization') response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE') return response
Modifies the response object prior to sending it to the client. Used to add CORS headers to the request Args: response (response): Flask response object Returns: `None`
codesearchnet
def build_dataset(instruction_dicts, dataset_from_file_fn, shuffle_files=False, parallel_reads=64): if _no_examples_skipped(instruction_dicts): instruction_ds = tf.data.Dataset.from_tensor_slices([d['filepath'] for d in instruction_dicts]) build_ds_from_instruction = dataset_from_file_fn else: instruction_ds = _build_instruction_ds(instruction_dicts) build_ds_from_instruction = functools.partial(_build_ds_from_instruction, ds_from_file_fn=dataset_from_file_fn) if shuffle_files: instruction_ds = instruction_ds.shuffle(len(instruction_dicts)) ds = instruction_ds.interleave(build_ds_from_instruction, cycle_length=parallel_reads, num_parallel_calls=tf.data.experimental.AUTOTUNE) return ds
Constructs a `tf.data.Dataset` from TFRecord files. Args: instruction_dicts: `list` of {'filepath':, 'mask':, 'offset_mask':} containing the information about which files and which examples to use. The boolean mask will be repeated and zipped with the examples from filepath. dataset_from_file_fn: function returning a `tf.data.Dataset` given a filename. shuffle_files: `bool`, Whether to shuffle the input filenames. parallel_reads: `int`, how many files to read in parallel. Returns: `tf.data.Dataset`
codesearchnet
def _set_value(instance_to_path_map, path_to_instance_map, prop_tree, config_instance): path = instance_to_path_map[config_instance] group = prop_tree for elem in path[:-1]: group = getattr(group, elem) assert group._key == config_instance.parent.key setattr(group, config_instance.key, config_instance.value) term = getattr(group, config_instance.key) try: if hasattr(term, '_term'): term._term._config = config_instance return except KeyError: pass try: if hasattr(term, '_config'): term._config = config_instance return except KeyError: pass else: pass
Finds appropriate term in the prop_tree and sets its value from config_instance. Args: configs_map (dict): key is id of the config, value is Config instance (AKA cache of the configs) prop_tree (PropertyDictTree): property tree to populate. config_instance (Config):
juraj-google-style
def _add_session(self, session, start_info, groups_by_name): group_name = (start_info.group_name or session.name) if (group_name in groups_by_name): groups_by_name[group_name].sessions.extend([session]) else: group = api_pb2.SessionGroup(name=group_name, sessions=[session], monitor_url=start_info.monitor_url) for (key, value) in six.iteritems(start_info.hparams): group.hparams[key].CopyFrom(value) groups_by_name[group_name] = group
Adds a new Session protobuffer to the 'groups_by_name' dictionary. Called by _build_session_groups when we encounter a new session. Creates the Session protobuffer and adds it to the relevant group in the 'groups_by_name' dict. Creates the session group if this is the first time we encounter it. Args: session: api_pb2.Session. The session to add. start_info: The SessionStartInfo protobuffer associated with the session. groups_by_name: A str to SessionGroup protobuffer dict. Representing the session groups and sessions found so far.
codesearchnet
def call(self, sequence_output: tf.Tensor) -> tf.Tensor: logits = (tf.einsum('bsj,j->bs', sequence_output, self.output_weights) + self.output_bias) / self.temperature return logits
Computes logits per token Args: sequence_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model. Returns: logits (`tf.Tensor` of shape `(batch_size, sequence_length)`): Logits per token.
github-repos
def retrieve_bazel_version(): bazel_executable = shutil.which('bazel') if bazel_executable is None: bazel_executable = shutil.which('bazelisk') if bazel_executable is None: print('Cannot find bazel. Please install bazel/bazelisk.') sys.exit(1) stderr = open(os.devnull, 'wb') curr_version = run_shell([bazel_executable, '--version'], allow_non_zero=True, stderr=stderr) if curr_version.startswith('bazel '): curr_version = curr_version.split('bazel ')[1] curr_version_int = convert_version_to_int(curr_version) if not curr_version_int: print('WARNING: current bazel installation is not a release version.') return curr_version print('You have bazel %s installed.' % curr_version) return curr_version
Retrieve installed bazel version (or bazelisk). Returns: The bazel version detected.
github-repos
def to_graphviz(self) -> str: graph = 'digraph finite_state_machine { rankdir=LR; node [fixedsize=true];' for (origin, dest) in self._transitions.items(): origin = origin.replace(' ', '_') for d in dest: d = d.replace(' ', '_') graph += '{0} -> {1};'.format(origin, d) graph += '}' return graph
Converts the FSM behaviour structure to Graphviz syntax Returns: str: the graph in Graphviz syntax
codesearchnet
def add_node(self, binary_descriptor): try: node_string = parse_binary_descriptor(binary_descriptor) except: self._logger.exception("Error parsing binary node descriptor: %s", binary_descriptor) return _pack_sgerror(SensorGraphError.INVALID_NODE_STREAM) try: self.graph.add_node(node_string) except NodeConnectionError: return _pack_sgerror(SensorGraphError.STREAM_NOT_IN_USE) except ProcessingFunctionError: return _pack_sgerror(SensorGraphError.INVALID_PROCESSING_FUNCTION) except ResourceUsageError: return _pack_sgerror(SensorGraphError.NO_NODE_SPACE_AVAILABLE) return Error.NO_ERROR
Add a node to the sensor_graph using a binary node descriptor. Args: binary_descriptor (bytes): An encoded binary node descriptor. Returns: int: A packed error code.
juraj-google-style
def decode_dict(value_fields, client): return {key: decode_value(value, client) for (key, value) in six.iteritems(value_fields)}
Converts a protobuf map of Firestore ``Value``-s. Args: value_fields (google.protobuf.pyext._message.MessageMapContainer): A protobuf map of Firestore ``Value``-s. client (~.firestore_v1beta1.client.Client): A client that has a document factory. Returns: Dict[str, Union[NoneType, bool, int, float, datetime.datetime, \ str, bytes, dict, ~google.cloud.Firestore.GeoPoint]]: A dictionary of native Python values converted from the ``value_fields``.
codesearchnet
def format_sympy_expr(sympy_expr, functions=None): if (functions is None): functions = {} str_expr = str(sympy_expr) result = str_expr.replace(' ', '') for (fn_name, char) in six.iteritems(functions): result = result.replace(fn_name, char) return result
Convert sympy expression into a string which can be encoded. Args: sympy_expr: Any sympy expression tree or string. functions: Defines special functions. A dict mapping human readable string names, like "log", "exp", "sin", "cos", etc., to single chars. Each function gets a unique token, like "L" for "log". Returns: A string representation of the expression suitable for encoding as a sequence input.
codesearchnet
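A small usage sketch, assuming sympy is installed and format_sympy_expr above is in scope; the 'log' -> 'L' mapping is just an example token:

import sympy

x = sympy.Symbol('x')
expr = sympy.log(x) + 1  # str(expr) == 'log(x) + 1'

# Spaces are stripped and 'log' is replaced by its single-character token.
print(format_sympy_expr(expr, functions={'log': 'L'}))  # L(x)+1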
def get_params(width, height, distortion_scale): half_height = int((height / 2)) half_width = int((width / 2)) topleft = (random.randint(0, int((distortion_scale * half_width))), random.randint(0, int((distortion_scale * half_height)))) topright = (random.randint(((width - int((distortion_scale * half_width))) - 1), (width - 1)), random.randint(0, int((distortion_scale * half_height)))) botright = (random.randint(((width - int((distortion_scale * half_width))) - 1), (width - 1)), random.randint(((height - int((distortion_scale * half_height))) - 1), (height - 1))) botleft = (random.randint(0, int((distortion_scale * half_width))), random.randint(((height - int((distortion_scale * half_height))) - 1), (height - 1))) startpoints = [(0, 0), ((width - 1), 0), ((width - 1), (height - 1)), (0, (height - 1))] endpoints = [topleft, topright, botright, botleft] return (startpoints, endpoints)
Get parameters for ``perspective`` for a random perspective transform. Args: width : width of the image. height : height of the image. distortion_scale : degree of distortion; each corner is displaced by at most distortion_scale times half the image size. Returns: List containing [top-left, top-right, bottom-right, bottom-left] of the original image, List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.
codesearchnet
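A quick usage sketch, assuming get_params above is in scope (the RNG is seeded so corners are reproducible on a given Python version); only the startpoints are deterministic:

import random

random.seed(0)
startpoints, endpoints = get_params(width=100, height=100, distortion_scale=0.5)
print(startpoints)  # [(0, 0), (99, 0), (99, 99), (0, 99)] -- the original corners
print(endpoints)    # four corners, each randomly pulled inward by at most
                    # distortion_scale * half the image size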
def cdf(self, value, name='cdf'): return self._call_cdf(value, name)
Cumulative distribution function. Given random variable `X`, the cumulative distribution function `cdf` is: ```none cdf(x) := P[X <= x] ``` Args: value: `float` or `double` `Tensor`. name: Python `str` prepended to names of ops created by this function. Returns: cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type `self.dtype`.
github-repos
def take_at_most_n_seconds(time_s, func, *args, **kwargs): thread = threading.Thread(target=func, args=args, kwargs=kwargs) thread.start() thread.join(time_s) if thread.is_alive(): return False return True
Runs func in a thread and returns whether the call finished in less than time_s seconds. NOTE: The function call is not killed and will run indefinitely if hung. Args: time_s: Maximum amount of time to take. func: Function to call. *args: Arguments to call the function with. **kwargs: Keyword arguments to call the function with. Returns: True if the function finished in less than time_s seconds.
juraj-google-style
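A usage sketch, assuming the helper above is in scope; note the second call leaves its worker thread sleeping in the background, as the NOTE warns:

import time

print(take_at_most_n_seconds(1.0, time.sleep, 0.1))  # True  -- finished in time
print(take_at_most_n_seconds(0.1, time.sleep, 1.0))  # False -- still running when we gave up waiting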
def simple_lmdb_settings(path, map_size=1e9, user_supplied_id=False): def decorator(cls): provider = \ ff.UserSpecifiedIdProvider(key='_id') \ if user_supplied_id else ff.UuidProvider() class Settings(ff.PersistenceSettings): id_provider = provider key_builder = ff.StringDelimitedKeyBuilder('|') database = ff.LmdbDatabase( path, key_builder=key_builder, map_size=map_size) class Model(cls, Settings): pass Model.__name__ = cls.__name__ Model.__module__ = cls.__module__ return Model return decorator
Creates a decorator that can be used to configure sane default LMDB persistence settings for a model. Args: path (str): The path where the LMDB database files will be created map_size (int): The amount of space to allot for the database user_supplied_id (bool): If True, ids are taken from the user-specified '_id' key instead of being generated as UUIDs Returns: A class decorator that mixes the persistence settings into the decorated model class
juraj-google-style
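A rough usage sketch only; the base class and any feature declarations depend on the featureflow API and are intentionally omitted, so treat the class body as a placeholder rather than a working model definition:

@simple_lmdb_settings('/tmp/example_db', map_size=1e10, user_supplied_id=True)
class Document(object):
    # Placeholder body: a real model would subclass the appropriate featureflow
    # model base class and declare its features here.
    pass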
def ProcessNewBlock(self, block): added = set() changed = set() deleted = set() try: for tx in block.FullTransactions: for index, output in enumerate(tx.outputs): state = self.CheckAddressState(output.ScriptHash) if state & AddressState.InWallet > 0: key = CoinReference(tx.Hash, index) if key in self._coins.keys(): coin = self._coins[key] coin.State |= CoinState.Confirmed changed.add(coin) else: newcoin = Coin.CoinFromRef(coin_ref=key, tx_output=output, state=CoinState.Confirmed, transaction=tx) self._coins[key] = newcoin added.add(newcoin) if state & AddressState.WatchOnly > 0: self._coins[key].State |= CoinState.WatchOnly changed.add(self._coins[key]) for tx in block.FullTransactions: for input in tx.inputs: if input in self._coins.keys(): if self._coins[input].Output.AssetId == Blockchain.SystemShare().Hash: coin = self._coins[input] coin.State |= CoinState.Spent | CoinState.Confirmed changed.add(coin) else: deleted.add(self._coins[input]) del self._coins[input] for claimTx in [tx for tx in block.Transactions if tx.Type == TransactionType.ClaimTransaction]: for ref in claimTx.Claims: if ref in self._coins.keys(): deleted.add(self._coins[ref]) del self._coins[ref] self._current_height += 1 self.OnProcessNewBlock(block, added, changed, deleted) if len(added) + len(deleted) + len(changed) > 0: self.BalanceChanged() except Exception as e: traceback.print_stack() traceback.print_exc() logger.error("could not process %s " % e)
Processes a block on the blockchain. This should be done in sequential order, i.e. block 4 should only be processed after block 3. Args: block: (neo.Core.Block) a block on the blockchain.
juraj-google-style
def _copy_trackable_to_cpu(self, object_map): del object_map raise NotImplementedError('Need to implement _copy_trackable_to_cpu() if the Trackable requires AsyncCheckpoint support.')
Creates a copy of this object onto CPU, also copies values over. Needs to be overridden if the `Trackable` requires AsyncCheckpoint support. The method first checks whether a copy of `self` is already created in `object_map`, and creates one if not already created. Then the method copies the **values** of itself over to its copy mapped by `object_map`. Args: object_map: A dictionary that maps original Trackables to the copied Trackables, which reside in the CPU.
github-repos
def GetFilter(cls, filter_name): try: filt_cls = cls.GetPlugin(filter_name) except KeyError: raise DefinitionError(('Filter %s does not exist.' % filter_name)) return filt_cls()
Return an initialized filter. Only initialize filters once. Args: filter_name: The name of the filter, as a string. Returns: an initialized instance of the filter. Raises: DefinitionError if the type of filter has not been defined.
codesearchnet
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: assert already_has_special_tokens and token_ids_1 is None, 'You cannot use ``already_has_special_tokens=False`` with this tokenizer. Please use a slow (full python) tokenizer to activate this argument. Or set `return_special_tokens_mask=True` when calling the encoding method to get the special tokens mask in any tokenizer. ' all_special_ids = self.all_special_ids special_tokens_mask = [1 if token in all_special_ids else 0 for token in token_ids_0] return special_tokens_mask
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods. Args: token_ids_0 (`List[int]`): List of ids of the first sequence. token_ids_1 (`List[int]`, *optional*): List of ids of the second sequence. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
github-repos
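The heart of the method is the list comprehension over all_special_ids; a standalone illustration with made-up ids (0 and 2 standing in for a tokenizer's bos/eos):

all_special_ids = {0, 2}          # hypothetical special token ids
token_ids_0 = [0, 31414, 232, 2]  # hypothetical encoded sequence

special_tokens_mask = [1 if token in all_special_ids else 0 for token in token_ids_0]
print(special_tokens_mask)  # [1, 0, 0, 1]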
def add_trunk_group(self, intf, value): string = 'switchport trunk group {}'.format(value) return self.configure_interface(intf, string)
Adds the specified trunk group to the interface Args: intf (str): The interface name to apply the trunk group to value (str): The trunk group value to apply to the interface Returns: True if the operation was successfully applied, otherwise False
juraj-google-style
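A standalone sketch using a stand-in class (FakeSwitchports is invented for illustration) to show the CLI string the method builds; the real resource object and how it is obtained from the node are not shown here:

class FakeSwitchports(object):
    """Stand-in resource that records the CLI it would send."""

    def configure_interface(self, intf, string):
        print('interface {} -> {}'.format(intf, string))
        return True

    # Same logic as add_trunk_group above.
    def add_trunk_group(self, intf, value):
        string = 'switchport trunk group {}'.format(value)
        return self.configure_interface(intf, string)

FakeSwitchports().add_trunk_group('Ethernet1', 'tg-uplink')
# interface Ethernet1 -> switchport trunk group tg-uplink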
def get_decomposition_energy(self, entry, pH, V): if self._multielement and not isinstance(entry, MultiEntry): possible_entries = self._generate_multielement_entries( self._filtered_entries, forced_include=[entry]) if entry.phase_type == "solid": possible_entries = [e for e in possible_entries if e.phase_type.count("Solid") == 1] possible_energies = [e.normalized_energy_at_conditions(pH, V) for e in possible_entries] else: possible_energies = [entry.normalized_energy_at_conditions(pH, V)] min_energy = np.min(possible_energies, axis=0) hull = self.get_hull_energy(pH, V) return min_energy - hull
Finds decomposition to most stable entry Args: entry (PourbaixEntry): PourbaixEntry corresponding to compound to find the decomposition for pH (float): pH at which to find the decomposition V (float): voltage at which to find the decomposition Returns: Decomposition energy of the entry, i.e. its normalized energy minus the hull energy at the given pH and V
juraj-google-style
def extend(self, base: 'KeySpec') -> 'KeySpec':
    # NOTE (editorial placeholder): the body of this interface method is not
    # included in the snippet; raising NotImplementedError keeps the stub
    # syntactically valid without guessing the real implementation.
    raise NotImplementedError()
Extend base key specification and returns self. NOTE(daiyip): When a ``Field`` extends a base Field (from a base schema), it calls ``extend`` on both its ``KeySpec`` and ``ValueSpec``. ``KeySpec.extend`` is to determine whether the ``Field`` key is allowed to be extended, and ``ValueSpec.extend`` is to determine the final ``ValueSpec`` after extension. Args: base: A base ``KeySpec`` object. Returns: An ``KeySpec`` object derived from this key spec by extending the base.
github-repos
def get_summary_description(node_def): if node_def.op != 'TensorSummary': raise ValueError("Can't get_summary_description on %s" % node_def.op) description_str = _compat.as_str_any(node_def.attr['description'].s) summary_description = SummaryDescription() _json_format.Parse(description_str, summary_description) return summary_description
Given a TensorSummary node_def, retrieve its SummaryDescription. When a Summary op is instantiated, a SummaryDescription of associated metadata is stored in its NodeDef. This method retrieves the description. Args: node_def: the node_def_pb2.NodeDef of a TensorSummary op Returns: a summary_pb2.SummaryDescription Raises: ValueError: if the node is not a summary op. @compatibility(eager) Not compatible with eager execution. To write TensorBoard summaries under eager execution, use `tf.contrib.summary` instead. @end_compatibility
github-repos
def gaussian(duration: int, amp: complex, sigma: float, name: str=None) -> SamplePulse: center = (duration / 2) zeroed_width = (duration + 2) return _sampled_gaussian_pulse(duration, amp, center, sigma, zeroed_width=zeroed_width, rescale_amp=True, name=name)
r"""Generates unnormalized gaussian `SamplePulse`. Centered at `duration/2` and zeroed at `t=-1` to prevent large initial discontinuity. Applies `left` sampling strategy to generate discrete pulse from continuous function. Integrated area under curve is $\Omega_g(amp, sigma) = amp \times np.sqrt(2\pi \sigma^2)$ Args: duration: Duration of pulse. Must be greater than zero. amp: Pulse amplitude at `duration/2`. sigma: Width (standard deviation) of pulse. name: Name of pulse.
codesearchnet
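For intuition only — not the library's _sampled_gaussian_pulse — a standalone numpy sketch of a gaussian sampled at integer times and centered at duration/2:

import numpy as np

def gaussian_samples_sketch(duration, amp, sigma):
    # Illustration: sample amp * exp(-(t - center)^2 / (2 sigma^2)) at t = 0..duration-1.
    t = np.arange(duration)
    center = duration / 2
    return amp * np.exp(-0.5 * ((t - center) / sigma) ** 2)

samples = gaussian_samples_sketch(duration=16, amp=0.5, sigma=4)
print(samples.round(3))  # small at the edges, peaking near the center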
def _get_model_info(func, parent_class): from transformers.models import auto as auto_module if parent_class is not None: model_name_lowercase = get_model_name(parent_class) else: model_name_lowercase = get_model_name(func) if model_name_lowercase and model_name_lowercase not in getattr(getattr(auto_module, PLACEHOLDER_TO_AUTO_MODULE['config_class'][0]), PLACEHOLDER_TO_AUTO_MODULE['config_class'][1]): model_name_lowercase = model_name_lowercase.replace('_', '-') class_name = func.__qualname__.split('.')[0] if model_name_lowercase is None: config_class = None else: try: config_class = getattr(getattr(auto_module, PLACEHOLDER_TO_AUTO_MODULE['config_class'][0]), PLACEHOLDER_TO_AUTO_MODULE['config_class'][1])[model_name_lowercase] except KeyError: if model_name_lowercase in HARDCODED_CONFIG_FOR_MODELS: config_class = HARDCODED_CONFIG_FOR_MODELS[model_name_lowercase] else: config_class = 'ModelConfig' print(f'🚨 Config not found for {model_name_lowercase}. You can manually add it to HARDCODED_CONFIG_FOR_MODELS in utils/args_doc.py') return (model_name_lowercase, class_name, config_class)
Extract model information from a function or its parent class. Args: func (`function`): The function to extract information from parent_class (`class`): Optional parent class of the function
github-repos
def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads)
Prunes heads of the model. Args: heads_to_prune (`dict`): See base class `PreTrainedModel`. The input dictionary must have the following format: {layer_num: list of heads to prune in this layer}
github-repos
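The expected shape of heads_to_prune, shown standalone (the loop below mirrors the method body, with a print standing in for the per-layer prune_heads call):

heads_to_prune = {0: [1, 2], 2: [0]}  # prune heads 1 and 2 of layer 0, head 0 of layer 2

for layer, heads in heads_to_prune.items():
    # In the real method: self.encoder.layer[layer].attention.prune_heads(heads)
    print('layer {}: pruning heads {}'.format(layer, heads))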