Dataset columns:
    code: string (length 20 to 4.93k)
    docstring: string (length 33 to 1.27k)
    source: string (3 classes)
def set_property(self, key, value):
    value_type = type(value)
    if value_type not in [str, int, bool]:
        raise NotImplementedError(
            'Only string, integer, and boolean properties are implemented')
    key_object = self.properties.findChild(name='key', text=key)
    if key_object is None:
        key_object = self.soup.new_tag('key')
        key_object.string = key
        self.properties.append(key_object)
        value_object = self.soup.new_tag(
            {str: 'string', int: 'integer', bool: str(value).lower()}[value_type])
        if value_type is not bool:
            value_object.string = str(value)
        self.properties.append(value_object)
        return
    value_object = key_object.find_next_sibling()
    key_object.decompose()
    value_object.decompose()
    self.set_property(key, value)
Set a new (or update an existing) key/value pair. Args: key: A string containing the key namespace value: A str, int, or bool value Raises: NotImplementedError: an unsupported value type was provided
juraj-google-style
def local_file(self, filename):
    LOG.info('Retrieving "%s" from "%s".', filename, self.runway_dir)
    file_contents = ''
    file_path = os.path.join(self.runway_dir, filename)
    try:
        with open(file_path, 'rt') as lookup_file:
            file_contents = lookup_file.read()
    except FileNotFoundError:
        LOG.warning('File missing "%s".', file_path)
        raise
    LOG.debug('Local file contents:\n%s', file_contents)
    return file_contents
Read the local file in _self.runway_dir_. Args: filename (str): Name of file to retrieve relative to root of _runway_dir_. Returns: str: Contents of local file. Raises: FileNotFoundError: Requested file missing.
juraj-google-style
def _test_or_class_decorator(test_or_class, single_method_decorator): def _decorate_test_or_class(obj): if isinstance(obj, collections.abc.Iterable): return itertools.chain.from_iterable((single_method_decorator(method) for method in obj)) if isinstance(obj, type): cls = obj for name, value in cls.__dict__.copy().items(): if callable(value) and name.startswith(unittest.TestLoader.testMethodPrefix): setattr(cls, name, single_method_decorator(value)) cls = type(cls).__new__(type(cls), cls.__name__, cls.__bases__, cls.__dict__.copy()) return cls return single_method_decorator(obj) if test_or_class is not None: return _decorate_test_or_class(test_or_class) return _decorate_test_or_class
Decorate a test or class with a decorator intended for one method. If the test_or_class is a class: This will apply the decorator to all test methods in the class. If the test_or_class is an iterable of already-parameterized test cases: This will apply the decorator to all the cases, and then flatten the resulting cross-product of test cases. This allows stacking the Keras parameterized decorators w/ each other, and to apply them to test methods that have already been marked with an absl parameterized decorator. Otherwise, treat the obj as a single method and apply the decorator directly. Args: test_or_class: A test method (that may have already been decorated with a parameterized decorator, or a test class that extends keras_parameterized.TestCase single_method_decorator: A parameterized decorator intended for a single test method. Returns: The decorated result.
github-repos
def get_sym_eq_kpoints(self, kpoint, cartesian=False, tol=0.01): if (not self.structure): return None sg = SpacegroupAnalyzer(self.structure) symmops = sg.get_point_group_operations(cartesian=cartesian) points = np.dot(kpoint, [m.rotation_matrix for m in symmops]) rm_list = [] for i in range((len(points) - 1)): for j in range((i + 1), len(points)): if np.allclose(pbc_diff(points[i], points[j]), [0, 0, 0], tol): rm_list.append(i) break return np.delete(points, rm_list, axis=0)
Returns a list of unique symmetrically equivalent k-points. Args: kpoint (1x3 array): coordinate of the k-point cartesian (bool): kpoint is in cartesian or fractional coordinates tol (float): tolerance below which coordinates are considered equal Returns: ([1x3 array] or None): if structure is not available returns None
codesearchnet
def __init__(self, vs): shape = Shape([Dimension("stacked", len(vs))] + vs[0].shape.dims) name = "stacked/" + vs[0].name super(StackedVariable, self).__init__( vs[0].mesh, name, shape, vs[0].dtype, None, vs[0].trainable) self._name = name self._masters = [v.get_master() for v in vs] self._original_names = [v.name for v in vs] self._splittable_dims, self._unsplittable_dims = ( self._initialize_all_dimensions_as_splittable())
Create a StackedVariable. Args: vs: a list of Variables
juraj-google-style
def get_bonded_structure(self, structure, decorate=False): from pymatgen.analysis.graphs import StructureGraph if decorate: order_parameters = [self.get_local_order_parameters(structure, n) for n in range(len(structure))] structure.add_site_property('order_parameters', order_parameters) sg = StructureGraph.with_local_env_strategy(structure, self) return sg
Obtain a StructureGraph object using this NearNeighbor class. Requires the optional dependency networkx (pip install networkx). Args: structure: Structure object. decorate (bool): whether to annotate site properties with order parameters using neighbors determined by this NearNeighbor class Returns: a pymatgen.analysis.graphs.BondedStructure object
juraj-google-style
def parse_variable(self, variable):
    data = None
    if variable is not None:
        variable = variable.strip()
        if re.match(self._variable_match, variable):
            var = re.search(self._variable_parse, variable)
            data = {
                'root': var.group(0),
                'job_id': var.group(2),
                'name': var.group(3),
                'type': var.group(4),
            }
    return data
Method to parse an input or output variable. **Example Variable**:: #App:1234:output!String Args: variable (string): The variable name to parse. Returns: (dictionary): Result of parsed string.
juraj-google-style
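A quick sketch of the parsing above for the documented example variable. The instance name and the exact regex group numbering are assumptions (the `_variable_match`/`_variable_parse` patterns are not shown):

    # pb is a hypothetical instance of the class that defines parse_variable.
    data = pb.parse_variable('#App:1234:output!String')
    # Plausible result, assuming the groups capture job id, name, and type in order:
    # {'root': '#App:1234:output!String', 'job_id': '1234',
    #  'name': 'output', 'type': 'String'}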
def parse_date_range(date, alt_end_date=None):
    NOT_ENDED = "9999"
    all_years = re.findall(r"\d{4}", date)
    if alt_end_date:
        NOT_ENDED = alt_end_date
    if not all_years:
        return "****", NOT_ENDED
    elif len(all_years) == 1:
        return all_years[0], NOT_ENDED
    return all_years[0], all_years[1]
Parse the input `date` string in free-text format for four-digit year groups. Args: date (str): Input containing years. alt_end_date (str, optional): Value used instead of "9999" when no end year is found. Returns: tuple: ``(from, to)`` as four-digit strings.
juraj-google-style
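The behaviour described above, spelled out as a few checks (a sketch assuming parse_date_range from the snippet is in scope):

    # Assuming parse_date_range and re are importable from the module above.
    assert parse_date_range("published 1998, reprinted 2001") == ("1998", "2001")
    assert parse_date_range("c. 2004") == ("2004", "9999")
    assert parse_date_range("undated") == ("****", "9999")
    assert parse_date_range("undated", alt_end_date="2020") == ("****", "2020")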
def update_port(self, port_information, id_or_uri, timeout=-1):
    uri = self._client.build_uri(id_or_uri) + "/ports"
    return self._client.update(port_information, uri, timeout)
Updates an interconnect port. Args: id_or_uri: Can be either the interconnect id or the interconnect uri. port_information (dict): object to update timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: dict: The interconnect.
juraj-google-style
def FromPath(cls, path, follow_symlink=True):
    precondition.AssertType(follow_symlink, bool)
    if follow_symlink:
        stat_obj = os.stat(path)
    else:
        stat_obj = os.lstat(path)
    return cls(path=path, stat_obj=stat_obj)
Returns stat information about the given OS path, calling os.[l]stat. Args: path: A path to perform `stat` on. follow_symlink: True if `stat` of a symlink should be returned instead of a file that it points to. For non-symlinks this setting has no effect. Returns: Stat instance, with information about the given path.
juraj-google-style
def get_fields(model_class):
    return [attr for (attr, value) in model_class.__dict__.items()
            if issubclass(type(value), (mongo.base.BaseField, mongo.EmbeddedDocumentField))]
Pass in a mongo model class and extract all the attributes which are mongoengine fields Returns: list of strings of field attributes
codesearchnet
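A minimal usage sketch for the helper above, using a hypothetical mongoengine document class (the model and its fields are assumptions for illustration only):

    import mongoengine as mongo

    class Person(mongo.Document):  # hypothetical model, not from the source
        name = mongo.StringField()
        age = mongo.IntField()

    # Assuming get_fields from the snippet above is in scope, this would list
    # the class attributes that are mongoengine fields, e.g. 'name' and 'age'.
    print(get_fields(Person))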
def within(self, other: "Interval", inclusive: bool = True) -> bool:
    if not other:
        return False
    if inclusive:
        return self.start >= other.start and self.end <= other.end
    else:
        return self.start > other.start and self.end < other.end
Is this interval contained within the other? Args: other: the :class:`Interval` to check inclusive: use inclusive rather than exclusive range checks?
juraj-google-style
class DFineIntegral(nn.Module): def __init__(self, config: DFineConfig): super().__init__() self.max_num_bins = config.max_num_bins def forward(self, pred_corners: torch.Tensor, project: torch.Tensor) -> torch.Tensor: batch_size, num_queries, _ = pred_corners.shape pred_corners = F.softmax(pred_corners.reshape(-1, self.max_num_bins + 1), dim=1) pred_corners = F.linear(pred_corners, project.to(pred_corners.device)).reshape(-1, 4) pred_corners = pred_corners.reshape(batch_size, num_queries, -1) return pred_corners
A static layer that calculates integral results from a distribution. This layer computes the target location using the formula: `sum{Pr(n) * W(n)}`, where Pr(n) is the softmax probability vector representing the discrete distribution, and W(n) is the non-uniform Weighting Function. Args: max_num_bins (int): Max number of the discrete bins. Default is 32. It can be adjusted based on the dataset or task requirements.
github-repos
def func_load(code, defaults=None, closure=None, globs=None): if isinstance(code, (tuple, list)): code, defaults, closure = code if isinstance(defaults, list): defaults = tuple(defaults) def ensure_value_to_cell(value): def dummy_fn(): value cell_value = dummy_fn.__closure__[0] if not isinstance(value, type(cell_value)): return cell_value return value if closure is not None: closure = tuple((ensure_value_to_cell(_) for _ in closure)) try: raw_code = codecs.decode(code.encode('ascii'), 'base64') except (UnicodeEncodeError, binascii.Error): raw_code = code.encode('raw_unicode_escape') code = marshal.loads(raw_code) if globs is None: globs = globals() return python_types.FunctionType(code, globs, name=code.co_name, argdefs=defaults, closure=closure)
Deserializes a user defined function. Args: code: bytecode of the function. defaults: defaults of the function. closure: closure of the function. globs: dictionary of global objects. Returns: A function object.
github-repos
def test_batch_sample_paths_2d(self, batch_rank): dtype = tf.float64 mu = np.array([0.2, 0.7]) a = np.array([[0.4, 0.1], [0.3, 0.2]]) b = np.array([[0.33, -0.03], [0.21, 0.5]]) def drift_fn(t, x): return mu * tf.sqrt(t) * tf.ones_like(x, dtype=t.dtype) def vol_fn(t, x): return (a * t + b) * tf.ones(x.shape.as_list() + [2], dtype=t.dtype) process = tff.models.GenericItoProcess(dim=2, drift_fn=drift_fn, volatility_fn=vol_fn, dtype=dtype) times = np.array([0.1, 0.21, 0.32, 0.43, 0.55]) x0 = np.array([0.1, -1.1]) * np.ones([2] * batch_rank + [1, 2]) times_grid = None time_step = 0.01 num_samples = 10000 normal_draws = None paths = self.evaluate(process.sample_paths(times, num_samples=num_samples, initial_state=x0, time_step=time_step, times_grid=times_grid, normal_draws=normal_draws, seed=12134)) num_samples = 10000 self.assertAllClose(list(paths.shape), [2] * batch_rank + [num_samples, 5, 2], atol=0) means = np.mean(paths, axis=batch_rank) times = np.reshape(times, [1] * batch_rank + [-1, 1]) expected_means = np.reshape(x0, [2] * batch_rank + [1, 2]) + 2.0 / 3.0 * mu * np.power(times, 1.5) self.assertAllClose(means, expected_means, rtol=0.01, atol=0.01)
Tests path properties for a batch of 2-dimensional Ito processes. We construct the following Ito processes. dX_1 = mu_1 sqrt(t) dt + s11 dW_1 + s12 dW_2 dX_2 = mu_2 sqrt(t) dt + s21 dW_1 + s22 dW_2 mu_1, mu_2 are constants. s_ij = a_ij t + b_ij For this process the expected value at time t is (x_0)_i + 2/3 * mu_i * t^1.5. Args: batch_rank: The rank of the batch of processes being simulated.
github-repos
def __checkDecisionParameters(self, result, **values): error = [] if (not result): error.append('Function parameter (result array) should contain one or more header string!') if (not values): error.append('Function parameter (values variables) should contain one or more variable') for header in result: if (not (header in self.header)): error.append((('String (' + header) + ') in result is not in header!')) for header in values: if (not (header in self.header)): error.append((('Variable (' + header) + ') in values is not in header!')) elif (not values[header].split()): error.append((('Variable (' + header) + ') in values is empty string')) if error: return error
Checker of decision parameters; it will raise ValueError if it finds something wrong. Args: result (array of str): See public decision methods **values (array of str): See public decision methods Raises: ValueError: Result array is none. ValueError: Values dict is none. ValueError: Result key not found in header. ValueError: Result value is empty. Returns: Error array values
codesearchnet
def split_identifiers(identifiers=[], proportions={}):
    abs_proportions = absolute_proportions(proportions, len(identifiers))
    parts = {}
    start_index = 0
    for (idx, proportion) in abs_proportions.items():
        parts[idx] = identifiers[start_index:(start_index + proportion)]
        start_index += proportion
    return parts
Split the given identifiers by the given proportions. Args: identifiers (list): List of identifiers (str). proportions (dict): A dictionary containing the proportions with the identifier from the input as key. Returns: dict: Dictionary containing a list of identifiers per part with the same key as the proportions dict. Example:: >>> split_identifiers( >>> identifiers=['a', 'b', 'c', 'd'], >>> proportions={'melvin' : 0.5, 'timmy' : 0.5} >>> ) {'melvin' : ['a', 'c'], 'timmy' : ['b', 'd']}
codesearchnet
def set_rollover(self, area, enabled):
    if area == u'streaming':
        self._rollover_streaming = enabled
    elif area == u'storage':
        self._rollover_storage = enabled
    else:
        raise ArgumentError("You must pass one of 'storage' or 'streaming' to set_rollover", area=area)
Configure whether rollover is enabled for streaming or storage streams. Normally a SensorLog is used in ring-buffer mode which means that old readings are automatically overwritten as needed when new data is saved. However, you can configure it into fill-stop mode by using: set_rollover("streaming"|"storage", True|False) By default rollover is set to True for both streaming and storage and can be controlled individually for each one. Args: area (str): Either streaming or storage. enabled (bool): Whether to enable or disable rollover.
codesearchnet
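A small usage sketch for the fill-stop configuration described above (the existing SensorLog instance is assumed):

    # sensor_log is assumed to be an already-constructed SensorLog instance.
    sensor_log.set_rollover('storage', False)    # fill-stop mode for storage streams
    sensor_log.set_rollover('streaming', True)   # keep ring-buffer mode for streaming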
def validate_variable_name(self, name):
    if not name:
        raise SerializerError('Variable name is empty')
    if name[0] not in PROPERTY_ALLOWED_START:
        msg = "Variable name '{}' must start with a letter"
        raise SerializerError(msg.format(name))
    for item in name:
        if item not in PROPERTY_ALLOWED_CHARS:
            msg = "Invalid variable name '{}': it must only contain letters, numbers and the '_' character"
            raise SerializerError(msg.format(name))
    return True
Validate variable name. Arguments: name (string): Property name. Returns: bool: ``True`` if variable name is valid.
codesearchnet
def set_weights(self, new_weights): self._check_sess() assign_list = [self.assignment_nodes[name] for name in new_weights.keys() if (name in self.assignment_nodes)] assert assign_list, 'No variables in the input matched those in the network. Possible cause: Two networks were defined in the same TensorFlow graph. To fix this, place each network definition in its own tf.Graph.' self.sess.run(assign_list, feed_dict={self.placeholders[name]: value for (name, value) in new_weights.items() if (name in self.placeholders)})
Sets the weights to new_weights. Note: Can set subsets of variables as well, by only passing in the variables you want to be set. Args: new_weights (Dict): Dictionary mapping variable names to their weights.
codesearchnet
def play_mp3(self, mp3=None, data=None, block=True): if platform.machine() == 'mips': command = 'madplay -o wave:- - | aplay -M' else: command = 'ffplay -autoexit -nodisp -' if mp3: def gen(m): with open(m, 'rb') as f: d = f.read(1024) while d: yield d d = f.read(1024) data = gen(mp3) if isinstance(data, types.GeneratorType): p = subprocess.Popen(command, stdin=subprocess.PIPE, shell=True) for d in data: p.stdin.write(d) p.stdin.close() else: with tempfile.NamedTemporaryFile(mode='w+b') as f: f.write(data) f.flush() f.seek(0) p = subprocess.Popen(command, stdin=f, shell=True) if block: p.wait()
It supports GeneratorType mp3 stream or mp3 data string Args: mp3: mp3 file data: mp3 generator or data block: if true, block until audio is played.
juraj-google-style
def _try_recover(self, trial, error_msg): try: self.trial_executor.stop_trial( trial, error=error_msg is not None, error_msg=error_msg, stop_logger=False) trial.result_logger.flush() if self.trial_executor.has_resources(trial.resources): logger.info("Attempting to recover" " trial state from last checkpoint.") self.trial_executor.start_trial(trial) if trial.status == Trial.ERROR: raise RuntimeError("Trial did not start correctly.") else: logger.debug("Notifying Scheduler and requeueing trial.") self._requeue_trial(trial) except Exception: logger.exception("Error recovering trial from checkpoint, abort.") self._scheduler_alg.on_trial_error(self, trial) self._search_alg.on_trial_complete(trial.trial_id, error=True)
Tries to recover trial. Notifies SearchAlgorithm and Scheduler if failure to recover. Args: trial (Trial): Trial to recover. error_msg (str): Error message from prior to invoking this method.
juraj-google-style
def set_available(self, show=None):
    show = self.state.show if show is None else show
    self.set_presence(PresenceState(available=True, show=show))
Sets the agent availability to True. Args: show (aioxmpp.PresenceShow, optional): the show state of the presence (Default value = None)
juraj-google-style
def update_serial(self, new_serial):
    new_serial = str(new_serial)
    if self.has_active_service:
        raise DeviceError(
            self, 'Cannot change device serial number when there is service running.')
    if self._debug_tag == self.serial:
        self._debug_tag = new_serial
    self._serial = new_serial
    self.adb.serial = new_serial
    self.fastboot.serial = new_serial
Updates the serial number of a device. The "serial number" used with adb's `-s` arg is not necessarily the actual serial number. For remote devices, it could be a combination of host names and port numbers. This is used for when such identifier of remote devices changes during a test. For example, when a remote device reboots, it may come back with a different serial number. This is NOT meant for switching the object to represent another device. We intentionally did not make it a regular setter of the serial property so people don't accidentally call this without understanding the consequences. Args: new_serial: string, the new serial number for the same device. Raises: DeviceError: tries to update serial when any service is running.
codesearchnet
def __init__(self, protocol):
    self._protocol = protocol
    self._current_consumer = self._HEADER
    self._message = None
    self._buf_header = None
Configure a Receiver with a specific Bokeh protocol version. Args: protocol (Protocol) : A Bokeh protocol object to use to assemble collected message fragments.
juraj-google-style
def Get(self, project_id):
    if project_id in self._emulators:
        return self._emulators[project_id]
    emulator = self.Create(project_id)
    self._emulators[project_id] = emulator
    return emulator
Returns an existing emulator instance for the provided project_id. If an emulator instance doesn't yet exist, it creates one. Args: project_id: project ID Returns: a DatastoreEmulator
codesearchnet
def GetMessages(self, formatter_mediator, event): if self.DATA_TYPE != event.data_type: raise errors.WrongFormatter( 'Invalid event object - unsupported data type: {0:s}'.format( event.data_type)) event_values = event.CopyToDict() number_of_volumes = event_values.get('number_of_volumes', 0) volume_serial_numbers = event_values.get('volume_serial_numbers', None) volume_device_paths = event_values.get('volume_device_paths', None) volumes_strings = [] for volume_index in range(0, number_of_volumes): if not volume_serial_numbers: volume_serial_number = 'UNKNOWN' else: volume_serial_number = volume_serial_numbers[volume_index] if not volume_device_paths: volume_device_path = 'UNKNOWN' else: volume_device_path = volume_device_paths[volume_index] volumes_strings.append(( 'volume: {0:d} [serial number: 0x{1:08X}, device path: ' '{2:s}]').format( volume_index + 1, volume_serial_number, volume_device_path)) if volumes_strings: event_values['volumes_string'] = ', '.join(volumes_strings) return self._ConditionalFormatMessages(event_values)
Determines the formatted message strings for an event object. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event (EventObject): event. Returns: tuple(str, str): formatted message string and short message string. Raises: WrongFormatter: if the event object cannot be formatted by the formatter.
juraj-google-style
def encode_dict(values_dict):
    return {key: encode_value(value)
            for key, value in six.iteritems(values_dict)}
Encode a dictionary into protobuf ``Value``-s. Args: values_dict (dict): The dictionary to encode as protobuf fields. Returns: Dict[str, ~google.cloud.firestore_v1beta1.types.Value]: A dictionary of string keys and ``Value`` protobufs as dictionary values.
juraj-google-style
def _ensure_tuple(item):
    if isinstance(item, tuple):
        return item
    elif isinstance(item, list):
        return tuple(item)
    elif isinstance(item, np.ndarray):
        return tuple(item.tolist())
    else:
        raise NotImplementedError
Simply ensure that the passed item is a tuple. If it is not, then convert it if possible, or raise a NotImplementedError Args: item: the item that needs to become a tuple Returns: the item casted as a tuple Raises: NotImplementedError: if converting the given item to a tuple is not implemented.
codesearchnet
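The conversions above, spelled out (a sketch assuming _ensure_tuple from the snippet and numpy are in scope):

    import numpy as np

    assert _ensure_tuple((1, 2)) == (1, 2)            # tuples pass through unchanged
    assert _ensure_tuple([1, 2]) == (1, 2)            # lists are converted
    assert _ensure_tuple(np.array([1, 2])) == (1, 2)  # arrays go through tolist()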
def base64url_decode(input):
    rem = len(input) % 4
    if rem > 0:
        input += b'=' * (4 - rem)
    return base64.urlsafe_b64decode(input)
Helper method to base64url_decode a string. Args: input (str): A base64url_encoded string to decode.
codesearchnet
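The padding arithmetic above restores the '=' characters that base64url encoders strip. A worked value (a sketch assuming base64url_decode from the snippet is in scope):

    import base64

    raw = b'any carnal pleasure'                            # 19 bytes
    unpadded = base64.urlsafe_b64encode(raw).rstrip(b'=')   # padding stripped, length 26
    assert len(unpadded) % 4 == 2                           # two '=' need to be restored
    assert base64url_decode(unpadded) == raw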
def data_filter(self, data):
    try:
        from .tcex_data_filter import DataFilter
        return DataFilter(self, data)
    except ImportError as e:
        warn = u'Required Module is not installed ({}).'.format(e)
        self.log.warning(warn)
Return an instance of the Data Filter Class. A simple helper module to filter results from ThreatConnect API or other data source. For example if results need to be filtered by an unsupported field the module allows you to pass the data array/list in and specify one or more filters to get just the results required. Args: data (list): The list of dictionary structure to filter. Returns: (object): An instance of DataFilter Class
codesearchnet
def average_coordination_number(structures, freq=10): coordination_numbers = {} for spec in structures[0].composition.as_dict().keys(): coordination_numbers[spec] = 0.0 count = 0 for t in range(len(structures)): if t % freq != 0: continue count += 1 vnn = VoronoiNN() for atom in range(len(structures[0])): cn = vnn.get_cn(structures[t], atom, use_weights=True) coordination_numbers[structures[t][atom].species_string] += cn elements = structures[0].composition.as_dict() for el in coordination_numbers: coordination_numbers[el] = coordination_numbers[el] / elements[ el] / count return coordination_numbers
Calculates the ensemble averaged Voronoi coordination numbers of a list of Structures using VoronoiNN. Typically used for analyzing the output of a Molecular Dynamics run. Args: structures (list): list of Structures. freq (int): sampling frequency of coordination number [every freq steps]. Returns: Dictionary of elements as keys and average coordination numbers as values.
juraj-google-style
def _try_refresh_access_token(self) -> None:
    if self.refresh_token:
        if not self.access_token or self._is_access_token_expired():
            self.access_token, self.access_expiration = self._get_access_from_refresh()
            self.access_expiration = time.time() + self.access_expiration
Attempts to get a new access token using the refresh token, if needed. If the access token is expired and this instance has a stored refresh token, then the refresh token is in the API call to get a new access token. If successful, this instance is modified in-place with that new access token. Args: None Returns: None
juraj-google-style
def double(self, count=0): return 2 * count
Returns the input multiplied by 2. Args: count: Input number that you want to double. Returns: A number that is the double of count.
github-repos
def get_cpus_by_arch(cls, arch):
    with open('/usr/share/libvirt/cpu_map.xml', 'r') as cpu_map:
        cpu_xml = ET.parse(cpu_map)
    try:
        return cpu_xml.xpath('/cpus/arch[@name="{0}"]'.format(arch))[0]
    except IndexError:
        raise LagoException('No such arch: {0}'.format(arch))
Get all CPUs info by arch Args: arch(str): CPU architecture Returns: lxml.etree.element: CPUs by arch XML Raises: :exc:`~LagoException`: If no such ARCH is found
juraj-google-style
def _ParseQuery(self, parser_mediator, database, query, callback, cache): row_cache = cache.GetRowCache(query) try: rows = database.Query(query) except sqlite3.DatabaseError as exception: parser_mediator.ProduceExtractionWarning('unable to run query: {0:s} on database with error: {1!s}'.format(query, exception)) return for (index, row) in enumerate(rows): if parser_mediator.abort: break row_hash = self._HashRow(row) if (row_hash in row_cache): continue try: callback(parser_mediator, query, row, cache=cache, database=database) except Exception as exception: parser_mediator.ProduceExtractionWarning('unable to parse row: {0:d} with callback: {1:s} on database with error: {2!s}'.format(index, callback.__name__, exception)) return row_cache.add(row_hash)
Queries a database and parses the results. Args: parser_mediator (ParserMediator): parser mediator. database (SQLiteDatabase): database. query (str): query. callback (function): function to invoke to parse an individual row. cache (SQLiteCache): cache.
codesearchnet
def wsgi_simple_responder( result: Union[str, bytes], handler: Callable[[Union[str, bytes]], WSGI_TUPLE_TYPE], start_response: TYPE_WSGI_START_RESPONSE, status: str = '200 OK', extraheaders: TYPE_WSGI_RESPONSE_HEADERS = None) \ -> TYPE_WSGI_APP_RESULT: extraheaders = extraheaders or [] (contenttype, extraheaders2, output) = handler(result) response_headers = [('Content-Type', contenttype), ('Content-Length', str(len(output)))] response_headers.extend(extraheaders) if extraheaders2 is not None: response_headers.extend(extraheaders2) start_response(status, response_headers) return [output]
Simple WSGI app. Args: result: the data to be processed by ``handler`` handler: a function returning a ``(contenttype, extraheaders, data)`` tuple, e.g. ``text_result``, ``html_result`` start_response: standard WSGI ``start_response`` function status: status code (default ``"200 OK"``) extraheaders: optional extra HTTP headers Returns: WSGI application result
juraj-google-style
def post_path(self, path: str, path_data: Union[dict, None], post_data: Any) -> dict:
    path = self._insert_vars(path, path_data or {})
    path = self.BASE_URL + path
    self._try_refresh_access_token()
    return self.session.post(path, json=post_data).json()
Modifies the ESI by an endpoint URL. This method is not marked "private" as it _can_ be used by consuming code, but it's probably easier to call the `get_op` method instead. Args: path: raw ESI URL path path_data: data to format the path with (can be None) post_data: data to send to ESI Returns: ESI data
codesearchnet
def jvp(self, primals, unconnected_gradients=UnconnectedGradients.NONE): unconnected_gradients = UnconnectedGradients(unconnected_gradients) if self._accumulator is None: raise ValueError('Called jvp() without first tracing anything.') def _fetch_jvp(tensor): if hasattr(tensor, 'handle'): unwrapped_tensor = ops.convert_to_tensor(tensor.handle) else: unwrapped_tensor = tensor result = pywrap_tfe.TFE_Py_ForwardAccumulatorJVP(self._accumulator, unwrapped_tensor) if result is None and unconnected_gradients == UnconnectedGradients.ZERO: result = array_ops.zeros_like(tensor) return result return nest.map_structure(_fetch_jvp, primals)
Fetches the Jacobian-vector product computed for `primals`. Note that this method performs no computation, and simply looks up a JVP that was already computed (unlike backprop using a `tf.GradientTape`, where the computation happens on the call to `tape.gradient`). Args: primals: A watched Tensor or structure of Tensors to fetch the JVPs for. unconnected_gradients: A value which can either hold 'none' or 'zero' and alters the value which will be returned if no JVP was computed for `primals`. The possible values and effects are detailed in 'tf.UnconnectedGradients' and it defaults to 'none'. Returns: Tensors with the same shapes and dtypes as `primals`, or None if no JVP is available.
github-repos
async def call(self, methname, *args, **kwargs): todo = (methname, args, kwargs) return (await self.task(todo))
Call a remote method by name. Args: methname (str): The name of the remote method. *args: Arguments to the method call. **kwargs: Keyword arguments to the method call. Most use cases will likely use the proxy methods directly: The following two are effectively the same: valu = proxy.getFooBar(x, y) valu = proxy.call('getFooBar', x, y)
codesearchnet
def FromEncoded(self, encoded):
    stream_type = (encoded >> 12) & 15
    stream_system = bool(encoded & (1 << 11))
    stream_id = encoded & ((1 << 11) - 1)
    return DataStream(stream_type, stream_id, stream_system)
Create a DataStream from an encoded 16-bit unsigned integer. Returns: DataStream: The decoded DataStream object
codesearchnet
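The bit layout implied by the decoder above: the top 4 bits hold the stream type, bit 11 the system flag, and the low 11 bits the stream id. One value worked through with the same masking as the snippet:

    encoded = (5 << 12) | (1 << 11) | 42        # type=5, system flag set, id=42 -> 0x582A
    stream_type = (encoded >> 12) & 15           # 5
    stream_system = bool(encoded & (1 << 11))    # True
    stream_id = encoded & ((1 << 11) - 1)        # 42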
def lineReceived(self, line): while self._in_header: if line: self._headers.append(line) else: http, status, message = self._headers[0].split(" ", 2) status = int(status) if status == 200: self.factory.get_stream().connected() else: self.factory.continueTrying = 0 self.transport.loseConnection() self.factory.get_stream().disconnected(RuntimeError(status, message)) return self._in_header = False break else: try: self._len_expected = int(line, 16) self.setRawMode() except: pass
Callback issued by twisted when new line arrives. Args: line (str): Incoming line
juraj-google-style
def load(self, context): if not (context.flags.debugger_data_server_grpc_port > 0 or context.flags.debugger_port > 0): return None flags = context.flags try: import tensorflow except ImportError: raise ImportError( 'To use the debugger plugin, you need to have TensorFlow installed:\n' ' pip install tensorflow') try: from tensorboard.plugins.debugger import debugger_plugin as debugger_plugin_lib from tensorboard.plugins.debugger import interactive_debugger_plugin as interactive_debugger_plugin_lib except ImportError as e: e_type, e_value, e_traceback = sys.exc_info() message = e.msg if hasattr(e, 'msg') else e.message if 'grpc' in message: e_value = ImportError( message + '\n\nTo use the debugger plugin, you need to have ' 'gRPC installed:\n pip install grpcio') six.reraise(e_type, e_value, e_traceback) if flags.debugger_port > 0: interactive_plugin = ( interactive_debugger_plugin_lib.InteractiveDebuggerPlugin(context)) logger.info('Starting Interactive Debugger Plugin at gRPC port %d', flags.debugger_data_server_grpc_port) interactive_plugin.listen(flags.debugger_port) return interactive_plugin elif flags.debugger_data_server_grpc_port > 0: noninteractive_plugin = debugger_plugin_lib.DebuggerPlugin(context) logger.info('Starting Non-interactive Debugger Plugin at gRPC port %d', flags.debugger_data_server_grpc_port) noninteractive_plugin.listen(flags.debugger_data_server_grpc_port) return noninteractive_plugin raise AssertionError()
Returns the debugger plugin, if possible. Args: context: The TBContext flags including `add_arguments`. Returns: A DebuggerPlugin instance or None if it couldn't be loaded.
juraj-google-style
def ReplaceAll(pattern, rep, s):
    if pattern not in _regexp_compile_cache:
        _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
    return _regexp_compile_cache[pattern].sub(rep, s)
Replaces instances of pattern in a string with a replacement. The compiled regex is kept in a cache shared by Match and Search. Args: pattern: regex pattern rep: replacement text s: search string Returns: string with replacements made (or original string if no replacements)
codesearchnet
class Distribution: def __init__(self, device_mesh, batch_dim_name=None): self._device_mesh = device_mesh self._batch_dim_name = batch_dim_name def get_data_layout(self, data_shape): raise NotImplementedError() def get_variable_layout(self, variable): raise NotImplementedError() def get_tensor_layout(self, path): raise NotImplementedError() @contextlib.contextmanager def scope(self): original_scope = distribution() set_distribution(self) try: yield finally: set_distribution(original_scope) @property def device_mesh(self): return self._device_mesh @property def batch_dim_name(self): return self._batch_dim_name def distribute_dataset(self, dataset): raise NotImplementedError() def __repr__(self): return f'<{self.__class__.__name__} device_mesh={self.device_mesh}>' def __str__(self): return self.__repr__()
Base class for variable distribution strategies. A `Distribution` has following key functionalities: 1. Distribute the model variables to a `DeviceMesh`. 2. Distribute the input data to a `DeviceMesh`. 3. Distribute an intermediate state tensor in the model. It can create a context scope so that the framework to properly detect the `Distribution` and distribute the variable/data accordingly. Args: device_mesh: A `DeviceMesh` instance.
github-repos
def transform_qubits(self: TSelf_Operation, func: Callable[[Qid], Qid]) -> TSelf_Operation:
    return self.with_qubits(*(func(q) for q in self.qubits))
Returns the same operation, but with different qubits. Args: func: The function to use to turn each current qubit into a desired new qubit. Returns: The receiving operation but with qubits transformed by the given function.
codesearchnet
def get_creds(use_personal_account: bool, service_account: str, private_key: str) -> credentials.Credentials: if service_account and private_key: try: with open_local(private_key) as local_path: creds = ee.ServiceAccountCredentials(service_account, local_path) except Exception: raise RuntimeError(f'Unable to open the private key {private_key}.') elif use_personal_account: ee.Authenticate() creds, _ = default() elif is_compute_engine(): creds = compute_engine.Credentials() else: creds, _ = default() creds.refresh(requests.Request()) return creds
Fetches credentials for authentication. If the `use_personal_account` argument is true then it will authenticate with pop-up browser window using personal account. Otherwise, if the application is running in compute engine, it will use credentials of service account bound to the VM. Otherwise, it will try to use user credentials. Args: use_personal_account: A flag to use personal account for ee authentication. service_account: Service account address when using a private key for earth engine authentication. private_key: A private key path to authenticate earth engine using private key. Returns: cred: Credentials object.
github-repos
def end_container(self, header_buf):
    if not self.__container_nodes:
        raise ValueError("Attempted to end container with none active.")
    self.__container_node.add_leaf(_Node(header_buf))
    self.__container_node = self.__container_nodes.pop()
    parent_container_length = self.__container_lengths.pop()
    self.current_container_length = (
        parent_container_length + self.current_container_length + len(header_buf))
Add a node containing the container's header to the current subtree. This node will be added as the leftmost leaf of the subtree that was started by the matching call to start_container. Args: header_buf (bytearray): bytearray containing the container header.
juraj-google-style
def setlogging(mlogger, defval=None):
    log_level = os.getenv('SYN_LOG_LEVEL', defval)
    if log_level:
        log_level = log_level.upper()
        if log_level not in s_const.LOG_LEVEL_CHOICES:
            raise ValueError('Invalid log level provided: {}'.format(log_level))
        logging.basicConfig(level=log_level, format=s_const.LOG_FORMAT)
        mlogger.info('log level set to %s', log_level)
Configure synapse logging. Args: mlogger (logging.Logger): Reference to a logging.Logger() defval (str): Default log level Notes: This calls logging.basicConfig and should only be called once per process. Returns: None
juraj-google-style
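A usage sketch for the logging setup above; the level comes from the SYN_LOG_LEVEL environment variable or the supplied default (assuming setlogging from the snippet is importable):

    import logging
    import os

    os.environ['SYN_LOG_LEVEL'] = 'DEBUG'   # or rely on defval below
    logger = logging.getLogger(__name__)
    setlogging(logger, defval='INFO')        # configures basicConfig at DEBUG here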
def send(self, msg):
    slipDriver = sliplib.Driver()
    slipData = slipDriver.send(msg)
    res = self._serialPort.write(slipData)
    return res
Encodes data to slip protocol and then sends over serial port Uses the SlipLib module to convert the message data into SLIP format. The message is then sent over the serial port opened with the instance of the Faraday class used when invoking send(). Args: msg (bytes): Bytes format message to send over serial port. Returns: int: Number of bytes transmitted over the serial port.
juraj-google-style
def start_trial(self, trial, checkpoint=None): self._commit_resources(trial.resources) try: self._start_trial(trial, checkpoint) except Exception as e: logger.exception("Error starting runner for Trial %s", str(trial)) error_msg = traceback.format_exc() time.sleep(2) self._stop_trial(trial, error=True, error_msg=error_msg) if isinstance(e, AbortTrialExecution): return try: trial.clear_checkpoint() logger.info( "Trying to start runner for Trial %s without checkpoint.", str(trial)) self._start_trial(trial) except Exception: logger.exception( "Error starting runner for Trial %s, aborting!", str(trial)) error_msg = traceback.format_exc() self._stop_trial(trial, error=True, error_msg=error_msg)
Starts the trial. Will not return resources if trial repeatedly fails on start. Args: trial (Trial): Trial to be started. checkpoint (Checkpoint): A Python object or path storing the state of trial.
juraj-google-style
def _PrintAPFSVolumeIdentifiersOverview(self, volume_system, volume_identifiers): header = 'The following Apple File System (APFS) volumes were found:\n' self._output_writer.Write(header) column_names = ['Identifier', 'Name'] table_view = views.CLITabularTableView(column_names=column_names) for volume_identifier in volume_identifiers: volume = volume_system.GetVolumeByIdentifier(volume_identifier) if (not volume): raise errors.SourceScannerError('Volume missing for identifier: {0:s}.'.format(volume_identifier)) volume_attribute = volume.GetAttribute('name') table_view.AddRow([volume.identifier, volume_attribute.value]) self._output_writer.Write('\n') table_view.Write(self._output_writer) self._output_writer.Write('\n')
Prints an overview of APFS volume identifiers. Args: volume_system (dfvfs.APFSVolumeSystem): volume system. volume_identifiers (list[str]): allowed volume identifiers. Raises: SourceScannerError: if a volume cannot be resolved from the volume identifier.
codesearchnet
def layers(self):
    layers = [self._layer_def(style) for style in self.styles]
    return layers
Renders the list of layers to add to the map. Returns: layers (list): list of layer entries suitable for use in mapbox-gl 'map.addLayer()' call
codesearchnet
def edges(self, nodes=None):
    edges = set()
    for node in (nodes or self.iterkeys()):
        ends = self[node].nodes()
        edges.update([(node, end) for end in ends])
    return tuple(edges)
Returns a ``tuple`` of all edges in the ``DictGraph`` an edge is a pair of **node objects**. Arguments: - nodes(iterable) [default: ``None``] iterable of **node objects** if specified the edges will be limited to those outgoing from one of the specified nodes.
juraj-google-style
def __init__(self, map_name, timestamp_dir, cache_options, automount_mountpoint=None, can_do_incremental=False): self.log = logging.getLogger(__name__) self.map_name = map_name self.timestamp_dir = timestamp_dir self.cache_options = cache_options self.can_do_incremental = can_do_incremental if automount_mountpoint is None: timestamp_prefix = '%s/timestamp-%s' % (timestamp_dir, map_name) else: automount_mountpoint = automount_mountpoint.lstrip('/') automount_mountpoint = automount_mountpoint.replace('/', '_') timestamp_prefix = '%s/timestamp-%s-%s' % (timestamp_dir, map_name, automount_mountpoint) self.modify_file = '%s-modify' % timestamp_prefix self.update_file = '%s-update' % timestamp_prefix self.modify_time = None self.update_time = None
Construct an updater object. Args: map_name: A string representing the type of the map we are an Updater for. timestamp_dir: A string with the directory containing our timestamp files. cache_options: A dict containing the options for any caches we create. automount_mountpoint: An optional string containing automount path info. can_do_incremental: Indicates whether or not our source can provide incremental updates at all.
github-repos
def FindEnumTypeByName(self, full_name):
    full_name = _NormalizeFullyQualifiedName(full_name)
    if full_name not in self._enum_descriptors:
        self.FindFileContainingSymbol(full_name)
    return self._enum_descriptors[full_name]
Loads the named enum descriptor from the pool. Args: full_name: The full name of the enum descriptor to load. Returns: The enum descriptor for the named type.
juraj-google-style
def __init__(self, mapreduce_spec, shard_state, task_retry_count=0): self._shard_state = shard_state self.mapreduce_spec = mapreduce_spec self.task_retry_count = task_retry_count if self.mapreduce_spec: self.mapreduce_id = self.mapreduce_spec.mapreduce_id else: self.mapreduce_id = None if shard_state: self.shard_id = shard_state.get_shard_id() else: self.shard_id = None self._mutation_pool = _MutationPool(mapreduce_spec=mapreduce_spec) self._counters = _Counters(shard_state) self.counters = self._counters self._pools = {} self.register_pool("mutation_pool", self._mutation_pool) self.register_pool("counters", self.counters)
Constructor. Args: mapreduce_spec: mapreduce specification as model.MapreduceSpec. shard_state: an instance of model.ShardState. This has to be the same instance as the one MapperWorkerHandler mutates. All mutations are flushed to datastore in the end of the slice. task_retry_count: how many times this task has been retried.
juraj-google-style
def subscribe_sns_topic_to_sqs(self, region):
    sns = self.session.resource('sns', region_name=region)
    topic = sns.Topic('arn:aws:sns:{}:{}:{}'.format(
        region, self.account.account_number, self.topic_name))
    topic.subscribe(Protocol='sqs', Endpoint=self.sqs_queue)
    auditlog(
        event='cloudtrail.subscribe_sns_topic_to_sqs',
        actor=self.ns,
        data={'account': self.account.account_name, 'region': region})
    return topic.attributes['TopicArn']
Subscribe SQS to the SNS topic. Returns the ARN of the SNS Topic subscribed Args: region (`str`): Name of the AWS region Returns: `str`
codesearchnet
def generate_nb_states(n_states, n_cells, n_genes):
    W = np.random.dirichlet([1]*n_states, size=(n_cells,))
    W = W.T
    M = np.random.random((n_genes, n_states))*100
    R = np.random.randint(1, 100, n_genes)
    return M, W, R
Generates means and weights for the Negative Binomial Mixture Model. Weights are distributed Dirichlet(1,1,...); means are uniform on (0, 100). Returned values can be passed to generate_state_data(M, W). Args: n_states (int): number of states or clusters n_cells (int): number of cells n_genes (int): number of genes Returns: M - genes x clusters W - clusters x cells R - genes x 1 - randint(1, 100)
juraj-google-style
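The shapes returned by the generator above, for a small example (a sketch assuming generate_nb_states from the snippet and numpy are in scope):

    import numpy as np

    M, W, R = generate_nb_states(n_states=3, n_cells=10, n_genes=5)
    assert M.shape == (5, 3)    # genes x clusters
    assert W.shape == (3, 10)   # clusters x cells; each column sums to 1
    assert R.shape == (5,)      # one random integer per gene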
def rst_underline(heading: str, underline_char: str) -> str:
    assert "\n" not in heading
    assert len(underline_char) == 1
    return heading + "\n" + (underline_char * len(heading))
Underlines a heading for RST files. Args: heading: text to underline underline_char: character to use Returns: underlined heading, over two lines (without a final terminating newline)
juraj-google-style
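For example, assuming the helper above is in scope:

    print(rst_underline("Installation", "="))
    # Installation
    # ============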
def update_snmp_configuration(self, configuration, timeout=-1):
    data = configuration.copy()
    if 'type' not in data:
        data['type'] = 'snmp-configuration'
    uri = "{}{}".format(self.data["uri"], self.SNMP_CONFIGURATION_PATH)
    return self._helper.update(data, uri=uri, timeout=timeout)
Updates the SNMP configuration of a logical interconnect. Changes to the SNMP configuration are asynchronously applied to all managed interconnects. Args: configuration: snmp configuration. Returns: dict: The Logical Interconnect.
juraj-google-style
def write_data(msg_type, profile_name, data, cfg):
    if profile_name not in cfg.data:
        cfg.data[profile_name] = {}
    cfg.data[profile_name][msg_type] = data
Write the settings into the data portion of the cfg. Args: :msg_type: (str) message type to create config entry. :profile_name: (str) name of the profile entry :data: (dict) dict values for the 'settings' :cfg: (jsonconfig.Config) config instance.
juraj-google-style
def read_float(self, little_endian=True):
    if little_endian:
        endian = "<"
    else:
        endian = ">"
    return self.unpack("%sf" % endian, 4)
Read 4 bytes as a float value from the stream. Args: little_endian (bool): specify the endianness. (Default) Little endian. Returns: float:
juraj-google-style
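The call above amounts to reading four bytes and unpacking them with the struct module; the same conversion as a standalone sketch:

    import struct

    value_le = struct.unpack('<f', b'\x00\x00\x80?')[0]       # little-endian bytes -> 1.0
    value_be = struct.unpack('>f', b'?\x80\x00\x00')[0]       # big-endian bytes   -> 1.0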
def LoadFromString(cls, yaml_doc):
    return cls(**googleads.common.LoadFromString(
        yaml_doc, cls._YAML_KEY, cls._REQUIRED_INIT_VALUES, cls._OPTIONAL_INIT_VALUES))
Creates an AdWordsClient with information stored in a yaml string. Args: yaml_doc: The yaml string containing the cached AdWords data. Returns: An AdWordsClient initialized with the values cached in the string. Raises: A GoogleAdsValueError if the given yaml string does not contain the information necessary to instantiate a client object - either a required key was missing or an OAuth2 key was missing.
codesearchnet
def member_add(self, repl_id, params):
    repl = self[repl_id]
    member_id = repl.repl_member_add(params)
    self[repl_id] = repl
    return member_id
Create an instance and add it to an existing replica set. Args: repl_id - replica set identity params - member params Returns: the id of the newly added member
juraj-google-style
def add_group_maintainer(self, name, user):
    self.project_service.set_auth(self._token_project)
    self.project_service.add_group_maintainer(name, user)
Add the given user to the named group. Both group and user must already exist for this to succeed. Args: name (string): Name of group. user (string): User to add to group. Raises: requests.HTTPError on failure.
juraj-google-style
def _tracker_str(item):
    instance = tracker(item)
    if instance is not None:
        if isinstance(instance, str):
            return instance
        elif isinstance(instance, tuple):
            return instance
        else:
            return instance.uuid
    else:
        return item
Returns a string representation of the tracker object for the given item. Args: item: object to get the tracker for.
juraj-google-style
def _load_tmp_fact(filepath): from hamster_lib import Fact try: with open(filepath, 'rb') as fobj: fact = pickle.load(fobj) except IOError: fact = False else: if not isinstance(fact, Fact): raise TypeError(_( "Something went wrong. It seems our pickled file does not contain" " valid Fact instance. [Content: '{content}'; Type: {type}".format( content=fact, type=type(fact)) )) return fact
Load an 'ongoing fact' from a given location. Args: filepath: Full path to the tmpfile location. Returns: hamster_lib.Fact: ``Fact`` representing the 'ongoing fact'. Returns ``False`` if no file was found. Raises: TypeError: If for some reason our stored instance is no instance of ``hamster_lib.Fact``.
juraj-google-style
def _parse_peer_address(self, config):
    match = re.search(r'peer-address ([^\s]+)', config)
    value = match.group(1) if match else None
    return dict(peer_address=value)
Scans the config block and parses the peer-address value Args: config (str): The config block to scan Returns: dict: A dict object that is intended to be merged into the resource dict
juraj-google-style
def listup_sentence(self, data, counter=0):
    delimiter = self.delimiter_list[counter]
    sentence_list = []
    [sentence_list.append(sentence + delimiter)
     for sentence in data.split(delimiter) if sentence != ""]
    if counter + 1 < len(self.delimiter_list):
        sentence_list_r = []
        [sentence_list_r.extend(self.listup_sentence(sentence, counter + 1))
         for sentence in sentence_list]
        sentence_list = sentence_list_r
    return sentence_list
Divide string into sentence list. Args: data: string. counter: recursive counter. Returns: List of sentences.
juraj-google-style
def send_async(self, transaction, headers=None):
    return self.transport.forward_request(
        method='POST',
        path=self.path,
        json=transaction,
        params={'mode': 'async'},
        headers=headers)
Submit a transaction to the Federation with the mode `async`. Args: transaction (dict): the transaction to be sent to the Federation node(s). headers (dict): Optional headers to pass to the request. Returns: dict: The transaction sent to the Federation node(s).
juraj-google-style
def extract_only_content(self, path=None, payload=None, objectInput=None):
    if objectInput:
        switches = ["-t"]
        result = self._command_template(switches, objectInput)
        return result, True, None
    else:
        f = file_path(path, payload)
        switches = ["-t", f]
        result = self._command_template(switches)
        return result, path, f
Return only the text content of the passed file. The parameters are mutually exclusive; only one of them is analyzed. Args: path (string): Path of file to analyze payload (string): Payload base64 to analyze objectInput (object): file object/standard input to analyze Returns: text of the passed file (string)
juraj-google-style
def find_clients(self, hosts):
    clients = []
    for host in hosts:
        clients.append(self._get_client_by_hostname(host))
    return [client for client in clients if client is not None]
Finds GRR clients given a list of hosts. Args: hosts: List of hostname FQDNs Returns: List of GRR client objects.
juraj-google-style
def write_to_file_by_name(folder, fname, data, mkdir=False): if not os.path.isdir(folder): if mkdir: preparedir(folder) else: created = preparedir(folder, False) if not created: raise ValueError("Failed to find %s." % folder) file_path = os.path.join(folder, fname) with open(file_path, writemode) as outf: try: outf.write(str(data)) return file_path except Exception as e: raise IOError("Failed to write %s to file:\n\t%s" % (fname, str(e)))
Write a string of data to file by filename and folder. Args: folder: Target folder (e.g. c:/ladybug). fname: File name (e.g. testPts.pts). data: Any data as string. mkdir: Set to True to create the directory if doesn't exist (Default: False).
juraj-google-style
def check_new_round(self, hours=24, tournament=1): query = arguments = {'tournament': tournament} raw = self.raw_query(query, arguments)['data']['rounds'][0] if raw is None: return False open_time = utils.parse_datetime_string(raw['openTime']) now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc) is_new_round = open_time > now - datetime.timedelta(hours=hours) return is_new_round
Check if a new round has started within the last `hours`. Args: hours (int, optional): timeframe to consider, defaults to 24 tournament (int): ID of the tournament (optional, defaults to 1) Returns: bool: True if a new round has started, False otherwise. Example: >>> NumerAPI().check_new_round() False
juraj-google-style
def add_enum(name=None, index=None, flags=idaapi.hexflag(), bitfield=False): if (name is not None): with ignored(exceptions.EnumNotFound): _get_enum(name) raise exceptions.EnumAlreadyExists() if ((index is None) or (index < 0)): index = idaapi.get_enum_qty() eid = idaapi.add_enum(index, name, flags) if (eid == idaapi.BADADDR): raise exceptions.EnumCreationFailed('Failed creating enum "{}"'.format(name)) if bitfield: idaapi.set_enum_bf(eid, bitfield) return Enum(eid=eid)
Create a new enum. Args: name: Name of the enum to create. index: The index of the enum. Leave at default to append the enum as the last enum. flags: Enum type flags. bitfield: Is the enum a bitfield. Returns: An `Enum` object.
codesearchnet
def custom_apply(self, path: utils.KeyPath, value_spec: class_schema.ValueSpec, allow_partial: bool, child_transform: Optional[Callable[[utils.KeyPath, class_schema.Field, Any], Any]]=None) -> Tuple[bool, Any]:
Custom apply on a value based on its original value spec. Args: path: KeyPath of current object under its object tree. value_spec: Original value spec for this field. allow_partial: Whether allow partial object to be created. child_transform: Function to transform child node values into their final values. Transform function is called on leaf nodes first, then on their parents, recursively. Returns: A tuple (proceed_with_standard_apply, value_to_proceed). If proceed_with_standard_apply is set to False, value_to_proceed will be used as final value. Raises: Error when the value is not compatible with the value spec.
github-repos
def _operation_status(self):
    if not google_v2_operations.is_done(self._op):
        return 'RUNNING'
    if google_v2_operations.is_success(self._op):
        return 'SUCCESS'
    if google_v2_operations.is_canceled(self._op):
        return 'CANCELED'
    if google_v2_operations.is_failed(self._op):
        return 'FAILURE'
    raise ValueError('Status for operation {} could not be determined'.format(self._op['name']))
Returns the status of this operation. Raises: ValueError: if the operation status cannot be determined. Returns: A printable status string (RUNNING, SUCCESS, CANCELED or FAILURE).
codesearchnet
def new_stories(self, raw=False, limit=None):
    new_stories = self._get_stories('newstories', limit)
    if raw:
        new_stories = [story.raw for story in new_stories]
    return new_stories
Returns list of item ids of current new stories Args: limit (int): specifies the number of stories to be returned. raw (bool): Flag to indicate whether to transform all objects into raw json. Returns: `list` object containing ids of new stories.
juraj-google-style
def get_conditional_uni(cls, left_parent, right_parent):
    left, right, _ = cls._identify_eds_ing(left_parent, right_parent)
    left_u = left_parent.U[0] if left_parent.L == left else left_parent.U[1]
    right_u = right_parent.U[0] if right_parent.L == right else right_parent.U[1]
    return left_u, right_u
Identify pair univariate value from parents. Args: left_parent(Edge): left parent right_parent(Edge): right parent Returns: tuple[np.ndarray, np.ndarray]: left and right parents univariate.
juraj-google-style
def __init__(self, channel): self.ListJobs = channel.unary_unary( "/google.cloud.scheduler.v1.CloudScheduler/ListJobs", request_serializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_cloudscheduler__pb2.ListJobsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_cloudscheduler__pb2.ListJobsResponse.FromString, ) self.GetJob = channel.unary_unary( "/google.cloud.scheduler.v1.CloudScheduler/GetJob", request_serializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_cloudscheduler__pb2.GetJobRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_job__pb2.Job.FromString, ) self.CreateJob = channel.unary_unary( "/google.cloud.scheduler.v1.CloudScheduler/CreateJob", request_serializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_cloudscheduler__pb2.CreateJobRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_job__pb2.Job.FromString, ) self.UpdateJob = channel.unary_unary( "/google.cloud.scheduler.v1.CloudScheduler/UpdateJob", request_serializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_cloudscheduler__pb2.UpdateJobRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_job__pb2.Job.FromString, ) self.DeleteJob = channel.unary_unary( "/google.cloud.scheduler.v1.CloudScheduler/DeleteJob", request_serializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_cloudscheduler__pb2.DeleteJobRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.PauseJob = channel.unary_unary( "/google.cloud.scheduler.v1.CloudScheduler/PauseJob", request_serializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_cloudscheduler__pb2.PauseJobRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_job__pb2.Job.FromString, ) self.ResumeJob = channel.unary_unary( "/google.cloud.scheduler.v1.CloudScheduler/ResumeJob", request_serializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_cloudscheduler__pb2.ResumeJobRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_job__pb2.Job.FromString, ) self.RunJob = channel.unary_unary( "/google.cloud.scheduler.v1.CloudScheduler/RunJob", request_serializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_cloudscheduler__pb2.RunJobRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_job__pb2.Job.FromString, )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def voronoi(script, target_layer=0, source_layer=1, backward=True): filter_xml = ''.join([' <filter name="Voronoi Vertex Coloring">\n', ' <Param name="ColoredMesh" ', 'value="{:d}" '.format(target_layer), 'description="To be Colored Mesh" ', 'type="RichMesh" ', '/>\n', ' <Param name="VertexMesh" ', 'value="{:d}" '.format(source_layer), 'description="Vertex Mesh" ', 'type="RichMesh" ', '/>\n', ' <Param name="backward" ', 'value="{}" '.format(str(backward).lower()), 'description="BackDistance" ', 'type="RichBool" ', '/>\n', ' </filter>\n']) util.write_filter(script, filter_xml) return None
Given a Mesh 'M' and a Pointset 'P', the filter projects each vertex of P onto M and
    colors M according to the geodesic distance from these projected points. Projection
    and coloring are done on a per-vertex basis.

    Args:
        script: the FilterScript object or script filename to write the filter to.
        target_layer (int): The mesh layer whose surface is colored. For each vertex of
            this mesh we decide the color according to the following arguments.
        source_layer (int): The mesh layer whose vertices are used as seed points for the
            color computation. These seed points are projected onto the target_layer mesh.
        backward (bool): If True the mesh is colored according to the distance from the
            frontier of the Voronoi diagram induced by the source_layer seeds.

    Layer stack:
        No impacts

    MeshLab versions:
        2016.12
        1.3.4BETA
codesearchnet
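A minimal call sketch, assuming `script` is a FilterScript object (or script filename) set up elsewhere by the host library; the layer indices below are simply the function defaults.

# Layer 0: mesh to color; layer 1: seed points projected onto it.
voronoi(script, target_layer=0, source_layer=1, backward=True)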
def _build(self, images): num_classes = self._num_output_classes if (len(images.get_shape()) != 4): raise base.IncompatibleShapeError("'images' must have shape [batch_size, height, width, depth].") if (self.WEIGHTS not in self._initializers): if (self._model_size == self.BASIC): self._initializers[self.WEIGHTS] = identity_kernel_initializer elif (self._model_size == self.LARGE): self._initializers[self.WEIGHTS] = noisy_identity_kernel_initializer(num_classes) else: raise ValueError(('Unrecognized model_size: %s' % self._model_size)) if (self.BIASES not in self._initializers): self._initializers[self.BIASES] = tf.zeros_initializer() if (self._model_size == self.BASIC): self._conv_modules = [self._dilated_conv_layer(num_classes, 1, True, 'conv1'), self._dilated_conv_layer(num_classes, 1, True, 'conv2'), self._dilated_conv_layer(num_classes, 2, True, 'conv3'), self._dilated_conv_layer(num_classes, 4, True, 'conv4'), self._dilated_conv_layer(num_classes, 8, True, 'conv5'), self._dilated_conv_layer(num_classes, 16, True, 'conv6'), self._dilated_conv_layer(num_classes, 1, True, 'conv7'), self._dilated_conv_layer(num_classes, 1, False, 'conv8')] elif (self._model_size == self.LARGE): self._conv_modules = [self._dilated_conv_layer((2 * num_classes), 1, True, 'conv1'), self._dilated_conv_layer((2 * num_classes), 1, True, 'conv2'), self._dilated_conv_layer((4 * num_classes), 2, True, 'conv3'), self._dilated_conv_layer((8 * num_classes), 4, True, 'conv4'), self._dilated_conv_layer((16 * num_classes), 8, True, 'conv5'), self._dilated_conv_layer((32 * num_classes), 16, True, 'conv6'), self._dilated_conv_layer((32 * num_classes), 1, True, 'conv7'), self._dilated_conv_layer(num_classes, 1, False, 'conv8')] else: raise ValueError(('Unrecognized model_size: %s' % self._model_size)) dilation_mod = sequential.Sequential(self._conv_modules, name='dilation') return dilation_mod(images)
Build dilation module.

    Args:
        images: Tensor of shape [batch_size, height, width, depth] and dtype float32.
            Represents a set of images with an arbitrary depth. Note that when using the
            default initializer, depth must equal num_output_classes.

    Returns:
        Tensor of shape [batch_size, height, width, num_output_classes] and dtype
        float32. Represents, for each image and pixel, logits for per-class predictions.

    Raises:
        IncompatibleShapeError: If images is not rank 4.
        ValueError: If model_size is not one of 'basic' or 'large'.
codesearchnet
def attach_profile_to_role(client, role_name='forrest_unicorn_role', profile_name='forrest_unicorn_profile'): current_instance_profiles = resource_action( client, action='list_instance_profiles_for_role', log_format='Found Instance Profiles for %(RoleName)s.', RoleName=role_name)['InstanceProfiles'] for profile in current_instance_profiles: if profile['InstanceProfileName'] == profile_name: LOG.info('Found Instance Profile attached to Role: %s -> %s', profile_name, role_name) break else: for remove_profile in current_instance_profiles: resource_action( client, action='remove_role_from_instance_profile', log_format='Removed Instance Profile from Role: ' '%(InstanceProfileName)s -> %(RoleName)s', InstanceProfileName=remove_profile['InstanceProfileName'], RoleName=role_name) resource_action( client, action='add_role_to_instance_profile', log_format='Added Instance Profile to Role: ' '%(InstanceProfileName)s -> %(RoleName)s', InstanceProfileName=profile_name, RoleName=role_name) return True
Attach an IAM Instance Profile _profile_name_ to Role _role_name_.

    Args:
        role_name (str): Name of Role.
        profile_name (str): Name of Instance Profile.

    Returns:
        True upon successful completion.
juraj-google-style
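A short usage sketch with boto3; the client construction is standard, and the role and profile names are just the function defaults, assumed here to already exist.

import boto3

iam_client = boto3.client('iam')
attach_profile_to_role(iam_client,
                       role_name='forrest_unicorn_role',
                       profile_name='forrest_unicorn_profile')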
def get_canonical_name(api_names: Sequence[str], deprecated_api_names: Sequence[str]) -> Optional[str]: non_deprecated_name = next((name for name in api_names if name not in deprecated_api_names), None) if non_deprecated_name: return non_deprecated_name if api_names: return api_names[0] return None
Get preferred endpoint name.

    Args:
        api_names: API names iterable.
        deprecated_api_names: Deprecated API names iterable.

    Returns:
        Returns one of the following in decreasing preference:
        - first non-deprecated endpoint
        - first endpoint
        - None
github-repos
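A worked example of the preference order; the endpoint names are made up.

# First non-deprecated name wins.
get_canonical_name(['math.abs', 'math.absolute'], ['math.abs'])  # -> 'math.absolute'
# Every name is deprecated: fall back to the first one.
get_canonical_name(['math.abs'], ['math.abs'])                   # -> 'math.abs'
# No names at all.
get_canonical_name([], [])                                       # -> None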
def compute_q(self, query_antecedent): ret = mtf.einsum( [query_antecedent, self.wq], reduced_dims=[self.query_input_dim]) if self.combine_dims: ret = mtf.replace_dimensions(ret, ret.shape.dims[-1], self.q_dims) return ret
Compute query Tensor q.

    Args:
        query_antecedent: a Tensor with dimensions {query_input_dim} + other_dims

    Returns:
        a Tensor with dimensions query_heads_dims + {key_dim} + other_dims
juraj-google-style
def evaluate(self, tensors) -> Union[ragged_tensor_value.RaggedTensorValue, sparse_tensor.SparseTensorValue, None]: if context.executing_eagerly(): return self._eval_helper(tensors) else: sess = ops.get_default_session() flattened_tensors = nest.flatten(tensors) if sess is None: with self.test_session() as sess: flattened_results = sess.run(flattened_tensors) else: flattened_results = sess.run(flattened_tensors) return nest.pack_sequence_as(tensors, flattened_results)
Evaluates tensors and returns numpy values.

    Args:
        tensors: A Tensor or a nested list/tuple of Tensors.

    Returns:
        tensors numpy values.
github-repos
def _on_connection_close(self, connection, reply_code_or_reason, reply_text=None): self._channel = None if isinstance(reply_code_or_reason, pika_errs.ConnectionClosed): reply_code = reply_code_or_reason.reply_code reply_text = reply_code_or_reason.reply_text elif isinstance(reply_code_or_reason, int): reply_code = reply_code_or_reason else: reply_code = 0 reply_text = str(reply_code_or_reason) if (reply_code == 200): _log.info('Server connection closed (%s), shutting down', reply_text) connection.ioloop.stop() else: _log.warning('Connection to %s closed unexpectedly (%d): %s', connection.params.host, reply_code, reply_text) self.call_later(1, self.reconnect)
Callback invoked when a previously-opened connection is closed.

    Args:
        connection (pika.connection.SelectConnection): The connection that was
            just closed.
        reply_code_or_reason (int|Exception): The reason why the connection was closed.
            In older versions of pika, this is the AMQP code.
        reply_text (str): The human-readable reason the connection was closed
            (only in older versions of pika).
codesearchnet
def get_assets(cls, lat, lon, begin=None, end=None): instance = cls('planetary/earth/assets') filters = {'lat': lat, 'lon': lon, 'begin': begin, 'end': end} return instance.get_resource(**filters)
Returns dates and ids of flyovers.

    Args:
        lat: latitude float
        lon: longitude float
        begin: date instance
        end: date instance

    Returns:
        json
codesearchnet
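A hedged call sketch. The class that owns this classmethod and its API-key handling are not shown in the snippet, so `Earth` below is a hypothetical stand-in, and the coordinates and dates are placeholders.

from datetime import date

flyovers = Earth.get_assets(lat=36.098592, lon=-112.097796,
                            begin=date(2018, 1, 1), end=date(2018, 12, 31))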
def generate_state_data(means, weights): x_true = np.dot(means, weights) sample = np.random.poisson(x_true) return sample.astype(float)
Generates data according to the Poisson Convex Mixture Model.

    Args:
        means (array): Cell types - genes x clusters
        weights (array): Cell cluster assignments - clusters x cells

    Returns:
        data matrix - genes x cells
juraj-google-style
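A small self-contained example with made-up dimensions (3 genes, 2 clusters, 4 cells).

import numpy as np

means = np.array([[10.0, 1.0],
                  [2.0, 8.0],
                  [5.0, 5.0]])              # genes x clusters
weights = np.array([[0.9, 0.1, 0.5, 0.0],
                    [0.1, 0.9, 0.5, 1.0]])  # clusters x cells

sample = generate_state_data(means, weights)  # genes x cells, Poisson-sampled counts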
def escape_meta(self, string, pos): if ((pos > 0) and (string[(pos - 1)] == '\\')): string = (string[:(pos - 1)] + string[pos:]) else: warnings.warn("Un-escaped meta-character: '{0}' (Escape it with a '\\')".format(string[pos]), Warning) pos += 1 meta = self.meta.search(string, pos) return (string, meta)
Checks if a meta character is escaped or else warns about it.

    If the meta character has an escape character ('\') preceding it, the meta character
    is escaped. If it does not, a warning is emitted that the user should escape it.

    Arguments:
        string (str): The relevant string in which the character was found.
        pos (int): The index of the meta character within the string.

    Returns:
        The possibly escaped string and the next meta match.
codesearchnet
def _validate_reference_field(parent: message.Message, field: descriptor.FieldDescriptor) -> None: oneof = field.message_type.oneofs[0] for i in range(proto_utils.field_content_length(parent, field)): reference = proto_utils.get_value_at_field_index(parent, field, i) reference_field_name = reference.WhichOneof(oneof.name) if reference_field_name is None: if not (reference.extension or reference.HasField('identifier') or reference.HasField('display')): raise fhir_errors.InvalidFhirError(f'`{reference.DESCRIPTOR.name}` is an empty reference.') return field_options = field.GetOptions() if not field_options.Extensions[annotations_pb2.valid_reference_type]: return if reference.HasField('uri') or reference.HasField('fragment'): return if annotation_utils.get_fhir_version(reference) == annotations_pb2.FhirVersion.DSTU2: return reference_field = reference.DESCRIPTOR.fields_by_name[reference_field_name] if annotation_utils.is_typed_reference_field(reference_field): reference_type = reference_field.GetOptions().Extensions[annotations_pb2.referenced_fhir_type] is_allowed = False for valid_type in field_options.Extensions[annotations_pb2.valid_reference_type]: if valid_type == reference_type or valid_type == 'Resource': is_allowed = True break if not is_allowed: raise fhir_errors.InvalidFhirError(f'Message `{parent.DESCRIPTOR.full_name}` contains an invalid reference type: `{reference_type}` set at: `{reference_field_name}`.')
Ensure that the provided reference field is valid.

    Args:
        parent: The containing Message.
        field: The reference field descriptor.

    Raises:
        fhir_errors.InvalidFhirError: In the event of an empty reference (no extensions,
            no identifier, no display).
github-repos
def write_file(self, file_name, vasp4_compatible=False): def _print_fortran_float(f): s = "{:.10E}".format(f) if f >= 0: return "0." + s[0] + s[2:12] + 'E' + "{:+03}".format(int(s[13:]) + 1) else: return "-." + s[1] + s[3:13] + 'E' + "{:+03}".format(int(s[14:]) + 1) with zopen(file_name, "wt") as f: p = Poscar(self.structure) comment = getattr(self, 'name', p.comment) lines = comment + "\n" lines += " 1.00000000000000\n" latt = self.structure.lattice.matrix lines += " %12.6f%12.6f%12.6f\n" % tuple(latt[0, :]) lines += " %12.6f%12.6f%12.6f\n" % tuple(latt[1, :]) lines += " %12.6f%12.6f%12.6f\n" % tuple(latt[2, :]) if not vasp4_compatible: lines += "".join(["%5s" % s for s in p.site_symbols]) + "\n" lines += "".join(["%6d" % x for x in p.natoms]) + "\n" lines += "Direct\n" for site in self.structure: lines += "%10.6f%10.6f%10.6f\n" % tuple(site.frac_coords) lines += " \n" f.write(lines) a = self.dim def write_spin(data_type): lines = [] count = 0 f.write(" {} {} {}\n".format(a[0], a[1], a[2])) for (k, j, i) in itertools.product(list(range(a[2])), list(range(a[1])), list(range(a[0]))): lines.append(_print_fortran_float(self.data[data_type][i, j, k])) count += 1 if count % 5 == 0: f.write(" " + "".join(lines) + "\n") lines = [] else: lines.append(" ") f.write(" " + "".join(lines) + " \n") f.write("".join(self.data_aug.get(data_type, []))) write_spin("total") if self.is_spin_polarized and self.is_soc: write_spin("diff_x") write_spin("diff_y") write_spin("diff_z") elif self.is_spin_polarized: write_spin("diff")
Write the VolumetricData object to a vasp compatible file.

    Args:
        file_name (str): Path to a file
        vasp4_compatible (bool): True if the format is vasp4 compatible
juraj-google-style
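A hedged usage sketch with pymatgen's CHGCAR reader, assuming this method lives on a VolumetricData subclass such as Chgcar; the file names are placeholders.

from pymatgen.io.vasp.outputs import Chgcar

chgcar = Chgcar.from_file('CHGCAR')
chgcar.write_file('CHGCAR_copy', vasp4_compatible=False)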
def get_image(width, height, want_grayscale, filepath): with ops.Graph().as_default(): with session.Session(): file_data = io_ops.read_file(filepath) channels = 1 if want_grayscale else 3 image_tensor = image_ops.decode_image(file_data, channels=channels).eval() resized_tensor = image_ops.resize_images_v2(image_tensor, (height, width)).eval() return resized_tensor
Returns an image loaded into an np.ndarray with dims [height, width, (3 or 1)].

    Args:
        width: Width to rescale the image to.
        height: Height to rescale the image to.
        want_grayscale: Whether the result should be converted to grayscale.
        filepath: Path of the image file.

    Returns:
        np.ndarray of shape (height, width, channels) where channels is 1 if
        want_grayscale is true, otherwise 3.
github-repos
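A quick illustrative call; the file path is a placeholder.

# Loads and resizes an RGB image; the result has shape (224, 224, 3).
image = get_image(width=224, height=224, want_grayscale=False,
                  filepath='/tmp/example.jpg')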
def plot_power_factor_mu(self, temp=600, output='eig', relaxation_time=1e-14, xlim=None): import matplotlib.pyplot as plt plt.figure(figsize=(9, 7)) pf = self._bz.get_power_factor(relaxation_time=relaxation_time, output=output, doping_levels=False)[temp] plt.semilogy(self._bz.mu_steps, pf, linewidth=3.0) self._plot_bg_limits() self._plot_doping(temp) if (output == 'eig'): plt.legend(['PF$_1$', 'PF$_2$', 'PF$_3$']) if (xlim is None): plt.xlim((- 0.5), (self._bz.gap + 0.5)) else: plt.xlim(xlim) plt.ylabel('Power factor, ($\\mu$W/(mK$^2$))', fontsize=30.0) plt.xlabel('E-E$_f$ (eV)', fontsize=30.0) plt.xticks(fontsize=25) plt.yticks(fontsize=25) plt.tight_layout() return plt
Plot the power factor as a function of Fermi level. Semi-log plot.

    Args:
        temp: the temperature
        xlim: a list of min and max fermi energy; by default (0, and band gap)
        relaxation_time: A relaxation time in s (1e-14 by default); the power factor is
            scaled by this value

    Returns:
        a matplotlib object
codesearchnet
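A hedged usage sketch, assuming this method belongs to a pymatgen BoltztrapPlotter built from a BoltztrapAnalyzer; the output directory is a placeholder.

from pymatgen.electronic_structure.boltztrap import BoltztrapAnalyzer
from pymatgen.electronic_structure.plotter import BoltztrapPlotter

analyzer = BoltztrapAnalyzer.from_files('boltztrap_outputs/')
plotter = BoltztrapPlotter(analyzer)
plt = plotter.plot_power_factor_mu(temp=600, output='eig', relaxation_time=1e-14)
plt.show()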
def join_room(self, room_id_or_alias): if not room_id_or_alias: raise MatrixError("No alias or room ID to join.") path = "/join/%s" % quote(room_id_or_alias) return self._send("POST", path)
Performs /join/$room_id

    Args:
        room_id_or_alias (str): The room ID or room alias to join.
juraj-google-style
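A hedged example, assuming `api` is an already-constructed Matrix client API object that exposes this method; the room identifiers are placeholders.

# Either a room alias or a raw room ID is accepted.
api.join_room('#example:matrix.org')
api.join_room('!abcdefgh1234:matrix.org')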
def get_frequency_shift( self, grid_points, temperatures=np.arange(0, 1001, 10, dtype='double'), epsilons=None, output_filename=None): if self._interaction is None: self.set_phph_interaction() if epsilons is None: _epsilons = [0.1] else: _epsilons = epsilons self._grid_points = grid_points get_frequency_shift(self._interaction, self._grid_points, self._band_indices, _epsilons, temperatures, output_filename=output_filename, log_level=self._log_level)
Frequency shift from lowest order diagram is calculated.

    Args:
        epsilons (list of float): The values used to avoid divergence. When multiple
            values are given, frequency shifts for each of them are returned.
juraj-google-style
def _should_merge(self, pytd_type, union): names = self._CONTAINER_NAMES[pytd_type] length = None for t in union.type_list: if isinstance(t, pytd_type): if length is None: length = len(t.parameters) elif length != len(t.parameters): return True elif isinstance(t, pytd.GenericType) and t.name in names: return True return False
Determine whether pytd_type values in the union should be merged.

    If the union contains the homogeneous flavor of pytd_type (e.g.,
    GenericType(base_type=tuple) when pytd_type is TupleType), or pytd_type values of
    different lengths, we want to turn all of the pytd_type values into homogeneous ones
    so that they can be merged into a single container.

    Args:
        pytd_type: The pytd type, either TupleType or CallableType.
        union: a pytd.UnionType

    Returns:
        True if the pytd_type values should be merged, False otherwise.
github-repos
def get_appliances(self, location_id): url = "https: headers = self.__gen_headers() headers["Content-Type"] = "application/json" params = { "locationId": location_id, } url = self.__append_url_params(url, params) r = requests.get(url, headers=headers) return r.json()
Get the appliances added for a specified location.

    Args:
        location_id (string): identifying string of the location

    Returns:
        list: dictionary objects containing appliances data
juraj-google-style
def get_hash(path, hash_alg="sha256"): h = hashlib.new(hash_alg) with open(path, "rb") as f: for chunk in iter(functools.partial(f.read, 4096), b''): h.update(chunk) return h.hexdigest()
Get the hash of the file at ``path``.

    I'd love to make this async, but evidently file i/o is always ready

    Args:
        path (str): the path to the file to hash.
        hash_alg (str, optional): the algorithm to use. Defaults to 'sha256'.

    Returns:
        str: the hexdigest of the hash.
juraj-google-style
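A short usage example; the path is a placeholder.

digest = get_hash('/tmp/artifact.tar.gz')                           # sha256 by default
sha512_digest = get_hash('/tmp/artifact.tar.gz', hash_alg='sha512')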