Dataset columns: code (string, 20–4.93k chars), docstring (string, 33–1.27k chars), source (string, 3 classes).
def show_bokehjs(bokehjs_action, develop=False):
    print()
    if develop:
        print('Installed Bokeh for DEVELOPMENT:')
    else:
        print('Installed Bokeh:')
    if bokehjs_action in ['built', 'installed']:
        print(' - using %s built BokehJS from bokehjs/build\n'
              % (bright(yellow('NEWLY')) if bokehjs_action == 'built' else bright(yellow('PREVIOUSLY'))))
    else:
        print(" - using %s BokehJS, located in 'bokeh.server.static'\n"
              % bright(yellow('PACKAGED')))
    print()
Print a useful report after setuptools output describing where and how BokehJS is installed.

Args:
    bokehjs_action (str): one of 'built', 'installed', or 'packaged';
        how (or if) BokehJS was installed into the python source tree
    develop (bool, optional): whether the command was for "develop" mode
        (default: False)

Returns:
    None
codesearchnet
def parse_unique_urlencoded(content):
    urlencoded_params = urllib.parse.parse_qs(content)
    params = {}
    for key, value in six.iteritems(urlencoded_params):
        if len(value) != 1:
            msg = ('URL-encoded content contains a repeated value:'
                   '%s -> %s' % (key, ', '.join(value)))
            raise ValueError(msg)
        params[key] = value[0]
    return params
Parses unique key-value parameters from urlencoded content. Args: content: string, URL-encoded key-value pairs. Returns: dict, The key-value pairs from ``content``. Raises: ValueError: if one of the keys is repeated.
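A minimal usage sketch of the function above (it relies on the urllib/six imports its module already provides):

    parse_unique_urlencoded('code=abc123&state=xyz')
    # -> {'code': 'abc123', 'state': 'xyz'}
    # A repeated key such as 'a=1&a=2' raises ValueError instead of being silently merged.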
juraj-google-style
def get_decomposition_energy(self, entry, pH, V):
    if self._multielement and not isinstance(entry, MultiEntry):
        possible_entries = self._generate_multielement_entries(
            self._filtered_entries, forced_include=[entry])
        if entry.phase_type == 'solid':
            possible_entries = [
                e for e in possible_entries if e.phase_type.count('Solid') == 1]
        possible_energies = [
            e.normalized_energy_at_conditions(pH, V) for e in possible_entries]
    else:
        possible_energies = [entry.normalized_energy_at_conditions(pH, V)]
    min_energy = np.min(possible_energies, axis=0)
    hull = self.get_hull_energy(pH, V)
    return min_energy - hull
Finds the decomposition energy of an entry relative to the most stable entries.

Args:
    entry (PourbaixEntry): PourbaixEntry corresponding to the compound to find
        the decomposition for
    pH (float): pH at which to find the decomposition
    V (float): voltage at which to find the decomposition

Returns:
    Decomposition energy for the entry, i.e. the energy above the Pourbaix hull
    at the given pH and V.
codesearchnet
def _ReadIntegerDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False): definition_object = self._ReadFixedSizeDataTypeDefinition(definitions_registry, definition_values, data_types.IntegerDefinition, definition_name, self._SUPPORTED_ATTRIBUTES_INTEGER, is_member=is_member, supported_size_values=(1, 2, 4, 8)) attributes = definition_values.get('attributes', None) if attributes: format_attribute = attributes.get('format', definitions.FORMAT_SIGNED) if (format_attribute not in self._INTEGER_FORMAT_ATTRIBUTES): error_message = 'unsupported format attribute: {0!s}'.format(format_attribute) raise errors.DefinitionReaderError(definition_name, error_message) definition_object.format = format_attribute return definition_object
Reads an integer data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: IntegerDataTypeDefinition: integer data type definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect.
codesearchnet
def load(self, *modules): for module in modules: if isinstance(module, six.string_types): try: module = get_object(module) except Exception as e: self.errors[module] = e continue self.modules[module.__package__] = module for (loader, module_name, is_pkg) in pkgutil.walk_packages( module.__path__ ): full_name = "{}.{}".format(_package(module), module_name) try: self.modules[full_name] = get_object(full_name) if is_pkg: self.load(self.modules[full_name]) except Exception as e: self.errors[full_name] = e
Load one or more modules. Args: modules: Either a string full path to a module or an actual module object.
juraj-google-style
def case(store, institute_obj, case_obj): case_obj['individual_ids'] = [] for individual in case_obj['individuals']: try: sex = int(individual.get('sex', 0)) except ValueError as err: sex = 0 individual['sex_human'] = SEX_MAP[sex] pheno_map = PHENOTYPE_MAP if case_obj.get('track', 'rare') == 'cancer': pheno_map = CANCER_PHENOTYPE_MAP individual['phenotype_human'] = pheno_map.get(individual['phenotype']) case_obj['individual_ids'].append(individual['individual_id']) case_obj['assignees'] = [store.user(user_email) for user_email in case_obj.get('assignees', [])] suspects = [store.variant(variant_id) or variant_id for variant_id in case_obj.get('suspects', [])] causatives = [store.variant(variant_id) or variant_id for variant_id in case_obj.get('causatives', [])] distinct_genes = set() case_obj['panel_names'] = [] for panel_info in case_obj.get('panels', []): if not panel_info.get('is_default'): continue panel_obj = store.gene_panel(panel_info['panel_name'], version=panel_info.get('version')) distinct_genes.update([gene['hgnc_id'] for gene in panel_obj.get('genes', [])]) full_name = "{} ({})".format(panel_obj['display_name'], panel_obj['version']) case_obj['panel_names'].append(full_name) case_obj['default_genes'] = list(distinct_genes) for hpo_term in itertools.chain(case_obj.get('phenotype_groups', []), case_obj.get('phenotype_terms', [])): hpo_term['hpo_link'] = ("http: .format(hpo_term['phenotype_id'])) o_collaborators = [] for collab_id in case_obj['collaborators']: if collab_id != case_obj['owner'] and store.institute(collab_id): o_collaborators.append(store.institute(collab_id)) case_obj['o_collaborators'] = [(collab_obj['_id'], collab_obj['display_name']) for collab_obj in o_collaborators] irrelevant_ids = ('cust000', institute_obj['_id']) collab_ids = [(collab['_id'], collab['display_name']) for collab in store.institutes() if (collab['_id'] not in irrelevant_ids) and (collab['_id'] not in case_obj['collaborators'])] events = list(store.events(institute_obj, case=case_obj)) for event in events: event['verb'] = VERBS_MAP[event['verb']] case_obj['clinvar_variants'] = store.case_to_clinVars(case_obj['_id']) pheno_groups = institute_obj.get('phenotype_groups') or PHENOTYPE_GROUPS data = { 'status_class': STATUS_MAP.get(case_obj['status']), 'other_causatives': store.check_causatives(case_obj=case_obj), 'comments': store.events(institute_obj, case=case_obj, comments=True), 'hpo_groups': pheno_groups, 'events': events, 'suspects': suspects, 'causatives': causatives, 'collaborators': collab_ids, 'cohort_tags': COHORT_TAGS, 'mme_nodes': current_app.mme_nodes, } return data
Preprocess a single case.

Prepare the case to be displayed in the case view.

Args:
    store(adapter.MongoAdapter)
    institute_obj(models.Institute)
    case_obj(models.Case)

Returns:
    data(dict): context data used to render the case view
juraj-google-style
def convert_squeeze(params, w_name, scope_name, inputs, layers, weights, names): print('Converting squeeze ...') if len(params['axes']) > 1: raise AssertionError('Cannot convert squeeze by multiple dimensions') def target_layer(x, axis=int(params['axes'][0])): import tensorflow as tf return tf.squeeze(x, axis=axis) lambda_layer = keras.layers.Lambda(target_layer) layers[scope_name] = lambda_layer(layers[inputs[0]])
Convert squeeze operation. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
juraj-google-style
def Render(self):
    xs = [self.xs[0]]
    ps = [0.0]
    for i, p in enumerate(self.ps):
        xs.append(self.xs[i])
        ps.append(p)
        try:
            xs.append(self.xs[i + 1])
            ps.append(p)
        except IndexError:
            pass
    return (xs, ps)
Generates a sequence of points suitable for plotting. An empirical CDF is a step function; linear interpolation can be misleading. Returns: tuple of (xs, ps)
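A worked illustration of the step-function output, assuming a CDF-like object (here called cdf) with xs = [1, 2, 3] and ps = [0.33, 0.67, 1.0]:

    xs, ps = cdf.Render()
    # xs == [1, 1, 2, 2, 3, 3]
    # ps == [0.0, 0.33, 0.33, 0.67, 0.67, 1.0]
    # Each x appears twice, once with the previous probability and once with its own,
    # so plotting (xs, ps) draws vertical jumps instead of misleading linear interpolation.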
codesearchnet
def gradient_tensors(self): return self._gradient_tensors
Get the gradient tensors that this object is aware of.

Returns:
    A dict mapping x-tensor names to gradient tensor objects. x-tensor refers
    to the tensors on the denominator of the differentiation.
github-repos
def RemoveEventAttribute(self, attribute_name):
    if attribute_name not in self._extra_event_attributes:
        raise KeyError('Event attribute: {0:s} not set'.format(attribute_name))
    del self._extra_event_attributes[attribute_name]
Removes an attribute from being set on all events produced. Args: attribute_name (str): name of the attribute to remove. Raises: KeyError: if the event attribute is not set.
juraj-google-style
def handle_encodnig(html):
    encoding = _get_encoding(
        dhtmlparser.parseString(html.split('</head>')[0]))
    if encoding == 'utf-8':
        return html
    return html.decode(encoding).encode('utf-8')
Look for the encoding declared in the given `html` and try to convert `html` to UTF-8.

Args:
    html (str): HTML code as string.

Returns:
    str: HTML code encoded in UTF-8.
codesearchnet
def list_merge(list_a, list_b):
    result = []
    for item in list_a:
        if item not in result:
            result.append(item)
    for item in list_b:
        if item not in result:
            result.append(item)
    return result
Merge two lists without duplicating items Args: list_a: list list_b: list Returns: New list with deduplicated items from list_a and list_b
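A quick usage sketch of the function above:

    list_merge([1, 2, 2, 3], [3, 4, 1])
    # -> [1, 2, 3, 4]
    # Items keep their order of first appearance; duplicates within and across
    # the two inputs are dropped.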
juraj-google-style
def pop(self, rebuild=True): layer = self._layers.pop() self.built = False self._functional = None if rebuild: self._maybe_rebuild() return layer
Removes the last layer in the model. Args: rebuild: `bool`. Whether to rebuild the model after removing the layer. Defaults to `True`. Returns: layer: layer instance.
github-repos
def add_input(self, input_):
    if not isinstance(input_, Input):
        raise TypeError('`input_` must be a Input instance')
    self.inputs.append(input_)
Adds an input to a Transaction's list of inputs. Args: input_ (:class:`~bigchaindb.common.transaction. Input`): An Input to be added to the Transaction.
codesearchnet
def wc(filename, contents, parsed=None, is_jekyll=False): if is_jekyll: fmt = 'jekyll' else: fmt = 'md/txt' body = parsed.strip() if parsed else contents.strip() words = re.sub(r'\s+', ' ', body, re.MULTILINE) for punctuation in INTERSTITIAL_PUNCTUATION: words = re.sub(punctuation, ' ', words) punct = re.compile('[^\w\s]', re.U) words = punct.sub('', words) real_characters = re.sub(r'\s', '', words) paragraphs = [1 if len(x) == 0 else 0 for x in contents.strip().splitlines()] for index, paragraph in enumerate(paragraphs): if paragraph == 1 and paragraphs[index + 1] == 1: paragraphs[index] = 0 return { 'counts': { 'file': filename, 'type': fmt, 'paragraphs': sum(paragraphs) + 1, 'words': len(re.split('\s+', words)), 'characters_real': len(real_characters), 'characters_total': len(words), } }
Count the words, characters, and paragraphs in a string.

Args:
    contents: the original string to count
    filename (optional): the filename as provided to the CLI
    parsed (optional): a parsed string, expected to be plaintext only
    is_jekyll: whether the original contents were from a Jekyll file

Returns:
    An object containing the various counts
juraj-google-style
def iter_packages(name, range_=None, paths=None): entries = _get_families(name, paths) seen = set() for (repo, family_resource) in entries: for package_resource in repo.iter_packages(family_resource): key = (package_resource.name, package_resource.version) if (key in seen): continue seen.add(key) if range_: if isinstance(range_, basestring): range_ = VersionRange(range_) if (package_resource.version not in range_): continue (yield Package(package_resource))
Iterate over `Package` instances, in no particular order. Packages of the same name and version earlier in the search path take precedence - equivalent packages later in the paths are ignored. Packages are not returned in any specific order. Args: name (str): Name of the package, eg 'maya'. range_ (VersionRange or str): If provided, limits the versions returned to those in `range_`. paths (list of str, optional): paths to search for packages, defaults to `config.packages_path`. Returns: `Package` iterator.
codesearchnet
def resource(self, resource_type): try: resource = getattr(self.resources, self.safe_rt(resource_type))(self) except AttributeError: self._resources(True) resource = getattr(self.resources, self.safe_rt(resource_type))(self) return resource
Get instance of Resource Class with dynamic type. Args: resource_type: The resource type name (e.g Adversary, User Agent, etc). Returns: (object): Instance of Resource Object child class.
codesearchnet
def process(self, element, *args, **kwargs):
    (text, uid), prediction = element
    embedding = prediction.inference
    l2_norm = np.linalg.norm(embedding)
    yield {'text': text, 'id': uid, 'embedding': embedding / l2_norm}
For each element in the input PCollection, normalize the embedding vector and
yield a new element with the normalized embedding added.

Args:
    element: The element to be processed.
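The normalization itself is plain L2 scaling; a standalone numpy sketch (the element/prediction wrapping above is assumed):

    import numpy as np

    embedding = np.array([3.0, 4.0])
    normalized = embedding / np.linalg.norm(embedding)
    # -> array([0.6, 0.8]); the result always has unit L2 norm.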
github-repos
def easeOutBack(n, s=1.70158):
    _checkRange(n)
    n = n - 1
    return (n * n) * ((s + 1) * n + s) + 1
A tween function that overshoots the destination a little and then backs into the destination.

Args:
    n (float): The time progress, starting at 0.0 and ending at 1.0.
    s (float, optional): The amount of overshoot; defaults to 1.70158.

Returns:
    (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for
    passing to getPointOnLine().
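A few sample values of the function above, to make the overshoot concrete:

    easeOutBack(0.0)  # -> 0.0
    easeOutBack(0.5)  # -> about 1.09, already past the destination
    easeOutBack(1.0)  # -> 1.0, settled back on the destination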
codesearchnet
def swapdim(P, dim1=1, dim2=0): if (not isinstance(P, Poly)): return numpy.swapaxes(P, dim1, dim2) dim = P.dim shape = P.shape dtype = P.dtype if (dim1 == dim2): return P m = max(dim1, dim2) if (P.dim <= m): P = chaospy.poly.dimension.setdim(P, (m + 1)) dim = (m + 1) A = {} for key in P.keys: val = P.A[key] key = list(key) (key[dim1], key[dim2]) = (key[dim2], key[dim1]) A[tuple(key)] = val return Poly(A, dim, shape, dtype)
Swap the dim between two variables. Args: P (Poly): Input polynomial. dim1 (int): First dim dim2 (int): Second dim. Returns: (Poly): Polynomial with swapped dimensions. Examples: >>> x,y = variable(2) >>> P = x**4-y >>> print(P) q0^4-q1 >>> print(swapdim(P)) q1^4-q0
codesearchnet
def enable_collective_ops(self, server_def): if not server_def: raise ValueError('server_def is None.') self._collective_ops_server_def = server_def if self._context_handle is not None: logging.warning('Enabling collective ops after program startup may cause error when accessing previously created tensors.') with self._initialize_lock: assert self._initialized server_def_str = self._collective_ops_server_def.SerializeToString() pywrap_tfe.TFE_EnableCollectiveOps(self._context_handle, server_def_str) self._initialize_logical_devices() self._clear_caches()
Enable distributed collective ops with an appropriate server_def. Args: server_def: A tensorflow::ServerDef proto. Enables execution on remote devices. Raises: ValueError: if server_def is None. RuntimeError: if this method is not called at program startup.
github-repos
def _get_generated_ngrams(banned_ngrams, prev_input_ids, ngram_size, cur_len):
    start_idx = cur_len + 1 - ngram_size
    ngram_idx = tuple(prev_input_ids[start_idx:cur_len].tolist())
    return banned_ngrams.get(ngram_idx, [])
Determines the banned tokens for the current hypothesis based on previously generated n-grams.

Args:
    banned_ngrams (`dict`): A dictionary containing previously generated n-grams
        for each hypothesis.
    prev_input_ids (`torch.Tensor`): Generated token ids for the current hypothesis.
    ngram_size (`int`): The number of sequential tokens taken as a group which may
        only occur once before being banned.
    cur_len (`int`): The current length of the token sequences for which the
        n-grams are being checked.

Returns:
    List of tokens that are banned.
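A small illustration of the lookup, with a hypothetical banned-n-gram dict keyed by (ngram_size - 1)-token prefixes:

    import torch

    banned_ngrams = {(7, 3): [2, 4]}       # prefix (7, 3) was already followed by 2 and 4
    prev_input_ids = torch.tensor([5, 7, 3])
    _get_generated_ngrams(banned_ngrams, prev_input_ids, ngram_size=3, cur_len=3)
    # -> [2, 4]  (tokens banned as the next step for this hypothesis)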
github-repos
def _assert_validators(self, validators): for validator in sorted( validators, key=lambda validator: validator.insertion_index): try: validator.verify(self) except _exceptions.ValidationError as e: message = validator.print_flags_with_values(self) raise _exceptions.IllegalFlagValueError('%s: %s' % (message, str(e)))
Asserts if all validators in the list are satisfied. It asserts validators in the order they were created. Args: validators: Iterable(validators.Validator), validators to be verified. Raises: AttributeError: Raised if validators work with a non-existing flag. IllegalFlagValueError: Raised if validation fails for at least one validator.
juraj-google-style
def create_parser(default_name: str) -> argparse.ArgumentParser: argparser = argparse.ArgumentParser(fromfile_prefix_chars='@') argparser.add_argument('-H', '--host', help='Host to which the app binds. [%(default)s]', default='0.0.0.0') argparser.add_argument('-p', '--port', help='Port to which the app binds. [%(default)s]', default=5000, type=int) argparser.add_argument('-o', '--output', help='Logging output. [%(default)s]') argparser.add_argument('-n', '--name', help='Service name. This will be used as prefix for all endpoints. [%(default)s]', default=default_name) argparser.add_argument('--debug', help='Run the app in debug mode. [%(default)s]', action='store_true') argparser.add_argument('--eventbus-host', help='Hostname at which the eventbus can be reached [%(default)s]', default='eventbus') argparser.add_argument('--eventbus-port', help='Port at which the eventbus can be reached [%(default)s]', default=5672, type=int) return argparser
Creates the default brewblox_service ArgumentParser. Service-agnostic arguments are added. The parser allows calling code to add additional arguments before using it in create_app() Args: default_name (str): default value for the --name commandline argument. Returns: argparse.ArgumentParser: a Python ArgumentParser with defaults set.
juraj-google-style
def set_pair(self, term1, term2, value, **kwargs):
    key = self.key(term1, term2)
    self.keys.update([term1, term2])
    self.pairs[key] = value
Set the value for a pair of terms. Args: term1 (str) term2 (str) value (mixed)
codesearchnet
def __init__(self, coords): self._coords = np.array(coords) self.simplex_dim, self.space_dim = self._coords.shape self.origin = self._coords[-1] if self.simplex_dim == self.space_dim + 1: self.T = self._coords[:-1] - self.origin self.T_inv = np.linalg.inv(self.T)
Initializes a Simplex from vertex coordinates. Args: coords ([[float]]): Coords of the vertices of the simplex. E.g., [[1, 2, 3], [2, 4, 5], [6, 7, 8], [8, 9, 10].
juraj-google-style
def summary(self, fmt=None, initial=True, default=''): if (default and (not self.__dict__)): return default if (fmt == ''): return default keys = [k for (k, v) in self.__dict__.items() if (v is not '')] f = (fmt or (('{' + '}, {'.join(keys)) + '}')) try: summary = CustomFormatter().format(f, **self.__dict__) except KeyError as e: raise ComponentError(('Error building summary, ' + str(e))) if (summary and initial and (not fmt)): summary = (summary[0].upper() + summary[1:]) return summary
Given a format string, return a summary description of a component.

Args:
    component (dict): A component dictionary.
    fmt (str): Describes the format with a string. If no format is given, you
        will just get a list of attributes. If you give the empty string (''),
        you'll get `default` back. By default this gives you the empty string,
        effectively suppressing the summary.
    initial (bool): Whether to capitalize the first letter. Default is True.
    default (str): What to give if there's no component defined.

Returns:
    str: A summary string.

Example:
    r = Component({'colour': 'Red', 'grainsize': 'VF-F', 'lithology': 'Sandstone'})
    r.summary()  --> 'Red, vf-f, sandstone'
codesearchnet
def script_dir_plus_file(filename, pyobject, follow_symlinks=True): return join(script_dir(pyobject, follow_symlinks), filename)
Get current script's directory and then append a filename Args: filename (str): Filename to append to directory path pyobject (Any): Any Python object in the script follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True. Returns: str: Current script's directory and with filename appended
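A hypothetical usage sketch of the function above (main is just any object defined in the calling script):

    config_path = script_dir_plus_file('config.yml', main)
    # -> e.g. '/path/to/the/calling/script/config.yml'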
juraj-google-style
def join(self, *data: Iterable[MaybeBytes]) -> bytes: return self.how.join([bytes(item) for item in chain(*data)])
Iterable join on a delimiter. Args: data: Iterable of items to join. Examples: :: BytesFormat(b' ').join([b'one', b'two', b'three'])
codesearchnet
def export(self, top=True): out = [] if top: out.append(self._internal_name) out.append(self._to_str(self.year)) out.append(self._to_str(self.month)) out.append(self._to_str(self.day)) out.append(self._to_str(self.hour)) out.append(self._to_str(self.minute)) out.append(self._to_str(self.data_source_and_uncertainty_flags)) out.append(self._to_str(self.dry_bulb_temperature)) out.append(self._to_str(self.dew_point_temperature)) out.append(self._to_str(self.relative_humidity)) out.append(self._to_str(self.atmospheric_station_pressure)) out.append(self._to_str(self.extraterrestrial_horizontal_radiation)) out.append(self._to_str(self.extraterrestrial_direct_normal_radiation)) out.append(self._to_str(self.horizontal_infrared_radiation_intensity)) out.append(self._to_str(self.global_horizontal_radiation)) out.append(self._to_str(self.direct_normal_radiation)) out.append(self._to_str(self.diffuse_horizontal_radiation)) out.append(self._to_str(self.global_horizontal_illuminance)) out.append(self._to_str(self.direct_normal_illuminance)) out.append(self._to_str(self.diffuse_horizontal_illuminance)) out.append(self._to_str(self.zenith_luminance)) out.append(self._to_str(self.wind_direction)) out.append(self._to_str(self.wind_speed)) out.append(self._to_str(self.total_sky_cover)) out.append(self._to_str(self.opaque_sky_cover)) out.append(self._to_str(self.visibility)) out.append(self._to_str(self.ceiling_height)) out.append(self._to_str(self.present_weather_observation)) out.append(self._to_str(self.present_weather_codes)) out.append(self._to_str(self.precipitable_water)) out.append(self._to_str(self.aerosol_optical_depth)) out.append(self._to_str(self.snow_depth)) out.append(self._to_str(self.days_since_last_snowfall)) out.append(self._to_str(self.albedo)) out.append(self._to_str(self.liquid_precipitation_depth)) out.append(self._to_str(self.liquid_precipitation_quantity)) return ",".join(out)
Exports object to its string representation.

Args:
    top (bool): if True, appends `internal_name` before values. All non-list
        objects should be exported with top=True; all list objects that are
        embedded as fields in list objects should be exported with top=False.

Returns:
    str: The object's string representation.
juraj-google-style
def ParseDestList(self, parser_mediator, olecf_item): header_map = self._GetDataTypeMap('dest_list_header') try: (header, entry_offset) = self._ReadStructureFromFileObject(olecf_item, 0, header_map) except (ValueError, errors.ParseError) as exception: raise errors.UnableToParseFile('Unable to parse DestList header with error: {0!s}'.format(exception)) if (header.format_version == 1): entry_map = self._GetDataTypeMap('dest_list_entry_v1') elif (header.format_version in (3, 4)): entry_map = self._GetDataTypeMap('dest_list_entry_v3') else: parser_mediator.ProduceExtractionWarning('unsupported format version: {0:d}.'.format(header.format_version)) return while (entry_offset < olecf_item.size): try: (entry, entry_data_size) = self._ReadStructureFromFileObject(olecf_item, entry_offset, entry_map) except (ValueError, errors.ParseError) as exception: raise errors.UnableToParseFile('Unable to parse DestList entry with error: {0!s}'.format(exception)) display_name = 'DestList entry at offset: 0x{0:08x}'.format(entry_offset) try: droid_volume_identifier = self._ParseDistributedTrackingIdentifier(parser_mediator, entry.droid_volume_identifier, display_name) except (TypeError, ValueError) as exception: droid_volume_identifier = '' parser_mediator.ProduceExtractionWarning('unable to read droid volume identifier with error: {0!s}'.format(exception)) try: droid_file_identifier = self._ParseDistributedTrackingIdentifier(parser_mediator, entry.droid_file_identifier, display_name) except (TypeError, ValueError) as exception: droid_file_identifier = '' parser_mediator.ProduceExtractionWarning('unable to read droid file identifier with error: {0!s}'.format(exception)) try: birth_droid_volume_identifier = self._ParseDistributedTrackingIdentifier(parser_mediator, entry.birth_droid_volume_identifier, display_name) except (TypeError, ValueError) as exception: birth_droid_volume_identifier = '' parser_mediator.ProduceExtractionWarning('unable to read birth droid volume identifier with error: {0:s}'.format(exception)) try: birth_droid_file_identifier = self._ParseDistributedTrackingIdentifier(parser_mediator, entry.birth_droid_file_identifier, display_name) except (TypeError, ValueError) as exception: birth_droid_file_identifier = '' parser_mediator.ProduceExtractionWarning('unable to read birth droid file identifier with error: {0:s}'.format(exception)) if (entry.last_modification_time == 0): date_time = dfdatetime_semantic_time.SemanticTime('Not set') else: date_time = dfdatetime_filetime.Filetime(timestamp=entry.last_modification_time) event_data = AutomaticDestinationsDestListEntryEventData() event_data.birth_droid_file_identifier = birth_droid_file_identifier event_data.birth_droid_volume_identifier = birth_droid_volume_identifier event_data.droid_file_identifier = droid_file_identifier event_data.droid_volume_identifier = droid_volume_identifier event_data.entry_number = entry.entry_number event_data.hostname = entry.hostname.rstrip('\x00') event_data.offset = entry_offset event_data.path = entry.path.rstrip('\x00') event_data.pin_status = entry.pin_status event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_MODIFICATION) parser_mediator.ProduceEventWithEventData(event, event_data) entry_offset += entry_data_size
Parses the DestList OLECF item. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. olecf_item (pyolecf.item): OLECF item. Raises: UnableToParseFile: if the DestList cannot be parsed.
codesearchnet
def preprocess(self, xs): return [self.nesting_field.preprocess(x) for x in super(NestedField, self).preprocess(xs)]
Preprocess a single example. Firstly, tokenization and the supplied preprocessing pipeline is applied. Since this field is always sequential, the result is a list. Then, each element of the list is preprocessed using ``self.nesting_field.preprocess`` and the resulting list is returned. Arguments: xs (list or str): The input to preprocess. Returns: list: The preprocessed list.
codesearchnet
def write(self, name, **data): data["name"] = name if not ("timestamp" in data): data["timestamp"] = datetime.utcnow() try: self.client.index( index=self.get_index(), doc_type=self.doc_type, id=None, body=data ) except TransportError as exc: logger.warning('writing metric %r failure %r', data, exc)
Write the metric to elasticsearch Args: name (str): The name of the metric to write data (dict): Additional data to store with the metric
juraj-google-style
def call(self, hidden_states: tf.Tensor, prev_group_token: tf.Tensor | None=None, output_attentions: bool=False, training: bool=False) -> Tuple[tf.Tensor]: if self.with_group_token: group_token = tf.tile(self.group_token, multiples=(shape_list(hidden_states)[0], 1, 1)) if self.group_projector is not None: for layer in self.group_projector: prev_group_token = layer(prev_group_token) group_token = group_token + prev_group_token else: group_token = None x = hidden_states cat_x = self.concat_x(x, group_token) for layer in self.layers: layer_out = layer(cat_x, attention_mask=None, causal_attention_mask=None, output_attentions=None) cat_x = layer_out[0] x, group_token = self.split_x(cat_x) attention = None if self.downsample is not None: x, attention = self.downsample(x, group_token) outputs = (x, group_token) if output_attentions: outputs = outputs + (attention,) return outputs
Args: hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`tf.Tensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. `(config.encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the grouping tensors of Grouping block.
github-repos
def __init__(self, num_packs=1): if num_packs <= 0: raise ValueError('num_packs must be greater than zero.') self.num_packs = num_packs
Initialize the _ConcatAndSplitPacker object. Args: num_packs: specifies the number of split packs that will be formed. Raises: ValueError: if num_packs is not greater than 0.
github-repos
def requires_swimlane_version(min_version=None, max_version=None): if ((min_version is None) and (max_version is None)): raise ValueError('Must provide either min_version, max_version, or both') if (min_version and max_version and (compare_versions(min_version, max_version) < 0)): raise ValueError('min_version must be <= max_version ({}, {})'.format(min_version, max_version)) def decorator(func): @functools.wraps(func) def wrapper(self, *args, **kwargs): swimlane = self._swimlane if (min_version and (compare_versions(min_version, swimlane.build_version, True) < 0)): raise InvalidSwimlaneBuildVersion(swimlane, min_version, max_version) if (max_version and (compare_versions(swimlane.build_version, max_version, True) < 0)): raise InvalidSwimlaneBuildVersion(swimlane, min_version, max_version) return func(self, *args, **kwargs) return wrapper return decorator
Decorator for SwimlaneResolver methods verifying Swimlane server build version is within a given inclusive range Raises: InvalidVersion: Raised before decorated method call if Swimlane server version is out of provided range ValueError: If neither min_version or max_version were provided, or if those values conflict (2.15 < 2.14)
codesearchnet
def remove_metric(self, metric_name): with self._lock: metric = self._metrics.pop(metric_name, None) if metric: for reporter in self._reporters: reporter.metric_removal(metric) return metric
Remove a metric if it exists and return it. Return None otherwise. If a metric is removed, `metric_removal` will be invoked for each reporter. Arguments: metric_name (MetricName): The name of the metric Returns: KafkaMetric: the removed `KafkaMetric` or None if no such metric exists
juraj-google-style
def set_examples(self, examples): self.store('examples', examples) if len(examples) > 0: self.store('are_sequence_examples', isinstance(examples[0], tf.train.SequenceExample)) return self
Sets the examples to be displayed in WIT. Args: examples: List of example protos. Returns: self, in order to enabled method chaining.
juraj-google-style
def get_mem_usage(**kwargs): try: con_mem_data_list = con._client.get_memory(session=kwargs['con']._session, memory_level=kwargs['mem_type']) usedram = 0 freeram = 0 for con_mem_data in con_mem_data_list: page_size = con_mem_data.page_size node_memory_data_list = con_mem_data.node_memory_data for node_memory_data in node_memory_data_list: ram = (node_memory_data.num_pages * page_size) is_free = node_memory_data.is_free if is_free: freeram += ram else: usedram += ram totalallocated = (usedram + freeram) if (totalallocated > 0): totalallocated = round(((totalallocated / 1024) / 1024), 1) usedram = round(((usedram / 1024) / 1024), 1) freeram = round(((freeram / 1024) / 1024), 1) ramusage = {} ramusage['usedram'] = usedram ramusage['freeram'] = freeram ramusage['totalallocated'] = totalallocated ramusage['errormessage'] = '' except Exception as e: errormessage = ('Get memory failed with error: ' + str(e)) logging.error(errormessage) ramusage['errormessage'] = errormessage return ramusage
Calculates memory statistics from mapd_server _client.get_memory call Kwargs: con(class 'pymapd.connection.Connection'): Mapd connection mem_type(str): [gpu, cpu] Type of memory to gather metrics for Returns: ramusage(dict)::: usedram(float): Amount of memory (in MB) used freeram(float): Amount of memory (in MB) free totalallocated(float): Total amount of memory (in MB) allocated errormessage(str): Error if returned by get_memory call rawdata(list): Raw data returned from get_memory call
codesearchnet
def select_inputs(self, address, nfees, ntokens, min_confirmations=6): unspents = self._t.get(address, min_confirmations=min_confirmations)['unspents'] unspents = [u for u in unspents if u not in self._spents.queue] if len(unspents) == 0: raise Exception("No spendable outputs found") fees = [u for u in unspents if u['amount'] == self.fee][:nfees] tokens = [u for u in unspents if u['amount'] == self.token][:ntokens] if len(fees) != nfees or len(tokens) != ntokens: raise SpoolFundsError("Not enough outputs to spend. Refill your wallet") if self._spents.qsize() > self.SPENTS_QUEUE_MAXSIZE - (nfees + ntokens): [self._spents.get() for i in range(self._spents.qsize() + nfees + ntokens - self.SPENTS_QUEUE_MAXSIZE)] [self._spents.put(fee) for fee in fees] [self._spents.put(token) for token in tokens] return fees + tokens
Selects the inputs for the spool transaction.

Args:
    address (str): bitcoin address to select inputs for
    nfees (int): number of fees
    ntokens (int): number of tokens
    min_confirmations (Optional[int]): minimum number of required
        confirmations; defaults to 6

Returns:
    The list of selected unspent outputs (fee outputs followed by token outputs).

Raises:
    Exception: if no spendable outputs are found.
    SpoolFundsError: if there are not enough outputs to spend.
juraj-google-style
def _compile_graphql_generic(language, lowering_func, query_emitter_func, schema, graphql_string, type_equivalence_hints, compiler_metadata): ir_and_metadata = graphql_to_ir(schema, graphql_string, type_equivalence_hints=type_equivalence_hints) lowered_ir_blocks = lowering_func(ir_and_metadata.ir_blocks, ir_and_metadata.query_metadata_table, type_equivalence_hints=type_equivalence_hints) query = query_emitter_func(lowered_ir_blocks, compiler_metadata) return CompilationResult(query=query, language=language, output_metadata=ir_and_metadata.output_metadata, input_metadata=ir_and_metadata.input_metadata)
Compile the GraphQL input, lowering and emitting the query using the given functions. Args: language: string indicating the target language to compile to. lowering_func: Function to lower the compiler IR into a compatible form for the target language backend. query_emitter_func: Function that emits a query in the target language from the lowered IR. schema: GraphQL schema object describing the schema of the graph to be queried. graphql_string: the GraphQL query to compile to the target language, as a string. type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union. compiler_metadata: optional target specific metadata for usage by the query_emitter_func. Returns: a CompilationResult object
codesearchnet
def object_metadata(save_path): reader = py_checkpoint_reader.NewCheckpointReader(save_path) try: object_graph_string = reader.get_tensor(base.OBJECT_GRAPH_PROTO_KEY) except errors_impl.NotFoundError: raise ValueError(f'The specified checkpoint "{save_path}" does not appear to be object-based (saved with TF2) since it is missing the key "{base.OBJECT_GRAPH_PROTO_KEY}". Likely it was created with the TF1 name-based saver and does not contain an object dependency graph.') object_graph_proto = trackable_object_graph_pb2.TrackableObjectGraph() object_graph_proto.ParseFromString(object_graph_string) return object_graph_proto
Retrieves information about the objects in a checkpoint. Example usage: ```python object_graph = tf.contrib.checkpoint.object_metadata( tf.train.latest_checkpoint(checkpoint_directory)) ckpt_variable_names = set() for node in object_graph.nodes: for attribute in node.attributes: ckpt_variable_names.add(attribute.full_name) ``` Args: save_path: The path to the checkpoint, as returned by `save` or `tf.train.latest_checkpoint`. Returns: A parsed `tf.contrib.checkpoint.TrackableObjectGraph` protocol buffer. Raises: ValueError: If an object graph was not found in the checkpoint.
github-repos
def get_individuals(variant_source, case_lines=None, case_type='ped', variant_mode='vcf'): individuals = [] ind_dict ={} if variant_mode == 'vcf': head = get_header(variant_source) for index, ind in enumerate(head.individuals): ind_dict[ind] = index if case_lines: family_parser = FamilyParser(case_lines, family_type=case_type) families = family_parser.families logger.debug("Found families {0}".format( ','.join(list(families.keys())))) if len(families) != 1: logger.error("Only one family can be used with vcf adapter") raise IOError case_id = list(families.keys())[0] logger.debug("Family used in analysis: {0}".format(case_id)) for ind_id in family_parser.individuals: ind = family_parser.individuals[ind_id] logger.info("Found individual {0}".format(ind.individual_id)) try: individual = Individual( ind_id=ind_id, case_id=case_id, mother=ind.mother, father=ind.father, sex=str(ind.sex), phenotype=str(ind.phenotype), variant_source=variant_source, ind_index=ind_dict[ind_id], ) individuals.append(individual) except KeyError as err: raise PedigreeError( family_id=case_id, individual_id=ind_id, message="Individual {0} exists in ped file but not in vcf".format(ind_id) ) else: case_id = os.path.basename(variant_source) for ind in ind_dict: individual = Individual( ind_id=ind, case_id=case_id, variant_source=variant_source, ind_index=ind_dict[ind] ) individuals.append(individual) logger.debug("Found individual {0} in {1}".format( ind, variant_source)) elif variant_mode == 'gemini': gq = GeminiQuery(variant_source) ind_dict = gq.sample_to_idx query = "SELECT * from samples" gq.run(query) for individual in gq: logger.debug("Found individual {0} with family id {1}".format( individual['name'], individual['family_id'])) individuals.append( Individual( ind_id=individual['name'], case_id=individual['family_id'], mother=individual['maternal_id'], father=individual['paternal_id'], sex=individual['sex'], phenotype=individual['phenotype'], ind_index=ind_dict.get(individual['name']), variant_source=variant_source, bam_path=None) ) return individuals
Get the individuals from a vcf file, gemini database, and/or a ped file. Args: variant_source (str): Path to a variant source case_lines(Iterable): Ped like lines case_type(str): Format of ped lines Returns: individuals (generator): generator with Individuals
juraj-google-style
def _indexed_case_helper(branch_fns, default, branch_index, name, lower_using_switch_merge=None): branch_fns = _indexed_case_verify_and_canonicalize_args(branch_fns, default, branch_index) with ops.name_scope(name, 'case', [branch_index]): if context.executing_eagerly() and (not hasattr(branch_index, 'graph')): branch_index = array_ops.where(math_ops.less(branch_index, 0) | math_ops.greater_equal(branch_index, len(branch_fns)), len(branch_fns) - 1, branch_index) return branch_fns[int(branch_index)]() return cond_v2.indexed_case(branch_index, branch_fns, lower_using_switch_merge=lower_using_switch_merge)
Implementation of case that emits the n-way indexed Case op. Args: branch_fns: Dict or list of pairs of a boolean scalar tensor, and a callable which returns a list of tensors. default: Optional callable that returns a list of tensors. branch_index: Optional int `Tensor`, which selects for the corresponding pred_fn_pair. name: A name for this operation (optional). lower_using_switch_merge: Lower this op using switch merge ops (optional). Returns: The tensors returned by the pair whose key matched branch_index, or those returned by `default` if none does. Raises: TypeError: If `branch_fns` is not a list/dictionary. TypeError: If `branch_fns` is a list but does not contain 2-tuples or callables. TypeError: If `fns[i]` is not callable for any i, or `default` is not callable.
github-repos
def print_stack_events(self): first_token = '7be7981bd6287dd8112305e8f3822a6f' keep_going = True next_token = first_token current_request_token = None rows = [] try: while keep_going and next_token: if next_token == first_token: response = self._cf_client.describe_stack_events( StackName=self._stack_name ) else: response = self._cf_client.describe_stack_events( StackName=self._stack_name, NextToken=next_token ) next_token = response.get('NextToken', None) for event in response['StackEvents']: row = [] event_time = event.get('Timestamp') request_token = event.get('ClientRequestToken', 'unknown') if current_request_token is None: current_request_token = request_token elif current_request_token != request_token: keep_going = False break row.append(event_time.strftime('%x %X')) row.append(event.get('LogicalResourceId')) row.append(event.get('ResourceStatus')) row.append(event.get('ResourceStatusReason', '')) rows.append(row) if len(rows) > 0: print('\nEvents for the current upsert:') print(tabulate(rows, headers=['Time', 'Logical ID', 'Status', 'Message'])) return True else: print('\nNo stack events found\n') except Exception as wtf: print(wtf) return False
List events from the given stack Args: None Returns: None
juraj-google-style
def _with_dependencies(self, dependencies): new_row_splits = control_flow_ops.with_dependencies(dependencies, self._row_splits) return RowPartition(row_splits=new_row_splits, row_lengths=self._row_lengths, value_rowids=self._value_rowids, nrows=self._nrows, uniform_row_length=self._uniform_row_length, internal=_row_partition_factory_key)
Returns a new RowPartition equal to self with control dependencies. Specifically, self._row_splits is gated by the given control dependencies. Used to add sanity checks to the constructors. Args: dependencies: a list of tensors to use as dependencies. Returns: A new RowPartition object.
github-repos
def get_atom_map(structure):
    syms = [site.specie.symbol for site in structure]
    unique_pot_atoms = []
    [unique_pot_atoms.append(i) for i in syms if not unique_pot_atoms.count(i)]
    atom_map = {}
    for i, atom in enumerate(unique_pot_atoms):
        atom_map[atom] = i + 1
    return atom_map
Returns a dict that maps each atomic symbol to a unique integer starting from 1. Args: structure (Structure) Returns: dict
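A worked example of the mapping above, assuming a pymatgen Structure whose sites are Fe, Fe, O, O in that order:

    get_atom_map(structure)
    # -> {'Fe': 1, 'O': 2}
    # Symbols are numbered by order of first appearance, starting at 1.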
juraj-google-style
def emit_counters(self, category, name, pid, timestamp, counters):
    event = self._create_event('C', category, name, pid, 0, timestamp)
    event['args'] = counters.copy()
    self._events.append(event)
Emits a counter record for the dictionary 'counters'. Args: category: The event category as a string. name: The event name as a string. pid: Identifier of the process generating this event as an integer. timestamp: The timestamp of this event as a long integer. counters: Dictionary of counter values.
github-repos
def create_tracker(self, restriction): raise NotImplementedError
Produces a new ``RestrictionTracker`` for the given restriction. This API is required to be implemented. Args: restriction: an object that defines a restriction as identified by a Splittable ``DoFn`` that utilizes the current ``RestrictionProvider``. For example, a tuple that gives a range of positions for a Splittable ``DoFn`` that reads files based on byte positions. Returns: an object of type ``RestrictionTracker``.
github-repos
async def claim_work(context): log.debug("Calling claimWork...") payload = { 'workerGroup': context.config['worker_group'], 'workerId': context.config['worker_id'], 'tasks': 1, } try: return await context.queue.claimWork( context.config['provisioner_id'], context.config['worker_type'], payload ) except (taskcluster.exceptions.TaskclusterFailure, aiohttp.ClientError) as exc: log.warning("{} {}".format(exc.__class__, exc))
Find and claim the next pending task in the queue, if any. Args: context (scriptworker.context.Context): the scriptworker context. Returns: dict: a dict containing a list of the task definitions of the tasks claimed.
juraj-google-style
def siblings(self, as_resources=False):
    siblings = set()
    for parent in self.parents(as_resources=True):
        for sibling in parent.children(as_resources=as_resources):
            siblings.add(sibling)
    if as_resources:
        siblings.remove(self)
    if not as_resources:
        siblings.remove(self.uri)
    return list(siblings)
method to return hierarchical siblings of this resource. Args: as_resources (bool): if True, opens each as appropriate resource type instead of return URI only Returns: (list): list of resources
codesearchnet
def make_access_request(self):
    del self.issued_at
    assertion = b'.'.join((self.header(), self.claims(), self.signature()))
    post_data = {'grant_type': GRANT_TYPE, 'assertion': assertion}
    resp = requests.post(AUDIENCE, post_data)
    if resp.status_code != 200:
        raise AuthenticationError(resp)
    return resp
Makes an OAuth2 access token request with crafted JWT and signature. The core of this module. Based on arguments it creates proper JWT for you and signs it with supplied private key. Regardless of present valid token, it always clears ``issued_at`` property, which in turn results in requesting fresh OAuth2 access token. Returns: requests.Response Raises: google_oauth.exceptions.AuthenticationError: If there was any non-200 HTTP-code from Google. requests.RequestException: Something went wrong when doing HTTP request.
codesearchnet
def get_overall_services_health(self) -> str:
    services_health_status = self.get_services_health()
    health_status = all(status == 'Healthy'
                        for status in services_health_status.values())
    if health_status:
        overall_status = 'Healthy'
    else:
        overall_status = 'Unhealthy'
    return overall_status
Get the overall health of all the services. Returns: str, overall health status
codesearchnet
def _parse_device(s: str) -> Tuple[List[GridQubit], Dict[str, Set[GridQubit]]]:
    lines = s.strip().split('\n')
    qubits = []
    measurement_lines = {}
    for row, line in enumerate(lines):
        for col, c in enumerate(line.strip()):
            if c != '-':
                qubit = GridQubit(row, col)
                qubits.append(qubit)
                measurement_line = measurement_lines.setdefault(c, set())
                measurement_line.add(qubit)
    return (qubits, measurement_lines)
Parse ASCIIart device layout into info about qubits and connectivity. Args: s: String representing the qubit layout. Each line represents a row, and each character in the row is a qubit, or a blank site if the character is a hyphen '-'. Different letters for the qubit specify which measurement line that qubit is connected to, e.g. all 'A' qubits share a measurement line. Leading and trailing spaces on each line are ignored. Returns: A list of qubits and a dict mapping measurement line name to the qubits on that measurement line.
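A small illustration of the expected layout string, with hypothetical measurement-line letters A and B:

    layout = "AB-\n-BA"
    qubits, lines = _parse_device(layout)
    # qubits -> [GridQubit(0, 0), GridQubit(0, 1), GridQubit(1, 1), GridQubit(1, 2)]
    # lines  -> {'A': {GridQubit(0, 0), GridQubit(1, 2)},
    #            'B': {GridQubit(0, 1), GridQubit(1, 1)}}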
codesearchnet
def _PrepareAttributeContainer(self, attribute_container):
    attribute_values_hash = hash(attribute_container.GetAttributeValuesString())
    identifier = identifiers.FakeIdentifier(attribute_values_hash)
    attribute_container.SetIdentifier(identifier)
    return copy.deepcopy(attribute_container)
Prepares an attribute container for storage. Args: attribute_container (AttributeContainer): attribute container. Returns: AttributeContainer: copy of the attribute container to store in the fake storage.
juraj-google-style
def _get_connection_state(self, conn_or_int_id):
    key = conn_or_int_id
    if isinstance(key, str):
        table = self._int_connections
    elif isinstance(key, int):
        table = self._connections
    else:
        raise ArgumentError('You must supply either an int connection id or a string internal id to _get_connection_state', id=key)
    if key not in table:
        return self.Disconnected
    data = table[key]
    return data['state']
Get a connection's state by either conn_id or internal_id This routine must only be called from the internal worker thread. Args: conn_or_int_id (int, string): The external integer connection id or and internal string connection id
codesearchnet
def _encode_dict_as_row(record, column_name_map): for k in list(record.keys()): v = record[k] if (isinstance(v, pandas.Timestamp) or isinstance(v, datetime.datetime)): v = record[k] = record[k].isoformat() if (k not in column_name_map): column_name_map[k] = ''.join((c for c in k if (c in Table._VALID_COLUMN_NAME_CHARACTERS))) new_k = column_name_map[k] if (k != new_k): record[new_k] = v del record[k] return record
Encode a dictionary representing a table row in a form suitable for streaming to BQ. This includes encoding timestamps as ISO-compatible strings and removing invalid characters from column names. Args: record: a Python dictionary representing the table row. column_name_map: a dictionary mapping dictionary keys to column names. This is initially empty and built up by this method when it first encounters each column, then used as a cache subsequently. Returns: The sanitized dictionary.
codesearchnet
def decode_header_part(header):
    if not header:
        return six.text_type()
    output = six.text_type()
    try:
        for d, c in decode_header(header):
            c = c if c else 'utf-8'
            output += ported_string(d, c, 'ignore')
    except (HeaderParseError, UnicodeError):
        log.error("Failed decoding header part: {}".format(header))
        output += header
    return output
Given a raw header, returns a decoded header.

Args:
    header (string): header to decode

Returns:
    str (Python 3) or unicode (Python 2)
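A usage sketch with an RFC 2047 encoded word (the base64 payload below is just 'Hello, world'):

    decode_header_part('=?utf-8?B?SGVsbG8sIHdvcmxk?=')
    # -> 'Hello, world'
    # Undecodable headers are logged and returned as-is rather than raising.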
juraj-google-style
def ceil(cls, x: 'TensorFluent') -> 'TensorFluent': return cls._unary_op(x, tf.ceil, tf.float32)
Returns a TensorFluent for the ceil function. Args: x: The input fluent. Returns: A TensorFluent wrapping the ceil function.
codesearchnet
def open(in_file, in_fmt=None):
    fmt = in_file.split('.')[-1]
    if in_fmt:
        fmt = in_fmt
    fmt = fmt.lower()
    if fmt in ['png', 'jpg', 'tiff', 'tif', 'jpeg']:
        return Image.open(in_file)
    else:
        raise NotImplementedError('Cannot open file of type {fmt}'.format(fmt=fmt))
Reads in a file from disk.

Arguments:
    in_file: The name of the file to read in
    in_fmt: The format of in_file, if you want to be explicit

Returns:
    PIL.Image.Image: the opened image
codesearchnet
def _parse_publisher(details):
    publisher = _get_td_or_none(
        details, 'ctl00_ContentPlaceHolder1_tblRowNakladatel')
    if not publisher:
        return None
    publisher = dhtmlparser.removeTags(publisher).strip()
    if not publisher:
        return None
    return publisher
Parse publisher of the book. Args: details (obj): HTMLElement containing slice of the page with details. Returns: str/None: Publisher's name as string or None if not found.
codesearchnet
def add_to_loader(loader_cls: Type, classes: List[Type]) -> None: if (not isinstance(classes, list)): classes = [classes] for class_ in classes: tag = '!{}'.format(class_.__name__) if issubclass(class_, enum.Enum): loader_cls.add_constructor(tag, EnumConstructor(class_)) elif (issubclass(class_, str) or issubclass(class_, UserString)): loader_cls.add_constructor(tag, UserStringConstructor(class_)) else: loader_cls.add_constructor(tag, Constructor(class_)) if (not hasattr(loader_cls, '_registered_classes')): loader_cls._registered_classes = dict() loader_cls._registered_classes[tag] = class_
Registers one or more classes with a YAtiML loader. Once a class has been registered, it can be recognized and \ constructed when reading a YAML text. Args: loader_cls: The loader to register the classes with. classes: The class(es) to register, a plain Python class or a \ list of them.
codesearchnet
def update_dns_zone_record(env, zone_id, **kwargs): client = boto3.Session(profile_name=env).client('route53') response = {} hosted_zone_info = client.get_hosted_zone(Id=zone_id) zone_name = hosted_zone_info['HostedZone']['Name'].rstrip('.') dns_name = kwargs.get('dns_name') if dns_name and dns_name.endswith(zone_name): dns_name_aws = kwargs.get('dns_name_aws') dns_json = get_template(template_file='infrastructure/dns_upsert.json.j2', **kwargs) LOG.info('Attempting to create DNS record %s (%s) in Hosted Zone %s (%s)', dns_name, dns_name_aws, zone_id, zone_name) try: response = client.change_resource_record_sets( HostedZoneId=zone_id, ChangeBatch=json.loads(dns_json), ) LOG.info('Upserted DNS record %s (%s) in Hosted Zone %s (%s)', dns_name, dns_name_aws, zone_id, zone_name) except botocore.exceptions.ClientError as error: LOG.info('Error creating DNS record %s (%s) in Hosted Zone %s (%s)', dns_name, dns_name_aws, zone_id, zone_name) LOG.debug(error) else: LOG.info('Skipping creating DNS record %s in non-matching Hosted Zone %s (%s)', dns_name, zone_id, zone_name) LOG.debug('Route53 JSON Response: \n%s', pformat(response))
Create a Route53 CNAME record in _env_ zone. Args: env (str): Deployment environment. zone_id (str): Route53 zone id. Keyword Args: dns_name (str): FQDN of application's dns entry to add/update. dns_name_aws (str): FQDN of AWS resource dns_ttl (int): DNS time-to-live (ttl)
juraj-google-style
def to_xml(self, xmllint=False): root = self._tree.getroot() ret = ET.tostring(ET.ElementTree(root), pretty_print=True) if xmllint: ret = xmllint_format(ret) return ret
Serialize all properties as pretty-printed XML Args: xmllint (boolean): Format with ``xmllint`` in addition to pretty-printing
juraj-google-style
def match_filenames_once(pattern, name=None): with ops.name_scope(name, 'matching_filenames', [pattern]) as name: return variable_v1.VariableV1(name=name, initial_value=io_ops.matching_files(pattern), trainable=False, validate_shape=False, collections=[ops.GraphKeys.LOCAL_VARIABLES])
Save the list of files matching pattern, so it is only computed once. NOTE: The order of the files returned is deterministic. Args: pattern: A file pattern (glob), or 1D tensor of file patterns. name: A name for the operations (optional). Returns: A variable that is initialized to the list of files matching the pattern(s).
github-repos
def sheets_tab_delete(config, auth, sheet_url_or_name, sheet_tab): if config.verbose: print('SHEETS DELETE', sheet_url_or_name, sheet_tab) spreadsheet = sheets_get(config, auth, sheet_url_or_name) if spreadsheet: if len(spreadsheet['sheets']) == 1 and spreadsheet['sheets'][0]['properties']['title'] == sheet_tab: file_delete(config, auth, spreadsheet['properties']['title'], parent=None) else: sheet_id, tab_id = sheets_tab_id(config, auth, sheet_url_or_name, sheet_tab) if tab_id is not None: sheets_batch_update(config, auth, sheet_url_or_name, {'requests': [{'deleteSheet': {'sheetId': tab_id}}]})
Delete a tab in a sheet. Args: config - see starthinker/util/configuration.py auth - user or service url_or_name - one of: URL, document title, or id sheet_tab - name of tab to get id for No Return
github-repos
def __setattr__(self, name: str, val: np.ndarray) -> None: if name.startswith("!"): super(AttributeManager, self).__setattr__(name[1:], val) elif "/" in name: raise KeyError("Attribute name cannot contain slash (/)") else: if self.ds is not None: values = loompy.normalize_attr_values(val) a = ["/row_attrs/", "/col_attrs/"][self.axis] if self.ds.shape[self.axis] != 0 and values.shape[0] != self.ds.shape[self.axis]: raise ValueError(f"Attribute '{name}' must have exactly {self.ds.shape[self.axis]} values but {len(values)} were given") if self.ds._file[a].__contains__(name): del self.ds._file[a + name] self.ds._file[a + name] = values self.ds._file[a + name].attrs["last_modified"] = timestamp() self.ds._file[a].attrs["last_modified"] = timestamp() self.ds._file.attrs["last_modified"] = timestamp() self.ds._file.flush() self.__dict__["storage"][name] = loompy.materialize_attr_values(self.ds._file[a][name][:]) else: self.__dict__["storage"][name] = val
Set the value of a named attribute.

Args:
    name (str): Name of the attribute
    val (np.ndarray): Value of the attribute

Remarks:
    Length must match the corresponding matrix dimension.
    The values are automatically HTML escaped and converted to ASCII for storage.
juraj-google-style
def register_hook(self, hook, priority='NORMAL'): assert isinstance(hook, Hook) if hasattr(hook, 'priority'): raise ValueError('"priority" is a reserved attribute for hooks') priority = get_priority(priority) hook.priority = priority inserted = False for i in range(len(self._hooks) - 1, -1, -1): if priority >= self._hooks[i].priority: self._hooks.insert(i + 1, hook) inserted = True break if not inserted: self._hooks.insert(0, hook)
Register a hook into the hook list. Args: hook (:obj:`Hook`): The hook to be registered. priority (int or str or :obj:`Priority`): Hook priority. Lower value means higher priority.
juraj-google-style
def egress(self, envelope, http_headers, operation, binding_options):
    custom_headers = self._header_handler.GetHTTPHeaders()
    http_headers.update(custom_headers)
    return (envelope, http_headers)
Overriding the egress function to set our headers. Args: envelope: An Element with the SOAP request data. http_headers: A dict of the current http headers. operation: The SoapOperation instance. binding_options: An options dict for the SOAP binding. Returns: A tuple of the envelope and headers.
codesearchnet
def check_dihedral(self, construction_table):
    c_table = construction_table
    angles = self.get_angle_degrees(c_table.iloc[3:, :].values)
    problem_index = np.nonzero((175 < angles) | (angles < 5))[0]
    rename = dict(enumerate(c_table.index[3:]))
    problem_index = [rename[i] for i in problem_index]
    return problem_index
Checks if the dihedral defining atom is collinear. For each index starting from the third row of the ``construction_table``, checks whether the reference atoms are collinear. Args: construction_table (pd.DataFrame): Returns: list: A list of problematic indices.
codesearchnet
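The index-filtering step of check_dihedral can be exercised on its own with NumPy; the angle values and atom labels below are made up for illustration.

import numpy as np

angles = np.array([12.0, 179.2, 90.0, 3.1])   # hypothetical angles in degrees
index_labels = [7, 2, 9, 4]                   # made-up atom indices for those rows

problem_positions = np.nonzero((175 < angles) | (angles < 5))[0]
problem_index = [index_labels[i] for i in problem_positions]
print(problem_index)   # [2, 4] -> nearly collinear reference atoms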
def prepare_request( url: Union[str, methods], data: Optional[MutableMapping], headers: Optional[MutableMapping], global_headers: MutableMapping, token: str, as_json: Optional[bool] = None, ) -> Tuple[str, Union[str, MutableMapping], MutableMapping]: if isinstance(url, methods): as_json = as_json or url.value[3] real_url = url.value[0] else: real_url = url as_json = False if not headers: headers = {**global_headers} else: headers = {**global_headers, **headers} payload: Optional[Union[str, MutableMapping]] = None if real_url.startswith(HOOK_URL) or (real_url.startswith(ROOT_URL) and as_json): payload, headers = _prepare_json_request(data, token, headers) elif real_url.startswith(ROOT_URL) and not as_json: payload = _prepare_form_encoded_request(data, token) else: real_url = ROOT_URL + real_url payload = _prepare_form_encoded_request(data, token) return real_url, payload, headers
Prepare an outgoing request. Creates the URL and headers, adds the token to the body and, if needed, JSON-encodes it. Args: url: :class:`slack.methods` item or URL string data: Outgoing data headers: Custom headers global_headers: Global headers token: Slack API token as_json: Post JSON to the Slack API Returns: :py:class:`tuple` (url, body, headers)
juraj-google-style
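A minimal standalone sketch of the header-merging and URL-resolution behaviour described above; ROOT_URL and the method name are placeholder assumptions rather than the real slack module.

from typing import MutableMapping, Optional

ROOT_URL = 'https://slack.com/api/'   # assumed API root for illustration

def resolve(url: str, headers: Optional[MutableMapping], global_headers: MutableMapping):
    # Custom headers win over global ones, mirroring {**global_headers, **headers}.
    merged = {**global_headers, **(headers or {})}
    # Bare method names get resolved against the API root.
    real_url = url if url.startswith(ROOT_URL) else ROOT_URL + url
    return real_url, merged

print(resolve('chat.postMessage', {'X-Trace': 'abc'}, {'User-Agent': 'bot/1.0'}))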
def __init__(self, bits: List[int], initializer: tf.keras.initializers.Initializer=tf.keras.initializers.RandomUniform(), name: Union[None, str]=None): pre_process = [energy_utils.SpinsFromBitstrings()] post_process = [energy_utils.VariableDot(initializer=initializer)] super().__init__(bits, pre_process + post_process, name) self._post_process = post_process
Initializes a BernoulliEnergy. Args: bits: Unique labels for the bits on which this distribution is supported. initializer: A `tf.keras.initializers.Initializer` which specifies how to initialize the values of the parameters. name: Optional name for the model.
github-repos
def replace_punctuation(self, text, excluded=None, replacement=''): if excluded is None: excluded = set() elif not isinstance(excluded, set): excluded = set(excluded) punct = ''.join(self.__punctuation.difference(excluded)) return self.replace_characters( text, characters=punct, replacement=replacement)
Replace punctuation symbols in text. Removes punctuation from the input text or replaces it with a string if specified. Characters replaced will be those in string.punctuation. Args: text: The text to be processed. excluded: Set of characters to exclude. replacement: New text that will replace punctuation. Returns: The text without punctuation.
juraj-google-style
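A self-contained equivalent of replace_punctuation built directly on string.punctuation, assuming the class's punctuation set matches it.

import string

def strip_punctuation(text, excluded=None, replacement=''):
    excluded = set(excluded or ())
    punct = set(string.punctuation) - excluded
    return ''.join(replacement if ch in punct else ch for ch in text)

print(strip_punctuation('Hello, world!'))                       # Hello world
print(strip_punctuation('e-mail: a@b.c', excluded={'@', '.'}))  # email a@b.c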
def update_file(filename, result, content, indent): parts = re.split('---+', content, 2) frontmatter = yaml.safe_load(parts[1]) frontmatter['counts'] = result['counts'] parts[1] = '\n{}'.format( yaml.safe_dump(frontmatter, default_flow_style=False, indent=indent)) result = '---'.join(parts) with open(filename, 'wb') as f: f.write(result.encode('utf-8')) print('{} updated.'.format(filename))
Updates a Jekyll file to contain the counts from an object. This just converts the results to YAML and adds them to the Jekyll frontmatter. Args: filename: the Jekyll file to update result: the results object from `wc` content: the contents of the original file indent: the indentation level for dumping YAML
juraj-google-style
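A runnable sketch of the frontmatter split-and-rewrite step used by update_file; the file content and the counts value are invented for illustration.

import re
import yaml

content = "---\ntitle: My post\n---\nBody text here.\n"   # invented Jekyll file content

parts = re.split('---+', content, 2)        # ['', '\ntitle: My post\n', '\nBody text here.\n']
frontmatter = yaml.safe_load(parts[1])
frontmatter['counts'] = {'words': 3}        # invented counts object
parts[1] = '\n{}'.format(yaml.safe_dump(frontmatter, default_flow_style=False, indent=2))
print('---'.join(parts))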
def scrape_hive_url(mc_url, num_tracks=sys.maxsize, folders=False, custom_path=''): try: data = get_hive_data(mc_url) except Exception as e: puts_safe((colored.red('Problem downloading ') + mc_url)) print(e) filenames = [] return filenames
Scrape a Hive.co download page. Returns: list: filenames to open
codesearchnet
def plot(self, figure_list): if not self.data == {} and self.data['image_data'] is None: axes = figure_list[0].axes[0] if len(axes.images)>0: self.data['image_data'] = np.array(axes.images[0].get_array()) self.data['extent'] = np.array(axes.images[0].get_extent()) self.plot_settings['cmap'] = axes.images[0].get_cmap().name self.plot_settings['xlabel'] = axes.get_xlabel() self.plot_settings['ylabel'] = axes.get_ylabel() self.plot_settings['title'] = axes.get_title() self.plot_settings['interpol'] = axes.images[0].get_interpolation() Script.plot(self, figure_list)
Plots a dot on top of each selected NV, with a corresponding number denoting the order in which the NVs are listed. Precondition: must have an existing image in figure_list[0] to plot over Args: figure_list:
juraj-google-style
def run_task_external(self, coroutine): self.verify_calling_thread(False, 'run_task_external must not be called from the emulation thread') future = asyncio.run_coroutine_threadsafe(coroutine, self._loop) return future.result()
Inject a task into the emulation loop and wait for it to finish. The coroutine parameter is run as a Task inside the EmulationLoop until it completes and the return value (or any raised Exception) is passed back into the caller's thread. Args: coroutine (coroutine): The task to inject into the event loop. Returns: object: Whatever the coroutine returned.
codesearchnet
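The inject-and-wait pattern behind run_task_external can be reproduced standalone with asyncio.run_coroutine_threadsafe; the background loop and the add coroutine here are illustrative stand-ins for the emulation loop.

import asyncio
import threading

loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()

async def add(a, b):
    await asyncio.sleep(0.01)
    return a + b

# Inject the coroutine into the loop running on another thread and block
# in the calling thread until its result (or exception) comes back.
future = asyncio.run_coroutine_threadsafe(add(2, 3), loop)
print(future.result())   # 5

loop.call_soon_threadsafe(loop.stop)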
def __init__( self, cipher_mode=None, initialization_vector=None, key=None, **kwargs): if not key: raise ValueError('Missing key.') cipher_mode_string = cipher_mode cipher_mode = self.ENCRYPTION_MODES.get(cipher_mode_string, None) if cipher_mode is None: raise ValueError('Unsupported cipher mode: {0!s}'.format(cipher_mode_string)) if cipher_mode != Blowfish.MODE_ECB and not initialization_vector: raise ValueError('Missing initialization vector.') super(BlowfishDecrypter, self).__init__() if cipher_mode == Blowfish.MODE_ECB: self._blowfish_cipher = Blowfish.new(key, mode=cipher_mode) else: self._blowfish_cipher = Blowfish.new( key, IV=initialization_vector, mode=cipher_mode)
Initializes a decrypter. Args: cipher_mode (Optional[str]): cipher mode. initialization_vector (Optional[bytes]): initialization vector. key (Optional[bytes]): key. kwargs (dict): keyword arguments depending on the decrypter. Raises: ValueError: when key is not set, block cipher mode is not supported, or initialization_vector is required and not set.
juraj-google-style
def _get_instance(self, iname, namespace, property_list, local_only, include_class_origin, include_qualifiers): instance_repo = self._get_instance_repo(namespace) rtn_tup = self._find_instance(iname, instance_repo) inst = rtn_tup[1] if (inst is None): raise CIMError(CIM_ERR_NOT_FOUND, _format('Instance not found in repository namespace {0!A}. Path={1!A}', namespace, iname)) rtn_inst = deepcopy(inst) if local_only: for p in rtn_inst: class_origin = rtn_inst.properties[p].class_origin if (class_origin and (class_origin != inst.classname)): del rtn_inst[p] if ((not self._repo_lite) and local_only): try: cl = self._get_class(iname.classname, namespace, local_only=local_only) except CIMError as ce: if (ce.status_code == CIM_ERR_NOT_FOUND): raise CIMError(CIM_ERR_INVALID_CLASS, _format('Class {0!A} not found for instance {1!A} in namespace {2!A}.', iname.classname, iname, namespace)) class_pl = cl.properties.keys() for p in list(rtn_inst): if (p not in class_pl): del rtn_inst[p] self._filter_properties(rtn_inst, property_list) if (not include_qualifiers): self._remove_qualifiers(rtn_inst) if (not include_class_origin): self._remove_classorigin(rtn_inst) return rtn_inst
Local method that implements GetInstance. This is generally used by other instance methods that need to get an instance from the repository. It attempts to get the instance, copies it, and filters it for input parameters like local_only, include_qualifiers, and property_list. Returns: CIMInstance copy from the repository with property_list filtered, qualifiers removed if include_qualifiers=False, and class origin removed if include_class_origin is False
codesearchnet
def get_enabled(): raw_services = _get_services() services = set() for service in raw_services: if (info(service['ServiceName'])['StartType'] in ['Auto']): services.add(service['ServiceName']) return sorted(services)
Return a list of enabled services. Enabled is defined as a service that is marked to Auto Start. Returns: list: A list of enabled services CLI Example: .. code-block:: bash salt '*' service.get_enabled
codesearchnet
def __init__(self, max_iterations, damping, unroll_loop=False): assert damping >= 0.0 self.damping = damping super(ConjugateGradient, self).__init__(max_iterations=max_iterations, unroll_loop=unroll_loop)
Creates a new conjugate gradient solver instance. Args: max_iterations: Maximum number of iterations before termination. damping: Damping factor. unroll_loop: Unrolls the TensorFlow while loop if true.
juraj-google-style
def content_ratings(self, **kwargs): path = self._get_id_path('content_ratings') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
Get the content ratings for a TV Series. Args: language: (optional) ISO 639 code. append_to_response: (optional) Comma separated, any collection method. Returns: A dict representation of the JSON returned from the API.
juraj-google-style
def add_ema_control_dependencies(vector_quantizer, one_hot_assignments, codes, commitment_loss, decay): updated_ema_count = moving_averages.assign_moving_average(vector_quantizer.ema_count, tf.reduce_sum(input_tensor=one_hot_assignments, axis=[0, 1]), decay, zero_debias=False) updated_ema_means = moving_averages.assign_moving_average(vector_quantizer.ema_means, tf.reduce_sum(input_tensor=(tf.expand_dims(codes, 2) * tf.expand_dims(one_hot_assignments, 3)), axis=[0, 1]), decay, zero_debias=False) perturbed_ema_count = (updated_ema_count + 1e-05) with tf.control_dependencies([commitment_loss]): update_means = tf.compat.v1.assign(vector_quantizer.codebook, (updated_ema_means / perturbed_ema_count[(..., tf.newaxis)])) with tf.control_dependencies([update_means]): return tf.identity(commitment_loss)
Add control dependencies to the commitment loss to update the codebook. Args: vector_quantizer: An instance of the VectorQuantizer class. one_hot_assignments: The one-hot vectors corresponding to the matched codebook entry for each code in the batch. codes: A `float`-like `Tensor` containing the latent vectors to be compared to the codebook. commitment_loss: The commitment loss from comparing the encoder outputs to their neighboring codebook entries. decay: Decay factor for exponential moving average. Returns: commitment_loss: Commitment loss with control dependencies.
codesearchnet
def push(self, x): self._median_tracker.push(x) median = self._median_tracker.get() self._diff_median_tracker.push(abs(x - median))
Adds a new value to the tracker and updates the MAD. Args: x: The value to be added to the tracked stream.
github-repos
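For reference, the batch MAD that the online tracker approximates can be computed directly with NumPy; note the tracker above uses the running median at the time each value arrives, so its result can differ slightly from this batch version.

import numpy as np

stream = [3.0, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0]
median = np.median(stream)
mad = np.median(np.abs(np.array(stream) - median))
print(median, mad)   # 3.0 2.0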
class FlaxGreedySearchOutput(ModelOutput): sequences: Optional[jnp.ndarray] = None
Flax Base class for outputs of decoder-only generation models using greedy search. Args: sequences (`jnp.ndarray` of shape `(batch_size, max_length)`): The generated sequences.
github-repos
def get_service_for_handle(self, handle): for s in self.services.values(): if s.start_handle <= handle and s.end_handle >= handle: return s return None
Given a characteristic handle, return the :class:`Service` object that the handle belongs to. Args: handle (int): the characteristic handle Returns: None if no service matches the given handle, otherwise a :class:`Service` object.
juraj-google-style
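A minimal sketch of the handle-range lookup, using a namedtuple stand-in for the real Service objects; the UUIDs and handle ranges are invented.

from collections import namedtuple

# Minimal stand-in for the real Service objects; only the handle range matters here.
Service = namedtuple('Service', ['uuid', 'start_handle', 'end_handle'])

services = {
    'battery': Service('180F', 10, 14),
    'heart_rate': Service('180D', 15, 22),
}

def get_service_for_handle(handle):
    for s in services.values():
        if s.start_handle <= handle <= s.end_handle:
            return s
    return None

print(get_service_for_handle(16).uuid)   # 180D
print(get_service_for_handle(99))        # None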
def batch_inputs(dataset, batch_size, train, num_preprocess_threads=None, num_readers=1): with tf.name_scope('batch_processing'): data_files = dataset.data_files() if (data_files is None): raise ValueError('No data files found for this dataset') if train: filename_queue = tf.train.string_input_producer(data_files, shuffle=True, capacity=16) else: filename_queue = tf.train.string_input_producer(data_files, shuffle=False, capacity=1) if (num_preprocess_threads is None): num_preprocess_threads = FLAGS.num_preprocess_threads if (num_preprocess_threads % 4): raise ValueError('Please make num_preprocess_threads a multiple of 4 (%d % 4 != 0).', num_preprocess_threads) if (num_readers is None): num_readers = FLAGS.num_readers if (num_readers < 1): raise ValueError('Please make num_readers at least 1') examples_per_shard = 1024 min_queue_examples = (examples_per_shard * FLAGS.input_queue_memory_factor) if train: examples_queue = tf.RandomShuffleQueue(capacity=(min_queue_examples + (3 * batch_size)), min_after_dequeue=min_queue_examples, dtypes=[tf.string]) else: examples_queue = tf.FIFOQueue(capacity=(examples_per_shard + (3 * batch_size)), dtypes=[tf.string]) if (num_readers > 1): enqueue_ops = [] for _ in range(num_readers): reader = dataset.reader() (_, value) = reader.read(filename_queue) enqueue_ops.append(examples_queue.enqueue([value])) tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(examples_queue, enqueue_ops)) example_serialized = examples_queue.dequeue() else: reader = dataset.reader() (_, example_serialized) = reader.read(filename_queue) images_and_labels = [] for thread_id in range(num_preprocess_threads): (image_buffer, label_index, bbox, _) = parse_example_proto(example_serialized) image = image_preprocessing(image_buffer, bbox, train, thread_id) images_and_labels.append([image, label_index]) (images, label_index_batch) = tf.train.batch_join(images_and_labels, batch_size=batch_size, capacity=((2 * num_preprocess_threads) * batch_size)) height = FLAGS.image_size width = FLAGS.image_size depth = 3 images = tf.cast(images, tf.float32) images = tf.reshape(images, shape=[batch_size, height, width, depth]) tf.summary.image('images', images) return (images, tf.reshape(label_index_batch, [batch_size]))
Construct batches of training or evaluation examples from the image dataset. Args: dataset: instance of Dataset class specifying the dataset. See dataset.py for details. batch_size: integer train: boolean num_preprocess_threads: integer, total number of preprocessing threads num_readers: integer, number of parallel readers Returns: images: 4-D float Tensor of a batch of images labels: 1-D integer Tensor of [batch_size]. Raises: ValueError: if data is not found
codesearchnet
def nlargest(self, n=None): if (n is None): return sorted(self.counts(), key=itemgetter(1), reverse=True) else: return heapq.nlargest(n, self.counts(), key=itemgetter(1))
List the n most common elements and their counts. List is from the most common to the least. If n is None, list all element counts. Run time should be O(m log m) where m is len(self). Args: n (int): The number of elements to return
codesearchnet
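The two code paths of nlargest can be mimicked with a collections.Counter standing in for self.counts().

import heapq
from collections import Counter
from operator import itemgetter

counts = Counter('abracadabra')   # stand-in for self.counts()

# n given: heap selection of the top n pairs.
print(heapq.nlargest(2, counts.items(), key=itemgetter(1)))   # [('a', 5), ('b', 2)] ('b' and 'r' tie at 2)

# n is None: full sort, most common first.
print(sorted(counts.items(), key=itemgetter(1), reverse=True))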
def Create(self, name): precondition.AssertType(name, Text) try: constructor = self._constructors[name] except KeyError: message = "No constructor for name '%s' has been registered" message %= name raise ValueError(message) instance = constructor() if not isinstance(instance, self._cls): message = ("Constructor %r for name '%s' returned instance of type %r " "(expected %r)") message %= (constructor, name, type(instance), self._cls) raise TypeError(message) return instance
Creates a new instance. Args: name: A name identifying the constructor to use for instantiation. Returns: An instance of the type that the factory supports.
juraj-google-style
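A simplified, self-contained version of the constructor-registry pattern shown above; the Parser classes and the Register method signature are assumptions for illustration.

class Parser:
    """Base class that registered constructors must produce."""

class JsonParser(Parser):
    pass

class Factory:
    def __init__(self, cls):
        self._cls = cls
        self._constructors = {}

    def Register(self, name, constructor):   # assumed registration API for this sketch
        self._constructors[name] = constructor

    def Create(self, name):
        try:
            constructor = self._constructors[name]
        except KeyError:
            raise ValueError("No constructor for name '%s' has been registered" % name)
        instance = constructor()
        if not isinstance(instance, self._cls):
            raise TypeError('Constructor %r returned %r, expected %r' % (constructor, type(instance), self._cls))
        return instance

factory = Factory(Parser)
factory.Register('json', JsonParser)
print(type(factory.Create('json')).__name__)   # JsonParser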
def _GetUsers(self, key_path_suffix): user_key_name, _, key_path_suffix = key_path_suffix.partition( definitions.KEY_PATH_SEPARATOR) if user_key_name == '.DEFAULT': search_key_name = 'S-1-5-18' else: search_key_name = user_key_name user_profile_list_key = self.GetKeyByPath(self._USER_PROFILE_LIST_KEY_PATH) if not user_profile_list_key: return None for user_profile_key in user_profile_list_key.GetSubkeys(): if search_key_name == user_profile_key.name: profile_path_value = user_profile_key.GetValueByName('ProfileImagePath') if not profile_path_value: break profile_path = profile_path_value.GetDataAsObject() if not profile_path: break key_name_upper = user_profile_key.name.upper() if key_name_upper.endswith('_CLASSES'): profile_path = '\\'.join([ profile_path, 'AppData', 'Local', 'Microsoft', 'Windows', 'UsrClass.dat']) else: profile_path = '\\'.join([profile_path, 'NTUSER.DAT']) profile_path_upper = profile_path.upper() registry_file = self._GetCachedUserFileByPath(profile_path_upper) if not registry_file: break key_path_prefix = definitions.KEY_PATH_SEPARATOR.join([ 'HKEY_USERS', user_key_name]) key_path = ''.join([key_path_prefix, key_path_suffix]) registry_file.SetKeyPathPrefix(key_path_prefix) return registry_file.GetKeyByPath(key_path) return None
Virtual key callback to determine the users sub keys. Args: key_path_suffix (str): users Windows Registry key path suffix with leading path separator. Returns: WinRegistryKey: the users Windows Registry key or None if not available.
juraj-google-style
def __init__(self, parent): logger.debug("Initialising log panel") super(Log, self).__init__(parent, padding=8, text="Python console log") log = tk.Text(self, wrap="none") log.grid(column=0, row=0, sticky="W E N S") log.tag_config('critical', foreground="red", underline=True) log.tag_config('error', foreground="red") log.tag_config('warning', foreground="orange") log.tag_config('info') log.tag_config('debug', foreground="gray") scrollbar = ttk.Scrollbar(self, orient="vertical", command=log.yview) scrollbar.grid(column=1, row=0, sticky="N S") log['yscrollcommand'] = scrollbar.set scrollbar = ttk.Scrollbar(self, orient="horizontal", command=log.xview) scrollbar.grid(column=0, row=1, sticky="W E") log['xscrollcommand'] = scrollbar.set class LogHandler(logging.Handler): def __init__(self, text_widget): logging.Handler.__init__(self) self.text_widget = text_widget self.text_widget.config(state=tk.DISABLED) def flush(self): try: self.text_widget.see("end") except: pass def emit(self, record): msg = self.format(record) msg = msg[:9] + msg[29:] tags = () if msg.startswith("CRITICAL"): tags = 'critical' if msg.startswith("ERROR"): tags = 'error' if msg.startswith("WARNING"): tags = 'warning' if msg.startswith("INFO"): tags = 'info' if msg.startswith("DEBUG"): tags = 'debug' self.text_widget.config(state=tk.NORMAL) self.text_widget.insert("end", msg + "\n", tags) self.text_widget.config(state=tk.DISABLED) self.flush() discord_logger = logging.getLogger("modis.discord_modis") formatter = logging.Formatter( "{levelname:8} {name} - {message}", style="{") discord_handler = LogHandler(log) discord_handler.setFormatter(formatter) discord_logger.addHandler(discord_handler) self.columnconfigure(0, weight=1) self.rowconfigure(0, weight=1)
Create a new text box for the console log. Args: parent: A tk or ttk object
juraj-google-style
def __init__(self, start, width, num_buckets): self._start = start self._width = width self._num_buckets = num_buckets
Create a histogram with linear buckets. Args: start: Lower bound of a starting bucket. width: Bucket width. Smaller width implies a better resolution for percentile estimation. num_buckets: The number of buckets. Upper bound of an ending bucket is defined by start + width * num_buckets.
github-repos
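A sketch of the bucket-index arithmetic implied by the linear-bucket constructor above; the underflow/overflow handling shown here is an assumption, not taken from the original class.

class LinearBuckets:
    def __init__(self, start, width, num_buckets):
        self._start = start
        self._width = width
        self._num_buckets = num_buckets

    def bucket_index(self, value):
        if value < self._start:
            return -1                              # underflow (assumed convention)
        index = int((value - self._start) // self._width)
        return min(index, self._num_buckets)       # clamp overflow into one extra bucket

b = LinearBuckets(start=0, width=10, num_buckets=5)        # buckets cover [0, 50)
print(b.bucket_index(7), b.bucket_index(49), b.bucket_index(500))   # 0 4 5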
def step(self, observations, raw_rewards, processed_rewards, dones, actions): assert isinstance(observations, np.ndarray) assert isinstance(raw_rewards, np.ndarray) assert isinstance(processed_rewards, np.ndarray) assert isinstance(dones, np.ndarray) assert isinstance(actions, np.ndarray) assert (self.batch_size == observations.shape[0]) assert (self.batch_size == raw_rewards.shape[0]) assert (self.batch_size == processed_rewards.shape[0]) assert (self.batch_size == dones.shape[0]) assert (self.batch_size == actions.shape[0]) for index in range(self.batch_size): trajectory = self._trajectories[index] assert trajectory.is_active trajectory.change_last_time_step(action=actions[index]) trajectory.add_time_step(observation=observations[index], done=dones[index], raw_reward=raw_rewards[index], processed_reward=processed_rewards[index]) if dones[index]: self._complete_trajectory(trajectory, index) assert (not self._trajectories[index].is_active)
Record the information obtained from taking a step in all envs. Records (observation, rewards, done) in a new time-step and actions in the current time-step. If any trajectory gets done, we move that trajectory to completed_trajectories. Args: observations: ndarray of first dimension self.batch_size, which has the observations after we've stepped, i.e. s_{t+1} where t is the current state. raw_rewards: ndarray of first dimension self.batch_size containing raw rewards, i.e. r_{t+1}. processed_rewards: ndarray of first dimension self.batch_size containing processed rewards, i.e. r_{t+1} dones: ndarray of first dimension self.batch_size, containing true at an index if that env is done, i.e. d_{t+1} actions: ndarray of first dimension self.batch_size, containing actions applied at the current time-step, which lead to the observations, rewards, and dones at the next time-step, i.e. a_t
codesearchnet
def next_trials(self): trials = list(self._trial_generator) if self._shuffle: random.shuffle(trials) self._finished = True return trials
Provides Trial objects to be queued into the TrialRunner. Returns: trials (list): Returns a list of trials.
codesearchnet
def reverse(path): if is_rooted(path) or '..' in path: from b2.manager import get_manager get_manager().errors()( 'reverse(path): path is either rooted or contains ".." in the path') if path == '.': return path path = os.path.normpath(path) return os.sep.join('..' for t in path.split(os.sep))
Returns path2 such that `os.path.join(path, path2) == '.'`. `path` may not contain '..' or be rooted. Args: path (str): the path to reverse Returns: the string of the reversed path Example: >>> p1 = 'path/to/somewhere' >>> p2 = reverse('path/to/somewhere') >>> p2 '../../..' >>> os.path.normpath(os.path.join(p1, p2)) '.'
juraj-google-style
def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, output_attentions=output_attentions) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs
Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail.
github-repos
def trigger_methods(instance, args): for name in sorted(args): value = args[name] target = instance if (name.startswith('response_') or name.startswith('reply_')): name = name.replace('response_', '').replace('reply_', '') if hasattr(instance, '_response'): target = instance._response member = getattr(target, name, None) isattr = (name in dir(target)) iscallable = (ismethod(member) and (not isfunction(member))) if ((not iscallable) and (not isattr)): raise PookInvalidArgument('Unsupported argument: {}'.format(name)) if iscallable: member(value) else: setattr(target, name, value)
Triggers specific class methods using a simple reflection mechanism based on the given input dictionary params. Arguments: instance (object): target instance on which to dynamically trigger methods. args (iterable): input arguments used to trigger methods or set attributes on the instance. Returns: None
codesearchnet
def put(self, id, name, description, private, runs_executable_tasks, runs_docker_container_tasks, runs_singularity_container_tasks, active, whitelists): request_url = (self._client.base_api_url + self.detail_url.format(id=id)) data_to_put = {'name': name, 'description': description, 'private': private, 'runs_executable_tasks': runs_executable_tasks, 'runs_docker_container_tasks': runs_docker_container_tasks, 'runs_singularity_container_tasks': runs_singularity_container_tasks, 'active': active, 'whitelists': whitelists} response = self._client.session.put(request_url, data=data_to_put) self.validate_request_success(response_text=response.text, request_url=request_url, status_code=response.status_code, expected_status_code=HTTP_200_OK) return self.response_data_to_model_instance(response.json())
Updates a task queue on the saltant server. Args: id (int): The ID of the task queue. name (str): The name of the task queue. description (str): The description of the task queue. private (bool): A Boolean signalling whether the queue can only be used by its associated user. runs_executable_tasks (bool): A Boolean specifying whether the queue runs executable tasks. runs_docker_container_tasks (bool): A Boolean specifying whether the queue runs container tasks that run in Docker containers. runs_singularity_container_tasks (bool): A Boolean specifying whether the queue runs container tasks that run in Singularity containers. active (bool): A Boolean signalling whether the queue is active. whitelists (list): A list of task whitelist IDs. Returns: :class:`saltant.models.task_queue.TaskQueue`: A task queue model instance representing the task queue just updated.
codesearchnet
def compile_protofile(proto_file_path): out_file = tempfile.mkstemp()[1] try: subprocess.check_output(['protoc', '--include_source_info', '--descriptor_set_out', out_file, proto_file_path]) except subprocess.CalledProcessError as e: sys.exit('protoc returned status {}'.format(e.returncode)) return out_file
Compile proto file to descriptor set. Args: proto_file_path: Path to proto file to compile. Returns: Path to file containing compiled descriptor set. Raises: SystemExit if the compilation fails.
codesearchnet
async def _async_loop(self, urls): results = [] async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False)) as session: for url in urls: result = asyncio.ensure_future(self._get_async(url, session)) results.append(result) responses = (await asyncio.gather(*results)) return responses
Asynchronous internal method used to request multiple URLs. Args: urls (list): URLs to fetch Returns: responses (list): Responses from all URL requests
codesearchnet
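A standalone version of the fan-out-and-gather pattern used by _async_loop; the URLs are placeholders, and running it requires aiohttp plus network access.

import asyncio
import aiohttp

async def fetch_all(urls):
    # One GET per URL, gathered concurrently, mirroring the ensure_future/gather pattern above.
    async with aiohttp.ClientSession() as session:
        async def fetch(url):
            async with session.get(url) as resp:
                return resp.status
        tasks = [asyncio.ensure_future(fetch(u)) for u in urls]
        return await asyncio.gather(*tasks)

# Placeholder URLs; running this needs aiohttp installed and network access.
print(asyncio.run(fetch_all(['https://example.com', 'https://example.org'])))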