Columns: code (string, 20 to 4.93k characters), docstring (string, 33 to 1.27k characters), source (string, 3 classes: codesearchnet, juraj-google-style, github-repos).
def delete(self): clone = copy.deepcopy(self) return [(item.delete() and item) for item in clone]
Deletes all objects that match the queryset. Note: Unlike an RDBMS, this method makes individual save calls to the backend DB store, so it exists as a convenience utility rather than a performance enhancement. Returns: List of deleted objects, or None if *confirm* is not set. Example: >>> Person.objects.filter(age__gte=16, name__startswith='jo').delete()
codesearchnet
def plot_compare(self, other_plotter): data_orig = self.bs_plot_data() data = other_plotter.bs_plot_data() if len(data_orig['distances']) != len(data['distances']): raise ValueError('The two objects are not compatible.') plt = self.get_plot() band_linewidth = 1 for i in range(other_plotter._nb_bands): for d in range(len(data_orig['distances'])): plt.plot(data_orig['distances'][d], [e[i] for e in data['frequency']][d], 'r-', linewidth=band_linewidth) return plt
Plot two band structures for comparison. One is in red, the other in blue. The two band structures must be defined on the same symmetry lines, and the distance between symmetry lines is taken from the band structure used to build the PhononBSPlotter. Args: other_plotter: another PhononBSPlotter object defined along the same symmetry lines. Returns: a matplotlib object with both band structures
juraj-google-style
def _verifyClusterSpecEquality(self, cluster_spec, expected_proto): self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def()) self.assertProtoEquals(expected_proto, server_lib.ClusterSpec(cluster_spec).as_cluster_def()) self.assertProtoEquals(expected_proto, server_lib.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def()) self.assertProtoEquals(expected_proto, server_lib.ClusterSpec(cluster_spec.as_dict()).as_cluster_def())
Verifies that the ClusterSpec generates the correct proto. We are testing this four different ways to ensure that the ClusterSpec returned by the TPUClusterResolver behaves identically to a normal ClusterSpec when passed into the generic ClusterSpec libraries. Args: cluster_spec: ClusterSpec returned by the TPUClusterResolver expected_proto: Expected protobuf
github-repos
def construct_policy(app='coreforrest', env='dev', group='forrest', region='us-east-1', pipeline_settings=None): LOG.info('Create custom IAM Policy for %s.', app) services = pipeline_settings.get('services', {}) LOG.debug('Found requested services: %s', services) services = auto_service(pipeline_settings=pipeline_settings, services=services) if services: credential = get_env_credential(env=env) account_number = credential['accountId'] statements = [] for service, value in services.items(): if value is True: items = [] elif isinstance(value, str): items = [value] else: items = value rendered_statements = render_policy_template( account_number=account_number, app=app, env=env, group=group, items=items, pipeline_settings=pipeline_settings, region=region, service=service) statements.extend(rendered_statements) if statements: policy_json = get_template('infrastructure/iam/wrapper.json.j2', statements=json.dumps(statements)) else: LOG.info('No services defined for %s.', app) policy_json = None return policy_json
Assemble IAM Policy for _app_. Args: app (str): Name of Spinnaker Application. env (str): Environment/Account in AWS. group (str): Application group/namespace. region (str): AWS region. pipeline_settings (dict): Settings from *pipeline.json*. Returns: json: Custom IAM Policy for _app_. None: When no *services* have been defined in *pipeline.json*.
juraj-google-style
def _RDFClass(cls, table): rdf_cls_name = "OsqueryTable{}".format(hash(table.query)) try: return cls._rdf_cls_cache[rdf_cls_name] except KeyError: pass rdf_cls = compatibility.MakeType(rdf_cls_name, (rdf_structs.RDFProtoStruct,), {}) rdf_cls.AddDescriptor( rdf_structs.ProtoEmbedded( name="metadata", field_number=1, nested=ExportedMetadata)) rdf_cls.AddDescriptor( rdf_structs.ProtoString(name="__query__", field_number=2)) for idx, column in enumerate(table.header.columns): if column.name == "metadata": name = "__metadata__" else: name = column.name descriptor = rdf_structs.ProtoString(name=name, field_number=idx + 3) rdf_cls.AddDescriptor(descriptor) cls._rdf_cls_cache[rdf_cls_name] = rdf_cls return rdf_cls
Creates a dynamic RDF proto struct class for given osquery table. The fields of the proto will correspond to the columns of the table. Args: table: An osquery table for which the class is about to be generated. Returns: A class object corresponding to the given table.
juraj-google-style
def QA_fetch_user(user_cookie, db=DATABASE): collection = db.account return [res for res in collection.find({'user_cookie': user_cookie}, {"_id": 0})]
Get the user. Arguments: user_cookie (str): the unique cookie_id for a user. Keyword Arguments: db: database to query. Returns: list --- [ACCOUNT]
juraj-google-style
def load_chkpt_vars(model_path): model_path = get_checkpoint_path(model_path) reader = tfv1.train.NewCheckpointReader(model_path) var_names = reader.get_variable_to_shape_map().keys() result = {} for n in var_names: result[n] = reader.get_tensor(n) return result
Load all variables from a checkpoint to a dict. Args: model_path(str): path to a checkpoint. Returns: dict: a name:value dict
juraj-google-style
def unstack(df, level=(- 1), reset_index=True): df = df.unstack(level=level) if reset_index: df = df.reset_index() df.columns = df.columns.map(_join_names) return df
pd.DataFrame.unstack adapter. Call the `df.unstack` method using the indicated level and afterwards join the column names using an underscore. Args: df (pandas.DataFrame): DataFrame to unstack. level (str, int or list): Level(s) of index to unstack; a level name can be passed. reset_index (bool): Whether to reset the index after unstacking. Returns: pandas.DataFrame: unstacked dataframe
codesearchnet
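A minimal sketch of the adapter's behaviour, assuming `_join_names` (not shown in the snippet) simply joins the non-empty parts of a flattened MultiIndex column label with an underscore:

```python
import pandas as pd

def _join_names(names):
    # Assumed helper: join the parts of a flattened MultiIndex column label.
    return "_".join(str(n) for n in names if str(n) != "")

index = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=["key", "step"])
df = pd.DataFrame({"value": [10, 20, 30, 40]}, index=index)

wide = df.unstack(level=-1)               # columns become ("value", 1), ("value", 2)
wide = wide.reset_index()
wide.columns = wide.columns.map(_join_names)
print(wide.columns.tolist())              # ['key', 'value_1', 'value_2']
```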
def Mean(self): old_p = 0 total = 0.0 for (x, new_p) in zip(self.xs, self.ps): p = (new_p - old_p) total += (p * x) old_p = new_p return total
Computes the mean of a CDF. Returns: float mean
codesearchnet
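A self-contained check of the increment-weighted sum that `Mean` uses: each value is weighted by the probability mass `new_p - old_p` it contributes to the CDF.

```python
# CDF of a fair six-sided die: xs are outcomes, ps are cumulative probabilities.
xs = [1, 2, 3, 4, 5, 6]
ps = [i / 6 for i in range(1, 7)]

old_p, total = 0.0, 0.0
for x, new_p in zip(xs, ps):
    total += (new_p - old_p) * x   # probability mass of x, times x
    old_p = new_p

print(total)  # approximately 3.5, the expected value of a fair die
```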
def speed_info(self): speed_info = structs.JLinkSpeedInfo() self._dll.JLINKARM_GetSpeedInfo(ctypes.byref(speed_info)) return speed_info
Retrieves information about supported target interface speeds. Args: self (JLink): the ``JLink`` instance Returns: The ``JLinkSpeedInfo`` instance describing the supported target interface speeds.
juraj-google-style
def value_of( self, value: Union[sympy.Basic, float, str] ) -> Union[sympy.Basic, float]: if isinstance(value, str): return self.param_dict.get(value, sympy.Symbol(value)) if isinstance(value, sympy.Basic): if sys.version_info.major < 3: d = {k.encode(): v for k, v in self.param_dict.items()} v = value.subs(d) else: v = value.subs(self.param_dict) return v if v.free_symbols else float(v) return value
Attempt to resolve a Symbol or name or float to its assigned value. If unable to resolve a sympy.Symbol, returns it unchanged. If unable to resolve a name, returns a sympy.Symbol with that name. Args: value: The sympy.Symbol or name or float to try to resolve into just a float. Returns: The value of the parameter as resolved by this resolver.
juraj-google-style
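The resolver above is a thin wrapper around `sympy` substitution; a hedged sketch of the same logic outside the class, with a made-up parameter dictionary:

```python
import sympy

param_dict = {"theta": 0.5, "phi": 1.25}   # hypothetical name-to-value mapping

def value_of(value):
    # Mirrors the snippet: names become Symbols, expressions are substituted,
    # and fully resolved expressions collapse to floats.
    if isinstance(value, str):
        return param_dict.get(value, sympy.Symbol(value))
    if isinstance(value, sympy.Basic):
        v = value.subs({sympy.Symbol(k): val for k, val in param_dict.items()})
        return v if v.free_symbols else float(v)
    return value

print(value_of("theta"))                                      # 0.5
print(value_of(sympy.Symbol("theta") + sympy.Symbol("phi")))  # 1.75
print(value_of(sympy.Symbol("unknown")))                      # stays symbolic
```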
def add_features(self, features, append=True, merge='outer', duplicates='ignore', min_studies=0.0, threshold=0.001): if ((not append) or (not hasattr(self, 'feature_table'))): self.feature_table = FeatureTable(self) self.feature_table.add_features(features, merge=merge, duplicates=duplicates, min_studies=min_studies, threshold=threshold)
Construct a new FeatureTable from file. Args: features: Feature data to add. Can be: (a) A text file containing the feature data, where each row is a study in the database, with features in columns. The first column must contain the IDs of the studies to match up with the image data. (b) A pandas DataFrame, where studies are in rows, features are in columns, and the index provides the study IDs. append (bool): If True, adds new features to existing ones incrementally. If False, replaces old features. merge, duplicates, min_studies, threshold: Additional arguments passed to FeatureTable.add_features().
codesearchnet
def get_flat_tensor_specs(element_spec): return list(itertools.chain.from_iterable((spec._flat_tensor_specs for spec in nest.flatten(element_spec))))
Returns a list of `tf.TypeSpec`s for the element tensor representation. Args: element_spec: A nested structure of `tf.TypeSpec` objects representing the element type specification. Returns: A list of `tf.TypeSpec`s for the element tensor representation.
github-repos
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: if token_ids_1 is None: return self.bos_token_id + token_ids_0 + self.eos_token_id return self.bos_token_id + token_ids_0 + token_ids_1 + self.eos_token_id
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. The special tokens depend on calling set_lang. An NLLB sequence has the following format, where `X` represents the sequence: - `input_ids` (for encoder) `X [eos, src_lang_code]` - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]` BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator. Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
github-repos
def __init__(self, value=HashingAlgorithmEnum.SHA_256): super(HashingAlgorithm, self).__init__( enums.HashingAlgorithm, value, Tags.HASHING_ALGORITHM)
Construct a HashingAlgorithm object. Args: value (HashingAlgorithm): A HashingAlgorithm enumeration value, (e.g., HashingAlgorithm.MD5). Optional, defaults to HashingAlgorithm.SHA_256.
juraj-google-style
async def _create_remote_user(self, **payload): read_action = get_crud_action(method='create', model='user') user_data = await self.event_broker.ask( action_type=read_action, payload=payload ) return json.loads(user_data)
This method creates a service record in the remote user service with the given email. Args: uid (str): the user identifier to create Returns: (dict): a summary of the user that was created
juraj-google-style
def _init_metadata_service(self, version): metadata_cfg = self._load_config_section(CONFIG_METADATA_SECTION) self._token_metadata = metadata_cfg[CONFIG_TOKEN] proto = metadata_cfg[CONFIG_PROTOCOL] host = metadata_cfg[CONFIG_HOST] self._metadata = MetadataService(host, version) self._metadata.base_protocol = proto self._metadata.set_auth(self._token_metadata)
Method to initialize the Metadata Service from the config data Args: version (string): Version of Boss API to use. Returns: None Raises: (KeyError): if given invalid version.
codesearchnet
class SeamlessM4TProcessor(ProcessorMixin): feature_extractor_class = 'SeamlessM4TFeatureExtractor' tokenizer_class = ('SeamlessM4TTokenizer', 'SeamlessM4TTokenizerFast') def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) def __call__(self, text=None, audios=None, src_lang=None, tgt_lang=None, **kwargs): sampling_rate = kwargs.pop('sampling_rate', None) if text is None and audios is None: raise ValueError('You have to specify either text or audios. Both cannot be none.') elif text is not None and audios is not None: raise ValueError('Text and audios are mututally exclusive when passed to `SeamlessM4T`. Specify one or another.') elif text is not None: if tgt_lang is not None: self.tokenizer.tgt_lang = tgt_lang if src_lang is not None: self.tokenizer.src_lang = src_lang encoding = self.tokenizer(text, **kwargs) return encoding else: encoding = self.feature_extractor(audios, sampling_rate=sampling_rate, **kwargs) return encoding def batch_decode(self, *args, **kwargs): return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names feature_extractor_input_names = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
Constructs a SeamlessM4T processor which wraps a SeamlessM4T feature extractor and a SeamlessM4T tokenizer into a single processor. [`SeamlessM4TProcessor`] offers all the functionalities of [`SeamlessM4TFeatureExtractor`] and [`SeamlessM4TTokenizerFast`]. See the [`~SeamlessM4TProcessor.__call__`] and [`~SeamlessM4TProcessor.decode`] for more information. Args: feature_extractor ([`SeamlessM4TFeatureExtractor`]): The audio processor is a required input. tokenizer ([`SeamlessM4TTokenizerFast`]): The tokenizer is a required input.
github-repos
def _update_task(self, task): self.task = task self.task.data.update(self.task_data) self.task_type = task.task_spec.__class__.__name__ self.spec = task.task_spec self.task_name = task.get_name() self.activity = getattr(self.spec, 'service_class', '') self._set_lane_data()
Assigns current task step to self.task then updates the task's data with self.task_data Args: task: Task object.
juraj-google-style
def update_context(self, context, app=None): if ((app is None) and (self._context is _CONTEXT_MISSING) and (not in_app_context())): raise RuntimeError('Attempted to update component context without a bound app context or eager app set! Please pass the related app you want to update the context for!') if (self._context is not _CONTEXT_MISSING): self._context = ImmutableDict(context) else: key = self._get_context_name(app=app) setattr(_CONTEXT_LOCALS, key, ImmutableDict(context))
Replace the component's context with a new one. Args: context (dict): The new context to set this component's context to. Keyword Args: app (flask.Flask, optional): The app to update this context for. If not provided, the result of ``Component.app`` will be used.
codesearchnet
def _term(self, term): term = str(term) if term: self.__query['q'] += term return self
Add a term to the query. Arguments: term (str): The term to add. Returns: SearchHelper: Self
codesearchnet
def subtract(inputs, **kwargs): return Subtract(**kwargs)(inputs)
Functional interface to the `Subtract` layer. Args: inputs: A list of input tensors (exactly 2). **kwargs: Standard layer keyword arguments. Returns: A tensor, the difference of the inputs. Examples: ```python import keras input1 = keras.layers.Input(shape=(16,)) x1 = keras.layers.Dense(8, activation='relu')(input1) input2 = keras.layers.Input(shape=(32,)) x2 = keras.layers.Dense(8, activation='relu')(input2) subtracted = keras.layers.subtract([x1, x2]) out = keras.layers.Dense(4)(subtracted) model = keras.models.Model(inputs=[input1, input2], outputs=out) ```
github-repos
def _update_explicit_bucket_count(a_float, dist): buckets = dist.explicitBuckets if (buckets is None): raise ValueError((_BAD_UNSET_BUCKETS % u'explicit buckets')) bucket_counts = dist.bucketCounts bounds = buckets.bounds if (len(bucket_counts) < (len(bounds) + 1)): raise ValueError(_BAD_LOW_BUCKET_COUNT) bucket_counts[bisect.bisect(bounds, a_float)] += 1
Adds `a_float` to `dist`, updating its explicit buckets. Args: a_float (float): a new value dist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`): the Distribution being updated Raises: ValueError: if `dist` does not already have explicit buckets defined ValueError: if there are not enough bucket count fields in `dist`
codesearchnet
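The heart of the update above is `bisect.bisect`, which maps a value to its bucket index given the bound list; a standalone sketch with made-up bounds:

```python
import bisect

bounds = [0.0, 1.0, 2.0]        # 3 bounds -> 4 buckets: (-inf, 0), [0, 1), [1, 2), [2, inf)
bucket_counts = [0, 0, 0, 0]    # must hold len(bounds) + 1 entries

for sample in (-0.5, 0.3, 0.9, 1.5, 7.0):
    bucket_counts[bisect.bisect(bounds, sample)] += 1

print(bucket_counts)  # [1, 2, 1, 1]
```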
def eval(self, feed_dict=None, session=None): return _eval_using_default_session(self, feed_dict, self.graph, session)
Evaluates this tensor in a `Session`. Note: If you are not using `compat.v1` libraries, you should not need this, (or `feed_dict` or `Session`). In eager execution (or within `tf.function`) you do not need to call `eval`. Calling this method will execute all preceding operations that produce the inputs needed for the operation that produces this tensor. *N.B.* Before invoking `Tensor.eval()`, its graph must have been launched in a session, and either a default session must be available, or `session` must be specified explicitly. Args: feed_dict: A dictionary that maps `Tensor` objects to feed values. See `tf.Session.run` for a description of the valid feed values. session: (Optional.) The `Session` to be used to evaluate this tensor. If none, the default session will be used. Returns: A numpy array corresponding to the value of this tensor.
github-repos
def clear_events(self, event_name): self.lock.acquire() try: q = self.get_event_q(event_name) q.queue.clear() except queue.Empty: return finally: self.lock.release()
Clear all events of a particular name. Args: event_name: Name of the events to be popped.
juraj-google-style
def List(self, request, global_params=None): config = self.GetMethodConfig('List') return self._RunMethod(config, request, global_params=global_params)
Lists previously requested builds. Previously requested builds may still be in-progress, or may have finished successfully or unsuccessfully. Args: request: (CloudbuildProjectsBuildsListRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (ListBuildsResponse) The response message.
github-repos
def get_statistics(self, id_or_uri, port_name=''): uri = self._client.build_uri(id_or_uri) + "/statistics" if port_name: uri = uri + "/" + port_name return self._client.get(uri)
Gets the statistics from an interconnect. Args: id_or_uri: Can be either the interconnect id or the interconnect uri. port_name (str): A specific port name of an interconnect. Returns: dict: The statistics for the interconnect that matches id.
juraj-google-style
def CheckKeyCompatibility(cls, key_path): key_path_upper = key_path.upper() for key_path_prefix in cls._COMPATIBLE_REGISTRY_KEY_PATH_PREFIXES: if key_path_upper.startswith(key_path_prefix): return True logger.warning('Key path: "{0:s}" is currently not supported'.format( key_path)) return False
Checks if a Windows Registry key path is supported by dfWinReg. Args: key_path (str): path of the Windows Registry key. Returns: bool: True if key is compatible or False if not.
juraj-google-style
def previous_weekday(date): weekday = date.weekday() if (weekday == 0): n_days = 3 elif (weekday == 6): n_days = 2 else: n_days = 1 return (date - datetime.timedelta(days=n_days))
Returns the last weekday before date Args: date (datetime or datetime.date) Returns: (datetime or datetime.date) Raises: -
codesearchnet
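A quick usage sketch, assuming `previous_weekday` from the snippet above is in scope: Monday steps back three days, Sunday two, every other day one.

```python
import datetime

print(previous_weekday(datetime.date(2023, 10, 2)))  # Monday    -> 2023-09-29 (Friday)
print(previous_weekday(datetime.date(2023, 10, 1)))  # Sunday    -> 2023-09-29 (Friday)
print(previous_weekday(datetime.date(2023, 10, 4)))  # Wednesday -> 2023-10-03 (Tuesday)
```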
def add_filter(ds, patterns): if (not plugins.is_datasource(ds)): raise Exception('Filters are applicable only to datasources.') delegate = dr.get_delegate(ds) if delegate.raw: raise Exception("Filters aren't applicable to raw datasources.") if (not delegate.filterable): raise Exception(("Filters aren't applicable to %s." % dr.get_name(ds))) if (ds in _CACHE): del _CACHE[ds] if isinstance(patterns, six.string_types): FILTERS[ds].add(patterns) elif isinstance(patterns, list): FILTERS[ds] |= set(patterns) elif isinstance(patterns, set): FILTERS[ds] |= patterns else: raise TypeError('patterns must be string, list, or set.')
Add a filter or list of filters to a datasource. A filter is a simple string, and it matches if it is contained anywhere within a line. Args: ds (@datasource component): The datasource to filter patterns (str, [str]): A string, list of strings, or set of strings to add to the datasource's filters.
codesearchnet
def put(self, closure, tag=None): closure.tag = tag if tag is not None: with self._queue_lock: self._tagged_queue[tag].put(closure, block=False) self._closures_queued_condition.notify_all() else: with self._put_wait_lock, self._queue_lock: self._queue_free_slot_condition.wait_for(lambda: not self._queue.full()) self._queue.put(closure, block=False) metric_utils.monitor_int('queued_closures', self._queue.qsize()) self._raise_if_error() self._closures_queued_condition.notify()
Put a closure into the queue for later execution. If `mark_failed` was called before `put`, the error from the first invocation of `mark_failed` will be raised. Args: closure: The `Closure` to put into the queue. tag: if not None, put into a queue with the given tag.
github-repos
def pymmh3_hash128_x86(key: Union[bytes, bytearray], seed: int) -> int: def fmix(h): h ^= h >> 16 h = (h * 0x85ebca6b) & 0xFFFFFFFF h ^= h >> 13 h = (h * 0xc2b2ae35) & 0xFFFFFFFF h ^= h >> 16 return h length = len(key) nblocks = int(length / 16) h1 = seed h2 = seed h3 = seed h4 = seed c1 = 0x239b961b c2 = 0xab0e9789 c3 = 0x38b34ae5 c4 = 0xa1e38b93 for block_start in range(0, nblocks * 16, 16): k1 = ( key[block_start + 3] << 24 | key[block_start + 2] << 16 | key[block_start + 1] << 8 | key[block_start + 0] ) k2 = ( key[block_start + 7] << 24 | key[block_start + 6] << 16 | key[block_start + 5] << 8 | key[block_start + 4] ) k3 = ( key[block_start + 11] << 24 | key[block_start + 10] << 16 | key[block_start + 9] << 8 | key[block_start + 8] ) k4 = ( key[block_start + 15] << 24 | key[block_start + 14] << 16 | key[block_start + 13] << 8 | key[block_start + 12] ) k1 = (c1 * k1) & 0xFFFFFFFF k1 = (k1 << 15 | k1 >> 17) & 0xFFFFFFFF k1 = (c2 * k1) & 0xFFFFFFFF h1 ^= k1 h1 = (h1 << 19 | h1 >> 13) & 0xFFFFFFFF h1 = (h1 + h2) & 0xFFFFFFFF h1 = (h1 * 5 + 0x561ccd1b) & 0xFFFFFFFF k2 = (c2 * k2) & 0xFFFFFFFF k2 = (k2 << 16 | k2 >> 16) & 0xFFFFFFFF k2 = (c3 * k2) & 0xFFFFFFFF h2 ^= k2 h2 = (h2 << 17 | h2 >> 15) & 0xFFFFFFFF h2 = (h2 + h3) & 0xFFFFFFFF h2 = (h2 * 5 + 0x0bcaa747) & 0xFFFFFFFF k3 = (c3 * k3) & 0xFFFFFFFF k3 = (k3 << 17 | k3 >> 15) & 0xFFFFFFFF k3 = (c4 * k3) & 0xFFFFFFFF h3 ^= k3 h3 = (h3 << 15 | h3 >> 17) & 0xFFFFFFFF h3 = (h3 + h4) & 0xFFFFFFFF h3 = (h3 * 5 + 0x96cd1c35) & 0xFFFFFFFF k4 = (c4 * k4) & 0xFFFFFFFF k4 = (k4 << 18 | k4 >> 14) & 0xFFFFFFFF k4 = (c1 * k4) & 0xFFFFFFFF h4 ^= k4 h4 = (h4 << 13 | h4 >> 19) & 0xFFFFFFFF h4 = (h1 + h4) & 0xFFFFFFFF h4 = (h4 * 5 + 0x32ac3b17) & 0xFFFFFFFF tail_index = nblocks * 16 k1 = 0 k2 = 0 k3 = 0 k4 = 0 tail_size = length & 15 if tail_size >= 15: k4 ^= key[tail_index + 14] << 16 if tail_size >= 14: k4 ^= key[tail_index + 13] << 8 if tail_size >= 13: k4 ^= key[tail_index + 12] if tail_size > 12: k4 = (k4 * c4) & 0xFFFFFFFF k4 = (k4 << 18 | k4 >> 14) & 0xFFFFFFFF k4 = (k4 * c1) & 0xFFFFFFFF h4 ^= k4 if tail_size >= 12: k3 ^= key[tail_index + 11] << 24 if tail_size >= 11: k3 ^= key[tail_index + 10] << 16 if tail_size >= 10: k3 ^= key[tail_index + 9] << 8 if tail_size >= 9: k3 ^= key[tail_index + 8] if tail_size > 8: k3 = (k3 * c3) & 0xFFFFFFFF k3 = (k3 << 17 | k3 >> 15) & 0xFFFFFFFF k3 = (k3 * c4) & 0xFFFFFFFF h3 ^= k3 if tail_size >= 8: k2 ^= key[tail_index + 7] << 24 if tail_size >= 7: k2 ^= key[tail_index + 6] << 16 if tail_size >= 6: k2 ^= key[tail_index + 5] << 8 if tail_size >= 5: k2 ^= key[tail_index + 4] if tail_size > 4: k2 = (k2 * c2) & 0xFFFFFFFF k2 = (k2 << 16 | k2 >> 16) & 0xFFFFFFFF k2 = (k2 * c3) & 0xFFFFFFFF h2 ^= k2 if tail_size >= 4: k1 ^= key[tail_index + 3] << 24 if tail_size >= 3: k1 ^= key[tail_index + 2] << 16 if tail_size >= 2: k1 ^= key[tail_index + 1] << 8 if tail_size >= 1: k1 ^= key[tail_index + 0] if tail_size > 0: k1 = (k1 * c1) & 0xFFFFFFFF k1 = (k1 << 15 | k1 >> 17) & 0xFFFFFFFF k1 = (k1 * c2) & 0xFFFFFFFF h1 ^= k1 h1 ^= length h2 ^= length h3 ^= length h4 ^= length h1 = (h1 + h2) & 0xFFFFFFFF h1 = (h1 + h3) & 0xFFFFFFFF h1 = (h1 + h4) & 0xFFFFFFFF h2 = (h1 + h2) & 0xFFFFFFFF h3 = (h1 + h3) & 0xFFFFFFFF h4 = (h1 + h4) & 0xFFFFFFFF h1 = fmix(h1) h2 = fmix(h2) h3 = fmix(h3) h4 = fmix(h4) h1 = (h1 + h2) & 0xFFFFFFFF h1 = (h1 + h3) & 0xFFFFFFFF h1 = (h1 + h4) & 0xFFFFFFFF h2 = (h1 + h2) & 0xFFFFFFFF h3 = (h1 + h3) & 0xFFFFFFFF h4 = (h1 + h4) & 0xFFFFFFFF return h4 << 96 | h3 << 64 | h2 << 32 | h1
Implements 128-bit murmur3 hash for x86, as per ``pymmh3``, with some bugfixes. Args: key: data to hash seed: seed Returns: integer hash
juraj-google-style
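The function above returns the 128-bit digest as a single Python int; a minimal call, rendering the result as 32 hex characters (no particular digest value is asserted here):

```python
digest = pymmh3_hash128_x86(b"The quick brown fox", seed=0)
print(f"{digest:032x}")   # 128-bit hash as hex
```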
def GetCampaigns(self, client_customer_id): self.client.SetClientCustomerId(client_customer_id) max_tries = 3 today = time.strftime('%Y%m%d', time.localtime()) for i in xrange(1, max_tries + 1): try: selector = { 'fields': ['Id', 'Name', 'Status', 'BudgetId', 'Amount'], 'predicates': [ { 'field': 'Status', 'operator': 'NOT_EQUALS', 'values': ['REMOVED'] } ], 'dateRange': { 'min': today, 'max': today } } campaigns = self.client.GetService('CampaignService').get(selector) if int(campaigns['totalNumEntries']) > 0: return campaigns['entries'] else: return None except Exception, e: if i == max_tries: raise GoogleAdsError(e) continue
Returns a client account's Campaigns that haven't been removed. Args: client_customer_id: str Client Customer Id used to retrieve Campaigns. Returns: list List of Campaign data objects.
juraj-google-style
def convert_reduce_sum(params, w_name, scope_name, inputs, layers, weights, names): print('Converting reduce_sum ...') keepdims = params['keepdims'] > 0 axis = params['axes'] def target_layer(x, keepdims=keepdims, axis=axis): import keras.backend as K return K.sum(x, keepdims=keepdims, axis=axis) lambda_layer = keras.layers.Lambda(target_layer) layers[scope_name] = lambda_layer(layers[inputs[0]])
Convert reduce_sum layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
juraj-google-style
def create_pipeline(gcp_project_id, region, pipeline_name, pipeline_root, csv_file, module_file, beam_runner, metadata_file): example_gen = tfx.components.CsvExampleGen(input_base=csv_file) statistics_gen = tfx.components.StatisticsGen(examples=example_gen.outputs['examples']) schema_gen = tfx.components.SchemaGen(statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True) transform = tfx.components.Transform(examples=example_gen.outputs['examples'], schema=schema_gen.outputs['schema'], module_file=module_file) trainer = tfx.components.Trainer(module_file=module_file, examples=transform.outputs['transformed_examples'], transform_graph=transform.outputs['transform_graph']) components = [example_gen, statistics_gen, schema_gen, transform, trainer] beam_pipeline_args_by_runner = {'DirectRunner': [], 'DataflowRunner': ['--runner=DataflowRunner', '--project=' + gcp_project_id, '--temp_location=' + os.path.join(pipeline_root, 'tmp'), '--region=' + region]} return tfx.dsl.Pipeline(pipeline_name=pipeline_name, pipeline_root=pipeline_root, components=components, enable_cache=True, metadata_connection_config=tfx.orchestration.metadata.sqlite_metadata_connection_config(metadata_file), beam_pipeline_args=beam_pipeline_args_by_runner[beam_runner])
Create the TFX pipeline. Args: gcp_project_id (str): ID for the google cloud project to deploy the pipeline to. region (str): Region in which to deploy the pipeline. pipeline_name (str): Name for the Beam pipeline pipeline_root (str): Path to artifact repository where TFX stores a pipeline’s artifacts. csv_file (str): Path to the csv input file. module_file (str): Path to module file containing the preprocessing_fn and run_fn. beam_runner (str): Beam runner: DataflowRunner or DirectRunner. metadata_file (str): Path to store a metadata file as a mock metadata database.
github-repos
def bool(name, execute_bool=True, default=None): def wrapped(func): @functools.wraps(func) def _decorator(*args, **kwargs): if core.isset(name) and core.bool(name) == execute_bool: return func(*args, **kwargs) elif default is not None and default == execute_bool: return func(*args, **kwargs) return _decorator return wrapped
Only execute the function if the boolean variable is set. Args: name: The name of the environment variable execute_bool: The boolean value to execute the function on default: The default value if the environment variable is not set (respects `execute_bool`) Returns: The function return value or `None` if the function was skipped.
juraj-google-style
def get_subscription_from_cli(name=None): home = os.path.expanduser('~') azure_profile_path = home + os.sep + '.azure' + os.sep + 'azureProfile.json' if os.path.isfile(azure_profile_path) is False: print('Error from get_subscription_from_cli(): Cannot find ' + azure_profile_path) return None with io.open(azure_profile_path, 'r', encoding='utf-8-sig') as azure_profile_fd: azure_profile = json.load(azure_profile_fd) for subscription_info in azure_profile['subscriptions']: if (name is None and subscription_info['isDefault'] is True) or \ subscription_info['name'] == name: return subscription_info['id'] return None
Get the default, or named, subscription id from CLI's local cache. Args: name (str): Optional subscription name. If this is set, the subscription id of the named subscription is returned from the CLI cache if present. If not set, the subscription id of the default subscription is returned. Returns: Azure subscription ID string. Requirements: User has run 'az login' once, or is in Azure Cloud Shell.
juraj-google-style
def intersects(self, rect, edges=False): if (self.bottom > rect.top or \ self.top < rect.bottom or \ self.left > rect.right or \ self.right < rect.left): return False if not edges: if (self.bottom == rect.top or \ self.top == rect.bottom or \ self.left == rect.right or \ self.right == rect.left): return False if (self.left == rect.right and self.bottom == rect.top or \ self.left == rect.right and rect.bottom == self.top or \ rect.left == self.right and self.bottom == rect.top or \ rect.left == self.right and rect.bottom == self.top): return False return True
Detect intersections between this rectangle and rect. Args: rect (Rectangle): Rectangle to test for intersections. edges (bool): Accept edge touching rectangles as intersects or not Returns: bool: True if the rectangles intersect, False otherwise
juraj-google-style
def add_field_with_label(self, key, label_description, field): self.inputs[key] = field label = Label(label_description) label.style['margin'] = '0px 5px' label.style['min-width'] = '30%' container = HBox() container.style.update({'justify-content':'space-between', 'overflow':'auto', 'padding':'3px'}) container.append(label, key='lbl' + key) container.append(self.inputs[key], key=key) self.container.append(container, key=key)
Adds a field to the dialog together with a descriptive label and a unique identifier. Note: You can access to the fields content calling the function GenericDialog.get_field(key). Args: key (str): The unique identifier for the field. label_description (str): The string content of the description label. field (Widget): The instance of the field Widget. It can be for example a TextInput or maybe a custom widget.
juraj-google-style
def _isValidQuery(self, query, mode='phonefy'): try: validator = self.modes[mode].get('query_validator') if validator: try: compiledRegexp = re.compile('^{expr}$'.format(expr=validator)) return compiledRegexp.match(query) except AttributeError as e: return True except AttributeError as e: compiledRegexp = re.compile('^{r}$'.format(r=self.validQuery[mode])) return compiledRegexp.match(query)
Method to verify whether a given query can be processed by the platform. The system looks for forbidden characters in the self.Forbidden list. Args: ----- query: The query to be launched. mode: To be chosen amongst mailfy, phonefy, usufy, searchfy. Return: ------- True | False
codesearchnet
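The validation above boils down to anchoring a per-mode pattern and matching the whole query; a standalone sketch with a hypothetical phone-number validator:

```python
import re

VALIDATOR = r"\+?[0-9]{6,15}"   # hypothetical per-mode pattern from a platform config

def is_valid_query(query, validator=VALIDATOR):
    if validator is None:
        return True   # no pattern configured: accept anything
    pattern = re.compile("^{expr}$".format(expr=validator))
    return pattern.match(query) is not None

print(is_valid_query("+34600111222"))  # True
print(is_valid_query("john.doe"))      # False
```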
def get_gene_info(ensembl_ids=None, hgnc_symbols=None): uniq_ensembl_ids = set((ensembl_id for ensembl_id in (ensembl_ids or []))) uniq_hgnc_symbols = set((hgnc_symbol for hgnc_symbol in (hgnc_symbols or []))) genes = [] gene_data = [] if uniq_ensembl_ids: for ensembl_id in uniq_ensembl_ids: for res in query_gene(ensembl_id=ensembl_id): gene_data.append(res) elif uniq_hgnc_symbols: for hgnc_symbol in uniq_hgnc_symbols: query_res = query_gene(hgnc_symbol=hgnc_symbol) if query_res: for res in query_res: gene_data.append(res) else: gene_data.append({'hgnc_symbol': hgnc_symbol, 'hgnc_id': None, 'ensembl_id': None, 'description': None, 'chrom': 'unknown', 'start': 0, 'stop': 0, 'hi_score': None, 'constraint_score': None}) for gene in gene_data: genes.append(Gene(symbol=gene['hgnc_symbol'], hgnc_id=gene['hgnc_id'], ensembl_id=gene['ensembl_id'], description=gene['description'], chrom=gene['chrom'], start=gene['start'], stop=gene['stop'], location=get_cytoband_coord(gene['chrom'], gene['start']), hi_score=gene['hi_score'], constraint_score=gene['constraint_score'], omim_number=get_omim_number(gene['hgnc_symbol']))) return genes
Return the genes info based on the transcripts found Args: ensembl_ids (Optional[list]): list of Ensembl gene ids hgnc_symbols (Optional[list]): list of HGNC gene symbols Returns: iterable: an iterable with `Gene` objects
codesearchnet
def get_min_instability(self, min_voltage=None, max_voltage=None): data = [] for pair in self._select_in_voltage_range(min_voltage, max_voltage): if pair.decomp_e_charge is not None: data.append(pair.decomp_e_charge) if pair.decomp_e_discharge is not None: data.append(pair.decomp_e_discharge) return min(data) if len(data) > 0 else None
The minimum instability along a path for a specific voltage range. Args: min_voltage: The minimum allowable voltage. max_voltage: The maximum allowable voltage. Returns: Minimum decomposition energy of all compounds along the insertion path (a subset of the path can be chosen by the optional arguments)
juraj-google-style
def get_group(self, name, user_name=None): return self.service.get_group( name, user_name, self.url_prefix, self.auth, self.session, self.session_send_opts)
Get owner of group and the resources it's attached to. Args: name (string): Name of group to query. user_name (optional[string]): Supply None if not interested in determining if user is a member of the given group. Returns: (dict): Keys include 'owner', 'name', 'resources'. Raises: requests.HTTPError on failure.
juraj-google-style
def load_pyfile(self, path): with open(path) as config_file: contents = config_file.read() try: exec(compile(contents, path, 'exec'), self) except Exception as e: raise MalformedConfig(path, six.text_type(e))
Load python file as config. Args: path (string): path to the python file
codesearchnet
def tf_step(self, x, iteration, deltas, improvement, last_improvement, estimated_improvement): x, next_iteration, deltas, improvement, last_improvement, estimated_improvement = super(LineSearch, self).tf_step( x, iteration, deltas, improvement, last_improvement, estimated_improvement ) next_x = [t + delta for t, delta in zip(x, deltas)] if self.mode == 'linear': next_deltas = deltas next_estimated_improvement = estimated_improvement + self.estimated_incr elif self.mode == 'exponential': next_deltas = [delta * self.parameter for delta in deltas] next_estimated_improvement = estimated_improvement * self.parameter target_value = self.fn_x(next_deltas) next_improvement = tf.divide( x=(target_value - self.base_value), y=tf.maximum(x=next_estimated_improvement, y=util.epsilon) ) return next_x, next_iteration, next_deltas, next_improvement, improvement, next_estimated_improvement
Iteration loop body of the line search algorithm. Args: x: Current solution estimate $x_t$. iteration: Current iteration counter $t$. deltas: Current difference $x_t - x'$. improvement: Current improvement $(f(x_t) - f(x')) / v'$. last_improvement: Last improvement $(f(x_{t-1}) - f(x')) / v'$. estimated_improvement: Current estimated value $v'$. Returns: Updated arguments for next iteration.
juraj-google-style
def decode_list(self, ids): decoded_ids = [] for id_ in ids: if 0 <= id_ < self._num_reserved_ids: decoded_ids.append(RESERVED_TOKENS[int(id_)]) else: decoded_ids.append(id_ - self._num_reserved_ids) return [str(d) for d in decoded_ids]
Transform a sequence of int ids into a their string versions. This method supports transforming individual input/output ids to their string versions so that sequence to/from text conversions can be visualized in a human readable format. Args: ids: list of integers to be converted. Returns: strs: list of human-readable string.
juraj-google-style
def generate_exact(self, model, vcpu_num, host_cpu): nested = {'Intel': 'vmx', 'AMD': 'svm'} cpu = ET.Element('cpu', match='exact') ET.SubElement(cpu, 'model').text = model cpu.append(self.generate_topology(vcpu_num)) vendor = host_cpu.findtext('vendor') if not nested.get(vendor): LOGGER.debug( 'Unknown vendor: {0}, did not configure nested ' 'virtualization cpu flag on guest.'.format(vendor) ) return cpu model_vendor = LibvirtCPU.get_cpu_vendor(family=model) if vendor != model_vendor: LOGGER.debug( ( 'Not enabling nested virtualization feature, host ' 'vendor is: {0}, guest vendor: ' '{1}'.format(vendor, model_vendor) ) ) return cpu flag = nested[vendor] if host_cpu.find('feature/[@name="{0}"]'.format(flag)) is not None: cpu.append(self.generate_feature(name=flag)) else: LOGGER.debug( ( 'missing {0} cpu flag on host, nested ' 'virtualization will probably not ' 'work.' ).format(flag) ) return cpu
Generate exact CPU model with nested virtualization CPU feature. Args: model(str): libvirt supported CPU model vcpu_num(int): number of virtual cpus host_cpu(lxml.etree.Element): the host CPU model Returns: lxml.etree.Element: CPU XML node
juraj-google-style
def explain(self, entry): d = self.get_explanation_dict(entry) print("The uncorrected value of the energy of %s is %f eV" % (entry.composition, d["uncorrected_energy"])) print("The following corrections / screening are applied for %s:\n" % d["compatibility"]) for c in d["corrections"]: print("%s correction: %s\n" % (c["name"], c["description"])) print("For the entry, this correction has the value %f eV." % c[ "value"]) print("-" * 30) print("The final energy after corrections is %f" % d[ "corrected_energy"])
Prints an explanation of the corrections that are being applied for a given compatibility scheme. Inspired by the "explain" methods in many database methodologies. Args: entry: A ComputedEntry.
juraj-google-style
def print_fhir_to_json_string_for_analytics(fhir_proto: message.Message) -> str: printer = _json_printer.JsonPrinter.compact_printer_for_analytics(_PRIMITIVE_HANDLER) return printer.print(fhir_proto)
Returns an Analytic FHIR JSON representation with no spaces or newlines. Args: fhir_proto: The proto to serialize into a JSON string. Returns: An Analytic FHIR JSON representation with no spaces or newlines.
github-repos
def get_stability_criteria(self, s, n): n = get_uvec(n) stress = s * np.outer(n, n) sym_wallace = self.get_symmetric_wallace_tensor(stress) return np.linalg.det(sym_wallace.voigt)
Gets the stability criteria from the symmetric Wallace tensor from an input vector and stress value. Args: s (float): Stress value at which to evaluate the stability criteria n (3x1 array-like): direction of the applied stress
juraj-google-style
def get_videos_for_ids(edx_video_ids, sort_field=None, sort_dir=SortDirection.asc): (videos, __) = _get_videos_for_filter({'edx_video_id__in': edx_video_ids}, sort_field, sort_dir) return videos
Returns an iterator of videos that match the given list of ids. Args: edx_video_ids (list) sort_field (VideoSortField) sort_dir (SortDirection) Returns: A generator expression that contains the videos found, sorted by the given field and direction, with ties broken by edx_video_id to ensure a total order
codesearchnet
def Delete(self, request, global_params=None): config = self.GetMethodConfig('Delete') return self._RunMethod(config, request, global_params=global_params)
Deletes the routine specified by routineId from the dataset. Args: request: (BigqueryRoutinesDeleteRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (BigqueryRoutinesDeleteResponse) The response message.
github-repos
def serialize_keras_object(obj): if obj is None: return obj if isinstance(obj, PLAIN_TYPES): return obj if isinstance(obj, (list, tuple)): config_arr = [serialize_keras_object(x) for x in obj] return tuple(config_arr) if isinstance(obj, tuple) else config_arr if isinstance(obj, dict): return serialize_dict(obj) if isinstance(obj, bytes): return {'class_name': '__bytes__', 'config': {'value': obj.decode('utf-8')}} if isinstance(obj, slice): return {'class_name': '__slice__', 'config': {'start': serialize_keras_object(obj.start), 'stop': serialize_keras_object(obj.stop), 'step': serialize_keras_object(obj.step)}} if isinstance(obj, type(Ellipsis)): return {'class_name': '__ellipsis__', 'config': {}} if isinstance(obj, backend.KerasTensor): history = getattr(obj, '_keras_history', None) if history: history = list(history) history[0] = history[0].name return {'class_name': '__keras_tensor__', 'config': {'shape': obj.shape, 'dtype': obj.dtype, 'keras_history': history}} if tf.available and isinstance(obj, tf.TensorShape): return obj.as_list() if obj._dims is not None else None if backend.is_tensor(obj): return {'class_name': '__tensor__', 'config': {'value': backend.convert_to_numpy(obj).tolist(), 'dtype': backend.standardize_dtype(obj.dtype)}} if type(obj).__module__ == np.__name__: if isinstance(obj, np.ndarray) and obj.ndim > 0: return {'class_name': '__numpy__', 'config': {'value': obj.tolist(), 'dtype': backend.standardize_dtype(obj.dtype)}} else: return obj.item() if tf.available and isinstance(obj, tf.DType): return obj.name if isinstance(obj, types.FunctionType) and obj.__name__ == '<lambda>': warnings.warn(f'The object being serialized includes a `lambda`. This is unsafe. In order to reload the object, you will have to pass `safe_mode=False` to the loading function. Please avoid using `lambda` in the future, and use named Python functions instead. This is the `lambda` being serialized: {inspect.getsource(obj)}', stacklevel=2) return {'class_name': '__lambda__', 'config': {'value': python_utils.func_dump(obj)}} if tf.available and isinstance(obj, tf.TypeSpec): ts_config = obj._serialize() ts_config = list(map(lambda x: x.as_list() if isinstance(x, tf.TensorShape) else x.name if isinstance(x, tf.DType) else x, ts_config)) return {'class_name': '__typespec__', 'spec_name': obj.__class__.__name__, 'module': obj.__class__.__module__, 'config': ts_config, 'registered_name': None} inner_config = _get_class_or_fn_config(obj) config_with_public_class = serialize_with_public_class(obj.__class__, inner_config) if config_with_public_class is not None: get_build_and_compile_config(obj, config_with_public_class) record_object_after_serialization(obj, config_with_public_class) return config_with_public_class if isinstance(obj, types.FunctionType): module = obj.__module__ else: module = obj.__class__.__module__ class_name = obj.__class__.__name__ if module == 'builtins': registered_name = None elif isinstance(obj, types.FunctionType): registered_name = object_registration.get_registered_name(obj) else: registered_name = object_registration.get_registered_name(obj.__class__) config = {'module': module, 'class_name': class_name, 'config': inner_config, 'registered_name': registered_name} get_build_and_compile_config(obj, config) record_object_after_serialization(obj, config) return config
Retrieve the config dict by serializing the Keras object. `serialize_keras_object()` serializes a Keras object to a python dictionary that represents the object, and is a reciprocal function of `deserialize_keras_object()`. See `deserialize_keras_object()` for more information about the config format. Args: obj: the Keras object to serialize. Returns: A python dict that represents the object. The python dict can be deserialized via `deserialize_keras_object()`.
github-repos
def main(raw_args=None): if (raw_args is None): raw_args = sys.argv[1:] parser = build_parser() args = parser.parse_args(raw_args) if ((args.firmware_image is None) and (args.gdb is None)): print('You must specify either a firmware image or attach a debugger with --gdb <PORT>') return 1 test_args = ['qemu-system-gnuarmeclipse', '-verbose', '-verbose', '-board', 'STM32F0-Discovery', '-nographic', '-monitor', 'null', '-serial', 'null', '--semihosting-config', 'enable=on,target=native', '-d', 'unimp,guest_errors'] if args.firmware_image: test_args += ['-image', args.firmware_image] if args.gdb: test_args += ['--gdb', ('tcp::%d' % args.gdb)] proc = subprocess.Popen(test_args, stdout=sys.stdout, stderr=sys.stderr) try: proc.communicate() except KeyboardInterrupt: proc.terminate() return 0
Run the iotile-emulate script. Args: raw_args (list): Optional list of commmand line arguments. If not passed these are pulled from sys.argv.
codesearchnet
def _quadratic_sum_cost(self, state: _STATE) -> float: cost = 0.0 total_len = float(len(self._c)) (seqs, _) = state for seq in seqs: cost += ((len(seq) / total_len) ** 2) return (- cost)
Cost function that sums squares of lengths of sequences. Args: state: Search state, not mutated. Returns: Cost which is minus the normalized quadratic sum of each linear sequence section in the state. This promotes single, long linear sequence solutions and converges to number -1. The solution with a lowest cost consists of every node being a single sequence and is always less than 0.
codesearchnet
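A toy evaluation of the cost above: six nodes split into sequences of length 4 and 2 cost -((4/6)^2 + (2/6)^2), while a single six-node sequence reaches the minimum of -1.

```python
def quadratic_sum_cost(seqs, total_len):
    # Same formula as the snippet, with the sequences and total length passed in directly.
    return -sum((len(seq) / total_len) ** 2 for seq in seqs)

print(quadratic_sum_cost([[1, 2, 3, 4], [5, 6]], 6.0))   # about -0.556
print(quadratic_sum_cost([[1, 2, 3, 4, 5, 6]], 6.0))     # -1.0
```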
def get_python_version(): ver = str(sys.version_info) mmm = re.search('.*major=([\\d]+), minor=([\\d]+), micro=([\\d]+),.*', ver) return mmm.group(1) + '.' + mmm.group(2) + '.' + mmm.group(3)
Retrieves default Python version. Returns: String that is the version of default Python. e.g. '2.7.4'
github-repos
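An illustration of what the pattern extracts, assuming CPython's usual repr of `sys.version_info`; the same string can also be built directly from the named fields:

```python
import re
import sys

ver = str(sys.version_info)  # e.g. "sys.version_info(major=3, minor=10, micro=12, ...)"
m = re.search(r'.*major=([\d]+), minor=([\d]+), micro=([\d]+),.*', ver)
print(m.group(1) + '.' + m.group(2) + '.' + m.group(3))

# Equivalent without the regex round-trip:
print('{}.{}.{}'.format(*sys.version_info[:3]))
```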
def inverse_removing(self, words_to_remove): mask = np.ones(self.as_np.shape[0], dtype='bool') mask[self.__get_idxs(words_to_remove)] = False if (not self.bow): return ''.join([(self.as_list[i] if mask[i] else 'UNKWORDZ') for i in range(mask.shape[0])]) return ''.join([self.as_list[v] for v in mask.nonzero()[0]])
Returns a string after removing the appropriate words. If self.bow is false, replaces word with UNKWORDZ instead of removing it. Args: words_to_remove: list of ids (ints) to remove Returns: original raw string with appropriate words removed.
codesearchnet
def top_rated(self, **kwargs): path = self._get_path('top_rated') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
Get the list of top rated movies. By default, this list will only include movies that have 10 or more votes. This list refreshes every day. Args: page: (optional) Minimum value of 1. Expected value is an integer. language: (optional) ISO 639-1 code. Returns: A dict representation of the JSON returned from the API.
codesearchnet
def add_exac_info(genes, alias_genes, exac_lines): LOG.info("Add exac pli scores") for exac_gene in parse_exac_genes(exac_lines): hgnc_symbol = exac_gene['hgnc_symbol'].upper() pli_score = exac_gene['pli_score'] for hgnc_id in get_correct_ids(hgnc_symbol, alias_genes): genes[hgnc_id]['pli_score'] = pli_score
Add information from the ExAC genes. Currently we only add the pLI score on gene level. The ExAC resource only uses HGNC symbols to identify genes, so we need our alias mapping. Args: genes(dict): Dictionary with all genes alias_genes(dict): Genes mapped to all aliases exac_lines(iterable): Iterable with raw ExAC info
juraj-google-style
def data(self, index, role=Qt.DisplayRole): if (not index.isValid()): return None col = index.column() columnName = self._dataFrame.columns[index.row()] columnDtype = self._dataFrame[columnName].dtype if ((role == Qt.DisplayRole) or (role == Qt.EditRole)): if (col == 0): if (columnName == index.row()): return index.row() return columnName elif (col == 1): return SupportedDtypes.description(columnDtype) elif (role == DTYPE_ROLE): if (col == 1): return columnDtype else: return None
Retrieve the data stored in the model at the given `index`. Args: index (QtCore.QModelIndex): The model index, which points at a data object. role (Qt.ItemDataRole, optional): Defaults to `Qt.DisplayRole`. You have to use different roles to retrieve different data for an `index`. Accepted roles are `Qt.DisplayRole`, `Qt.EditRole` and `DTYPE_ROLE`. Returns: None if an invalid index is given, the role is not accepted by the model or the column is greater than `1`. The column name will be returned if the given column number equals `0` and the role is either `Qt.DisplayRole` or `Qt.EditRole`. The datatype will be returned, if the column number equals `1`. The `Qt.DisplayRole` or `Qt.EditRole` return a human readable, translated string, whereas the `DTYPE_ROLE` returns the raw data type.
codesearchnet
def save(hdf5_filename, array): hdf5_filename = os.path.expanduser(hdf5_filename) try: h = h5py.File(hdf5_filename, "w") h.create_dataset('CUTOUT', data=array) h.close() except Exception as e: raise ValueError("Could not save HDF5 file {0}.".format(hdf5_filename)) return hdf5_filename
Export a numpy array to a HDF5 file. Arguments: hdf5_filename (str): A filename to which to save the HDF5 data array (numpy.ndarray): The numpy array to save to HDF5 Returns: String. The expanded filename that now holds the HDF5 data
juraj-google-style
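A hedged round-trip sketch using the same h5py calls (assuming h5py and numpy are installed; the filename is arbitrary):

```python
import h5py
import numpy as np

array = np.arange(12).reshape(3, 4)

with h5py.File("cutout.h5", "w") as h:
    h.create_dataset("CUTOUT", data=array)    # same dataset name as the snippet

with h5py.File("cutout.h5", "r") as h:
    restored = h["CUTOUT"][:]

assert (restored == array).all()
```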
def exp(cls, x: 'TensorFluent') -> 'TensorFluent': return cls._unary_op(x, tf.exp, tf.float32)
Returns a TensorFluent for the exp function. Args: x: The input fluent. Returns: A TensorFluent wrapping the exp function.
codesearchnet
def on_run_start(self, request): self._is_run_start = True self._update_run_calls_state(request.run_call_count, request.fetches, request.feed_dict, is_callable_runner=request.is_callable_runner) if self._active_tensor_filter: return self._active_tensor_filter_run_start_response self._exit_if_requested_by_user() if self._run_call_count > 1 and (not self._skip_debug): if self._run_through_times > 0: return framework.OnRunStartResponse(framework.OnRunStartAction.NON_DEBUG_RUN, []) elif self._run_through_times == 0: return self._run_start_response or framework.OnRunStartResponse(framework.OnRunStartAction.DEBUG_RUN, self._get_run_debug_urls()) if self._run_start_response is None: self._prep_cli_for_run_start() self._run_start_response = self._launch_cli() if self._active_tensor_filter: self._active_tensor_filter_run_start_response = self._run_start_response if self._run_through_times > 1: self._run_through_times -= 1 self._exit_if_requested_by_user() return self._run_start_response
Overrides on-run-start callback. Args: request: An instance of `OnRunStartRequest`. Returns: An instance of `OnRunStartResponse`.
github-repos
def decrypt_block(self, cipherText): if not self.initialized: raise TypeError("CamCrypt object has not been initialized") if len(cipherText) != BLOCK_SIZE: raise ValueError("cipherText must be %d bytes long (received %d bytes)" % (BLOCK_SIZE, len(cipherText))) plain = ctypes.create_string_buffer(BLOCK_SIZE) self.decblock(self.bitlen, cipherText, self.keytable, plain) return plain.raw
Decrypt a 16-byte block of data. NOTE: This function was formerly called `decrypt`, but was changed when support for decrypting arbitrary-length strings was added. Args: cipherText (str): 16-byte data. Returns: 16-byte str. Raises: TypeError if CamCrypt object has not been initialized. ValueError if `cipherText` is not BLOCK_SIZE (i.e. 16) bytes.
juraj-google-style
def fstat(self, file_des): file_object = self.filesystem.get_open_file(file_des).get_object() return file_object.stat_result.copy()
Return the os.stat-like tuple for the FakeFile object of file_des. Args: file_des: The file descriptor of filesystem object to retrieve. Returns: The FakeStatResult object corresponding to entry_path. Raises: OSError: if the filesystem object doesn't exist.
juraj-google-style
def __init__(self, xcli, product_name, product_version): self.xcli = xcli self.product_name = product_name self.product_version = product_version self.server_name = getfqdn() self.platform = get_platform_details() if not self.product_name: raise ValueError('product_name is empty') if not self.product_version: raise ValueError('product_version is empty')
init an EventsManager Args: xcli (XCLIClient): xcli client to send the event product_name (string): the sending product's name product_version (string): the sending product's version Raises: ValueError: if missing product_name or product_version
juraj-google-style
def _expected_exercise_fn(design, calibration_indices, continuation_value, exercise_value): mask = exercise_value > 0 design_t = tf.transpose(design, [0, 2, 1]) masked = tf.where(tf.expand_dims(tf.transpose(mask), axis=-1), design_t, tf.zeros_like(design_t)) if calibration_indices is None: submask = masked mask_cont_value = continuation_value else: submask = tf.gather(masked, calibration_indices, axis=1) mask_cont_value = tf.gather(continuation_value, calibration_indices) lhs = tf.matmul(submask, submask, transpose_a=True) lhs_pinv = tf.linalg.pinv(lhs) rhs = tf.matmul(submask, tf.expand_dims(tf.transpose(mask_cont_value), axis=-1), transpose_a=True) beta = tf.matmul(lhs_pinv, rhs) continuation = tf.matmul(design_t, beta) return tf.nn.relu(tf.transpose(tf.squeeze(continuation, axis=-1)))
Returns the expected continuation value for each path. Args: design: A real `Tensor` of shape `[batch_size, basis_size, num_samples]`. calibration_indices: A rank 1 integer `Tensor` denoting indices of samples used for regression. continuation_value: A `Tensor` of shape `[num_samples, batch_size]` and of the same dtype as `design`. The optimal value of the option conditional on not exercising now or earlier, taking future information into account. exercise_value: A `Tensor` of the same shape and dtype as `continuation_value`. Value of the option if exercised immediately at the current time. Returns: A `Tensor` of the same shape and dtype as `continuation_value` whose `(n, v)`-th entry represents the expected continuation value of sample path `n` under the `v`-th payoff scheme.
github-repos
def _ReadOperatingSystemArtifactValues(self, operating_system_values): if not operating_system_values: raise errors.MalformedPresetError('Missing operating system values.') family = operating_system_values.get('family', None) product = operating_system_values.get('product', None) version = operating_system_values.get('version', None) if not family and not product: raise errors.MalformedPresetError( 'Invalid operating system missing family and product.') return artifacts.OperatingSystemArtifact( family=family, product=product, version=version)
Reads an operating system artifact from a dictionary. Args: operating_system_values (dict[str, object]): operating system values. Returns: OperatingSystemArtifact: an operating system artifact attribute container. Raises: MalformedPresetError: if the format of the operating system values are not set or incorrect.
juraj-google-style
def load_parent_implems(self, parent_implems): for trname, attr, implem in parent_implems.get_custom_implementations(): self.implementations[trname] = implem.copy() self.transitions_at[trname] = attr self.custom_implems.add(trname)
Import previously defined implementations. Args: parent_implems (ImplementationList): List of implementations defined in a parent class.
juraj-google-style
def _validate_at_hash(claims, access_token, algorithm): if (('at_hash' not in claims) and (not access_token)): return elif (('at_hash' in claims) and (not access_token)): msg = 'No access_token provided to compare against at_hash claim.' raise JWTClaimsError(msg) elif (access_token and ('at_hash' not in claims)): msg = 'at_hash claim missing from token.' raise JWTClaimsError(msg) try: expected_hash = calculate_at_hash(access_token, ALGORITHMS.HASHES[algorithm]) except (TypeError, ValueError): msg = 'Unable to calculate at_hash to verify against token claims.' raise JWTClaimsError(msg) if (claims['at_hash'] != expected_hash): raise JWTClaimsError('at_hash claim does not match access_token.')
Validates that the 'at_hash' parameter included in the claims matches the access_token returned alongside the id token as part of the authorization_code flow. Args: claims (dict): The claims dictionary to validate. access_token (str): The access token returned by the OpenID Provider. algorithm (str): The algorithm used to sign the JWT, as specified by the token headers.
codesearchnet
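For reference, the `at_hash` being compared above is defined by OpenID Connect as the base64url-encoded left half of the access token's hash, using the hash that matches the JWT signing algorithm; a sketch of that computation, independent of the snippet's `calculate_at_hash` helper:

```python
import base64
import hashlib

def at_hash(access_token, hash_alg=hashlib.sha256):
    # Hash the access token, keep the left half of the digest,
    # then base64url-encode it without padding.
    digest = hash_alg(access_token.encode("ascii")).digest()
    left_half = digest[: len(digest) // 2]
    return base64.urlsafe_b64encode(left_half).rstrip(b"=").decode("ascii")

print(at_hash("an-example-access-token"))   # compare against the token's at_hash claim
```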
def _load_credentials_file(credentials_file): try: credentials_file.seek(0) data = json.load(credentials_file) except Exception: logger.warning('Credentials file could not be loaded, will ignore and overwrite.') return {} if (data.get('file_version') != 2): logger.warning('Credentials file is not version 2, will ignore and overwrite.') return {} credentials = {} for (key, encoded_credential) in iteritems(data.get('credentials', {})): try: credential_json = base64.b64decode(encoded_credential) credential = client.Credentials.new_from_json(credential_json) credentials[key] = credential except: logger.warning('Invalid credential {0} in file, ignoring.'.format(key)) return credentials
Load credentials from the given file handle. The file is expected to be in this format: { "file_version": 2, "credentials": { "key": "base64 encoded json representation of credentials." } } This function will warn and return empty credentials instead of raising exceptions. Args: credentials_file: An open file handle. Returns: A dictionary mapping user-defined keys to an instance of :class:`oauth2client.client.Credentials`.
codesearchnet
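For illustration, a stdlib-only round trip of the version-2 layout the loader above expects; a plain dict stands in for the serialized oauth2client credential, which is an assumption made for brevity.

import base64
import io
import json

fake_credential_json = json.dumps({'token': 'abc', 'scopes': ['email']}).encode('utf-8')
payload = {
    'file_version': 2,
    'credentials': {
        'user-key': base64.b64encode(fake_credential_json).decode('ascii'),
    },
}
handle = io.StringIO(json.dumps(payload))

# Mimic the loader: seek, parse, check the version, then decode each entry.
handle.seek(0)
data = json.load(handle)
assert data.get('file_version') == 2
decoded = {k: json.loads(base64.b64decode(v)) for k, v in data['credentials'].items()}
print(decoded['user-key']['token'])  # abc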
def _normalize_array(array, domain=(0, 1)): array = np.array(array) array = np.squeeze(array) assert len(array.shape) <= 3 assert np.issubdtype(array.dtype, np.number) assert not np.isnan(array).any() low, high = np.min(array), np.max(array) if domain is None: message = "No domain specified, normalizing from measured (~%.2f, ~%.2f)" log.debug(message, low, high) domain = (low, high) if low < domain[0] or high > domain[1]: message = "Clipping domain from (~{:.2f}, ~{:.2f}) to (~{:.2f}, ~{:.2f})." log.info(message.format(low, high, domain[0], domain[1])) array = array.clip(*domain) min_value, max_value = np.iinfo(np.uint8).min, np.iinfo(np.uint8).max if np.issubdtype(array.dtype, np.inexact): offset = domain[0] if offset != 0: array -= offset log.debug("Converting inexact array by subtracting -%.2f.", offset) scalar = max_value / (domain[1] - domain[0]) if scalar != 1: array *= scalar log.debug("Converting inexact array by scaling by %.2f.", scalar) return array.clip(min_value, max_value).astype(np.uint8)
Given a NumPy array of rank 3 or lower, produce one representing an image. This ensures the resulting array has a dtype of uint8 and a domain of 0-255. Args: array: NumPy array representing the image domain: expected range of values in array, defaults to (0, 1); if explicitly set to None will use the array's own range of values and normalize them. Returns: normalized uint8 NumPy array
juraj-google-style
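A stripped-down sketch of the same normalization idea, assuming a float array already in a (0, 1) domain and skipping the logging and domain inference.

import numpy as np

def to_uint8_image(array, domain=(0.0, 1.0)):
    # Clip to the expected domain, rescale to 0-255, and cast to uint8.
    array = np.clip(np.asarray(array, dtype=np.float64), *domain)
    scaled = (array - domain[0]) * (255.0 / (domain[1] - domain[0]))
    return scaled.clip(0, 255).astype(np.uint8)

img = to_uint8_image(np.linspace(0.0, 1.0, num=6).reshape(2, 3))
print(img)  # rows: [0, 51, 102] and [153, 204, 255]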
def _sanitize_slices(slices, intended_shape, deficient_shape): sanitized_slices = [] idx = 0 for slc in slices: if slc is Ellipsis: if idx < 0: raise ValueError('Found multiple `...` in slices {}'.format(slices)) num_remaining_non_newaxis_slices = sum((s is not array_ops.newaxis for s in slices[slices.index(Ellipsis) + 1:])) idx = -num_remaining_non_newaxis_slices elif slc is array_ops.newaxis: pass else: is_broadcast = intended_shape[idx] > deficient_shape[idx] if isinstance(slc, slice): start, stop, step = (slc.start, slc.stop, slc.step) if start is not None: start = _prefer_static_where(is_broadcast, 0, start) if stop is not None: stop = _prefer_static_where(is_broadcast, 1, stop) if step is not None: step = _prefer_static_where(is_broadcast, 1, step) slc = slice(start, stop, step) else: slc = _prefer_static_where(is_broadcast, 0, slc) idx += 1 sanitized_slices.append(slc) return sanitized_slices
Restricts slices to avoid overflowing size-1 (broadcast) dimensions. Args: slices: iterable of slices received by `__getitem__`. intended_shape: int `Tensor` shape for which the slices were intended. deficient_shape: int `Tensor` shape to which the slices will be applied. Must have the same rank as `intended_shape`. Returns: sanitized_slices: Python `list` of slice objects.
github-repos
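A toy, non-tensor illustration of why a size-1 (broadcast) dimension needs its slices clamped; the shapes are invented for the example.

intended_shape = [4, 3]   # shape the caller sliced against
deficient_shape = [1, 3]  # actual stored shape (dim 0 is broadcast)

def sanitize(slices):
    out = []
    for axis, slc in enumerate(slices):
        is_broadcast = intended_shape[axis] > deficient_shape[axis]
        # On a broadcast axis every index refers to the same stored entry,
        # so integer indices collapse to 0.
        out.append(0 if is_broadcast and isinstance(slc, int) else slc)
    return out

print(sanitize([2, 1]))  # [0, 1]: index 2 is remapped onto the single stored row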
def enter(self, layer, inputs, build_graph, training, saving=None): state = {'layer': layer, 'inputs': inputs, 'build_graph': build_graph, 'training': training, 'saving': saving} return CallContextManager(self, state)
Push a Layer and its inputs and state onto the current call context. Args: layer: The `Layer` whose `call` is currently active. inputs: The inputs to the currently active `Layer`. build_graph: Whether currently inside a Graph or FuncGraph. training: Whether currently executing in training or inference mode. saving: Whether currently saving to SavedModel. Returns: Context manager.
github-repos
def search_groups(self, group): group_url = "%s/%s/%s" % (self.url, "group", group) response = self.jss.get(group_url) return LDAPGroupsResults(self.jss, response)
Search for LDAP groups. Args: group: Group to search for. It is not entirely clear how the JSS determines the results- are regexes allowed, or globbing? Returns: LDAPGroupsResult object. Raises: JSSGetError if no results are found.
juraj-google-style
def convert_padding(padding, expected_length=4): explicit_paddings = [] if padding == 'EXPLICIT': raise ValueError("'EXPLICIT' is not a valid value for `padding`. To use explicit padding, `padding` must be a list.") if isinstance(padding, (list, tuple)): for i, dim_paddings in enumerate(padding): if not isinstance(dim_paddings, (list, tuple)): raise ValueError(f'When `padding` is a list, each element of `padding` must be a list/tuple of size 2. Received: padding={padding} with element at index {i} of type {type(dim_paddings)}') if len(dim_paddings) != 2: raise ValueError(f'When `padding` is a list, each element of `padding` must be a list/tuple of size 2. Received: padding={padding} with element at index {i} of size {len(dim_paddings)}') explicit_paddings.extend(dim_paddings) if len(padding) != expected_length: raise ValueError(f'When padding is a list, it must be of size {expected_length}. Received: padding={padding} of size {len(padding)}') padding = 'EXPLICIT' return (padding, explicit_paddings)
Converts Python padding to C++ padding for ops which take EXPLICIT padding. Args: padding: the `padding` argument for a Python op which supports EXPLICIT padding. expected_length: Expected number of entries in the padding list when explicit padding is used. Returns: (padding, explicit_paddings) pair, which should be passed as attributes to a C++ op. Raises: ValueError: If padding is invalid.
github-repos
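A small self-contained check of the flattening convention described above: per-dimension `[before, after]` pairs become one flat `explicit_paddings` list while `padding` collapses to the string 'EXPLICIT'. The values are arbitrary.

padding = [[0, 0], [1, 2], [3, 4], [0, 0]]  # NHWC-style per-dimension padding

explicit_paddings = []
for dim_paddings in padding:
    assert len(dim_paddings) == 2
    explicit_paddings.extend(dim_paddings)

padding = 'EXPLICIT'
print(padding, explicit_paddings)  # EXPLICIT [0, 0, 1, 2, 3, 4, 0, 0]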
def extract(self, text: str) -> List[Extraction]:
    doc = self._tokenizer.tokenize_to_spacy_doc(text)
    self._load_matcher()
    matches = [x for x in self._matcher(doc) if (x[1] != x[2])]
    pos_filtered_matches = []
    neg_filtered_matches = []
    for (idx, start, end) in matches:
        span_doc = self._tokenizer.tokenize_to_spacy_doc(doc[start:end].text)
        this_spacy_rule = self._matcher.get(idx)
        relations = self._find_relation(span_doc, this_spacy_rule)
        (rule_id, _) = self._hash_map[idx]
        this_rule = self._rule_lst[rule_id]
        if self._filter_match(doc[start:end], relations, this_rule.patterns):
            value = self._form_output(doc[start:end], this_rule.output_format, relations, this_rule.patterns)
            if this_rule.polarity:
                pos_filtered_matches.append((start, end, value, rule_id, relations))
            else:
                neg_filtered_matches.append((start, end, value, rule_id, relations))
    return_lst = []
    if pos_filtered_matches:
        longest_lst_pos = self._get_longest(pos_filtered_matches)
        if neg_filtered_matches:
            longest_lst_neg = self._get_longest(neg_filtered_matches)
            return_lst = self._reject_neg(longest_lst_pos, longest_lst_neg)
        else:
            return_lst = longest_lst_pos
    extractions = []
    for (start, end, value, rule_id, relation) in return_lst:
        this_extraction = Extraction(value=value, extractor_name=self.name, start_token=start, end_token=end, start_char=doc[start].idx, end_char=(doc[(end - 1)].idx + len(doc[(end - 1)])), rule_id=rule_id.split('rule_id###')[0])
        extractions.append(this_extraction)
    return extractions
Extract from text. Args: text (str): input str to be extracted. Returns: List[Extraction]: the list of extractions, or the empty list if there are no matches.
codesearchnet
def bearing(self, format='numeric'): bearings = [] for segment in self: if (len(segment) < 2): bearings.append([]) else: bearings.append(segment.bearing(format)) return bearings
Calculate bearing between locations in segments. Args: format (str): Format of the bearing string to return Returns: list of list of float: Groups of bearings between points in segments
codesearchnet
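The per-pair computation behind `segment.bearing(...)` is not shown here; as a hedged sketch, the standard initial-bearing formula between two latitude/longitude points can be written with the math module. This is the generic great-circle formula, not necessarily the library's exact implementation.

import math

def initial_bearing(lat1, lon1, lat2, lon2):
    # Standard great-circle initial bearing, returned in degrees from north.
    phi1, phi2 = math.radians(lat1), math.radians(lat2)
    dlon = math.radians(lon2 - lon1)
    x = math.sin(dlon) * math.cos(phi2)
    y = math.cos(phi1) * math.sin(phi2) - math.sin(phi1) * math.cos(phi2) * math.cos(dlon)
    return math.degrees(math.atan2(x, y)) % 360

print(round(initial_bearing(52.0, 0.0, 52.0, 1.0), 1))  # roughly 89.6: nearly due east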
def GetEventTagByIdentifier(self, storage_file, event_identifier): if (not self._index): self._Build(storage_file) lookup_key = event_identifier.CopyToString() event_tag_identifier = self._index.get(lookup_key, None) if (not event_tag_identifier): return None return storage_file.GetEventTagByIdentifier(event_tag_identifier)
Retrieves the most recently updated event tag for an event. Args: storage_file (BaseStorageFile): storage file. event_identifier (AttributeContainerIdentifier): event attribute container identifier. Returns: EventTag: event tag or None if the event has no event tag.
codesearchnet
def dialog_open(self, *, dialog: dict, trigger_id: str, **kwargs) -> SlackResponse: kwargs.update({'dialog': dialog, 'trigger_id': trigger_id}) return self.api_call('dialog.open', json=kwargs)
Open a dialog with a user. Args: dialog (dict): A dictionary of dialog arguments. { "callback_id": "46eh782b0", "title": "Request something", "submit_label": "Request", "state": "Max", "elements": [ { "type": "text", "label": "Origin", "name": "loc_origin" }, { "type": "text", "label": "Destination", "name": "loc_destination" } ] } trigger_id (str): The trigger id of a recent message interaction. e.g. '12345.98765.abcd2358fdea'
codesearchnet
def _get_executor_init(self, workers): raise NotImplementedError
Gets the Pool initializer for multiprocessing. Args: workers: Number of workers. Returns: A function to initialize the pool.
github-repos
def get_type_name_in_language(cls, type_name, sub_type, language): if language in cls.type_methods_cache: m = cls.type_methods_cache[language] if not m: return type_name return m(type_name) found, method = load_language_plugins(language, 'get_type_name') if found: cls.type_methods_cache[language] = method if method: return method(type_name, sub_type) else: return type_name module = importlib.import_module('.lang.%s' % language, package="monolithe.generators") if not hasattr(module, 'get_type_name'): cls.type_methods_cache[language] = None return type_name method = getattr(module, 'get_type_name') cls.type_methods_cache[language] = method return method(type_name, sub_type)
Get the type for the given language Args: type_name (str): the type to convert language (str): the language to use Returns: a type name in the given language Example: get_type_name_in_language("Varchar", "python") >>> str
juraj-google-style
def sort_edge(edges): return sorted(edges, key=(lambda x: (x.L, x.R)))
Sort iterable of edges first by left node indices then right. Args: edges(list[Edge]): List of edges to be sorted. Returns: list[Edge]: Sorted list by left and right node indices.
codesearchnet
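Assuming `Edge` is any object exposing `L` and `R` attributes (a namedtuple stands in here), the two-level ordering can be exercised like this:

from collections import namedtuple

Edge = namedtuple('Edge', ['L', 'R'])

def sort_edge(edges):
    # Order primarily by the left node index, then by the right one.
    return sorted(edges, key=lambda x: (x.L, x.R))

edges = [Edge(2, 1), Edge(1, 3), Edge(1, 2)]
print(sort_edge(edges))  # [Edge(L=1, R=2), Edge(L=1, R=3), Edge(L=2, R=1)]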
def laid_out_pcoord(self, mesh_axis):
    divisor = list_product(self.shape.to_integer_list[mesh_axis + 1:])
    modulus = self.shape[mesh_axis].size

    def my_fn(pnum):
        return (pnum // divisor) % modulus
    return self.slicewise(my_fn, self.laid_out_pnum())
Returns a LaidOutTensor containing the processor coordinate. Args: mesh_axis: int. Returns: LaidOutTensor where each slice is an integer scalar.
juraj-google-style
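The arithmetic behind the processor coordinate is easiest to see with plain integers: for a mesh of shape `[2, 3, 4]`, processor number `pnum` is decomposed per axis with a divisor (the product of the later axis sizes) and a modulus (that axis's size). The mesh shape below is invented for the example.

mesh_shape = [2, 3, 4]

def pcoord(pnum, mesh_axis):
    # divisor = product of the sizes of all axes after mesh_axis
    divisor = 1
    for size in mesh_shape[mesh_axis + 1:]:
        divisor *= size
    modulus = mesh_shape[mesh_axis]
    return (pnum // divisor) % modulus

pnum = 17  # one of the 2 * 3 * 4 = 24 processors
print([pcoord(pnum, axis) for axis in range(3)])  # [1, 1, 1]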
def fetch(self, invoice_id, data={}, **kwargs): return super(Invoice, self).fetch(invoice_id, data, **kwargs)
Fetch Invoice for given Id Args: invoice_id : Id for which invoice object has to be retrieved Returns: Invoice dict for given invoice Id
juraj-google-style
def autocorrelation(ts, normalized=False, unbiased=False):
    ts = np.squeeze(ts)
    if (ts.ndim <= 1):
        if normalized:
            ts = ((ts - ts.mean()) / ts.std())
        N = ts.shape[0]
        ar = np.asarray(ts)
        acf = np.correlate(ar, ar, mode='full')
        outlen = ((acf.shape[0] + 1) // 2)
        acf = acf[(outlen - 1):]
        if unbiased:
            factor = np.array([(1.0 / (N - m)) for m in range(0, outlen)])
            acf = (acf * factor)
        dt = ((ts.tspan[(- 1)] - ts.tspan[0]) / (len(ts) - 1.0))
        lags = (np.arange(outlen) * dt)
        return Timeseries(acf, tspan=lags, labels=ts.labels)
    else:
        lastaxis = (ts.ndim - 1)
        m = ts.shape[lastaxis]
        acfs = [ts[(..., i)].autocorrelation(normalized, unbiased)[(..., np.newaxis)] for i in range(m)]
        res = distob.concatenate(acfs, axis=lastaxis)
        res.labels[lastaxis] = ts.labels[lastaxis]
        return res
Returns the discrete, linear convolution of a time series with itself, optionally using unbiased normalization. N.B. Autocorrelation estimates are necessarily inaccurate for longer lags, as there are less pairs of points to convolve separated by that lag. Therefore best to throw out the results except for shorter lags, e.g. keep lags from tau=0 up to one quarter of the total time series length. Args: normalized (boolean): If True, the time series will first be normalized to a mean of 0 and variance of 1. This gives autocorrelation 1 at zero lag. unbiased (boolean): If True, the result at each lag m will be scaled by 1/(N-m). This gives an unbiased estimation of the autocorrelation of a stationary process from a finite length sample. Ref: S. J. Orfanidis (1996) "Optimum Signal Processing", 2nd Ed.
codesearchnet
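The core of the 1-D branch can be reproduced with NumPy alone; this sketch assumes a plain array rather than the `Timeseries` class used above and only shows the normalized, unbiased estimate.

import numpy as np

def acf_1d(x, unbiased=True):
    x = (x - x.mean()) / x.std()
    n = len(x)
    full = np.correlate(x, x, mode='full')   # length 2n - 1
    acf = full[n - 1:]                       # keep non-negative lags
    if unbiased:
        acf = acf / (n - np.arange(n))       # scale lag m by 1 / (n - m)
    else:
        acf = acf / n
    return acf

x = np.sin(np.linspace(0, 8 * np.pi, 200))
acf = acf_1d(x)
print(round(acf[0], 3))  # 1.0 at zero lag for a normalized series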
def switch_to_line_in(self, source=None): if source: uid = source.uid else: uid = self.uid self.avTransport.SetAVTransportURI([('InstanceID', 0), ('CurrentURI', 'x-rincon-stream:{0}'.format(uid)), ('CurrentURIMetaData', '')])
Switch the speaker's input to line-in. Args: source (SoCo): The speaker whose line-in should be played. Default is line-in from the speaker itself.
codesearchnet
def topics(self): cluster = self._client.cluster if (self._client._metadata_refresh_in_progress and self._client._topics): future = cluster.request_update() self._client.poll(future=future) stash = cluster.need_all_topic_metadata cluster.need_all_topic_metadata = True future = cluster.request_update() self._client.poll(future=future) cluster.need_all_topic_metadata = stash return cluster.topics()
Get all topics the user is authorized to view. Returns: set: topics
codesearchnet
def deepnn(x): with tf.name_scope('reshape'): x_image = tf.reshape(x, [(- 1), 28, 28, 1]) with tf.name_scope('conv1'): W_conv1 = weight_variable([5, 5, 1, 32]) b_conv1 = bias_variable([32]) h_conv1 = tf.nn.relu((conv2d(x_image, W_conv1) + b_conv1)) with tf.name_scope('pool1'): h_pool1 = max_pool_2x2(h_conv1) with tf.name_scope('conv2'): W_conv2 = weight_variable([5, 5, 32, 64]) b_conv2 = bias_variable([64]) h_conv2 = tf.nn.relu((conv2d(h_pool1, W_conv2) + b_conv2)) with tf.name_scope('pool2'): h_pool2 = max_pool_2x2(h_conv2) with tf.name_scope('fc1'): W_fc1 = weight_variable([((7 * 7) * 64), 1024]) b_fc1 = bias_variable([1024]) h_pool2_flat = tf.reshape(h_pool2, [(- 1), ((7 * 7) * 64)]) h_fc1 = tf.nn.relu((tf.matmul(h_pool2_flat, W_fc1) + b_fc1)) with tf.name_scope('dropout'): keep_prob = tf.placeholder(tf.float32) h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) with tf.name_scope('fc2'): W_fc2 = weight_variable([1024, 10]) b_fc2 = bias_variable([10]) y_conv = (tf.matmul(h_fc1_drop, W_fc2) + b_fc2) return (y_conv, keep_prob)
deepnn builds the graph for a deep net for classifying digits. Args: x: an input tensor with the dimensions (N_examples, 784), where 784 is the number of pixels in a standard MNIST image. Returns: A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with values equal to the logits of classifying the digit into one of 10 classes (the digits 0-9). keep_prob is a scalar placeholder for the probability of dropout.
codesearchnet
def _get_countdown_for_next_slice(self, spec): countdown = 0 if (self._processing_limit(spec) != (- 1)): countdown = max(int((parameters.config._SLICE_DURATION_SEC - (self._time() - self._start_time))), 0) return countdown
Get countdown for next slice's task. When user sets processing rate, we set countdown to delay task execution. Args: spec: model.MapreduceSpec Returns: countdown in int.
codesearchnet
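Stripped of the mapreduce plumbing, the countdown is just the unused remainder of the slice budget; the slice duration and timestamps below are invented.

SLICE_DURATION_SEC = 15

def countdown_for_next_slice(start_time, now, rate_limited=True):
    # Only delay the next task when a processing rate limit is in effect.
    if not rate_limited:
        return 0
    return max(int(SLICE_DURATION_SEC - (now - start_time)), 0)

print(countdown_for_next_slice(start_time=100.0, now=106.5))  # 8
print(countdown_for_next_slice(start_time=100.0, now=130.0))  # 0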
def find_many(self, url, type, resource): return [type(item) for item in RestClient.get(url)[resource]]
Get a list of resources Args: url (string): URL to invoke type (class): Class type resource (string): The REST Resource Returns: list of object: List of resource instances
juraj-google-style
def job_history(backend): year = widgets.Output(layout=widgets.Layout(display='flex-inline', align_items='center', min_height='400px')) month = widgets.Output(layout=widgets.Layout(display='flex-inline', align_items='center', min_height='400px')) week = widgets.Output(layout=widgets.Layout(display='flex-inline', align_items='center', min_height='400px')) tabs = widgets.Tab(layout=widgets.Layout(max_height='620px')) tabs.children = [year, month, week] tabs.set_title(0, 'Year') tabs.set_title(1, 'Month') tabs.set_title(2, 'Week') tabs.selected_index = 1 _build_job_history(tabs, backend) return tabs
Widget for displaying job history Args: backend (IBMQbackend): The backend. Returns: Tab: A tab widget for history images.
codesearchnet
def get_msms_df(model, pdb_id, outfile=None, outdir=None, outext='_msms.df', force_rerun=False): outfile = ssbio.utils.outfile_maker(inname=pdb_id, outname=outfile, outdir=outdir, outext=outext) if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile): try: rd = PDB.ResidueDepth(model) except AssertionError: log.error('{}: unable to run MSMS'.format(pdb_id)) return pd.DataFrame() appender = [] for k in rd.property_keys: x = rd.property_dict[k] chain = k[0] residue = k[1] het = residue[0] resnum = residue[1] icode = residue[2] resdepth = x[0] cadepth = x[1] appender.append((chain, resnum, icode, resdepth, cadepth)) df = pd.DataFrame.from_records(appender, columns=['chain', 'resnum', 'icode', 'res_depth', 'ca_depth']) df.to_csv(outfile) else: log.debug('{}: already ran MSMS and force_rerun={}, loading results'.format(outfile, force_rerun)) df = pd.read_csv(outfile, index_col=0) return df
Run MSMS (using Biopython) on a Biopython Structure Model. Depths are in units Angstroms. 1A = 10^-10 m = 1nm. Returns a dictionary of:: { chain_id:{ resnum1_id: (res_depth, ca_depth), resnum2_id: (res_depth, ca_depth) } } Args: model: Biopython Structure Model Returns: Pandas DataFrame: ResidueDepth property_dict, reformatted
juraj-google-style
def run(self, module, post_check): try: _cwd = os.getcwd() _sys_path = list(sys.path) _sys_argv = list(sys.argv) sys.path.insert(0, os.path.dirname(self._path)) sys.argv = [os.path.basename(self._path)] + self._argv exec(self._code, module.__dict__) post_check() except Exception as e: self._failed = True self._error_detail = traceback.format_exc() _exc_type, _exc_value, exc_traceback = sys.exc_info() filename, line_number, func, txt = traceback.extract_tb(exc_traceback)[-1] self._error = "%s\nFile \"%s\", line %d, in %s:\n%s" % (str(e), os.path.basename(filename), line_number, func, txt) finally: os.chdir(_cwd) sys.path = _sys_path sys.argv = _sys_argv self.ran = True
Execute the configured source code in a module and run any post checks. Args: module (Module) : a module to execute the configured code in. post_check(callable) : a function that can raise an exception if expected post-conditions are not met after code execution.
juraj-google-style
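A reduced sketch of the same pattern (executing source text inside a fresh module while saving and restoring the working directory, `sys.path` and `sys.argv`) using only the standard library. The file path is purely illustrative and the error capture is omitted.

import os
import sys
import types

def run_code(code, path, argv):
    module = types.ModuleType('user_code')
    saved_cwd, saved_path, saved_argv = os.getcwd(), list(sys.path), list(sys.argv)
    try:
        sys.path.insert(0, os.path.dirname(path))
        sys.argv = [os.path.basename(path)] + argv
        exec(code, module.__dict__)  # run the code with the module dict as globals
    finally:
        os.chdir(saved_cwd)
        sys.path = saved_path
        sys.argv = saved_argv
    return module

mod = run_code("answer = 6 * 7", path="/tmp/snippet.py", argv=[])
print(mod.answer)  # 42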
def _ScanVolume(self, scan_context, scan_node, base_path_specs): if ((not scan_node) or (not scan_node.path_spec)): raise errors.ScannerError('Invalid or missing scan node.') if scan_context.IsLockedScanNode(scan_node.path_spec): self._ScanEncryptedVolume(scan_context, scan_node) if scan_context.IsLockedScanNode(scan_node.path_spec): return if scan_node.IsVolumeSystemRoot(): self._ScanVolumeSystemRoot(scan_context, scan_node, base_path_specs) elif scan_node.IsFileSystem(): self._ScanFileSystem(scan_node, base_path_specs) elif (scan_node.type_indicator == definitions.TYPE_INDICATOR_VSHADOW): path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_TSK, location='/', parent=scan_node.path_spec) base_path_specs.append(path_spec) else: for sub_scan_node in scan_node.sub_nodes: self._ScanVolume(scan_context, sub_scan_node, base_path_specs)
Scans a volume scan node for volume and file systems. Args: scan_context (SourceScannerContext): source scanner context. scan_node (SourceScanNode): volume scan node. base_path_specs (list[PathSpec]): file system base path specifications. Raises: ScannerError: if the format of or within the source is not supported or the scan node is invalid.
codesearchnet
def insert(self, **fields): if (self.conflict_target or self.conflict_action): compiler = self._build_insert_compiler([fields]) rows = compiler.execute_sql(return_id=True) pk_field_name = self.model._meta.pk.name return rows[0][pk_field_name] return super().create(**fields).pk
Creates a new record in the database. This allows specifying custom conflict behavior using .on_conflict(). If no special behavior was specified, this uses the normal Django create(..) Arguments: fields: The fields of the row to create. Returns: The primary key of the record that was created.
codesearchnet
def _request(self, method, url, body): if ((method != 'POST') and (method != 'PUT')): body = None s = Session() LOGGER.debug('Method: {0}, Url: {1}, Body: {2}.'.format(method, url, body)) req = Request(method, url, json=body) prepped = s.prepare_request(req) res = s.send(prepped, timeout=(self._timeout or None)) res.raise_for_status() return res.json()
Internal method to send a request to the remote server. Args: method(str): HTTP method (GET/POST/PUT/DELETE/HEAD). url(str): The request url. body(dict): The JSON object to be sent. Returns: A dict representing the JSON body from the server response. Raises: ConnectionError: A network problem occurred (e.g. DNS failure, refused connection). Timeout: The request timed out. HTTPError: The HTTP request returned an unsuccessful status code.
codesearchnet
def normalize_attr_values(a: Any) -> np.ndarray: scalar = False if np.isscalar(a): a = np.array([a]) scalar = True arr = normalize_attr_array(a) if np.issubdtype(arr.dtype, np.integer) or np.issubdtype(arr.dtype, np.floating): pass elif np.issubdtype(arr.dtype, np.character) or np.issubdtype(arr.dtype, np.object_): arr = normalize_attr_strings(arr) elif np.issubdtype(arr.dtype, np.bool_): arr = arr.astype('ubyte') if scalar: return arr[0] else: return arr
Take all kinds of input values and validate/normalize them. Args: a List, tuple, np.matrix, np.ndarray or sparse matrix Elements can be strings, numbers or bools Returns a_normalized An np.ndarray with elements conforming to one of the valid Loom attribute types Remarks: This method should be used to prepare the values to be stored in the HDF5 file. You should not return the values to the caller; for that, use materialize_attr_values()
juraj-google-style
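The behaviour is easier to see with a cut-down NumPy-only version that handles just the scalar and boolean cases; string normalization is left out and the function name here is invented.

import numpy as np

def normalize_values(a):
    scalar = np.isscalar(a)
    arr = np.array([a]) if scalar else np.asarray(a)
    if np.issubdtype(arr.dtype, np.bool_):
        arr = arr.astype('ubyte')  # store booleans as 0/1 bytes
    return arr[0] if scalar else arr

print(normalize_values(True))           # 1 (ubyte scalar)
print(normalize_values([True, False]))  # [1 0]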
def bartlett(x): if any_symbolic_tensors((x,)): return Bartlett().symbolic_call(x) return backend.numpy.bartlett(x)
Bartlett window function. The Bartlett window is a triangular window that rises then falls linearly. Args: x: Scalar or 1D Tensor. Window length. Returns: A 1D tensor containing the Bartlett window values. Example: >>> x = keras.ops.convert_to_tensor(5) >>> keras.ops.bartlett(x) array([0. , 0.5, 1. , 0.5, 0. ], dtype=float32)
github-repos
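For comparison, the same window values can be obtained directly from NumPy, which is presumably what the `backend.numpy` path above delegates to.

import numpy as np

window = np.bartlett(5)  # triangular window: rises linearly to 1, then falls
print(window)            # [0.  0.5 1.  0.5 0. ]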
def _make_projcet_list(path):
    from collections import OrderedDict
    from matplotlib.colors import LinearSegmentedColormap
    from matplotlib.colors import rgb2hex as r2h
    from numpy import linspace
    proj = []
    projects = OrderedDict()
    file_list = os.listdir(path)
    for files in file_list:
        if files.split(".")[0] not in proj and 'json' in files and "#" not in files:
            proj.append(files.split(".")[0])
    colors = _get_colors(len(proj))
    p_c = 0
    for p in proj:
        tasks = OrderedDict()
        temp = [x.split(".")[1] for x in file_list if p in x and "#" not in x]
        cmspace = linspace(0.95, 0.25, len(temp))
        cm = LinearSegmentedColormap.from_list("acorn.{}".format(p), ["#ffffff", colors[p_c]],
                                               N=max((len(temp), 25)))
        hues = [r2h(cm(cmi)) for cmi in cmspace]
        h_c = 0
        for t in temp:
            tasks[t] = [hues[h_c], p + "." + t + ".json"]
            h_c += 1
        tasks["hex_color"] = colors[p_c]
        projects[p] = tasks
        p_c += 1
    return projects
Returns a dictionary in which each project is a key and the tasks are stored as a list within that dictionary element. Args: path (str): The path to the folder containing the *.json files. Returns: projects (dict): A dictionary in which each project is a key containing a list of its tasks.
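The per-project hue generation above can be tried in isolation; the white-to-blue endpoint colors here are arbitrary stand-ins, since the original color pair is not recoverable from the snippet above.

from matplotlib.colors import LinearSegmentedColormap, rgb2hex
from numpy import linspace

cm = LinearSegmentedColormap.from_list("acorn.demo", ["#ffffff", "#1f77b4"], N=25)
cmspace = linspace(0.95, 0.25, 4)    # one sample point per task
hues = [rgb2hex(cm(v)) for v in cmspace]
print(hues)                          # four hex strings fading toward white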
juraj-google-style