code (string, lengths 20–4.93k)
docstring (string, lengths 33–1.27k)
source (string, 3 classes)
def get_package_for_module(module):
    if isinstance(module, six.string_types):
        try:
            module = sys.modules[module]
        except KeyError:
            return None
    try:
        return six.text_type(module.package)
    except AttributeError:
        if module.__name__ == '__main__':
            try:
                file_name = module.__file__
            except AttributeError:
                pass
            else:
                base_name = os.path.basename(file_name)
                split_name = os.path.splitext(base_name)
                if len(split_name) == 1:
                    return six.text_type(base_name)
                return u'.'.join(split_name[:-1])
        return six.text_type(module.__name__)
Get package name for a module. Helper calculates the package name of a module. Args: module: Module to get name for. If module is a string, try to find module in sys.modules. Returns: If module contains 'package' attribute, uses that as package name. Else, if module is not the '__main__' module, the module __name__. Else, the base name of the module file name. Else None.
codesearchnet
def _CheckForOutOfOrderStepAndMaybePurge(self, event):
    if event.step < self.most_recent_step and event.HasField('summary'):
        self._Purge(event, by_tags=True)
    else:
        self.most_recent_step = event.step
        self.most_recent_wall_time = event.wall_time
Check for out-of-order event.step and discard expired events for tags. Check if the event is out of order relative to the global most recent step. If it is, purge outdated summaries for tags that the event contains. Args: event: The event to use as reference. If the event is out-of-order, all events with the same tags, but with a greater event.step will be purged.
juraj-google-style
def _allocate_ips_to_nics(self, conf):
    for dom_name, dom_spec in conf.get('domains', {}).items():
        for idx, nic in enumerate(dom_spec.get('nics', [])):
            if 'ip' in nic:
                continue
            net = self._get_net(conf, dom_name, nic)
            if net['type'] != 'nat':
                continue
            allocated = net['mapping'].values()
            vacant = _create_ip(
                net['gw'],
                set(range(2, 255)).difference(
                    set([int(ip.split('.')[-1]) for ip in allocated])
                ).pop()
            )
            nic['ip'] = vacant
            self._add_nic_to_mapping(net, dom_spec, nic)
For all the nics of all the domains in the conf that have a dynamic IP, allocate one and add it to the network mapping Args: conf (dict): Configuration spec to extract the domains from Returns: None
codesearchnet
def coroutine(func):
    def wrapper(*args, **kwargs):
        gen = func(*args, **kwargs)
        val = next(gen)
        if val != None:
            raise TypeError('Unexpected value from start of coroutine')
        return gen
    wrapper.__name__ = func.__name__
    wrapper.__doc__ = func.__doc__
    return wrapper
Wraps a PEP-342 enhanced generator in a way that avoids boilerplate of the "priming" call to ``next``. Args: func (Callable): The function constructing a generator to decorate. Returns: Callable: The decorated generator.
codesearchnet
def get_tensor_from_node(node):
    with ops.init_scope():
        if getattr(node, 'is_distributed_variable', False):
            return node
        elif getattr(node, 'is_distributed_table', False):
            return node
        elif getattr(node, 'is_sharded_variable', False):
            return node
        elif resource_variable_ops.is_resource_variable(node):
            return node.handle
        elif isinstance(node, asset.Asset):
            return node.asset_path
        elif tensor_util.is_tf_type(node):
            return node
        elif isinstance(node, resource.CapturableResource):
            return node.resource_handle
        raise ValueError(f'Cannot convert node {node} to tensor.')
Resolves a saved model graph node into a tensor to be captured. Args: node: a tensor, variable, or resource to be resolved into a capturable tensor Returns: A list of tensors. Raises: ValueError: if the node cannot be converted into a tensor.
github-repos
def dot(poly1, poly2):
    if not isinstance(poly1, Poly) and not isinstance(poly2, Poly):
        return numpy.dot(poly1, poly2)
    poly1 = Poly(poly1)
    poly2 = Poly(poly2)
    poly = poly1 * poly2
    if numpy.prod(poly1.shape) <= 1 or numpy.prod(poly2.shape) <= 1:
        return poly
    return chaospy.poly.sum(poly, 0)
Dot product of polynomial vectors. Args: poly1 (Poly) : left part of product. poly2 (Poly) : right part of product. Returns: (Poly) : product of poly1 and poly2. Examples: >>> poly = cp.prange(3, 1) >>> print(poly) [1, q0, q0^2] >>> print(cp.dot(poly, numpy.arange(3))) 2q0^2+q0 >>> print(cp.dot(poly, poly)) q0^4+q0^2+1
codesearchnet
def resorted(values):
    if not values:
        return values
    values = sorted(values)
    first_word = next(
        (cnt for cnt, val in enumerate(values) if val and not val[0].isdigit()),
        None
    )
    if first_word is None:
        return values
    words = values[first_word:]
    numbers = values[:first_word]
    return words + numbers
Sort values, but put numbers after alphabetically sorted words. This function is here to make outputs diff-compatible with Aleph. Example:: >>> sorted(["b", "1", "a"]) ['1', 'a', 'b'] >>> resorted(["b", "1", "a"]) ['a', 'b', '1'] Args: values (iterable): any iterable object/list/tuple/whatever. Returns: list of sorted values, but with numbers after words
juraj-google-style
def _sample_cell(args, cell_body):
    env = datalab.utils.commands.notebook_environment()
    query = None
    table = None
    view = None
    if args['query']:
        query = _get_query_argument(args, cell_body, env)
    elif args['table']:
        table = _get_table(args['table'])
    elif args['view']:
        view = datalab.utils.commands.get_notebook_item(args['view'])
        if not isinstance(view, datalab.bigquery.View):
            raise Exception('%s is not a view' % args['view'])
    else:
        query = datalab.bigquery.Query(cell_body, values=env)
    count = args['count']
    method = args['method']
    if method == 'random':
        sampling = datalab.bigquery.Sampling.random(percent=args['percent'], count=count)
    elif method == 'hashed':
        sampling = datalab.bigquery.Sampling.hashed(field_name=args['field'], percent=args['percent'], count=count)
    elif method == 'sorted':
        ascending = args['order'] == 'ascending'
        sampling = datalab.bigquery.Sampling.sorted(args['field'], ascending=ascending, count=count)
    elif method == 'limit':
        sampling = datalab.bigquery.Sampling.default(count=count)
    else:
        sampling = datalab.bigquery.Sampling.default(count=count)
    if query:
        results = query.sample(sampling=sampling, dialect=args['dialect'], billing_tier=args['billing'])
    elif view:
        results = view.sample(sampling=sampling)
    else:
        results = table.sample(sampling=sampling)
    if args['verbose']:
        print(results.sql)
    if args['profile']:
        return datalab.utils.commands.profile_df(results.to_dataframe())
    else:
        return results
Implements the bigquery sample cell magic for ipython notebooks. Args: args: the optional arguments following '%%bigquery sample'. cell_body: optional contents of the cell interpreted as SQL, YAML or JSON. Returns: The results of executing the sampling query, or a profile of the sample data.
codesearchnet
def start_naive_bayes(automated_run, session, path): module = functions.import_string_code_as_module(automated_run.source) random_state = (8 if (not hasattr(module, 'random_state')) else module.random_state) assert (module.metric_to_optimize in automated_run.base_learner_origin.metric_generators) base_estimator = automated_run.base_learner_origin.return_estimator() base_estimator.set_params(**module.default_params) default_params = functions.make_serializable(base_estimator.get_params()) non_searchable_params = dict(((key, val) for (key, val) in iteritems(default_params) if (key not in module.pbounds))) existing_base_learners = [] for base_learner in automated_run.base_learner_origin.base_learners: if (not (base_learner.job_status == 'finished')): continue in_search_space = True for (key, val) in iteritems(non_searchable_params): if (base_learner.hyperparameters[key] != val): in_search_space = False break if in_search_space: existing_base_learners.append(base_learner) target = [] initialization_dict = dict(((key, list()) for key in module.pbounds.keys())) for base_learner in existing_base_learners: all_numerical = True for key in module.pbounds.keys(): if (not isinstance(base_learner.hyperparameters[key], numbers.Number)): all_numerical = False break if (not all_numerical): continue for key in module.pbounds.keys(): initialization_dict[key].append(base_learner.hyperparameters[key]) target.append(base_learner.individual_score[module.metric_to_optimize]) initialization_dict['target'] = (target if (not module.invert_metric) else list(map((lambda x: (- x)), target))) print('{} existing in initialization dictionary'.format(len(initialization_dict['target']))) func_to_optimize = return_func_to_optimize(path, session, automated_run.base_learner_origin, module.default_params, module.metric_to_optimize, module.invert_metric, set(module.integers)) bo = BayesianOptimization(func_to_optimize, module.pbounds) bo.initialize(initialization_dict) np.random.seed(random_state) bo.maximize(**module.maximize_config)
Starts naive bayes automated run Args: automated_run (xcessiv.models.AutomatedRun): Automated run object session: Valid SQLAlchemy session path (str, unicode): Path to project folder
codesearchnet
def decode(self, fp: TextIO) -> BioCCollection:
    tree = etree.parse(fp)
    collection = self.__parse_collection(tree.getroot())
    collection.encoding = tree.docinfo.encoding
    collection.standalone = tree.docinfo.standalone
    collection.version = tree.docinfo.xml_version
    return collection
Deserialize ``fp`` to a BioC collection object. Args: fp: a ``.read()``-supporting file-like object containing a BioC collection Returns: an object of BioCCollection
juraj-google-style
def builder(name, **builder_init_kwargs):
    name, builder_kwargs = _dataset_name_and_kwargs_from_name_str(name)
    builder_kwargs.update(builder_init_kwargs)
    if name in _ABSTRACT_DATASET_REGISTRY:
        raise DatasetNotFoundError(name, is_abstract=True)
    if name in _IN_DEVELOPMENT_REGISTRY:
        raise DatasetNotFoundError(name, in_development=True)
    if name not in _DATASET_REGISTRY:
        raise DatasetNotFoundError(name)
    try:
        return _DATASET_REGISTRY[name](**builder_kwargs)
    except BaseException:
        logging.error('Failed to construct dataset %s', name)
        raise
Fetches a `tfds.core.DatasetBuilder` by string name. Args: name: `str`, the registered name of the `DatasetBuilder` (the snake case version of the class name). This can be either `"dataset_name"` or `"dataset_name/config_name"` for datasets with `BuilderConfig`s. As a convenience, this string may contain comma-separated keyword arguments for the builder. For example `"foo_bar/a=True,b=3"` would use the `FooBar` dataset passing the keyword arguments `a=True` and `b=3` (for builders with configs, it would be `"foo_bar/zoo/a=True,b=3"` to use the `"zoo"` config and pass to the builder keyword arguments `a=True` and `b=3`). **builder_init_kwargs: `dict` of keyword arguments passed to the `DatasetBuilder`. These will override keyword arguments passed in `name`, if any. Returns: A `tfds.core.DatasetBuilder`. Raises: DatasetNotFoundError: if `name` is unrecognized.
codesearchnet
def from_table(table, fields=None):
    if fields is None:
        fields = '*'
    elif isinstance(fields, list):
        fields = ','.join(fields)
    return Query('SELECT %s FROM %s' % (fields, table._repr_sql_()))
Return a Query for the given Table object Args: table: the Table object to construct a Query out of fields: the fields to return. If None, all fields will be returned. This can be a string which will be injected into the Query after SELECT, or a list of field names. Returns: A Query object that will return the specified fields from the records in the Table.
codesearchnet
def WriteEventBody(self, event):
    latitude = getattr(event, 'latitude', None)
    longitude = getattr(event, 'longitude', None)
    if latitude is not None and longitude is not None:
        placemark_xml_element = ElementTree.Element('Placemark')
        name_xml_element = ElementTree.SubElement(placemark_xml_element, 'name')
        name_xml_element.text = 'PLACEHOLDER FOR EVENT IDENTIFIER'
        description_xml_element = ElementTree.SubElement(
            placemark_xml_element, 'description')
        description_xml_element.text = (
            rawpy.NativePythonFormatterHelper.GetFormattedEventObject(event))
        point_xml_element = ElementTree.SubElement(
            placemark_xml_element, 'Point')
        coordinates_xml_element = ElementTree.SubElement(
            point_xml_element, 'coordinates')
        coordinates_xml_element.text = '{0!s},{1!s}'.format(longitude, latitude)
        xml_string = ElementTree.tostring(placemark_xml_element)
        output_text = codecs.decode(xml_string, self._output_mediator.encoding)
        self._output_writer.Write(output_text)
Writes the body of an event to the output. Args: event (EventObject): event.
juraj-google-style
def make_dataset_from_selfplay(data_extracts): tf_examples = (make_tf_example(features_lib.extract_features(pos), pi, result) for pos, pi, result in data_extracts) return tf_examples
Returns an iterable of tf.Examples. Args: data_extracts: An iterable of (position, pi, result) tuples
juraj-google-style
def add_trial(self, trial):
    trial.set_verbose(self._verbose)
    self._trials.append(trial)
    with warn_if_slow('scheduler.on_trial_add'):
        self._scheduler_alg.on_trial_add(self, trial)
    self.trial_executor.try_checkpoint_metadata(trial)
Adds a new trial to this TrialRunner. Trials may be added at any time. Args: trial (Trial): Trial to queue.
codesearchnet
def set_value(self, text): if self.single_line: text = text.replace('\n', '') self.set_text(text)
Sets the text content. Args: text (str): The string content that has to be appended as a standard child, identified by the key 'text'
juraj-google-style
def CheckPosixThreading(filename, clean_lines, linenum, error):
    line = clean_lines.elided[linenum]
    for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST:
        if Search(pattern, line):
            error(filename, linenum, 'runtime/threadsafe_fn', 2,
                  'Consider using ' + multithread_safe_func +
                  '...) instead of ' + single_thread_func +
                  '...) for improved thread safety.')
Checks for calls to thread-unsafe functions. Much code has been originally written without consideration of multi-threading. Also, engineers are relying on their old experience; they have learned posix before threading extensions were added. These tests guide the engineers to use thread-safe functions (when using posix directly). Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
codesearchnet
def clean_strings(iterable):
    retval = []
    for val in iterable:
        try:
            retval.append(val.strip())
        except AttributeError:
            retval.append(val)
    return retval
Take a list of strings and clear whitespace on each one. If a value in the list is not a string pass it through untouched. Args: iterable: mixed list Returns: mixed list
codesearchnet
def ones(shape, dtype=None, name=None):
    with ops.init_scope():
        if dtype is None:
            dtype = floatx()
        tf_dtype = dtypes_module.as_dtype(dtype)
        v = array_ops.ones(shape=shape, dtype=tf_dtype, name=name)
        if py_all(v.shape.as_list()):
            return variable(v, dtype=dtype, name=name)
        return v
Instantiates an all-ones variable and returns it. Args: shape: Tuple of integers, shape of returned Keras variable. dtype: String, data type of returned Keras variable. name: String, name of returned Keras variable. Returns: A Keras variable, filled with `1.0`. Note that if `shape` was symbolic, we cannot return a variable, and will return a dynamically-shaped tensor instead. Example: >>> kvar = tf.keras.backend.ones((3,4)) >>> tf.keras.backend.eval(kvar) array([[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 1., 1., 1.]], dtype=float32)
github-repos
def predict_proba(self, X): return collections.deque(self.iter_predict_proba(X), maxlen=1).pop()
Returns the predicted probabilities for ``X``. Arguments: X (array-like or sparse matrix of shape (n_samples, n_features)): The input samples. Sparse matrices are accepted only if they are supported by the weak model. Returns: array of shape (n_samples, n_classes) containing the predicted probabilities.
juraj-google-style
def _Consumers(t, func_graphs):
    consumers = t.consumers()
    for func in func_graphs:
        for input_t, placeholder in _Captures(func):
            if input_t is t:
                consumers.extend(_Consumers(placeholder, func_graphs))
    return consumers
Returns the consumers of t, crossing closure boundaries where necessary. Args: t: Tensor func_graphs: a list of FuncGraphs that may have captured t. Returns: A list of tensors. The tensors will be from the current graph and/or func_graphs.
github-repos
def WriteValuesToJSONFile(self, state, values):
    value_counters = {}
    max_post_size = config.CONFIG['BigQuery.max_file_post_size']
    for value in values:
        class_name = value.__class__.__name__
        output_tracker, created = self._GetTempOutputFileHandles(class_name)
        value_counters[class_name] = value_counters.get(class_name, -1) + 1
        if not (value_counters[class_name] % max_post_size):
            output_tracker.gzip_filehandle.flush()
            if os.path.getsize(output_tracker.gzip_filehandle.name) > max_post_size:
                self.Flush(state)
                value_counters[class_name] = 0
                output_tracker, created = self._GetTempOutputFileHandles(class_name)
        if not output_tracker.schema:
            output_tracker.schema = self.RDFValueToBigQuerySchema(value)
        if created:
            self._WriteJSONValue(output_tracker.gzip_filehandle, value)
        else:
            self._WriteJSONValue(output_tracker.gzip_filehandle, value, delimiter='\n')
    for output_tracker in itervalues(self.temp_output_trackers):
        output_tracker.gzip_filehandle.flush()
Write newline separated JSON dicts for each value. We write each dict separately so we don't have to hold all of the output streams in memory. We open and close the JSON array manually with []. Args: state: rdf_protodict.AttributedDict with the plugin's state. values: RDF values to export.
codesearchnet
def load_ipython_extension(ip): decor = InteractiveDecorator(ip) ip.events.register('post_run_cell', decor.post_run_cell) newhist = AcornHistoryManager(ip.history_manager, decor) ip.history_manager = newhist
Loads the interacting decorator that ships with `acorn` into the ipython interactive shell. Args: ip (IPython.core.interactiveshell.InteractiveShell): ipython shell instance for interacting with the shell variables.
juraj-google-style
def expm1(x):
    if any_symbolic_tensors((x,)):
        return Expm1().symbolic_call(x)
    return backend.numpy.expm1(x)
Calculate `exp(x) - 1` for all elements in the tensor. Args: x: Input values. Returns: Output tensor, element-wise exponential minus one.
github-repos
def encode(self, value: Any) -> geno.DNA:
Encode a value into a DNA. Args: value: A value that conforms to the hyper value definition. Returns: DNA for the value.
github-repos
def set_dna(self, dna: geno.DNA) -> None: self._dna = dna self._decoded_value = None
Use this DNA to generate value. NOTE(daiyip): self._dna is only used in __call__. Thus 'set_dna' can be called multiple times to generate different values. Args: dna: DNA to use to decode the value.
github-repos
def expand_indicators(indicator):
    if indicator.count(' : ') > 0:
        indicator_list = []
        iregx_pattern = r'^(.*?(?=\s\:\s|$))?'
        iregx_pattern += r'(?:\s\:\s)?'
        iregx_pattern += r'((?<=\s\:\s).*?(?=(?:\s)?\:\s|$))?'
        iregx_pattern += r'(?:(?:\s)?\:\s)?'
        iregx_pattern += r'((?<=\s\:\s).*?(?=$))?$'
        iregx = re.compile(iregx_pattern)
        indicators = iregx.search(indicator)
        if indicators is not None:
            indicator_list = list(indicators.groups())
    else:
        indicator_list = [indicator]
    return indicator_list
Process indicators expanding file hashes/custom indicators into multiple entries. Args: indicator (string): " : " delimited string Returns: (list): a list of indicators split on " : ".
juraj-google-style
def get_other_answers_simple(pool, seeded_answers, get_student_item_dict, num_responses): ret = [] pool = {int(k): v for k, v in pool.items()} total_in_pool = len(seeded_answers) merged_pool = convert_seeded_answers(seeded_answers) student_id = get_student_item_dict()['student_id'] for key in pool: total_in_pool += len(pool[key]) if student_id in pool[key].keys(): total_in_pool -= 1 if key in merged_pool: merged_pool[key].update(pool[key].items()) else: merged_pool[key] = pool[key] selected = [] while len(ret) < min(num_responses, total_in_pool): for option, students in merged_pool.items(): student = student_id i = 0 while (student == student_id or i > 100) and (str(option) + student) not in selected: student = random.choice(students.keys()) i += 1 selected.append(str(option)+student) if student.startswith('seeded'): rationale = students[student] else: student_item = get_student_item_dict(student) submission = sas_api.get_answers_for_student(student_item) rationale = submission.get_rationale(0) ret.append({'option': option, 'rationale': rationale}) if len(ret) >= min(num_responses, total_in_pool): break return {"answers": ret}
Get answers from others with a simple algorithm, which picks one answer for each option. Args: see `get_other_answers` num_responses (int): the number of responses to be returned. This value may not be respected if there are not enough answers to return Returns: dict: answers based on the selection algorithm
juraj-google-style
def __init__(self, dims): if isinstance(dims, (tuple, list)): self._dims = tuple((as_dimension(d).value for d in dims)) elif dims is None: self._dims = None elif isinstance(dims, tensor_shape_pb2.TensorShapeProto): if dims.unknown_rank: self._dims = None else: self._dims = tuple((dim.size if dim.size != -1 else None for dim in dims.dim)) elif isinstance(dims, TensorShape): self._dims = dims._dims else: try: dims_iter = iter(dims) except TypeError: self._dims = (as_dimension(dims).value,) else: self._dims = [] for d in dims_iter: try: self._dims.append(as_dimension(d).value) except TypeError as e: raise TypeError("Failed to convert '{0!r}' to a shape: '{1!r}'could not be converted to a dimension. A shape should either be single dimension (e.g. 10), or an iterable of dimensions (e.g. [1, 10, None]).".format(dims, d)) from e self._dims = tuple(self._dims)
Creates a new TensorShape with the given dimensions. Args: dims: A list of Dimensions, or None if the shape is unspecified. Raises: TypeError: If dims cannot be converted to a list of dimensions.
github-repos
def parse_matches(patient_id, match_objs): LOG.info('Parsing MatchMaker matches for patient {}'.format(patient_id)) parsed_matches = [] for match_obj in match_objs: milliseconds_date = match_obj['created']['$date'] mdate = datetime.datetime.fromtimestamp((milliseconds_date / 1000.0)) match_type = 'external' matching_patients = [] parsed_match = {'match_oid': match_obj['_id']['$oid'], 'match_date': mdate} if (match_obj['data']['patient']['id'] == patient_id): match_results = match_obj['results'] for node_result in match_results: if (match_obj['match_type'] == 'internal'): match_type = 'internal' for patient in node_result['patients']: match_patient = {'patient_id': patient['patient']['id'], 'score': patient['score'], 'patient': patient['patient'], 'node': node_result['node']} matching_patients.append(match_patient) else: m_patient = match_obj['data']['patient'] contact_institution = m_patient['contact'].get('institution') if (contact_institution and ('Scout software user' in contact_institution)): match_type = 'internal' score = None for res in match_obj['results']: for patient in res['patients']: LOG.info('Looping in else, patient:{}'.format(patient['patient']['id'])) if (patient['patient']['id'] == patient_id): score = patient['score'] match_patient = {'patient_id': m_patient['id'], 'score': score, 'patient': m_patient, 'node': res['node']} matching_patients.append(match_patient) parsed_match['match_type'] = match_type parsed_match['patients'] = matching_patients parsed_matches.append(parsed_match) parsed_matches = sorted(parsed_matches, key=(lambda k: k['match_date']), reverse=True) return parsed_matches
Parse a list of matchmaker matches objects and returns a readable list of matches to display in matchmaker matches view. Args: patient_id(str): id of a mme patient match_objs(list): list of match objs returned by MME server for the patient # match_objs looks like this: [ { 'node' : { id : node_id , label: node_label}, 'patients' : [ { 'patient': {patient1_data} }, { 'patient': {patient2_data} }, .. ] }, .. ] Returns: parsed_matches(list): a list of parsed match objects
codesearchnet
def from_authorized_user_info(cls, info, scopes=None):
    keys_needed = set(('refresh_token', 'client_id', 'client_secret'))
    missing = keys_needed.difference(six.iterkeys(info))
    if missing:
        raise ValueError(
            'Authorized user info was not in the expected format, missing '
            'fields {}.'.format(', '.join(missing)))
    return Credentials(
        None,
        refresh_token=info['refresh_token'],
        token_uri=_GOOGLE_OAUTH2_TOKEN_ENDPOINT,
        scopes=scopes,
        client_id=info['client_id'],
        client_secret=info['client_secret'])
Creates a Credentials instance from parsed authorized user info. Args: info (Mapping[str, str]): The authorized user info in Google format. scopes (Sequence[str]): Optional list of scopes to include in the credentials. Returns: google.oauth2.credentials.Credentials: The constructed credentials. Raises: ValueError: If the info is not in the expected format.
codesearchnet
def _draw_frame(self, framedata): original = self.read_frame() if (original is None): self.update_info(self.info_string(message='Finished.', frame=framedata)) return if (self.original is not None): processed = self.process_frame(original.copy()) if (self.cmap_original is not None): original = to_gray(original) elif (not is_color_image(original)): self.original.set_cmap('gray') self.original.set_data(original) else: processed = self.process_frame(original) if (self.cmap_processed is not None): processed = to_gray(processed) elif (not is_color_image(processed)): self.processed.set_cmap('gray') if self.annotations: self.annotate(framedata) self.processed.set_data(processed) self.update_info(self.info_string(frame=framedata))
Reads, processes and draws the frames. If needed for color maps, conversions to gray scale are performed. In case the images are no color images and no custom color maps are defined, the colormap `gray` is applied. This function is called by TimedAnimation. Args: framedata: The frame data.
codesearchnet
def watchlist_movies(self, **kwargs): path = self._get_id_path('watchlist_movies') kwargs.update({'session_id': self.session_id}) response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
Get the list of movies on an account watchlist. Args: page: (optional) Minimum 1, maximum 1000. sort_by: (optional) 'created_at.asc' | 'created_at.desc' language: (optional) ISO 639-1 code. Returns: A dict representation of the JSON returned from the API.
codesearchnet
def dict_of_lists_add(dictionary, key, value): list_objs = dictionary.get(key, list()) list_objs.append(value) dictionary[key] = list_objs
Add value to a list in a dictionary by key Args: dictionary (DictUpperBound): Dictionary to which to add values key (Any): Key within dictionary value (Any): Value to add to list in dictionary Returns: None
juraj-google-style
def add_ldap_group_link(self, cn, group_access, provider, **kwargs): path = ('/groups/%s/ldap_group_links' % self.get_id()) data = {'cn': cn, 'group_access': group_access, 'provider': provider} self.manager.gitlab.http_post(path, post_data=data, **kwargs)
Add an LDAP group link. Args: cn (str): CN of the LDAP group group_access (int): Minimum access level for members of the LDAP group provider (str): LDAP provider for the LDAP group **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabCreateError: If the server cannot perform the request
codesearchnet
def is_chief(self): return self._is_chief
Return True if this is a chief supervisor. Returns: A bool.
github-repos
def put_pixel(self, x: int, y: int, color: Tuple[int, int, int]) -> None: lib.TCOD_image_put_pixel(self.image_c, x, y, color)
Change a pixel on this Image. Args: x (int): X pixel of the Image. Starting from the left at 0. y (int): Y pixel of the Image. Starting from the top at 0. color (Union[Tuple[int, int, int], Sequence[int]]): An (r, g, b) sequence or Color instance.
juraj-google-style
def IsAllSpent(self):
    for item in self.Items:
        if item == CoinState.Confirmed:
            return False
    return True
Flag indicating if all balance is spent. Returns: bool:
codesearchnet
def _get_block_publisher(self, state_hash): state_view = self._state_view_factory.create_view(state_hash) try: class BatchPublisher(): def send(self, transactions): raise InvalidGenesisConsensusError('Consensus cannot send transactions during genesis.') consensus = ConsensusFactory.get_configured_consensus_module(NULL_BLOCK_IDENTIFIER, state_view) return consensus.BlockPublisher(BlockCache(self._block_store), state_view_factory=self._state_view_factory, batch_publisher=BatchPublisher(), data_dir=self._data_dir, config_dir=self._config_dir, validator_id=self._identity_signer.get_public_key().as_hex()) except UnknownConsensusModuleError as e: raise InvalidGenesisStateError(e)
Returns the block publisher based on the consensus module set by the "sawtooth_settings" transaction family. Args: state_hash (str): The current state root hash for reading settings. Raises: InvalidGenesisStateError: if any errors occur getting the BlockPublisher.
codesearchnet
def mat2euler(rmat, axes="sxyz"): try: firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()] except (AttributeError, KeyError): firstaxis, parity, repetition, frame = axes i = firstaxis j = _NEXT_AXIS[i + parity] k = _NEXT_AXIS[i - parity + 1] M = np.array(rmat, dtype=np.float32, copy=False)[:3, :3] if repetition: sy = math.sqrt(M[i, j] * M[i, j] + M[i, k] * M[i, k]) if sy > EPS: ax = math.atan2(M[i, j], M[i, k]) ay = math.atan2(sy, M[i, i]) az = math.atan2(M[j, i], -M[k, i]) else: ax = math.atan2(-M[j, k], M[j, j]) ay = math.atan2(sy, M[i, i]) az = 0.0 else: cy = math.sqrt(M[i, i] * M[i, i] + M[j, i] * M[j, i]) if cy > EPS: ax = math.atan2(M[k, j], M[k, k]) ay = math.atan2(-M[k, i], cy) az = math.atan2(M[j, i], M[i, i]) else: ax = math.atan2(-M[j, k], M[j, j]) ay = math.atan2(-M[k, i], cy) az = 0.0 if parity: ax, ay, az = -ax, -ay, -az if frame: ax, az = az, ax return vec((ax, ay, az))
Converts given rotation matrix to euler angles in radian. Args: rmat: 3x3 rotation matrix axes: One of 24 axis sequences as string or encoded tuple Returns: converted euler angles in radian vec3 float
juraj-google-style
def import_module(name): parts = name.split('.') path = None module_name = '' fhandle = None for (index, part) in enumerate(parts): module_name = (part if (index == 0) else ('%s.%s' % (module_name, part))) path = ([path] if (path is not None) else path) try: (fhandle, path, descr) = imp.find_module(part, path) if (module_name in sys.modules): mod = sys.modules[module_name] else: mod = imp.load_module(module_name, fhandle, path, descr) finally: if fhandle: fhandle.close() return mod
Imports a module into the current runtime environment This function emulates the Python import system that allows for importing full path modules. It will break down the module and import each part (or skip if it is already loaded in cache). Args: name (str): The name of the module to import. This should be the full path of the module Returns: The module that was imported
codesearchnet
def _login(self, max_tries=2): if not self.current_url.startswith(_KindleCloudReaderBrowser._SIGNIN_URL): raise BrowserError( 'Current url "%s" is not a signin url ("%s")' % (self.current_url, _KindleCloudReaderBrowser._SIGNIN_URL)) email_field_loaded = lambda br: br.find_elements_by_id('ap_email') self._wait().until(email_field_loaded) tries = 0 while tries < max_tries: email_elem = self.find_element_by_id('ap_email') email_elem.clear() email_elem.send_keys(self._uname) pword_elem = self.find_element_by_id('ap_password') pword_elem.clear() pword_elem.send_keys(self._pword) def creds_entered(_): email_ok = email_elem.get_attribute('value') == self._uname pword_ok = pword_elem.get_attribute('value') == self._pword return email_ok and pword_ok kcr_page_loaded = lambda br: br.title == u'Kindle Cloud Reader' try: self._wait(5).until(creds_entered) self.find_element_by_id('signInSubmit-input').click() self._wait(5).until(kcr_page_loaded) except TimeoutException: tries += 1 else: return raise LoginError
Logs in to Kindle Cloud Reader. Args: max_tries: The maximum number of login attempts that will be made. Raises: BrowserError: If method called when browser not at a signin URL. LoginError: If login unsuccessful after `max_tries` attempts.
juraj-google-style
def segment_similarity(A, B, T=CLOSE_DISTANCE_THRESHOLD): l_a = len(A.points) l_b = len(B.points) idx = index.Index() dex = 0 for i in range((l_a - 1)): idx.insert(dex, bounding_box_from(A.points, i, (i + 1), T), obj=[A.points[i], A.points[(i + 1)]]) dex = (dex + 1) prox_acc = [] for i in range((l_b - 1)): ti = B.points[i].gen2arr() ti1 = B.points[(i + 1)].gen2arr() bb = bounding_box_from(B.points, i, (i + 1), T) intersects = idx.intersection(bb, objects=True) n_prox = [] i_prox = 0 a = 0 for x in intersects: a = (a + 1) pi = x.object[0].gen2arr() pi1 = x.object[1].gen2arr() prox = line_similarity(ti, ti1, pi, pi1, T) i_prox = (i_prox + prox) n_prox.append(prox) if (a != 0): prox_acc.append((i_prox / a)) else: prox_acc.append(0) return (np.mean(prox_acc), prox_acc)
Computes the similarity between two segments Args: A (:obj:`Segment`) B (:obj:`Segment`) Returns: float: between 0 and 1. Where 1 is very similar and 0 is completely different
codesearchnet
def _begin_connection_action(self, action): conn_id = action.data['connection_id'] int_id = action.data['internal_id'] callback = action.data['callback'] if self._get_connection_state(conn_id) != self.Disconnected: print(self._connections[conn_id]) callback(conn_id, self.id, False, 'Connection ID is already in use for another connection') return if self._get_connection_state(int_id) != self.Disconnected: callback(conn_id, self.id, False, 'Internal ID is already in use for another connection') return conn_data = { 'state': self.Connecting, 'microstate': None, 'conn_id': conn_id, 'int_id': int_id, 'callback': callback, 'timeout': action.timeout, 'context': action.data['context'] } self._connections[conn_id] = conn_data self._int_connections[int_id] = conn_data
Begin a connection attempt Args: action (ConnectionAction): the action object describing what we are connecting to
juraj-google-style
def func_callsig(func, with_name=True):
    import inspect
    argspec = inspect.getargspec(func)
    (args, varargs, varkw, defaults) = argspec
    callsig = inspect.formatargspec(*argspec[0:3])
    if with_name:
        callsig = get_callable_name(func) + callsig
    return callsig
String of function call signature Args: func (function): live python function Returns: str: callsig CommandLine: python -m utool.util_str --exec-func_callsig Example: >>> # ENABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> func = func_str >>> callsig = func_callsig(func) >>> result = str(callsig) >>> print(result) func_str(func, args, kwargs, type_aliases, packed, packkw, truncate)
codesearchnet
def update_(self, conf_dict, conf_arg=True): for section, secdict in conf_dict.items(): self[section].update_(secdict, conf_arg)
Update values of configuration options with dict. Args: conf_dict (dict): dict of dict indexed with section and option names. conf_arg (bool): if True, only options that can be set in a config file are updated.
juraj-google-style
def rollapply(data, window, fn):
    res = data.copy()
    res[:] = np.nan
    n = len(data)
    if window > n:
        return res
    for i in range(window - 1, n):
        res.iloc[i] = fn(data.iloc[i - window + 1:i + 1])
    return res
Apply a function fn over a rolling window of size window. Args: * data (Series or DataFrame): Series or DataFrame * window (int): Window size * fn (function): Function to apply over the rolling window. For a Series, the return value is expected to be a single number. For a DataFrame, it should return a new row. Returns: * Object of same dimensions as data
juraj-google-style
def WriteScanContext(self, scan_context, scan_step=None):
    if scan_step is not None:
        print('Scan step: {0:d}'.format(scan_step))
    print('Source type\t\t: {0:s}'.format(scan_context.source_type))
    print('')
    scan_node = scan_context.GetRootScanNode()
    self.WriteScanNode(scan_context, scan_node)
    print('')
Writes the source scanner context to stdout. Args: scan_context (SourceScannerContext): the source scanner context. scan_step (Optional[int]): the scan step, where None represents no step.
codesearchnet
def get(app: web.Application, feature_type: Type[Any] = None, key: Hashable = None) -> Any:
    key = key or feature_type
    if not key:
        raise AssertionError('No feature identifier provided')
    try:
        found = app[FEATURES_KEY][key]
    except KeyError:
        raise KeyError(f'No feature found for "{key}"')
    if feature_type and not isinstance(found, feature_type):
        raise AssertionError(f'Found {found} did not match type "{feature_type}"')
    return found
Finds declared feature. Identification is done based on feature type and key. Args: app (web.Application): The current Aiohttp application. feature_type (Type[Any]): The Python type of the desired feature. If specified, it will be checked against the found feature. key (Hashable): A specific identifier for the desired feature. Defaults to `feature_type` Returns: Any: The feature found for the combination of `feature_type` and `key`
codesearchnet
def write(self, index, value, name=None): return self._implementation.write(index, value, name=name)
Write `value` into index `index` of the TensorArray. Args: index: 0-D. int32 scalar with the index to write to. value: N-D. Tensor of type `dtype`. The Tensor to write to this index. name: A name for the operation (optional). Returns: A new TensorArray object with flow that ensures the write occurs. Use this object for all subsequent operations. Raises: ValueError: if there are more writers than specified.
github-repos
def _finished_callback(self, batch_fut, todo):
    self._running.remove(batch_fut)
    err = batch_fut.get_exception()
    if err is not None:
        tb = batch_fut.get_traceback()
        for fut, _ in todo:
            if not fut.done():
                fut.set_exception(err, tb)
Passes exception along. Args: batch_fut: the batch future returned by running todo_tasklet. todo: (fut, option) pair. fut is the future returned by each add() call. If the batch fut was successful, it has already called fut.set_result() on other individual futs. This method only handles the case when the batch fut encountered an exception.
codesearchnet
def convert_shape(params, w_name, scope_name, inputs, layers, weights, names):
    print('Converting shape ...')

    def target_layer(x):
        import tensorflow as tf
        return tf.shape(x)

    lambda_layer = keras.layers.Lambda(target_layer)
    layers[scope_name] = lambda_layer(layers[inputs[0]])
Convert shape operation. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
juraj-google-style
def ch_stop_time(self, *channels: List[Channel]) -> int: return self.timeslots.ch_stop_time(*channels)
Return maximum stop time for supplied channels. Args: *channels: Supplied channels
juraj-google-style
def acquire(self, host: str, port: int, use_ssl: bool=False, host_key: Optional[Any]=None) \ -> Union[Connection, SSLConnection]: assert isinstance(port, int), 'Expect int. Got {}'.format(type(port)) assert not self._closed yield from self._process_no_wait_releases() if use_ssl: connection_factory = functools.partial( self._ssl_connection_factory, hostname=host) else: connection_factory = functools.partial( self._connection_factory, hostname=host) connection_factory = functools.partial( HappyEyeballsConnection, (host, port), connection_factory, self._resolver, self._happy_eyeballs_table, is_ssl=use_ssl ) key = host_key or (host, port, use_ssl) with (yield from self._host_pools_lock): if key not in self._host_pools: host_pool = self._host_pools[key] = HostPool( connection_factory, max_connections=self._max_host_count ) self._host_pool_waiters[key] = 1 else: host_pool = self._host_pools[key] self._host_pool_waiters[key] += 1 _logger.debug('Check out %s', key) connection = yield from host_pool.acquire() connection.key = key with (yield from self._host_pools_lock): self._host_pool_waiters[key] -= 1 return connection
Return an available connection. Args: host: A hostname or IP address. port: Port number. use_ssl: Whether to return a SSL connection. host_key: If provided, it overrides the key used for per-host connection pooling. This is useful for proxies for example. Coroutine.
juraj-google-style
def tasks(self): if (not self.__tasks): self.__tasks = Tasks(self.__connection) return self.__tasks
Gets the Tasks API client. Returns: Tasks:
codesearchnet
def shutdown(self, message=None): for (name, server) in self.servers.items(): server.quit(message)
Disconnect all servers with a message. Args: message (str): Quit message to use on each connection.
codesearchnet
def fix_variable(self, v, value): variables = self.variables try: idx = variables.index(v) except ValueError: raise ValueError('given variable {} is not part of the constraint'.format(v)) if (value not in self.vartype.value): raise ValueError('expected value to be in {}, received {} instead'.format(self.vartype.value, value)) configurations = frozenset(((config[:idx] + config[(idx + 1):]) for config in self.configurations if (config[idx] == value))) if (not configurations): raise UnsatError('fixing {} to {} makes this constraint unsatisfiable'.format(v, value)) variables = (variables[:idx] + variables[(idx + 1):]) self.configurations = configurations self.variables = variables def func(*args): return (args in configurations) self.func = func self.name = '{} ({} fixed to {})'.format(self.name, v, value)
Fix the value of a variable and remove it from the constraint. Args: v (variable): Variable in the constraint to be set to a constant value. val (int): Value assigned to the variable. Values must match the :class:`.Vartype` of the constraint. Examples: This example creates a constraint that :math:`a \\ne b` on binary variables, fixes variable a to 0, and tests two candidate solutions. >>> import dwavebinarycsp >>> const = dwavebinarycsp.Constraint.from_func(operator.ne, ... ['a', 'b'], dwavebinarycsp.BINARY) >>> const.fix_variable('a', 0) >>> const.check({'b': 1}) True >>> const.check({'b': 0}) False
codesearchnet
def entropy(rho: Density, base: float = None) -> float:
    op = asarray(rho.asoperator())
    probs = np.linalg.eigvalsh(op)
    probs = np.maximum(probs, 0.0)
    return scipy.stats.entropy(probs, base=base)
Returns the von-Neumann entropy of a mixed quantum state. Args: rho: A density matrix base: Optional logarithm base. Default is base e, and entropy is measures in nats. For bits set base to 2. Returns: The von-Neumann entropy of rho
codesearchnet
def write(self, data, timeout_ms=None):
    timeout = timeouts.PolledTimeout.from_millis(timeout_ms)
    while data:
        self._transport.write(data[:self._transport.adb_connection.maxdata], timeout)
        data = data[self._transport.adb_connection.maxdata:]
Write data to this stream. Args: data: Data to write. timeout_ms: Timeout to use for the write/Ack transaction, in milliseconds (or as a PolledTimeout object). Raises: AdbProtocolError: If an ACK is not received. AdbStreamClosedError: If the stream is already closed, or gets closed before the write completes.
codesearchnet
def MergeOrAddUser(self, kb_user):
    user = self.GetUser(
        sid=kb_user.sid, uid=kb_user.uid, username=kb_user.username)
    new_attrs = []
    merge_conflicts = []
    if not user:
        new_attrs = self._CreateNewUser(kb_user)
    else:
        for key, val in iteritems(kb_user.AsDict()):
            if user.Get(key) and user.Get(key) != val:
                merge_conflicts.append((key, user.Get(key), val))
            user.Set(key, val)
            new_attrs.append("users.%s" % key)
    return new_attrs, merge_conflicts
Merge a user into existing users or add new if it doesn't exist. Args: kb_user: A User rdfvalue. Returns: A list of strings with the set attribute names, e.g. ["users.sid"]
juraj-google-style
def get_bounds(changeset):
    try:
        return Polygon([
            (float(changeset.get('min_lon')), float(changeset.get('min_lat'))),
            (float(changeset.get('max_lon')), float(changeset.get('min_lat'))),
            (float(changeset.get('max_lon')), float(changeset.get('max_lat'))),
            (float(changeset.get('min_lon')), float(changeset.get('max_lat'))),
            (float(changeset.get('min_lon')), float(changeset.get('min_lat'))),
        ])
    except TypeError:
        return Polygon()
Get the bounds of the changeset and return it as a Polygon object. If the changeset has not coordinates (case of the changesets that deal only with relations), it returns an empty Polygon. Args: changeset: the XML string of the changeset.
juraj-google-style
def poly_energy(sample_like, poly): msg = ("poly_energy is deprecated and will be removed in dimod 0.9.0." "In the future, use BinaryPolynomial.energy") warnings.warn(msg, DeprecationWarning) return BinaryPolynomial(poly, 'SPIN').energy(sample_like)
Calculates energy of a sample from a higher order polynomial. Args: sample (samples_like): A raw sample. `samples_like` is an extension of NumPy's array_like structure. See :func:`.as_samples`. poly (dict): Polynomial as a dict of form {term: bias, ...}, where `term` is a tuple of variables and `bias` the associated bias. Returns: float: The energy of the sample.
juraj-google-style
def __init__(self, filename, asarfile, files, baseoffset): self.filename = filename self.asarfile = asarfile self.files = files self.baseoffset = baseoffset
Initializes a new instance of the :see AsarArchive class. Args: filename (str): The path to the *.asar file to read/write from/to. asarfile (File): A open *.asar file object. files (dict): Dictionary of files contained in the archive. (The header that was read from the file). baseoffset (int): Base offset, indicates where in the file the header ends.
juraj-google-style
def postprocess(chunks: typing.List[str]) -> typing.List[str]: chunks = break_before_sequence(chunks, '(') chunks = break_before_sequence(chunks, 'もら') return chunks
Applies some processes to modify the extracted chunks. Args: chunks (List[str]): Source chunks. Returns: Processed chunks.
github-repos
def notify_on_change(enabled: bool=True) -> ContextManager[None]: return thread_local.thread_local_value_scope(_TLS_ENABLE_CHANGE_NOTIFICATION, enabled, True)
Returns a context manager to enable or disable notification upon change. `notify_on_change` is thread-safe and can be nested. For example, in the following code, `_on_change` (thus `_on_bound`) method of `a` will be triggered due to the rebind in the inner `with` statement, and those of `b` will not be triggered as the outer `with` statement disables the notification:: with pg.notify_on_change(False): with pg.notify_on_change(True): a.rebind(b=1) b.rebind(x=2) Args: enabled: If True, enable change notification in current scope. Otherwise, disable notification. Returns: A context manager for allowing/disallowing change notification in scope.
github-repos
def template_string(
    task: Task, template: str, jinja_filters: FiltersDict = None, **kwargs: Any
) -> Result:
    jinja_filters = jinja_filters or {} or task.nornir.config.jinja2.filters
    text = jinja_helper.render_from_string(
        template=template, host=task.host, jinja_filters=jinja_filters, **kwargs
    )
    return Result(host=task.host, result=text)
Renders a string with jinja2. All the host data is available in the template Arguments: template (string): template string jinja_filters (dict): jinja filters to enable. Defaults to nornir.config.jinja2.filters **kwargs: additional data to pass to the template Returns: Result object with the following attributes set: * result (``string``): rendered string
juraj-google-style
def delete_resource(self, resource, delete=True): if isinstance(resource, str): if is_valid_uuid(resource) is False: raise HDXError('%s is not a valid resource id!' % resource) return self._remove_hdxobject(self.resources, resource, delete=delete)
Delete a resource from the dataset and also from HDX by default Args: resource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary delete (bool): Whether to delete the resource from HDX (not just the dataset). Defaults to True. Returns: bool: True if resource removed or False if not
juraj-google-style
def unique(ar): import dask.array as da if isinstance(ar, da.core.Array): return da.unique(ar) return _unique(ar)
Find the unique elements of an array. It uses ``dask.array.unique`` if necessary. Args: ar (array_like): Input array. Returns: array_like: the sorted unique elements.
codesearchnet
def torch_equals_ignore_index(tensor, tensor_other, ignore_index=None):
    if ignore_index is not None:
        assert tensor.size() == tensor_other.size()
        mask_arr = tensor.ne(ignore_index)
        tensor = tensor.masked_select(mask_arr)
        tensor_other = tensor_other.masked_select(mask_arr)
    return torch.equal(tensor, tensor_other)
Compute ``torch.equal`` with the optional mask parameter. Args: ignore_index (int, optional): Specifies a ``tensor`` index that is ignored. Returns: (bool) Returns ``True`` if target and prediction are equal.
codesearchnet
def normalize_build_spec(self, build_spec):
    for cmd in build_spec:
        if not cmd:
            continue
        cmd_name = cmd.keys()[0]
        cmd_options = cmd.values()[0]
        cmd_handler = self.get_cmd_handler(cmd_name)
        self.build_cmds.append(cmd_handler(cmd_options))
Convert a build spec into a list of Command tuples. After running this command, self.build_cmds should hold all the commands that should be run on the disk in self.disk_path. Args: build_spec (dict): The buildspec part from the init file
codesearchnet
def list_bindings(site):
    ret = dict()
    sites = list_sites()
    if site not in sites:
        log.warning('Site not found: %s', site)
        return ret
    ret = sites[site]['bindings']
    if not ret:
        log.warning('No bindings found for site: %s', site)
    return ret
Get all configured IIS bindings for the specified site. Args: site (str): The name if the IIS Site Returns: dict: A dictionary of the binding names and properties. CLI Example: .. code-block:: bash salt '*' win_iis.list_bindings site
juraj-google-style
def _extract_gcs_api_response_error(message):
    try:
        if len(message) == 3:
            data = json.loads(message[2])
            return data['error']['errors'][0]['message']
    except Exception:
        pass
    return message
A helper function to extract user-friendly error messages from service exceptions. Args: message: An error message from an exception. If this is from our HTTP client code, it will actually be a tuple. Returns: A modified version of the message that is less cryptic.
juraj-google-style
def freeze(script_path, target_dir='frozen', **kw): cmds = [] freeze_start_time = time.time() logging.debug('/\\%s%s Output%s/\\' % ('-' * 10, 'Pyinstaller', '-' * 10)) orig_dir = os.path.abspath('.') script_path = os.path.abspath(script_path) try: os.chdir(target_dir) cmds += _freeze_config() pyinst_path = '%s/thirdparty/pyinstaller' % __path__[0] cur_cmd = 'python -O %s/pyinstaller.py %s --skip-configure' % (pyinst_path, script_path) cmds.append(cur_cmd) if _run(cur_cmd): _freeze_config(force=True) cur_cmd = 'python -O %s/pyinstaller.py %s' % (pyinst_path, script_path) _run(cur_cmd) finally: os.chdir(orig_dir) logging.debug('\\/%s%s Output%s\\/' % ('-' * 10, 'Pyinstaller', '-' * 10)) logging.info('Pyinstaller took [%f] seconds' % (time.time() - freeze_start_time)) return cmds
Wraps pyinstaller and provides an easy to use interface Args: script_path: Absolute path to python script to be frozen. Returns: List of freeze commands ran Raises: subprocess.CalledProcessError: Freeze error. OSError: Freeze not found.
juraj-google-style
def parse_string_to_constructor(ctor_string): orig_ctor_string = ctor_string if ('.' not in ctor_string): ctor_string = ('sonnet.' + ctor_string) if ctor_string.startswith('snt.'): ctor_string = ('sonnet.' + ctor_string[len('snt.'):]) (package_name, rest) = ctor_string.split('.', 1) package = importlib.import_module(package_name) try: return _recursive_getattr(package, rest) except AttributeError: raise ValueError('could not find `{}`, after normalizing to `{}`'.format(orig_ctor_string, ctor_string))
Returns a callable which corresponds to the constructor string. Various modules (eg, ConvNet2D) take constructor arguments which are callables, indicating a submodule to build. These can be passed as actual constructors, eg `snt.LayerNorm`, however that makes the config for that module not trivially serializable. This function tries to map a string representation to the underlying callable, allowing configs to remain serializable where necessary. Args: ctor_string: string representing some module in Sonnet. If the string is provided with no dots, we assume it is a member of Sonnet available at top level, i.e. "LayerNorm" maps to `snt.LayerNorm`. Raises: ValueError: if no matching constructor can be found. Returns: Callable constructor which corresponds to `ctor_string`.
codesearchnet
def set_suite_info(self, suite_info=None): self._suite_info = suite_info or {}
Interface for sub-classes to set user defined extra info to test summary. Args: suite_info: dict, A dict of suite information. Keys and values must be serializable.
github-repos
def register_subcommand(parser: ArgumentParser): train_parser = parser.add_parser('train', help='CLI tool to train a model on a task.') train_parser.add_argument('--train_data', type=str, required=True, help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.') train_parser.add_argument('--column_label', type=int, default=0, help='Column of the dataset csv file with example labels.') train_parser.add_argument('--column_text', type=int, default=1, help='Column of the dataset csv file with example texts.') train_parser.add_argument('--column_id', type=int, default=2, help='Column of the dataset csv file with example ids.') train_parser.add_argument('--skip_first_row', action='store_true', help='Skip the first row of the csv file (headers).') train_parser.add_argument('--validation_data', type=str, default='', help='path to validation dataset.') train_parser.add_argument('--validation_split', type=float, default=0.1, help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.') train_parser.add_argument('--output', type=str, default='./', help='path to saved the trained model.') train_parser.add_argument('--task', type=str, default='text_classification', help='Task to train the model on.') train_parser.add_argument('--model', type=str, default='google-bert/bert-base-uncased', help="Model's name or path to stored model.") train_parser.add_argument('--train_batch_size', type=int, default=32, help='Batch size for training.') train_parser.add_argument('--valid_batch_size', type=int, default=64, help='Batch size for validation.') train_parser.add_argument('--learning_rate', type=float, default=3e-05, help='Learning rate.') train_parser.add_argument('--adam_epsilon', type=float, default=1e-08, help='Epsilon for Adam optimizer.') train_parser.set_defaults(func=train_command_factory)
Register this command to argparse so it's available for the transformer-cli Args: parser: Root parser to register command-specific arguments
github-repos
def set_spacing(self, space): self.figure.spacing = space if 'subplots_adjust_kwargs' not in self.figure.__dict__: self.figure.subplots_adjust_kwargs = {} if space == 'wide': self.figure.subplots_adjust_kwargs['hspace'] = 0.3 self.figure.subplots_adjust_kwargs['wspace'] = 0.3 else: self.figure.subplots_adjust_kwargs['hspace'] = 0.0 self.figure.subplots_adjust_kwargs['wspace'] = 0.0 return
Set the figure spacing. Sets whether in general there is space between subplots. If all axes are shared, this can be `tight`. Default in code is `wide`. The main difference is that the tick labels extend to the ends if space==`wide`. If space==`tight`, the edge tick labels are cut off for clarity. Args: space (str): Sets spacing for subplots. Either `wide` or `tight`.
juraj-google-style
def _CopyTimeFromString(self, time_string): time_string_length = len(time_string) if (time_string_length < 8): raise ValueError('Time string too short.') if ((time_string[2] != ':') or (time_string[5] != ':')): raise ValueError('Invalid time string.') try: hours = int(time_string[0:2], 10) except ValueError: raise ValueError('Unable to parse hours.') if (hours not in range(0, 24)): raise ValueError('Hours value: {0:d} out of bounds.'.format(hours)) try: minutes = int(time_string[3:5], 10) except ValueError: raise ValueError('Unable to parse minutes.') if (minutes not in range(0, 60)): raise ValueError('Minutes value: {0:d} out of bounds.'.format(minutes)) try: seconds = int(time_string[6:8], 10) except ValueError: raise ValueError('Unable to parse day of seconds.') if (seconds not in range(0, 60)): raise ValueError('Seconds value: {0:d} out of bounds.'.format(seconds)) microseconds = None time_zone_offset = None time_zone_string_index = 8 while (time_zone_string_index < time_string_length): if (time_string[time_zone_string_index] in ('+', '-')): break time_zone_string_index += 1 if (time_zone_string_index == (time_string_length - 1)): time_zone_string_index += 1 if ((time_string_length > 8) and (time_string[8] == '.')): time_fraction_length = (time_zone_string_index - 9) if (time_fraction_length not in (3, 6)): raise ValueError('Invalid time string.') try: time_fraction = time_string[9:time_zone_string_index] time_fraction = int(time_fraction, 10) except ValueError: raise ValueError('Unable to parse time fraction.') if (time_fraction_length == 3): time_fraction *= 1000 microseconds = time_fraction if (time_zone_string_index < time_string_length): if (((time_string_length - time_zone_string_index) != 6) or (time_string[(time_zone_string_index + 3)] != ':')): raise ValueError('Invalid time string.') try: hours_from_utc = int(time_string[(time_zone_string_index + 1):(time_zone_string_index + 3)]) except ValueError: raise ValueError('Unable to parse time zone hours offset.') if (hours_from_utc not in range(0, 15)): raise ValueError('Time zone hours offset value out of bounds.') try: minutes_from_utc = int(time_string[(time_zone_string_index + 4):(time_zone_string_index + 6)]) except ValueError: raise ValueError('Unable to parse time zone minutes offset.') if (minutes_from_utc not in range(0, 60)): raise ValueError('Time zone minutes offset value out of bounds.') time_zone_offset = ((hours_from_utc * 60) + minutes_from_utc) if (time_string[time_zone_string_index] != '-'): time_zone_offset = (- time_zone_offset) return (hours, minutes, seconds, microseconds, time_zone_offset)
Copies a time from a string. Args: time_string (str): time value formatted as: hh:mm:ss.######[+-]##:## Where # are numeric digits ranging from 0 to 9 and the seconds fraction can be either 3 or 6 digits. The seconds fraction and time zone offset are optional. Returns: tuple[int, int, int, int, int]: hours, minutes, seconds, microseconds, time zone offset in minutes. Raises: ValueError: if the time string is invalid or not supported.
codesearchnet
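Illustrative inputs and expected return tuples for the parser above, assuming `dt` is an instance of the class that defines _CopyTimeFromString; values are traced from the parsing logic, not from external documentation.

# dt._CopyTimeFromString('08:04:32')
#   -> (8, 4, 32, None, None)
# dt._CopyTimeFromString('08:04:32.123')
#   -> (8, 4, 32, 123000, None)       # 3-digit fraction scaled to microseconds
# dt._CopyTimeFromString('08:04:32.123456-05:30')
#   -> (8, 4, 32, 123456, 330)        # sign is flipped so the offset can be added to reach UTC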
def __init__(self, object_type=None, template_attribute=None): super(CreateRequestPayload, self).__init__( tag=enums.Tags.REQUEST_PAYLOAD ) self._object_type = None self._template_attribute = None self.object_type = object_type self.template_attribute = template_attribute
Construct a Create request payload structure. Args: object_type (enum): An ObjectType enumeration specifying the type of object to create. Optional, defaults to None. Required for read/write. template_attribute (TemplateAttribute): A TemplateAttribute structure containing a set of attributes to set on the new object. Optional, defaults to None. Required for read/write.
juraj-google-style
def new_product(self, name): n = self._product_cls(self, name, summary_cls=self._summary_cls) self.graph.add_node(n) self.products.append(n) return n
Create a new product. Args: name: name of the new product. Returns: A new product instance.
juraj-google-style
def _update_exit_code_from_error(self, error): for (error_type, exit_code) in self.ERROR_CODE_MAP.items(): if isinstance(error, error_type): self.update_exit_code(exit_code) break else: self.update_exit_code(ExitStatus.generic_error)
Set the exit code based on the error type. Args: error (:class:`Exception`): An exception instance.
codesearchnet
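A self-contained sketch of the dispatch above; the ExitStatus values, ERROR_CODE_MAP contents, and update_exit_code behaviour shown here are assumptions that stand in for the surrounding application class.

class ExitStatus:
    generic_error = 1
    network_error = 4

class App:
    ERROR_CODE_MAP = {ConnectionError: ExitStatus.network_error}
    exit_code = 0

    def update_exit_code(self, code):
        # Hypothetical policy: keep the highest (most severe) code seen so far.
        self.exit_code = max(self.exit_code, code)

app = App()
_update_exit_code_from_error(app, ConnectionError())  # mapped type -> 4
_update_exit_code_from_error(app, ValueError())       # unmapped -> generic_error, max keeps 4
print(app.exit_code)                                  # 4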
def _expand_sequence(self, seq: List[GridQubit]) -> List[GridQubit]: i = 1 while (i < len(seq)): path = self._find_path_between(seq[(i - 1)], seq[i], set(seq)) if path: seq = ((seq[:i] + path) + seq[i:]) else: i += 1 return seq
Tries to expand given sequence with more qubits. Args: seq: Linear sequence of qubits. Returns: New continuous linear sequence which contains all the qubits from seq and possibly new qubits inserted in between.
codesearchnet
def l2_distance(t1, t2, epsilon=1e-12, name=None): with tf.name_scope(name, 'l2_distance', [t1, t2]) as scope: t1 = tf.convert_to_tensor(t1, name='t1') t2 = tf.convert_to_tensor(t2, name='t2') return tf.sqrt(tf.maximum(l2_distance_sq(t1, t2, scope), epsilon))
L2 distance between t1 and t2, with the gradient of the square root capped.

Args:
    t1: A tensor.
    t2: A tensor that is the same size as t1.
    epsilon: A lower bound for the distance, useful to avoid taking the sqrt of very small values that can blow up gradients.
    name: Optional name for this op.
Returns:
    The l2 distance between t1 and t2.
juraj-google-style
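A quick graph-mode sketch; l2_distance_sq is not shown in the snippet, so a per-example stand-in is defined here, and the tf.compat.v1 import is an adaptation for current TensorFlow installs.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

def l2_distance_sq(t1, t2, name=None):
    # Stand-in for the companion helper: per-example sum of squared differences.
    with tf.name_scope(name, 'l2_distance_sq', [t1, t2]):
        return tf.reduce_sum(tf.square(t1 - t2), axis=-1)

a = tf.constant([[0.0, 0.0], [1.0, 1.0]])
b = tf.constant([[3.0, 4.0], [1.0, 1.0]])
dist = l2_distance(a, b)

with tf.Session() as sess:
    print(sess.run(dist))  # ~[5.0, 1e-06]; identical rows hit the epsilon floor, sqrt(1e-12)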
def load_array_types(self, fname): type_defs = '' with open(fname, 'rt') as fh: type_defs = fh.read() try: type_defs = ast.literal_eval(type_defs) except SyntaxError: type_defs = {} self._add_array_types(type_defs)
Load file of previously extracted data types Args: fname (str): Name of file to load array database from
juraj-google-style
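The file is expected to hold a Python dict literal that ast.literal_eval can parse. A sketch, where the dict schema and the `extractor` instance (any object of the class that defines _add_array_types) are assumptions:

import os
import tempfile

type_defs = {'my_pkg.my_lib': {'data_array_t': 'unsigned'}}  # hypothetical schema

with tempfile.NamedTemporaryFile('wt', suffix='.dat', delete=False) as fh:
    fh.write(repr(type_defs))  # a dict literal is what ast.literal_eval expects
    fname = fh.name

# `extractor` stands for an instance of the class that defines _add_array_types:
extractor.load_array_types(fname)
os.remove(fname)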
def get_parameter_vector(self, include_frozen=False): if include_frozen: return self.parameter_vector return self.parameter_vector[self.unfrozen_mask]
Get an array of the parameter values in the correct order Args: include_frozen (Optional[bool]): Should the frozen parameters be included in the returned value? (default: ``False``)
codesearchnet
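The include_frozen switch reduces to boolean masking; a standalone numpy sketch of the two return paths:

import numpy as np

parameter_vector = np.array([2.0, -1.5, 0.3])
unfrozen_mask = np.array([True, False, True])  # the second parameter is frozen

print(parameter_vector[unfrozen_mask])  # [2.  0.3] -> include_frozen=False
print(parameter_vector)                 # full vector -> include_frozen=True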
def version_info(self): if (self._api_version is None): self.query_api_version() return (self._api_version['api-major-version'], self._api_version['api-minor-version'])
Returns API version information for the HMC. This operation does not require authentication. Returns: :term:`HMC API version`: The HMC API version supported by the HMC. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.ConnectionError`
codesearchnet
def delete(self, service): url = self._url_format(service) return self.rest_action(self._session.delete, url)
Generic DELETE operation for Learning Modules API.

Args:
    service (str): The endpoint service to use, e.g. gradebook

Raises:
    requests.RequestException: Raised on a connection error
    ValueError: Unable to decode response content

Returns:
    list: the json-encoded content of the response
codesearchnet
def parse_selinux(parts): owner, group = parts[:2] selinux = parts[2].split(":") lsel = len(selinux) path, link = parse_path(parts[-1]) result = { "owner": owner, "group": group, "se_user": selinux[0], "se_role": selinux[1] if lsel > 1 else None, "se_type": selinux[2] if lsel > 2 else None, "se_mls": selinux[3] if lsel > 3 else None, "name": path } if link: result["link"] = link return result
Parse part of an ls output line that contains selinux information.

Args:
    parts (list): A four element list of strings representing the initial parts of an ls line after the permission bits. The parts are owner, group, selinux info, and the path.

Returns:
    A dict containing owner, group, se_user, se_role, se_type, se_mls, and name. If the raw name was a symbolic link, a link key is also included.
juraj-google-style
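An illustrative call; the values are made up, and the expected result assumes the sibling parse_path helper returns (path, None) for a non-symlink name.

parts = ['root', 'root', 'system_u:object_r:etc_t:s0', '/etc/hosts']
parse_selinux(parts)
# -> {'owner': 'root', 'group': 'root',
#     'se_user': 'system_u', 'se_role': 'object_r', 'se_type': 'etc_t', 'se_mls': 's0',
#     'name': '/etc/hosts'}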
def _analemma_suns(self): for h in xrange(0, 24): if (self._analemma_position(h) < 0): continue elif (self._analemma_position(h) == 0): chours = [] prevhour = (self.latitude <= 0) num_of_days = (8760 if (not self.is_leap_year) else (8760 + 24)) for hoy in xrange(h, num_of_days, 24): thishour = self.calculate_sun_from_hoy(hoy).is_during_day if (thishour != prevhour): if (not thishour): hoy -= 24 dt = DateTime.from_hoy(hoy, self.is_leap_year) chours.append((dt.month, dt.day, dt.hour)) prevhour = thishour tt = [] for hcount in range(int((len(chours) / 2))): st = chours[(2 * hcount)] en = chours[((2 * hcount) + 1)] if (self.latitude >= 0): tt = (((([self.calculate_sun(*st)] + [self.calculate_sun(st[0], d, h) for d in xrange((st[1] + 1), 29, 7)]) + [self.calculate_sun(m, d, h) for m in xrange((st[0] + 1), en[0]) for d in xrange(3, 29, 7)]) + [self.calculate_sun(en[0], d, h) for d in xrange(3, en[1], 7)]) + [self.calculate_sun(*en)]) else: tt = ((((([self.calculate_sun(*en)] + [self.calculate_sun(en[0], d, h) for d in xrange((en[1] + 1), 29, 7)]) + [self.calculate_sun(m, d, h) for m in xrange((en[0] + 1), 13) for d in xrange(3, 29, 7)]) + [self.calculate_sun(m, d, h) for m in xrange(1, st[0]) for d in xrange(3, 29, 7)]) + [self.calculate_sun(st[0], d, h) for d in xrange(3, st[1], 7)]) + [self.calculate_sun(*st)]) (yield tt) else: (yield tuple((self.calculate_sun(((m % 12) + 1), d, h) for m in xrange(0, 13) for d in (7, 14, 21)))[:(- 2)])
Calculate times that should be used for drawing analemma_curves. Returns: A list of list of analemma suns.
codesearchnet
def median(series): if np.issubdtype(series.dtype, np.number): return series.median() else: return np.nan
Returns the median value of a series. Args: series (pandas.Series): column to summarize.
codesearchnet
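A quick check of both branches (numeric and non-numeric dtypes):

import numpy as np
import pandas as pd

print(median(pd.Series([1, 3, 7])))        # 3.0
print(median(pd.Series(['a', 'b', 'c'])))  # nan -- non-numeric dtype falls through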
def set_white(self, brightness, colourtemp): if (not (25 <= brightness <= 255)): raise ValueError('The brightness needs to be between 25 and 255.') if (not (0 <= colourtemp <= 255)): raise ValueError('The colour temperature needs to be between 0 and 255.') payload = self.generate_payload(SET, {self.DPS_INDEX_MODE: self.DPS_MODE_WHITE, self.DPS_INDEX_BRIGHTNESS: brightness, self.DPS_INDEX_COLOURTEMP: colourtemp}) data = self._send_receive(payload) return data
Set white coloured theme of an rgb bulb. Args: brightness(int): Value for the brightness (25-255). colourtemp(int): Value for the colour temperature (0-255).
codesearchnet
def eq(left: Any, right: Any) -> bool: if left is right: return True if isinstance(left, list) and isinstance(right, list) or (isinstance(left, tuple) and isinstance(right, tuple)): if len(left) != len(right): return False for x, y in zip(left, right): if ne(x, y): return False return True elif isinstance(left, dict): if not isinstance(right, dict) or len(left) != len(right) or set(left.keys()) != set(right.keys()): return False left_items = left.sym_items if isinstance(left, Symbolic) else left.items right_item = right.sym_getattr if isinstance(right, Symbolic) else right.__getitem__ for k, v in left_items(): if ne(v, right_item(k)): return False return True elif hasattr(left, 'sym_eq') and (not inspect.isclass(left)) and (left.sym_eq.__code__ is not Symbolic.sym_eq.__code__): return left.sym_eq(right) elif hasattr(right, 'sym_eq') and (not inspect.isclass(right)) and (right.sym_eq.__code__ is not Symbolic.sym_eq.__code__): return right.sym_eq(left) return pg_typing.callable_eq(left, right)
Compares if two values are equal. Use symbolic equality if possible. Example:: @pg.members([ ('x', pg.typing.Any()) ]) class A(pg.Object): def sym_eq(self, right): if super().sym_eq(right): return True return pg.eq(self.x, right) class B: pass assert pg.eq(1, 1) assert pg.eq(A(1), A(1)) # This is True since A has override `sym_eq`. assert pg.eq(A(1), 1) # Objects of B are compared by references. assert not pg.eq(A(B()), A(B())) Args: left: The left-hand value to compare. right: The right-hand value to compare. Returns: True if left and right is equal or symbolically equal. Otherwise False.
github-repos
def _convert_to_eval_metric(metric_fn): def problem_metric_fn(*args): (scores, weights) = metric_fn(*args) return tf.metrics.mean(scores, weights) return problem_metric_fn
Wrap a metric fn that returns scores and weights as an eval metric fn. The input metric_fn returns values for the current batch. The wrapper aggregates the return values collected over all of the batches evaluated. Args: metric_fn: function that returns scores and weights for the current batch's logits and predicted labels. Returns: function that aggregates the scores and weights from metric_fn.
juraj-google-style
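A TF1-style sketch of wiring the wrapper into an eval-metric dict; padded_accuracy, the placeholder shapes, and the padding convention (label 0) are all illustrative assumptions.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

def padded_accuracy(logits, labels):
    # Hypothetical metric_fn: per-token correctness scores plus weights masking out padding.
    predictions = tf.cast(tf.argmax(logits, axis=-1), labels.dtype)
    scores = tf.cast(tf.equal(predictions, labels), tf.float32)
    weights = tf.cast(tf.not_equal(labels, 0), tf.float32)
    return scores, weights

logits = tf.placeholder(tf.float32, [None, 10, 32])  # batch x length x vocab
labels = tf.placeholder(tf.int64, [None, 10])
eval_metric_ops = {'accuracy': _convert_to_eval_metric(padded_accuracy)(logits, labels)}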
def diversity(layer): def inner(T): layer_t = T(layer) (batch_n, _, _, channels) = layer_t.get_shape().as_list() flattened = tf.reshape(layer_t, [batch_n, (- 1), channels]) grams = tf.matmul(flattened, flattened, transpose_a=True) grams = tf.nn.l2_normalize(grams, axis=[1, 2], epsilon=1e-10) return (sum([sum([tf.reduce_sum((grams[i] * grams[j])) for j in range(batch_n) if (j != i)]) for i in range(batch_n)]) / batch_n) return inner
Encourage diversity between each batch element.

A neural net feature often responds to multiple things, but naive feature visualization often only shows us one. If you optimize a batch of images, this objective will encourage them all to be different.

In particular, it calculates the correlation matrix of activations at layer for each image, and then penalizes cosine similarity between them. This is very similar to ideas in style transfer, except we're *penalizing* style similarity instead of encouraging it.

Args:
    layer: layer to evaluate activation correlations on.

Returns:
    Objective.
codesearchnet
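The core penalty, L2-normalized Gram matrices compared across batch elements, in a plain numpy sketch; the random activations replace the T(layer) accessor from the snippet.

import numpy as np

acts = np.random.rand(4, 8, 8, 16)             # fake batch of activations
flat = acts.reshape(4, -1, 16)
grams = np.einsum('bic,bid->bcd', flat, flat)  # per-image channel correlation matrices
grams /= np.linalg.norm(grams, axis=(1, 2), keepdims=True) + 1e-10
penalty = sum(np.sum(grams[i] * grams[j])
              for i in range(4) for j in range(4) if i != j) / 4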
def apply_transformation(self, structure): if structure.is_ordered: return structure species = [dict(sp) for sp in structure.species_and_occu] for sp in species: for (k, v) in sp.items(): old_occ = sp[k] new_occ = float(Fraction(old_occ).limit_denominator(self.max_denominator)) if self.fix_denominator: new_occ = (around((old_occ * self.max_denominator)) / self.max_denominator) if (round(abs((old_occ - new_occ)), 6) > self.tol): raise RuntimeError('Cannot discretize structure within tolerance!') sp[k] = new_occ return Structure(structure.lattice, species, structure.frac_coords)
Discretizes the site occupancies in the structure. Args: structure: disordered Structure to discretize occupancies Returns: A new disordered Structure with occupancies discretized
codesearchnet
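The occupancy rounding at the heart of the transformation, isolated; the occupancy and denominator values are made up.

from fractions import Fraction

old_occ = 0.3334
max_denominator = 5
new_occ = float(Fraction(old_occ).limit_denominator(max_denominator))
print(new_occ)                        # 0.3333... i.e. 1/3
print(abs(old_occ - new_occ) < 0.01)  # True -> within a typical tolerance, so the site is accepted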
def get_device_topology(self, id_or_uri): uri = (self._client.build_uri(id_or_uri) + '/deviceTopology') return self._client.get(uri)
Retrieves the topology information for the rack resource specified by ID or URI.

Args:
    id_or_uri: Can be either the resource ID or the resource URI.

Returns:
    dict: Device topology.
codesearchnet
def __init__(self, serial=None, **kwargs): self.__display = None serial = serial or getenvs('ATX_ADB_SERIALNO', 'ANDROID_SERIAL') self._host = kwargs.get('host') or getenvs( 'ATX_ADB_HOST', 'ANDROID_ADB_SERVER_HOST') or '127.0.0.1' self._port = int(kwargs.get('port') or getenvs( 'ATX_ADB_PORT', 'ANDROID_ADB_SERVER_PORT') or 5037) self._adb_client = adbkit.Client(self._host, self._port) self._adb_device = self._adb_client.device(serial) self._uiauto = uiautomator2.connect_usb(serial) if not self._uiauto.alive: self._uiauto.healthcheck(unlock=False) DeviceMixin.__init__(self) self._randid = base.id_generator(5) self.screen_rotation = None self.swipe = self._uiauto.swipe self.drag = self._uiauto.drag self.press = self._uiauto.press self.long_click = self._uiauto.long_click self.dump = self._uiauto.dump_hierarchy
Initialize AndroidDevice.

Args:
    serial (str): serial or wlan ip

Returns:
    AndroidDevice object

Raises:
    EnvironmentError
juraj-google-style
def __call__(self, dumper: 'Dumper', data: Any) -> yaml.MappingNode: logger.info('Representing {} of class {}'.format( data, self.class_.__name__)) if hasattr(data, 'yatiml_attributes'): logger.debug('Found yatiml_attributes()') attributes = data.yatiml_attributes() if attributes is None: raise RuntimeError(('{}.yatiml_attributes() returned None,' ' where a dict was expected.').format( self.class_.__name__)) else: logger.debug( 'No yatiml_attributes() found, using public attributes') argspec = inspect.getfullargspec(data.__init__) attribute_names = list(argspec.args[1:]) attrs = [(name, getattr(data, name)) for name in attribute_names if name != 'yatiml_extra'] if 'yatiml_extra' in attribute_names: if not hasattr(data, 'yatiml_extra'): raise RuntimeError( ('Class {} takes yatiml_extra but has ' ' no yatiml_extra attribute, and no ' ' yatiml_attributes().').format(self.class_.__name__)) attrs.extend(data.yatiml_extra.items()) attributes = yaml.comments.CommentedMap(attrs) represented = dumper.represent_mapping('tag:yaml.org,2002:map', attributes) cnode = Node(represented) self.__sweeten(dumper, self.class_, cnode) represented = cnode.yaml_node logger.debug('End representing {}'.format(data)) return represented
Represents the class as a MappingNode. Args: dumper: The dumper to use. data: The user-defined object to dump. Returns: A yaml.Node representing the object.
juraj-google-style
def assertShapeEqual(self, input_a, input_b, msg=None):
    if not isinstance(input_a, (np.ndarray, np.generic, tensor_lib.Tensor)):
        raise TypeError(f'input_a must be a Numpy ndarray, Numpy scalar, or a Tensor. Instead received {type(input_a)}')
    if not isinstance(input_b, (np.ndarray, np.generic, tensor_lib.Tensor)):
        raise TypeError(f'input_b must be a Numpy ndarray, Numpy scalar, or a Tensor. Instead received {type(input_b)}')
    shape_a = input_a.get_shape().as_list() if isinstance(input_a, tensor_lib.Tensor) else input_a.shape
    shape_b = input_b.get_shape().as_list() if isinstance(input_b, tensor_lib.Tensor) else input_b.shape
    self.assertAllEqual(shape_a, shape_b, msg=msg)
Asserts that two Numpy or TensorFlow objects have the same shape. For Tensors, this compares statically known shapes at compile time, not dynamic shapes at runtime. Args: input_a: A Numpy ndarray, Numpy scalar, or a Tensor. input_b: A Numpy ndarray, Numpy scalar, or a Tensor. msg: Optional message to report on failure. Raises: TypeError: If the arguments have the wrong type.
github-repos
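Typical use inside a tf.test.TestCase subclass, where this assertion is available; the test name and shapes here are arbitrary.

import numpy as np
import tensorflow as tf

class ShapesTest(tf.test.TestCase):

    def test_matmul_shape(self):
        a = tf.ones([2, 3])
        b = tf.ones([3, 4])
        self.assertShapeEqual(np.ones([2, 4]), tf.matmul(a, b))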
def recommend(self, limit=10): expected_list = [(arm_id, beta_dist.expected_value()) for (arm_id, beta_dist) in self.__beta_dist_dict.items()] expected_list = sorted(expected_list, key=(lambda x: x[1]), reverse=True) return expected_list[:limit]
List arms and their expected values.

Args:
    limit: Length of the list.

Returns:
    [Tuple(`Arms master id`, `expected value`)]
codesearchnet
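The ranking logic of recommend(), reproduced standalone; BetaDist is a hypothetical stand-in whose expected_value() returns alpha / (alpha + beta).

class BetaDist:
    def __init__(self, alpha, beta):
        self.alpha, self.beta = alpha, beta

    def expected_value(self):
        return self.alpha / (self.alpha + self.beta)

arms = {'arm_a': BetaDist(8, 2), 'arm_b': BetaDist(3, 7), 'arm_c': BetaDist(5, 5)}
ranking = sorted(((arm_id, d.expected_value()) for arm_id, d in arms.items()),
                 key=lambda x: x[1], reverse=True)
print(ranking[:2])  # [('arm_a', 0.8), ('arm_c', 0.5)]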