Columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes).
def _encode_value(self, value): if isinstance(value, (int, float, str, bool, datetime)): return value elif isinstance(value, list): return [self._encode_value(item) for item in value] elif isinstance(value, dict): result = {} for (key, item) in value.items(): result[key] = self._encode_value(item) return result else: return self._gridfs.put(Binary(pickle.dumps(value)), workflow_id=self._workflow_id)
Encodes the value such that it can be stored into MongoDB. Any primitive types are stored directly into MongoDB, while non-primitive types are pickled and stored as GridFS objects. The id pointing to a GridFS object replaces the original value. Args: value (object): The object that should be encoded for storing in MongoDB. Returns: object: The encoded value ready to be stored in MongoDB.
codesearchnet
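A standalone sketch of the same encoding pattern, with the GridFS call replaced by a plain pickle so it runs without a MongoDB connection (the Binary/GridFS pieces in the original come from pymongo and are omitted here):

import pickle
from datetime import datetime

PRIMITIVES = (int, float, str, bool, datetime)

def encode_value(value):
    # Primitives pass through; lists and dicts are encoded element by element.
    if isinstance(value, PRIMITIVES):
        return value
    if isinstance(value, list):
        return [encode_value(item) for item in value]
    if isinstance(value, dict):
        return {key: encode_value(item) for key, item in value.items()}
    # Anything else would be pickled into GridFS in the original; here it is just pickled.
    return pickle.dumps(value)

print(encode_value({"n": 3, "when": datetime(2020, 1, 1), "blob": {1, 2, 3}}))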
def NewFromJSON(data): s = Shake( id=data.get('id', None), name=data.get('name', None), url=data.get('url', None), thumbnail_url=data.get('thumbnail_url', None), description=data.get('description', None), type=data.get('type', None), created_at=data.get('created_at', None), updated_at=data.get('updated_at', None) ) if data.get('owner', None): s.owner = User.NewFromJSON(data.get('owner', None)) return s
Create a new Shake instance from a JSON dict. Args: data (dict): JSON dictionary representing a Shake. Returns: A Shake instance.
juraj-google-style
def reopen_encoded(fileobj, mode='r', fallback_encoding=None): encoding = determine_encoding(fileobj.name, fallback_encoding) fileobj.close() return open(fileobj.name, mode, encoding=encoding)
Makes sure that a file was opened with some valid encoding. Arguments: fileobj (file): The file-object. mode (str, optional): The mode in which to re-open the file. fallback_encoding (str, optional): The encoding in which to re-open the file if it does not specify an encoding itself. Returns: file: The re-opened file.
juraj-google-style
def cifar_generator(cifar_version, tmp_dir, training, how_many, start_from=0): if cifar_version == "cifar10": url = _CIFAR10_URL train_files = _CIFAR10_TRAIN_FILES test_files = _CIFAR10_TEST_FILES prefix = _CIFAR10_PREFIX image_size = _CIFAR10_IMAGE_SIZE label_key = "labels" elif cifar_version == "cifar100" or cifar_version == "cifar20": url = _CIFAR100_URL train_files = _CIFAR100_TRAIN_FILES test_files = _CIFAR100_TEST_FILES prefix = _CIFAR100_PREFIX image_size = _CIFAR100_IMAGE_SIZE if cifar_version == "cifar100": label_key = "fine_labels" else: label_key = "coarse_labels" _get_cifar(tmp_dir, url) data_files = train_files if training else test_files all_images, all_labels = [], [] for filename in data_files: path = os.path.join(tmp_dir, prefix, filename) with tf.gfile.Open(path, "rb") as f: if six.PY2: data = cPickle.load(f) else: data = cPickle.load(f, encoding="latin1") images = data["data"] num_images = images.shape[0] images = images.reshape((num_images, 3, image_size, image_size)) all_images.extend([ np.squeeze(images[j]).transpose((1, 2, 0)) for j in range(num_images) ]) labels = data[label_key] all_labels.extend([labels[j] for j in range(num_images)]) return image_utils.image_generator( all_images[start_from:start_from + how_many], all_labels[start_from:start_from + how_many])
Image generator for CIFAR-10 and 100. Args: cifar_version: string; one of "cifar10" or "cifar100" tmp_dir: path to temporary storage directory. training: a Boolean; if true, we use the train set, otherwise the test set. how_many: how many images and labels to generate. start_from: from which image to start. Returns: An instance of image_generator that produces CIFAR-10 images and labels.
juraj-google-style
def _create_RSA_private_key(self, bytes): try: private_key = serialization.load_pem_private_key( bytes, password=None, backend=default_backend() ) return private_key except Exception: private_key = serialization.load_der_private_key( bytes, password=None, backend=default_backend() ) return private_key
Instantiates an RSA key from bytes. Args: bytes (byte string): Bytes of RSA private key. Returns: private_key (cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey): RSA private key created from key bytes.
juraj-google-style
def diff_charsToLines(self, diffs, lineArray): for i in range(len(diffs)): text = [] for char in diffs[i][1]: text.append(lineArray[ord(char)]) diffs[i] = (diffs[i][0], "".join(text))
Rehydrate the text in a diff from a string of line hashes to real lines of text. Args: diffs: Array of diff tuples. lineArray: Array of unique strings.
juraj-google-style
def get_gutter_client(alias='default', cache=CLIENT_CACHE, **kwargs): from gutter.client.models import Manager if (not alias): return Manager(**kwargs) elif (alias not in cache): cache[alias] = Manager(**kwargs) return cache[alias]
Creates gutter clients and memoizes them in a registry for future quick access. Args: alias (str or None): Name of the client. Used for caching. If the alias is falsy, the cache is bypassed. cache (dict): Cache to store gutter managers in. **kwargs: kwargs to be passed to the Manager class. Returns (Manager): A gutter client.
codesearchnet
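The memoization pattern is easy to exercise without gutter itself; a sketch with a stand-in Manager class, assuming only that a falsy alias should bypass the cache:

CLIENT_CACHE = {}

class Manager:
    # Stand-in for gutter.client.models.Manager; construction is assumed to be expensive.
    def __init__(self, **kwargs):
        self.kwargs = kwargs

def get_client(alias="default", cache=CLIENT_CACHE, **kwargs):
    if not alias:
        return Manager(**kwargs)      # falsy alias: always a fresh, uncached instance
    if alias not in cache:
        cache[alias] = Manager(**kwargs)
    return cache[alias]

assert get_client("default") is get_client("default")
assert get_client(None) is not get_client(None)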
def __instancecheck__(cls, other): try: return bool( isinstance(other, cls.__type__) and cls(other) ) except ValueError: return False
Determine if an instance is of the sliced type and within bounds. Args: other: The instance to test. Returns: True if the object is both of the same type as sliced by the created class as well as within the bounds defined by the class.
juraj-google-style
async def verify_parent_task(chain, link): worker_type = get_worker_type(link.task) if (worker_type not in chain.context.config['valid_decision_worker_types']): raise CoTError('{} is not a valid decision workerType!'.format(worker_type)) if (chain is not link): path = link.get_artifact_full_path('public/task-graph.json') if (not os.path.exists(path)): raise CoTError("{} {}: {} doesn't exist!".format(link.name, link.task_id, path)) link.task_graph = load_json_or_yaml(path, is_path=True, exception=CoTError, message="Can't load {}! %(exc)s".format(path)) for target_link in chain.get_all_links_in_chain(): if ((target_link.parent_task_id == link.task_id) and (target_link.task_id != link.task_id) and (target_link.task_type not in PARENT_TASK_TYPES)): verify_link_in_task_graph(chain, link, target_link) try: (await verify_parent_task_definition(chain, link)) except (BaseDownloadError, KeyError) as e: raise CoTError(e)
Verify the parent task Link. Action task verification is currently in the same verification function as decision tasks, because sometimes we'll have an action task masquerading as a decision task, e.g. in templatized actions for release graphs. To make sure our guess of decision or action task isn't fatal, we call this function; this function uses ``is_action()`` to determine how to verify the task. Args: chain (ChainOfTrust): the chain we're operating on. link (LinkOfTrust): the task link we're checking. Raises: CoTError: on chain of trust verification error.
codesearchnet
def apply_strain(self, strain): s = (1 + np.array(strain)) * np.eye(3) self.lattice = Lattice(np.dot(self._lattice.matrix.T, s).T)
Apply a strain to the lattice. Args: strain (float or list): Amount of strain to apply. Can be a float, or a sequence of 3 numbers. E.g., 0.01 means all lattice vectors are increased by 1%. This is equivalent to calling modify_lattice with a lattice with lattice parameters that are 1% larger.
juraj-google-style
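The strain update is a plain linear map on the 3x3 lattice matrix; a numpy-only sketch (pymatgen's Lattice class is replaced here by a raw matrix):

import numpy as np

def strained_matrix(lattice_matrix, strain):
    # (1 + strain) scales the lattice vectors; strain may be a scalar or a length-3 sequence.
    s = (1 + np.array(strain)) * np.eye(3)
    return np.dot(np.asarray(lattice_matrix).T, s).T

cubic = np.eye(3) * 4.0
print(strained_matrix(cubic, 0.01))          # every lattice vector grows by 1%
print(strained_matrix(cubic, [0.01, 0, 0]))  # only the first vector grows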
def register_scenario(self, scenario_name, handler): if (scenario_name in self._known_scenarios): raise ArgumentError('Attempted to add the same scenario name twice', scenario_name=scenario_name, previous_handler=self._known_scenarios[scenario_name]) self._known_scenarios[scenario_name] = handler
Register a scenario handler for this object. Scenario handlers are callable functions with no positional arguments that can be called by name with the load_scenario function and should prepare the emulated object into a known state. The purpose of a scenario is to make it easy to get a device into a specific state for testing purposes that may otherwise be difficult or time consuming to prepare on the physical, non-emulated device. Args: scenario_name (str): The name of this scenario that can be passed to load_scenario later in order to invoke the scenario. handler (callable): A callable function that takes no positional arguments and can prepare this object into the given scenario state. It may take required or optional keyword arguments that may be passed to `load_scenario` if needed.
codesearchnet
def GetMessages(self, formatter_mediator, event): if self.DATA_TYPE != event.data_type: raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format( event.data_type)) event_values = event.CopyToDict() return self._FormatMessages( self.FORMAT_STRING, self.FORMAT_STRING_SHORT, event_values)
Determines the formatted message strings for an event object. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event (EventObject): event. Returns: tuple(str, str): formatted message string and short message string. Raises: WrongFormatter: if the event object cannot be formatted by the formatter.
juraj-google-style
def confirm_iam_role(self, account): try: iam = self.session.client('iam') rolearn = iam.get_role(RoleName=self.role_name)['Role']['Arn'] return rolearn except ClientError as e: if (e.response['Error']['Code'] == 'NoSuchEntity'): self.create_iam_role(account) else: raise except Exception as e: self.log.exception('Failed validating IAM role for VPC Flow Log Auditing for {}'.format(e))
Return the ARN of the IAM Role on the provided account as a string. If the role does not exist, it is created via `create_iam_role`. Args: account (:obj:`Account`): Account where to locate the role Returns: `str`: ARN of the IAM role
codesearchnet
def _update_object(object_key: str, event: Event): events_list_key = _keys.events_list(object_key) events_data_key = _keys.events_data(object_key) event_dict = deepcopy(event.config) event_dict.pop('id') DB.append_to_list(events_list_key, event.id, pipeline=True) DB.set_hash_value(events_data_key, event.id, json.dumps(event_dict), pipeline=True)
Update the events list and events data for the object. - Adds the event Id to the list of events for the object. - Adds the event data to the hash of object event data keyed by event id. Args: object_key (str): Key of the object being updated. event (Event): Event object
juraj-google-style
def region(self, start=0, end=None): if end is None: end = len(self.sequence) return '>{}\n{}'.format(self.id, self.sequence[start:end])
Returns a region of ``Sequence.sequence``, in FASTA format. If called without kwargs, the entire sequence will be returned. Args: start (int): Start position of the region to be returned. Default is 0. end (int): End position of the region to be returned. Negative values will function as they do when slicing strings. Returns: str: A region of ``Sequence.sequence``, in FASTA format
juraj-google-style
def cast_type(self, var, cast_type=None): if cast_type is None: cast_type = self.valid_values try: if cast_type == int: return int(var) elif cast_type == float: return float(var) elif cast_type == str: return str(var) elif isinstance(cast_type, list): return type(cast_type[0])(var) else: return None except ValueError: return None return var
Cast the variable var into the type cast_type. If cast_type is not provided, it defaults to self.valid_values. Args: var: variable to be cast cast_type: target type Returns: the variable var cast into type cast_type, or None if the cast fails
juraj-google-style
def start_automated_run(path, automated_run_id): with functions.DBContextManager(path) as session: automated_run = session.query(models.AutomatedRun).filter_by(id=automated_run_id).first() if (not automated_run): raise exceptions.UserError('Automated run {} does not exist'.format(automated_run_id)) automated_run.job_id = get_current_job().id automated_run.job_status = 'started' session.add(automated_run) session.commit() try: if (automated_run.category == 'bayes'): automatedruns.start_naive_bayes(automated_run, session, path) elif (automated_run.category == 'tpot'): automatedruns.start_tpot(automated_run, session, path) elif (automated_run.category == 'greedy_ensemble_search'): automatedruns.start_greedy_ensemble_search(automated_run, session, path) else: raise Exception('Something went wrong. Invalid category for automated run') automated_run.job_status = 'finished' session.add(automated_run) session.commit() except: session.rollback() automated_run.job_status = 'errored' automated_run.description['error_type'] = repr(sys.exc_info()[0]) automated_run.description['error_value'] = repr(sys.exc_info()[1]) automated_run.description['error_traceback'] = traceback.format_exception(*sys.exc_info()) session.add(automated_run) session.commit() raise
Starts automated run. This will automatically create base learners until the run finishes or errors out. Args: path (str): Path to Xcessiv notebook automated_run_id (str): Automated Run ID
codesearchnet
def ias53(msg): d = hex2bin(data(msg)) if d[12] == '0': return None ias = bin2int(d[13:23]) return ias
Indicated airspeed, BDS 5,3 message Args: msg (String): 28 bytes hexadecimal message Returns: int: indicated airspeed in knots
juraj-google-style
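A self-contained look at the bit slicing it performs, with the hex2bin/bin2int helpers written out (in pyModeS they live in a shared util module, so their exact form here is an assumption); the indices match the ones used above:

def hex2bin(hexstr):
    # 4 bits per hex character, zero-padded on the left.
    return bin(int(hexstr, 16))[2:].zfill(len(hexstr) * 4)

def bin2int(binstr):
    return int(binstr, 2)

# Hand-built 56-bit data field: status bit at index 12 set, 250 kt in bits 13..22.
d = "0" * 12 + "1" + format(250, "010b") + "0" * 33
assert len(d) == 56
print(d[12], bin2int(d[13:23]))   # -> 1 250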
def _non_batched_matmul(lhs, rhs, lhs_contraction, rhs_contraction): return math_ops.tensordot(lhs, rhs, axes=(list(lhs_contraction), list(rhs_contraction)))
Compute the non-batched matrix multiplication. If it is the general non-batched/single-batched matrix multiplication, use the highly optimized kernel `tf.tensordot` to handle it. Args: lhs: an array (the left-hand side matrix/vector to be multiplied) rhs: an array (the right-hand side matrix/vector to be multiplied) lhs_contraction: Sequence[int] (the contraction dimensions of lhs) rhs_contraction: Sequence[int] (the contraction dimensions of rhs) Returns: An array that contains the result.
github-repos
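numpy's tensordot follows the same contraction-axes convention, so the call above can be sanity-checked without TensorFlow:

import numpy as np

lhs = np.arange(6).reshape(2, 3)
rhs = np.arange(12).reshape(3, 4)

# Contract axis 1 of lhs with axis 0 of rhs: ordinary matrix multiplication.
out = np.tensordot(lhs, rhs, axes=([1], [0]))
assert np.array_equal(out, lhs @ rhs)
print(out.shape)   # (2, 4)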
def update(self, data): updated = self.set_property('state', data['state']) updated |= self.set_property('notes', sorted(data['notes'] or [])) updated |= self.set_property('last_notice', data['last_notice']) if updated: self.set_property('last_change', datetime.now()) return updated
Updates the object information based on live data, if there were any changes made. Any changes will be automatically applied to the object, but will not be automatically persisted. You must manually call `db.session.add(instance)` on the object. Args: data (:obj:): AWS API Resource object fetched from AWS API Returns: `bool`
juraj-google-style
def NewOutputModule(cls, name, output_mediator): output_class = cls.GetOutputClass(name) return output_class(output_mediator)
Creates a new output module object for the specified output format. Args: name (str): name of the output module. output_mediator (OutputMediator): output mediator. Returns: OutputModule: output module. Raises: KeyError: if there is no output class found with the supplied name. ValueError: if name is not a string.
juraj-google-style
def createSimpleResourceMap(ore_pid, scimeta_pid, sciobj_pid_list): ore = ResourceMap() ore.initialize(ore_pid) ore.addMetadataDocument(scimeta_pid) ore.addDataDocuments(sciobj_pid_list, scimeta_pid) return ore
Create a simple OAI-ORE Resource Map with one Science Metadata document and any number of Science Data objects. This creates a document that establishes an association between a Science Metadata object and any number of Science Data objects. The Science Metadata object contains information that is indexed by DataONE, allowing both the Science Metadata and the Science Data objects to be discoverable in DataONE Search. In search results, the objects will appear together and can be downloaded as a single package. Args: ore_pid: str Persistent Identifier (PID) to use for the new Resource Map scimeta_pid: str PID for an object that will be listed as the Science Metadata that is describing the Science Data objects. sciobj_pid_list: list of str List of PIDs that will be listed as the Science Data objects that are being described by the Science Metadata. Returns: ResourceMap : OAI-ORE Resource Map
codesearchnet
def get_path_attribute(obj, path): path = path.replace('original.', '').replace('current_user.', '') attr_parts = path.split('.') res = obj try: for part in attr_parts: try: res = getattr(res, part) except AttributeError: res = getattr(res.get(), part) except (peewee.DoesNotExist, AttributeError): return None return res
Given a path like `related_record.related_record2.id`, this method will be able to pull the value of ID from that object, returning None if it doesn't exist. Args: obj (fleaker.db.Model): The object to attempt to pull the value from path (str): The path to follow to pull the value from Returns: (int|str|None): The value at the end of the path. None if it doesn't exist at any point in the path.
codesearchnet
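The heart of this is dotted-path traversal; a framework-free sketch that follows getattr down the path and returns None at the first missing hop (the peewee .get() fallback in the original is omitted):

def follow_path(obj, path):
    # Walk "a.b.c" one attribute at a time, bailing out with None if any hop is missing.
    current = obj
    for part in path.split("."):
        current = getattr(current, part, None)
        if current is None:
            return None
    return current

class Record:
    pass

outer = Record()
outer.related = Record()
outer.related.id = 42
print(follow_path(outer, "related.id"))       # 42
print(follow_path(outer, "related.missing"))  # None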
def _ParseDistributedTrackingIdentifier( self, parser_mediator, uuid_object, origin): if uuid_object.version == 1: event_data = windows_events.WindowsDistributedLinkTrackingEventData( uuid_object, origin) date_time = dfdatetime_uuid_time.UUIDTime(timestamp=uuid_object.time) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data) return '{{{0!s}}}'.format(uuid_object)
Extracts data from a Distributed Tracking identifier. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. uuid_object (uuid.UUID): UUID of the Distributed Tracking identifier. origin (str): origin of the event (event source). Returns: str: UUID string of the Distributed Tracking identifier.
juraj-google-style
def set_peer_link(self, value=None, default=False, disable=False): return self._configure_mlag('peer-link', value, default, disable)
Configures the mlag peer-link value Args: value (str): The value to configure the peer-link default (bool): Configures the peer-link using the default keyword disable (bool): Negates the peer-link using the no keyword Returns: bool: Returns True if the commands complete successfully
codesearchnet
def prepare_data_index(self, silence_percentage, unknown_percentage, wanted_words, validation_percentage, testing_percentage): random.seed(RANDOM_SEED) wanted_words_index = {} for index, wanted_word in enumerate(wanted_words): wanted_words_index[wanted_word] = index + 2 self.data_index = {'validation': [], 'testing': [], 'training': []} unknown_index = {'validation': [], 'testing': [], 'training': []} all_words = {} search_path = os.path.join(self.data_dir, '*', '*.wav') for wav_path in gfile.Glob(search_path): _, word = os.path.split(os.path.dirname(wav_path)) word = word.lower() if word == BACKGROUND_NOISE_DIR_NAME: continue all_words[word] = True set_index = which_set(wav_path, validation_percentage, testing_percentage) if word in wanted_words_index: self.data_index[set_index].append({'label': word, 'file': wav_path}) else: unknown_index[set_index].append({'label': word, 'file': wav_path}) if not all_words: raise Exception('No .wavs found at ' + search_path) for index, wanted_word in enumerate(wanted_words): if wanted_word not in all_words: raise Exception('Expected to find ' + wanted_word + ' in labels but only found ' + ', '.join(all_words.keys())) silence_wav_path = self.data_index['training'][0]['file'] for set_index in ['validation', 'testing', 'training']: set_size = len(self.data_index[set_index]) silence_size = int(math.ceil(set_size * silence_percentage / 100)) for _ in range(silence_size): self.data_index[set_index].append({'label': SILENCE_LABEL, 'file': silence_wav_path}) random.shuffle(unknown_index[set_index]) unknown_size = int(math.ceil(set_size * unknown_percentage / 100)) self.data_index[set_index].extend(unknown_index[set_index][:unknown_size]) for set_index in ['validation', 'testing', 'training']: random.shuffle(self.data_index[set_index]) self.words_list = prepare_words_list(wanted_words) self.word_to_index = {} for word in all_words: if word in wanted_words_index: self.word_to_index[word] = wanted_words_index[word] else: self.word_to_index[word] = UNKNOWN_WORD_INDEX self.word_to_index[SILENCE_LABEL] = SILENCE_INDEX
Prepares a list of the samples organized by set and label. The training loop needs a list of all the available data, organized by which partition it should belong to, and with ground truth labels attached. This function analyzes the folders below the `data_dir`, figures out the right labels for each file based on the name of the subdirectory it belongs to, and uses a stable hash to assign it to a data set partition. Args: silence_percentage: How much of the resulting data should be background. unknown_percentage: How much should be audio outside the wanted classes. wanted_words: Labels of the classes we want to be able to recognize. validation_percentage: How much of the data set to use for validation. testing_percentage: How much of the data set to use for testing. Returns: Dictionary containing a list of file information for each set partition, and a lookup map for each class to determine its numeric index. Raises: Exception: If expected files are not found.
github-repos
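The partitioning above leans on which_set, a stable hash of each filename; a simplified sketch of that idea (the real helper also strips a _nohash_ suffix and scales the hash differently, so treat this as an approximation):

import hashlib

def which_set(filename, validation_percentage, testing_percentage):
    # Hash the name so a file always lands in the same partition, no matter
    # how many other files are added to the dataset later.
    bucket = int(hashlib.sha1(filename.encode("utf-8")).hexdigest(), 16) % 100
    if bucket < validation_percentage:
        return "validation"
    if bucket < validation_percentage + testing_percentage:
        return "testing"
    return "training"

print(which_set("speaker_01_yes.wav", 10, 10))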
def create_image_streamer_client(self): image_streamer = ImageStreamerClient(self.__image_streamer_ip, self.__connection.get_session_id(), self.__connection._apiVersion, self.__connection._sslBundle) return image_streamer
Create the Image Streamer API Client. Returns: ImageStreamerClient:
codesearchnet
def zip_columns(columns): weld_obj = WeldObject(encoder_, decoder_) column_vars = [] for column in columns: col_var = weld_obj.update(column) if isinstance(column, WeldObject): col_var = column.obj_id weld_obj.dependencies[col_var] = column column_vars.append(col_var) arrays = ", ".join(column_vars) weld_template = "zip(%(array)s)" weld_obj.weld_code = weld_template % { "array": arrays, } return weld_obj
Zip together multiple columns. Args: columns (WeldObject / Numpy.ndarray): list of columns Returns: A WeldObject representing this computation
juraj-google-style
def CalculateHashes(self, base_path_specs, output_writer): for base_path_spec in base_path_specs: file_system = resolver.Resolver.OpenFileSystem(base_path_spec) file_entry = resolver.Resolver.OpenFileEntry(base_path_spec) if (file_entry is None): logging.warning('Unable to open base path specification:\n{0:s}'.format(base_path_spec.comparable)) continue self._CalculateHashesFileEntry(file_system, file_entry, '', output_writer)
Recursively calculates hashes starting with the base path specifications. Args: base_path_specs (list[dfvfs.PathSpec]): source path specifications. output_writer (StdoutWriter): output writer.
codesearchnet
def recipe_dcm_to_bigquery(config, auth_read, auth_write, account, report_id, report_name, dataset, table, is_incremental_load): dcm(config, {'auth': auth_read, 'report': {'account': account, 'report_id': report_id, 'name': report_name}, 'out': {'bigquery': {'auth': auth_write, 'dataset': dataset, 'table': table, 'header': True, 'is_incremental_load': is_incremental_load}}})
Move existing CM report into a BigQuery table. Args: auth_read (authentication) - Credentials used for reading data. auth_write (authentication) - Credentials used for writing data. account (integer) - CM network id. report_id (integer) - CM report id, empty if using name . report_name (string) - CM report name, empty if using id instead. dataset (string) - Dataset to be written to in BigQuery. table (string) - Table to be written to in BigQuery. is_incremental_load (boolean) - Clear data in destination table during this report's time period, then append report data to existing table.
github-repos
def to_numpy(self, dtype=None, copy=False): return self._default_to_pandas("to_numpy", dtype=dtype, copy=copy)
Convert the DataFrame to a NumPy array. Args: dtype: The dtype to pass to numpy.asarray() copy: Whether to ensure that the returned value is not a view on another array. Returns: A numpy array.
juraj-google-style
def create_default_views(self, create_datastore_views=False): package = deepcopy(self.data) if self.resources: package['resources'] = self._convert_hdxobjects(self.resources) data = {'package': package, 'create_datastore_views': create_datastore_views} self._write_to_hdx('create_default_views', data, 'package')
Create default resource views for all resources in dataset Args: create_datastore_views (bool): Whether to try to create resource views that point to the datastore Returns: None
juraj-google-style
def reorder_resource_views(self, resource_views): if not isinstance(resource_views, list): raise HDXError('ResourceViews should be a list!') ids = list() for resource_view in resource_views: if isinstance(resource_view, str): resource_view_id = resource_view else: resource_view_id = resource_view['id'] if is_valid_uuid(resource_view_id) is False: raise HDXError('%s is not a valid resource view id!' % resource_view) ids.append(resource_view_id) _, result = self._read_from_hdx('resource view', self.data['id'], 'id', ResourceView.actions()['reorder'], order=ids)
Order resource views in resource. Args: resource_views (List[Union[ResourceView,Dict,str]]): A list of either resource view ids or resource views metadata from ResourceView objects or dictionaries Returns: None
juraj-google-style
def incr(self, counter_name, delta=1): self._state.counters_map.increment(counter_name, delta)
Changes counter by delta. Args: counter_name: the name of the counter to change. str. delta: int.
codesearchnet
def in_sorted(values, value): index = bisect.bisect_left(values, value) if index >= len(values): return False return values[index] == value
Checks if a value is in a sorted list. Uses the :mod:`bisect` builtin to find the insertion point for ``value``. Args: values (List[int]): Integers sorted in ascending order. value (int): Value to check if contained in ``values``. Returns: bool: Indicating if the value is contained.
juraj-google-style
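Usage is direct: bisect_left finds the insertion point, so membership in a sorted list reduces to one index check and one comparison:

import bisect

def in_sorted(values, value):
    index = bisect.bisect_left(values, value)
    return index < len(values) and values[index] == value

values = [1, 3, 5, 7, 11]
print(in_sorted(values, 7))    # True
print(in_sorted(values, 8))    # False
print(in_sorted(values, 99))   # False (insertion point is past the end)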
def configure_bigchaindb(command): @functools.wraps(command) def configure(args): config_from_cmdline = None try: if (args.log_level is not None): config_from_cmdline = {'log': {'level_console': args.log_level, 'level_logfile': args.log_level}, 'server': {'loglevel': args.log_level}} except AttributeError: pass bigchaindb.config_utils.autoconfigure(filename=args.config, config=config_from_cmdline, force=True) command(args) return configure
Decorator to be used by command line functions, such that the configuration of bigchaindb is performed before the execution of the command. Args: command: The command to decorate. Returns: The command wrapper function.
codesearchnet
def _contains_op_with_name_and_attribute(self, nodes: Iterable[node_def_pb2.NodeDef], op_name: str, attr_name: str, attr_val: _AttrValType, node_name: str='') -> bool: def match_node_name(name): if not node_name: return True compiled_regex = re.compile(node_name) match = re.fullmatch(compiled_regex, name) return match is not None return any((node.attr.get(attr_name) == attr_val for node in nodes if node.op == op_name and match_node_name(node.name)))
Determine whether there is a node whose operation name matches `op_name`. If `attr_name` is given, additionally check if the `attr_val` matches with the attribute value of the op. Args: nodes: Iterable of NodeDefs. op_name: Name of the op to match. attr_name: Name of the attribute of the op to match. attr_val: Value of the attr_name to check. node_name: Name of the node to match. Accepts regex2 format. Returns: True if there exists a node whose name matches `op_name` and 'attr_val' if 'attr_name' is given.
github-repos
def unparse(node, indentation=None, include_encoding_marker=True): del indentation if not isinstance(node, (list, tuple)): node = (node,) codes = [] if include_encoding_marker: codes.append('# coding=utf-8') for n in node: if isinstance(n, gast.AST): ast_n = gast.gast_to_ast(n) else: ast_n = n if astunparse is ast: ast.fix_missing_locations(ast_n) codes.append(astunparse.unparse(ast_n).strip()) return '\n'.join(codes)
Returns the source code of given AST. Args: node: The code to compile, as an AST object. indentation: Unused, deprecated. The returning code will always be indented at 4 spaces. include_encoding_marker: Bool, whether to include a comment on the first line to explicitly specify UTF-8 encoding. Returns: code: The source code generated from the AST object source_mapping: A mapping between the user and AutoGraph generated code.
github-repos
def has_implicit_access_to_dashboard(user, obj): request = get_request_or_stub() decoded_jwt = get_decoded_jwt_from_request(request) return request_user_has_implicit_access_via_jwt(decoded_jwt, ENTERPRISE_DASHBOARD_ADMIN_ROLE)
Check that if request user has implicit access to `ENTERPRISE_DASHBOARD_ADMIN_ROLE` feature role. Returns: boolean: whether the request user has access or not
codesearchnet
def max_steps_per_epoch(): return _MAX_STEPS_PER_EPOCH
Get the maximum number of steps for any call to fit/evaluate/predict. Retrieves the limit on the number of steps per epoch set by `keras.config.set_max_steps_per_epoch` or the `KERAS_MAX_STEPS_PER_EPOCH` environment variable. Returns: The integer limit on the number of steps per epoch, or `None` if no limit is applied.
github-repos
def to_qasm(self, header: Optional[str] = None, precision: int = 10, qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT, ) -> str: return str(self._to_qasm_output(header, precision, qubit_order))
Returns QASM equivalent to the circuit. Args: header: A multi-line string that is placed in a comment at the top of the QASM. Defaults to a cirq version specifier. precision: Number of digits to use when representing numbers. qubit_order: Determines how qubits are ordered in the QASM register.
juraj-google-style
def _decode_filename(base_filename, problem_name, decode_hp): if (decode_hp.shards > 1): base_filename = _add_shard_to_filename(base_filename, decode_hp) if ('beam{beam}.alpha{alpha}.decodes'.format(beam=str(decode_hp.beam_size), alpha=str(decode_hp.alpha)) in base_filename): return base_filename else: return '{base}.{model}.{hp}.{problem}.beam{beam}.alpha{alpha}.decodes'.format(base=base_filename, model=FLAGS.model, hp=FLAGS.hparams_set, problem=problem_name, beam=str(decode_hp.beam_size), alpha=str(decode_hp.alpha))
Generates decode filename. Args: base_filename: A string, base of the decode filename. problem_name: A string, name of the problem. decode_hp: HParams for decoding. Returns: A string, produced decode filename.
codesearchnet
def cipher(self): if (self.offset is False): self.offset = randrange(5, 25) logging.info('Random offset selected: {0}'.format(self.offset)) logging.debug('Offset set: {0}'.format(self.offset)) ciphered_message_list = list(self.message) for (i, letter) in enumerate(ciphered_message_list): if letter.isalpha(): if letter.isupper(): alphabet = [character.upper() for character in self.alphabet] else: alphabet = self.alphabet logging.debug('Letter: {0}'.format(letter)) logging.debug('Alphabet: {0}'.format(alphabet)) value = alphabet.index(letter) cipher_value = (value + self.offset) if ((cipher_value > 25) or (cipher_value < 0)): cipher_value = (cipher_value % 26) logging.debug('Cipher value: {0}'.format(cipher_value)) ciphered_message_list[i] = alphabet[cipher_value] logging.debug('Ciphered letter: {0}'.format(letter)) self.message = ''.join(ciphered_message_list) return self.message
Applies the Caesar shift cipher. Based on the attributes of the object, applies the Caesar shift cipher to the message attribute. Accepts positive and negative integers as offsets. Required attributes: message offset Returns: String with cipher applied.
codesearchnet
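Stripped of logging and the random-offset branch, the shift itself is a few lines; a minimal sketch over ASCII letters:

import string

def caesar(message, offset):
    out = []
    for ch in message:
        if ch.isalpha():
            alphabet = string.ascii_uppercase if ch.isupper() else string.ascii_lowercase
            out.append(alphabet[(alphabet.index(ch) + offset) % 26])
        else:
            out.append(ch)
    return "".join(out)

print(caesar("Attack at dawn!", 3))    # Dwwdfn dw gdzq!
print(caesar("Dwwdfn dw gdzq!", -3))   # round-trips back to the original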
def solve(A, b): A = asarray(A, float) b = asarray(b, float) if A.shape[0] == 1: with errstate(divide="ignore"): A_ = array([[1.0 / A[0, 0]]]) if not isfinite(A_[0, 0]): raise LinAlgError("Division error.") return dot(A_, b) elif A.shape[0] == 2: a = A[0, 0] b_ = A[0, 1] c = A[1, 0] d = A[1, 1] A_ = array([[d, -b_], [-c, a]]) with errstate(divide="ignore"): A_ /= a * d - b_ * c if not npy_all(isfinite(A_)): raise LinAlgError("Division error.") return dot(A_, b) return _solve(A, b)
r"""Solve for the linear equations :math:`\mathrm A \mathbf x = \mathbf b`. Args: A (array_like): Coefficient matrix. b (array_like): Ordinate values. Returns: :class:`numpy.ndarray`: Solution ``x``.
juraj-google-style
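The 2x2 branch is the closed-form adjugate inverse, A^-1 = [[d, -b], [-c, a]] / (a*d - b*c); a quick numpy check against the general solver:

import numpy as np

A = np.array([[3.0, 1.0],
              [2.0, 4.0]])
rhs = np.array([5.0, 6.0])

a, b, c, d = A[0, 0], A[0, 1], A[1, 0], A[1, 1]
A_inv = np.array([[d, -b], [-c, a]]) / (a * d - b * c)   # adjugate over determinant

assert np.allclose(A_inv @ rhs, np.linalg.solve(A, rhs))
print(A_inv @ rhs)   # [1.4 0.8]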
class TFTopPLogitsWarper(TFLogitsWarper): def __init__(self, top_p: float, filter_value: float=-float('Inf'), min_tokens_to_keep: int=1): if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0): raise ValueError(f'`top_p` has to be a float > 0 and < 1, but is {top_p}') if not isinstance(min_tokens_to_keep, int) or min_tokens_to_keep < 1: raise ValueError(f'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}') self.top_p = top_p self.filter_value = filter_value self.min_tokens_to_keep = min_tokens_to_keep def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: topk_scores, topk_indices = tf.math.top_k(scores, scores.shape[-1]) mask_scores = tf.fill(scores.shape, self.filter_value) cumulative_probs = tf.math.cumsum(stable_softmax(topk_scores, axis=-1), axis=-1) score_mask = cumulative_probs < self.top_p score_mask = tf.concat((tf.ones([score_mask.shape[0], 1], dtype=tf.bool), score_mask[:, :-1]), axis=-1) score_mask = tf.concat((tf.ones([score_mask.shape[0], self.min_tokens_to_keep], dtype=tf.bool), score_mask[:, self.min_tokens_to_keep:]), axis=-1) topk_next_scores = tf.where(score_mask, topk_scores, mask_scores) scatter_rows = tf.tile(tf.expand_dims(tf.range(topk_indices.shape[0]), axis=-1), [1, topk_indices.shape[-1]]) scatter_indices = tf.stack((scatter_rows, topk_indices), axis=-1) next_scores = tf.scatter_nd(scatter_indices, topk_next_scores, shape=topk_next_scores.shape) return next_scores
[`TFLogitsWarper`] that performs top-p, i.e. restricting to top tokens summing to <= prob_cut_off. Args: top_p (`float`): If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or higher are kept for generation. filter_value (`float`, *optional*, defaults to -inf): All filtered values will be set to this float value. min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimum number of tokens that cannot be filtered.
github-repos
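The filtering rule (sort, cumulative softmax, keep the smallest prefix whose mass reaches top_p, include the token that crosses the threshold, always keep a minimum number) is framework-independent; a hedged numpy sketch for a single row of scores:

import numpy as np

def top_p_filter(scores, top_p=0.9, filter_value=-np.inf, min_tokens_to_keep=1):
    order = np.argsort(scores)[::-1]                     # highest score first
    probs = np.exp(scores[order] - scores[order].max())
    probs /= probs.sum()
    cum = np.cumsum(probs)
    # Keep token i if the mass before it is still under top_p, so the token
    # that crosses the threshold stays in; then force the minimum to keep.
    keep = np.concatenate(([True], cum[:-1] < top_p))
    keep[:min_tokens_to_keep] = True
    filtered = np.full(scores.shape, filter_value, dtype=float)
    filtered[order[keep]] = scores[order[keep]]
    return filtered

print(top_p_filter(np.array([2.0, 1.0, 0.5, -1.0]), top_p=0.8))  # keeps the top two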
def get_function_from_signature(self, function_signature): return next((f for f in self.functions if f.full_name == function_signature), None)
Return a function from a signature Args: function_signature (str): signature of the function (without return statement) Returns: Function
juraj-google-style
def has_values(o): try: next(o) return True except StopIteration: return False
Converts iterator to a boolean. Destroys iterator but returns True if at least one value is present. Args: * o: An iterator instance. Returns: * True if at least one instance or False if none.
github-repos
def _to_components(self, value): raise NotImplementedError('%s._to_components()' % type(self).__name__)
Encodes `value` as a nested structure of `Tensor` or `CompositeTensor`. Args: value: A value compatible with this `TypeSpec`. (Caller is responsible for ensuring compatibility.) Returns: A nested structure of `tf.Tensor` or `tf.CompositeTensor` compatible with `self._component_specs`, which can be used to reconstruct `value`.
github-repos
def store(self, store=None, usage='both', mech=None, overwrite=False, set_default=False): if (store is None): if (rcred_rfc5588 is None): raise NotImplementedError('Your GSSAPI implementation does not have support for RFC 5588') return rcred_rfc5588.store_cred(self, usage, mech, overwrite, set_default) else: if (rcred_cred_store is None): raise NotImplementedError('Your GSSAPI implementation does not have support for manipulating credential stores directly') store = _encode_dict(store) return rcred_cred_store.store_cred_into(store, self, usage, mech, overwrite, set_default)
Store these credentials into the given store This method stores the current credentials into the specified credentials store. If the default store is used, support for :rfc:`5588` is required. Otherwise, support for the credentials store extension is required. :requires-ext:`rfc5588` or :requires-ext:`cred_store` Args: store (dict): the store into which to store the credentials, or None for the default store. usage (str): the usage to store the credentials with -- either 'both', 'initiate', or 'accept' mech (OID): the :class:`MechType` to associate with the stored credentials overwrite (bool): whether or not to overwrite existing credentials stored with the same name, etc set_default (bool): whether or not to set these credentials as the default credentials for the given store. Returns: StoreCredResult: the results of the credential storing operation Raises: GSSError ExpiredCredentialsError MissingCredentialsError OperationUnavailableError DuplicateCredentialsElementError
codesearchnet
def get_name_servers(self, id_or_uri): uri = (self._client.build_uri(id_or_uri) + '/nameServers') return self._client.get(uri)
Gets the named servers for an interconnect. Args: id_or_uri: Can be either the interconnect id or the interconnect uri. Returns: dict: the name servers for an interconnect.
codesearchnet
def user_lists(self, username, member_type='USER'): return self.client.service.getUserLists(username, member_type, self.proxy_id)
Look up all the lists that the user is a member of. Args: username (str): The MIT username of the user member_type(str): The type of user, "USER" or "STRING" Returns: list of strings: names of the lists that this user is a member of
codesearchnet
def dumps(messages): serialized_messages = [] try: for message in messages: message_dict = message._dump() serialized_messages.append(message_dict) except AttributeError: _log.error("Improper object for messages serialization.") raise TypeError("Message have to be instance of Message class or subclass.") return json.dumps(serialized_messages, sort_keys=True)
Serialize messages to a JSON formatted str Args: messages (list): The list of messages to serialize. Each message in the list is an instance of a Message subclass. Returns: str: Serialized messages. Raises: TypeError: If at least one message is not an instance of the Message class or a subclass.
juraj-google-style
def decode(value, strip=False): if value is None: return None if isinstance(value, bytes) and not isinstance(value, unicode): value = value.decode("utf-8") if strip: return unicode(value).strip() return unicode(value)
Python 2/3 friendly decoding of output. Args: value (str | unicode | bytes | None): The value to decode. strip (bool): If True, `strip()` the returned string. (Default value = False) Returns: str: Decoded value, if applicable.
juraj-google-style
def set_hostname(hostname): with salt.utils.winapi.Com(): conn = wmi.WMI() comp = conn.Win32_ComputerSystem()[0] return comp.Rename(Name=hostname)
Set the hostname of the windows minion, requires a restart before this will be updated. .. versionadded:: 2016.3.0 Args: hostname (str): The hostname to set Returns: bool: ``True`` if successful, otherwise ``False`` CLI Example: .. code-block:: bash salt 'minion-id' system.set_hostname newhostname
juraj-google-style
def train_model(samples_path: str, labels_path: str, model_state_output_path: str): samples = pandas.read_csv(samples_path) labels = pandas.read_csv(labels_path) xgb = xgboost.XGBClassifier(max_depth=3) xgb.fit(samples, labels) xgb.save_model(model_state_output_path) return xgb
Function to train the XGBoost model. Args: samples_path: path to csv file containing the training data labels_path: path to csv file containing the labels for the training data model_state_output_path: Path to store the trained model
github-repos
def get_function_args_defaults(f): signature = get_signature(f) parameter = inspect.Parameter _SUPPORTED_ARG_TYPES = [parameter.POSITIONAL_ONLY, parameter.POSITIONAL_OR_KEYWORD] args = [name for name, p in signature.parameters.items() if p.kind in _SUPPORTED_ARG_TYPES] defaults = [p.default for p in signature.parameters.values() if p.kind in _SUPPORTED_ARG_TYPES and p.default is not p.empty] return (args, defaults)
Returns the function arguments of a given function. Returns: (args: List[str], defaults: List[Any]). The first list names the arguments of the method and the second one has the values of the default arguments. This is similar to ``inspect.getfullargspec()``'s results, except it doesn't include bound arguments and may follow function wrappers.
github-repos
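A quick check of what this returns, using only the standard library's inspect.signature (Beam's get_signature wrapper is assumed to behave the same way):

import inspect

def args_and_defaults(f):
    kinds = (inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD)
    params = inspect.signature(f).parameters.values()
    args = [p.name for p in params if p.kind in kinds]
    defaults = [p.default for p in params if p.kind in kinds and p.default is not p.empty]
    return args, defaults

def example(a, b, c=3, *, d=4):
    return a + b + c + d

print(args_and_defaults(example))   # (['a', 'b', 'c'], [3]); keyword-only 'd' is excluded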
def trainer_results(trainer, mean=0, std=1, title='', show=True, save=True): return plot_network_results(network=trainer.module, ds=trainer.ds, mean=mean, std=std, title=title, show=show, save=save)
Plot the performance of the Network and SupervisedDataSet in a pybrain Trainer DataSet target and output values are denormalized before plotting with: output * std + mean Which inverses the normalization (output - mean) / std Args: trainer (Trainer): a pybrain Trainer instance containing a valid Network and DataSet ds (DataSet): a pybrain DataSet to override the one contained in `trainer`. Required if trainer is a Network instance rather than a Trainer instance. mean (float): mean of the denormalized dataset (default: 0) Only affects the scale of the plot std (float): std (standard deviation) of the denormalized dataset (default: 1) title (str): title to display on the plot. Returns: 3-tuple: (trainer, mean, std), A trainer/dataset along with denormalization info
codesearchnet
def run_experiment(hparams): estimator = train_and_maybe_evaluate(hparams) schema = taxi.read_schema(hparams.schema_file) tf_transform_output = tft.TFTransformOutput(hparams.tf_transform_dir) eval_model_dir = os.path.join(hparams.output_dir, EVAL_MODEL_DIR) receiver_fn = lambda: model.eval_input_receiver_fn(tf_transform_output, schema) tfma.export.export_eval_savedmodel(estimator=estimator, export_dir_base=eval_model_dir, eval_input_receiver_fn=receiver_fn)
Train the model then export it for tf.model_analysis evaluation. Args: hparams: Holds hyperparameters used to train the model as name/value pairs.
github-repos
def loadnetcdf(filename, copy=True): filename = str(Path(filename).expanduser()) if copy: dataarray = xr.open_dataarray(filename).copy() else: dataarray = xr.open_dataarray(filename, chunks={}) if dataarray.name is None: dataarray.name = filename.rstrip('.nc') for key, val in dataarray.coords.items(): if val.dtype.kind == 'S': dataarray[key] = val.astype('U') elif val.dtype == np.int32: dataarray[key] = val.astype('i8') return dataarray
Load a dataarray from a NetCDF file. Args: filename (str): Filename (*.nc). copy (bool): If True, dataarray is copied in memory. Default is True. Returns: dataarray (xarray.DataArray): Loaded dataarray.
juraj-google-style
def load_data_split(proc_data_dir): ds_train = Dataset.load(path.join(proc_data_dir, 'train.bin')) ds_val = Dataset.load(path.join(proc_data_dir, 'val.bin')) ds_test = Dataset.load(path.join(proc_data_dir, 'test.bin')) return (ds_train, ds_val, ds_test)
Loads a split dataset Args: proc_data_dir: Directory with the split and processed data Returns: (Training Data, Validation Data, Test Data)
codesearchnet
def _prefix_from_ip_int(self, ip_int): prefixlen = self._max_prefixlen while prefixlen: if ip_int & 1: break ip_int >>= 1 prefixlen -= 1 if ip_int == (1 << prefixlen) - 1: return prefixlen else: raise NetmaskValueError('Bit pattern does not match /1*0*/')
Return prefix length from a bitwise netmask. Args: ip_int: An integer, the netmask in expanded bitwise format. Returns: An integer, the prefix length. Raises: NetmaskValueError: If the input is not a valid netmask.
juraj-google-style
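The loop strips trailing zero bits and then checks that everything left is a solid run of ones; the same logic, runnable on its own for IPv4 netmasks:

def prefix_from_netmask_int(ip_int, max_prefixlen=32):
    prefixlen = max_prefixlen
    while prefixlen:
        if ip_int & 1:               # stop at the lowest set bit
            break
        ip_int >>= 1
        prefixlen -= 1
    if ip_int == (1 << prefixlen) - 1:   # remaining bits must all be ones
        return prefixlen
    raise ValueError("Bit pattern does not match /1*0*/")

print(prefix_from_netmask_int(0xFFFFFF00))   # 255.255.255.0 -> 24
print(prefix_from_netmask_int(0xFFFF0000))   # 255.255.0.0   -> 16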
def set_cookie(self, key, value, domain=None, path='/', secure=False, httponly=True): self._cookies[key] = value if domain: self._cookies[key]['domain'] = domain if path: self._cookies[key]['path'] = path if secure: self._cookies[key]['secure'] = secure if httponly: self._cookies[key]['httponly'] = httponly
Set a cookie. Args: key (:obj:`str`): Cookie name value (:obj:`str`): Cookie value domain (:obj:`str`): Cookie domain path (:obj:`str`): Cookie path secure (:obj:`bool`): True if secure, False otherwise httponly (:obj:`bool`): True if it's a HTTP only cookie, False otherwise
juraj-google-style
def set_json(self, obj, status=HttpStatusCodes.HTTP_200): obj = json.dumps(obj, sort_keys=True, default=(lambda x: str(x))) self.set_status(status) self.set_header(HttpResponseHeaders.CONTENT_TYPE, 'application/json') self.set_content(obj)
Helper method to set a JSON response. Args: obj (:obj:`object`): JSON serializable object status (:obj:`str`, optional): Status code of the response
codesearchnet
def get_2d_local_memory_v2(x, query_shape, memory_flange): (_, height, width, depth_x) = common_layers.shape_list(x) paddings = [[0, 0], [memory_flange[0], memory_flange[0]], [memory_flange[1], memory_flange[1]], [0, 0]] padded_x = tf.pad(x, paddings) padded_x.set_shape([None, (height + (2 * memory_flange[0])), (width + (2 * memory_flange[1])), depth_x]) num_h_memory_blocks = ((height // query_shape[0]) + 1) num_w_memory_blocks = ((width // query_shape[1]) + 1) x_memory_blocks = _extract_blocks(padded_x, query_shape[0], query_shape[1]) x_width_blocks = tf.split(x_memory_blocks, num_w_memory_blocks, 2) x_left_width = tf.concat(x_width_blocks[:(num_w_memory_blocks - 1)], axis=2) x_right_width = tf.concat(x_width_blocks[1:], axis=2) x_memory_blocks = tf.concat([x_left_width, x_right_width], axis=4) x_height_blocks = tf.split(x_memory_blocks, num_h_memory_blocks, 1) x_top_height = tf.concat(x_height_blocks[:(num_h_memory_blocks - 1)], axis=1) x_bottom_height = tf.concat(x_height_blocks[1:], axis=1) x = tf.concat([x_top_height, x_bottom_height], axis=3) return x
Gathering memory blocks around query blocks. Flange is half of query. Only works if memory flanges are half of query sizes. Args: x: a [batch, height, width, depth] tensor query_shape: 2-d integer list of query shape memory_flange: 2-d integer list of memory flanges Returns: x: A [batch, num_h_blocks, num_w_blocks, query_shape[0]+2*memory_flange[0], query_shape[1]+2*memory_flange[1]] tensor.
codesearchnet
def files_delete(self, *, id: str, **kwargs) -> SlackResponse: kwargs.update({'id': id}) return self.api_call('files.delete', json=kwargs)
Deletes a file. Args: id (str): The file id. e.g. 'F1234467890'
codesearchnet
def list(self, name=None, all=False, filters=None): resp = self.client.api.images(name=name, all=all, filters=filters) return [self.get(r['Id']) for r in resp]
List images on the server. Args: name (str): Only show images belonging to the repository ``name`` all (bool): Show intermediate image layers. By default, these are filtered out. filters (dict): Filters to be processed on the image list. Available filters: - ``dangling`` (bool) - ``label`` (str): format either ``key`` or ``key=value`` Returns: (list of :py:class:`Image`): The images. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
codesearchnet
def _SetFieldType(self, field_proto, field_desc, package, scope): if field_proto.type_name: desc = self._GetTypeFromScope(package, field_proto.type_name, scope) else: desc = None if not field_proto.HasField('type'): if isinstance(desc, descriptor.Descriptor): field_proto.type = descriptor.FieldDescriptor.TYPE_MESSAGE else: field_proto.type = descriptor.FieldDescriptor.TYPE_ENUM field_desc.cpp_type = descriptor.FieldDescriptor.ProtoTypeToCppProtoType( field_proto.type) if (field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE or field_proto.type == descriptor.FieldDescriptor.TYPE_GROUP): field_desc.message_type = desc if field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: field_desc.enum_type = desc if field_proto.label == descriptor.FieldDescriptor.LABEL_REPEATED: field_desc.has_default_value = False field_desc.default_value = [] elif field_proto.HasField('default_value'): field_desc.has_default_value = True if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT): field_desc.default_value = float(field_proto.default_value) elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING: field_desc.default_value = field_proto.default_value elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL: field_desc.default_value = field_proto.default_value.lower() == 'true' elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: field_desc.default_value = field_desc.enum_type.values_by_name[ field_proto.default_value].number elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES: field_desc.default_value = text_encoding.CUnescape( field_proto.default_value) else: field_desc.default_value = int(field_proto.default_value) else: field_desc.has_default_value = False if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT): field_desc.default_value = 0.0 elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING: field_desc.default_value = u'' elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL: field_desc.default_value = False elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: field_desc.default_value = field_desc.enum_type.values[0].number elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES: field_desc.default_value = b'' else: field_desc.default_value = 0 field_desc.type = field_proto.type
Sets the field's type, cpp_type, message_type and enum_type. Args: field_proto: Data about the field in proto format. field_desc: The descriptor to modiy. package: The package the field's container is in. scope: Enclosing scope of available types.
juraj-google-style
def _create_all_weights(self, var_list): _ = self.iterations self._create_hypers() self._create_slots(var_list)
Creates all weights, including iterations, hyperparameters and slot vars. This will add newly created variables to `optimizer.weights`. New variables are only created when this method is called the first time, or when called with different variables in the var_list. Args: var_list: list or tuple of `Variable` objects that will be minimized using this optimizer.
github-repos
def serialize(data): return rapidjson.dumps(data, skipkeys=False, ensure_ascii=False, sort_keys=True)
Serialize a dict into a JSON formatted string. This function enforces rules like the separator and order of keys. This ensures that all dicts are serialized in the same way. This is specially important for hashing data. We need to make sure that everyone serializes their data in the same way so that we do not have hash mismatches for the same structure due to serialization differences. Args: data (dict): dict to serialize Returns: str: JSON formatted string
codesearchnet
def _process_image_id(self): try: image_info = self.image_id.strip().split(':') self.image_publisher = image_info[0] self.image_offer = image_info[1] self.image_sku = image_info[2] self.image_version = image_info[3] except Exception: self.image_publisher = None
Split image id into component values. Example: SUSE:SLES:12-SP3:2018.01.04 Publisher:Offer:Sku:Version Raises: If image_id is not a valid format.
codesearchnet
def _init_index(root_dir, schema, index_name): index_dir = os.path.join(root_dir, index_name) try: if (not os.path.exists(index_dir)): os.makedirs(index_dir) return (create_in(index_dir, schema), index_dir) else: return (open_dir(index_dir), index_dir) except Exception as e: logger.error("Init error: failed to open search index at: '{}': {} ".format(index_dir, e)) raise
Creates new index or opens existing. Args: root_dir (str): root dir where to find or create index. schema (whoosh.fields.Schema): schema of the index to create or open. index_name (str): name of the index. Returns: tuple ((whoosh.index.FileIndex, str)): first element is index, second is index directory.
codesearchnet
def register_extension(self, ext_in, ext_out, force=False): if not force and (ext_in in self.__extensions.keys()): self.log_warning("Extension %s already exist, ignore redefinition." % ext_in) return self.__extensions[ext_in] = ext_out
Add/register a file extension. Args: ext_in (str): Extension of input files. ext_out (str): Extension of corresponding output files. force (bool): If ``force`` is set to ``True``, simply overwrite existing extensions, otherwise do nothing. If the ``logger`` is set, log a warning about the duplicate extension if ``force == False``.
juraj-google-style
def MessageToJson(message, including_default_value_fields=False): js = _MessageToJsonObject(message, including_default_value_fields) return json.dumps(js, indent=2)
Converts protobuf message to JSON format. Args: message: The protocol buffers message instance to serialize. including_default_value_fields: If True, singular primitive fields, repeated fields, and map fields will always be serialized. If False, only serialize non-empty fields. Singular message fields and oneof fields are not affected by this option. Returns: A string containing the JSON formatted protocol buffer message.
juraj-google-style
def get_all_thread_ids(self): json = self._get_json(self._url.thread_list()) return [thread['no'] for page in json for thread in page['threads']]
Return the ID of every thread on this board. Returns: list of ints: List of IDs of every thread on this board.
codesearchnet
def handle_upnp_error(self, xml_error): xml_error = xml_error.encode('utf-8') error = XML.fromstring(xml_error) log.debug("Error %s", xml_error) error_code = error.findtext( './/{urn:schemas-upnp-org:control-1-0}errorCode') if error_code is not None: description = self.UPNP_ERRORS.get(int(error_code), '') raise SoCoUPnPException( message='UPnP Error {} received: {} from {}'.format( error_code, description, self.soco.ip_address), error_code=error_code, error_description=description, error_xml=xml_error ) else: log.error("Unknown error received from %s", self.soco.ip_address) raise UnknownSoCoException(xml_error)
Disect a UPnP error, and raise an appropriate exception. Args: xml_error (str): a unicode string containing the body of the UPnP/SOAP Fault response. Raises an exception containing the error code.
juraj-google-style
def _FormatField(self, field): if self._FIELD_DELIMITER and isinstance(field, py2to3.STRING_TYPES): return field.replace(self._FIELD_DELIMITER, ' ') return field
Formats a field. Args: field (str): field value. Returns: str: formatted field value.
juraj-google-style
def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None): do_image_splitting = images_kwargs.get('do_image_splitting', None) or self.do_image_splitting max_image_size = images_kwargs.get('max_image_size', None) or self.max_image_size size = images_kwargs.get('size', None) or self.size if do_image_splitting: height, width = _resize_output_size_rescale_to_max_len(height, width, max_len=size['longest_edge']) height, width = _resize_output_size_scale_below_upper_bound(height, width, max_len=4096) aspect_ratio = width / height if width >= height: resized_width = math.ceil(width / max_image_size['longest_edge']) * max_image_size['longest_edge'] resized_height = int(width / aspect_ratio) resized_height = math.ceil(height / max_image_size['longest_edge']) * max_image_size['longest_edge'] elif height > width: resized_height = math.ceil(height / max_image_size['longest_edge']) * max_image_size['longest_edge'] resized_width = int(height * aspect_ratio) resized_width = math.ceil(width / max_image_size['longest_edge']) * max_image_size['longest_edge'] max_height = max_width = max_image_size['longest_edge'] if resized_height > max_height or resized_width > max_width: num_rows = math.ceil(resized_height / max_height) num_cols = math.ceil(resized_width / max_width) num_patches = num_rows * num_cols + 1 return num_patches
A utility that returns number of image patches for a given image size. Args: height (`int`): Height of the input image. width (`int`): Width of the input image. images_kwargs (`dict`, *optional*) Any kwargs to override defaults of the image processor. Returns: `int`: Number of patches per image.
github-repos
def rtruediv(self, other, axis="columns", level=None, fill_value=None): return self._binary_op( "rtruediv", other, axis=axis, level=level, fill_value=fill_value )
Div this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the div against this. axis: The axis to div over. level: The Multilevel index level to apply div over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the rdiv applied.
juraj-google-style
class TFIdeficsVisionEncoder(tf.keras.layers.Layer): def __init__(self, config: IdeficsVisionConfig, **kwargs): super().__init__(**kwargs) self.config = config self.layers = [TFIdeficsVisionEncoderLayer(config, name=f'layers.{i}') for i in range(config.num_hidden_layers)] self.gradient_checkpointing = False def call(self, inputs_embeds, attention_mask: Optional[tf.Tensor]=None, causal_attention_mask: Optional[tf.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=None) -> Union[Tuple, TFBaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = tf.recompute_grad(create_custom_forward(encoder_layer), hidden_states, attention_mask, causal_attention_mask) else: layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'layers', None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None)
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`TFIdeficsVisionEncoderLayer`]. Args: config: IdeficsVisionConfig
github-repos
def fit_transform(self, tables=None, transformer_dict=None, transformer_list=None, missing=None): if (missing is None): missing = self.missing else: self.missing = missing warnings.warn(DEPRECATION_MESSAGE.format('fit_transform'), DeprecationWarning) transformed = {} if (tables is None): tables = self.table_dict if ((transformer_dict is None) and (transformer_list is None)): transformer_dict = self.transformer_dict for table_name in tables: (table, table_meta) = tables[table_name] transformed_table = self.fit_transform_table(table, table_meta, transformer_dict, transformer_list) transformed[table_name] = transformed_table return transformed
Create, apply and store the specified transformers for the given tables. Args: tables(dict): Mapping of table names to `tuple` where each tuple is of the form (`pandas.DataFrame`, `dict`). The `DataFrame` contains the table data and the `dict` the corresponding meta information. If not specified, the tables will be retrieved using the meta_file. transformer_dict(dict): Mapping `tuple(str, str)` -> `str` where the tuple is (table_name, column_name). transformer_list(list): List of transformers to use. Overrides the transformers in the meta_file. missing(bool): Whether or not to use NullTransformer to handle missing values. Returns: dict: Map from `str` (table_names) to `pandas.DataFrame` (transformed data).
codesearchnet
def FindMessageTypeByName(self, full_name): full_name = _NormalizeFullyQualifiedName(full_name) if (full_name not in self._descriptors): self.FindFileContainingSymbol(full_name) return self._descriptors[full_name]
Loads the named descriptor from the pool. Args: full_name: The full name of the descriptor to load. Returns: The descriptor for the named type.
codesearchnet
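An illustrative call with the same shape on the standard protobuf descriptor pool; importing a generated _pb2 module registers its types in the default pool. This is a sketch of the lookup pattern, not necessarily the custom pool subclass above.

from google.protobuf import descriptor_pool
from google.protobuf import timestamp_pb2  # noqa: F401  (importing registers google.protobuf.Timestamp)

pool = descriptor_pool.Default()
descriptor = pool.FindMessageTypeByName("google.protobuf.Timestamp")
print(descriptor.full_name)                  # google.protobuf.Timestamp
print([f.name for f in descriptor.fields])   # ['seconds', 'nanos']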
def Trim(lst, limit): limit = max(0, limit) clipping = lst[limit:] del lst[limit:] return clipping
Trims a given list so that it is not longer than given limit. Args: lst: A list to trim. limit: A maximum number of elements in the list after trimming. Returns: A suffix of the input list that was trimmed.
codesearchnet
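A small sketch of how Trim mutates its argument in place and returns the clipped tail, using the function defined above.

values = [1, 2, 3, 4, 5]
clipped = Trim(values, 3)
print(values)     # [1, 2, 3]  -- trimmed in place
print(clipped)    # [4, 5]     -- the removed suffix
Trim(values, -1)  # negative limits are clamped to 0, emptying the list
print(values)     # []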
def notify(self, notices): tmpl_html = get_template('required_tags_notice.html') tmpl_text = get_template('required_tags_notice.txt') for (recipient, data) in list(notices.items()): body_html = tmpl_html.render(data=data) body_text = tmpl_text.render(data=data) send_notification(subsystem=self.ns, recipients=[recipient], subject=self.email_subject, body_html=body_html, body_text=body_text)
Send notifications to the recipients provided Args: notices (:obj:`dict` of `str`: `list`): A dictionary mapping notification messages to the recipient. Returns: `None`
codesearchnet
def completely_parse_reader(parser: Parser[(Input, Output)], reader: Reader[Input]) -> Result[Output]: result = (parser << eof).consume(reader) if isinstance(result, Continue): return Success(result.value) else: used = set() unique_expected = [] for expected_lambda in result.expected: expected = expected_lambda() if (expected not in used): used.add(expected) unique_expected.append(expected) return Failure(result.farthest.expected_error(' or '.join(unique_expected)))
Consume reader and return Success only on complete consumption. This is a helper function for ``parse`` methods, which return ``Success`` when the input is completely consumed and ``Failure`` with an appropriate message otherwise. Args: parser: The parser doing the consuming reader: The input being consumed Returns: A parsing ``Result``
codesearchnet
def supports_card_actions(channel_id: str, button_cnt: int = 100) -> bool: max_actions = { Channels.facebook: 3, Channels.skype: 3, Channels.ms_teams: 3, Channels.line: 99, Channels.slack: 100, Channels.emulator: 100, Channels.direct_line: 100, Channels.webchat: 100, Channels.cortana: 100, } return button_cnt <= max_actions[channel_id] if channel_id in max_actions else False
Determine if a number of Card Actions are supported by a Channel. Args: channel_id (str): The Channel to check if the Card Actions are supported in. button_cnt (int, optional): Defaults to 100. The number of Card Actions to check for the Channel. Returns: bool: True if the Channel supports the button_cnt total Card Actions, False if the Channel does not support that number of Card Actions.
juraj-google-style
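A minimal sketch exercising the helper above; the import location of the Channels constants is an assumption and may differ in the actual Bot Framework package layout.

# Assumed import path for the Channels constants; adjust to the real package layout.
from botframework.connector.channels import Channels

print(supports_card_actions(Channels.facebook, button_cnt=3))   # True  (limit is 3)
print(supports_card_actions(Channels.facebook, button_cnt=4))   # False (over the limit)
print(supports_card_actions("unknown-channel", button_cnt=1))   # False (unlisted channel)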
def download_listing(self, file: Optional[IO], duration_timeout: Optional[float]=None) -> ListingResponse: if (self._session_state != SessionState.directory_request_sent): raise RuntimeError('File request not sent') self._session_state = SessionState.file_request_sent (yield from self.download(file=file, rewind=False, duration_timeout=duration_timeout)) try: if (self._response.body.tell() == 0): listings = () elif (self._listing_type == 'mlsd'): self._response.body.seek(0) machine_listings = wpull.protocol.ftp.util.parse_machine_listing(self._response.body.read().decode('utf-8', errors='surrogateescape'), convert=True, strict=False) listings = list(wpull.protocol.ftp.util.machine_listings_to_file_entries(machine_listings)) else: self._response.body.seek(0) file = io.TextIOWrapper(self._response.body, encoding='utf-8', errors='surrogateescape') listing_parser = ListingParser(file=file) listings = list(listing_parser.parse_input()) _logger.debug('Listing detected as %s', listing_parser.type) file.detach() except (ListingError, ValueError) as error: raise ProtocolError(*error.args) from error self._response.files = listings self._response.body.seek(0) self._session_state = SessionState.response_received return self._response
Read file listings. Args: file: A file object or asyncio stream. duration_timeout: Maximum time in seconds within which the entire file must be read. Returns: A Response populated with the file listings. Be sure to call :meth:`start_file_listing` first. Coroutine.
codesearchnet
def filter_queryset(self, request, term, queryset=None, **dependent_fields): if (queryset is None): queryset = self.get_queryset() search_fields = self.get_search_fields() select = Q() term = term.replace('\t', ' ') term = term.replace('\n', ' ') for t in [t for t in term.split(' ') if (not (t == ''))]: select &= reduce((lambda x, y: (x | Q(**{y: t}))), search_fields, Q(**{search_fields[0]: t})) if dependent_fields: select &= Q(**dependent_fields) return queryset.filter(select).distinct()
Return QuerySet filtered by search_fields matching the passed term. Args: request (django.http.request.HttpRequest): The request is being passed from the JSON view and can be used to dynamically alter the response queryset. term (str): Search term. queryset (django.db.models.query.QuerySet): QuerySet to select choices from. **dependent_fields: Dependent fields and their values. If you want to inherit from ModelSelect2Mixin and later call this method, be sure to pop everything from keyword arguments that is not a dependent field. Returns: QuerySet: Filtered QuerySet
codesearchnet
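A hedged configuration sketch showing where this filtering hook is typically exercised: a django-select2 model widget declaring search_fields. The Author model and its fields are hypothetical.

from django_select2.forms import ModelSelect2Widget

from myapp.models import Author  # hypothetical model with name/email fields


class AuthorWidget(ModelSelect2Widget):
    model = Author
    # Each whitespace-separated term must match at least one of these lookups.
    search_fields = ["name__icontains", "email__icontains"]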
def mlir_convert(options, saved_model_dir, input_tensors, output_tensors, **kwargs): test_params = kwargs.get('test_params', {}) extra_convert_options = kwargs.get('extra_convert_options', zip_test_utils.ExtraConvertOptions()) tflite_model = None log = '' signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir, [signature_key]) converter.allow_custom_ops = extra_convert_options.allow_custom_ops converter.experimental_new_quantizer = options.mlir_quantizer if options.make_tf_ptq_tests: if options.hlo_aware_conversion: tf_quantization_mode = 'DEFAULT' else: tf_quantization_mode = 'LEGACY_INTEGER' converter._experimental_tf_quantization_mode = tf_quantization_mode if options.run_with_flex: converter.target_spec.supported_ops = set([lite.OpsSet.TFLITE_BUILTINS, lite.OpsSet.SELECT_TF_OPS]) if options.enable_dynamic_update_slice: converter._experimental_enable_dynamic_update_slice = True converter.unfold_batchmatmul = options.unfold_batchmatmul if test_params.get('dynamic_range_quantize', False): converter.optimizations = [lite.Optimize.DEFAULT] if options.experimental_low_bit_qat: converter._experimental_low_bit_qat = True if test_params.get('fully_quantize', False): converter.optimizations = [lite.Optimize.DEFAULT] min_value, max_value = test_params.get('input_range', (-1, 1)) def representative_dataset(input_tensors): calibration_inputs = {} for name, shape, dtype in input_tensors: if shape: dims = [1 if dim.value is None else dim.value for dim in shape.dims] calibration_inputs[name] = np.random.uniform(min_value, max_value, tuple(dims)).astype(dtype.as_numpy_dtype) return calibration_inputs def representative_dataset_gen(): for _ in range(100): yield representative_dataset(input_tensors) if test_params.get('quant_16x8', False): converter.target_spec.supported_ops = [lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8] else: converter.target_spec.supported_ops = [lite.OpsSet.TFLITE_BUILTINS_INT8] converter.representative_dataset = representative_dataset_gen if extra_convert_options.inference_input_type: converter.inference_input_type = extra_convert_options.inference_input_type if extra_convert_options.inference_output_type: converter.inference_output_type = extra_convert_options.inference_output_type try: tflite_model = converter.convert() if options.expected_ops_in_converted_model: ops_list = tflite_test_util.get_ops_list(tflite_model) for expected_op in options.expected_ops_in_converted_model: if expected_op not in ops_list: tflite_model = None raise ValueError('{} op not found in the converted model'.format(expected_op)) except Exception as e: log = str(e) return (tflite_model, log)
Convert a saved model into a tflite model with MLIR-based conversion. Args: options: A lite.testing.generate_examples_lib.Options instance. saved_model_dir: Path to the saved model. input_tensors: List of input tensor tuples `(name, shape, type)`. output_tensors: List of output tensors (names). **kwargs: Extra parameters. Returns: output tflite model, log_txt from conversion or None, log_txt if it did not convert properly.
github-repos
async def verify_scriptworker_task(chain, obj): errors = [] if (obj.worker_impl != 'scriptworker'): errors.append('{} {} must be run from scriptworker!'.format(obj.name, obj.task_id)) raise_on_errors(errors)
Verify the signing trust object. Currently the only check is to make sure it was run on a scriptworker. Args: chain (ChainOfTrust): the chain we're operating on obj (ChainOfTrust or LinkOfTrust): the trust object for the signing task.
codesearchnet
def html_page_for_render_items(bundle, docs_json, render_items, title, template=None, template_variables={}): if (title is None): title = DEFAULT_TITLE (bokeh_js, bokeh_css) = bundle json_id = make_id() json = escape(serialize_json(docs_json), quote=False) json = wrap_in_script_tag(json, 'application/json', json_id) script = wrap_in_script_tag(script_for_render_items(json_id, render_items)) context = template_variables.copy() context.update(dict(title=title, bokeh_js=bokeh_js, bokeh_css=bokeh_css, plot_script=(json + script), docs=render_items, base=FILE, macros=MACROS)) if (len(render_items) == 1): context['doc'] = context['docs'][0] context['roots'] = context['doc'].roots context['plot_div'] = '\n'.join((div_for_render_item(item) for item in render_items)) if (template is None): template = FILE elif isinstance(template, string_types): template = _env.from_string(('{% extends base %}\n' + template)) html = template.render(context) return encode_utf8(html)
Render an HTML page from a template and Bokeh render items. Args: bundle (tuple): a tuple containing (bokehjs, bokehcss) docs_json (JSON-like): Serialized Bokeh Document render_items (RenderItems): Specific items to render from the document and where title (str or None): A title for the HTML page. If None, DEFAULT_TITLE is used template (str or Template or None, optional): A Template to be used for the HTML page. If None, FILE is used. template_variables (dict, optional): Any additional variables to pass to the template Returns: str
codesearchnet
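In practice this helper sits behind Bokeh's public embedding API; a hedged end-to-end sketch using that public entry point, which produces a standalone page like the one assembled above.

from bokeh.plotting import figure
from bokeh.resources import CDN
from bokeh.embed import file_html  # public wrapper over the standalone-page machinery

p = figure(title="demo")
p.line([1, 2, 3], [4, 6, 5])

html = file_html(p, CDN, "demo page")
with open("demo.html", "w") as f:
    f.write(html)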
def image_format(value): if (value.image.format.upper() not in constants.ALLOWED_IMAGE_FORMATS): raise ValidationError(MESSAGE_INVALID_IMAGE_FORMAT)
Confirms that the uploaded image is of supported format. Args: value (File): The file with an `image` property containing the image Raises: django.forms.ValidationError
codesearchnet
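A hedged sketch of wiring the validator above into a Django form field; the app-level constants module (ALLOWED_IMAGE_FORMATS, MESSAGE_INVALID_IMAGE_FORMAT) is assumed to exist as referenced by the code.

from django import forms

# image_format is the validator defined above.
class AvatarForm(forms.Form):
    avatar = forms.ImageField(validators=[image_format])

# form = AvatarForm(files={"avatar": uploaded_file})
# form.is_valid() runs the validator and rejects unsupported image formats.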
def GetString(self): string_list = [] string_list.append('Report generated from: {0:s}'.format(self.plugin_name)) time_compiled = getattr(self, 'time_compiled', 0) if time_compiled: time_compiled = timelib.Timestamp.CopyToIsoFormat(time_compiled) string_list.append('Generated on: {0:s}'.format(time_compiled)) filter_string = getattr(self, 'filter_string', '') if filter_string: string_list.append('Filter String: {0:s}'.format(filter_string)) string_list.append('') string_list.append('Report text:') string_list.append(self.text) return '\n'.join(string_list)
Retrieves a string representation of the report. Returns: str: string representation of the report.
codesearchnet
def top_1(x, reduced_dim, dtype=tf.int32, name=None): reduced_dim = convert_to_dimension(reduced_dim) with tf.name_scope(name, default_name="top_1"): max_val = reduce_max(x, reduced_dim=reduced_dim) is_max = to_float(equal(x, max_val)) pos = mtf_range(x.mesh, reduced_dim, tf.float32) ret = reduce_max(is_max * pos, reduced_dim=reduced_dim) ret = cast(ret, dtype) return ret, max_val
Argmax and Max. Args: x: a Tensor reduced_dim: a Dimension in x.shape.dims dtype: a tf.dtype (for the output) name: an optional string Returns: indices: a Tensor with given dtype values: optional Tensor equal to mtf.reduce_max(x, reduced_dim=reduced_dim)
juraj-google-style
def create_package(name, data, package_cls=None): from rez.package_maker__ import PackageMaker maker = PackageMaker(name, data, package_cls=package_cls) return maker.get_package()
Create a package given package data. Args: name (str): Package name. data (dict): Package data. Must conform to `package_maker.package_schema`. Returns: `Package` object.
juraj-google-style
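A hedged sketch of building an in-memory package with the helper above; the field names follow rez's package schema, but treat the exact keys and the qualified_name attribute as assumptions.

pkg = create_package(
    "mytool",
    {
        "version": "1.0.0",
        "description": "Example command-line tool.",
        "requires": ["python-3.7+"],
    },
)
print(pkg.qualified_name)  # e.g. "mytool-1.0.0" (attribute name assumed)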
def save(tiff_filename, numpy_data):
    tiff_filename = os.path.expanduser(tiff_filename)
    if isinstance(numpy_data, str):
        # Raw TIFF payload passed as a string blob: write it to disk verbatim.
        with open(tiff_filename, 'wb') as fp:
            fp.write(numpy_data)
        return tiff_filename
    try:
        tiff.imsave(tiff_filename, numpy_data)
    except Exception as e:
        raise ValueError('Could not save TIFF file {0}: {1}'.format(tiff_filename, e))
    return tiff_filename
Export a numpy array to a TIFF file. Arguments: tiff_filename: A filename to which to save the TIFF data numpy_data: The numpy array to save to TIFF Returns: String. The expanded filename that now holds the TIFF data
codesearchnet
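A small usage sketch for the export helper above, assuming the module's tiff (tifffile) and os imports are in place as the code implies.

import numpy as np

# A synthetic 8-bit grayscale image to export.
volume = np.random.randint(0, 255, size=(64, 64), dtype=np.uint8)

out_path = save("~/example_slice.tif", volume)
print(out_path)  # expanded path of the written TIFF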
def open_port(upnp, internal_port, external_start_port=None): if external_start_port is None: external_start_port = internal_port if upnp is None: return False def register(internal, external): mapping = upnp.getspecificportmapping(external, 'UDP') if mapping is not None: lanaddr, internal_mapped, name, _, _ = mapping is_valid_mapping = ( lanaddr == upnp.lanaddr and name == RAIDEN_IDENTIFICATOR and internal_mapped == internal ) is_not_our_mapping = ( internal_mapped != internal and name != RAIDEN_IDENTIFICATOR ) is_previous_mapping = ( internal_mapped != internal and name == RAIDEN_IDENTIFICATOR and lanaddr == upnp.lanaddr ) if is_valid_mapping: log.debug( 'keeping pre-existing portmapping', internal=internal, external=external, lanaddr=lanaddr, ) return True elif lanaddr != upnp.lanaddr: log.debug( 'ignoring existing mapping for other IP', internal=internal, external=external, other_ip=lanaddr, our_ip=upnp.lanaddr, ) return False elif is_not_our_mapping: log.debug( 'ignoring existing mapping for other program', name=name, ) return False elif is_previous_mapping: log.debug('releasing previous port mapping') upnp.deleteportmapping(external, 'UDP') log.debug('trying to create new port mapping', internal=internal, external=external) return upnp.addportmapping( external, 'UDP', upnp.lanaddr, internal, RAIDEN_IDENTIFICATOR, '', ) external_port = external_start_port success = register(internal_port, external_port) while not success and external_port <= MAX_PORT: external_port += 1 log.debug('trying', external=external_port) success = register(internal_port, external_port) if success: return upnp.externalipaddress(), external_port else: log.error( 'could not register a port-mapping', location='FIXME', ) return False return False
Open a port for the raiden service (listening at `internal_port`) through UPnP. Args: internal_port (int): the target port of the raiden service external_start_port (int): query for an external port starting here (default: internal_port) Returns: external_ip_address, external_port (tuple(str, int)): if successful or None
juraj-google-style
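A hedged sketch of obtaining the upnp handle this function expects, using the miniupnpc bindings; discovery behaviour varies by network and router, so treat the flow as illustrative only.

import miniupnpc

upnp = miniupnpc.UPnP()
upnp.discoverdelay = 200            # milliseconds to wait for IGD responses
if upnp.discover() > 0:             # number of UPnP devices found
    upnp.selectigd()
    mapping = open_port(upnp, 38647)  # the function defined above
    if mapping:
        external_ip, external_port = mapping
        print(external_ip, external_port)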
def _escaped_token_to_subtoken_ids(self, escaped_token): return [self._subtoken_string_to_id[subtoken] for subtoken in self._escaped_token_to_subtoken_strings(escaped_token)]
Converts an escaped token string to a list of subtoken IDs. Args: escaped_token: An escaped token as a unicode string. Returns: A list of subtoken IDs as integers.
codesearchnet
def _error_messages(self, driver_id): assert isinstance(driver_id, ray.DriverID) message = self.redis_client.execute_command( "RAY.TABLE_LOOKUP", ray.gcs_utils.TablePrefix.ERROR_INFO, "", driver_id.binary()) if message is None: return [] gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry( message, 0) error_messages = [] for i in range(gcs_entries.EntriesLength()): error_data = ray.gcs_utils.ErrorTableData.GetRootAsErrorTableData( gcs_entries.Entries(i), 0) assert driver_id.binary() == error_data.DriverId() error_message = { "type": decode(error_data.Type()), "message": decode(error_data.ErrorMessage()), "timestamp": error_data.Timestamp(), } error_messages.append(error_message) return error_messages
Get the error messages for a specific driver. Args: driver_id: The ID of the driver to get the errors for. Returns: A list of the error messages for this driver.
juraj-google-style
def __init__(self, code=None, contract_properties=0, name=None, version=None, author=None, email=None, description=None): self.Code = code self.ContractProperties = contract_properties self.Name = name self.CodeVersion = version self.Author = author self.Email = email self.Description = description
Create an instance. Args: code (neo.Core.FunctionCode): contract_properties (neo.SmartContract.ContractParameterType): contract type. name (bytes): version (bytes): author (bytes): email (bytes): description (bytes):
juraj-google-style
def _GetKeysDefaultEmpty(self, top_level, keys, depth=1): keys = set(keys) match = {} if (depth == 1): for key in keys: value = top_level.get(key, None) if (value is not None): match[key] = value else: for (_, parsed_key, parsed_value) in plist_interface.RecurseKey(top_level, depth=depth): if (parsed_key in keys): match[parsed_key] = parsed_value if (set(match.keys()) == keys): return match return match
Retrieves plist keys, defaulting to empty values. Args: top_level (plistlib._InternalDict): top level plist object. keys (set[str]): names of keys that should be returned. depth (int): depth within the plist, where 1 is top level. Returns: dict[str, str]: values of the requested keys.
codesearchnet