Columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes)
def build_graph(device, input_shape, perm, datatype, num_iters):
    with ops.device('/%s:0' % device):
        total_size = np.prod(input_shape)
        inp = np.arange(1, total_size + 1, dtype=datatype).reshape(input_shape)
        t = constant_op.constant(inp, shape=input_shape)
        outputs = []
        transpose_op = array_ops.transpose(t, perm)
        outputs.append(transpose_op)
        for _ in range(1, num_iters):
            with ops.control_dependencies([transpose_op]):
                transpose_op = array_ops.transpose(t, perm)
                outputs.append(transpose_op)
        return control_flow_ops.group(*outputs)
Builds a graph containing a sequence of transpose operations. Args: device: String, the device to run on. input_shape: Shape of the input tensor. perm: A list of ints with the same length as the input tensor's dimension. datatype: numpy data type of the input tensor. num_iters: number of iterations to run transpose. Returns: An array of tensors to run()
github-repos
def _compile_ast_node_to_ir(schema, current_schema_type, ast, location, context): basic_blocks = [] local_unique_directives = get_unique_directives(ast) fields = _get_fields(ast) (vertex_fields, property_fields) = fields fragment = _get_inline_fragment(ast) filter_operations = get_local_filter_directives(ast, current_schema_type, vertex_fields) fragment_exists = (fragment is not None) fields_exist = (vertex_fields or property_fields) if (fragment_exists and fields_exist): raise GraphQLCompilationError(u'Cannot compile GraphQL that has inline fragment and selected fields in the same selection. Please move the selected fields inside the inline fragment.') if (location.field is not None): if fragment_exists: raise AssertionError(u'Found inline fragment at a property field: {} {}'.format(location, fragment)) if (len(property_fields) > 0): raise AssertionError(u'Found property fields on a property field: {} {}'.format(location, property_fields)) for filter_operation_info in filter_operations: filter_block = process_filter_directive(filter_operation_info, location, context) if (isinstance(location, FoldScopeLocation) and (location.field == COUNT_META_FIELD_NAME)): set_fold_innermost_scope(context) expected_field = expressions.LocalField(COUNT_META_FIELD_NAME) replacement_field = expressions.FoldedContextField(location, GraphQLInt) visitor_fn = expressions.make_replacement_visitor(expected_field, replacement_field) filter_block = filter_block.visit_and_update_expressions(visitor_fn) visitor_fn = expressions.make_type_replacement_visitor(expressions.ContextField, (lambda context_field: expressions.GlobalContextField(context_field.location, context_field.field_type))) filter_block = filter_block.visit_and_update_expressions(visitor_fn) set_fold_count_filter(context) context['global_filters'].append(filter_block) else: basic_blocks.append(filter_block) if (location.field is not None): _compile_property_ast(schema, current_schema_type, ast, location, context, local_unique_directives) elif fragment_exists: basic_blocks.extend(_compile_fragment_ast(schema, current_schema_type, fragment, location, context)) else: basic_blocks.extend(_compile_vertex_ast(schema, current_schema_type, ast, location, context, local_unique_directives, fields)) return basic_blocks
Compile the given GraphQL AST node into a list of basic blocks. Args: schema: GraphQL schema object, obtained from the graphql library current_schema_type: GraphQLType, the schema type at the current location ast: the current GraphQL AST node, obtained from the graphql library location: Location object representing the current location in the query context: dict, various per-compilation data (e.g. declared tags, whether the current block is optional, etc.). May be mutated in-place in this function! Returns: list of basic blocks corresponding to this GraphQL AST node
codesearchnet
def clean_dataset_tags(self): (tags_dict, wildcard_tags) = Tags.tagscleanupdicts() def delete_tag(tag): logger.info(('%s - Deleting tag %s!' % (self.data['name'], tag))) return (self.remove_tag(tag), False) def update_tag(tag, final_tags, wording, remove_existing=True): text = ('%s - %s: %s -> ' % (self.data['name'], wording, tag)) if (not final_tags): logger.error(('%snothing!' % text)) return (False, True) tags_lower_five = final_tags[:5].lower() if ((tags_lower_five == 'merge') or (tags_lower_five == 'split') or ((';' not in final_tags) and (len(final_tags) > 50))): logger.error(('%s%s - Invalid final tag!' % (text, final_tags))) return (False, True) if remove_existing: self.remove_tag(tag) tags = ', '.join(self.get_tags()) if self.add_tags(final_tags.split(';')): logger.info(('%s%s! Dataset tags: %s' % (text, final_tags, tags))) else: logger.warning(('%s%s - At least one of the tags already exists! Dataset tags: %s' % (text, final_tags, tags))) return (True, False) def do_action(tag, tags_dict_key): whattodo = tags_dict[tags_dict_key] action = whattodo[u'action'] final_tags = whattodo[u'final tags (semicolon separated)'] if (action == u'Delete'): (changed, error) = delete_tag(tag) elif (action == u'Merge'): (changed, error) = update_tag(tag, final_tags, 'Merging') elif (action == u'Fix spelling'): (changed, error) = update_tag(tag, final_tags, 'Fixing spelling') elif (action == u'Non English'): (changed, error) = update_tag(tag, final_tags, 'Anglicising', remove_existing=False) else: changed = False error = False return (changed, error) def process_tag(tag): changed = False error = False if (tag in tags_dict.keys()): (changed, error) = do_action(tag, tag) else: for wildcard_tag in wildcard_tags: if fnmatch.fnmatch(tag, wildcard_tag): (changed, error) = do_action(tag, wildcard_tag) break return (changed, error) anychange = False anyerror = False for tag in self.get_tags(): (changed, error) = process_tag(tag) if changed: anychange = True if error: anyerror = True return (anychange, anyerror)
Clean dataset tags according to tags cleanup spreadsheet and return if any changes occurred Returns: Tuple[bool, bool]: Returns (True if tags changed or False if not, True if error or False if not)
codesearchnet
def get_videos_for_ids(edx_video_ids, sort_field=None, sort_dir=SortDirection.asc):
    videos, __ = _get_videos_for_filter(
        {"edx_video_id__in": edx_video_ids},
        sort_field,
        sort_dir,
    )
    return videos
Returns an iterator of videos that match the given list of ids. Args: edx_video_ids (list) sort_field (VideoSortField) sort_dir (SortDirection) Returns: A generator expression that contains the videos found, sorted by the given field and direction, with ties broken by edx_video_id to ensure a total order
juraj-google-style
def __leastsq_fit(tomo_data, weights=None, trace=None, beta=None): if (trace is None): trace = 1.0 data = tomo_data['data'] keys = data[0]['circuit'].keys() counts = [] shots = [] ops = [] for dat in data: for key in keys: counts.append(dat['counts'][key]) shots.append(dat['shots']) projectors = dat['circuit'][key] op = __projector(projectors['meas'], tomo_data['meas_basis']) if ('prep' in projectors): op_prep = __projector(projectors['prep'], tomo_data['prep_basis']) op = np.kron(op_prep.conj(), op) ops.append(op) counts = np.array(counts) shots = np.array(shots) freqs = (counts / shots) if (weights is None): if (beta is None): beta = 0.50922 K = len(keys) freqs_hedged = ((counts + beta) / (shots + (K * beta))) weights = np.sqrt((shots / (freqs_hedged * (1 - freqs_hedged)))) return __tomo_linear_inv(freqs, ops, weights, trace=trace)
Reconstruct a state from unconstrained least-squares fitting. Args: tomo_data (list[dict]): state or process tomography data. weights (list or array or None): weights to use for least squares fitting. The default is standard deviation from a binomial distribution. trace (float or None): trace of returned operator. The default is 1. beta (float or None): hedge parameter (>=0) for computing frequencies from zero-count data. The default value is 0.50922. Returns: numpy.array: A numpy array of the reconstructed operator.
codesearchnet
def site_specific_coordination_numbers(self):
    specific_coordination_numbers = {}
    for site in self.sites:
        specific_coordination_numbers[site.label] = site.site_specific_neighbours()
    return specific_coordination_numbers
Returns a dictionary of coordination numbers for each site type. Args: None Returns: (Dict(Str:List(Int))) : Dictionary of coordination numbers for each site type, e.g.:: { 'A' : [ 2, 4 ], 'B' : [ 2 ] }
codesearchnet
def get_settable_properties(cls):
    results = []
    for attr, value in vars(cls).items():
        if isinstance(value, property) and value.fset is not None:
            results.append(attr)
    return results
Gets the settable properties of a class. Only returns the explicitly defined properties with setters. Args: cls: A class in Python.
github-repos
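A minimal usage sketch for get_settable_properties above; the Widget class is hypothetical, defined only to illustrate which properties the helper picks up.

class Widget:
    def __init__(self):
        self._size = 0

    @property
    def size(self):
        return self._size

    @size.setter
    def size(self, value):
        self._size = value

    @property
    def area(self):
        # read-only: defined without a setter
        return self._size ** 2

# Only 'size' has an fset, so it is the only name returned.
print(get_settable_properties(Widget))  # ['size']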
def get_job(self, job_resource_name: str) -> Dict:
    return self.service.projects().programs().jobs().get(
        name=job_resource_name).execute()
Returns metadata about a previously created job. See get_job_result if you want the results of the job and not just metadata about the job. Params: job_resource_name: A string of the form `projects/project_id/programs/program_id/jobs/job_id`. Returns: A dictionary containing the metadata.
juraj-google-style
def _get_formatted_date(dataset_date, date_format=None):
    if dataset_date:
        if date_format:
            return dataset_date.strftime(date_format)
        else:
            return dataset_date.date().isoformat()
    else:
        return None
Get supplied dataset date as string in specified format. If no format is supplied, an ISO 8601 string is returned. Args: dataset_date (Optional[datetime.datetime]): dataset date in datetime.datetime format date_format (Optional[str]): Date format. None is taken to be ISO 8601. Defaults to None. Returns: Optional[str]: Dataset date string or None if no date is set
juraj-google-style
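A quick sketch of how _get_formatted_date above behaves; the sample dates are arbitrary and the expected outputs are traced from the code itself.

from datetime import datetime

d = datetime(2024, 1, 15, 9, 30)
print(_get_formatted_date(d))              # '2024-01-15' (ISO 8601, default)
print(_get_formatted_date(d, '%d/%m/%Y'))  # '15/01/2024'
print(_get_formatted_date(None))           # None when no date is set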
def _GetValueAsObject(self, property_value):
    if property_value.type == pyolecf.value_types.BOOLEAN:
        return property_value.data_as_boolean
    if property_value.type in self._INTEGER_TYPES:
        return property_value.data_as_integer
    if property_value.type in self._STRING_TYPES:
        return property_value.data_as_string
    try:
        data = property_value.data
    except IOError:
        data = None
    return data
Retrieves the property value as a Python object. Args: property_value (pyolecf.property_value): OLECF property value. Returns: object: property value as a Python object.
juraj-google-style
def all(self, customer_id, data={}, **kwargs):
    url = '{}/{}/tokens'.format(self.base_url, customer_id)
    return self.get_url(url, data, **kwargs)
Get all tokens for given customer Id Args: customer_id : Customer Id for which tokens have to be fetched Returns: Token dicts for given customer Id
codesearchnet
def _StartProfiling(self, configuration): if not configuration: return if configuration.HaveProfileMemoryGuppy(): self._guppy_memory_profiler = profilers.GuppyMemoryProfiler( self._name, configuration) self._guppy_memory_profiler.Start() if configuration.HaveProfileMemory(): self._memory_profiler = profilers.MemoryProfiler( self._name, configuration) self._memory_profiler.Start() if configuration.HaveProfileProcessing(): identifier = '{0:s}-processing'.format(self._name) self._processing_profiler = profilers.ProcessingProfiler( identifier, configuration) self._processing_profiler.Start() if configuration.HaveProfileSerializers(): identifier = '{0:s}-serializers'.format(self._name) self._serializers_profiler = profilers.SerializersProfiler( identifier, configuration) self._serializers_profiler.Start() if configuration.HaveProfileStorage(): self._storage_profiler = profilers.StorageProfiler( self._name, configuration) self._storage_profiler.Start() if configuration.HaveProfileTaskQueue(): self._task_queue_profiler = profilers.TaskQueueProfiler( self._name, configuration) self._task_queue_profiler.Start()
Starts profiling. Args: configuration (ProfilingConfiguration): profiling configuration.
juraj-google-style
def generate_name(self, name_format=DEFAULT_FILE_NAME_FORMAT):
    if len(self.segments) > 0:
        return self.segments[0].points[0].time.strftime(name_format) + ".gpx"
    else:
        return "EmptyTrack"
Generates a name for the track. The name is generated based on the date of the first point of the track, or in case it doesn't exist, "EmptyTrack". Args: name_format (str, optional): Name format to give to the track, based on its start time. Defaults to DEFAULT_FILE_NAME_FORMAT Returns: str
juraj-google-style
def create_walker(self, selector, skip_all=True): if selector.buffered: walker = BufferedStreamWalker(selector, self._engine, skip_all=skip_all) self._queue_walkers.append(walker) return walker if (selector.match_type == DataStream.CounterType): walker = CounterStreamWalker(selector) else: walker = VirtualStreamWalker(selector) self._virtual_walkers.append(walker) return walker
Create a stream walker based on the given selector. This function returns a StreamWalker subclass that will remain up to date and allow iterating over and popping readings from the stream(s) specified by the selector. When the stream walker is done, it should be passed to destroy_walker so that it is removed from internal lists that are used to always keep it in sync. Args: selector (DataStreamSelector): The selector describing the streams that we want to iterate over. skip_all (bool): Whether to start at the beginning of the data or to skip everything and start at the end. Defaults to skipping everything. This parameter only has any effect on buffered stream selectors. Returns: StreamWalker: A properly updating stream walker with the given selector.
codesearchnet
def span_to_answer(self, text: str, start: int, end: int) -> Dict[str, Union[str, int]]: words = [] token_idx = char_start_idx = char_end_idx = chars_idx = 0 for i, word in enumerate(text.split(' ')): token = self.tokenizer.tokenize(word) if start <= token_idx <= end: if token_idx == start: char_start_idx = chars_idx if token_idx == end: char_end_idx = chars_idx + len(word) words += [word] if token_idx > end: break token_idx += len(token) chars_idx += len(word) + 1 return {'answer': ' '.join(words), 'start': max(0, char_start_idx), 'end': min(len(text), char_end_idx)}
When decoding from token probabilities, this method maps token indexes to actual word in the initial context. Args: text (`str`): The actual context to extract the answer from. start (`int`): The answer starting token index. end (`int`): The answer end token index. Returns: Dictionary like `{'answer': str, 'start': int, 'end': int}`
github-repos
def _ComputeUniquifier(self, debuggee):
    uniquifier = hashlib.sha1()
    if ('minorversion' not in debuggee.get('labels', [])) and ('sourceContexts' not in debuggee):
        uniquifier_computer.ComputeApplicationUniquifier(uniquifier)
    return uniquifier.hexdigest()
Computes debuggee uniquifier. The debuggee uniquifier has to be identical on all instances. Therefore the uniquifier should not include any random numbers and should only be based on inputs that are guaranteed to be the same on all instances. Args: debuggee: complete debuggee message without the uniquifier Returns: Hex string of SHA1 hash of project information, debuggee labels and debuglet version.
codesearchnet
def CreateDynamicDisplayAdSettings(client, opener):
    media_service = client.GetService('MediaService', 'v201809')
    # NOTE: the logo image URL is truncated in the source; only 'https:' survives.
    logo = {'xsi_type': 'Image',
            'mediaId': _CreateImage(media_service, opener, 'https:')}
    dynamic_settings = {'landscapeLogoImage': logo,
                        'pricePrefix': 'as low as',
                        'promoText': 'Free shipping!'}
    return dynamic_settings
Creates dynamic display ad settings. Args: client: an AdWordsClient instance. opener: an OpenerDirector instance. Returns: A dict containing the dynamic display ad settings.
codesearchnet
def update_config_pwd(msg, cfg): msg_type = msg.__class__.__name__.lower() key_fmt = ((msg.profile + '_') + msg_type) if isinstance(msg._auth, (MutableSequence, tuple)): cfg.pwd[key_fmt] = ' :: '.join(msg._auth) else: cfg.pwd[key_fmt] = msg._auth
Updates the profile's auth entry with values set by the user. This will overwrite existing values. Args: :msg: (Message class) an instance of a message class. :cfg: (jsonconfig.Config) config instance.
codesearchnet
def wrap_inference_results(inference_result_proto):
    inference_proto = inference_pb2.InferenceResult()
    if isinstance(inference_result_proto, classification_pb2.ClassificationResponse):
        inference_proto.classification_result.CopyFrom(inference_result_proto.result)
    elif isinstance(inference_result_proto, regression_pb2.RegressionResponse):
        inference_proto.regression_result.CopyFrom(inference_result_proto.result)
    return inference_proto
Returns packaged inference results from the provided proto. Args: inference_result_proto: The classification or regression response proto. Returns: An InferenceResult proto with the result from the response.
juraj-google-style
def _analemma_position(self, hour):
    low = self.calculate_sun(12, 21, hour).is_during_day
    high = self.calculate_sun(6, 21, hour).is_during_day
    if low and high:
        return 1
    elif low or high:
        return 0
    else:
        return -1
Check what the analemma position is for an hour. This is useful for calculating hours of analemma curves. Returns: -1 if always night, 0 if both day and night, 1 if always day.
codesearchnet
def collect_hunt_results(self, hunt): if not os.path.isdir(self.output_path): os.makedirs(self.output_path) output_file_path = os.path.join( self.output_path, '.'.join((self.hunt_id, 'zip'))) if os.path.exists(output_file_path): print('{0:s} already exists: Skipping'.format(output_file_path)) return None self._check_approval_wrapper( hunt, self._get_and_write_archive, hunt, output_file_path) results = self._extract_hunt_results(output_file_path) print('Wrote results of {0:s} to {1:s}'.format( hunt.hunt_id, output_file_path)) return results
Download current set of files in results. Args: hunt: The GRR hunt object to download files from. Returns: list: tuples containing: str: human-readable description of the source of the collection. For example, the name of the source host. str: path to the collected data. Raises: ValueError: if approval is needed and approvers were not specified.
juraj-google-style
def hugepage_support(user, group='hugetlb', nr_hugepages=256, max_map_count=65536, mnt_point='/run/hugepages/kvm', pagesize='2MB', mount=True, set_shmmax=False): group_info = add_group(group) gid = group_info.gr_gid add_user_to_group(user, group) if max_map_count < 2 * nr_hugepages: max_map_count = 2 * nr_hugepages sysctl_settings = { 'vm.nr_hugepages': nr_hugepages, 'vm.max_map_count': max_map_count, 'vm.hugetlb_shm_group': gid, } if set_shmmax: shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax'])) shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages if shmmax_minsize > shmmax_current: sysctl_settings['kernel.shmmax'] = shmmax_minsize sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) lfstab = fstab.Fstab() fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) if fstab_entry: lfstab.remove_entry(fstab_entry) entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) lfstab.add_entry(entry) if mount: fstab_mount(mnt_point)
Enable hugepages on system. Args: user (str) -- Username to allow access to hugepages to group (str) -- Group name to own hugepages nr_hugepages (int) -- Number of pages to reserve max_map_count (int) -- Number of Virtual Memory Areas a process can own mnt_point (str) -- Directory to mount hugepages on pagesize (str) -- Size of hugepages mount (bool) -- Whether to Mount hugepages
juraj-google-style
def get_HDX_code_from_location_partial(location, locations=None, configuration=None): hdx_code = Locations.get_HDX_code_from_location(location, locations, configuration) if hdx_code is not None: return hdx_code, True if locations is None: locations = Locations.validlocations(configuration) locationupper = location.upper() for locdict in locations: locationname = locdict['title'].upper() if locationupper in locationname or locationname in locationupper: return locdict['name'].upper(), False return None, False
Get HDX code for location Args: location (str): Location for which to get HDX code locations (Optional[List[Dict]]): Valid locations list. Defaults to list downloaded from HDX. configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. Returns: Tuple[Optional[str], bool]: HDX code and if the match is exact or (None, False) for no match
juraj-google-style
def validate(self, message): if hasattr(message, '__json__'): message = message.__json__() if isinstance(message['body'], six.text_type): message['body'] = json.loads(message['body']) elif isinstance(message['body'], six.binary_type): warnings.warn('Message body is not unicode', DeprecationWarning) message['body'] = json.loads(message['body'].decode('utf-8')) if ('topic' not in message['body']): message['body'] = {'topic': message.get('topic'), 'msg': message['body']} if (not self.validate_signatures): return if (not (message['topic'] == message['body']['topic'])): raise RuntimeWarning('Topic envelope mismatch.') if (not fedmsg.crypto.validate(message['body'], **self.hub.config)): raise RuntimeWarning('Failed to authn message.')
Validate the message before the consumer processes it. This needs to raise an exception, caught by moksha. Args: message (dict): The message as a dictionary. This must, at a minimum, contain the 'topic' key with a unicode string value and 'body' key with a dictionary value. However, the message might also be an object with a ``__json__`` method that returns a dict with a 'body' key that can be a unicode string that is JSON-encoded. Raises: RuntimeWarning: If the message is not valid. UnicodeDecodeError: If the message body is not unicode or UTF-8 and also happens to contain invalid UTF-8 binary.
codesearchnet
def AddWarning(self, warning):
    self._RaiseIfNotWritable()
    self._AddAttributeContainer(
        self._CONTAINER_TYPE_EXTRACTION_WARNING, warning)
Adds a warning. Args: warning (ExtractionWarning): warning. Raises: IOError: when the storage file is closed or read-only. OSError: when the storage file is closed or read-only.
juraj-google-style
def windows(self):
    from foxpuppet.windows import BrowserWindow
    return [BrowserWindow(self.selenium, handle)
            for handle in self.selenium.window_handles]
Return a list of all open windows. Returns: list: List of FoxPuppet BrowserWindow objects.
codesearchnet
def objects_delete(self, bucket, key): url = Api._ENDPOINT + (Api._OBJECT_PATH % (bucket, Api._escape_key(key))) datalab.utils.Http.request(url, method='DELETE', credentials=self._credentials, raw_response=True)
Deletes the specified object. Args: bucket: the name of the bucket. key: the key of the object within the bucket. Raises: Exception if there is an error performing the operation.
juraj-google-style
def notify(self, notices): tmpl_html = get_template('required_tags_notice.html') tmpl_text = get_template('required_tags_notice.txt') for recipient, data in list(notices.items()): body_html = tmpl_html.render(data=data) body_text = tmpl_text.render(data=data) send_notification( subsystem=self.ns, recipients=[recipient], subject=self.email_subject, body_html=body_html, body_text=body_text )
Send notifications to the recipients provided Args: notices (:obj:`dict` of `str`: `list`): A dictionary mapping notification messages to the recipient. Returns: `None`
juraj-google-style
def _PromptUserForEncryptedVolumeCredential(self, scan_context, locked_scan_node, output_writer): credentials = credentials_manager.CredentialsManager.GetCredentials(locked_scan_node.path_spec) if (locked_scan_node.type_indicator == definitions.TYPE_INDICATOR_APFS_CONTAINER): line = 'Found an APFS encrypted volume.' elif (locked_scan_node.type_indicator == definitions.TYPE_INDICATOR_BDE): line = 'Found a BitLocker encrypted volume.' elif (locked_scan_node.type_indicator == definitions.TYPE_INDICATOR_FVDE): line = 'Found a CoreStorage (FVDE) encrypted volume.' else: line = 'Found an encrypted volume.' output_writer.WriteLine(line) credentials_list = list(credentials.CREDENTIALS) credentials_list.append('skip') output_writer.WriteLine('Supported credentials:') output_writer.WriteLine('') for (index, name) in enumerate(credentials_list): output_writer.WriteLine(' {0:d}. {1:s}'.format((index + 1), name)) output_writer.WriteLine('') result = False while (not result): output_writer.WriteString('Select a credential to unlock the volume: ') input_line = sys.stdin.readline() input_line = input_line.strip() if (input_line in credentials_list): credential_identifier = input_line else: try: credential_identifier = int(input_line, 10) credential_identifier = credentials_list[(credential_identifier - 1)] except (IndexError, ValueError): output_writer.WriteLine('Unsupported credential: {0:s}'.format(input_line)) continue if (credential_identifier == 'skip'): break getpass_string = 'Enter credential data: ' if (sys.platform.startswith('win') and (sys.version_info[0] < 3)): getpass_string = self._EncodeString(getpass_string) credential_data = getpass.getpass(getpass_string) output_writer.WriteLine('') result = self._source_scanner.Unlock(scan_context, locked_scan_node.path_spec, credential_identifier, credential_data) if (not result): output_writer.WriteLine('Unable to unlock volume.') output_writer.WriteLine('')
Prompts the user to provide a credential for an encrypted volume. Args: scan_context (SourceScannerContext): the source scanner context. locked_scan_node (SourceScanNode): the locked scan node. output_writer (StdoutWriter): the output writer.
codesearchnet
def write_bottom_half(f, row_metadata_df, data_df, data_null, data_float_format, metadata_null): size_of_left_bottom_half_df = (row_metadata_df.shape[0], 1 + row_metadata_df.shape[1]) left_bottom_half_df = pd.DataFrame(np.full(size_of_left_bottom_half_df, metadata_null, dtype=object)) bottom_half_df = pd.concat([left_bottom_half_df, data_df.reset_index(drop=True)], axis=1) bottom_half_df.columns = range(bottom_half_df.shape[1]) bottom_half_df.iloc[:, 0] = row_metadata_df.index.values row_metadata_col_indices = range(1, 1 + row_metadata_df.shape[1]) bottom_half_df.iloc[:, row_metadata_col_indices] = ( row_metadata_df.astype(str).replace("nan", value=metadata_null).values) bottom_half_df.to_csv(f, header=False, index=False, sep="\t", na_rep=data_null, float_format=data_float_format)
Write the bottom half of the gct file: row metadata and data. Args: f (file handle): handle for output file row_metadata_df (pandas df) data_df (pandas df) data_null (string): how to represent missing values in the data metadata_null (string): how to represent missing values in the metadata data_float_format (string): how many decimal points to keep in representing data Returns: None
juraj-google-style
def read_value(self):
    return array_ops.identity(self._variable, name='read')
Returns the value of this variable, read in the current context. Can be different from value() if it's on another device, with control dependencies, etc. Returns: A `Tensor` containing the value of the variable.
github-repos
def getHostCaPath(self, name):
    cert = self.getHostCert(name)
    if cert is None:
        return None
    return self._getCaPath(cert)
Gets the path to the CA certificate that issued a given host keypair. Args: name (str): The name of the host keypair. Examples: Get the path to the CA cert which issued the cert for "myhost": mypath = cdir.getHostCaPath('myhost') Returns: str: The path if it exists.
codesearchnet
def decode(message, pblite, ignore_first_item=False): if (not isinstance(pblite, list)): logger.warning('Ignoring invalid message: expected list, got %r', type(pblite)) return if ignore_first_item: pblite = pblite[1:] if (pblite and isinstance(pblite[(- 1)], dict)): extra_fields = {int(field_number): value for (field_number, value) in pblite[(- 1)].items()} pblite = pblite[:(- 1)] else: extra_fields = {} fields_values = itertools.chain(enumerate(pblite, start=1), extra_fields.items()) for (field_number, value) in fields_values: if (value is None): continue try: field = message.DESCRIPTOR.fields_by_number[field_number] except KeyError: if (value not in [[], '', 0]): logger.debug('Message %r contains unknown field %s with value %r', message.__class__.__name__, field_number, value) continue if (field.label == FieldDescriptor.LABEL_REPEATED): _decode_repeated_field(message, field, value) else: _decode_field(message, field, value)
Decode pblite to Protocol Buffer message. This method is permissive of decoding errors and will log them as warnings and continue decoding where possible. The first element of the outer pblite list must often be ignored using the ignore_first_item parameter because it contains an abbreviation of the name of the protobuf message (eg. cscmrp for ClientSendChatMessageResponseP) that's not part of the protobuf. Args: message: protocol buffer message instance to decode into. pblite: list representing a pblite-serialized message. ignore_first_item: If True, ignore the item at index 0 in the pblite list, making the item at index 1 correspond to field 1 in the message.
codesearchnet
def errorhandler_callback(cls, exc): if exc.flash_message: flash(exc.flash_message, exc.flash_level) if (exc.redirect is not MISSING): return redirect(url_for(exc.redirect, **exc.redirect_args)) error_result = exc.error_page() if (error_result is not None): return (error_result, (exc.status_code or 500))
This function should be called in the global error handlers. This will allow for consolidating of cleanup tasks if the exception bubbles all the way to the top of the stack. For example, this method will automatically rollback the database session if the exception bubbles to the top. This is the method that :meth:`register_errorhandler` adds as an errorhandler. See the documentation there for more info. Args: exc (FleakerBaseException): The exception that was thrown that we are to handle.
codesearchnet
def format_snippet(sensor_graph): output = [] output.append("disable") output.append("clear") output.append("reset") for node in sensor_graph.dump_nodes(): output.append('add_node "{}"'.format(node)) for streamer in sensor_graph.streamers: line = "add_streamer '{}' '{}' {} {} {}".format(streamer.selector, streamer.dest, streamer.automatic, streamer.format, streamer.report_type) if streamer.with_other is not None: line += ' --withother {}'.format(streamer.with_other) output.append(line) for stream, value in sorted(sensor_graph.constant_database.items(), key=lambda x: x[0].encode()): output.append("set_constant '{}' {}".format(stream, value)) output.append("persist") output.append("back") app_tag = sensor_graph.metadata_database.get('app_tag') app_version = sensor_graph.metadata_database.get('app_version') if app_tag is not None: if app_version is None: app_version = "0.0" output.append("test_interface") output.append("set_version app %d --version '%s'" % (app_tag, app_version)) output.append("back") output.append("config_database") output.append("clear_variables") for slot, conf_vars in sensor_graph.config_database.items(): for conf_var, conf_def in conf_vars.items(): conf_type, conf_val = conf_def if conf_type == 'binary': conf_val = 'hex:' + hexlify(conf_val) elif isinstance(conf_val, str): conf_val = '"%s"' % conf_val output.append("set_variable '{}' {} {} {}".format(slot, conf_var, conf_type, conf_val)) output.append("back") output.append("reset") return "\n".join(output) + '\n'
Format this sensor graph as iotile command snippets. This includes commands to reset and clear previously stored sensor graphs. Args: sensor_graph (SensorGraph): the sensor graph that we want to format
juraj-google-style
def run_metadata_graphs(name, data, step=None): summary_metadata = summary_pb2.SummaryMetadata() summary_metadata.plugin_data.plugin_name = 'graph_run_metadata_graph' summary_metadata.plugin_data.content = b'1' data = config_pb2.RunMetadata(function_graphs=data.function_graphs, partition_graphs=data.partition_graphs) with summary_scope(name, 'graph_run_metadata_graph_summary', [data, step]) as (tag, _): with ops.device('cpu:0'): tensor = constant_op.constant(data.SerializeToString(), dtype=dtypes.string) return write(tag=tag, tensor=tensor, step=step, metadata=summary_metadata)
Writes graphs from a RunMetadata summary. Args: name: A name for this summary. The summary tag used for TensorBoard will be this name prefixed by any active name scopes. data: A RunMetadata proto to write. step: Explicit `int64`-castable monotonic step value for this summary. If omitted, this defaults to `tf.summary.experimental.get_step()`, which must not be None. Returns: True on success, or false if no summary was written because no default summary writer was available. Raises: ValueError: if a default writer exists, but no step was provided and `tf.summary.experimental.get_step()` is None.
github-repos
def to(self, device: Optional[torch.device], dtype: Optional[torch.dtype]) -> Rotation: if self._rot_mats is not None: return Rotation(rot_mats=self._rot_mats.to(device=device, dtype=dtype), quats=None) elif self._quats is not None: return Rotation(rot_mats=None, quats=self._quats.to(device=device, dtype=dtype), normalize_quats=False) else: raise ValueError('Both rotations are None')
Analogous to the to() method of torch Tensors Args: device: A torch device dtype: A torch dtype Returns: A copy of the Rotation using the new device and dtype
github-repos
def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask):
    exp_blocked_to_pad = torch.cat(
        [to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], to_blocked_mask[:, 3:-1]], dim=2)
    band_mask = torch.einsum('blq,blk->blqk', from_blocked_mask[:, 2:-2], exp_blocked_to_pad)
    band_mask.unsqueeze_(1)
    return band_mask
Create 3D attention mask from a 2D tensor mask. Args: from_blocked_mask: 2D Tensor of shape [batch_size, from_seq_length//from_block_size, from_block_size]. to_blocked_mask: int32 Tensor of shape [batch_size, to_seq_length//to_block_size, to_block_size]. Returns: float Tensor of shape [batch_size, 1, from_seq_length//from_block_size-4, from_block_size, 3*to_block_size].
github-repos
def build_from_config(self, config):
    if config:
        if 'input_shape' in config:
            self.build(config['input_shape'])
        elif 'shapes_dict' in config:
            self.build(**config['shapes_dict'])
Builds the layer's states with the supplied config dict. By default, this method calls the `build(config["input_shape"])` method, which creates weights based on the layer's input shape in the supplied config. If your config contains other information needed to load the layer's state, you should override this method. Args: config: Dict containing the input shape associated with this layer.
github-repos
def __init__(self, length=None, experimenter=None):
    super().__init__(action_type=ActionType.OFPAT_EXPERIMENTER)
    self.length = length
    self.experimenter = experimenter
Create ActionExperimenterHeader with the optional parameters below. Args: experimenter (int): The experimenter field is the Experimenter ID, which takes the same form as in struct ofp_experimenter.
juraj-google-style
def _get_new_alive_state(self, new_seq, new_log_probs, new_cache):
    new_finished_flags = tf.equal(new_seq[:, :, -1], self.eos_id)
    new_log_probs += tf.to_float(new_finished_flags) * -INF
    (top_alive_seq, top_alive_log_probs, top_alive_cache) = _gather_topk_beams(
        [new_seq, new_log_probs, new_cache], new_log_probs, self.batch_size, self.beam_size)
    return {_StateKeys.ALIVE_SEQ: top_alive_seq,
            _StateKeys.ALIVE_LOG_PROBS: top_alive_log_probs,
            _StateKeys.ALIVE_CACHE: top_alive_cache}
Gather the top k sequences that are still alive. Args: new_seq: New sequences generated by growing the current alive sequences int32 tensor with shape [batch_size, 2 * beam_size, cur_index + 1] new_log_probs: Log probabilities of new sequences float32 tensor with shape [batch_size, beam_size] new_cache: Dict of cached values for each sequence. Returns: Dictionary with alive keys from _StateKeys: {Top beam_size sequences that are still alive (don't end with eos_id) Log probabilities of top alive sequences Dict cache storing decoder states for top alive sequences}
codesearchnet
def _process_for_docstring(self, node, node_type): if (node.doc is not None): if (node_type == 'module'): if (not node.body): for key in list(self._tokenized_triple_quotes.keys()): quote_record = self._tokenized_triple_quotes.get(key) if quote_record: self._check_docstring_quotes(quote_record) del self._tokenized_triple_quotes[key] else: for i in range(0, node.body[0].lineno): quote_record = self._tokenized_triple_quotes.get(i) if quote_record: self._check_docstring_quotes(quote_record) del self._tokenized_triple_quotes[i] break elif (not node.body): lineno = self._find_docstring_line_for_no_body(node.fromlineno) quote_record = self._tokenized_triple_quotes.get(lineno) if quote_record: self._check_docstring_quotes(quote_record) del self._tokenized_triple_quotes[lineno] else: doc_row = self._find_docstring_line(node.fromlineno, node.tolineno) quote_record = self._tokenized_triple_quotes.get(doc_row) if quote_record: self._check_docstring_quotes(quote_record) del self._tokenized_triple_quotes[doc_row]
Check for docstring quote consistency. Args: node: the AST node being visited. node_type: the type of node being operated on.
codesearchnet
def run_exit_code(self, returncode): exit_status = False self.log.info('[run] Exit Code {}'.format(returncode)) self.reports.increment_total() valid_exit_codes = self.profile.get('exit_codes', [0]) self.reports.exit_code(returncode) if returncode in valid_exit_codes: exit_status = True self.reports.profile_execution(True) print('App Exit Code: {}{}{}'.format(c.Style.BRIGHT, c.Fore.GREEN, returncode)) else: print( 'App Exit Code: {}{}{}{} (Valid Exit Codes: {})'.format( c.Style.BRIGHT, c.Fore.RED, returncode, c.Fore.RESET, self.profile.get('exit_codes', [0]), ) ) self.reports.profile_execution(False) self.exit_code = 1 if self.args.halt_on_fail: raise RuntimeError('App exited with invalid exit code {}'.format(returncode)) return exit_status
Handle the exit code for the current run. Args: returncode (int): The return exit code. Raises: RuntimeError: Raise on invalid exit code if halt_on_fail is True. Returns: bool: True if exit code is a valid exit code, else False.
juraj-google-style
def _add_property(self, name, default_value):
    name = str(name)
    self._properties[name] = default_value
Add a device property with a given default value. Args: name (str): The name of the property to add default_value (int, bool): The value of the property
juraj-google-style
def non_transactional(func, args, kwds, allow_existing=True): from . import tasklets ctx = tasklets.get_context() if (not ctx.in_transaction()): return func(*args, **kwds) if (not allow_existing): raise datastore_errors.BadRequestError(('%s cannot be called within a transaction.' % func.__name__)) save_ctx = ctx while ctx.in_transaction(): ctx = ctx._parent_context if (ctx is None): raise datastore_errors.BadRequestError('Context without non-transactional ancestor') save_ds_conn = datastore._GetConnection() try: if hasattr(save_ctx, '_old_ds_conn'): datastore._SetConnection(save_ctx._old_ds_conn) tasklets.set_context(ctx) return func(*args, **kwds) finally: tasklets.set_context(save_ctx) datastore._SetConnection(save_ds_conn)
A decorator that ensures a function is run outside a transaction. If there is an existing transaction (and allow_existing=True), the existing transaction is paused while the function is executed. Args: allow_existing: If false, throw an exception if called from within a transaction. If true, temporarily re-establish the previous non-transactional context. Defaults to True. This supports two forms, similar to transactional(). Returns: A wrapper for the decorated function that ensures it runs outside a transaction.
codesearchnet
def __chunk(segment, abbr=False): names = ('north', 'east', 'south', 'west', 'north') if (not abbr): sjoin = '-' else: names = [s[0].upper() for s in names] sjoin = '' if ((segment % 2) == 0): return (names[segment].capitalize(), sjoin.join((names[segment].capitalize(), names[segment], names[(segment + 1)])), sjoin.join((names[segment].capitalize(), names[(segment + 1)])), sjoin.join((names[(segment + 1)].capitalize(), names[segment], names[(segment + 1)]))) else: return (names[segment].capitalize(), sjoin.join((names[segment].capitalize(), names[(segment + 1)], names[segment])), sjoin.join((names[(segment + 1)].capitalize(), names[segment])), sjoin.join((names[(segment + 1)].capitalize(), names[(segment + 1)], names[segment])))
Generate a ``tuple`` of compass direction names. Args: segment (int): Compass segment to generate names for abbr (bool): Names should use single letter abbreviations Returns: tuple: Direction names for compass segment
codesearchnet
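Worked values for the __chunk helper above, traced directly from the code (segment 0 starts at north; abbr=True switches to single-letter joins):

print(__chunk(0))
# ('North', 'North-north-east', 'North-east', 'East-north-east')
print(__chunk(1, abbr=True))
# ('E', 'ESE', 'SE', 'SSE')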
def fts_intersection(self, segs):
    fts_vecs = [self.fts(s) for s in self.filter_segs(segs)]
    return reduce(lambda a, b: a & b, fts_vecs)
Return the features shared by `segs` Args: segs (list): list of Unicode IPA segments Returns: set: set of (value, feature) tuples shared by the valid segments in `segs`
juraj-google-style
def get_imported_namespaces(self, must_have_imported_data_type=False, consider_annotations=False, consider_annotation_types=False): imported_namespaces = [] for (imported_namespace, reason) in self._imported_namespaces.items(): if (must_have_imported_data_type and (not reason.data_type)): continue if ((not consider_annotations) and (not (reason.data_type or reason.alias or reason.annotation_type))): continue if ((not consider_annotation_types) and (not (reason.data_type or reason.alias or reason.annotation))): continue imported_namespaces.append(imported_namespace) imported_namespaces.sort(key=(lambda n: n.name)) return imported_namespaces
Returns a list of Namespace objects. A namespace is a member of this list if it is imported by the current namespace and a data type is referenced from it. Namespaces are in ASCII order by name. Args: must_have_imported_data_type (bool): If true, result does not include namespaces that were not imported for data types. consider_annotations (bool): If false, result does not include namespaces that were only imported for annotations consider_annotation_types (bool): If false, result does not include namespaces that were only imported for annotation types. Returns: List[Namespace]: A list of imported namespaces.
codesearchnet
def ldr(scatterer, h_pol=True):
    Z = scatterer.get_Z()
    if h_pol:
        return (Z[0,0] - Z[0,1] + Z[1,0] - Z[1,1]) / \
               (Z[0,0] - Z[0,1] - Z[1,0] + Z[1,1])
    else:
        return (Z[0,0] + Z[0,1] - Z[1,0] - Z[1,1]) / \
               (Z[0,0] + Z[0,1] + Z[1,0] + Z[1,1])
Linear depolarizarion ratio (LDR) for the current setup. Args: scatterer: a Scatterer instance. h_pol: If True (default), return LDR_h. If False, return LDR_v. Returns: The LDR.
juraj-google-style
def __batch_update(self, train_events, test_events, n_epoch):
    for epoch in range(n_epoch):
        if n_epoch != 1:
            np.random.shuffle(train_events)
        for e in train_events:
            self.rec.update(e, batch_train=True)
        MPR = self.__batch_evaluate(test_events)
        if self.debug:
            logger.debug('epoch %2d: MPR = %f' % (epoch + 1, MPR))
Batch update called by the fitting method. Args: train_events (list of Event): Positive training events. test_events (list of Event): Test events. n_epoch (int): Number of epochs for the batch training.
codesearchnet
def update_exif_for_rotated_image(exif): orientation_value = exif.get('0th', ).get( piexif.ImageIFD.Orientation, exif.get('1st', ).get( piexif.ImageIFD.Orientation, None)) if orientation_value is not None: exif['0th'][piexif.ImageIFD.Orientation] = 1 if exif.get('1st', {}).get(piexif.ImageIFD.Orientation) is not None: exif['1st'][piexif.ImageIFD.Orientation] = 1 if orientation_value > 4: for exif_tag in ['0th', '1st']: if exif.get(exif_tag) is not None: x, y = (exif.get(exif_tag).get(piexif.ImageIFD.ImageWidth), exif.get(exif_tag).get(piexif.ImageIFD.ImageLength)) if x is not None and y is not None: exif[exif_tag][piexif.ImageIFD.ImageWidth] = y exif[exif_tag][piexif.ImageIFD.ImageLength] = x x, y = (exif.get(exif_tag).get(piexif.ImageIFD.XResolution), exif.get(exif_tag).get(piexif.ImageIFD.YResolution)) if x is not None and y is not None: exif[exif_tag][piexif.ImageIFD.XResolution] = y exif[exif_tag][piexif.ImageIFD.YResolution] = x x, y = (exif.get(exif_tag).get(piexif.ImageIFD.TileWidth), exif.get(exif_tag).get(piexif.ImageIFD.TileLength)) if x is not None and y is not None: exif[exif_tag][piexif.ImageIFD.TileWidth] = y exif[exif_tag][piexif.ImageIFD.TileLength] = x if exif.get('Exif') is not None: x, y = (exif.get('Exif').get(piexif.ExifIFD.PixelXDimension), exif.get('Exif').get(piexif.ExifIFD.PixelYDimension)) if x is not None and y is not None: exif['Exif'][piexif.ExifIFD.PixelXDimension] = y exif['Exif'][piexif.ExifIFD.PixelYDimension] = x if exif.get('thumbnail') is not None: try: thumbnail = pil_open(io.BytesIO(exif.get('thumbnail'))) thumbnail = autorotate(thumbnail, orientation=orientation_value) with io.BytesIO() as bio: thumbnail.save(bio, format='jpeg') bio.seek(0) exif['thumbnail'] = bio.read() except Exception as e: warnings.warn("deprecated", DeprecationWarning) return exif
Modifies the Exif tag if rotation has been performed. 0th, 1st -------- ImageWidth = 256 ImageLength = 257 XResolution = 282 YResolution = 283 TileWidth = 322 TileLength = 323 Exif ---- PixelXDimension = 40962 PixelYDimension = 40963 Args: exif (dict): The parsed Exif tag Returns: The modified Exif dict.
juraj-google-style
class Siglip2Encoder(nn.Module): def __init__(self, config: Siglip2Config): super().__init__() self.config = config self.layers = nn.ModuleList([Siglip2EncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False @can_return_tuple def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> BaseModelOutput: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for encoder_layer in self.layers: if output_hidden_states: encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`Siglip2EncoderLayer`]. Args: config: Siglip2Config
github-repos
def swd_sync(self, pad=False):
    if pad:
        self._dll.JLINK_SWD_SyncBytes()
    else:
        self._dll.JLINK_SWD_SyncBits()
    return None
Causes a flush to write all data remaining in output buffers to SWD device. Args: self (JLink): the ``JLink`` instance pad (bool): ``True`` if should pad the data to full byte size Returns: ``None``
juraj-google-style
def ExamineEvent(self, mediator, event):
    event_data_type = getattr(event, 'data_type', '')
    if event_data_type == 'windows:registry:service':
        service = WindowsService.FromEvent(event)
        self._service_collection.AddService(service)
Analyzes an event and creates Windows Services as required. At present, this method only handles events extracted from the Registry. Args: mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfvfs. event (EventObject): event to examine.
juraj-google-style
def execute(desktop_file, files=None, return_cmd=False, background=False): desktop_file_exec = parse(desktop_file)['Exec'] for i in desktop_file_exec.split(): if i.startswith('%'): desktop_file_exec = desktop_file_exec.replace(i, '') desktop_file_exec = desktop_file_exec.replace(r'%F', '') desktop_file_exec = desktop_file_exec.replace(r'%f', '') if files: for i in files: desktop_file_exec += ' ' + i if parse(desktop_file)['Terminal']: desktop_file_exec = eval( ('__import__("libdesktop").applications.terminal(exec_="%s",' ' keep_open_after_cmd_exec=True, return_cmd=True)') % desktop_file_exec) if return_cmd: return desktop_file_exec desktop_file_proc = sp.Popen([desktop_file_exec], shell=True) if not background: desktop_file_proc.wait()
Execute a .desktop file. Executes a given .desktop file path properly. Args: desktop_file (str) : The path to the .desktop file. files (list): Any files to be launched by the .desktop. Defaults to empty list. return_cmd (bool): Return the command (as ``str``) instead of executing. Defaults to ``False``. background (bool): Run command in background. Defaults to ``False``. Returns: str: Only if ``return_cmd``. Returns command instead of running it. Else returns nothing.
juraj-google-style
def __init__(self, n_output_node, input_shape):
    super(MlpGenerator, self).__init__(n_output_node, input_shape)
    if len(self.input_shape) > 1:
        raise ValueError("The input dimension is too high.")
Initialize the instance. Args: n_output_node: An integer. Number of output nodes in the network. input_shape: A tuple. Input shape of the network. If it is 1D, ensure the value is appended by a comma in the tuple.
juraj-google-style
def get_error_intro(tf_error): if hasattr(tf_error, 'op') and hasattr(tf_error.op, 'name'): op_name = tf_error.op.name else: op_name = None intro_lines = ['--------------------------------------', RL('!!! An error occurred during the run !!!', 'blink'), ''] out = debugger_cli_common.rich_text_lines_from_rich_line_list(intro_lines) if op_name is not None: out.extend(debugger_cli_common.RichTextLines(['You may use the following commands to debug:'])) out.extend(_recommend_command('ni -a -d -t %s' % op_name, 'Inspect information about the failing op.', create_link=True)) out.extend(_recommend_command('li -r %s' % op_name, 'List inputs to the failing op, recursively.', create_link=True)) out.extend(_recommend_command('lt', 'List all tensors dumped during the failing run() call.', create_link=True)) else: out.extend(debugger_cli_common.RichTextLines(['WARNING: Cannot determine the name of the op that caused the error.'])) more_lines = ['', 'Op name: %s' % op_name, 'Error type: ' + str(type(tf_error)), '', 'Details:', str(tf_error), '', '--------------------------------------', ''] out.extend(debugger_cli_common.RichTextLines(more_lines)) return out
Generate formatted intro for TensorFlow run-time error. Args: tf_error: (errors.OpError) TensorFlow run-time error object. Returns: (RichTextLines) Formatted intro message about the run-time OpError, with sample commands for debugging.
github-repos
def im_open(self, *, user: str, **kwargs) -> SlackResponse:
    kwargs.update({"user": user})
    return self.api_call("im.open", json=kwargs)
Opens a direct message channel. Args: user (str): The user id to open a DM with. e.g. 'W1234567890'
juraj-google-style
def __rtruediv__(self, other):
    # Error message completed per the docstring: point the caller at // instead.
    raise TypeError("unsupported operand type(s) for /: '{}' and 'Dimension', "
                    "please use // instead".format(type(other).__name__))
Use `__floordiv__` via `x // y` instead. This function exists only to have a better error message. Instead of: `TypeError: unsupported operand type(s) for /: 'int' and 'Dimension'`, this function will explicitly call for usage of `//` instead. Args: other: Another `Dimension`. Raises: TypeError.
github-repos
def user_has_access(self, user):
    if ROLE_ADMIN in user.roles:
        return True
    if self.enabled:
        if not self.required_roles:
            return True
        for role in self.required_roles:
            if role in user.roles:
                return True
    return False
Check if a user has access to view information for the account Args: user (:obj:`User`): User object to check Returns: True if user has access to the account, else false
juraj-google-style
def get_unique_tags(field_to_obs):
    return {field: sorted(set([x.get('tag', '') for x in observations]))
            for field, observations in field_to_obs.items()
            if field in TAG_FIELDS}
Returns a dictionary of tags that a user could query over. Args: field_to_obs: Dict that maps string field to `Observation` list. Returns: A dict that maps keys in `TAG_FIELDS` to a list of string tags present in the event files. If the dict does not have any observations of the type, maps to an empty list so that we can render this to console.
juraj-google-style
def validate_bindings(bindings): if (not isinstance(bindings, (list, tuple))): raise exceptions.ConfigurationException('bindings must be a list or tuple of dictionaries, but was a {}'.format(type(bindings))) for binding in bindings: missing_keys = [] for key in ('queue', 'exchange', 'routing_keys'): if (key not in binding): missing_keys.append(key) if missing_keys: raise exceptions.ConfigurationException('a binding is missing the following keys from its settings value: {}'.format(missing_keys)) if (not isinstance(binding['routing_keys'], (list, tuple))): raise exceptions.ConfigurationException('routing_keys must be a list or tuple, but was a {}'.format(type(binding['routing_keys'])))
Validate the bindings configuration. Raises: exceptions.ConfigurationException: If the configuration provided is of an invalid format.
codesearchnet
def load_data_and_labels(filename, encoding='utf-8'): (sents, labels) = ([], []) (words, tags) = ([], []) with open(filename, encoding=encoding) as f: for line in f: line = line.rstrip() if line: (word, tag) = line.split('\t') words.append(word) tags.append(tag) else: sents.append(words) labels.append(tags) (words, tags) = ([], []) return (sents, labels)
Loads data and label from a file. Args: filename (str): path to the file. encoding (str): file encoding format. The file format is tab-separated values. A blank line is required at the end of a sentence. For example: ``` EU B-ORG rejects O German B-MISC call O to O boycott O British B-MISC lamb O . O Peter B-PER Blackburn I-PER ... ``` Returns: tuple(numpy array, numpy array): data and labels. Example: >>> filename = 'conll2003/en/ner/train.txt' >>> data, labels = load_data_and_labels(filename)
codesearchnet
def __init__(self, encoding, buffer_size=2048):
    super(EncodedTextReader, self).__init__()
    self._buffer = ''
    self._buffer_size = buffer_size
    self._current_offset = 0
    self._encoding = encoding
    self.lines = ''
Initializes the encoded text reader object. Args: encoding (str): encoding. buffer_size (Optional[int]): buffer size.
juraj-google-style
def beta_to_uni(text, strict=False): param_key = (strict,) try: t = _BETA_CONVERSION_TRIES[param_key] except KeyError: t = _create_conversion_trie(*param_key) _BETA_CONVERSION_TRIES[param_key] = t transform = [] idx = 0 possible_word_boundary = False while idx < len(text): if possible_word_boundary and _penultimate_sigma_word_final(transform): transform[-2] = _FINAL_LC_SIGMA step = t.longest_prefix(text[idx:idx + _MAX_BETA_TOKEN_LEN]) if step: possible_word_boundary = text[idx] in _BETA_PUNCTUATION key, value = step transform.append(value) idx += len(key) else: possible_word_boundary = True transform.append(text[idx]) idx += 1 if possible_word_boundary and _penultimate_sigma_word_final(transform): transform[-2] = _FINAL_LC_SIGMA elif len(transform) > 0 and transform[-1] == _MEDIAL_LC_SIGMA: transform[-1] = _FINAL_LC_SIGMA converted = ''.join(transform) return converted
Converts the given text from betacode to unicode. Args: text: The beta code text to convert. All of this text must be betacode. strict: Flag to allow for flexible diacritic order on input. Returns: The converted text.
juraj-google-style
def read_user_data(self, user_data_path):
    raw_user_data = read_value_from_path(user_data_path)
    variables = self.get_variables()
    return parse_user_data(variables, raw_user_data, self.name)
Reads and parses a user_data file. Args: user_data_path (str): path to the userdata file Returns: str: the parsed user data file
juraj-google-style
def update_user_groups(self, user, claims): if settings.GROUPS_CLAIM is not None: django_groups = [group.name for group in user.groups.all()] if settings.GROUPS_CLAIM in claims: claim_groups = claims[settings.GROUPS_CLAIM] if not isinstance(claim_groups, list): claim_groups = [claim_groups, ] else: logger.debug( "The configured groups claim '{}' was not found in the access token".format(settings.GROUPS_CLAIM)) claim_groups = [] groups_to_remove = set(django_groups) - set(claim_groups) groups_to_add = set(claim_groups) - set(django_groups) for group_name in groups_to_remove: group = Group.objects.get(name=group_name) user.groups.remove(group) logger.debug("User removed from group '{}'".format(group_name)) for group_name in groups_to_add: try: if settings.MIRROR_GROUPS: group, _ = Group.objects.get_or_create(name=group_name) logger.debug("Created group '{}'".format(group_name)) else: group = Group.objects.get(name=group_name) user.groups.add(group) logger.debug("User added to group '{}'".format(group_name)) except ObjectDoesNotExist: pass
Updates user group memberships based on the GROUPS_CLAIM setting. Args: user (django.contrib.auth.models.User): User model instance claims (dict): Claims from the access token
juraj-google-style
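The core of the method above is plain set arithmetic between the user's current Django groups and the groups claim; a small sketch with made-up group names:

```python
# Hypothetical current state and token claim.
django_groups = {'staff', 'legacy-app'}
claim_groups = {'staff', 'engineering'}

groups_to_remove = django_groups - claim_groups   # {'legacy-app'}
groups_to_add = claim_groups - django_groups      # {'engineering'}

print(sorted(groups_to_remove), sorted(groups_to_add))
```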
def from_dict(cls, d): for cat in ['HEADER', 'VERS']: if (cat not in d): d[cat] = None alat = (d['ALAT'] * bohr_to_angstrom) plat = (d['PLAT'] * alat) species = [] positions = [] for site in d['SITE']: species.append(re.split('[0-9*]', site['ATOM'])[0]) positions.append((site['POS'] * alat)) if (('CLASS' in d) and ('SPCGRP' in d) and (len(d['SITE']) == len(d['CLASS']))): try: structure = Structure.from_spacegroup(d['SPCGRP'], plat, species, positions, coords_are_cartesian=True) except ValueError: structure = Structure(plat, species, positions, coords_are_cartesian=True, to_unit_cell=True) else: structure = Structure(plat, species, positions, coords_are_cartesian=True, to_unit_cell=True) return cls(structure, header=d['HEADER'], version=d['VERS'])
Creates a CTRL file object from a dictionary. The dictionary must contain the items "ALAT", "PLAT" and "SITE".

Valid dictionary items are:
    ALAT: the a-lattice parameter
    PLAT: (3x3) array for the lattice vectors
    SITE: list of dictionaries: {'ATOM': class label, 'POS': (3x1) array of fractional coordinates}
    CLASS (optional): list of unique atom labels as str
    SPCGRP (optional): space group symbol (str) or number (int)
    HEADER (optional): HEADER text as a str
    VERS (optional): LMTO version as a str

Args:
    d: The CTRL file as a dictionary.

Returns:
    An LMTOCtrl object.
codesearchnet
def execute_before(self, sensor_graph, scope_stack): parent = scope_stack[-1] new_scope = TriggerScope(sensor_graph, scope_stack, parent.clock(self.interval, basis=self.basis)) scope_stack.append(new_scope)
Execute statement before children are executed. Args: sensor_graph (SensorGraph): The sensor graph that we are building or modifying scope_stack (list(Scope)): A stack of nested scopes that may influence how this statement allocates clocks or other stream resources.
juraj-google-style
def tagged(pode, tag):
    if tag.startswith('#'):
        tag = tag[1:]
    return pode[1]['tags'].get(tag) is not None
Check if a packed node has a given tag. Args: pode (tuple): A packed node. tag (str): The tag to check. Examples: Check if a node is tagged with "woot" and dostuff if it is. if s_node.tagged(node,'woot'): dostuff() Notes: If the tag starts with `#`, this is removed prior to checking. Returns: bool: True if the tag is present. False otherwise.
codesearchnet
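A quick sketch of calling the helper above on a hand-built packed node. The node layout shown is only the minimal `(ndef, info)` tuple the function actually touches, not a full node, and it assumes `tagged` is in scope:

```python
# Packed node: (ndef, info) where info['tags'] maps tag names to values.
pode = (('inet:ipv4', 0x01020304), {'tags': {'woot': (None, None)}})

print(tagged(pode, 'woot'))    # True
print(tagged(pode, '#woot'))   # True  (leading '#' is stripped)
print(tagged(pode, 'newp'))    # False
```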
def raise_not_enough_arguments(self, string): requested = errors.number((self.counter + 1)) number = len(self.positional) verb = ('was' if (number == 1) else 'were') what = "Requested {} formatting argument for '{}' but only {} {} supplied!" what = what.format(requested, string, number, verb) raise errors.ArgumentError(what)
Raises an errors.ArgumentError if not enough arguments were supplied. Takes care of formatting for detailed error messages. Arguments: string (str): The string of the phrase for which there weren't enough arguments. Raises: errors.ArgumentError with a detailed error message.
codesearchnet
def determine_encoding(path, default=None): byte_order_marks = ( ('utf-8-sig', (codecs.BOM_UTF8, )), ('utf-16', (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)), ('utf-32', (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)), ) try: with open(path, 'rb') as infile: raw = infile.read(4) except IOError: return default for encoding, boms in byte_order_marks: if any(raw.startswith(bom) for bom in boms): return encoding return default
Determines the encoding of a file based on byte order marks. Arguments: path (str): The path to the file. default (str, optional): The encoding to return if the byte-order-mark lookup does not return an answer. Returns: str: The encoding of the file.
juraj-google-style
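A runnable sketch that exercises the BOM lookup above with temporary files. It assumes `determine_encoding` is in scope; files without a byte order mark fall back to the default:

```python
import codecs
import tempfile

with tempfile.NamedTemporaryFile(suffix='.txt', delete=False) as utf8_file:
    utf8_file.write(codecs.BOM_UTF8 + 'hello'.encode('utf-8'))

with tempfile.NamedTemporaryFile(suffix='.txt', delete=False) as plain_file:
    plain_file.write(b'hello')  # no byte order mark

print(determine_encoding(utf8_file.name))                    # 'utf-8-sig'
print(determine_encoding(plain_file.name, default='ascii'))  # 'ascii'
```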
def certificate_rabbitmq(self): if (not self.__certificate_rabbitmq): self.__certificate_rabbitmq = CertificateRabbitMQ(self.__connection) return self.__certificate_rabbitmq
Gets the Certificate RabbitMQ API client. Returns: CertificateRabbitMQ:
codesearchnet
def generate_lars_path(weighted_data, weighted_labels): x_vector = weighted_data alphas, _, coefs = lars_path(x_vector, weighted_labels, method='lasso', verbose=False) return alphas, coefs
Generates the lars path for weighted data.

Args:
    weighted_data: data that has been weighted by kernel
    weighted_labels: labels, weighted by kernel

Returns:
    (alphas, coefs), both are arrays corresponding to the regularization parameter and coefficients, respectively
juraj-google-style
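A self-contained sketch of the same `lars_path` call on synthetic data. The kernel weighting shown here (scaling rows and labels by the square root of the weights) is an assumption about how `weighted_data` and `weighted_labels` were produced upstream:

```python
import numpy as np
from sklearn.linear_model import lars_path

rng = np.random.RandomState(0)
X = rng.randn(100, 5)
y = X @ np.array([1.5, 0.0, -2.0, 0.0, 0.5]) + 0.1 * rng.randn(100)

# Hypothetical kernel weights, e.g. derived from distance to a reference point.
weights = np.exp(-np.linalg.norm(X, axis=1) ** 2 / 4.0)
weighted_data = X * np.sqrt(weights)[:, np.newaxis]
weighted_labels = y * np.sqrt(weights)

alphas, _, coefs = lars_path(weighted_data, weighted_labels,
                             method='lasso', verbose=False)
print(alphas.shape, coefs.shape)  # regularization path: (k,) and (5, k)
```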
def register_items(self, items): for item in items: item.set_parent(self) self.items.extend(items)
Bulk ``register_item``. Args: items (iterable[Tree]): Sequence of nodes to be registered as children.
juraj-google-style
def numpy(self) -> npt.ArrayLike: maybe_arr = self._numpy() return maybe_arr.copy() if isinstance(maybe_arr, np.ndarray) else maybe_arr
Copy of the contents of this Tensor into a NumPy array or scalar. Unlike NumPy arrays, Tensors are immutable, so this method has to copy the contents to ensure safety. Use `memoryview` to get a readonly view of the contents without doing a copy: >>> t = tf.constant([42]) >>> np.asarray(memoryview(t)) array([42], dtype=int32) Note that `memoryview` is only zero-copy for Tensors on CPU. If a Tensor is on GPU, it will have to be transferred to CPU first in order for `memoryview` to work. Returns: A NumPy array of the same shape and dtype or a NumPy scalar, if this Tensor has rank 0. Raises: ValueError: If the dtype of this Tensor does not have a compatible NumPy dtype.
github-repos
def __init__(self, text: str, name: YangIdentifier = None, rev: str = None): super().__init__(text) self.name = name self.rev = rev
Initialize the parser instance.

Args:
    text: Input text to parse.
    name: Expected module name.
    rev: Expected revision date.
juraj-google-style
def show_events(self, status=None, nids=None): nrows, ncols = get_terminal_size() for task in self.iflat_tasks(status=status, nids=nids): report = task.get_event_report() if report: print(make_banner(str(task), width=ncols, mark="=")) print(report)
Print the Abinit events (ERRORS, WARNINGS, COMMENTS) to stdout.

Args:
    status: if not None, only the tasks with this status are selected.
    nids: optional list of node identifiers used to filter the tasks.
juraj-google-style
def __init__(self, channel): self.ListMonitoredResourceDescriptors = channel.unary_unary( "/google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors", request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.ListMonitoredResourceDescriptorsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.ListMonitoredResourceDescriptorsResponse.FromString, ) self.GetMonitoredResourceDescriptor = channel.unary_unary( "/google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor", request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.GetMonitoredResourceDescriptorRequest.SerializeToString, response_deserializer=google_dot_api_dot_monitored__resource__pb2.MonitoredResourceDescriptor.FromString, ) self.ListMetricDescriptors = channel.unary_unary( "/google.monitoring.v3.MetricService/ListMetricDescriptors", request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.ListMetricDescriptorsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.ListMetricDescriptorsResponse.FromString, ) self.GetMetricDescriptor = channel.unary_unary( "/google.monitoring.v3.MetricService/GetMetricDescriptor", request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.GetMetricDescriptorRequest.SerializeToString, response_deserializer=google_dot_api_dot_metric__pb2.MetricDescriptor.FromString, ) self.CreateMetricDescriptor = channel.unary_unary( "/google.monitoring.v3.MetricService/CreateMetricDescriptor", request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.CreateMetricDescriptorRequest.SerializeToString, response_deserializer=google_dot_api_dot_metric__pb2.MetricDescriptor.FromString, ) self.DeleteMetricDescriptor = channel.unary_unary( "/google.monitoring.v3.MetricService/DeleteMetricDescriptor", request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.DeleteMetricDescriptorRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.ListTimeSeries = channel.unary_unary( "/google.monitoring.v3.MetricService/ListTimeSeries", request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.ListTimeSeriesRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.ListTimeSeriesResponse.FromString, ) self.CreateTimeSeries = channel.unary_unary( "/google.monitoring.v3.MetricService/CreateTimeSeries", request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.CreateTimeSeriesRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def _request_reports(self, domains): params = [{'url': domain} for domain in domains] responses = self._requests.multi_get( self.BASE_URL, query_params=params, to_json=False) return responses
Sends multiple GET requests, one per domain, to the API base URL.

Args:
    domains: a list of domains to request reports for.

Returns:
    A list of the responses.
juraj-google-style
def traverse_nodes(self, node_set, depth=0): tab = " " result = list() for n in node_set: repr = ( n if self.nodes[n]["type"] == "variable" else f"{n}{inspect.signature(self.nodes[n]['lambda_fn'])}" ) result.append(f"{tab * depth}{repr}") result.extend( self.traverse_nodes(self.successors(n), depth=depth + 1) ) return result
Depth-first traversal of nodes that returns the name traversal as a list of tabbed strings.

Args:
    node_set: Set of input nodes to begin traversal.
    depth: Current traversal depth for child node viewing.

Returns:
    list: List of strings containing the tabbed traversal view.
juraj-google-style
def _kl_pareto_pareto(a, b, name=None): with tf.name_scope(name or "kl_pareto_pareto"): final_batch_shape = distribution_util.get_broadcast_shape( a.concentration, b.concentration, a.scale, b.scale) common_type = dtype_util.common_dtype( [a.concentration, b.concentration, a.scale, b.scale], tf.float32) return tf.where( a.scale >= b.scale, b.concentration * (tf.math.log(a.scale) - tf.math.log(b.scale)) + tf.math.log(a.concentration) - tf.math.log(b.concentration) + b.concentration / a.concentration - 1.0, tf.broadcast_to(tf.cast(np.inf, common_type), final_batch_shape))
Calculate the batched KL divergence KL(a || b) with a and b Pareto. Args: a: instance of a Pareto distribution object. b: instance of a Pareto distribution object. name: (optional) Name to use for created operations. default is "kl_pareto_pareto". Returns: Batchwise KL(a || b)
juraj-google-style
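For reference, the closed form the code above evaluates, with concentration α and scale s (support [s, ∞)). This is a transcription of the TensorFlow expression, not an independent derivation:

\[
\mathrm{KL}(a \,\|\, b) =
\begin{cases}
\alpha_b \log\dfrac{s_a}{s_b} + \log\dfrac{\alpha_a}{\alpha_b} + \dfrac{\alpha_b}{\alpha_a} - 1, & s_a \ge s_b,\\[6pt]
+\infty, & s_a < s_b.
\end{cases}
\]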
def get_forced_variation(self, experiment_key, user_id): if not self.is_valid: self.logger.error(enums.Errors.INVALID_DATAFILE.format('get_forced_variation')) return None if not validator.is_non_empty_string(experiment_key): self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('experiment_key')) return None if not isinstance(user_id, string_types): self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id')) return None forced_variation = self.config.get_forced_variation(experiment_key, user_id) return forced_variation.key if forced_variation else None
Gets the forced variation for a given user and experiment. Args: experiment_key: A string key identifying the experiment. user_id: The user ID. Returns: The forced variation key. None if no forced variation key.
juraj-google-style
def calculate_sun_from_date_time(self, datetime, is_solar_time=False): if datetime.year != 2016 and self.is_leap_year: datetime = DateTime(datetime.month, datetime.day, datetime.hour, datetime.minute, True) sol_dec, eq_of_time = self._calculate_solar_geometry(datetime) hour = datetime.float_hour is_daylight_saving = self.is_daylight_saving_hour(datetime.hoy) hour = hour + 1 if self.is_daylight_saving_hour(datetime.hoy) else hour sol_time = self._calculate_solar_time(hour, eq_of_time, is_solar_time) * 60 if sol_time / 4 < 0: hour_angle = sol_time / 4 + 180 else: hour_angle = sol_time / 4 - 180 zenith = math.degrees(math.acos (math.sin(self._latitude) * math.sin(math.radians(sol_dec)) + math.cos(self._latitude) * math.cos(math.radians(sol_dec)) * math.cos(math.radians(hour_angle)))) altitude = 90 - zenith if altitude > 85: atmos_refraction = 0 else: if altitude > 5: atmos_refraction = 58.1 / math.tan(math.radians(altitude)) - 0.07 / (math.tan(math.radians(altitude)))**3 + 0.000086 / (math.tan(math.radians(altitude)))**5 else: if altitude > -0.575: atmos_refraction = 1735 + altitude * (-518.2 + altitude * (103.4 + altitude * (-12.79 + altitude * 0.711))) else: atmos_refraction = -20.772 / math.tan( math.radians(altitude)) atmos_refraction /= 3600 altitude += atmos_refraction if hour_angle > 0: azimuth = (math.degrees( math.acos( ( (math.sin(self._latitude) * math.cos(math.radians(zenith))) - math.sin(math.radians(sol_dec))) / (math.cos(self._latitude) * math.sin(math.radians(zenith))) ) ) + 180) % 360 else: azimuth = (540 - math.degrees(math.acos(( (math.sin(self._latitude) * math.cos(math.radians(zenith))) - math.sin(math.radians(sol_dec))) / (math.cos(self._latitude) * math.sin(math.radians(zenith)))) )) % 360 altitude = math.radians(altitude) azimuth = math.radians(azimuth) return Sun(datetime, altitude, azimuth, is_solar_time, is_daylight_saving, self.north_angle)
Get Sun for an hour of the year.

This code was originally written by Trygve Wastvedt (Trygve.Wastvedt@gmail.com)
based on NOAA's solar calculations and modified by Chris Mackey and Mostapha Roudsari.

Args:
    datetime: Ladybug datetime
    is_solar_time: A boolean to indicate if the input hour is solar time.
        (Default: False)

Returns:
    A sun object for this particular time
juraj-google-style
def from_cif_string(cif_string, transformations=None, primitive=True, occupancy_tolerance=1.0): parser = CifParser.from_string(cif_string, occupancy_tolerance) raw_string = re.sub("'", '"', cif_string) cif_dict = parser.as_dict() cif_keys = list(cif_dict.keys()) s = parser.get_structures(primitive)[0] partial_cif = cif_dict[cif_keys[0]] if ('_database_code_ICSD' in partial_cif): source = (partial_cif['_database_code_ICSD'] + '-ICSD') else: source = 'uploaded cif' source_info = {'source': source, 'datetime': str(datetime.datetime.now()), 'original_file': raw_string, 'cif_data': cif_dict[cif_keys[0]]} return TransformedStructure(s, transformations, history=[source_info])
Generates TransformedStructure from a cif string. Args: cif_string (str): Input cif string. Should contain only one structure. For cifs containing multiple structures, please use CifTransmuter. transformations ([Transformations]): Sequence of transformations to be applied to the input structure. primitive (bool): Option to set if the primitive cell should be extracted. Defaults to True. However, there are certain instances where you might want to use a non-primitive cell, e.g., if you are trying to generate all possible orderings of partial removals or order a disordered structure. occupancy_tolerance (float): If total occupancy of a site is between 1 and occupancy_tolerance, the occupancies will be scaled down to 1. Returns: TransformedStructure
codesearchnet
def get_numeric_feature_names(example): numeric_features = ('float_list', 'int64_list') features = get_example_features(example) return sorted([feature_name for feature_name in features if (features[feature_name].WhichOneof('kind') in numeric_features)])
Returns a list of feature names for float and int64 type features. Args: example: An example. Returns: A list of strings of the names of numeric features.
codesearchnet
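A hedged sketch of the kind of input the helper above expects; it assumes `get_numeric_feature_names` (and its `get_example_features` dependency) can be imported from the same module:

```python
import tensorflow as tf

example = tf.train.Example(features=tf.train.Features(feature={
    'age': tf.train.Feature(int64_list=tf.train.Int64List(value=[42])),
    'weight': tf.train.Feature(float_list=tf.train.FloatList(value=[70.5])),
    'name': tf.train.Feature(bytes_list=tf.train.BytesList(value=[b'alice'])),
}))

# Only int64_list and float_list features qualify as numeric.
print(get_numeric_feature_names(example))  # ['age', 'weight']
```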
def init(self, force_deploy=False): machines = self.provider_conf.machines networks = self.provider_conf.networks _networks = [] for network in networks: ipnet = IPNetwork(network.cidr) _networks.append({ "netpool": list(ipnet)[10:-10], "cidr": network.cidr, "roles": network.roles, "gateway": ipnet.ip }) vagrant_machines = [] vagrant_roles = {} j = 0 for machine in machines: for _ in range(machine.number): vagrant_machine = { "name": "enos-%s" % j, "cpu": machine.flavour_desc["core"], "mem": machine.flavour_desc["mem"], "ips": [n["netpool"].pop() for n in _networks], } vagrant_machines.append(vagrant_machine) for role in machine.roles: vagrant_roles.setdefault(role, []).append(vagrant_machine) j = j + 1 logger.debug(vagrant_roles) loader = FileSystemLoader(searchpath=TEMPLATE_DIR) env = Environment(loader=loader, autoescape=True) template = env.get_template('Vagrantfile.j2') vagrantfile = template.render(machines=vagrant_machines, provider_conf=self.provider_conf) vagrantfile_path = os.path.join(os.getcwd(), "Vagrantfile") with open(vagrantfile_path, 'w') as f: f.write(vagrantfile) v_env = dict(os.environ) v_env['VAGRANT_DEFAULT_PROVIDER'] = self.provider_conf.backend v = vagrant.Vagrant(root=os.getcwd(), quiet_stdout=False, quiet_stderr=False, env=v_env) if force_deploy: v.destroy() v.up() v.provision() roles = {} for role, machines in vagrant_roles.items(): for machine in machines: keyfile = v.keyfile(vm_name=machine['name']) port = v.port(vm_name=machine['name']) address = v.hostname(vm_name=machine['name']) roles.setdefault(role, []).append( Host(address, alias=machine['name'], user=self.provider_conf.user, port=port, keyfile=keyfile)) networks = [{ 'cidr': str(n["cidr"]), 'start': str(n["netpool"][0]), 'end': str(n["netpool"][-1]), 'dns': '8.8.8.8', 'gateway': str(n["gateway"]), 'roles': n["roles"] } for n in _networks] logger.debug(roles) logger.debug(networks) return (roles, networks)
Reserve and deploys the vagrant boxes. Args: force_deploy (bool): True iff new machines should be started
juraj-google-style
def initialize_logger(): logger = logging.getLogger('steppy') logger.setLevel(logging.INFO) message_format = logging.Formatter(fmt='%(asctime)s %(name)s >>> %(message)s', datefmt='%Y-%m-%d %H:%M:%S') console_handler = logging.StreamHandler(sys.stdout) console_handler.setLevel(logging.INFO) console_handler.setFormatter(fmt=message_format) logger.addHandler(console_handler) return logger
Initialize steppy logger. This logger is used throughout the steppy library to report computation progress. Example: Simple use of steppy logger: .. code-block:: python initialize_logger() logger = get_logger() logger.info('My message inside pipeline') result looks like this: .. code:: 2018-06-02 12:33:48 steppy >>> My message inside pipeline Returns: logging.Logger: logger object formatted in the steppy style
codesearchnet
def interpolate(self, x: types.RealTensor, y: types.RealTensor, name: str=None): name = name or self._name + '_interpolate' with tf.name_scope(name): x = tf.convert_to_tensor(x, dtype=self._dtype, name='x') y = tf.convert_to_tensor(y, dtype=self._dtype, name='y') y = tf.expand_dims(y, axis=-2) xy = cubic.interpolate(y, self._spline_yz, name='interpolation_in_y_direction') xy_rank = xy.shape.rank perm = [xy_rank - 1] + list(range(xy_rank - 1)) yx = tf.transpose(xy, perm=perm) perm_original = list(range(1, xy_rank)) + [0] x = tf.expand_dims(tf.transpose(x, [xy_rank - 2] + list(range(xy_rank - 2))), axis=-1) z_values = linear.interpolate(x, self._xdata, yx) return tf.squeeze(tf.transpose(z_values, perm=perm_original), axis=-2)
Performs 2-D interpolation on a specified set of points. Args: x: Real-valued `Tensor` of shape `batch_shape + [num_points]`. Defines the x-coordinates at which the interpolation should be performed. Note that `batch_shape` should be the same as in the underlying data. y: A `Tensor` of the same shape and `dtype` as `x`. Defines the y-coordinates at which the interpolation should be performed. name: Python `str` name prefixed to ops created by this function. Default value: `None` which is mapped to the default name `interpolate`. Returns: A `Tensor` of the same shape and `dtype` as `x`. Represents the interpolated values of the function on for the coordinates `(x, y)`.
github-repos
def delete(self, filename): if is_package(filename): self.connection["jss"].Package(filename).delete() else: self.connection["jss"].Script(filename).delete()
Delete a package or script from the distribution server. This method simply finds the Package or Script object from the database with the API GET call and then deletes it. This will remove the file from the database blob. For setups which have file share distribution points, you will need to delete the files on the shares also. Args: filename: Filename (no path) to delete.
juraj-google-style
def read_record(cls, file_handle): buf_length_expected = 12 buf = file_handle.read(buf_length_expected) if not buf: return None if len(buf) != buf_length_expected: raise ValueError('Not a valid TFRecord. Fewer than %d bytes: %s' % (buf_length_expected, codecs.encode(buf, 'hex'))) length, length_mask_expected = struct.unpack('<QI', buf) length_mask_actual = cls._masked_crc32c(buf[:8]) if length_mask_actual != length_mask_expected: raise ValueError('Not a valid TFRecord. Mismatch of length mask: %s' % codecs.encode(buf, 'hex')) buf_length_expected = length + 4 buf = file_handle.read(buf_length_expected) if len(buf) != buf_length_expected: raise ValueError('Not a valid TFRecord. Fewer than %d bytes: %s' % (buf_length_expected, codecs.encode(buf, 'hex'))) data, data_mask_expected = struct.unpack('<%dsI' % length, buf) data_mask_actual = cls._masked_crc32c(data) if data_mask_actual != data_mask_expected: raise ValueError('Not a valid TFRecord. Mismatch of data mask: %s' % codecs.encode(buf, 'hex')) return data
Read a record from a TFRecords file.

Args:
    file_handle: The file to read from.

Returns:
    None if EOF is reached; the payload of the record otherwise.

Raises:
    ValueError: If file appears to not be a valid TFRecords file.
github-repos
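Each record in the file is framed as an 8-byte little-endian length, a 4-byte masked CRC-32C of that length, the payload, and a 4-byte masked CRC-32C of the payload, which is exactly what the two reads above consume. A hedged sketch of iterating a whole file with this classmethod; the class name is a stand-in:

```python
# 'TFRecordReader' is a stand-in for whatever class hosts read_record above.
records = []
with open('data.tfrecord', 'rb') as handle:
    while True:
        payload = TFRecordReader.read_record(handle)
        if payload is None:  # EOF
            break
        records.append(payload)
print(len(records), 'records read')
```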
def _ParseFiletime(self, byte_stream): filetime_map = self._GetDataTypeMap('filetime') try: filetime = self._ReadStructureFromByteStream(byte_stream, 0, filetime_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError('Unable to parse FILETIME value with error: {0!s}'.format(exception)) if (filetime == 0): return None try: return dfdatetime_filetime.Filetime(timestamp=filetime) except ValueError: raise errors.ParseError('Invalid FILETIME value: 0x{0:08x}'.format(filetime))
Parses a FILETIME date and time value from a byte stream. Args: byte_stream (bytes): byte stream. Returns: dfdatetime.Filetime: FILETIME date and time value or None if no value is set. Raises: ParseError: if the FILETIME could not be parsed.
codesearchnet
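FILETIME counts 100-nanosecond intervals since 1601-01-01 00:00:00 UTC, which is what the `dfdatetime` wrapper above encapsulates. A standalone round-trip sketch, independent of dfdatetime, for illustration:

```python
from datetime import datetime, timedelta, timezone

FILETIME_EPOCH = datetime(1601, 1, 1, tzinfo=timezone.utc)

def filetime_to_datetime(filetime):
    """Convert 100ns ticks since 1601-01-01 UTC to an aware datetime."""
    return FILETIME_EPOCH + timedelta(microseconds=filetime // 10)

def datetime_to_filetime(dt):
    """Convert an aware datetime to 100ns ticks since 1601-01-01 UTC."""
    return ((dt - FILETIME_EPOCH) // timedelta(microseconds=1)) * 10

ft = datetime_to_filetime(datetime(2019, 1, 1, tzinfo=timezone.utc))
print(ft, filetime_to_datetime(ft))  # round-trips to 2019-01-01 00:00:00+00:00
```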
def detect_gpt(self, filename, offset, fs_guid): self.logger.debug('Detecting GPT partition type') if (fs_guid not in self.__gpt_plugins): return None else: plugins = self.__gpt_plugins.get(fs_guid) for plugin in plugins: if plugin.detect(filename, offset): return plugin.get_volume_object() return None
Used by rawdisk.session.Session to match gpt partitions against filesystem plugins.

Args:
    filename: device or file that it will read in order to detect the filesystem
    fs_guid: filesystem guid to match (ex. {EBD0A0A2-B9E5-4433-87C0-68B6B72699C7})
    offset: offset for the filesystem that is being matched

Returns:
    Volume object supplied by matched plugin. If there is no match, None is returned
codesearchnet
def E(poly, dist=None, **kws): if (not isinstance(poly, (distributions.Dist, polynomials.Poly))): print(type(poly)) print('Approximating expected value...') out = quadrature.quad(poly, dist, veceval=True, **kws) print('done') return out if isinstance(poly, distributions.Dist): (dist, poly) = (poly, polynomials.variable(len(poly))) if (not poly.keys): return numpy.zeros(poly.shape, dtype=int) if isinstance(poly, (list, tuple, numpy.ndarray)): return [E(_, dist, **kws) for _ in poly] if (poly.dim < len(dist)): poly = polynomials.setdim(poly, len(dist)) shape = poly.shape poly = polynomials.flatten(poly) keys = poly.keys mom = dist.mom(numpy.array(keys).T, **kws) A = poly.A if (len(dist) == 1): mom = mom[0] out = numpy.zeros(poly.shape) for i in range(len(keys)): out += (A[keys[i]] * mom[i]) out = numpy.reshape(out, shape) return out
Expected value operator. 1st order statistics of a probability distribution or polynomial on a given probability space. Args: poly (Poly, Dist): Input to take expected value on. dist (Dist): Defines the space the expected value is taken on. It is ignored if ``poly`` is a distribution. Returns: (numpy.ndarray): The expected value of the polynomial or distribution, where ``expected.shape == poly.shape``. Examples: >>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2)) >>> print(chaospy.E(dist)) [1. 0.] >>> x, y = chaospy.variable(2) >>> poly = chaospy.Poly([1, x, y, 10*x*y]) >>> print(chaospy.E(poly, dist)) [1. 1. 0. 0.]
codesearchnet
def parse_args(self, argv: list[str]) -> ParsedArgs: tool_args = self._parser.parse_args(argv) return self.process_parsed_args(tool_args)
Parses argv. Args: argv: sys.argv[1:] Returns: A ParsedArgs object
github-repos
def _CheckStorageFile(self, storage_file_path): if os.path.exists(storage_file_path): if not os.path.isfile(storage_file_path): raise errors.BadConfigOption( 'Storage file: {0:s} already exists and is not a file.'.format( storage_file_path)) logger.warning('Appending to an already existing storage file.') dirname = os.path.dirname(storage_file_path) if not dirname: dirname = '.' if not os.access(dirname, os.W_OK): raise errors.BadConfigOption( 'Unable to write to storage file: {0:s}'.format(storage_file_path))
Checks if the storage file path is valid. Args: storage_file_path (str): path of the storage file. Raises: BadConfigOption: if the storage file path is invalid.
juraj-google-style
def json_compare(self, db_data, user_data): if isinstance(db_data, (string_types)): db_data = json.loads(db_data) if isinstance(user_data, (string_types)): user_data = json.loads(user_data) return self.deep_diff(db_data, user_data)
Validate data in user data. Args: db_data (str): The data store in Redis. user_data (str): The user provided data. Returns: bool: True if the data passed validation.
juraj-google-style
def write_supercells_with_displacements(supercell, cells_with_disps, filename='geo.gen'): write_dftbp((filename + 'S'), supercell) for ii in range(len(cells_with_disps)): write_dftbp((filename + 'S-{:03d}'.format((ii + 1))), cells_with_disps[ii])
Writes perfect supercell and supercells with displacements Args: supercell: perfect supercell cells_with_disps: supercells with displaced atoms filename: root-filename
codesearchnet
def umask(self, new_mask): if (not is_int_type(new_mask)): raise TypeError('an integer is required') old_umask = self.filesystem.umask self.filesystem.umask = new_mask return old_umask
Change the current umask. Args: new_mask: (int) The new umask value. Returns: The old umask. Raises: TypeError: if new_mask is of an invalid type.
codesearchnet
def _faster_to_representation(self, instance): ret = {} fields = self._readable_fields is_fast = isinstance(instance, prefetch.FastObject) id_fields = self._readable_id_fields for field in fields: attribute = None if (is_fast and (not isinstance(field, (DynamicGenericRelationField, DynamicRelationField)))): if ((field in id_fields) and (field.source not in instance)): attribute = instance.get((field.source + '_id')) ret[field.field_name] = attribute continue else: try: attribute = instance[field.source] except KeyError: if hasattr(instance, field.source): attribute = getattr(instance, field.source) else: attribute = field.get_attribute(instance) print(('Missing %s from %s' % (field.field_name, self.__class__.__name__))) else: try: attribute = field.get_attribute(instance) except SkipField: continue if (attribute is None): ret[field.field_name] = None else: ret[field.field_name] = field.to_representation(attribute) return ret
Modified to_representation with optimizations. 1) Returns a plain old dict as opposed to OrderedDict. (Constructing ordered dict is ~100x slower than `{}`.) 2) Ensure we use a cached list of fields (this optimization exists in DRF 3.2 but not 3.1) Arguments: instance: a model instance or data object Returns: Dict of primitive datatypes.
codesearchnet