Columns: code (string, 20–4.93k characters), docstring (string, 33–1.27k characters), source (3 classes)
def convert_to_string(self, productions):
    symbols = []
    for production in tf.unstack(productions, axis=1):
        lhs, rhs = self.production_rules[tf.argmax(input=production, axis=-1)]
        if not symbols:
            if lhs != self.start_symbol:
                raise ValueError('`productions` must begin with `self.start_symbol`.')
            symbols = rhs
        else:
            index = symbols.index(lhs)
            symbols = symbols[:index] + rhs + symbols[index + 1:]
    string = ''.join(symbols)
    return string
Converts a sequence of productions into a string of terminal symbols. Args: productions: Tensor of shape [1, num_productions, num_production_rules]. Slices along the `num_productions` dimension represent one-hot vectors. Returns: str that concatenates all terminal symbols from `productions`. Raises: ValueError: If the first production rule does not begin with `self.start_symbol`.
codesearchnet
def save(self, path, check=True): with open(path, 'w') as f: if check: if ("LOCATION" not in self._data or self._data["LOCATION"] is None): raise ValueError('location is not valid.') if ("DESIGN CONDITIONS" not in self._data or self._data["DESIGN CONDITIONS"] is None): raise ValueError('design_conditions is not valid.') if ("TYPICAL/EXTREME PERIODS" not in self._data or self._data["TYPICAL/EXTREME PERIODS"] is None): raise ValueError( 'typical_or_extreme_periods is not valid.') if ("GROUND TEMPERATURES" not in self._data or self._data["GROUND TEMPERATURES"] is None): raise ValueError('ground_temperatures is not valid.') if ("HOLIDAYS/DAYLIGHT SAVINGS" not in self._data or self._data["HOLIDAYS/DAYLIGHT SAVINGS"] is None): raise ValueError( 'holidays_or_daylight_savings is not valid.') if ("COMMENTS 1" not in self._data or self._data["COMMENTS 1"] is None): raise ValueError('comments_1 is not valid.') if ("COMMENTS 2" not in self._data or self._data["COMMENTS 2"] is None): raise ValueError('comments_2 is not valid.') if ("DATA PERIODS" not in self._data or self._data["DATA PERIODS"] is None): raise ValueError('data_periods is not valid.') if ("LOCATION" in self._data and self._data["LOCATION"] is not None): f.write(self._data["LOCATION"].export() + "\n") if ("DESIGN CONDITIONS" in self._data and self._data["DESIGN CONDITIONS"] is not None): f.write(self._data["DESIGN CONDITIONS"].export() + "\n") if ("TYPICAL/EXTREME PERIODS" in self._data and self._data["TYPICAL/EXTREME PERIODS"] is not None): f.write(self._data["TYPICAL/EXTREME PERIODS"].export() + "\n") if ("GROUND TEMPERATURES" in self._data and self._data["GROUND TEMPERATURES"] is not None): f.write(self._data["GROUND TEMPERATURES"].export() + "\n") if ("HOLIDAYS/DAYLIGHT SAVINGS" in self._data and self._data["HOLIDAYS/DAYLIGHT SAVINGS"] is not None): f.write( self._data["HOLIDAYS/DAYLIGHT SAVINGS"].export() + "\n") if ("COMMENTS 1" in self._data and self._data["COMMENTS 1"] is not None): f.write(self._data["COMMENTS 1"].export() + "\n") if ("COMMENTS 2" in self._data and self._data["COMMENTS 2"] is not None): f.write(self._data["COMMENTS 2"].export() + "\n") if ("DATA PERIODS" in self._data and self._data["DATA PERIODS"] is not None): f.write(self._data["DATA PERIODS"].export() + "\n") for item in self._data["WEATHER DATA"]: f.write(item.export(False) + "\n")
Save WeatherData in EPW format to path. Args: path (str): path where EPW file should be saved
juraj-google-style
def get_priority(priority):
    if isinstance(priority, int):
        if priority < 0 or priority > 100:
            raise ValueError('priority must be between 0 and 100')
        return priority
    elif isinstance(priority, Priority):
        return priority.value
    elif isinstance(priority, str):
        return Priority[priority.upper()].value
    else:
        raise TypeError('priority must be an integer or Priority enum value')
Get priority value. Args: priority (int or str or :obj:`Priority`): Priority. Returns: int: The priority value.
codesearchnet
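A brief usage sketch for the entry above; the `Priority` enum defined here is a stand-in created only for illustration:
import enum

class Priority(enum.Enum):  # hypothetical enum, only to exercise get_priority
    LOW = 10
    HIGH = 90

# Integers within 0-100 pass through; enum members and their names resolve to .value.
assert get_priority(42) == 42
assert get_priority(Priority.HIGH) == 90
assert get_priority('low') == 10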
def inception_resnet_v2_arg_scope(weight_decay=0.00004, batch_norm_decay=0.9997, batch_norm_epsilon=0.001): with slim.arg_scope([slim.conv2d, slim.fully_connected], weights_regularizer=slim.l2_regularizer(weight_decay), biases_regularizer=slim.l2_regularizer(weight_decay)): batch_norm_params = { 'decay': batch_norm_decay, 'epsilon': batch_norm_epsilon, } with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu, normalizer_fn=slim.batch_norm, normalizer_params=batch_norm_params) as scope: return scope
Returns the scope with the default parameters for inception_resnet_v2. Args: weight_decay: the weight decay for weights variables. batch_norm_decay: decay for the moving average of batch_norm momentums. batch_norm_epsilon: small float added to variance to avoid dividing by zero. Returns: an arg_scope with the parameters needed for inception_resnet_v2.
juraj-google-style
def _relation_exists(cls, connection, relation):
    schema_name, table_name = relation.split('.')
    # NOTE: the SQL text was elided from the original snippet; the query is
    # expected to return [(1,)] when the relation exists in the given schema.
    exists_query = ...
    with connection.cursor() as cursor:
        cursor.execute(exists_query, [schema_name, table_name])
        result = cursor.fetchall()
    return result == [(1,)]
Returns True if relation exists in the postgres db. Otherwise returns False. Args: connection: connection to the postgres database that stores mpr data. relation (str): name of the table, view or materialized view. Note: relation means table, view or materialized view here. Returns: boolean: True if relation exists, False otherwise.
juraj-google-style
def ParseNameSpace(self, parser_mediator, cache=None, database=None, table=None, **unused_kwargs): if (database is None): raise ValueError('Missing database value.') if (table is None): raise ValueError('Missing table value.') strings = cache.GetResults('strings') if (not strings): esedb_table = database.get_table_by_name('string') strings = self._GetDictFromStringsTable(parser_mediator, esedb_table) cache.StoreDictInCache('strings', strings) for esedb_record in table.records: if parser_mediator.abort: break record_values = self._GetRecordValues(parser_mediator, table.name, esedb_record) event_data = FileHistoryNamespaceEventData() event_data.file_attribute = record_values.get('fileAttrib', None) event_data.identifier = record_values.get('id', None) event_data.parent_identifier = record_values.get('parentId', None) event_data.usn_number = record_values.get('usn', None) event_data.original_filename = strings.get(event_data.identifier, None) created_timestamp = record_values.get('fileCreated') if created_timestamp: date_time = dfdatetime_filetime.Filetime(timestamp=created_timestamp) event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data) modified_timestamp = record_values.get('fileModified') if modified_timestamp: date_time = dfdatetime_filetime.Filetime(timestamp=modified_timestamp) event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_MODIFICATION) parser_mediator.ProduceEventWithEventData(event, event_data) if ((not created_timestamp) and (not modified_timestamp)): date_time = dfdatetime_semantic_time.SemanticTime('Not set') event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME) parser_mediator.ProduceEventWithEventData(event, event_data)
Parses the namespace table. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. cache (Optional[ESEDBCache]): cache. database (Optional[pyesedb.file]): ESE database. table (Optional[pyesedb.table]): table. Raises: ValueError: if the database or table value is missing.
codesearchnet
def create_storage_client(pipeline_options, use_credentials=True): if use_credentials: credentials = auth.get_service_credentials(pipeline_options) else: credentials = None if credentials: google_cloud_options = pipeline_options.view_as(GoogleCloudOptions) from google.api_core import client_info beam_client_info = client_info.ClientInfo(user_agent='apache-beam/%s (GPN:Beam)' % beam_version.__version__) extra_headers = {'x-goog-custom-audit-job': google_cloud_options.job_name if google_cloud_options.job_name else 'UNKNOWN'} if google_cloud_options.gcs_custom_audit_entries is not None: extra_headers.update(google_cloud_options.gcs_custom_audit_entries) return storage.Client(credentials=credentials.get_google_auth_credentials(), project=google_cloud_options.project, client_info=beam_client_info, extra_headers=extra_headers) else: return storage.Client.create_anonymous_client()
Create a GCS client for Beam via GCS Client Library. Args: pipeline_options(apache_beam.options.pipeline_options.PipelineOptions): the options of the pipeline. use_credentials(bool): whether to create an authenticated client based on pipeline options or an anonymous client. Returns: A google.cloud.storage.client.Client instance.
github-repos
def __ginibre_matrix(nrow, ncol=None, seed=None): if ncol is None: ncol = nrow if seed is not None: np.random.seed(seed) G = np.random.normal(size=(nrow, ncol)) + \ np.random.normal(size=(nrow, ncol)) * 1j return G
Return a normally distributed complex random matrix. Args: nrow (int): number of rows in output matrix. ncol (int): number of columns in output matrix. seed (int): Optional. To set a random seed. Returns: ndarray: A complex rectangular matrix where each real and imaginary entry is sampled from the normal distribution.
juraj-google-style
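A standalone sketch of the same sampling idea as the entry above, using only NumPy; the function name is illustrative:
import numpy as np

def ginibre_matrix(nrow, ncol=None, seed=None):
    # Real and imaginary parts are each drawn from a standard normal distribution.
    if ncol is None:
        ncol = nrow
    rng = np.random.default_rng(seed)
    return rng.normal(size=(nrow, ncol)) + 1j * rng.normal(size=(nrow, ncol))

G = ginibre_matrix(3, seed=0)
print(G.shape, G.dtype)  # (3, 3) complex128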
def send_log_messages(self, messages: List[LogMessage]) -> None:
    errors = upload_rows(self._bq_client, self._table_metadata, cast(List[Dict], messages))
    if errors:
        for error in errors:
            self._fallback_logger.send_log_message({'log_type': LogType.SYSTEM.value, 'error': error})
        raise RuntimeError('BigQuery logging failed: Check Cloud Logs.')
Sends multiple log messages to BigQuery. Args: * messages: list of LogMessage dictionaries Returns: * None Raises: * RuntimeError: if BigQuery insert fails
github-repos
def netmiko_save_config( task: Task, cmd: str = "", confirm: bool = False, confirm_response: str = "" ) -> Result: conn = task.host.get_connection("netmiko", task.nornir.config) if cmd: result = conn.save_config( cmd=cmd, confirm=confirm, confirm_response=confirm_response ) else: result = conn.save_config(confirm=confirm, confirm_response=confirm_response) return Result(host=task.host, result=result, changed=True)
Execute Netmiko save_config method Arguments: cmd(str, optional): Command used to save the configuration. confirm(bool, optional): Does the device prompt for confirmation before executing the save operation confirm_response(str, optional): Response sent to the device when it prompts for confirmation Returns: :obj: `nornir.core.task.Result`: * result (``str``): String showing the CLI output from the save operation
juraj-google-style
def create_tree(tree):
    config.LOGGER.info("\nCreating tree on Kolibri Studio...")
    channel_id, channel_link = tree.upload_tree()
    return channel_link, channel_id
create_tree: Upload tree to Kolibri Studio Args: tree (ChannelManager): manager to handle communication to Kolibri Studio Returns: channel id of created channel and link to channel
juraj-google-style
def _resize_for_patching(self, image: 'torch.Tensor', target_resolution: tuple, interpolation: 'F.InterpolationMode', input_data_format: ChannelDimension) -> 'torch.Tensor':
    new_height, new_width = get_patch_output_size(image, target_resolution, input_data_format)
    resized_image = F.resize(image, (new_height, new_width), interpolation=interpolation)
    return resized_image
Resizes an image to a target resolution while maintaining aspect ratio. Args: image ("torch.Tensor"): The input image. target_resolution (tuple): The target resolution (height, width) of the image. interpolation (`InterpolationMode`): Resampling filter to use if resizing the image. input_data_format (`ChannelDimension` or `str`): The channel dimension format of the input image. Returns: "torch.Tensor": The resized and padded image.
github-repos
def overlap(ival0, ival1): min0, max0 = ival0 min1, max1 = ival1 return max(0, min(max0, max1) - max(min0, min1)) > 0
Determine if two interval tuples have overlap. Args: ival0 ((int,int)): An interval tuple ival1 ((int,int)): An interval tuple Returns: (bool): True if the intervals overlap, otherwise False
juraj-google-style
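A quick check of the interval-overlap helper above:
# Two intervals overlap when the length of their intersection is positive.
assert overlap((0, 10), (5, 15)) is True    # intersection (5, 10)
assert overlap((0, 10), (10, 20)) is False  # touching endpoints only
assert overlap((3, 4), (0, 100)) is True    # containment counts as overlap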
def write_hex(fout, buf, offset, width=16):
    skipped_zeroes = 0
    for i, chunk in enumerate(chunk_iter(buf, width)):
        # Skip all-zero chunks; report how many were skipped once a non-zero chunk appears.
        if chunk == (b'\x00' * width):
            skipped_zeroes += 1
            continue
        elif skipped_zeroes != 0:
            fout.write(' -- skipped zeroes: {}\n'.format(skipped_zeroes))
            skipped_zeroes = 0
        fout.write('{:016x} '.format((i * width) + offset))
        column = ' '.join([' '.join(['{:02x}'.format(c) for c in subchunk])
                           for subchunk in chunk_iter(chunk, 8)])
        # NOTE: the tail of this expression was cut off in the original snippet;
        # '((width // 8) - 1)' is a reconstruction of the group-separator padding.
        w = ((width * 2) + (width - 1)) + ((width // 8) - 1)
        if len(column) != w:
            column += ' ' * (w - len(column))
        fout.write(column)
        fout.write(' |')
        for c in chunk:
            if c in PRINTABLE_CHARS:
                fout.write(chr(c))
            else:
                fout.write('.')
        if len(chunk) < width:
            fout.write(' ' * (width - len(chunk)))
        fout.write('|')
        fout.write('\n')
Write the content of 'buf' out in a hexdump style Args: fout: file object to write to buf: the buffer to be pretty printed offset: the starting offset of the buffer width: how many bytes should be displayed per row
codesearchnet
def most_specific_convertible_shape(self, other):
    other = as_shape(other)
    if self._dims is None or other.dims is None or self.ndims != other.ndims:
        return unknown_shape()
    dims = [Dimension(None)] * self.ndims
    for i, (d1, d2) in enumerate(zip(self._dims, other.dims)):
        if d1 is not None and d2 is not None and d1 == d2:
            dims[i] = d1
    return TensorShape(dims)
Returns the most specific TensorShape convertible with `self` and `other`. * TensorShape([None, 1]) is the most specific TensorShape convertible with both TensorShape([2, 1]) and TensorShape([5, 1]). Note that TensorShape(None) is also convertible with above mentioned TensorShapes. * TensorShape([1, 2, 3]) is the most specific TensorShape convertible with both TensorShape([1, 2, 3]) and TensorShape([1, 2, 3]). There are more less specific TensorShapes convertible with above mentioned TensorShapes, e.g. TensorShape([1, 2, None]), TensorShape(None). Args: other: Another `TensorShape`. Returns: A `TensorShape` which is the most specific convertible shape of `self` and `other`.
codesearchnet
def search(self, query, verbose=0):
    if verbose > 0:
        print('searching ' + query)
    query = query.lower()
    qgram = ng(query, self.slb)
    qocument = set()
    for q in qgram:
        if q in self.ngrams.keys():
            for i in self.ngrams[q]:
                qocument.add(i)
    self.qocument = qocument
    results = {}
    for i in qocument:
        for j in self.D[i].keys():
            if not (j in results.keys()):
                results[j] = 0
            results[j] = results[j] + self.D[i][j]
    sorted_results = sorted(results.items(), key=operator.itemgetter(1), reverse=True)
    return [self.elements[f[0]] for f in sorted_results]
Searches files satisfying a query. It first decomposes the query into ngrams, then scores each document containing at least one of those ngrams by the number of ngrams it shares with the query. Documents are returned ordered by that score, best match first. Args: query (str): what to search; verbose (int): print the query being searched if greater than 0.
codesearchnet
def abi_to_fasta(input, output):
    direcs = [input]
    zip_files = list_files(input, ['zip'])
    if zip_files:
        direcs.extend(_process_zip_files(zip_files))
    for d in direcs:
        files = list_files(d, ['ab1', 'abi'])
        seqs = [SeqIO.read(open(f, 'rb'), 'abi') for f in files]
        fastas = ['>{}\n{}'.format(s.id, str(s.seq)) for s in seqs]
        ofile = os.path.basename(os.path.normpath(d)) + '.fasta'
        opath = os.path.join(output, ofile)
        open(opath, 'w').write('\n'.join(fastas))
Converts ABI or AB1 files to FASTA format. Args: input (str): Path to a file or directory containing abi/ab1 files or zip archives of abi/ab1 files output (str): Path to a directory for the output FASTA files
codesearchnet
def parse_response(response, encoding='utf-8'): return requests_toolbelt.multipart.decoder.MultipartDecoder.from_response( response, encoding ).parts
Parse a multipart Requests.Response into a tuple of BodyPart objects. Args: response: Requests.Response encoding: The parser will assume that any text in the HTML body is encoded with this encoding when decoding it for use in the ``text`` attribute. Returns: tuple of BodyPart Members: headers (CaseInsensitiveDict), content (bytes), text (Unicode), encoding (str).
juraj-google-style
def detect_intent_knowledge(project_id, session_id, language_code, knowledge_base_id, texts): import dialogflow_v2beta1 as dialogflow session_client = dialogflow.SessionsClient() session_path = session_client.session_path(project_id, session_id) print('Session path: {}\n'.format(session_path)) for text in texts: text_input = dialogflow.types.TextInput( text=text, language_code=language_code) query_input = dialogflow.types.QueryInput(text=text_input) knowledge_base_path = dialogflow.knowledge_bases_client \ .KnowledgeBasesClient \ .knowledge_base_path(project_id, knowledge_base_id) query_params = dialogflow.types.QueryParameters( knowledge_base_names=[knowledge_base_path]) response = session_client.detect_intent( session=session_path, query_input=query_input, query_params=query_params) print('=' * 20) print('Query text: {}'.format(response.query_result.query_text)) print('Detected intent: {} (confidence: {})\n'.format( response.query_result.intent.display_name, response.query_result.intent_detection_confidence)) print('Fulfillment text: {}\n'.format( response.query_result.fulfillment_text)) print('Knowledge results:') knowledge_answers = response.query_result.knowledge_answers for answers in knowledge_answers.answers: print(' - Answer: {}'.format(answers.answer)) print(' - Confidence: {}'.format( answers.match_confidence))
Returns the result of detect intent with querying Knowledge Connector. Args: project_id: The GCP project linked with the agent you are going to query. session_id: Id of the session, using the same `session_id` between requests allows continuation of the conversation. language_code: Language of the queries. knowledge_base_id: The Knowledge base's id to query against. texts: A list of text queries to send.
juraj-google-style
def upload_predictions(self, file_path, tournament=1):
    self.logger.info("uploading predictions...")
    # NOTE: the GraphQL query strings were elided from this snippet; auth_query is
    # expected to request a signed upload URL and create_query to register the
    # submission once the file has been uploaded.
    auth_query = ...
    arguments = {'filename': os.path.basename(file_path), 'tournament': tournament}
    submission_resp = self.raw_query(auth_query, arguments, authorization=True)
    submission_auth = submission_resp['data']['submission_upload_auth']
    with open(file_path, 'rb') as fh:
        requests.put(submission_auth['url'], data=fh.read())
    create_query = ...
    arguments = {'filename': submission_auth['filename'], 'tournament': tournament}
    create = self.raw_query(create_query, arguments, authorization=True)
    self.submission_id = create['data']['create_submission']['id']
    return self.submission_id
Upload predictions from file. Args: file_path (str): CSV file with predictions that will get uploaded tournament (int): ID of the tournament (optional, defaults to 1) Returns: str: submission_id Example: >>> api = NumerAPI(secret_key="..", public_id="..") >>> api.upload_predictions() '93c46857-fed9-4594-981e-82db2b358daf'
juraj-google-style
def connect(self, uuid_value, wait=None):
    if self.connected:
        raise HardwareError('Cannot connect when we are already connected')
    if uuid_value not in self._scanned_devices:
        self.scan(wait=wait)
    with self._scan_lock:
        if uuid_value not in self._scanned_devices:
            raise HardwareError('Could not find device to connect to by UUID', uuid=uuid_value)
        connstring = self._scanned_devices[uuid_value]['connection_string']
    self.connect_direct(connstring)
Connect to a specific device by its uuid Attempt to connect to a device that we have previously scanned using its UUID. If wait is not None, then it is used in the same way as scan(wait) to override default wait times with an explicit value. Args: uuid_value (int): The unique id of the device that we would like to connect to. wait (float): Optional amount of time to force the device adapter to wait before attempting to connect.
codesearchnet
def add_dict_to_hash(a_hash, a_dict): if a_dict is None: return for k, v in a_dict.items(): a_hash.update(b'\x00' + k.encode('utf-8') + b'\x00' + v.encode('utf-8'))
Adds `a_dict` to `a_hash` Args: a_hash (`Hash`): the secure hash, e.g created by hashlib.md5 a_dict (dict[string, [string]]): the dictionary to add to the hash
juraj-google-style
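A small usage sketch for the entry above; the dictionary contents are made up for illustration:
import hashlib

a_hash = hashlib.md5()
add_dict_to_hash(a_hash, {'user': 'alice', 'role': 'admin'})
add_dict_to_hash(a_hash, None)  # a None dict is simply ignored
print(a_hash.hexdigest())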
def WriteTaskCompletion(self, aborted=False): self._RaiseIfNotWritable() if self._storage_type != definitions.STORAGE_TYPE_TASK: raise IOError('Unsupported storage type.') self._task.aborted = aborted task_completion = self._task.CreateTaskCompletion() self._storage_file.WriteTaskCompletion(task_completion)
Writes task completion information. Args: aborted (Optional[bool]): True if the session was aborted. Raises: IOError: if the storage type is not supported or when the storage writer is closed. OSError: if the storage type is not supported or when the storage writer is closed.
juraj-google-style
def prepare_framework_container_def(model, instance_type, s3_operations):
    deploy_image = model.image
    if not deploy_image:
        region_name = model.sagemaker_session.boto_session.region_name
        deploy_image = fw_utils.create_image_uri(
            region_name, model.__framework_name__, instance_type,
            model.framework_version, model.py_version)
    base_name = utils.base_name_from_image(deploy_image)
    model.name = model.name or utils.name_from_base(base_name)

    bucket = model.bucket or model.sagemaker_session._default_bucket
    script = os.path.basename(model.entry_point)
    key = '{}/source/sourcedir.tar.gz'.format(model.name)

    # NOTE: the 's3://' literals below were truncated in the original snippet and
    # have been reconstructed.
    if model.source_dir and model.source_dir.lower().startswith('s3://'):
        code_dir = model.source_dir
        model.uploaded_code = fw_utils.UploadedCode(s3_prefix=code_dir, script_name=script)
    else:
        code_dir = 's3://{}/{}'.format(bucket, key)
        model.uploaded_code = fw_utils.UploadedCode(s3_prefix=code_dir, script_name=script)
        s3_operations['S3Upload'] = [{
            'Path': model.source_dir or script,
            'Bucket': bucket,
            'Key': key,
            'Tar': True
        }]

    deploy_env = dict(model.env)
    deploy_env.update(model._framework_env_vars())

    try:
        if model.model_server_workers:
            deploy_env[sagemaker.model.MODEL_SERVER_WORKERS_PARAM_NAME.upper()] = str(model.model_server_workers)
    except AttributeError:
        pass

    return sagemaker.container_def(deploy_image, model.model_data, deploy_env)
Prepare the framework model container information. Specify related S3 operations for Airflow to perform. (Upload `source_dir`) Args: model (sagemaker.model.FrameworkModel): The framework model instance_type (str): The EC2 instance type to deploy this Model to. For example, 'ml.p2.xlarge'. s3_operations (dict): The dict to specify S3 operations (upload `source_dir`). Returns: dict: The container information of this framework model.
juraj-google-style
def _sanitize_input_structure(input_structure):
    input_structure = input_structure.copy()
    input_structure.remove_spin()
    input_structure = input_structure.get_primitive_structure(use_site_props=False)
    if 'magmom' in input_structure.site_properties:
        input_structure.remove_site_property('magmom')
    return input_structure
Sanitize our input structure by removing magnetic information and making primitive. Args: input_structure: Structure Returns: Structure
codesearchnet
def process_exception_message(exception): exception_message = str(exception) for replace_char in ['\t', '\n', '\\n']: exception_message = exception_message.replace(replace_char, '' if replace_char != '\t' else ' ') return exception_message.replace('section', 'alias')
Process an exception message. Args: exception: The exception to process. Returns: A filtered string summarizing the exception.
juraj-google-style
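A small illustration of the string clean-up performed by the entry above, with a made-up exception message:
try:
    raise ValueError('bad value in\tsection "db":\nmissing password')
except ValueError as exc:
    cleaned = process_exception_message(exc)
    # Tabs become spaces, newlines are dropped, and "section" is reworded to "alias".
    print(cleaned)  # bad value in alias "db":missing password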
async def connect(self, conn_id, connection_string): id_number = int(connection_string) if id_number not in self.devices: raise DeviceAdapterError(conn_id, 'connect', 'device not found') if self._get_conn_id(connection_string) is not None: raise DeviceAdapterError(conn_id, 'connect', 'device already connected') dev = self.devices[id_number] if dev.connected: raise DeviceAdapterError(conn_id, 'connect', 'device already connected') dev.connected = True self._setup_connection(conn_id, connection_string) self._track_property(conn_id, 'device', dev)
Asynchronously connect to a device Args: conn_id (int): A unique identifier that will refer to this connection connection_string (string): A DeviceAdapter specific string that can be used to connect to a device using this DeviceAdapter. callback (callable): A function that will be called when the connection attempt finishes as callback(connection_id, adapter_id, success: bool, failure_reason: string or None)
juraj-google-style
def _with_inner_rank(self, inner_rank): rank = self.rank if rank is None: raise ValueError('Rank must be known to adjust inner_rank') elif rank < 2: if inner_rank == rank: return self raise ValueError('Cannot change inner_rank if rank < 2') else: new_num_row_partitions = rank - inner_rank return self._with_num_row_partitions(new_num_row_partitions)
Returns the same shape but a different inner_rank. All dimensions that are to be represented in the inner_shape must be dense. See inner_rank. Args: inner_rank: the new inner_rank of the shape. Returns: the same shape but a different inner_rank Raises: ValueError if the new dense rank is invalid, or the old rank is unknown.
github-repos
def _RunScripts(self, run_dir=None): with _CreateTempDir(self.script_type, run_dir=run_dir) as dest_dir: try: self.logger.info('Starting %s scripts.', self.script_type) script_dict = self.retriever.GetScripts(dest_dir) self.executor.RunScripts(script_dict) finally: self.logger.info('Finished running %s scripts.', self.script_type)
Retrieve metadata scripts and execute them. Args: run_dir: string, the base directory location of the temporary directory.
codesearchnet
def _check_params(self, parameters): a_valid_fn = [] if self.target_fn is None: if callable(self): a_valid_fn.append(self.__call__) else: raise TypeError('invalid argument: tested object is not callable,\ please provide a valid target_fn') elif isinstance(self.target_fn, types.FunctionType) \ or isinstance(self.target_fn, types.MethodType): a_valid_fn.append(self.target_fn) else: a_valid_fn.append(self.target_fn.__call__) if not isinstance(parameters, str): for p in parameters: for fn in a_valid_fn: if has_arg(fn, p): pass else: raise ValueError('{} is not a valid parameter'.format(p)) else: raise TypeError('invalid argument: list or dictionnary expected')
Checks for mistakes in 'parameters' Args : parameters: dict, parameters to be checked Raises : ValueError: if any parameter is not a valid argument for the target function or the target function is not defined TypeError: if argument parameters is not iterable
juraj-google-style
def Glob2Regex(glob_pattern): if not glob_pattern: raise ValueError('Missing glob pattern.') regex_pattern = [] glob_pattern_index = 0 glob_pattern_length = len(glob_pattern) while glob_pattern_index < glob_pattern_length: character = glob_pattern[glob_pattern_index] glob_pattern_index += 1 if character == '*': regex_pattern.append('.*') elif character == '?': regex_pattern.append('.') elif character != '[': regex_character = re.escape(character) regex_pattern.append(regex_character) else: glob_group_index = glob_pattern_index if (glob_group_index < glob_pattern_length and glob_pattern[glob_group_index] == '!'): glob_group_index += 1 if (glob_group_index < glob_pattern_length and glob_pattern[glob_group_index] == ']'): glob_group_index += 1 while (glob_group_index < glob_pattern_length and glob_pattern[glob_group_index] != ']'): glob_group_index += 1 if glob_group_index >= glob_pattern_length: regex_pattern.append('\\[') continue glob_group = glob_pattern[glob_pattern_index:glob_group_index] glob_pattern_index = glob_group_index + 1 glob_group = glob_group.replace('\\', '\\\\') if py2to3.PY_3_7_AND_LATER: glob_group = glob_group.replace('|', '\\|') regex_pattern.append('[') if glob_group[0] == '!': regex_pattern.append('^') glob_group = glob_group[1:] elif glob_group[0] == '^': regex_pattern.append('\\') regex_pattern.append(glob_group) regex_pattern.append(']') return ''.join(regex_pattern)
Converts a glob pattern to a regular expression. This function supports basic glob patterns that consist of: * matches everything ? matches any single character [seq] matches any character in sequence [!seq] matches any character not in sequence Args: glob_pattern (str): glob pattern. Returns: str: regular expression pattern. Raises: ValueError: if the glob pattern cannot be converted.
juraj-google-style
def update_parameters(parameters, grads, learning_rate=1.2):
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']
    dW1 = grads['dW1']
    db1 = grads['db1']
    dW2 = grads['dW2']
    db2 = grads['db2']
    W1 -= learning_rate * dW1
    b1 -= learning_rate * db1
    W2 -= learning_rate * dW2
    b2 -= learning_rate * db2
    parameters = {'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2}
    return parameters
Updates parameters using the gradient descent update rule given above Arguments: parameters -- python dictionary containing your parameters grads -- python dictionary containing your gradients Returns: parameters -- python dictionary containing your updated parameters
codesearchnet
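A tiny numeric check of the gradient-descent update above, with made-up parameter shapes:
import numpy as np

parameters = {'W1': np.ones((2, 2)), 'b1': np.zeros((2, 1)),
              'W2': np.ones((1, 2)), 'b2': np.zeros((1, 1))}
grads = {'dW1': np.full((2, 2), 0.1), 'db1': np.zeros((2, 1)),
         'dW2': np.full((1, 2), 0.1), 'db2': np.zeros((1, 1))}

updated = update_parameters(parameters, grads, learning_rate=1.2)
print(updated['W1'])  # every entry moves from 1.0 to 1.0 - 1.2 * 0.1 = 0.88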
def cube(width, height, depth, center=(0.0, 0.0, 0.0), normals=True, uvs=True) -> VAO: width, height, depth = width / 2.0, height / 2.0, depth / 2.0 pos = numpy.array([ center[0] + width, center[1] - height, center[2] + depth, center[0] + width, center[1] + height, center[2] + depth, center[0] - width, center[1] - height, center[2] + depth, center[0] + width, center[1] + height, center[2] + depth, center[0] - width, center[1] + height, center[2] + depth, center[0] - width, center[1] - height, center[2] + depth, center[0] + width, center[1] - height, center[2] - depth, center[0] + width, center[1] + height, center[2] - depth, center[0] + width, center[1] - height, center[2] + depth, center[0] + width, center[1] + height, center[2] - depth, center[0] + width, center[1] + height, center[2] + depth, center[0] + width, center[1] - height, center[2] + depth, center[0] + width, center[1] - height, center[2] - depth, center[0] + width, center[1] - height, center[2] + depth, center[0] - width, center[1] - height, center[2] + depth, center[0] + width, center[1] - height, center[2] - depth, center[0] - width, center[1] - height, center[2] + depth, center[0] - width, center[1] - height, center[2] - depth, center[0] - width, center[1] - height, center[2] + depth, center[0] - width, center[1] + height, center[2] + depth, center[0] - width, center[1] + height, center[2] - depth, center[0] - width, center[1] - height, center[2] + depth, center[0] - width, center[1] + height, center[2] - depth, center[0] - width, center[1] - height, center[2] - depth, center[0] + width, center[1] + height, center[2] - depth, center[0] + width, center[1] - height, center[2] - depth, center[0] - width, center[1] - height, center[2] - depth, center[0] + width, center[1] + height, center[2] - depth, center[0] - width, center[1] - height, center[2] - depth, center[0] - width, center[1] + height, center[2] - depth, center[0] + width, center[1] + height, center[2] - depth, center[0] - width, center[1] + height, center[2] - depth, center[0] + width, center[1] + height, center[2] + depth, center[0] - width, center[1] + height, center[2] - depth, center[0] - width, center[1] + height, center[2] + depth, center[0] + width, center[1] + height, center[2] + depth, ], dtype=numpy.float32) if normals: normal_data = numpy.array([ -0, 0, 1, -0, 0, 1, -0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, -1, -0, 0, -1, -0, 0, -1, -0, 0, -1, -0, 0, -1, -0, 0, -1, -0, 0, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, ], dtype=numpy.float32) if uvs: uvs_data = numpy.array([ 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0 ], dtype=numpy.float32) vao = VAO("geometry:cube") vao.buffer(pos, '3f', ['in_position']) if normals: vao.buffer(normal_data, '3f', ['in_normal']) if uvs: vao.buffer(uvs_data, '2f', ['in_uv']) return vao
Creates a cube VAO with normals and texture coordinates Args: width (float): Width of the cube height (float): Height of the cube depth (float): Depth of the cube Keyword Args: center: center of the cube as a 3-component tuple normals: (bool) Include normals uvs: (bool) include uv coordinates Returns: A :py:class:`demosys.opengl.vao.VAO` instance
juraj-google-style
def list_dir(root, prefix=False):
    root = os.path.expanduser(root)
    directories = list(filter(lambda p: os.path.isdir(os.path.join(root, p)), os.listdir(root)))
    if prefix is True:
        directories = [os.path.join(root, d) for d in directories]
    return directories
List all directories at a given root Args: root (str): Path to directory whose folders need to be listed prefix (bool, optional): If true, prepends the path to each result, otherwise only returns the name of the directories found
codesearchnet
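Usage sketch for the directory-listing helper above; the temporary directory layout is invented for the example:
import os
import tempfile

root = tempfile.mkdtemp()
os.mkdir(os.path.join(root, 'a'))
os.mkdir(os.path.join(root, 'b'))

print(list_dir(root))               # ['a', 'b'] (names only, order not guaranteed)
print(list_dir(root, prefix=True))  # full paths such as '/tmp/.../a'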
def create_assembly_instance(self, assembly_uri, part_uri, configuration): payload = { "documentId": part_uri["did"], "elementId": part_uri["eid"], "versionId": part_uri["wvm"], "isAssembly": False, "isWholePartStudio": True, "configuration": self.encode_configuration(part_uri["did"], part_uri["eid"], configuration) } return self._api.request('post', '/api/assemblies/d/' + assembly_uri["did"] + '/' + assembly_uri["wvm_type"] + '/' + assembly_uri["wvm"] + '/e/' + assembly_uri["eid"] + '/instances', body=payload)
Insert a configurable part into an assembly. Args: - assembly_uri (dict): did, wvm_type, wvm, and eid of the assembly into which the part will be inserted - part_uri (dict): did, wvm, and eid of the configurable part - configuration (dict): the configuration Returns: - requests.Response: Onshape response data
juraj-google-style
def save_wav_file(filename, wav_data, sample_rate): with tf.compat.v1.Session(graph=tf.Graph()) as sess: wav_filename_placeholder = tf.compat.v1.placeholder(tf.string, []) sample_rate_placeholder = tf.compat.v1.placeholder(tf.int32, []) wav_data_placeholder = tf.compat.v1.placeholder(tf.float32, [None, 1]) wav_encoder = tf.audio.encode_wav(wav_data_placeholder, sample_rate_placeholder) wav_saver = io_ops.write_file(wav_filename_placeholder, wav_encoder) sess.run(wav_saver, feed_dict={wav_filename_placeholder: filename, sample_rate_placeholder: sample_rate, wav_data_placeholder: np.reshape(wav_data, (-1, 1))})
Saves audio sample data to a .wav audio file. Args: filename: Path to save the file to. wav_data: 2D array of float PCM-encoded audio data. sample_rate: Samples per second to encode in the file.
github-repos
def autodiff_tree(func, wrt, motion, mode, preserve_result, check_dims, verbose):
    import tangent
    namespace = {'tangent': tangent, 'numpy': numpy}
    done = set()
    final = gast.Module(body=[])
    namespace.update(six.get_function_globals(func))
    node, required = autodiff_ast(func, wrt, motion, mode, preserve_result, check_dims, verbose)
    final.body.extend(node.body)
    to_do = set(required)
    if motion == 'split' and mode == 'reverse':
        done.add((func, wrt))
        to_do -= done
    while to_do:
        func, wrt = to_do.pop()
        namespace.update(six.get_function_globals(func))
        node, required = autodiff_ast(func=func, wrt=wrt, motion='split', mode=mode,
                                      preserve_result=True, check_dims=False, verbose=verbose)
        final.body.extend(node.body)
        done.add((func, wrt))
        to_do.update(required)
        to_do -= done
    return final, namespace
Perform AD on all functions in a call tree. This function walks the call tree and differentiates each function in it. It also ensures that the global namespaces that each function in the call tree was in are merged. The `tangent` and `numpy` packages are added to the namespace here, so that the gradient templates can assume that they are present. Args: See `grad`. Returns: final: A single module which contains the primals and adjoints of all the functions in the call tree. namespace: A merged dictionary with all the variables in the global namespaces of each function. The primals and adjoints need access to these in order to execute.
codesearchnet
def _translate(pattern, case_sensitive=True):
    if not case_sensitive:
        pattern = pattern.lower()
    i, n = 0, len(pattern)
    res = ''
    while i < n:
        c = pattern[i]
        i = i + 1
        if c == '*':
            res = res + '[^/]*'
        elif c == '?':
            res = res + '.'
        elif c == '[':
            j = i
            if j < n and pattern[j] == '!':
                j = j + 1
            if j < n and pattern[j] == ']':
                j = j + 1
            while j < n and pattern[j] != ']':
                j = j + 1
            if j >= n:
                res = res + '\\['
            else:
                stuff = pattern[i:j].replace('\\', '\\\\')
                i = j + 1
                if stuff[0] == '!':
                    stuff = '^' + stuff[1:]
                elif stuff[0] == '^':
                    stuff = '\\' + stuff
                res = '%s[%s]' % (res, stuff)
        else:
            res = res + re.escape(c)
    return res
Translate a wildcard pattern to a regular expression. There is no way to quote meta-characters. Arguments: pattern (str): A wildcard pattern. case_sensitive (bool): Set to `False` to use a case insensitive regex (default `True`). Returns: str: A regex equivalent to the given pattern.
codesearchnet
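A few example translations produced by the wildcard-to-regex helper above:
import re

pattern = _translate('*.py')         # '*' maps to '[^/]*', '.' is escaped
print(pattern)
print(bool(re.match(pattern + '$', 'setup.py')))    # True
print(bool(re.match(pattern + '$', 'pkg/mod.py')))  # False: '*' does not cross '/'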
def snapshot(self, wiki=False, streamed=False, action=None, chunk_size=1024, **kwargs):
    path = '/projects/%s/snapshot' % self.get_id()
    result = self.manager.gitlab.http_get(path, streamed=streamed, raw=True, **kwargs)
    return utils.response_content(result, streamed, action, chunk_size)
Return a snapshot of the repository. Args: wiki (bool): If True return the wiki repository streamed (bool): If True the data will be processed by chunks of `chunk_size` and each chunk is passed to `action` for treatment. action (callable): Callable responsible of dealing with chunk of data chunk_size (int): Size of each chunk **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabGetError: If the content could not be retrieved Returns: str: The uncompressed tar archive of the repository
codesearchnet
class DPTPreActResidualLayer(nn.Module): def __init__(self, config): super().__init__() self.use_batch_norm = config.use_batch_norm_in_fusion_residual use_bias_in_fusion_residual = config.use_bias_in_fusion_residual if config.use_bias_in_fusion_residual is not None else not self.use_batch_norm self.activation1 = nn.ReLU() self.convolution1 = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=use_bias_in_fusion_residual) self.activation2 = nn.ReLU() self.convolution2 = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=use_bias_in_fusion_residual) if self.use_batch_norm: self.batch_norm1 = nn.BatchNorm2d(config.fusion_hidden_size) self.batch_norm2 = nn.BatchNorm2d(config.fusion_hidden_size) def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: residual = hidden_state hidden_state = self.activation1(hidden_state) hidden_state = self.convolution1(hidden_state) if self.use_batch_norm: hidden_state = self.batch_norm1(hidden_state) hidden_state = self.activation2(hidden_state) hidden_state = self.convolution2(hidden_state) if self.use_batch_norm: hidden_state = self.batch_norm2(hidden_state) return hidden_state + residual
ResidualConvUnit, pre-activate residual unit. Args: config (`[DPTConfig]`): Model configuration class defining the model architecture.
github-repos
def _FormatPropertyName(self, property_name): fix_key = re.sub('(.)([A-Z][a-z]+)', '\\1_\\2', property_name) return re.sub('([a-z0-9])([A-Z])', '\\1_\\2', fix_key).lower()
Formats a camel case property name as snake case. Args: property_name (str): property name in camel case. Returns: str: property name in snake case.
codesearchnet
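The camel-case to snake-case conversion above, shown as a standalone snippet with the same two regular expressions (the wrapper function name is illustrative):
import re

def format_property_name(property_name):
    # First split an uppercase letter followed by a capitalized word, then split
    # lowercase/digit-to-uppercase boundaries, and lowercase the result.
    fix_key = re.sub('(.)([A-Z][a-z]+)', '\\1_\\2', property_name)
    return re.sub('([a-z0-9])([A-Z])', '\\1_\\2', fix_key).lower()

print(format_property_name('creationTime'))      # creation_time
print(format_property_name('HTTPResponseCode'))  # http_response_code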
def wait_for_js(function): @functools.wraps(function) def wrapper(*args, **kwargs): if len(args) < 1: return function(*args, **kwargs) else: self = args[0] if hasattr(self, 'wait_for_js'): self.wait_for_js() return function(*args, **kwargs) return wrapper
Method decorator that waits for JavaScript dependencies before executing `function`. If the function is not a method, the decorator has no effect. Args: function (callable): Method to decorate. Returns: Decorated method
juraj-google-style
def AddKeywordsForName(self, name, keywords):
    data_store.DB.IndexAddKeywordsForName(self.urn, name, keywords)
Associates keywords with name. Records that keywords are associated with name. Args: name: A name which should be associated with some keywords. keywords: A collection of keywords to associate with name.
juraj-google-style
def IsErrorSuppressedByNolint(category, linenum): return (linenum in _error_suppressions.get(category, set()) or linenum in _error_suppressions.get(None, set()))
Returns true if the specified error category is suppressed on this line. Consults the global error_suppressions map populated by ParseNolintSuppressions/ResetNolintSuppressions. Args: category: str, the category of the error. linenum: int, the current line number. Returns: bool, True iff the error should be suppressed due to a NOLINT comment.
juraj-google-style
def load_ini(self, ini_file):
    if ini_file and not os.path.exists(ini_file):
        self.log.critical(f'Settings file specified but not found. {ini_file}')
        sys.exit(1)
    if not ini_file:
        ini_file = f'{self.cwd}/settings.ini'
    if os.path.exists(ini_file):
        config = configparser.RawConfigParser(allow_no_value=True)
        config.read(ini_file)
        for key, value in self.spec.items():
            entry = None
            if value['type'] == str:
                entry = config.get('settings', option=key.lower(), fallback=None)
            elif value['type'] == bool:
                entry = config.getboolean('settings', option=key.lower(), fallback=None)
            elif value['type'] == int:
                entry = config.getint('settings', option=key.lower(), fallback=None)
            elif value['type'] == float:
                entry = config.getfloat('settings', option=key.lower(), fallback=None)
            elif value['type'] in [list, dict]:
                entries = config.get('settings', option=key.lower(), fallback=None)
                if entries:
                    try:
                        entry = json.loads(entries)
                    except json.decoder.JSONDecodeError as _err:
                        self.log.critical(f'Error parsing json from ini file. {entries}')
                        sys.exit(1)
            if entry is not None:
                setattr(self, key.upper(), entry)
Load the contents from the ini file Args: ini_file (str): The file from which the settings should be loaded
codesearchnet
def ProduceEventWithEventData(self, event, event_data): if event.timestamp is None: raise errors.InvalidEvent('Event timestamp value not set.') if event.timestamp < self._INT64_MIN or event.timestamp > self._INT64_MAX: raise errors.InvalidEvent('Event timestamp value out of bounds.') event_data_hash = event_data.GetAttributeValuesHash() if event_data_hash != self._last_event_data_hash: event_data = copy.deepcopy(event_data) self.ProcessEvent( event_data, parser_chain=self.GetParserChain(), file_entry=self._file_entry) self._storage_writer.AddEventData(event_data) self._last_event_data_hash = event_data_hash self._last_event_data_identifier = event_data.GetIdentifier() if self._last_event_data_identifier: event.SetEventDataIdentifier(self._last_event_data_identifier) event.parser = self.GetParserChain() self._storage_writer.AddEvent(event) self._number_of_events += 1 self.last_activity_timestamp = time.time()
Produces an event. Args: event (EventObject): event. event_data (EventData): event data. Raises: InvalidEvent: if the event timestamp value is not set or out of bounds.
juraj-google-style
def _create_slots(self, var_list): pass
Create all slots needed by the variables. Args: var_list: A list of `Variable` objects.
github-repos
def decode(image, symbols=None): pixels, width, height = _pixel_data(image) results = [] with _image_scanner() as scanner: if symbols: disable = set(ZBarSymbol).difference(symbols) for symbol in disable: zbar_image_scanner_set_config( scanner, symbol, ZBarConfig.CFG_ENABLE, 0 ) for symbol in symbols: zbar_image_scanner_set_config( scanner, symbol, ZBarConfig.CFG_ENABLE, 1 ) with _image() as img: zbar_image_set_format(img, _FOURCC['L800']) zbar_image_set_size(img, width, height) zbar_image_set_data(img, cast(pixels, c_void_p), len(pixels), None) decoded = zbar_scan_image(scanner, img) if decoded < 0: raise PyZbarError('Unsupported image format') else: results.extend(_decode_symbols(_symbols_for_image(img))) return results
Decodes datamatrix barcodes in `image`. Args: image: `numpy.ndarray`, `PIL.Image` or tuple (pixels, width, height) symbols: iter(ZBarSymbol) the symbol types to decode; if `None`, uses `zbar`'s default behaviour, which is to decode all symbol types. Returns: :obj:`list` of :obj:`Decoded`: The values decoded from barcodes.
juraj-google-style
def is_tracking_shield_displayed(self):
    with self.selenium.context(self.selenium.CONTEXT_CHROME):
        if self.window.firefox_version >= 63:
            el = self.root.find_element(*self._tracking_protection_shield_locator)
            return el.get_attribute('active') is not None
        el = self.root.find_element(By.ID, 'tracking-protection-icon')
        return bool(el.get_attribute('state'))
Tracking Protection shield. Returns: bool: True or False if the Tracking Shield is displayed.
codesearchnet
def stack50(op, delay): n = 50 delays = delay + tf.range(0, n, dtype=float) / 10000.0 start_t = time.time() func = tf.function(lambda: tf.stack([op(delays[i]) for i in range(n)])) r_numpy = func().numpy() end_t = time.time() print('') print('Total time = %5.3f seconds using %s' % (end_t - start_t, str(op))) print('Returned values from the ops:') np.set_printoptions(precision=4, suppress=True) print(r_numpy) sys.stdout.flush()
Create a tf.stack of 50 sleep ops. Args: op: The sleep op, either sleep_op.SyncSleep or sleep_op.AsyncSleep. delay: Each op should finish at least float `delay` seconds after it starts.
github-repos
def generate(self, past_values: torch.Tensor) -> SamplePatchTSMixerRegressionOutput:
    num_parallel_samples = self.num_parallel_samples
    outputs = self(past_values=past_values, target_values=None, output_hidden_states=False)
    distribution = self.distribution_output.distribution(outputs.regression_outputs)
    samples = [distribution.sample() for _ in range(num_parallel_samples)]
    samples = torch.stack(samples, dim=1).view(-1, num_parallel_samples, self.config.num_targets)
    return SamplePatchTSMixerRegressionOutput(sequences=samples)
Generate sequences of sample predictions from a model with a probability distribution head. Args: past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`): Past values of the time series that serves as context in order to predict the target values. Return: [`SamplePatchTSMixerRegressionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of samples, num_targets)`.
github-repos
def is_subtype_of(self, other: trace.TraceType) -> bool: if type(self) is not type(other): return False is_subtype = True def check_attribute(attribute_self, attribute_other): nonlocal is_subtype if not is_subtype: return if isinstance(attribute_self, trace.TraceType): if not attribute_self.is_subtype_of(attribute_other): is_subtype = False return elif attribute_self != attribute_other: is_subtype = False try: nest.map_structure(check_attribute, self._serialize(), other._serialize()) except (ValueError, TypeError): return False return is_subtype
Returns True if `self` is a subtype of `other`. Implements the tf.types.experimental.func.TraceType interface. If not overridden by a subclass, the default behavior is to assume the TypeSpec is covariant upon attributes that implement TraceType and invariant upon rest of the attributes as well as the structure and type of the TypeSpec. Args: other: A TraceType object.
github-repos
def inquire_property(name, doc=None):
    def inquire_property(self):
        if not self._started:
            msg = 'Cannot read {0} from a security context whose establishment has not yet been started.'
            raise AttributeError(msg)
        return getattr(self._inquire(**{name: True}), name)

    return property(inquire_property, doc=doc)
Creates a property based on an inquire result This method creates a property that calls the :python:`_inquire` method, and return the value of the requested information. Args: name (str): the name of the 'inquire' result information Returns: property: the created property
codesearchnet
def fit2dArrayToFn(arr, fn, mask=None, down_scale_factor=None, output_shape=None, guess=None, outgrid=None):
    if mask is None:
        mask = np.ones(shape=arr.shape, dtype=bool)
    if down_scale_factor is None:
        if mask.sum() > 1000:
            down_scale_factor = 0.3
        else:
            down_scale_factor = 1
    if down_scale_factor != 1:
        arr2 = zoom(arr, down_scale_factor)
        mask = zoom(mask, down_scale_factor, output=bool)
    else:
        arr2 = arr
    x, y = np.where(mask)
    z = arr2[mask]
    parameters, cov_matrix = curve_fit(fn, (x, y), z, p0=guess)
    perr = np.sqrt(np.diag(cov_matrix))
    if outgrid is not None:
        yy, xx = outgrid
        rebuilt = fn((yy, xx), *parameters)
    else:
        if output_shape is None:
            output_shape = arr.shape
        fx = arr2.shape[0] / output_shape[0]
        fy = arr2.shape[1] / output_shape[1]
        rebuilt = np.fromfunction(lambda x, y: fn((x * fx, y * fy), *parameters), output_shape)
    return rebuilt, parameters, perr
Fit a 2d array to a 2d function, using only masked values. * [down_scale_factor] downscale the map to speed up the fitting procedure; set a value smaller than 1 * [output_shape] shape of the output array * [guess] initial parameters, which must be scaled using [down_scale_factor] Returns: Fitted map, fitting params (scaled), error
codesearchnet
def annotations_from_file(filename): import edflib e = edflib.EdfReader(filename, annotations_mode='all') return e.read_annotations()
Get a list of event annotations from an EDF (European Data Format file or EDF+ file, using edflib. Args: filename: EDF+ file Returns: list: annotation events, each in the form [start_time, duration, text]
codesearchnet
def get_missing_services(self, services):
    required_services = set(services)
    provided_services = set(self._services.keys())
    missing_services = required_services.difference(provided_services)
    return sorted(missing_services)
Check if all required services are provided Args: services: List with the service names which are required Returns: List with missing services
codesearchnet
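The set-difference check above, in isolation; the service names are invented for the example:
required = ['database', 'cache', 'queue']
provided = {'database': object(), 'metrics': object()}

missing = sorted(set(required).difference(provided.keys()))
print(missing)  # ['cache', 'queue']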
def new(cls, access_token, environment='prod'):
    api_client = ApiClient.new(access_token, environment)
    return cls(api_client)
Create new storage service client. Arguments: environment(str): The service environment to be used for the client. 'prod' or 'dev'. access_token(str): The access token used to authenticate with the service Returns: A storage_service.Client instance
juraj-google-style
def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
    merges = [' '.join(m) for m in tokenizer.bpe_ranks.keys()]
    vocab = tokenizer.get_vocab()
    return cls(vocab, merges, *args, **kwargs)
Creates TFGPT2Tokenizer from GPT2Tokenizer Args: tokenizer (GPT2Tokenizer) Examples: ```python from transformers import AutoTokenizer, TFGPT2Tokenizer tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") tf_tokenizer = TFGPT2Tokenizer.from_tokenizer(tokenizer) ```
github-repos
def notify(self, cuuid, event_data):
    euuid = str(uuid.uuid1())
    if 'encryption' in self.registry[cuuid]:
        client_key = self.registry[cuuid]['encryption']
    else:
        client_key = None
    logger.debug('<%s> <%s> Sending NOTIFY event to client with event data: %s' % (
        str(cuuid), str(euuid), pformat(event_data)))
    try:
        ip_address = self.registry[cuuid]['host']
    except KeyError:
        logger.warning('<%s> <%s> Host not found in registry! Transmit Canceled' % (
            str(cuuid), str(euuid)))
        return False
    try:
        port = self.registry[cuuid]['port']
    except KeyError:
        logger.warning('<%s> <%s> Port not found! Transmit Canceled' % (str(cuuid), str(euuid)))
        return False
    packet = serialize_data({'method': 'NOTIFY', 'event_data': event_data, 'euuid': euuid},
                            self.compression, self.encryption, client_key)
    address = (ip_address, port)
    self.event_uuids[euuid] = 0
    logger.debug('<%s> Currently processing events: %s' % (cuuid, pformat(self.event_uuids)))
    logger.debug('<%s> New NOTIFY event being processed:' % cuuid)
    logger.debug('<%s> EUUID: %s' % (cuuid, euuid))
    logger.debug('<%s> Event Data: %s' % (cuuid, pformat(event_data)))
    self.listener.send_datagram(packet, address)
    self.listener.call_later(self.timeout, self.retransmit,
                             {'euuid': euuid, 'response': packet, 'cuuid': cuuid})
This function will send a NOTIFY event to a registered client. NOTIFY messages are nearly identical to EVENT messages, except that NOTIFY messages are always sent from server -> client. EVENT messages are always sent from client -> server. In addition to this difference, NOTIFY messages are not processed by a middleware to determine if they are legal or not, since all messages from the server should be considered LEGAL. Args: cuuid (string): The client uuid to send the event data to. event_data (any): The event data that we will be sending to the client. Returns: None
codesearchnet
def _parse_trunk_groups(self, config):
    values = re.findall('switchport trunk group ([^\\s]+)', config, re.M)
    return dict(trunk_groups=values)
Scans the specified config and parses the trunk group values Args: config (str): The interface configuration block Returns: A dict object with the trunk group values that can be merged into the resource dict
codesearchnet
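A standalone run of the trunk-group regex above against a made-up interface block:
import re

config = """
interface Ethernet1
   switchport trunk group mlag-peer
   switchport trunk group lab
"""

values = re.findall('switchport trunk group ([^\\s]+)', config, re.M)
print(dict(trunk_groups=values))  # {'trunk_groups': ['mlag-peer', 'lab']}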
def PauliX(local_space, states=None):
    local_space, states = _get_pauli_args(local_space, states)
    g, e = states
    return (LocalSigma.create(g, e, hs=local_space) +
            LocalSigma.create(e, g, hs=local_space))
r"""Pauli-type X-operator .. math:: \hat{\sigma}_x = \begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix} on an arbitrary two-level system. Args: local_space (str or int or .LocalSpace): Associated Hilbert space. If :class:`str` or :class:`int`, a :class:`LocalSpace` with a matching label will be created. states (None or tuple[int or str]): The labels for the basis states for the two levels on which the operator acts. If None, the two lowest levels are used. Returns: Operator: Local X-operator as a linear combination of :class:`LocalSigma`
codesearchnet
def __init__( self, resolver_context, compression_method=None, file_object=None): if file_object is not None and compression_method is None: raise ValueError( 'File-like object provided without corresponding compression ' 'method.') super(CompressedStream, self).__init__(resolver_context) self._compression_method = compression_method self._file_object = file_object self._file_object_set_in_init = bool(file_object) self._compressed_data = b'' self._current_offset = 0 self._decompressor = None self._realign_offset = True self._uncompressed_data = b'' self._uncompressed_data_offset = 0 self._uncompressed_data_size = 0 self._uncompressed_stream_size = None
Initializes a file-like object. If the file-like object is chained do not separately use the parent file-like object. Args: resolver_context (Context): resolver context. compression_method (Optional[str]): method used to the compress the data. file_object (Optional[file]): parent file-like object. Raises: ValueError: if file_object provided but compression_method is not.
juraj-google-style
def register_test_preprocessor(cls, test_names: Union[str, List]): if isinstance(test_names, str): test_names = [test_names] def apply(preprocessor): for test_name in test_names: if test_name not in cls._test_preprocessor: cls._test_preprocessor[test_name] = [] cls._test_preprocessor[test_name].append(preprocessor) return preprocessor return apply
Decorator to register a preprocessor function for specific tests. This decorator is used to associate a preprocessor function with one or more test names. The preprocessor function will be called before the corresponding test is executed, allowing for modification of the test specification or environment setup. Args: test_names: A string or a list of strings representing the names of the tests for which the preprocessor should be registered. The test names should match the names generated by `parse_test_methods`. Returns: A decorator function that takes the preprocessor function as an argument and registers it.
github-repos
def __init__(self, num_classes=1000): super(Xception, self).__init__() self.num_classes = num_classes self.conv1 = nn.Conv2d(3, 32, 3,2, 0, bias=False) self.bn1 = nn.BatchNorm2d(32) self.relu1 = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(32,64,3,bias=False) self.bn2 = nn.BatchNorm2d(64) self.relu2 = nn.ReLU(inplace=True) self.block1=Block(64,128,2,2,start_with_relu=False,grow_first=True) self.block2=Block(128,256,2,2,start_with_relu=True,grow_first=True) self.block3=Block(256,728,2,2,start_with_relu=True,grow_first=True) self.block4=Block(728,728,3,1,start_with_relu=True,grow_first=True) self.block5=Block(728,728,3,1,start_with_relu=True,grow_first=True) self.block6=Block(728,728,3,1,start_with_relu=True,grow_first=True) self.block7=Block(728,728,3,1,start_with_relu=True,grow_first=True) self.block8=Block(728,728,3,1,start_with_relu=True,grow_first=True) self.block9=Block(728,728,3,1,start_with_relu=True,grow_first=True) self.block10=Block(728,728,3,1,start_with_relu=True,grow_first=True) self.block11=Block(728,728,3,1,start_with_relu=True,grow_first=True) self.block12=Block(728,1024,2,2,start_with_relu=True,grow_first=False) self.conv3 = SeparableConv2d(1024,1536,3,1,1) self.bn3 = nn.BatchNorm2d(1536) self.relu3 = nn.ReLU(inplace=True) self.conv4 = SeparableConv2d(1536,2048,3,1,1) self.bn4 = nn.BatchNorm2d(2048) self.fc = nn.Linear(2048, num_classes)
Constructor Args: num_classes: number of classes
juraj-google-style
def __setitem__(self, predicates, new_value): if self.df is not None and self.column_name is not None: self.df[self.column_name] = self.mask(predicates, new_value)
Assigns ``self.mask(predicates, new_value)`` back to the wrapped DataFrame column,
provided both ``df`` and ``column_name`` are set.

Args:
    predicates: Condition(s) used by ``mask`` to select the rows to update.
    new_value: Value written to the selected rows.

Returns:
    None
juraj-google-style
def put(self, filename, encoding=None):
    from . import LocalFile

    if os.path.isdir(filename) and self.source is None:
        raise ValueError("Cannot write this object to "
                         "directory %s without an explicit filename." % filename)

    target = get_target_path(filename, self.source)

    if (encoding is not None) and (encoding != self.encoded_with):
        # Both placeholders must be filled: the file object and its existing encoding.
        raise ValueError('%s is already encoded as "%s"' % (self, self.encoded_with))

    with self.open('rb') as infile, open(target, 'wb') as outfile:
        for line in infile:
            outfile.write(line)

    return LocalFile(target)
Write the file to the given path.

Args:
    filename (str): path to write this file to
    encoding (str): expected encoding; must match the encoding this file was
        already encoded with, if any

Returns:
    LocalFile: reference to the copy of the file stored at ``filename``

Raises:
    ValueError: if ``filename`` is a directory and no explicit filename is
        available, or if ``encoding`` conflicts with the existing encoding
juraj-google-style
def __init__(self, min_interval_sec=10, max_interval_sec=600, multiplier=2): self.min_interval_sec = min_interval_sec self.max_interval_sec = max_interval_sec self.multiplier = multiplier self.Succeeded()
Class constructor. Args: min_interval_sec: initial small delay. max_interval_sec: maximum delay between retries. multiplier: factor for exponential increase.
juraj-google-style
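The constructor above only stores the three parameters; the delay schedule they typically imply can be sketched as follows. The clamping at max_interval_sec is an assumption drawn from the parameter descriptions, not from code shown here.

def backoff_delays(min_interval_sec=10, max_interval_sec=600, multiplier=2, n=8):
    # Yield successive sleep intervals: start small, multiply, cap at the maximum.
    delay = min_interval_sec
    for _ in range(n):
        yield delay
        delay = min(delay * multiplier, max_interval_sec)

print(list(backoff_delays()))  # [10, 20, 40, 80, 160, 320, 600, 600]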
def read_from_hdx(identifier, configuration=None): resourceview = ResourceView(configuration=configuration) result = resourceview._load_from_hdx('resource view', identifier) if result: return resourceview return None
Reads the resource view given by identifier from HDX and returns ResourceView object Args: identifier (str): Identifier of resource view configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. Returns: Optional[ResourceView]: ResourceView object if successful read, None if not
codesearchnet
def _RetryLoop(self, func, timeout=None): timeout = timeout or self.DEFAULT_TIMEOUT deadline = time.time() + timeout sleep = 1 while True: try: return func(timeout) except grpc.RpcError: if time.time() + sleep > deadline: raise time.sleep(sleep) sleep *= 2 timeout = deadline - time.time()
Retries an operation until success or deadline.

Args:
  func: The function to run. Must take a timeout, in seconds, as a single
    parameter. If it raises grpc.RpcError and the deadline has not been
    reached, it will be run again.
  timeout: Retries will continue until timeout seconds have passed.
juraj-google-style
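A self-contained sketch of the same deadline-bounded retry pattern, with a hypothetical TransientError standing in for grpc.RpcError:

import time

class TransientError(Exception):
    """Hypothetical stand-in for grpc.RpcError."""

def retry_until_deadline(func, timeout=60.0):
    # Call func(remaining_timeout); on transient failure, sleep with doubling
    # backoff and retry, but never past the overall deadline.
    deadline = time.time() + timeout
    sleep = 1
    while True:
        try:
            return func(deadline - time.time())
        except TransientError:
            if time.time() + sleep > deadline:
                raise
            time.sleep(sleep)
            sleep *= 2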
async def _on_event(self, event_): conv_id = event_.conversation_id.id try: conv = (await self._get_or_fetch_conversation(conv_id)) except exceptions.NetworkError: logger.warning('Failed to fetch conversation for event notification: %s', conv_id) else: self._sync_timestamp = parsers.from_timestamp(event_.timestamp) conv_event = conv.add_event(event_) if (conv_event is not None): (await self.on_event.fire(conv_event)) (await conv.on_event.fire(conv_event))
Receive a hangouts_pb2.Event and fan out to Conversations. Args: event_: hangouts_pb2.Event instance
codesearchnet
def clone(self, data=None, shared_data=True, new_type=None, *args, **overrides): if 'datatype' not in overrides: datatypes = [self.interface.datatype] + self.datatype overrides['datatype'] = list(util.unique_iterator(datatypes)) return super(Dataset, self).clone(data, shared_data, new_type, *args, **overrides)
Clones the object, overriding data and parameters. Args: data: New data replacing the existing data shared_data (bool, optional): Whether to use existing data new_type (optional): Type to cast object to *args: Additional arguments to pass to constructor **overrides: New keyword arguments to pass to constructor Returns: Cloned object
juraj-google-style
def get_electron_number(self, charge=0): atomic_number = constants.elements['atomic_number'].to_dict() return (sum([atomic_number[atom] for atom in self['atom']]) - charge)
Return the number of electrons.

Args:
    charge (int): Charge of the molecule.

Returns:
    int: The number of electrons (sum of atomic numbers minus the charge).
codesearchnet
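A tiny, self-contained check of the electron arithmetic; the atomic-number table below is hand-written for a few elements and stands in for the constants module used above:

# Minimal atomic-number table (covers only a few elements, for illustration).
ATOMIC_NUMBER = {'H': 1, 'C': 6, 'N': 7, 'O': 8}

def electron_count(atoms, charge=0):
    # Electrons = sum of atomic numbers minus the overall charge.
    return sum(ATOMIC_NUMBER[a] for a in atoms) - charge

# Water (H2O) with no net charge has 10 electrons.
assert electron_count(['H', 'H', 'O']) == 10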
def __init__(self, file_entry): super(VShadowVolume, self).__init__(file_entry.name) self._file_entry = file_entry
Initializes a volume. Args: file_entry (VShadowFileEntry): a VSS file entry.
juraj-google-style
def check_status(status, expected, path, headers=None, resp_headers=None, body=None, extras=None): if (status in expected): return msg = ('Expect status %r from Google Storage. But got status %d.\nPath: %r.\nRequest headers: %r.\nResponse headers: %r.\nBody: %r.\nExtra info: %r.\n' % (expected, status, path, headers, resp_headers, body, extras)) if (status == httplib.UNAUTHORIZED): raise AuthorizationError(msg) elif (status == httplib.FORBIDDEN): raise ForbiddenError(msg) elif (status == httplib.NOT_FOUND): raise NotFoundError(msg) elif (status == httplib.REQUEST_TIMEOUT): raise TimeoutError(msg) elif (status == httplib.REQUESTED_RANGE_NOT_SATISFIABLE): raise InvalidRange(msg) elif ((status == httplib.OK) and (308 in expected) and (httplib.OK not in expected)): raise FileClosedError(msg) elif (status >= 500): raise ServerError(msg) else: raise FatalError(msg)
Check HTTP response status is expected. Args: status: HTTP response status. int. expected: a list of expected statuses. A list of ints. path: filename or a path prefix. headers: HTTP request headers. resp_headers: HTTP response headers. body: HTTP response body. extras: extra info to be logged verbatim if error occurs. Raises: AuthorizationError: if authorization failed. NotFoundError: if an object that's expected to exist doesn't. TimeoutError: if HTTP request timed out. ServerError: if server experienced some errors. FatalError: if any other unexpected errors occurred.
codesearchnet
def get_paths(self, key): final_paths = [] if (key in self.__cli): paths = (self.__cli[key] or []) from_conf = False else: paths = (self.__config.get(key) or []) from_conf = True for path in flatten_list(paths): final_path = self.__abspath(path, from_conf) if final_path: final_paths.append(final_path) return final_paths
Same as `ConfigParser.get_path` for a list of paths. Args: key: str, the key to lookup the paths with Returns: list: The paths.
codesearchnet
def candidates(self, word): if self.known([word]): return {word} res = [x for x in self.edit_distance_1(word)] tmp = self.known(res) if tmp: return tmp if self._distance == 2: tmp = self.known([x for x in self.__edit_distance_alt(res)]) if tmp: return tmp return {word}
Generate possible spelling corrections for the provided word, going up to
an edit distance of two only when the closer candidates yield nothing.

Args:
    word (str): The word for which to calculate candidate spellings

Returns:
    set: The set of words that are possible candidates
juraj-google-style
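The method relies on known() and edit_distance_1(), which are not shown here; a Norvig-style sketch of those pieces, with a toy dictionary in place of the real word list:

import string

def edit_distance_1(word):
    # All strings one edit away from word: deletes, transposes, replaces, inserts.
    letters = string.ascii_lowercase
    splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    deletes = [L + R[1:] for L, R in splits if R]
    transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
    replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
    inserts = [L + c + R for L, R in splits for c in letters]
    return set(deletes + transposes + replaces + inserts)

# Toy dictionary playing the role of self.known().
dictionary = {'word', 'words', 'work'}
print(edit_distance_1('wrod') & dictionary)  # {'word'}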
def throw(self, exception_class, should_throw): return self.__copy_and_set('throws', self._throws + [(exception_class, should_throw)])
Defines whether an exception should be thrown after the request is sent.

Args:
    exception_class (class): The class of the exception to instantiate
    should_throw (function): The predicate indicating whether the exception should be
        thrown. This function will be called with the response as its parameter

Returns:
    The request builder instance, in order to chain calls
juraj-google-style
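A hypothetical end-to-end use of the fluent style above; RequestBuilder here is a minimal stand-in whose only job is to show how throw() chains, not the real builder class:

import copy

class RequestBuilder:
    # Minimal immutable-style builder, assumed for illustration only.
    def __init__(self, throws=None):
        self._throws = list(throws or [])

    def throw(self, exception_class, should_throw):
        clone = copy.copy(self)
        clone._throws = self._throws + [(exception_class, should_throw)]
        return clone

class NotFoundError(Exception):
    """Hypothetical exception for 404 responses."""

builder = RequestBuilder().throw(NotFoundError, lambda response: response.status_code == 404)
print(len(builder._throws))  # 1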
def gcs(line, cell=None):
  parser = google.datalab.utils.commands.CommandParser(
      prog='%gcs', description='')  # description string not preserved in this snippet
  copy_parser = parser.subcommand('copy', 'Copy one or more Google Cloud Storage objects to a '
                                          'different location.')
  copy_parser.add_argument('-s', '--source', help='The name of the object(s) to copy', nargs='+')
  copy_parser.add_argument('-d', '--destination', required=True,
                           help='The copy destination. For multiple source objects this must be a '
                                'bucket.')
  copy_parser.set_defaults(func=_gcs_copy)

  create_parser = parser.subcommand('create', 'Create one or more Google Cloud Storage buckets.')
  create_parser.add_argument('-p', '--project', help='The project associated with the objects')
  create_parser.add_argument('-b', '--bucket', help='The name of the bucket(s) to create',
                             nargs='+')
  create_parser.set_defaults(func=_gcs_create)

  delete_parser = parser.subcommand('delete', 'Delete one or more Google Cloud Storage buckets or '
                                              'objects.')
  delete_parser.add_argument('-b', '--bucket', nargs='*', help='The name of the bucket(s) to remove')
  delete_parser.add_argument('-o', '--object', nargs='*', help='The name of the object(s) to remove')
  delete_parser.set_defaults(func=_gcs_delete)

  list_parser = parser.subcommand('list', 'List buckets in a project, or contents of a bucket.')
  list_parser.add_argument('-p', '--project', help='The project associated with the objects')
  list_parser.add_argument('-o', '--objects',
                           help='List objects under the given Google Cloud Storage path',
                           nargs='?')
  list_parser.set_defaults(func=_gcs_list)

  read_parser = parser.subcommand('read', 'Read the contents of a Google Cloud Storage object into '
                                          'a Python variable.')
  read_parser.add_argument('-o', '--object', help='The name of the object to read', required=True)
  read_parser.add_argument('-v', '--variable', required=True,
                           help='The name of the Python variable to set')
  read_parser.set_defaults(func=_gcs_read)

  view_parser = parser.subcommand('view', 'View the contents of a Google Cloud Storage object.')
  view_parser.add_argument('-n', '--head', type=int, default=20,
                           help='The number of initial lines to view')
  view_parser.add_argument('-t', '--tail', type=int, default=20,
                           help='The number of lines from end to view')
  view_parser.add_argument('-o', '--object', help='The name of the object to view', required=True)
  view_parser.set_defaults(func=_gcs_view)

  write_parser = parser.subcommand('write', 'Write the value of a Python variable to a Google '
                                            'Cloud Storage object.')
  write_parser.add_argument('-v', '--variable', help='The name of the source Python variable',
                            required=True)
  write_parser.add_argument('-o', '--object', required=True,
                            help='The name of the destination Google Cloud Storage object to write')
  write_parser.add_argument('-c', '--content_type', help='MIME type', default='text/plain')
  write_parser.set_defaults(func=_gcs_write)

  return google.datalab.utils.commands.handle_magic_line(line, cell, parser)
Implements the gcs cell magic for ipython notebooks.

Args:
  line: the contents of the gcs line.
  cell: the contents of the cell body, if any.
Returns:
  The results of executing the cell.
juraj-google-style
def ParseVideoRow(self, parser_mediator, query, row, **unused_kwargs): query_hash = hash(query) event_data = KodiVideoEventData() event_data.filename = self._GetRowValue(query_hash, row, 'strFilename') event_data.play_count = self._GetRowValue(query_hash, row, 'playCount') event_data.query = query timestamp = self._GetRowValue(query_hash, row, 'lastPlayed') date_time = dfdatetime_time_elements.TimeElements() date_time.CopyFromDateTimeString(timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_LAST_VISITED) parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a Video row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
juraj-google-style
def _GetArgDefault(flag, spec): num_defaults = len(spec.defaults) args_with_defaults = spec.args[-num_defaults:] for arg, default in zip(args_with_defaults, spec.defaults): if arg == flag: return repr(default) if flag in spec.kwonlydefaults: return repr(spec.kwonlydefaults[flag]) return ''
Returns a string describing a flag's default value. Args: flag: The name of the flag. spec: An instance of fire.inspectutils.FullArgSpec, containing type and default information about the arguments to a callable. Returns: A string to be used in constructing the help screen for the function, the empty string if the flag does not have a default or the default is not available.
github-repos
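The args/defaults alignment used above can be reproduced with the standard library's inspect.getfullargspec, which appears to expose the same fields as the FullArgSpec named in the docstring:

import inspect

def example(a, b=2, *, c=3):
    return a + b + c

spec = inspect.getfullargspec(example)
# The last len(defaults) positional args are the ones that carry defaults.
args_with_defaults = spec.args[-len(spec.defaults):]
print(dict(zip(args_with_defaults, spec.defaults)))  # {'b': 2}
print(spec.kwonlydefaults)                           # {'c': 3}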
def GlobForPaths(self, paths, pathtype='OS', root_path=None, process_non_regular_files=False, collect_ext_attrs=False): patterns = [] if (not paths): return self.state.pathtype = pathtype self.state.root_path = root_path self.state.process_non_regular_files = process_non_regular_files self.state.collect_ext_attrs = collect_ext_attrs for path in paths: patterns.extend(path.Interpolate(knowledge_base=self.client_knowledge_base)) patterns.sort(key=len, reverse=True) for pattern in patterns: curr_node = self.state.component_tree components = self.ConvertGlobIntoPathComponents(pattern) for (i, curr_component) in enumerate(components): is_last_component = (i == (len(components) - 1)) next_node = curr_node.get(curr_component.SerializeToString(), {}) if (is_last_component and next_node): curr_node[curr_component.SerializeToString()] = {} else: curr_node = curr_node.setdefault(curr_component.SerializeToString(), {}) root_path = next(iterkeys(self.state.component_tree)) self.CallStateInline(messages=[None], next_state='ProcessEntry', request_data=dict(component_path=[root_path]))
Starts the Glob. This is the main entry point for this flow mixin. First we convert the pattern into regex components, and then we interpolate each component. Finally, we generate a cartesian product of all combinations. Args: paths: A list of GlobExpression instances. pathtype: The pathtype to use for creating pathspecs. root_path: A pathspec where to start searching from. process_non_regular_files: Work with all kinds of files - not only with regular ones. collect_ext_attrs: Whether to gather information about file extended attributes.
codesearchnet
def NgramScorer(frequency_map): length = len(next(iter(frequency_map))) floor = math.log10(0.01 / sum(frequency_map.values())) ngrams = frequency.frequency_to_probability(frequency_map, decorator=math.log10) def inner(text): text = ''.join(text) text = remove(text.upper(), string.whitespace + string.punctuation) return sum(ngrams.get(ngram, floor) for ngram in iterate_ngrams(text, length)) return inner
Compute the score of a text by using the frequencies of ngrams. Example: >>> fitness = NgramScorer(english.unigrams) >>> fitness("ABC") -4.3622319742618245 Args: frequency_map (dict): ngram to frequency mapping
juraj-google-style
def is_torch_support_available(self) -> bool: if is_torch_available(): from transformers.utils import get_torch_version return version.parse(get_torch_version()) >= self.torch_onnx_minimum_version else: return False
Checks whether the installed PyTorch version meets the minimum required to export the model.

Returns:
    `bool`: Whether the installed version of PyTorch is compatible with the model.
github-repos
def precheck_ami_id(context): key = "{}/{}".format(context.env, context.service_name) print_if_verbose("precheck_ami_id with key: {}".format(key)) current_ami = context.versionresolver.lookup("ami-id,{}".format(key)) print_if_verbose("ami found: {}".format(current_ami)) if current_ami is None: print_if_verbose("precheck passed without check because current AMI is None") return True instances_running_ami = context.aws_client("ec2").describe_instances( Filters=[{ 'Name': 'image-id', 'Values': [current_ami] }] )["Reservations"] if instances_running_ami: instances_running_ami = [resv["Instances"][0]["InstanceId"] for resv in instances_running_ami] print_if_verbose("instances running ami {}:\n{}".format(current_ami, repr(instances_running_ami))) env_service = "{}-{}".format(context.env, context.service_name) instances_running_as_env_service = context.aws_client("ec2").describe_instances( Filters=[{ 'Name': 'iam-instance-profile.arn', 'Values': ["arn:aws:iam::*:instance-profile/{}-{}".format(context.env, context.service_name)] }] )["Reservations"] if instances_running_as_env_service: instances_running_as_env_service = \ [resv["Instances"][0]["InstanceId"] for resv in instances_running_as_env_service] print_if_verbose("instances running as {}".format(env_service)) print_if_verbose(repr(instances_running_as_env_service)) for instance_id in instances_running_as_env_service: if instance_id not in instances_running_ami: raise RuntimeError("Instance: {} not running expected ami: {}".format(instance_id, current_ami)) return True
Is the AMI in service the same as the AMI marked current in the version records? This tool won't update records unless the world state is coherent. Args: context: a populated EFVersionContext object Returns: True if ok to proceed Raises: RuntimeError if not ok to proceed
juraj-google-style
def is_action(task): result = False if _extract_from_env_in_payload(task, 'ACTION_CALLBACK'): result = True if (task.get('extra', {}).get('action') is not None): result = True return result
Determine if a task is an action task. Trusted decision and action tasks are important in that they can generate other valid tasks. The verification of decision and action tasks is slightly different, so we need to be able to tell them apart. This checks for the following things:: * ``task.payload.env.ACTION_CALLBACK`` exists * ``task.extra.action`` exists Args: task (dict): the task definition to check Returns: bool: True if it's an action
codesearchnet
def list_installed(): cmd = 'Get-WindowsFeature -ErrorAction SilentlyContinue -WarningAction SilentlyContinue | Select DisplayName,Name,Installed' features = _pshell_json(cmd) ret = {} for entry in features: if entry['Installed']: ret[entry['Name']] = entry['DisplayName'] return ret
List installed features. Supported on Windows Server 2008 and Windows 8 and newer. Returns: dict: A dictionary of installed features CLI Example: .. code-block:: bash salt '*' win_servermanager.list_installed
codesearchnet
def _parse_dataset(file_path, tmp_dir, train): input_path = file_path file_name = ('train' if train else 'dev') gen_output_path = os.path.join(tmp_dir, (file_name + '.txt')) example_output_path = os.path.join(tmp_dir, _EXAMPLES_FILE) print(('input path: ' + input_path)) print(('gen_output_path: ' + gen_output_path)) print(('example_output_path: ' + example_output_path)) input_file = tf.gfile.Open(input_path, mode='r') examples = [] for (counter, line) in enumerate(input_file): if (counter == 0): continue line_split = line.split('\t') parse1 = line_split[_PARSE1_INDEX] parse2 = line_split[_PARSE2_INDEX] consensus_label = line_split[_LABEL_INDEX] tokens1 = _get_tokens_and_tags(parse1) tokens2 = _get_tokens_and_tags(parse2) tokens1_str = ' '.join(tokens1) tokens2_str = ' '.join(tokens2) if (consensus_label != '-'): examples.append([tokens1_str, tokens2_str, consensus_label]) input_file.close() with tf.gfile.GFile(gen_output_path, 'w') as f: for (tokens1_str, tokens2_str, consensus_label) in examples: f.write(('%s\t%s\t%s\n' % (tokens1_str, tokens2_str, consensus_label))) if train: with tf.gfile.GFile(example_output_path, 'w') as f: for (tokens1_str, tokens2_str, consensus_label) in examples: f.write(('%s %s\n' % (tokens1_str, tokens2_str)))
Convert the dataset in to a simpler format. This function creates two files. One for being processed to produce a vocab and another to generate the data. Args: file_path: string, path to the file to parse. tmp_dir: string, path to the directory to output the files. train: bool, indicating if we are parsing the training set.
codesearchnet
def diff_compute(self, text1, text2, checklines, deadline): if not text1: return [(self.DIFF_INSERT, text2)] if not text2: return [(self.DIFF_DELETE, text1)] if len(text1) > len(text2): (longtext, shorttext) = (text1, text2) else: (shorttext, longtext) = (text1, text2) i = longtext.find(shorttext) if i != -1: diffs = [(self.DIFF_INSERT, longtext[:i]), (self.DIFF_EQUAL, shorttext), (self.DIFF_INSERT, longtext[i + len(shorttext):])] if len(text1) > len(text2): diffs[0] = (self.DIFF_DELETE, diffs[0][1]) diffs[2] = (self.DIFF_DELETE, diffs[2][1]) return diffs if len(shorttext) == 1: return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)] hm = self.diff_halfMatch(text1, text2) if hm: (text1_a, text1_b, text2_a, text2_b, mid_common) = hm diffs_a = self.diff_main(text1_a, text2_a, checklines, deadline) diffs_b = self.diff_main(text1_b, text2_b, checklines, deadline) return diffs_a + [(self.DIFF_EQUAL, mid_common)] + diffs_b if checklines and len(text1) > 100 and len(text2) > 100: return self.diff_lineMode(text1, text2, deadline) return self.diff_bisect(text1, text2, deadline)
Find the differences between two texts. Assumes that the texts do not have any common prefix or suffix. Args: text1: Old string to be diffed. text2: New string to be diffed. checklines: Speedup flag. If false, then don't run a line-level diff first to identify the changed areas. If true, then run a faster, slightly less optimal diff. deadline: Time when the diff should be complete by. Returns: Array of changes.
juraj-google-style
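The branch in the middle of the method, where the shorter text occurs verbatim inside the longer one, can be isolated into a standalone sketch; the integer DIFF_* tags below are assumed for display only and need not match the class constants:

DIFF_DELETE, DIFF_INSERT, DIFF_EQUAL = -1, 1, 0

def substring_shortcut(text1, text2):
    # Only valid when the shorter text occurs verbatim inside the longer one.
    longtext, shorttext = (text1, text2) if len(text1) > len(text2) else (text2, text1)
    i = longtext.find(shorttext)
    assert i != -1
    op = DIFF_DELETE if len(text1) > len(text2) else DIFF_INSERT
    return [(op, longtext[:i]),
            (DIFF_EQUAL, shorttext),
            (op, longtext[i + len(shorttext):])]

print(substring_shortcut('cat', 'concatenate'))
# [(1, 'con'), (0, 'cat'), (1, 'enate')]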
def reformat_css(input_file, output_file): line_count = get_line_count(input_file) f = open(input_file, 'r+') output = open(output_file, 'w') for line in range(line_count): string = f.readline().strip() string = re.sub('\{', '{\n', string) string = re.sub('; ', ';', string) string = re.sub(';', ';\n', string) string = re.sub('} ', '*/\n', string) output.write(string) output.close() f.close() indent_css(output_file, output_file) add_whitespace_before("{", output_file, output_file)
Reformats poorly written css. This function does not validate or fix errors in the code. It only gives code the proper indentation. Args: input_file: string, path to the input file. output_file: string, path to where the reformatted css should be saved. If the target file doesn't exist, a new file is created. Returns: None.
juraj-google-style
def marcxml2record(marcxml): marcjson = create_record(marcxml, keep_singletons=False) collections = _get_collections(marcjson) if 'conferences' in collections: return conferences.do(marcjson) elif 'data' in collections: return data.do(marcjson) elif 'experiment' in collections: return experiments.do(marcjson) elif 'hepnames' in collections: return hepnames.do(marcjson) elif 'institution' in collections: return institutions.do(marcjson) elif 'job' in collections or 'jobhidden' in collections: return jobs.do(marcjson) elif 'journals' in collections or 'journalsnew' in collections: return journals.do(marcjson) return hep.do(marcjson)
Convert a MARCXML string to a JSON record. Tries to guess which set of rules to use by inspecting the contents of the ``980__a`` MARC field, but falls back to HEP in case nothing matches, because records belonging to special collections logically belong to the Literature collection but don't have ``980__a:HEP``. Args: marcxml(str): a string containing MARCXML. Returns: dict: a JSON record converted from the string.
juraj-google-style
def check_column(df: DataFrame, row: int, name: str, fn: Callable[[float], bool]) -> bool: is_ok = True if df(row, 'trt_model'): if not fn(df(row, name)): logging.error('Unsatisfied %s found at: %s', name, df(row)) is_ok = False return is_ok
Checks the values of a column using a custom function and logs abnormals.

The check is only performed on TensorRT models, not native CPU/GPU models.

Args:
  df: The DataFrame to be checked.
  row: The row in the DataFrame.
  name: The name of the column to be checked.
  fn: The function that takes a value at the specified column and returns
    whether the value satisfies the check.

Returns:
  Whether all the values of the specified column satisfy the provided check.
github-repos
def change_tz(cal, new_timezone, default, utc_only=False, utc_tz=icalendar.utc): for vevent in getattr(cal, 'vevent_list', []): start = getattr(vevent, 'dtstart', None) end = getattr(vevent, 'dtend', None) for node in (start, end): if node: dt = node.value if (isinstance(dt, datetime) and ((not utc_only) or (dt.tzinfo == utc_tz))): if (dt.tzinfo is None): dt = dt.replace(tzinfo=default) node.value = dt.astimezone(new_timezone)
Change the timezone of the specified component. Args: cal (Component): the component to change new_timezone (tzinfo): the timezone to change to default (tzinfo): a timezone to assume if the dtstart or dtend in cal doesn't have an existing timezone utc_only (bool): only convert dates that are in utc utc_tz (tzinfo): the tzinfo to compare to for UTC when processing utc_only=True
codesearchnet
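The per-datetime conversion inside the loop uses only standard datetime machinery; a minimal sketch with zoneinfo (Python 3.9+) in place of the icalendar timezones:

from datetime import datetime, timezone
from zoneinfo import ZoneInfo  # Python 3.9+

dt = datetime(2024, 1, 1, 12, 0)        # naive datetime
default = timezone.utc
new_timezone = ZoneInfo('Europe/Paris')

if dt.tzinfo is None:
    dt = dt.replace(tzinfo=default)      # assume the default zone, as in change_tz
print(dt.astimezone(new_timezone))       # 2024-01-01 13:00:00+01:00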
def fetch_friends(self, user, paginate=False): if USING_ALLAUTH: social_app = SocialApp.objects.get_current('facebook') oauth_token = SocialToken.objects.get(account=user, app=social_app).token else: social_auth_backend = FacebookBackend() tokens = social_auth_backend.tokens(user) oauth_token = tokens['access_token'] graph = facebook.GraphAPI(oauth_token) friends = graph.get_connections('me', 'friends') if paginate: total_friends = friends.copy() total_friends.pop('paging') while (('paging' in friends) and ('next' in friends['paging']) and friends['paging']['next']): next_url = friends['paging']['next'] next_url_parsed = urlparse.urlparse(next_url) query_data = urlparse.parse_qs(next_url_parsed.query) query_data.pop('access_token') for (k, v) in query_data.items(): query_data[k] = v[0] friends = graph.get_connections('me', 'friends', **query_data) total_friends['data'] = sum([total_friends['data'], friends['data']], []) else: total_friends = friends return total_friends
Fetches friends from Facebook using the oauth_token
fetched by django-social-auth.

Note - `user` isn't a Django user - it's a UserSocialAuth if using social auth, or a SocialAccount if using allauth.

Returns:
    collection of friend objects fetched from Facebook
codesearchnet
def duration_distance(item_a, item_b, max_value): duration_a = item_a.times.size duration_b = item_b.times.size return np.minimum(np.abs(duration_a - duration_b), max_value) / float(max_value)
Absolute difference in the duration of two items Args: item_a: STObject from the first set in TrackMatcher item_b: STObject from the second set in TrackMatcher max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1.
juraj-google-style
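A quick numeric check of the scaled duration distance; SimpleNamespace objects stand in for STObject here, on the assumption that only a times array is needed:

import numpy as np
from types import SimpleNamespace

def duration_distance(item_a, item_b, max_value):
    duration_a = item_a.times.size
    duration_b = item_b.times.size
    return np.minimum(np.abs(duration_a - duration_b), max_value) / float(max_value)

a = SimpleNamespace(times=np.arange(5))   # lasts 5 steps
b = SimpleNamespace(times=np.arange(12))  # lasts 12 steps
print(duration_distance(a, b, max_value=10))  # |5 - 12| = 7 -> 0.7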
def __init__(self, ascii_codepage='cp1252', key_path_prefix=''): super(REGFWinRegistryFile, self).__init__( ascii_codepage=ascii_codepage, key_path_prefix=key_path_prefix) self._file_object = None self._regf_file = pyregf.file() self._regf_file.set_ascii_codepage(ascii_codepage)
Initializes the Windows Registry file. Args: ascii_codepage (Optional[str]): ASCII string codepage. key_path_prefix (Optional[str]): Windows Registry key path prefix.
juraj-google-style
def mkdirs(self, path): try: os.makedirs(path) except OSError as err: raise IOError(err)
Recursively create directories for the provided path.

Args:
  path: string path of the directory structure that should be created

Raises:
  IOError: if the directories cannot be created, for example because the
    leaf directory already exists.
github-repos
def is_compatible(self, other: 'Schema') -> bool: if not isinstance(other, Schema): raise TypeError(f"Argument 'other' should be a Schema object. Encountered {other}.") for key_spec in other.keys(): if key_spec not in self: return False for key_spec, field in self.items(): if key_spec not in other: return False if not field.value.is_compatible(other[key_spec].value): return False return True
Returns whether current schema is compatible with the other schema. NOTE(daiyip): schema A is compatible with schema B when: schema A and schema B have the same keys, with compatible values specs. Args: other: Other schema. Returns: True if values that is acceptable to the other schema is acceptable to current schema. Raises: TypeError: If `other` is not a schema object.
github-repos
def describe(self): response = {'TransformJobStatus': self.state, 'ModelName': self.model_name, 'TransformJobName': self.name, 'TransformJobArn': _UNUSED_ARN, 'TransformEndTime': self.end_time, 'CreationTime': self.start_time, 'TransformStartTime': self.start_time, 'Environment': {}, 'BatchStrategy': self.batch_strategy} if self.transform_resources: response['TransformResources'] = self.transform_resources if self.output_data: response['TransformOutput'] = self.output_data if self.input_data: response['TransformInput'] = self.input_data return response
Describe this _LocalTransformJob The response is a JSON-like dictionary that follows the response of the boto describe_transform_job() API. Returns: dict: description of this _LocalTransformJob
codesearchnet
def with_rank_at_most(self, rank): if ((self.ndims is not None) and (self.ndims > rank)): raise ValueError(('Shape %s must have rank at most %d' % (self, rank))) else: return self
Returns a shape based on `self` with at most the given rank. Args: rank: An integer. Returns: A shape that is at least as specific as `self` with at most the given rank. Raises: ValueError: If `self` does not represent a shape with at most the given `rank`.
codesearchnet
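If this mirrors TensorFlow's TensorShape API (an assumption based on the wording of the error message), typical usage would look like:

import tensorflow as tf  # assuming the method above mirrors tf.TensorShape

shape = tf.TensorShape([2, 3])
print(shape.with_rank_at_most(3))  # (2, 3): rank 2 <= 3, so the shape is returned as-is
try:
    shape.with_rank_at_most(1)
except ValueError as err:
    print(err)  # Shape (2, 3) must have rank at most 1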
def to_dict(self): output = super().to_dict() if isinstance(self.esmfold_config, EsmFoldConfig): output['esmfold_config'] = self.esmfold_config.to_dict() return output
Serializes this instance to a Python dictionary. Overrides the default [`~PretrainedConfig.to_dict`].

Returns:
    `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
github-repos