def create_downloader_of_type(type_name):
    downloaders = available_downloaders()
    if type_name not in downloaders.keys():
        raise UnknownDownloaderException('Unknown downloader: %s' % (type_name,))
    return downloaders[type_name]()
Create an instance of the downloader with the given name. Args: type_name: The name of a downloader. Returns: An instance of the downloader with the given type.
codesearchnet
def fetch_tuples(self, max_tuples=20, timeout=None):
    tuples = list()
    if timeout is None:
        while len(tuples) < max_tuples:
            fetcher = self._data_fetcher
            if not fetcher:
                break
            tuples.append(fetcher.items.get())
        return tuples
    timeout = float(timeout)
    end = time.time() + timeout
    while len(tuples) < max_tuples:
        qto = end - time.time()
        if qto <= 0:
            break
        try:
            fetcher = self._data_fetcher
            if not fetcher:
                break
            tuples.append(fetcher.items.get(timeout=qto))
        except queue.Empty:
            break
    return tuples
Fetch a number of tuples from this view. Fetching of data must have been started with :py:meth:`start_data_fetch` before calling this method. If ``timeout`` is ``None`` then the returned list will contain ``max_tuples`` tuples. Otherwise if the timeout is reached the list may contain less than ``max_tuples`` tuples. Args: max_tuples(int): Maximum number of tuples to fetch. timeout(float): Maximum time to wait for ``max_tuples`` tuples. Returns: list: List of fetched tuples. .. versionadded:: 1.12
codesearchnet
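A minimal usage sketch for the method above; it assumes a `view` object obtained from a Streams connection (not shown in this entry), on which data fetching is started first:

# Hypothetical usage; `view` comes from a Streams connection (not shown here).
view.start_data_fetch()
try:
    # Block up to 5 seconds while collecting at most 10 tuples.
    tuples = view.fetch_tuples(max_tuples=10, timeout=5.0)
finally:
    view.stop_data_fetch()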
def CheckForBadCharacters(filename, lines, error):
    for linenum, line in enumerate(lines):
        if unicode_escape_decode('�') in line:
            error(filename, linenum, 'readability/utf8', 5,
                  'Line contains invalid UTF-8 (or Unicode replacement character).')
        if '\x00' in line:
            error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
Logs an error for each line containing bad characters. Two kinds of bad characters: 1. Unicode replacement characters: These indicate that either the file contained invalid UTF-8 (likely) or Unicode replacement characters (which it shouldn't). Note that it's possible for this to throw off line numbering if the invalid UTF-8 occurred adjacent to a newline. 2. NUL bytes. These are problematic for some tools. Args: filename: The name of the current file. lines: An array of strings, each representing a line of the file. error: The function to call with any errors found.
codesearchnet
def prose_wc(args):
    if args.file is None:
        return 1
    if args.split_hyphens:
        INTERSTITIAL_PUNCTUATION.append(re.compile('-'))
    content = args.file.read().decode('utf-8')
    filename = args.file.name
    body = strip_frontmatter(content)
    parsed = markdown_to_text(body)
    result = wc(filename, body, parsed=parsed, is_jekyll=(body != content))
    if (args.update and filename != '_stdin_'
            and result['counts']['type'] == 'jekyll'):
        update_file(filename, result, content, args.indent)
    else:
        _mockable_print({
            'yaml': yaml.safe_dump(result, default_flow_style=False,
                                   indent=args.indent),
            'json': json.dumps(result, indent=args.indent),
            'default': default_dump(result),
        }[args.format])
    return 0
Processes data provided to print a count object, or update a file. Args: args: an ArgumentParser object returned by setup()
codesearchnet
def _check_middleware_dependencies(concerned_object, required_middleware):
    declared_middleware = getattr(settings, 'MIDDLEWARE', None)
    if declared_middleware is None:
        declared_middleware = settings.MIDDLEWARE_CLASSES
    matching_middleware = [mw for mw in declared_middleware
                           if mw in required_middleware]
    if required_middleware != matching_middleware:
        raise AssertionError(
            '{} requires middleware order {} but matching middleware was {}'.format(
                concerned_object, required_middleware, matching_middleware))
Check required middleware dependencies exist and in the correct order. Args: concerned_object (object): The object for which the required middleware is being checked. This is used for error messages only. required_middleware (list of String): An ordered list representing the required middleware to be checked. Usage: Add in __init__ method to a Middleware class to have its dependencies checked on startup. def __init__(self): super(SomeMiddleware, self).__init__() _check_middleware_dependencies(self, required_middleware=[ 'edx_django_utils.cache.middleware.RequestCacheMiddleware', ]) Raises: AssertionError if the provided dependencies don't appear in MIDDLEWARE_CLASSES in the correct order.
codesearchnet
def cardinal(self, to): return sum(1 for _ in filter( lambda d: not d.external and d.target in to, self.dependencies))
Return the number of dependencies of this module to the given node. Args: to (Package/Module): the target node. Returns: int: number of dependencies.
juraj-google-style
def vert_tab_pos(self, positions):
    if positions == 'clear':
        # ESC B NUL clears all vertical tab positions.
        self.send(chr(27) + 'B' + chr(0))
        return
    if min(positions) < 1 or max(positions) > 255:
        raise RuntimeError('Invalid position parameter in function vertTabPos')
    # ESC B n1 ... nk NUL sets vertical tab positions.
    sendstr = chr(27) + 'B'
    if len(positions) <= 16:
        for position in positions:
            sendstr += chr(position)
        self.send(sendstr + chr(0))
    else:
        raise RuntimeError('Too many positions in function vertTabPos')
Sets tab positions, up to a maximum of 16 positions. Can also clear tab positions. Args: positions -- Either a list of tab positions (between 1 and 255), or 'clear'. Returns: None Raises: RuntimeError: Invalid position parameter. RuntimeError: Too many positions.
juraj-google-style
def update_or_create_all(cls, list_of_kwargs, keys=[]):
    objs = []
    for kwargs in list_of_kwargs:
        filter_kwargs = subdict(kwargs, keys)
        if filter_kwargs == {}:
            obj = None
        else:
            obj = cls.first(**filter_kwargs)
        if obj is not None:
            for key, value in kwargs.iteritems():
                if key not in keys and key not in cls._no_overwrite_:
                    setattr(obj, key, value)
        else:
            obj = cls.new(**kwargs)
        objs.append(obj)
    try:
        return cls.add_all(objs)
    except:
        cls.session.rollback()
        raise
Batch method for updating a list of instances and creating them if required Args: list_of_kwargs(list of dicts): A list of dicts where each dict denotes the keyword args that you would pass to the create method separately keys (list, optional): A list of keys to use for the initial finding step. Matching is done only on these attributes. Examples: >>> Customer.update_or_create_all([ ... {'name': 'Vicky', 'email': 'vicky@x.com', 'age': 34}, ... {'name': 'Ron', 'age': 40, 'email': 'ron@x.com', ... 'gender': 'Male'}], keys=['name', 'email'])
codesearchnet
def _find_variables(graph_def: graph_pb2.GraphDef) -> Mapping[str, node_def_pb2.NodeDef]:
    variable_nodes = {}
    for var_node in filter(_is_variable, graph_def.node):
        shared_name = str(var_node.attr['shared_name'].s, encoding='utf-8')
        variable_nodes[shared_name] = var_node
    for func in graph_def.library.function:
        for var_node in filter(_is_variable, func.node_def):
            # Recompute the shared name for each function-level variable node.
            shared_name = str(var_node.attr['shared_name'].s, encoding='utf-8')
            variable_nodes[shared_name] = var_node
    return variable_nodes
Finds all variables within `graph_def`. This function makes sense for TF 1 graphs only, as it depends on `shared_name`. Args: graph_def: `GraphDef` to find variables from. Returns: A mapping of `shared_name` -> `NodeDef` corresponding to a variable op.
github-repos
def _head(self, client_kwargs): with _handle_azure_exception(): if 'file_name' in client_kwargs: result = self.client.get_file_properties(**client_kwargs) elif 'directory_name' in client_kwargs: result = self.client.get_directory_properties(**client_kwargs) else: result = self.client.get_share_properties(**client_kwargs) return self._model_to_dict(result)
Returns object or bucket HTTP header. Args: client_kwargs (dict): Client arguments. Returns: dict: HTTP header.
juraj-google-style
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0): local_stream = BytearrayStream() if self._device_serial_number is not None: self._device_serial_number.write( local_stream, kmip_version=kmip_version ) if self._password is not None: self._password.write( local_stream, kmip_version=kmip_version ) if self._device_identifier is not None: self._device_identifier.write( local_stream, kmip_version=kmip_version ) if self._network_identifier is not None: self._network_identifier.write( local_stream, kmip_version=kmip_version) if self._machine_identifier is not None: self._machine_identifier.write( local_stream, kmip_version=kmip_version ) if self._media_identifier is not None: self._media_identifier.write( local_stream, kmip_version=kmip_version ) self.length = local_stream.length() super(DeviceCredential, self).write( output_stream, kmip_version=kmip_version ) output_stream.write(local_stream.buffer)
Write the data encoding the DeviceCredential struct to a stream. Args: output_stream (stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0.
juraj-google-style
def _construct(configdict, prefix, ua):
    if not ua:
        raise UserAgentError(
            "User_agent parameter missing. It can be your project's name for example.")
    preprefix = configdict.get('preprefix')
    if preprefix:
        user_agent = '%s:' % preprefix
    else:
        user_agent = ''
    if prefix:
        user_agent = '%s%s-' % (user_agent, prefix)
    user_agent = '%s%s' % (user_agent, ua)
    return user_agent
Construct user agent Args: configdict (dict): Additional configuration for user agent prefix (str): Text to put at start of user agent ua (str): Custom user agent text Returns: str: Full user agent string
codesearchnet
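Tracing the branches above, a config dict with a `preprefix` yields the string `preprefix:prefix-ua`. An illustrative call (all values are made up; `_construct` is private to the library):

ua = _construct({'preprefix': 'INTERNAL'}, 'MyLibrary/1.0', 'myproject')
print(ua)  # -> 'INTERNAL:MyLibrary/1.0-myproject'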
def round(cls, x: 'TensorFluent') -> 'TensorFluent': return cls._unary_op(x, tf.round, tf.float32)
Returns a TensorFluent for the round function. Args: x: The input fluent. Returns: A TensorFluent wrapping the round function.
juraj-google-style
def __init__(self, api, path, buffer_size=DEFAULT_BUFFER_SIZE, max_request_size=MAX_REQUEST_SIZE, offset=0): self._api = api self._path = path self.name = api_utils._unquote_filename(path) self.closed = False assert buffer_size <= max_request_size self._buffer_size = buffer_size self._max_request_size = max_request_size self._offset = offset self._buffer = _Buffer() self._etag = None get_future = self._get_segment(offset, self._buffer_size, check_response=False) status, headers, content = self._api.head_object(path) errors.check_status(status, [200], path, resp_headers=headers, body=content) self._file_size = long(common.get_stored_content_length(headers)) self._check_etag(headers.get('etag')) self._buffer_future = None if self._file_size != 0: content, check_response_closure = get_future.get_result() check_response_closure() self._buffer.reset(content) self._request_next_buffer()
Constructor. Args: api: A StorageApi instance. path: Quoted/escaped path to the object, e.g. /mybucket/myfile buffer_size: buffer size. The ReadBuffer keeps one buffer. But there may be a pending future that contains a second buffer. This size must be less than max_request_size. max_request_size: Max bytes to request in one urlfetch. offset: Number of bytes to skip at the start of the file. If None, 0 is used.
juraj-google-style
def dict_setdiff(dict_, negative_keys):
    keys = [key for key in six.iterkeys(dict_) if key not in set(negative_keys)]
    subdict_ = dict_subset(dict_, keys)
    return subdict_
r""" returns a copy of dict_ without keys in the negative_keys list Args: dict_ (dict): negative_keys (list):
codesearchnet
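Expected behaviour of `dict_setdiff`, assuming utool's `dict_subset` helper is importable:

dict_ = {'a': 1, 'b': 2, 'c': 3}
print(dict_setdiff(dict_, ['b']))  # -> {'a': 1, 'c': 3}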
def _assert_same_base_type(items, expected_type=None):
    original_item_str = None
    get_name = lambda x: x.name if hasattr(x, 'name') else str(x)
    for item in items:
        if item is not None:
            item_type = base_dtype(item.dtype)
            if not expected_type:
                expected_type = item_type
                original_item_str = get_name(item)
            elif expected_type != item_type:
                raise ValueError(
                    '{}, type={}, must be of the same type ({}){}.'.format(
                        get_name(item), item_type, expected_type,
                        ' as {}'.format(original_item_str)
                        if original_item_str else ''))
    return expected_type
r"""Asserts all items are of the same base type. Args: items: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`, `Operation`, or `IndexedSlices`). Can include `None` elements, which will be ignored. expected_type: Expected type. If not specified, assert all items are of the same base type. Returns: Validated type, or none if neither expected_type nor items provided. Raises: ValueError: If any types do not match.
juraj-google-style
def _perform_action(self, params, return_dict=True): action = self.get_data( "droplets/%s/actions/" % self.id, type=POST, params=params ) if return_dict: return action else: action = action[u'action'] return_action = Action(token=self.token) for attr in action.keys(): setattr(return_action, attr, action[attr]) return return_action
Perform a droplet action. Args: params (dict): parameters of the action Optional Args: return_dict (bool): Return a dict when True (default), otherwise return an Action. Returns dict or Action
juraj-google-style
def remove_pos_arg_placeholders(alias_command): split_command = shlex.split(alias_command) boundary_index = len(split_command) for i, subcommand in enumerate(split_command): if not re.match('^[a-z]', subcommand.lower()) or i > COLLISION_CHECK_LEVEL_DEPTH: boundary_index = i break return ' '.join(split_command[:boundary_index]).lower()
Remove positional argument placeholders from alias_command. Args: alias_command: The alias command to remove from. Returns: The alias command string without positional argument placeholders.
juraj-google-style
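Since a placeholder such as `{0}` fails the `^[a-z]` check, everything from the first placeholder onward is dropped. A sketch (`COLLISION_CHECK_LEVEL_DEPTH` is a module constant not shown in this entry):

print(remove_pos_arg_placeholders('group show {0}'))  # -> 'group show'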
def ChiSquared(target_frequency):
    def inner(text):
        text = ''.join(text)
        return -chi_squared(frequency_analyze(text), target_frequency)
    return inner
Score a text by comparing its frequency distribution against another. Note: It is easy to be penalised without knowing it when using this scorer. English frequency ngrams are capital letters, meaning when using it any text you score against must be all capitals for it to give correct results. I am aware of the issue and will work on a fix. Todo: Maybe include a parameter for ngram size. Haven't had a use case for this yet. Once there is evidence it is needed, I will add it. Example: >>> fitness = ChiSquared(english.unigrams) >>> fitness("ABC") -32.2 Args: target_frequency (dict): symbol to frequency mapping of the distribution to compare with
codesearchnet
def __init__(self, analyzer_class): super(HashTaggingAnalysisPlugin, self).__init__() self._analysis_queue_timeout = self.DEFAULT_QUEUE_TIMEOUT self._analyzer_started = False self._comment = 'Tag applied by {0:s} analysis plugin'.format(self.NAME) self._event_identifiers_by_pathspec = collections.defaultdict(list) self._hash_pathspecs = collections.defaultdict(list) self._requester_class = None self._time_of_last_status_log = time.time() self.hash_analysis_queue = Queue.Queue() self.hash_queue = Queue.Queue() self._analyzer = analyzer_class(self.hash_queue, self.hash_analysis_queue)
Initializes a hash tagging analysis plugin. Args: analyzer_class (type): a subclass of HashAnalyzer that will be instantiated by the plugin.
juraj-google-style
def match(self, path): this = self.segments that = path.split('/') current_var = None bindings = {} segment_count = self.segment_count j = 0 for i in range(0, len(this)): if j >= len(that): break if this[i].kind == _TERMINAL: if this[i].literal == '*': bindings[current_var] = that[j] j += 1 elif this[i].literal == '**': until = j + len(that) - segment_count + 1 segment_count += len(that) - segment_count bindings[current_var] = '/'.join(that[j:until]) j = until elif this[i].literal != that[j]: raise ValidationException( 'mismatched literal: \'%s\' != \'%s\'' % ( this[i].literal, that[j])) else: j += 1 elif this[i].kind == _BINDING: current_var = this[i].literal if j != len(that) or j != segment_count: raise ValidationException( 'match error: could not render from the path template: {}' .format(path)) return bindings
Matches a fully qualified path template string. Args: path (str): A fully qualified path template string. Returns: dict: Var names to matched binding values. Raises: ValidationException: If path can't be matched to the template.
juraj-google-style
def from_cif_file(cif_file, source='', comment=''):
    r = CifParser(cif_file)
    structure = r.get_structures()[0]
    return Header(structure, source, comment)
Static method to create Header object from cif_file Args: cif_file: cif_file path and name source: User supplied identifier, i.e. for Materials Project this would be the material ID number comment: User comment that goes in header Returns: Header Object
juraj-google-style
def argmin(x, axis=None, keepdims=False):
    if any_symbolic_tensors((x,)):
        return Argmin(axis=axis, keepdims=keepdims).symbolic_call(x)
    return backend.numpy.argmin(x, axis=axis, keepdims=keepdims)
Returns the indices of the minimum values along an axis. Args: x: Input tensor. axis: By default, the index is into the flattened tensor, otherwise along the specified axis. keepdims: If this is set to `True`, the axes which are reduced are left in the result as dimensions with size one. Defaults to `False`. Returns: Tensor of indices. It has the same shape as `x`, with the dimension along `axis` removed. Example: >>> x = keras.ops.arange(6).reshape(2, 3) + 10 >>> x array([[10, 11, 12], [13, 14, 15]], dtype=int32) >>> keras.ops.argmin(x) array(0, dtype=int32) >>> keras.ops.argmin(x, axis=0) array([0, 0, 0], dtype=int32) >>> keras.ops.argmin(x, axis=1) array([0, 0], dtype=int32)
github-repos
def user_agent_detail(self, **kwargs):
    path = '%s/%s/user_agent_detail' % (self.manager.path, self.get_id())
    return self.manager.gitlab.http_get(path, **kwargs)
Get the user agent detail. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabGetError: If the server cannot perform the request
codesearchnet
def inverse_removing(self, words_to_remove):
    mask = np.ones(self.as_np.shape[0], dtype='bool')
    mask[self.__get_idxs(words_to_remove)] = False
    if not self.bow:
        return ''.join([self.as_list[i] if mask[i] else 'UNKWORDZ'
                        for i in range(mask.shape[0])])
    return ''.join([self.as_list[v] for v in mask.nonzero()[0]])
Returns a string after removing the appropriate words. If self.bow is false, replaces word with UNKWORDZ instead of removing it. Args: words_to_remove: list of ids (ints) to remove Returns: original raw string with appropriate words removed.
juraj-google-style
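A hypothetical call with `bow=False`, where removed word ids are replaced by the `UNKWORDZ` token (the ids depend on the indexing done by the containing class, which is not shown in this entry):

# Hypothetical: `indexed` is an instance of the class containing this method,
# built with bow=False over the string 'the quick brown fox'.
print(indexed.inverse_removing([1]))  # e.g. 'the UNKWORDZ brown fox'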
def mod_replace(match, sphinx_modules):
    sphinx_modules.append(match.group("module"))
    return "`{}`_".format(match.group("value"))
Convert Sphinx ``:mod:`` to plain reST link. Args: match (_sre.SRE_Match): A match (from ``re``) to be used in substitution. sphinx_modules (list): List to be track the modules that have been encountered. Returns: str: The ``match`` converted to a link.
juraj-google-style
def clean_title(title):
    date_pattern = re.compile(
        '\\W*\\d{1,2}[/\\-.]\\d{1,2}[/\\-.](?=\\d*)(?:.{4}|.{2})\\W*')
    title = date_pattern.sub(' ', title)
    title = re.sub('\\s{2,}', ' ', title)
    title = title.strip()
    return title
Clean title -> remove dates, remove duplicated spaces and strip title. Args: title (str): Title. Returns: str: Clean title without dates, duplicated, trailing and leading spaces.
codesearchnet
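The date regex above matches patterns like `12/31/2020`, so:

print(clean_title('Meeting notes 12/31/2020 final'))  # -> 'Meeting notes final'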
def __call__(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:
    out = math_ops.matmul(input_tensor, self.filters)
    return {'output': out}
Performs a matrix multiplication. Args: input_tensor: Input tensor to matmul with the filter. Returns: A map of: output key -> output result.
github-repos
def header(self, key, value):
    if type(key) is tuple:
        key, value = str(key[0]), key[1]
    headers = {key: value}
    self._headers.extend(headers)
Defines a new response header. Alias to ``Response.header()``. Arguments: key (str): header name. value (str): header value. Returns: self: ``pook.Response`` current instance.
juraj-google-style
def localopt(self, forcefield='mmff94', steps=500):
    pbmol = pb.Molecule(self._obmol)
    pbmol.localopt(forcefield=forcefield, steps=steps)
    self._obmol = pbmol.OBMol
A wrapper to pybel's localopt method to optimize a Molecule. Args: forcefield: Default is mmff94. Options are 'gaff', 'ghemical', 'mmff94', 'mmff94s', and 'uff'. steps: Default is 500.
juraj-google-style
def _detect(self):
    results = []
    self.results = []
    self.visited_all_paths = {}
    for contract in self.slither.contracts:
        for function in contract.functions:
            if function.is_implemented and function.contract == contract:
                if function.contains_assembly:
                    continue
                uninitialized_local_variables = [
                    v for v in function.local_variables
                    if not v.is_storage and v.uninitialized
                ]
                function.entry_point.context[self.key] = uninitialized_local_variables
                self._detect_uninitialized(function, function.entry_point, [])
    all_results = list(set(self.results))
    for function, uninitialized_local_variable in all_results:
        var_name = uninitialized_local_variable.name
        info = '{} in {}.{} ({}) is a local variable never initialized\n'
        info = info.format(var_name, function.contract.name, function.name,
                           uninitialized_local_variable.source_mapping_str)
        json = self.generate_json_result(info)
        self.add_variable_to_json(uninitialized_local_variable, json)
        self.add_function_to_json(function, json)
        results.append(json)
    return results
Detect uninitialized local variables Recursively visit the calls Returns: dict: [contract name] = set(local variable uninitialized)
codesearchnet
def test_correctness_2_factor_hull_white_consistency(self, valuation_method, error_tol): dtype = tf.float64 expiries = np.array([1.0]) fixed_leg_payment_times = np.array([1.25, 1.5, 1.75, 2.0]) fixed_leg_daycount_fractions = 0.25 * np.ones_like(fixed_leg_payment_times) fixed_leg_coupon = 0.011 * np.ones_like(fixed_leg_payment_times) zero_rate_fn = lambda x: 0.01 * tf.ones_like(x, dtype=dtype) mu = 0.03 vol1 = 0.02 vol2 = 0.01 eff_vol = np.sqrt(vol1 ** 2 + vol2 ** 2) hjm_price = tff.models.hjm.swaption_price(expiries=expiries, fixed_leg_payment_times=fixed_leg_payment_times, fixed_leg_daycount_fractions=fixed_leg_daycount_fractions, fixed_leg_coupon=fixed_leg_coupon, reference_rate_fn=zero_rate_fn, notional=100.0, num_hjm_factors=2, mean_reversion=[mu, mu], volatility=[vol1, vol2], num_samples=25000, valuation_method=valuation_method, time_step_finite_difference=0.05, num_grid_points_finite_difference=251, time_step=0.1, random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC, seed=[1, 2], dtype=dtype) hjm_price = self.evaluate(hjm_price) hw_price = tff.models.hull_white.swaption_price(expiries=expiries, floating_leg_start_times=[0], floating_leg_end_times=[0], floating_leg_daycount_fractions=[0], fixed_leg_payment_times=fixed_leg_payment_times, fixed_leg_daycount_fractions=fixed_leg_daycount_fractions, fixed_leg_coupon=fixed_leg_coupon, reference_rate_fn=zero_rate_fn, notional=100.0, mean_reversion=[mu], volatility=[eff_vol], use_analytic_pricing=True, dtype=dtype) hw_price = self.evaluate(hw_price) self.assertNear(hjm_price, hw_price, error_tol)
Test that under certain conditions HJM matches analytic HW results. Args: valuation_method: The valuation method used. error_tol: Test error tolerance. For the two factor model, when both mean reversions are equivalent, the HJM model matches a HW one-factor model with the same mean reversion and effective volatility eff_vol = sqrt(vol1^2 + vol2^2 + 2*rho*vol1*vol2), where rho is the cross correlation between the two factors. In this specific test, we assume rho = 0.0.
github-repos
def get_program_by_title(self, program_title): all_programs = self._load_data(self.PROGRAMS_ENDPOINT, default=[]) matching_programs = [program for program in all_programs if program.get('title') == program_title] if len(matching_programs) > 1: raise MultipleProgramMatchError(len(matching_programs)) elif len(matching_programs) == 1: return matching_programs[0] else: return None
Return single program by name, or None if not found. Arguments: program_title(string): Program title as seen by students and in Course Catalog Admin Returns: dict: Program data provided by Course Catalog API
juraj-google-style
def log(self, level, msg, *args, **kwargs): if level >= logging.FATAL: extra = kwargs.setdefault('extra', {}) extra[_ABSL_LOG_FATAL] = True super(ABSLLogger, self).log(level, msg, *args, **kwargs)
Logs a message at a certain level substituting in the supplied arguments. This method behaves differently in python and c++ modes. Args: level: int, the standard logging level at which to log the message. msg: str, the text of the message to log. *args: The arguments to substitute in the message. **kwargs: The keyword arguments to substitute in the message.
juraj-google-style
def inverse(self, name: str='inverse') -> 'LinearOperator': if self.is_square is False: raise ValueError('Cannot take the Inverse: This operator represents a non square matrix.') if self.is_non_singular is False: raise ValueError('Cannot take the Inverse: This operator represents a singular matrix.') with self._name_scope(name): return self._linop_inverse()
Returns the Inverse of this `LinearOperator`. Given `A` representing this `LinearOperator`, return a `LinearOperator` representing `A^-1`. Args: name: A name scope to use for ops added by this method. Returns: `LinearOperator` representing inverse of this matrix. Raises: ValueError: When the `LinearOperator` is not hinted to be `non_singular`.
github-repos
def experimental_from_jax(cls, serving_funcs, inputs):
    TFLiteConverterBase._set_original_model_type(
        conversion_metadata_fb.ModelType.JAX)
    return TFLiteJaxConverterV2(serving_funcs, inputs)
Creates a TFLiteConverter object from a Jax model with its inputs. Args: serving_funcs: An array of Jax functions with all the weights applied already. inputs: An array of Jax input placeholders tuples list, e.g., jnp.zeros(INPUT_SHAPE). Each tuple list should correspond with the serving function. Returns: TFLiteConverter object.
github-repos
def concat_video(video_list, out_file, vcodec=None, acodec=None,
                 log_level='info', print_cmd=False, **kwargs):
    _, tmp_filename = tempfile.mkstemp(suffix='.txt', text=True)
    with open(tmp_filename, 'w') as f:
        for filename in video_list:
            f.write('file {}\n'.format(osp.abspath(filename)))
    options = {'log_level': log_level}
    if vcodec is None:
        options['vcodec'] = 'copy'
    if acodec is None:
        options['acodec'] = 'copy'
    convert_video(tmp_filename, out_file, print_cmd,
                  pre_options='-f concat -safe 0', **options)
    os.remove(tmp_filename)
Concatenate multiple videos into a single one. Args: video_list (list): A list of video filenames out_file (str): Output video filename vcodec (None or str): Output video codec, None for unchanged acodec (None or str): Output audio codec, None for unchanged log_level (str): Logging level of ffmpeg. print_cmd (bool): Whether to print the final ffmpeg command.
codesearchnet
def _CheckPythonModuleVersion( self, module_name, module_object, version_property, minimum_version, maximum_version): module_version = None if not version_property.endswith('()'): module_version = getattr(module_object, version_property, None) else: version_method = getattr( module_object, version_property[:-2], None) if version_method: module_version = version_method() if not module_version: status_message = ( 'unable to determine version information for: {0:s}').format( module_name) return False, status_message module_version = '{0!s}'.format(module_version) module_version = self._VERSION_NUMBERS_REGEX.findall(module_version)[0] if module_version[-1] == '.': module_version = module_version[:-1] try: module_version_map = list( map(int, self._VERSION_SPLIT_REGEX.split(module_version))) except ValueError: status_message = 'unable to parse module version: {0:s} {1:s}'.format( module_name, module_version) return False, status_message if minimum_version: try: minimum_version_map = list( map(int, self._VERSION_SPLIT_REGEX.split(minimum_version))) except ValueError: status_message = 'unable to parse minimum version: {0:s} {1:s}'.format( module_name, minimum_version) return False, status_message if module_version_map < minimum_version_map: status_message = ( '{0:s} version: {1!s} is too old, {2!s} or later required').format( module_name, module_version, minimum_version) return False, status_message if maximum_version: try: maximum_version_map = list( map(int, self._VERSION_SPLIT_REGEX.split(maximum_version))) except ValueError: status_message = 'unable to parse maximum version: {0:s} {1:s}'.format( module_name, maximum_version) return False, status_message if module_version_map > maximum_version_map: status_message = ( '{0:s} version: {1!s} is too recent, {2!s} or earlier ' 'required').format(module_name, module_version, maximum_version) return False, status_message status_message = '{0:s} version: {1!s}'.format(module_name, module_version) return True, status_message
Checks the version of a Python module. Args: module_name (str): name of the Python module. module_object (module): Python module. version_property (str): version attribute or function. minimum_version (str): minimum version. maximum_version (str): maximum version. Returns: tuple: consisting of: bool: True if the Python module is available and conforms to the minimum required version, False otherwise. str: status message.
juraj-google-style
def get_ast_dict(belstr, component_type: str = ''):
    errors = []
    parsed = {}
    bels = list(belstr)
    char_locs, errors = parse_chars(bels, errors)
    parsed, errors = parse_functions(belstr, char_locs, parsed, errors)
    parsed, errors = parse_args(bels, char_locs, parsed, errors)
    parsed, errors = arg_types(parsed, errors)
    parsed, errors = parse_relations(belstr, char_locs, parsed, errors)
    parsed, errors = parse_nested(bels, char_locs, parsed, errors)
    errors = parsed_top_level_errors(parsed, errors)
    ast, errors = parsed_to_ast(parsed, errors, component_type=component_type)
    return ast, errors
Convert BEL string to AST dictionary Args: belstr: BEL string component_type: Empty string or 'subject' or 'object' to indicate that we are parsing the subject or object field input
codesearchnet
def query_orders(self, accounts, status='filled'):
    try:
        data = self.call("orders", {'client': accounts, 'status': status})
        if data is not None:
            orders = data.get('dataTable', False)
            order_headers = orders['columns']
            if ('成交状态' in order_headers or '状态说明' in order_headers) \
                    and ('备注' in order_headers):
                order_headers[order_headers.index('备注')] = '废弃'
            order_headers = [cn_en_compare[item] for item in order_headers]
            order_all = pd.DataFrame(
                orders['rows'],
                columns=order_headers
            ).assign(account_cookie=accounts)
            order_all.towards = order_all.towards.apply(
                lambda x: trade_towards_cn_en[x]
            )
            if 'order_time' in order_headers:
                order_all['status'] = order_all.status.apply(
                    lambda x: order_status_cn_en[x]
                )
                if 'order_date' not in order_headers:
                    order_all.order_time = order_all.order_time.apply(
                        lambda x: QA_util_get_order_datetime(
                            dt='{} {}'.format(datetime.date.today(), x)
                        )
                    )
                else:
                    order_all = order_all.assign(
                        order_time=order_all.order_date
                        .apply(QA_util_date_int2str) + ' ' +
                        order_all.order_time
                    )
            if 'trade_time' in order_headers:
                order_all.trade_time = order_all.trade_time.apply(
                    lambda x: '{} {}'.format(datetime.date.today(), x)
                )
            if status == 'filled':
                return order_all.loc[:, self.dealstatus_headers].set_index(
                    ['account_cookie', 'realorder_id']
                ).sort_index()
            else:
                return order_all.loc[:, self.orderstatus_headers].set_index(
                    ['account_cookie', 'realorder_id']
                ).sort_index()
        else:
            print('response is None')
            return False
    except Exception as e:
        print(e)
        return False
Query orders. Arguments: accounts {[type]} -- [description] Keyword Arguments: status {str} -- 'open' for pending orders, 'filled' for filled orders (default: {'filled'}) Returns: [type] -- [description]
juraj-google-style
def _copy_source(s, graph, op_map, handle_captures, inverse_captures, base_graph): if handle_captures and s in inverse_captures: copied_placeholder = graph.capture(inverse_captures[s], name=s.op.name) elif s.op.type == 'PlaceholderWithDefault' and _constant_inputs(s): default_value = s.op.inputs[0] unavailable_inputs, unavailable_control_inputs = _copy_non_source(op=default_value.op, graph=graph, op_map=op_map, base_graph=base_graph) if unavailable_inputs or unavailable_control_inputs: raise AssertionError('Could not copy source node {} because it has inputs.'.format(default_value)) with ops.device(s.op.device): copied_placeholder = array_ops.placeholder_with_default(input=op_map[default_value], shape=s.shape, name=s.op.name) else: with ops.device(s.op.device): copied_placeholder = array_ops.placeholder(dtype=s.dtype, shape=s.shape, name=s.op.name) base_handle = resource_variable_ops.get_resource_handle_data(s) if base_handle.shape_and_type: resource_variable_ops._set_handle_shapes_and_types(copied_placeholder, base_handle, graph_mode=True) op_map[s] = copied_placeholder op_map[s.op] = copied_placeholder.op
Create a source in a graph based on a Tensor from a different graph. This function creates a placeholder analog of `s` in a graph with the following behavior: 1) If s is a captured Tensor or Variable and handle_captures is set to True, simply capture it in the new graph as well. 2) If s is a PlaceholderWithDefault whose default is a constant, preserve said default in the new graph. 3) When applicable, copy resource variable metadata from `s` to the newly created placeholder. Args: s: The source of interest. graph: The destination graph. op_map: A dict mapping ops and tensors in the old graph to the new one. handle_captures: A boolean indicating whether to re-capture s in the new graph or simply create a vanilla placeholder. inverse_captures: A dict mapping s back to the Tensor or Variable that it captures. base_graph: The graph being copied from.
github-repos
def setExtension(self, ext): if ext[0] != ".": ext = "." + ext self._ext = utils.asString(ext)
Set a new file extension for the sequence. Note: A leading period will be added if none is provided. Args: ext (str): the new file extension
juraj-google-style
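A usage sketch, assuming `seq` is an instance of the file-sequence class containing this method (the class itself is not shown in this entry):

# Hypothetical usage with a file-sequence object `seq`.
seq.setExtension('jpg')  # equivalent to seq.setExtension('.jpg'); the period is added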
def center_label(self, input_length, order): location_in_the_box = '*'.center(input_length * 2 - 1).index('*') + 1 top_limit = order * 2 + 2 bot_limit = top_limit + 2 if top_limit <= location_in_the_box < bot_limit: if location_in_the_box == top_limit: self.top_connect = self.label elif location_in_the_box == top_limit + 1: self.mid_content = self.label else: self.bot_connect = self.label
In multi-bit elements, the label is centered vertically. Args: input_length (int): The number of wires affected. order (int): Which middle element is this one?
juraj-google-style
def patch_addPadding(self, patches):
    paddingLength = self.Patch_Margin
    nullPadding = ''
    for x in range(1, paddingLength + 1):
        nullPadding += chr(x)

    # Bump all the patches forward.
    for patch in patches:
        patch.start1 += paddingLength
        patch.start2 += paddingLength

    # Add some padding on start of first diff.
    patch = patches[0]
    diffs = patch.diffs
    if not diffs or diffs[0][0] != self.DIFF_EQUAL:
        diffs.insert(0, (self.DIFF_EQUAL, nullPadding))
        patch.start1 -= paddingLength
        patch.start2 -= paddingLength
        patch.length1 += paddingLength
        patch.length2 += paddingLength
    elif paddingLength > len(diffs[0][1]):
        extraLength = paddingLength - len(diffs[0][1])
        newText = nullPadding[len(diffs[0][1]):] + diffs[0][1]
        diffs[0] = (diffs[0][0], newText)
        patch.start1 -= extraLength
        patch.start2 -= extraLength
        patch.length1 += extraLength
        patch.length2 += extraLength

    # Add some padding on end of last diff.
    patch = patches[-1]
    diffs = patch.diffs
    if not diffs or diffs[-1][0] != self.DIFF_EQUAL:
        diffs.append((self.DIFF_EQUAL, nullPadding))
        patch.length1 += paddingLength
        patch.length2 += paddingLength
    elif paddingLength > len(diffs[-1][1]):
        extraLength = paddingLength - len(diffs[-1][1])
        newText = diffs[-1][1] + nullPadding[:extraLength]
        diffs[-1] = (diffs[-1][0], newText)
        patch.length1 += extraLength
        patch.length2 += extraLength

    return nullPadding
Add some padding on text start and end so that edges can match something. Intended to be called only from within patch_apply. Args: patches: Array of Patch objects. Returns: The padding string added to each side.
codesearchnet
def download_report_hook(count, block_size, total_size): percent = int(count * block_size * 100 / total_size) print("\r%d%%" % percent + " completed", end="\r")
Report hook for download progress. Args: count: current block number block_size: block size total_size: total size
juraj-google-style
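The `(count, block_size, total_size)` signature matches the `reporthook` callback of `urllib.request.urlretrieve`, so the function can be plugged in directly (URL and destination below are placeholders):

from urllib.request import urlretrieve

urlretrieve('https://example.com/data.zip', 'data.zip',
            reporthook=download_report_hook)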
def get_counters(counter_list):
    if not isinstance(counter_list, list):
        raise CommandExecutionError('counter_list must be a list of tuples')
    try:
        query = win32pdh.OpenQuery()
        counters = build_counter_list(counter_list)
        for counter in counters:
            counter.add_to_query(query)
        # Collect twice with a pause so rate counters have two samples.
        win32pdh.CollectQueryData(query)
        time.sleep(1)
        win32pdh.CollectQueryData(query)
        ret = {}
        for counter in counters:
            try:
                ret.update({counter.path: counter.value()})
            except pywintypes.error as exc:
                if exc.strerror == 'No data to return.':
                    continue
                else:
                    raise
    finally:
        win32pdh.CloseQuery(query)
    return ret
Get the values for the passed list of counters Args: counter_list (list): A list of counters to lookup Returns: dict: A dictionary of counters and their values
codesearchnet
def get_apps_to_backup(self):
    app_db = appsdb.ApplicationsDatabase()
    apps_to_backup = self._config.apps_to_sync or app_db.get_app_names()
    for app_name in self._config.apps_to_ignore:
        apps_to_backup.discard(app_name)
    return apps_to_backup
Get the list of applications that should be backed up by Mackup. It's the list of allowed apps minus the list of ignored apps. Returns: (set) List of application names to back up
codesearchnet
def split_into_batches(input_list, batch_size, batch_storage_dir, checkpoint=False):
    if checkpoint and not os.path.exists(batch_storage_dir):
        os.mkdir(batch_storage_dir)
    batches = [
        {
            'index': batch_index,
            'data': input_list[start_index:start_index + batch_size],
            'input_filename': os.path.join(
                batch_storage_dir, 'batch-{:05d}-input.pickle'.format(batch_index)),
            'result_filename': os.path.join(
                batch_storage_dir, 'batch-{:05d}-output.pickle'.format(batch_index)),
        }
        for batch_index, start_index in enumerate(
            range(0, len(input_list), batch_size))
    ]
    if checkpoint:
        for batch in batches:
            save(batch['data'], batch['input_filename'])
    return batches
Break the input data into smaller batches, optionally saving each one to disk. Args: input_list: An input object that has a list-like interface (indexing and slicing). batch_size: The maximum number of input items in each batch. batch_storage_dir: The directory to save the checkpoints to. checkpoint: Whether to save each batch to a file. Returns: A list of batch objects with the following structure: {'index', 'data', 'input_filename', 'result_filename'}
codesearchnet
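A quick sketch of the batching arithmetic, with checkpointing disabled so nothing touches disk:

batches = split_into_batches(list(range(10)), batch_size=4,
                             batch_storage_dir='/tmp/batches', checkpoint=False)
print([b['index'] for b in batches])  # -> [0, 1, 2]
print(batches[0]['data'])             # -> [0, 1, 2, 3]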
def structure_from_ncdata(ncdata, site_properties=None, cls=Structure): ncdata, closeit = as_ncreader(ncdata) lattice = ArrayWithUnit(ncdata.read_value("primitive_vectors"), "bohr").to("ang") red_coords = ncdata.read_value("reduced_atom_positions") natom = len(red_coords) znucl_type = ncdata.read_value("atomic_numbers") type_atom = ncdata.read_value("atom_species") species = natom * [None] for atom in range(natom): type_idx = type_atom[atom] - 1 species[atom] = int(znucl_type[type_idx]) d = {} if site_properties is not None: for prop in site_properties: d[property] = ncdata.read_value(prop) structure = cls(lattice, species, red_coords, site_properties=d) try: from abipy.core.structure import Structure as AbipyStructure structure.__class__ = AbipyStructure except ImportError: pass if closeit: ncdata.close() return structure
Reads and returns a pymatgen structure from a NetCDF file containing crystallographic data in the ETSF-IO format. Args: ncdata: filename or NetcdfReader instance. site_properties: Dictionary with site properties. cls: The Structure class to instantiate.
juraj-google-style
def __init__(self, default: typing.Optional[str] = MISSING_VALUE,
             regex: typing.Optional[str] = None,
             is_noneable: bool = False,
             frozen: bool = False):
    self._regex = re.compile(regex) if regex else None
    super().__init__(str, default, is_noneable=is_noneable, frozen=frozen)
Constructor. Args: default: Default value for this value spec. regex: Optional regular expression for acceptable value. is_noneable: If True, None is acceptable. frozen: If True, values other than the default value are not acceptable.
github-repos
def _read(cls, filepath_or_buffer, **kwargs): try: args, _, _, defaults, _, _, _ = inspect.getfullargspec(cls.read_csv) defaults = dict(zip(args[2:], defaults)) filtered_kwargs = { kw: kwargs[kw] for kw in kwargs if kw in defaults and not isinstance(kwargs[kw], type(defaults[kw])) or kwargs[kw] != defaults[kw] } except AttributeError: filtered_kwargs = kwargs if isinstance(filepath_or_buffer, str): if not file_exists(filepath_or_buffer): ErrorMessage.default_to_pandas("File path could not be resolved") return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs) elif not isinstance(filepath_or_buffer, py.path.local): read_from_pandas = True try: import pathlib if isinstance(filepath_or_buffer, pathlib.Path): read_from_pandas = False except ImportError: pass if read_from_pandas: ErrorMessage.default_to_pandas("Reading from buffer.") return cls._read_csv_from_pandas(filepath_or_buffer, kwargs) if ( _infer_compression(filepath_or_buffer, kwargs.get("compression")) is not None ): ErrorMessage.default_to_pandas("Compression detected.") return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs) chunksize = kwargs.get("chunksize") if chunksize is not None: ErrorMessage.default_to_pandas("Reading chunks from a file.") return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs) skiprows = kwargs.get("skiprows") if skiprows is not None and not isinstance(skiprows, int): ErrorMessage.default_to_pandas("skiprows parameter not optimized yet.") return cls._read_csv_from_pandas(filepath_or_buffer, kwargs) if kwargs.get("nrows") is not None: ErrorMessage.default_to_pandas("`read_csv` with `nrows`") return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs) else: return cls._read_csv_from_file_pandas_on_ray( filepath_or_buffer, filtered_kwargs )
Read csv file from local disk. Args: filepath_or_buffer: The filepath of the csv file. We only support local files for now. kwargs: Keyword arguments in pandas.read_csv
juraj-google-style
def compression_type(self):
    best_compression = None
    for e in self.mardata.index.entries:
        self.fileobj.seek(e.offset)
        magic = self.fileobj.read(10)
        compression = guess_compression(magic)
        if compression == 'xz':
            best_compression = 'xz'
            break
        elif compression == 'bz2' and best_compression is None:
            best_compression = 'bz2'
    return best_compression
Return the latest compression type used in this MAR. Returns: One of None, 'bz2', or 'xz'
codesearchnet
def unpack(value):
    if not is_packed(value):
        return value
    variant = value._tf_extension_type_packed_variant
    spec = value._tf_extension_type_cached_type_spec
    spec = spec._tf_extension_type_with_packed(False)
    return composite_tensor_ops.composite_tensor_from_variant(variant, spec)
Returns a copy of `value` with individual fields stored in __dict__. Args: value: An `ExtensionType` object. Returns: An `ExtensionType` object.
github-repos
def load_data_split(proc_data_dir):
    ds_train = Dataset.load(path.join(proc_data_dir, 'train.bin'))
    ds_val = Dataset.load(path.join(proc_data_dir, 'val.bin'))
    ds_test = Dataset.load(path.join(proc_data_dir, 'test.bin'))
    return ds_train, ds_val, ds_test
Loads a split dataset Args: proc_data_dir: Directory with the split and processed data Returns: (Training Data, Validation Data, Test Data)
juraj-google-style
def dagify_min_edge(g):
    while not nx.is_directed_acyclic_graph(g):
        cycle = next(nx.simple_cycles(g))
        scores = []
        edges = []
        # Walk the cycle's consecutive edges, closing back to the first node.
        for i, j in zip(cycle, cycle[1:] + cycle[:1]):
            edges.append((i, j))
            scores.append(g[i][j]['weight'])
        i, j = edges[scores.index(min(scores))]
        gc = deepcopy(g)
        gc.remove_edge(i, j)
        gc.add_edge(j, i)
        # Reverse the weakest edge if that reduces the number of cycles,
        # otherwise just remove it.
        if len(list(nx.simple_cycles(gc))) < len(list(nx.simple_cycles(g))):
            g.add_edge(j, i, weight=min(scores))
        g.remove_edge(i, j)
    return g
Input a graph and output a DAG. The heuristic is to reverse the edge with the lowest score of the cycle if possible, else remove it. Args: g (networkx.DiGraph): Graph to modify to output a DAG Returns: networkx.DiGraph: DAG made out of the input graph.
juraj-google-style
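An illustrative call on a two-node graph whose only cycle is broken by reversing the lighter edge:

import networkx as nx

g = nx.DiGraph()
g.add_edge('a', 'b', weight=0.9)
g.add_edge('b', 'a', weight=0.1)  # weakest edge of the a<->b cycle
dag = dagify_min_edge(g)
assert nx.is_directed_acyclic_graph(dag)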
def _CreateFlagItem(flag, docstring_info, spec, required=False, flag_string=None, short_arg=False): max_str_length = LINE_LENGTH - SECTION_INDENTATION - SUBSECTION_INDENTATION description = _GetArgDescription(flag, docstring_info) if not flag_string: flag_name_upper = formatting.Underline(flag.upper()) flag_string = f'--{flag}={flag_name_upper}' if required: flag_string += ' (required)' if short_arg: short_flag = flag[0] flag_string = f'-{short_flag}, {flag_string}' arg_type = _GetArgType(flag, spec) arg_default = _GetArgDefault(flag, spec) if arg_default == 'None': arg_type = f'Optional[{arg_type}]' arg_type = f'Type: {arg_type}' if arg_type else '' available_space = max_str_length - len(arg_type) arg_type = formatting.EllipsisTruncate(arg_type, available_space, max_str_length) arg_default = f'Default: {arg_default}' if arg_default else '' available_space = max_str_length - len(arg_default) arg_default = formatting.EllipsisTruncate(arg_default, available_space, max_str_length) description = '\n'.join((part for part in (arg_type, arg_default, description) if part)) return _CreateItem(flag_string, description, indent=SUBSECTION_INDENTATION)
Returns a string describing a flag using docstring and FullArgSpec info. Args: flag: The name of the flag. docstring_info: A docstrings.DocstringInfo namedtuple with information about the containing function's docstring. spec: An instance of fire.inspectutils.FullArgSpec, containing type and default information about the arguments to a callable. required: Whether the flag is required. flag_string: If provided, use this string for the flag, rather than constructing one from the flag name. short_arg: Whether the flag has a short variation or not. Returns: A string to be used in constructing the help screen for the function.
github-repos
def read(self, vals): i = 0 if len(vals[i]) == 0: self.comments_2 = None else: self.comments_2 = vals[i] i += 1
Read values. Args: vals (list): list of strings representing values
juraj-google-style
def __init__(self, order_dict, default_order=None): self.order_dict = order_dict.copy() self.default_order = default_order
Create a reorderer. Args: order_dict (dict of (str, `PackageOrder`): Orderers to apply to each package family. default_order (`PackageOrder`): Orderer to apply to any packages not specified in `order_dict`.
juraj-google-style
def find_exception_by_code(code): errorName = None for error in WebDriverError: if error.value.code == code: errorName = error break return errorName
Find name of exception by WebDriver defined error code. Args: code(str): Error code defined in protocol. Returns: The error name defined in protocol.
juraj-google-style
def get_property_dict(entity_proto): return dict((p.key, p.value) for p in entity_proto.property)
Convert datastore.Entity to a dict of property name -> datastore.Value. Args: entity_proto: datastore.Entity proto message. Usage: >>> get_property_dict(entity_proto) {'foo': {string_value='a'}, 'bar': {integer_value=2}} Returns: dict of entity properties.
juraj-google-style
def merge_annotations(code, annotations, param_annotations): if param_annotations: visitor = FunctionDefVisitor(param_annotations) pyc.visit(code, visitor) visitor = CollectAnnotationTargetsVisitor() code = pyc.visit(code, visitor) for line, op in visitor.store_ops.items(): if line in annotations: annot = annotations[line] if annot.name in (None, op.argval): op.annotation = annot.annotation for start, (end, op) in sorted(visitor.make_function_ops.items(), reverse=True): for i in range(start, end): if i in annotations: op.annotation = (annotations[i].annotation, i) break return code
Merges type comments into their associated opcodes. Modifies code in place. Args: code: An OrderedCode object. annotations: A map of lines to annotations. param_annotations: A list of _ParamAnnotations from the director Returns: The code with annotations added to the relevant opcodes.
github-repos
def convert(model_path: str, out_file: str): print('Converting', model_path, 'to', out_file, '...') import tensorflow as tf from precise.model import load_precise_model from keras import backend as K out_dir, filename = split(out_file) out_dir = out_dir or '.' os.makedirs(out_dir, exist_ok=True) K.set_learning_phase(0) model = load_precise_model(model_path) out_name = 'net_output' tf.identity(model.output, name=out_name) print('Output node name:', out_name) print('Output folder:', out_dir) sess = K.get_session() tf.train.write_graph(sess.graph.as_graph_def(), out_dir, filename + 'txt', as_text=True) print('Saved readable graph to:', filename + 'txt') from tensorflow.python.framework import graph_util from tensorflow.python.framework import graph_io cgraph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), [out_name]) graph_io.write_graph(cgraph, out_dir, filename, as_text=False) if isfile(model_path + '.params'): copyfile(model_path + '.params', out_file + '.params') print('Saved graph to:', filename) del sess
Converts an HD5F file from Keras to a .pb for use with TensorFlow Args: model_path: location of Keras model out_file: location to write protobuf
juraj-google-style
def blit_2x(self, console: tcod.console.Console, dest_x: int, dest_y: int, img_x: int=0, img_y: int=0, img_width: int=(- 1), img_height: int=(- 1)) -> None: lib.TCOD_image_blit_2x(self.image_c, _console(console), dest_x, dest_y, img_x, img_y, img_width, img_height)
Blit onto a Console with double resolution. Args: console (Console): Blit destination Console. dest_x (int): Console tile X position starting from the left at 0. dest_y (int): Console tile Y position starting from the top at 0. img_x (int): Left corner pixel of the Image to blit img_y (int): Top corner pixel of the Image to blit img_width (int): Width of the Image to blit. Use -1 for the full Image width. img_height (int): Height of the Image to blit. Use -1 for the full Image height.
codesearchnet
def create_log(log_file, uid): if not os.path.exists(log_file): dir_name = os.path.dirname(log_file) if not os.path.exists(dir_name): os.makedirs(dir_name, 0755) os.chown(dir_name, uid, -1) with open(log_file, "w") as f: f.write("") os.chown(log_file, uid, -1) os.chmod(log_file, 0640)
Create log file and set necessary permissions. Args: log_file (str): Path to the log file. uid (int): User ID - will be used for chown.
juraj-google-style
def update(self, data): updated = False if 'missing_tags' in data: updated |= self.set_property('missing_tags', data['missing_tags']) if 'notes' in data: updated |= self.set_property('notes', data['notes']) if 'state' in data: updated |= self.set_property('state', data['state']) if 'last_alert' in data: updated |= self.set_property('last_alert', data['last_alert']) if updated: now = datetime.now() self.set_property('last_change', now) return updated
Updates the object information based on live data, if there were any changes made. Any changes will be automatically applied to the object, but will not be automatically persisted. You must manually call `db.session.add(instance)` on the object. Args: data (:obj:): AWS API Resource object fetched from AWS API Returns: `bool`
juraj-google-style
def get_lattice_type(number):
    f = lambda i, j: i <= number <= j
    cs = {
        'triclinic': (1, 2),
        'monoclinic': (3, 15),
        'orthorhombic': (16, 74),
        'tetragonal': (75, 142),
        'trigonal': (143, 167),
        'hexagonal': (168, 194),
        'cubic': (195, 230),
    }
    crystal_system = None
    for k, v in cs.items():
        if f(*v):
            crystal_system = k
            break
    if number in [146, 148, 155, 160, 161, 166, 167]:
        return 'rhombohedral'
    elif crystal_system == 'trigonal':
        return 'hexagonal'
    else:
        return crystal_system
Return the lattice crystal system. Hexagonal cells are differentiated into rhombohedral and hexagonal lattices. Args: number (int): The international space group number. Returns: str: The lattice crystal system.
codesearchnet
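Spot checks against the international table numbers used above:

print(get_lattice_type(225))  # 195-230 -> 'cubic'
print(get_lattice_type(166))  # in the special list -> 'rhombohedral'
print(get_lattice_type(150))  # trigonal but not rhombohedral -> 'hexagonal'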
def update_z(self, z, indices=None):
    z = _make_np_bool(z)
    if indices is None:
        if len(self._z) != len(z):
            raise QiskitError('During updating whole z, you can not change '
                              'the number of qubits.')
        self._z = z
    else:
        if not isinstance(indices, list) and not isinstance(indices, np.ndarray):
            indices = [indices]
        for p, idx in enumerate(indices):
            self._z[idx] = z[p]
    return self
Update partial or entire z. Args: z (numpy.ndarray or list): to-be-updated z indices (numpy.ndarray or list or optional): to-be-updated qubit indices Returns: Pauli: self Raises: QiskitError: when updating whole z, the number of qubits must be the same.
codesearchnet
def run_iperf_client(self, server_host, extra_args=''):
    out = self.adb.shell('iperf3 -c %s %s' % (server_host, extra_args))
    clean_out = new_str(out, 'utf-8').strip().split('\n')
    if 'error' in clean_out[0].lower():
        return (False, clean_out)
    return (True, clean_out)
Start iperf client on the device. Return status as true if the iperf client starts successfully, along with data flow information as results. Args: server_host: Address of the iperf server. extra_args: A string representing extra arguments for iperf client, e.g. '-i 1 -t 30'. Returns: status: true if iperf client starts successfully. results: results containing data flow information
codesearchnet
def downloadMARCXML(doc_id, library, base='nkc'):
    downer = Downloader()
    data = downer.download(
        ALEPH_URL + Template(DOC_URL_TEMPLATE).substitute(DOC_ID=doc_id,
                                                          LIBRARY=library))
    dom = dhtmlparser.parseString(data)
    error = dom.find('login')
    if error:
        error_msg = error[0].find('error')
        if error_msg:
            raise LibraryNotFoundException(
                "Can't download document doc_id: '" + str(doc_id) + "' "
                "(probably bad library: '" + library + "')!\nMessage: " +
                '\n'.join(map(lambda x: x.getContent(), error_msg)))
    error = dom.find('ill-get-doc')
    if error:
        error_msg = error[0].find('error')
        if error_msg:
            raise DocumentNotFoundException(
                '\n'.join(map(lambda x: x.getContent(), error_msg)))
    return data
Download MARC XML document with given `doc_id` from given `library`. Args: doc_id (DocumentID): You will get this from :func:`getDocumentIDs`. library (str): "``NKC01``" in our case, but don't worry, :func:`getDocumentIDs` adds library specification into :class:`DocumentID` named tuple. Returns: str: MARC XML unicode string. Raises: LibraryNotFoundException DocumentNotFoundException
codesearchnet
def _load_from_file(path):
    config = []
    try:
        with open(path, 'r') as config_file:
            config = yaml.load(config_file)['normalizations']
    except EnvironmentError as e:
        raise ConfigError('Problem while loading file: %s' %
                          (e.args[1] if len(e.args) > 1 else e))
    except (TypeError, KeyError) as e:
        raise ConfigError('Config file has an unexpected structure: %s' % e)
    except yaml.YAMLError:
        raise ConfigError('Invalid YAML file syntax')
    return config
Load a config file from the given path. Load all normalizations from the config file received as argument. It expects to find a YAML file with a list of normalizations and arguments under the key 'normalizations'. Args: path: Path to YAML file.
codesearchnet
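From the code above, the YAML file must carry a top-level `normalizations` key holding a list. A minimal sketch of such a file and its loading (the file name and entries are illustrative):

# config.yaml (illustrative):
#
#   normalizations:
#     - replace_punctuation
#     - remove_accent_marks
#
config = _load_from_file('config.yaml')  # -> list of normalization entries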
def _ReadRecordHeader(self, file_object, record_header_offset): data_type_map = self._GetDataTypeMap('keychain_record_header') record_header, _ = self._ReadStructureFromFileObject( file_object, record_header_offset, data_type_map) return record_header
Reads the record header. Args: file_object (file): file-like object. record_header_offset (int): offset of the record header relative to the start of the file. Returns: keychain_record_header: record header. Raises: ParseError: if the record header cannot be read.
juraj-google-style
def restore(self, request): self._connection.connection.rpush(self._request_key, pickle.dumps(request))
Push the request back onto the queue. Args: request (Request): Reference to a request object that should be pushed back onto the request queue.
juraj-google-style
def add_context(self, name, context, prefix_char=None): if name in self.contexts: raise SuiteError("Context already in suite: %r" % name) if not context.success: raise SuiteError("Context is not resolved: %r" % name) self.contexts[name] = dict(name=name, context=context.copy(), tool_aliases={}, hidden_tools=set(), priority=self._next_priority, prefix_char=prefix_char) self._flush_tools()
Add a context to the suite. Args: name (str): Name to store the context under. context (ResolvedContext): Context to add.
juraj-google-style
def get(self, column, default_value=None):
    if isinstance(column, (list, tuple)):
        ret = []
        for col in column:
            ret.append(self.get(col, default_value))
        return ret
    try:
        return self._values[column]
    except (IndexError, TypeError):
        pass
    try:
        return self[column]
    except IndexError:
        return default_value
Get an item from the Row by column name. Args: column: Tuple of column names, or a (str) column name, or positional column number, 0-indexed. default_value: The value to use if the key is not found. Returns: A list or string with column value(s) or default_value if not found.
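A minimal usage sketch, assuming `row` is a populated Row instance with columns 'name' and 'port' (both names are illustrative):

row.get('name')                             # value of the 'name' column
row.get(('name', 'port'), 'n/a')            # list of both values, 'n/a' where missing
row.get('missing_column', default_value=0)  # -> 0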
juraj-google-style
def from_esri_code(code):
    code = str(code)
    proj4 = utils.crscode_to_string('esri', code, 'proj4')
    crs = from_proj4(proj4)
    return crs
Load crs object from esri code, via spatialreference.org.
Parses based on the proj4 representation.

Arguments:

- *code*: The ESRI code as an integer (or string).

Returns:

- A CRS instance of the indicated type.
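A minimal usage sketch; 54030 (ESRI's World Robinson projection) is an illustrative code, and note that the lookup goes over the network to spatialreference.org:

crs = from_esri_code(54030)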
codesearchnet
def is_comparable_type(var, type_):
    other_types = COMPARABLE_TYPES.get(type_, type_)
    return isinstance(var, other_types)
Check to see if `var` is an instance of known compatible types for `type_` Args: var (?): type_ (?): Returns: bool: CommandLine: python -m utool.util_type is_comparable_type --show Example: >>> # DISABLE_DOCTEST >>> from utool.util_type import * # NOQA >>> import utool as ut >>> flags = [] >>> flags += [is_comparable_type(0, float)] >>> flags += [is_comparable_type(0, np.float32)] >>> flags += [is_comparable_type(0, np.int32)] >>> flags += [is_comparable_type(0, int)] >>> flags += [is_comparable_type(0.0, int)] >>> result = ut.repr2(flags) >>> print(result) [True, True, True, True, False]
codesearchnet
def draw_vr_anaglyph(cube_fbo, vr_scene, active_scene, eye_poses=(.035, -.035)): color_masks = [(True, False, False, True), (False, True, True, True)] cam = vr_scene.camera orig_cam_position = cam.position.xyz for color_mask, eye_pos in zip(color_masks, eye_poses): gl.glColorMask(*color_mask) cam.position.xyz = cam.model_matrix.dot([eye_pos, 0., 0., 1.])[:3] cam.uniforms['playerPos'] = cam.position.xyz with cube_fbo as fbo: vr_scene.draw360_to_texture(fbo.texture) cam.position.xyz = orig_cam_position active_scene.draw()
Experimental anaglyph drawing function for VR system with red/blue
glasses, used in Sirota lab. Draws a virtual scene in red and blue from
the subject's (head-tracked) perspective in the active scene.

Note: assumes the shader uses playerPos, like ratcave's default shader.

Args:
    cube_fbo: texture frameBuffer object.
    vr_scene: virtual scene object
    active_scene: active scene object
    eye_poses: the left/right eye x-offsets from the camera position

Returns:
    None
juraj-google-style
def _CreateCampaignGroup(client):
  campaign_group_service = client.GetService('CampaignGroupService',
                                             version='v201809')
  operations = [{
      'operator': 'ADD',
      'operand': {
          'name': 'Mars campaign group'
      }
  }]
  campaign_group = campaign_group_service.mutate(operations)['value'][0]
  campaign_group_id = campaign_group['id']
  print 'Campaign group with ID "%d" and name "%s" was created.' % (
      campaign_group_id, campaign_group['name'])
  return campaign_group_id
Create a campaign group. Args: client: an AdWordsClient instance. Returns: The integer ID of the created campaign group.
juraj-google-style
def run_validate_program_main(self, program_main): program_language = self.profile.get('install_json').get('programLanguage', 'python').lower() if program_language == 'python' and not os.path.isfile('{}.py'.format(program_main)): print( '{}{}Could not find program main file ({}).'.format( c.Style.BRIGHT, c.Fore.RED, program_main ) ) sys.exit(1)
Validate the program main file exists. Args: program_main (str): The executable name.
juraj-google-style
def _decode_crop_and_flip(image_buffer, num_channels): min_object_covered=0.1 aspect_ratio_range=[0.75, 1.33] area_range=[0.05, 1.0] max_attempts=100 mlperf_log.resnet_print(key=mlperf_log.INPUT_DISTORTED_CROP_MIN_OBJ_COV, value=min_object_covered) mlperf_log.resnet_print(key=mlperf_log.INPUT_DISTORTED_CROP_RATIO_RANGE, value=aspect_ratio_range) mlperf_log.resnet_print(key=mlperf_log.INPUT_DISTORTED_CROP_AREA_RANGE, value=area_range) mlperf_log.resnet_print(key=mlperf_log.INPUT_DISTORTED_CROP_MAX_ATTEMPTS, value=max_attempts) mlperf_log.resnet_print(key=mlperf_log.INPUT_CROP_USES_BBOXES, value=False) bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]) sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( tf.image.extract_jpeg_shape(image_buffer), bounding_boxes=bbox, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=True) bbox_begin, bbox_size, _ = sample_distorted_bounding_box offset_y, offset_x, _ = tf.unstack(bbox_begin) target_height, target_width, _ = tf.unstack(bbox_size) crop_window = tf.stack([offset_y, offset_x, target_height, target_width]) cropped = tf.image.decode_and_crop_jpeg( image_buffer, crop_window, channels=num_channels) mlperf_log.resnet_print(key=mlperf_log.INPUT_RANDOM_FLIP) cropped = tf.image.random_flip_left_right(cropped) return cropped
Crops the given image to a random part of the image, and randomly flips. We use the fused decode_and_crop op, which performs better than the two ops used separately in series, but note that this requires that the image be passed in as an un-decoded string Tensor. Args: image_buffer: scalar string Tensor representing the raw JPEG image buffer. num_channels: Integer depth of the image buffer for decoding. Returns: 3-D tensor with cropped image.
juraj-google-style
def get_dict_definition(self, dict, get_list=False): list_def_candidate = [] for definition_name in self.specification['definitions'].keys(): if self.validate_definition(definition_name, dict): if not get_list: return definition_name list_def_candidate.append(definition_name) if get_list: return list_def_candidate return None
Get the definition name of the given dict.

Args:
    dict: dict to test.
    get_list: if set to True, return a list of all definitions that
        match the dict; if False, only return the first match.

Returns:
    The definition name, or None if the dict does not match any
    definition. If get_list is True, return a list of matching
    definition names.
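A minimal usage sketch, assuming `parser` is an instance of the enclosing class with a specification already loaded; the dict contents are illustrative:

name = parser.get_dict_definition({'id': 1, 'name': 'cat'})
candidates = parser.get_dict_definition({'id': 1, 'name': 'cat'}, get_list=True)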
juraj-google-style
def form_to_params(fn=None, return_json=True):
    def forms_to_params_decorator(fn):
        @handle_type_error
        @wraps(fn)
        def forms_to_params_wrapper(*args, **kwargs):
            kwargs.update(dict(request.forms))
            if not return_json:
                return fn(*args, **kwargs)
            return encode_json_body(fn(*args, **kwargs))
        return forms_to_params_wrapper

    # allow use both with and without parentheses
    if fn:
        return forms_to_params_decorator(fn)
    return forms_to_params_decorator
Convert bottle forms request to parameters for the wrapped function. Args: return_json (bool, default True): Should the decorator automatically convert returned value to JSON?
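A minimal usage sketch with bottle; the route and field names are illustrative:

from bottle import post

@post('/login')
@form_to_params
def login(username=None, password=None):
    # form fields arrive as keyword arguments; the return value is
    # JSON-encoded because return_json defaults to True
    return {'user': username, 'ok': password is not None}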
codesearchnet
def in_place_subclassed_model_state_restoration(model): assert not model._is_graph_network if hasattr(model, '_original_attributes_cache') and model._original_attributes_cache is not None: setattr_tracking = model._setattr_tracking model._setattr_tracking = False model._self_tracked_trackables = [] for name, value in model._original_attributes_cache.items(): setattr(model, name, value) if isinstance(value, Layer): model._self_tracked_trackables.append(value) model._original_attributes_cache = None model._setattr_tracking = setattr_tracking else: _reset_build_compile_trackers(model)
Restores the original state of a model after it was "reset".

This undoes the action of `_in_place_subclassed_model_reset`, which is
called in `clone_and_build_model` if `in_place_reset` is set to True.

Args:
    model: Instance of a Keras model created via subclassing, on which
        `_in_place_subclassed_model_reset` was previously called.
github-repos
def get_vm(access_token, subscription_id, resource_group, vm_name):
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/resourceGroups/', resource_group,
                        '/providers/Microsoft.Compute/virtualMachines/', vm_name,
                        '?api-version=', COMP_API])
    return do_get(endpoint, access_token)
Get virtual machine details. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vm_name (str): Name of the virtual machine. Returns: HTTP response. JSON body of VM properties.
juraj-google-style
def export_with_dynamic_cache(model: PreTrainedModel, example_input_ids: Optional[torch.Tensor]=None, example_attention_mask: Optional[torch.Tensor]=None): if not is_torch_greater_or_equal_than_2_3: raise ImportError('torch >= 2.3 is required.') ALL_MASK_ATTENTION_FUNCTIONS.register('sdpa_without_vmap', sdpa_mask_without_vmap) ALL_ATTENTION_FUNCTIONS.register('sdpa_without_vmap', ALL_ATTENTION_FUNCTIONS['sdpa']) model.config._attn_implementation = 'sdpa_without_vmap' with torch.no_grad(): exported_program = torch.export.export(model, (), {'input_ids': example_input_ids, 'attention_mask': example_attention_mask, 'past_key_values': DynamicCache(), 'use_cache': True}, strict=False) return exported_program
Export a model with DynamicCache using `torch.export`, ensuring the exported model is compatible with `ExecuTorch`. Args: model (`PreTrainedModel`): The pretrained model to be exported. example_input_ids (`Optional[torch.Tensor]`): Example input token id used by `torch.export`. example_attention_mask (`Optional[torch.Tensor]`): Example attention mask used by `torch.export`. Returns: Exported program (`torch.export.ExportedProgram`): The exported program generated via `torch.export`.
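A minimal usage sketch; the model name and input shapes are illustrative:

import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained('gpt2')
input_ids = torch.tensor([[1, 2, 3]])
attention_mask = torch.ones_like(input_ids)
exported = export_with_dynamic_cache(model, input_ids, attention_mask)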
github-repos
def list_children(self, obj, save_type=base.SaveType.CHECKPOINT, **kwargs): children = [] for name, ref in super(ObjectGraphView, self).children(obj, save_type, **kwargs).items(): children.append(base.TrackableReference(name, ref)) if obj is self.root and self._attached_dependencies: children.extend(self._attached_dependencies) return children
Returns list of all child trackables attached to obj. Args: obj: A `Trackable` object. save_type: A string, can be 'savedmodel' or 'checkpoint'. **kwargs: kwargs to use when retrieving the object's children. Returns: List of all children attached to the object.
github-repos
def _ParseLogFileOptions(self, options):
    self._log_file = self.ParseStringOption(options, 'log_file')
    if not self._log_file:
        local_date_time = datetime.datetime.now()
        self._log_file = (
            '{0:s}-{1:04d}{2:02d}{3:02d}T{4:02d}{5:02d}{6:02d}.log.gz'.format(
                self.NAME, local_date_time.year, local_date_time.month,
                local_date_time.day, local_date_time.hour,
                local_date_time.minute, local_date_time.second))
Parses the log file options. Args: options (argparse.Namespace): command line arguments.
codesearchnet
def insert_or_update(table, columns, values):
    rows = len(values)
    cells = len(columns) * len(values)
    return _Mutator(
        mutation=Mutation(
            insert_or_update=batch._make_write_pb(table, columns, values)),
        operation=WriteMutation._OPERATION_INSERT_OR_UPDATE,
        rows=rows,
        cells=cells,
        kwargs={'table': table, 'columns': columns, 'values': values})
Insert/update one or more table rows. Args: table: Name of the table to be modified. columns: Name of the table columns to be modified. values: Values to be modified.
github-repos
def binary_op(self, op, other, **kwargs):
    func = getattr(pandas.DataFrame, op)
    return self._inter_df_op_handler(func, other, **kwargs)
Perform an operation between two objects. Note: The list of operations is as follows: - add - eq - floordiv - ge - gt - le - lt - mod - mul - ne - pow - rfloordiv - rmod - rpow - rsub - rtruediv - sub - truediv - __and__ - __or__ - __xor__ Args: op: The operation. See list of operations above other: The object to operate against. Returns: A new QueryCompiler object.
codesearchnet
def _check_element_shape(self, shape): if not shape.is_compatible_with(self.element_shape): raise ValueError('Inconsistent shapes: saw %s but expected %s ' % (shape, self.element_shape)) if self._infer_shape: self._element_shape[0] = self.element_shape.merge_with(shape)
Changes the element shape of the array given a shape to merge with. Args: shape: A `TensorShape` object to merge with. Raises: ValueError: if the provided shape is incompatible with the current element shape of the `TensorArray`.
github-repos
def resize_images(x, height_factor, width_factor, data_format, interpolation='nearest'): if data_format == 'channels_first': rows, cols = (2, 3) elif data_format == 'channels_last': rows, cols = (1, 2) else: raise ValueError('Invalid `data_format` argument: %s' % (data_format,)) new_shape = x.shape[rows:cols + 1] if new_shape.is_fully_defined(): new_shape = constant_op.constant(new_shape.as_list(), dtype='int32') else: new_shape = array_ops.shape_v2(x)[rows:cols + 1] new_shape *= constant_op.constant(np.array([height_factor, width_factor], dtype='int32')) if data_format == 'channels_first': x = permute_dimensions(x, [0, 2, 3, 1]) if interpolation == 'nearest': x = image_ops.resize_images_v2(x, new_shape, method=image_ops.ResizeMethod.NEAREST_NEIGHBOR) elif interpolation == 'bilinear': x = image_ops.resize_images_v2(x, new_shape, method=image_ops.ResizeMethod.BILINEAR) else: raise ValueError('interpolation should be one of "nearest" or "bilinear".') if data_format == 'channels_first': x = permute_dimensions(x, [0, 3, 1, 2]) return x
Resizes the images contained in a 4D tensor. Args: x: Tensor or variable to resize. height_factor: Positive integer. width_factor: Positive integer. data_format: One of `"channels_first"`, `"channels_last"`. interpolation: A string, one of `nearest` or `bilinear`. Returns: A tensor. Raises: ValueError: in case of incorrect value for `data_format` or `interpolation`.
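A minimal usage sketch doubling the spatial size of a batch of channels_last images; shapes are illustrative:

import numpy as np
import tensorflow as tf

x = tf.constant(np.random.rand(2, 16, 16, 3), dtype='float32')
y = resize_images(x, height_factor=2, width_factor=2,
                  data_format='channels_last', interpolation='bilinear')
# y has shape (2, 32, 32, 3)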
github-repos
def symbol(name: str = None, symbol_type: Type[Symbol] = Symbol) -> 'SymbolWildcard':
    # allow passing a Symbol subclass as the first (and only) argument
    if isinstance(name, type) and issubclass(name, Symbol) and symbol_type is Symbol:
        return SymbolWildcard(name)
    return SymbolWildcard(symbol_type, variable_name=name)
Create a `SymbolWildcard` that matches a single `Symbol` argument. Args: name: Optional variable name for the wildcard. symbol_type: An optional subclass of `Symbol` to further limit which kind of symbols are matched by the wildcard. Returns: A `SymbolWildcard` that matches the *symbol_type*.
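A minimal usage sketch of the matchpy-style API this function wraps; `MySymbol` is a hypothetical Symbol subclass:

w = symbol('x')                              # matches any single Symbol, captured as 'x'
w_typed = symbol('x', symbol_type=MySymbol)  # restricted to MySymbol instances
w_anon = symbol(MySymbol)                    # type as first argument, no capture name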
codesearchnet
def get_group_id(self, uuid=None):
    group_data = self.get_group(uuid)
    try:
        return group_data['response']['docs'][0]['id']
    except (KeyError, IndexError):
        failure_message = (
            'Error in get_group response data - got {0}'.format(group_data))
        log.exception(failure_message)
        raise PyLmodUnexpectedData(failure_message)
Get group id based on uuid.

Args:
    uuid (str): Optional uuid. Defaults to self.cuuid.

Raises:
    PyLmodUnexpectedData: No group data was returned.
    requests.RequestException: Exception connection error.

Returns:
    int: numeric group id
codesearchnet
def replaceFA(self, faDataType: int, xml: str):
    self.client.replaceFA(faDataType, xml)
Replaces Financial Advisor's settings. Args: faDataType: See :meth:`.requestFA`. xml: The XML-formatted configuration string.
codesearchnet
def download(self, temp_ver, store_metadata=True): dest = self._prefixed(temp_ver.name) temp_dest = '%s.tmp' % dest with utils.LockFile(dest + '.lock'): if os.path.exists(dest): return temp_ver.download(temp_dest) if store_metadata: with open('%s.metadata' % dest, 'w') as f: utils.json_dump(temp_ver.get_metadata(), f) sha1 = utils.get_hash(temp_dest) if temp_ver.get_hash() != sha1: raise RuntimeError( 'Image %s does not match the expected hash %s' % ( temp_ver.name, sha1, ) ) with open('%s.hash' % dest, 'w') as f: f.write(sha1) with log_utils.LogTask('Convert image', logger=LOGGER): result = utils.run_command( [ 'qemu-img', 'convert', '-O', 'raw', temp_dest, dest, ], ) os.unlink(temp_dest) if result: raise RuntimeError(result.err)
Retrieve the given template version Args: temp_ver (TemplateVersion): template version to retrieve store_metadata (bool): If set to ``False``, will not refresh the local metadata with the retrieved one Returns: None
juraj-google-style
def describe_message(message_definition):
    message_descriptor = MessageDescriptor()
    message_descriptor.name = message_definition.definition_name().split('.')[-1]

    fields = sorted(message_definition.all_fields(), key=lambda v: v.number)
    if fields:
        message_descriptor.fields = [describe_field(field) for field in fields]

    try:
        nested_messages = message_definition.__messages__
    except AttributeError:
        pass
    else:
        message_descriptors = []
        for name in nested_messages:
            value = getattr(message_definition, name)
            message_descriptors.append(describe_message(value))
        message_descriptor.message_types = message_descriptors

    try:
        nested_enums = message_definition.__enums__
    except AttributeError:
        pass
    else:
        enum_descriptors = []
        for name in nested_enums:
            value = getattr(message_definition, name)
            enum_descriptors.append(describe_enum(value))
        message_descriptor.enum_types = enum_descriptors

    return message_descriptor
Build descriptor for Message class. Args: message_definition: Message class to provide descriptor for. Returns: Initialized MessageDescriptor instance describing the Message class.
codesearchnet
def prune_graph(graph_str, package_name): g = read_dot(graph_str) nodes = set() for node, attrs in g.node_attr.iteritems(): attr = [x for x in attrs if x[0] == "label"] if attr: label = attr[0][1] try: req_str = _request_from_label(label) request = PackageRequest(req_str) except PackageRequestError: continue if request.name == package_name: nodes.add(node) if not nodes: raise ValueError("The package %r does not appear in the graph." % package_name) g_rev = g.reverse() accessible_nodes = set() access = accessibility(g_rev) for node in nodes: nodes_ = access.get(node, []) accessible_nodes |= set(nodes_) inaccessible_nodes = set(g.nodes()) - accessible_nodes for node in inaccessible_nodes: g.del_node(node) return write_dot(g)
Prune a package graph so it only contains nodes accessible from the given package. Args: graph_str (str): Dot-language graph string. package_name (str): Name of package of interest. Returns: Pruned graph, as a string.
juraj-google-style
def __init__(self, resolver_context, file_object=None): if file_object: raise ValueError('File object value set.') super(EWFFile, self).__init__(resolver_context) self._file_objects = []
Initializes a file-like object. Args: resolver_context (Context): resolver context. file_object (Optional[FileIO]): file-like object. Raises: ValueError: when file_object is set.
juraj-google-style
def add_answer_for_student(student_item, vote, rationale): answers = get_answers_for_student(student_item) answers.add_answer(vote, rationale) sub_api.create_submission(student_item, { ANSWER_LIST_KEY: answers.get_answers_as_list() })
Add an answer for a student to the backend Args: student_item (dict): The location of the problem this submission is associated with, as defined by a course, student, and item. vote (int): the option that student voted for rationale (str): the reason why the student vote for the option
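A minimal usage sketch; the student_item fields follow the edx-submissions convention and all values are illustrative:

student_item = {
    'student_id': 'student-1',
    'course_id': 'course-v1:UBC+PI+2015',
    'item_id': 'block-1',
    'item_type': 'ubcpi',
}
add_answer_for_student(student_item, vote=1, rationale='Momentum is conserved.')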
juraj-google-style
def parse(self, sentence: str) -> typing.List[str]: if sentence == '': return [] chunks = [sentence[0]] base_score = -sum((sum(g.values()) for g in self.model.values())) * 0.5 for i in range(1, len(sentence)): score = base_score if i > 2: score += self.model.get('UW1', {}).get(sentence[i - 3], 0) if i > 1: score += self.model.get('UW2', {}).get(sentence[i - 2], 0) score += self.model.get('UW3', {}).get(sentence[i - 1], 0) score += self.model.get('UW4', {}).get(sentence[i], 0) if i + 1 < len(sentence): score += self.model.get('UW5', {}).get(sentence[i + 1], 0) if i + 2 < len(sentence): score += self.model.get('UW6', {}).get(sentence[i + 2], 0) if i > 1: score += self.model.get('BW1', {}).get(sentence[i - 2:i], 0) score += self.model.get('BW2', {}).get(sentence[i - 1:i + 1], 0) if i + 1 < len(sentence): score += self.model.get('BW3', {}).get(sentence[i:i + 2], 0) if i > 2: score += self.model.get('TW1', {}).get(sentence[i - 3:i], 0) if i > 1: score += self.model.get('TW2', {}).get(sentence[i - 2:i + 1], 0) if i + 1 < len(sentence): score += self.model.get('TW3', {}).get(sentence[i - 1:i + 2], 0) if i + 2 < len(sentence): score += self.model.get('TW4', {}).get(sentence[i:i + 3], 0) if score > 0: chunks.append(sentence[i]) else: chunks[-1] += sentence[i] return chunks
Parses the input sentence and returns a list of semantic chunks. Args: sentence (str): An input sentence. Returns: A list of semantic chunks (List[str]).
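A minimal usage sketch, assuming `parser` is an instance of the enclosing class with a trained model loaded; the segmentation shown is illustrative and depends on the model:

chunks = parser.parse('今日は良い天気です。')
# e.g. ['今日は', '良い', '天気です。']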
github-repos