Columns: code (string, 20–4.93k chars) · docstring (string, 33–1.27k chars) · source (string, 3 classes)
def of_cte(cls, header: Optional[ContentTransferEncodingHeader]) \
        -> 'MessageDecoder':
    if header is None:
        return _NoopDecoder()
    hdr_str = str(header).lower()
    custom = cls.registry.get(hdr_str)
    if custom is not None:
        return custom
    elif hdr_str in ('7bit', '8bit'):
        return _NoopDecoder()
    elif hdr_str == 'quoted-printable':
        return _QuotedPrintableDecoder()
    elif hdr_str == 'base64':
        return _Base64Decoder()
    else:
        raise NotImplementedError(hdr_str)

Return a decoder from the CTE header value.

There is built-in support for ``7bit``, ``8bit``, ``quoted-printable``,
and ``base64`` CTE header values. Decoders can be added or overridden
with the :attr:`.registry` dictionary.

Args:
    header: The CTE header value.
juraj-google-style
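A minimal sketch of the registry hook described above; the 'x-uuencode' value and the decode method name are assumptions, not part of the source:

class _UUDecoder(MessageDecoder):
    def decode(self, data: bytes) -> bytes:
        # hypothetical decoder body for an 'x-uuencode' CTE value
        ...

# After registration, of_cte() returns this decoder for the value.
MessageDecoder.registry['x-uuencode'] = _UUDecoder()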
def learn(self, iter_n=500, k_step=10):
    generative_model, discriminative_model = self.__GAN.train(
        self.__true_sampler,
        self.__generative_model,
        self.__discriminative_model,
        iter_n=iter_n,
        k_step=k_step
    )
    self.__generative_model = generative_model
    self.__discriminative_model = discriminative_model

Learning.

Args:
    iter_n: The number of training iterations.
    k_step: The number of `discriminator` learning steps per iteration.
juraj-google-style
def save_users(users, path=settings.LOGIN_FILE):
    with open(path, "w") as fh:
        for username, data in users.items():
            pass_line = username + ":" + ":".join([
                data["pass_hash"],
                data["uid"],
                data["gid"],
                data["full_name"],
                data["home"],
                data["shell"]
            ])
            fh.write(pass_line + "\n")

Save dictionary with user data to passwd file (default
:attr:`ftp.settings.LOGIN_FILE`).

Args:
    users (dict): Dictionary with user data. For details, see the dict
        returned by :func:`load_users`.
    path (str, default settings.LOGIN_FILE): Path of the file where the
        data will be stored (default :attr:`ftp.settings.LOGIN_FILE`).
juraj-google-style
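A short worked example of the colon-separated passwd line produced above (values are illustrative):

users = {
    'alice': {'pass_hash': 'x', 'uid': '1000', 'gid': '1000',
              'full_name': 'Alice', 'home': '/home/alice',
              'shell': '/bin/bash'},
}
save_users(users, path='/tmp/passwd_test')
# /tmp/passwd_test now contains one line:
# alice:x:1000:1000:Alice:/home/alice:/bin/bash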
def memoizedmethod(method):
    method_name = method.__name__

    @wraps(method)
    def patched(self, *args, **kwargs):
        try:
            return self._cache[method_name]
        except KeyError:
            result = self._cache[method_name] = method(
                self, *args, **kwargs)
            return result

    return patched

Decorator that caches method result.

Args:
    method (function): Method

Returns:
    function: Memoized method.

Notes:
    The target method's class needs a "_cache" attribute (dict).
    This is the case for "ObjectIOBase" and all its subclasses.
juraj-google-style
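A minimal sketch showing the required `_cache` attribute in action (the class here is hypothetical):

class Example:
    def __init__(self):
        self._cache = {}  # required by @memoizedmethod

    @memoizedmethod
    def expensive(self):
        print('computed')
        return 42

e = Example()
e.expensive()  # prints 'computed' and returns 42
e.expensive()  # returns 42 straight from self._cache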
def __init__(self, num_workers, *unused_args, **unused_kwargs):
    super().__init__(*unused_args, **unused_kwargs)
    self._num_workers = num_workers
    self._successful_ops = util.MovingSum(window_ms=1000, bucket_ms=1000)
    self._first_instant = datetime.datetime.now()
    self._throttled_secs = Metrics.counter(
        RampupThrottlingFn, 'cumulativeThrottlingSeconds')

Initializes a ramp-up throttler transform.

Args:
    num_workers: A hint for the expected number of workers, used to
        derive the local rate limit.
github-repos
def __call__(self, fn):
    def output(app, *args, **kwargs):
        data = fn(app, *args, **kwargs)
        attr = getattr(app, self.attribute)
        if isinstance(data, list) and isinstance(attr, list):
            getattr(app, self.attribute).extend(data)
        elif isinstance(attr, list):
            getattr(app, self.attribute).append(data)
        else:
            setattr(app, self.attribute, data)
        return data

    return output

Implement __call__ function for decorator.

Args:
    fn (function): The decorated function.

Returns:
    function: The custom decorator function.
juraj-google-style
def get_file(profile, branch, file_path):
    branch_sha = get_branch_sha(profile, branch)
    tree = get_files_in_branch(profile, branch_sha)
    match = None
    for item in tree:
        if item.get('path') == file_path:
            match = item
            break
    file_sha = match.get('sha')
    blob = blobs.get_blob(profile, file_sha)
    content = blob.get('content')
    decoded_content = b64decode(content)
    return decoded_content.decode('utf-8')

Get a file from a branch.

Args:
    profile
        A profile generated from ``simplygithub.authentication.profile``.
        Such profiles tell this module (i) the ``repo`` to connect to,
        and (ii) the ``token`` to connect with.
    branch
        The name of a branch.
    file_path
        The path of the file to fetch.

Returns:
    The (UTF-8 encoded) content of the file, as a string.
codesearchnet
def refs(self, type='all', **kwargs):
    path = '%s/%s/refs' % (self.manager.path, self.get_id())
    data = {'type': type}
    return self.manager.gitlab.http_get(path, query_data=data, **kwargs)

List the references the commit is pushed to.

Args:
    type (str): The scope of references ('branch', 'tag' or 'all')
    **kwargs: Extra options to send to the server (e.g. sudo)

Raises:
    GitlabAuthenticationError: If authentication is not correct
    GitlabGetError: If the references could not be retrieved

Returns:
    list: The references the commit is pushed to.
juraj-google-style
def sin(duration: int, amp: complex, freq: float = None,
        phase: float = 0, name: str = None) -> SamplePulse:
    if freq is None:
        freq = 1 / duration
    return _sampled_sin_pulse(duration, amp, freq, phase=phase, name=name)

Generates sine wave `SamplePulse`.

Args:
    duration: Duration of pulse. Must be greater than zero.
    amp: Pulse amplitude.
    freq: Pulse frequency, units of 1/dt. If `None` defaults to single
        cycle.
    phase: Pulse phase.
    name: Name of pulse.
codesearchnet
def solve2x2(lhs, rhs):
    if np.abs(lhs[1, 0]) > np.abs(lhs[0, 0]):
        ratio = lhs[0, 0] / lhs[1, 0]
        denominator = lhs[0, 1] - ratio * lhs[1, 1]
        if denominator == 0.0:
            return True, None, None
        y_val = (rhs[0] - ratio * rhs[1]) / denominator
        x_val = (rhs[1] - lhs[1, 1] * y_val) / lhs[1, 0]
        return False, x_val, y_val
    else:
        if lhs[0, 0] == 0.0:
            return True, None, None
        ratio = lhs[1, 0] / lhs[0, 0]
        denominator = lhs[1, 1] - ratio * lhs[0, 1]
        if denominator == 0.0:
            return True, None, None
        y_val = (rhs[1] - ratio * rhs[0]) / denominator
        x_val = (rhs[0] - lhs[0, 1] * y_val) / lhs[0, 0]
        return False, x_val, y_val

Solve a square 2 x 2 system via LU factorization.

This is meant to be a stand-in for LAPACK's ``dgesv``, which just wraps
two calls to ``dgetrf`` and ``dgetrs``. We wrap for two reasons:

* We seek to avoid exceptions as part of the control flow (which is
  what :func:`numpy.linalg.solve` does).
* We seek to avoid excessive type- and size-checking, since this
  special case is already known.

Args:
    lhs (numpy.ndarray): A ``2 x 2`` array of real numbers.
    rhs (numpy.ndarray): A 1D array of 2 real numbers.

Returns:
    Tuple[bool, float, float]: A triple of

    * A flag indicating if ``lhs`` is a singular matrix.
    * The first component of the solution.
    * The second component of the solution.
codesearchnet
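A worked example of the non-singular path, assuming solve2x2 and numpy are importable:

import numpy as np

lhs = np.array([[2.0, 1.0],
                [1.0, 3.0]])
rhs = np.array([5.0, 10.0])
singular, x, y = solve2x2(lhs, rhs)
# singular is False; x == 1.0 and y == 3.0, since
# 2*1 + 1*3 == 5 and 1*1 + 3*3 == 10.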
def create_application_configuration(self, name, properties, description=None):
    if not hasattr(self, 'applicationConfigurations'):
        raise NotImplementedError()
    cv = ApplicationConfiguration._props(name, properties, description)
    res = self.rest_client.session.post(
        self.applicationConfigurations,
        headers={'Accept': 'application/json'},
        json=cv)
    _handle_http_errors(res)
    return ApplicationConfiguration(res.json(), self.rest_client)

Create an application configuration.

Args:
    name (str): Name of the application configuration.
    properties (dict): Property values for the configuration.
    description (str, optional): Description of the configuration.

Returns:
    ApplicationConfiguration: The created application configuration.

.. versionadded:: 1.12
juraj-google-style
def export_analytics_data_to_csv(data, output_folder, result_info_key,
                                 identifier_keys):
    workbook = create_excel_workbook(data, result_info_key, identifier_keys)
    suffix = '.csv'
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    for worksheet in workbook.worksheets:
        file_name = utilities.convert_title_to_snake_case(worksheet.title)
        file_path = os.path.join(output_folder, file_name + suffix)
        mode = 'w'
        if sys.version_info[0] < 3:
            mode = 'wb'
        with io.open(file_path, mode) as output_file:
            csv_writer = csv.writer(output_file)
            for row in worksheet.rows:
                csv_writer.writerow([cell.value for cell in row])
    print('Saved CSV files to {}'.format(output_folder))

Creates CSV files containing data returned by the Analytics API.

Creates one file per requested endpoint and saves it into the specified
output_folder.

Args:
    data: Analytics API data as a list of dicts
    output_folder: Path to a folder to save the CSV files into
codesearchnet
def get_tensor_num_entries(self, tensor_name, partial_layout=None,
                           mesh_dimension_to_size=None):
    shape = self.get_tensor_shape(tensor_name)
    num_entries = 1
    for dim in shape.dims:
        num_entries = num_entries * dim.value
    if not partial_layout:
        return num_entries
    for mtf_dimension_name in self.get_tensor_mtf_dimension_names(
            tensor_name):
        if mtf_dimension_name not in partial_layout:
            continue
        mesh_dimension_name = partial_layout[mtf_dimension_name]
        mesh_dimension_size = mesh_dimension_to_size[mesh_dimension_name]
        num_entries = int(math.ceil(num_entries / mesh_dimension_size))
    return num_entries

The number of entries in a tensor.

If partial_layout is specified, then mesh_dimension_to_size must also
be. In this case, the number of entries on a single device is returned.

Args:
    tensor_name: a string, name of a tensor in the graph.
    partial_layout: an optional {string: string}, from MTF dimension
        name to mesh dimension name.
    mesh_dimension_to_size: an optional {string: int}, from mesh
        dimension name to size.

Returns:
    an integer
codesearchnet
def validate_read(self, address):
    if not any(address.startswith(ns) for ns in self._read_list):
        raise AuthorizationException(address=address)

Raises an exception if the address is not allowed to be read in this
context, based on txn inputs.

Args:
    address (str): An address to be validated.

Returns:
    None

Raises:
    AuthorizationException
codesearchnet
def _ReadMemberFooter(self, file_object):
    file_offset = file_object.get_offset()
    member_footer = self._ReadStructure(
        file_object, file_offset, self._MEMBER_FOOTER_SIZE,
        self._MEMBER_FOOTER, 'member footer')

    self.uncompressed_data_size = member_footer.uncompressed_data_size

Reads a member footer.

Args:
    file_object (FileIO): file-like object to read from.

Raises:
    FileFormatError: if the member footer cannot be read.
juraj-google-style
def retrieve(self, block_height, headers=None):
    path = self.path + block_height
    return self.transport.forward_request(
        method='GET', path=path, headers=headers)

Retrieves the block with the given ``block_height``.

Args:
    block_height (str): height of the block to retrieve.
    headers (dict): Optional headers to pass to the request.

Returns:
    dict: The block with the given ``block_height``.
codesearchnet
def GetArtifactPathDependencies(rdf_artifact):
    deps = set()
    for source in rdf_artifact.sources:
        for arg, value in iteritems(source.attributes):
            paths = []
            if arg in ['path', 'query']:
                paths.append(value)
            if arg == 'key_value_pairs':
                paths.extend([x['key'] for x in value])
            if arg in ['keys', 'paths', 'path_list', 'content_regex_list']:
                paths.extend(value)
            for path in paths:
                for match in artifact_utils.INTERPOLATED_REGEX.finditer(path):
                    deps.add(match.group()[2:-2])
    deps.update(GetArtifactParserDependencies(rdf_artifact))
    return deps

Return a set of knowledgebase path dependencies.

Args:
    rdf_artifact: RDF artifact object.

Returns:
    A set of strings for the required kb objects e.g.
    ["users.appdata", "systemroot"]
codesearchnet
def _coords2idx(self, coords):
    x = self._coords2vec(coords)
    idx = self._kd.query(x, p=self._metric_p,
                         distance_upper_bound=self._max_pix_scale)
    return idx[1]

Converts from sky coordinates to pixel indices.

Args:
    coords (:obj:`astropy.coordinates.SkyCoord`): Sky coordinates.

Returns:
    Pixel indices of the coordinates, with the same shape as the input
    coordinates. Pixels which are outside the map are given an index
    equal to the number of pixels in the map.
juraj-google-style
def indentjoin(strlist, indent='\n ', suffix=''):
    indent_ = indent
    strlist = list(strlist)
    if len(strlist) == 0:
        return ''
    return indent_ + indent_.join([six.text_type(str_) + suffix
                                   for str_ in strlist])

r"""
Convenience indentjoin.

Similar to '\n '.join(strlist) but the indent is also prefixed.

Args:
    strlist (?):
    indent (str):
    suffix (str):

Returns:
    str: joined list
codesearchnet
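A quick illustration of the prefix-indent behaviour (the indent is passed explicitly here to avoid relying on the default):

indentjoin(['a', 'b', 'c'], indent='\n  ', suffix=',')
# -> '\n  a,\n  b,\n  c,'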
def init(self, address, hard_reset=False):
    self.address = address
    if hard_reset:
        pass
    for i in range(Dongle.PORT_RETRIES):
        try:
            logger.debug('Setting up BGAPI, attempt {}/{}'.format(
                i + 1, Dongle.PORT_RETRIES))
            self.api = BlueGigaAPI(port=self.address, callbacks=self,
                                   baud=Dongle.BAUDRATE, timeout=DEF_TIMEOUT)
            self.api.start_daemon()
            break
        except serial.serialutil.SerialException as e:
            logger.debug('Failed to init BlueGigaAPI: {}, '
                         'attempt {}/{}'.format(e, i + 1,
                                                Dongle.PORT_RETRIES))
            time.sleep(0.1)
    if self.api is None:
        return False
    time.sleep(0.5)
    self.get_supported_connections()
    logger.info('Dongle supports {} connections'.format(
        self.supported_connections))
    if self.supported_connections == -1:
        logger.error('Failed to retrieve number of supported connections '
                     'from the dongle! (try reinserting it)')
        return False
    self.conn_state = {x: self._STATE_IDLE
                       for x in range(self.supported_connections)}
    self.reset()
    self._cbthread = threading.Thread(target=self._cbthreadfunc)
    self._cbthread.setDaemon(True)
    self._cbthread_q = Queue()
    self._cbthread.start()
    return True

Open the serial connection to a dongle at the supplied address.

Args:
    address (str): the serial port address of the BLED112 dongle,
        e.g. 'COM5'
    hard_reset (bool): not currently used

Returns:
    True if a connection with the dongle was established,
    False otherwise.
juraj-google-style
def __call__(self, fn):
    def loop(app, *args, **kwargs):
        r = []
        arg_data = app.tcex.playbook.read(getattr(app.args, self.arg))
        arg_type = app.tcex.playbook.variable_type(
            getattr(app.args, self.arg))
        if not isinstance(arg_data, list):
            arg_data = [arg_data]
        if not arg_data:
            app.tcex.exit(1, 'No data retrieved for arg ({}).'.format(
                self.arg))
        for s in arg_data:
            if s is None and self.default is not None:
                s = self.default
                app.tcex.log.debug(
                    'a null input was provided, using default value '
                    '"{}" instead.'.format(s)
                )
            if self.fail_on is not None:
                if s in self.fail_on:
                    app.tcex.playbook.exit(
                        1,
                        'Arg value for IterateOnArg matched fail_on value '
                        '({}).'.format(self.fail_on),
                    )
            if (
                arg_type not in ['Binary', 'BinaryArray']
                and app.tcex.log.getEffectiveLevel() == 10
            ):
                log_string = str(s)
                if len(log_string) > 100:
                    log_string = '{} ...'.format(log_string[:100])
                app.tcex.log.debug('input value: {}'.format(log_string))
            args_list = list(args)
            try:
                args_list[0] = s
            except IndexError:
                args_list.append(s)
            args = tuple(args_list)
            r.append(fn(app, *args, **kwargs))
        return r

    return loop

Implement __call__ function for decorator.

Args:
    fn (function): The decorated function.

Returns:
    function: The custom decorator function.
juraj-google-style
def create(self, batch_outs):
    raise NotImplementedError('Must be implemented in subclasses.')

Creates the initial results from the first batch outputs.

Args:
    batch_outs: A list of batch-level outputs.
github-repos
def fill_slot(self, filler_pipeline_key, slot, value):
    if not isinstance(filler_pipeline_key, db.Key):
        filler_pipeline_key = db.Key(filler_pipeline_key)

    if _TEST_MODE:
        slot._set_value_test(filler_pipeline_key, value)
    else:
        encoded_value = json.dumps(value, sort_keys=True,
                                   cls=mr_util.JsonEncoder)
        value_text = None
        value_blob = None
        if len(encoded_value) <= _MAX_JSON_SIZE:
            value_text = db.Text(encoded_value)
        else:
            value_blob = _write_json_blob(encoded_value,
                                          filler_pipeline_key.name())

        def txn():
            slot_record = db.get(slot.key)
            if slot_record is None:
                raise UnexpectedPipelineError(
                    'Tried to fill missing slot "%s" '
                    'by pipeline ID "%s" with value: %r'
                    % (slot.key, filler_pipeline_key.name(), value))
            slot_record.filler = filler_pipeline_key
            slot_record.value_text = value_text
            slot_record.value_blob = value_blob
            slot_record.status = _SlotRecord.FILLED
            slot_record.fill_time = self._gettime()
            slot_record.put()
            task = taskqueue.Task(
                url=self.barrier_handler_path,
                params=dict(
                    slot_key=slot.key,
                    use_barrier_indexes=True),
                headers={'X-Ae-Slot-Key': slot.key,
                         'X-Ae-Filler-Pipeline-Key': filler_pipeline_key})
            task.add(queue_name=self.queue_name, transactional=True)

        db.run_in_transaction_options(
            db.create_transaction_options(propagation=db.ALLOWED), txn)

    self.session_filled_output_names.add(slot.name)

Fills a slot, enqueueing a task to trigger pending barriers.

Args:
    filler_pipeline_key: db.Key or stringified key of the
        _PipelineRecord that filled this slot.
    slot: The Slot instance to fill.
    value: The serializable value to assign.

Raises:
    UnexpectedPipelineError if the _SlotRecord for the 'slot' could not
    be found in the Datastore.
juraj-google-style
def search(self, *arg, **kw):
    output = {'cant_results': 0,
              'matched_terms': defaultdict(set),
              'results': {},
              'runtime': 0}
    indexes = self.indexes()
    models = kw.get('models', list(self._entities.values()))
    if sys.version_info[0] < 3:
        models = [self._entities.get(model, None)
                  if isinstance(model, (str, unicode)) else model
                  for model in models]
        models = filter(lambda x: x is not None, models)
    else:
        models = [self._entities.get(model, None)
                  if isinstance(model, str) else model
                  for model in models]
        models = [x for x in models if x is not None]
    if models == [] or not models:
        models = list(self._entities.values())
    if self.debug:
        print('SEARCHING ON MODELS -> ', models)
    indexes = [m._pw_index_ for m in models if hasattr(m, '_pw_index_')]
    if indexes == []:
        return output
    runtime, cant = 0, 0
    ma = defaultdict(set)
    for index in indexes:
        res = index.search(*arg, **kw)
        runtime += res['runtime']
        cant += res['cant_results']
        if res['cant_results'] > 0:
            output['results'][index._name] = {
                'items': res['results'],
                'matched_terms': res['matched_terms']}
            for k, ts in list(res['matched_terms'].items()):
                for t in ts:
                    ma[k].add(t)
    output['cant_results'] = cant
    output['matched_terms'] = {k: list(v) for k, v in list(ma.items())}
    output['runtime'] = runtime
    return output

A full search function.

This allows you to search an expression using the following arguments.

Args:
    query (str): The search string expression.

Optional Args:
    - include_entity (bool): include in each result the entity values
      associated with the fields stored.
    - add_wildcards (bool): set it if you want to consider matches that
      have prefixes or suffixes of the query.
    - something (bool): set `add_wildcards` in case of no results for
      the query.
    - fields (list): specify the field names that you want to consider.
    - except_fields (list): specify the field names to not consider in
      the search.
    - models (list): a list of model names to search, or even the
      models from the database.

Returns:
    (dict): A python dictionary with the results.
codesearchnet
def Run(self, force=False):
    if not self.locked:
        raise aff4.LockError("CronJob must be locked for Run() to be "
                             "called.")

    self.KillOldFlows()

    current_flow_urn = self.Get(self.Schema.CURRENT_FLOW_URN)
    if current_flow_urn:
        current_flow = aff4.FACTORY.Open(current_flow_urn, token=self.token)
        runner = current_flow.GetRunner()
        if not runner.IsRunning():
            if runner.context.state == rdf_flow_runner.FlowContext.State.ERROR:
                self.Set(
                    self.Schema.LAST_RUN_STATUS,
                    rdf_cronjobs.CronJobRunStatus(
                        status=rdf_cronjobs.CronJobRunStatus.Status.ERROR))
                stats_collector_instance.Get().IncrementCounter(
                    "cron_job_failure", fields=[self.urn.Basename()])
            else:
                self.Set(
                    self.Schema.LAST_RUN_STATUS,
                    rdf_cronjobs.CronJobRunStatus(
                        status=rdf_cronjobs.CronJobRunStatus.Status.OK))
                start_time = self.Get(self.Schema.LAST_RUN_TIME)
                elapsed = time.time() - start_time.AsSecondsSinceEpoch()
                stats_collector_instance.Get().RecordEvent(
                    "cron_job_latency", elapsed,
                    fields=[self.urn.Basename()])
            self.DeleteAttribute(self.Schema.CURRENT_FLOW_URN)
            self.Flush()

    if not force and not self.DueToRun():
        return

    cron_args = self.Get(self.Schema.CRON_ARGS)
    cron_args.flow_runner_args.base_session_id = self.urn
    flow_urn = flow.StartAFF4Flow(
        runner_args=cron_args.flow_runner_args,
        args=cron_args.flow_args,
        token=self.token,
        sync=False)
    self.Set(self.Schema.CURRENT_FLOW_URN, flow_urn)
    self.Set(self.Schema.LAST_RUN_TIME, rdfvalue.RDFDatetime.Now())
    self.Flush()

Do the actual work of the Cron. Will first check if DueToRun is True.

CronJob object must be locked (i.e. opened via OpenWithLock) for Run()
to be called.

Args:
    force: If True, the job will run no matter what (i.e. even if
        DueToRun() returns False).

Raises:
    LockError: if the object is not locked.
juraj-google-style
def parse(self, key, value):
    if value is not None:
        try:
            return self._parser(value)
        except Exception:
            raise ParsingError('Error parsing {}'.format(key))
    elif self._default is not SENTINAL:
        return self._default
    else:
        raise KeyError(key)

Parse the environment value for a given key against the schema.

Args:
    key: The name of the environment variable.
    value: The value to be parsed.
codesearchnet
def score_intersect(self, term1, term2, **kwargs):
    t1_kde = self.kde(term1, **kwargs)
    t2_kde = self.kde(term2, **kwargs)
    overlap = np.minimum(t1_kde, t2_kde)
    return np.trapz(overlap)

Compute the geometric area of the overlap between the kernel density
estimates of two terms.

Args:
    term1 (str)
    term2 (str)

Returns: float
juraj-google-style
def loopUntil(self, condition=None, timeout: float = 0) -> Iterator[object]:
    endTime = time.time() + timeout
    while True:
        test = condition and condition()
        if test:
            yield test
            return
        elif timeout and time.time() > endTime:
            yield False
            return
        else:
            yield test
        self.waitOnUpdate(endTime - time.time() if timeout else 0)

Iterate until condition is met, with optional timeout in seconds.

The yielded value is that of the condition or False when timed out.

Args:
    condition: Predicate function that is tested after every network
        update.
    timeout: Maximum time in seconds to wait. If 0 then no timeout is
        used.
codesearchnet
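A hedged usage sketch, assuming an object (`ib`) exposing this method and a condition that becomes true after some network updates:

waiter = ib.loopUntil(lambda: ib.isConnected(), timeout=5)
for status in waiter:
    pass  # one iteration per network update
# After the loop, status holds the condition value, or False on timeout.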
def merge(self,
          decision_point: pg.geno.DecisionPoint,
          parent_decisions: List[Union[int, List[int], float, None]],
          global_state: pg.geno.AttributeDict,
          step: int) -> Union[int, List[int], float]:

Implementation of point-wise decision making.

Args:
    decision_point: Decision point for recombination.
    parent_decisions: A list of parent's decisions. Each item should be
        an int as an active single-choice decision, a list of int as
        active multi-choice decisions, a float as an active float
        decision, or None for an inactive decision point (whose parent
        space is not chosen).
    global_state: An optional keyword argument as the global state.
        Subclass can omit.
    step: An optional keyword argument as the current step. Subclass
        can omit.

Returns:
    An int, list of int or float as the decision made for the decision
    point.
github-repos
def ctc_unique_labels(labels, name=None):
    with ops.name_scope(name, 'ctc_unique_labels', [labels]):
        labels = ops.convert_to_tensor(labels, name='labels')

        def _unique(x):
            u = array_ops.unique(x)
            y = array_ops.pad(u.y,
                              [[0, _get_dim(u.idx, 0) - _get_dim(u.y, 0)]])
            y = math_ops.cast(y, dtypes.int64)
            return [y, u.idx]

        return map_fn.map_fn(_unique, labels,
                             dtype=[dtypes.int64, dtypes.int32])

Get unique labels and indices for batched labels for `tf.nn.ctc_loss`.

For use with the `tf.nn.ctc_loss` optional argument `unique`: this op
can be used to preprocess labels in the input pipeline for better
speed/memory use when computing the ctc loss on TPU.

Example:
    ctc_unique_labels([[3, 4, 4, 3]]) ->
        unique labels padded with 0: [[3, 4, 0, 0]]
        indices of original labels in unique: [0, 1, 1, 0]

Args:
    labels: tensor of shape [batch_size, max_label_length] padded
        with 0.
    name: A name for this `Op`. Defaults to "ctc_unique_labels".

Returns:
    tuple of
    - unique labels, tensor of shape `[batch_size, max_label_length]`
    - indices into unique labels, shape `[batch_size, max_label_length]`
github-repos
def port_add(br, port, may_exist=False, internal=False):
    param_may_exist = _param_may_exist(may_exist)
    cmd = 'ovs-vsctl {2}add-port {0} {1}'.format(br, port, param_may_exist)
    if internal:
        cmd += ' -- set interface {0} type=internal'.format(port)
    result = __salt__['cmd.run_all'](cmd)
    retcode = result['retcode']
    return _retcode_to_bool(retcode)

Creates a new port named port on bridge br.

Returns:
    True on success, else False.

Args:
    br: A string - bridge name
    port: A string - port name
    may_exist: Bool, if False - attempting to create a port that exists
        returns False.
    internal: A boolean to create an internal interface if one does not
        exist.

.. versionadded:: 2016.3.0

CLI Example:

.. code-block:: bash

    salt '*' openvswitch.port_add br0 8080
codesearchnet
def to_batched_tensor_list(element_spec, element):
    return _to_tensor_list_helper(
        lambda state, spec, component:
            state + spec._to_batched_tensor_list(component),
        element_spec, element)

Returns a tensor list representation of the element.

Args:
    element_spec: A nested structure of `tf.TypeSpec` objects
        representing the element type specification.
    element: The element to convert to tensor list representation.

Returns:
    A tensor list representation of `element`.

Raises:
    ValueError: If `element_spec` and `element` do not have the same
        number of elements, or if the two structures are not nested in
        the same way, or if the rank of any of the tensors in the tensor
        list representation is 0.
    TypeError: If `element_spec` and `element` differ in the type of
        sequence in any of their substructures.
github-repos
def detect_shadowing_definitions(self, contract):
    result = []
    for function in contract.functions + contract.modifiers:
        if function.contract != contract:
            continue
        for variable in function.variables:
            overshadowed = []
            for scope_contract in [contract] + contract.inheritance:
                for scope_function in scope_contract.functions:
                    if (variable.name == scope_function.name
                            and scope_function.contract == scope_contract):
                        overshadowed.append(
                            (self.OVERSHADOWED_FUNCTION,
                             scope_contract.name, scope_function))
                for scope_modifier in scope_contract.modifiers:
                    if (variable.name == scope_modifier.name
                            and scope_modifier.contract == scope_contract):
                        overshadowed.append(
                            (self.OVERSHADOWED_MODIFIER,
                             scope_contract.name, scope_modifier))
                for scope_event in scope_contract.events:
                    if (variable.name == scope_event.name
                            and scope_event.contract == scope_contract):
                        overshadowed.append(
                            (self.OVERSHADOWED_EVENT,
                             scope_contract.name, scope_event))
                for scope_state_variable in scope_contract.variables:
                    if (variable.name == scope_state_variable.name
                            and scope_state_variable.contract
                            == scope_contract):
                        overshadowed.append(
                            (self.OVERSHADOWED_STATE_VARIABLE,
                             scope_contract.name, scope_state_variable))
            if overshadowed:
                result.append((contract.name, function.name, variable,
                               overshadowed))
    return result

Detects local variables that shadow functions, modifiers, events, or
state variables defined in the contract or its ancestors. Any such
definitions are returned in a list.

Returns:
    list of tuple: (type, contract name, definition)
codesearchnet
def _validate_state_root(self, state_root):
    if self._state_root_regex.fullmatch(state_root) is None:
        LOGGER.debug('Invalid state root: %s', state_root)
        raise _ResponseFailed(self._status.INVALID_ROOT)

Validates a state root, raising a ResponseFailed error if invalid.

Args:
    state_root (str): The state_root to validate

Raises:
    ResponseFailed: The state_root was invalid, and a status of
        INVALID_ROOT will be sent with the response.
juraj-google-style
def _build_graph(self):
    q = data_flow_ops.FIFOQueue(1, 'float')
    init = q.enqueue(1.0)
    x = q.dequeue()
    q_inc = q.enqueue(x + 1)
    return (init, q_inc)

Builds a graph that enqueues and dequeues a single float.

Returns:
    A tuple with the graph init tensor and graph output tensor.
github-repos
def frag2text(endpoint, stype, selector,
              clean=False, raw=False, verbose=False):
    try:
        return main(endpoint, stype, selector, clean, raw, verbose)
    except StandardError as err:
        return err

Returns Markdown text of the selected fragment.

Args:
    endpoint: URL, file, or HTML string
    stype: { 'css' | 'xpath' }
    selector: CSS selector or XPath expression

Returns:
    Markdown text

Options:
    clean: cleans fragment (lxml.html.clean defaults)
    raw: returns raw HTML fragment
    verbose: show http status, encoding, headers
juraj-google-style
def parse_content_type(headers: MutableMapping) -> Tuple[Optional[str], str]:
    content_type = headers.get('content-type')
    if not content_type:
        return None, 'utf-8'
    else:
        type_, parameters = cgi.parse_header(content_type)
        encoding = parameters.get('charset', 'utf-8')
        return type_, encoding

Find content-type and encoding of the response.

Args:
    headers: Response headers

Returns:
    :py:class:`tuple` (content-type, encoding)
codesearchnet
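Two illustrative calls, grounded in the branches above:

parse_content_type({'content-type': 'text/html; charset=ISO-8859-1'})
# -> ('text/html', 'ISO-8859-1')
parse_content_type({})
# -> (None, 'utf-8')  -- missing header falls back to utf-8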
def from_function(cls, function):
    module_name = function.__module__
    function_name = function.__name__
    class_name = ''

    function_source_hasher = hashlib.sha1()
    try:
        source = inspect.getsource(function)
        if sys.version_info[0] >= 3:
            source = source.encode()
        function_source_hasher.update(source)
        function_source_hash = function_source_hasher.digest()
    except (IOError, OSError, TypeError):
        function_source_hash = b''

    return cls(module_name, function_name, class_name,
               function_source_hash)

Create a FunctionDescriptor from a function instance.

This function is used to create the function descriptor from a python
function. If a function is a class function, it should not be used by
this function.

Args:
    cls: Current class which is required argument for classmethod.
    function: the python function used to create the function
        descriptor.

Returns:
    The FunctionDescriptor instance created according to the function.
codesearchnet
def put(self, type: Type[T], item: T) -> None:
    LOGGER.info("Getting SinkHandlers for \"{type}\"".format(
        type=type.__name__))
    try:
        handlers = self._put_types[type]
    except KeyError:
        try:
            LOGGER.info("Building new SinkHandlers for \"{type}\"".format(
                type=type.__name__))
            handlers = self._put_handlers(type)
        except NoConversionError:
            handlers = None
        self._put_types[type] = handlers

    LOGGER.info("Creating new PipelineContext")
    context = self._new_context()

    LOGGER.info("Sending item \"{item}\" to SourceHandlers".format(
        item=item))
    if handlers is not None:
        for handler in handlers:
            handler.put(item, context)

Puts an object into the data pipeline. The object may be transformed
into a new type for insertion if necessary.

Args:
    type: The type of the object to be inserted.
    item: The object to be inserted into the data pipeline.
juraj-google-style
def _get(self, url, params=None):
    if not params:
        params = {}
    params.update({'login': self.login, 'key': self.key})
    response_json = requests.get(self.api_url + url, params).json()
    return self._process_response(response_json)

Used by every other method, it makes a GET request with the given
params.

Args:
    url (str): relative path of a specific service (account_info, ...).
    params (:obj:`dict`, optional): contains parameters to be sent in
        the GET request.

Returns:
    dict: results of the response of the GET request.
codesearchnet
def __init__(self, top_probs=5):
    self.top_probs = top_probs
    self._sess = None
    self._tf_input_var = None
    self._tf_predict_var = None
    self._model_name = None
    self._latest_ckpt_name = None
    self._latest_ckpt_time = None

Create a new instance of this model.

`BaseModel` is an interface and should only be instantiated via a
subclass.

Args:
    top_probs (int): Number of classes to display per result. For
        instance, VGG16 has 1000 classes; we don't want to display a
        visualization for every single possibility. Defaults to 5.
juraj-google-style
def invoice_access(request, access_code):
    invoices = commerce.Invoice.objects.filter(
        user__attendee__access_code=access_code,
    ).order_by('-issue_time')

    if not invoices:
        raise Http404()

    unpaid = invoices.filter(status=commerce.Invoice.STATUS_UNPAID)
    paid = invoices.filter(status=commerce.Invoice.STATUS_PAID)

    if unpaid:
        invoice = unpaid[0]
    elif paid:
        invoice = paid[0]
    else:
        invoice = invoices[0]

    return redirect('invoice', invoice.id, access_code)

Redirects to an invoice for the attendee that matches the given access
code, if any.

If the attendee has multiple invoices, we use the following tie-break:

- If there's an unpaid invoice, show that, otherwise
- If there's a paid invoice, show the most recent one, otherwise
- Show the most recent invoice of all.

Arguments:
    access_code (castable to int): The access code for the user whose
        invoice you want to see.

Returns:
    redirect: Redirect to the selected invoice for that user.

Raises:
    Http404: If the user has no invoices.
codesearchnet
def _req(self, req):
    logger.debug('DUT> %s', req)
    self._log and self.pause()
    times = 3
    res = None
    while times:
        times = times - 1
        try:
            self._sendline(req)
            self._expect(req)
            line = None
            res = []
            while True:
                line = self._readline()
                logger.debug('Got line %s', line)
                if line == 'Done':
                    break
                if line:
                    res.append(line)
            break
        except:
            logger.exception('Failed to send command')
            self.close()
            self._init()
    self._log and self.resume()
    return res

Send command and wait for response.

The command will be repeated up to 3 times in case of data loss on the
serial port.

Args:
    req (str): Command to send; please do not include a trailing
        newline.

Returns:
    [str]: The output lines
codesearchnet
def _get_snpeff_transcript(self, transcript_info):
    transcript = Transcript(
        hgnc_symbol=transcript_info.get('Gene_Name'),
        transcript_id=transcript_info.get('Feature'),
        ensembl_id=transcript_info.get('Gene_ID'),
        biotype=transcript_info.get('Transcript_BioType'),
        consequence=transcript_info.get('Annotation'),
        exon=transcript_info.get('Rank'),
        HGVSc=transcript_info.get('HGVS.c'),
        HGVSp=transcript_info.get('HGVS.p')
    )
    return transcript

Create a transcript based on the snpeff annotation.

Args:
    transcript_info (dict): A dict with snpeff info

Returns:
    transcript (puzzle.models.Transcript): A Transcript
juraj-google-style
def from_index_amount(cls, matrixpos, amt):
    f = np.identity(3)
    f[matrixpos] += amt
    return cls(f)

Factory method for constructing a Deformation object from a matrix
position and amount.

Args:
    matrixpos (tuple): tuple corresponding to the matrix position to
        have a perturbation added
    amt (float): amount to add to the identity matrix at position
        matrixpos
codesearchnet
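For instance, a 1% xy shear (index notation follows numpy):

d = Deformation.from_index_amount((0, 1), 0.01)
# Underlying matrix:
# [[1.0, 0.01, 0.0],
#  [0.0, 1.0,  0.0],
#  [0.0, 0.0,  1.0]]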
def add_criterion(self, name, priority, and_or, search_type, value):
    criterion = SearchCriteria(name, priority, and_or, search_type, value)
    self.criteria.append(criterion)

Add a search criteria object to a smart group.

Args:
    name: String Criteria type name (e.g. "Application Title")
    priority: Int or Str number priority of criterion.
    and_or: Str, either "and" or "or".
    search_type: String Criteria search type. (e.g. "is", "is not",
        "member of", etc). Construct a SmartGroup with the criteria of
        interest in the web interface to determine what range of values
        are available.
    value: String value to search for/against.
codesearchnet
def _get_error_generator(type, obj, schema_dir=None, version=DEFAULT_VER,
                         default='core'):
    if schema_dir is None:
        schema_dir = os.path.abspath(os.path.dirname(__file__) +
                                     '/schemas-' + version + '/')
    try:
        schema_path = find_schema(schema_dir, type)
        schema = load_schema(schema_path)
    except (KeyError, TypeError):
        try:
            schema_path = find_schema(schema_dir, default)
            schema = load_schema(schema_path)
        except (KeyError, TypeError):
            if schema_dir is not None:
                return None
            raise SchemaInvalidError(
                "Cannot locate a schema for the object's type, nor the "
                "base schema ({}.json).".format(default))

    if type == 'observed-data' and schema_dir is None:
        schema['allOf'][1]['properties']['objects'] = {
            'objects': {'type': 'object', 'minProperties': 1}}

    validator = load_validator(schema_path, schema)
    try:
        error_gen = validator.iter_errors(obj)
    except schema_exceptions.RefResolutionError:
        raise SchemaInvalidError('Invalid JSON schema: a JSON reference '
                                 'failed to resolve')
    return error_gen

Get a generator for validating against the schema for the given object
type.

Args:
    type (str): The object type to find the schema for.
    obj: The object to be validated.
    schema_dir (str): The path in which to search for schemas.
    version (str): The version of the STIX specification to validate
        against. Only used to find base schemas when schema_dir is None.
    default (str): If the schema for the given type cannot be found,
        use the one with this name instead.

Returns:
    A generator for errors found when validating the object against the
    appropriate schema, or None if schema_dir is not None and the
    schema cannot be found.
codesearchnet
def distance2bbox(points, distance: torch.Tensor,
                  reg_scale: float) -> torch.Tensor:
    reg_scale = abs(reg_scale)
    top_left_x = points[..., 0] - (0.5 * reg_scale + distance[..., 0]) * (
        points[..., 2] / reg_scale)
    top_left_y = points[..., 1] - (0.5 * reg_scale + distance[..., 1]) * (
        points[..., 3] / reg_scale)
    bottom_right_x = points[..., 0] + (0.5 * reg_scale + distance[..., 2]) * (
        points[..., 2] / reg_scale)
    bottom_right_y = points[..., 1] + (0.5 * reg_scale + distance[..., 3]) * (
        points[..., 3] / reg_scale)
    bboxes = torch.stack([top_left_x, top_left_y,
                          bottom_right_x, bottom_right_y], -1)
    return corners_to_center_format(bboxes)

Decodes edge-distances into bounding box coordinates.

Args:
    points (`torch.Tensor`): (batch_size, num_boxes, 4) or
        (num_boxes, 4) format, representing [x_center, y_center,
        width, height].
    distance (`torch.Tensor`): (batch_size, num_boxes, 4) or
        (num_boxes, 4), representing distances from the point to the
        left, top, right, and bottom boundaries.
    reg_scale (`float`): Controls the curvature of the Weighting
        Function.

Returns:
    `torch.Tensor`: Bounding boxes in (batch_size, num_boxes, 4) or
    (num_boxes, 4) format, representing [x_center, y_center, width,
    height].
github-repos
def has_platform(self, platform):
    if platform and not isinstance(platform, dict):
        parts = platform.split('/')
        if len(parts) > 3 or len(parts) < 1:
            raise InvalidArgument(
                '"{0}" is not a valid platform descriptor'.format(platform)
            )
        platform = {'os': parts[0]}
        if len(parts) > 2:
            platform['variant'] = parts[2]
        if len(parts) > 1:
            platform['architecture'] = parts[1]
    return normalize_platform(
        platform, self.client.version()
    ) in self.attrs['Platforms']

Check whether the given platform identifier is available for this
digest.

Args:
    platform (str or dict): A string using the ``os[/arch[/variant]]``
        format, or a platform dictionary.

Returns:
    (bool): ``True`` if the platform is recognized as available,
    ``False`` otherwise.

Raises:
    :py:class:`docker.errors.InvalidArgument`
        If the platform argument is not a valid descriptor.
juraj-google-style
def get_template_object(template_file=''):
    jinja_template_paths_obj = []

    if TEMPLATES_PATH:
        external_templates = pathlib.Path(
            TEMPLATES_PATH).expanduser().resolve()
        assert os.path.isdir(external_templates), \
            'External template path "{0}" not found'.format(
                external_templates)
        jinja_template_paths_obj.append(external_templates)

    jinja_template_paths_obj.append(LOCAL_TEMPLATES)
    jinja_template_paths = [str(path) for path in jinja_template_paths_obj]
    jinjaenv = jinja2.Environment(
        loader=jinja2.FileSystemLoader(jinja_template_paths))

    try:
        template = jinjaenv.get_template(template_file)
    except jinja2.TemplateNotFound:
        message = 'Unable to find template "{template_file}" in paths ' \
                  '{paths}'.format(template_file=template_file,
                                   paths=jinjaenv.loader.searchpath)
        LOG.error(message)
        raise ForemastTemplateNotFound(message)

    return template

Retrieve template.

Args:
    template_file (str): Name of template file.

Returns:
    jinja2.Template: Template ready to render.

Raises:
    AssertionError: Configured path for templates does not exist.
    :obj:`foremast.exceptions.ForemastTemplateNotFound`: Requested
        template is not available.
juraj-google-style
def load(path: str) -> Callable[..., Dict[str, EventSetNode]]:
    g = _load_graph(path)

    inputs = g.named_inputs
    assert inputs is not None
    input_names = list(inputs.keys())

    @compile
    def fn(*args: EventSetNode,
           **kwargs: EventSetNode) -> Dict[str, EventSetNode]:
        kwargs = _kwargs_from_args_and_kwargs(input_names, args, kwargs)
        return g.apply_on_inputs(named_inputs=kwargs)

    fn.__signature__ = inspect.signature(fn).replace(
        parameters=[
            inspect.Parameter(
                name=k,
                annotation=EventSetNode,
                kind=inspect.Parameter.POSITIONAL_OR_KEYWORD)
            for k in inputs
        ])
    return fn

Loads a compiled Temporian function from a file.

The loaded function receives the same positional and keyword arguments
and applies the same operator graph to its inputs as when it was saved.

Args:
    path: The path to load the function from.

Returns:
    The loaded function.
github-repos
def encipher_shift(plaintext, plain_vocab, shift):
    ciphertext = []
    cipher = ShiftEncryptionLayer(plain_vocab, shift)

    for _, sentence in enumerate(plaintext):
        cipher_sentence = []
        for _, character in enumerate(sentence):
            encrypted_char = cipher.encrypt_character(character)
            cipher_sentence.append(encrypted_char)
        ciphertext.append(cipher_sentence)

    return ciphertext

Encrypt plain text with a single shift layer.

Args:
    plaintext (list of list of Strings): a list of plain text to
        encrypt.
    plain_vocab (list of Integer): unique vocabularies being used.
    shift (Integer): number of shift, shift to the right if shift is
        positive.

Returns:
    ciphertext (list of Strings): encrypted plain text.
juraj-google-style
def clustering_factory(clf):
    required_methods = ['fit', 'fit_predict']

    for method in required_methods:
        if not hasattr(clf, method):
            raise TypeError('"{}" is not in clf. Did you '
                            'pass a clusterer instance?'.format(method))

    additional_methods = {
        'plot_silhouette': plot_silhouette,
        'plot_elbow_curve': plot_elbow_curve
    }

    for key, fn in six.iteritems(additional_methods):
        if hasattr(clf, key):
            warnings.warn('"{}" method already in clf. '
                          'Overriding anyway. This may '
                          'result in unintended behavior.'.format(key))
        setattr(clf, key, types.MethodType(fn, clf))
    return clf

Embeds scikit-plot plotting methods in an sklearn clusterer instance.

Args:
    clf: Scikit-learn clusterer instance

Returns:
    The same scikit-learn clusterer instance passed in **clf** with
    embedded scikit-plot instance methods.

Raises:
    TypeError: If **clf** does not contain the instance methods
        necessary for scikit-plot instance methods.
juraj-google-style
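A minimal sketch of the factory in use; `X` is an assumed (n_samples, n_features) array:

from sklearn.cluster import KMeans

kmeans = clustering_factory(KMeans(n_clusters=4, random_state=0))
kmeans.plot_silhouette(X)   # method embedded by the factory
kmeans.plot_elbow_curve(X)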
def http_download(url, target_path):
    r = requests.get(url, stream=True)
    with open(target_path, 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
    return target_path

Download a file to a local path.

Args:
    url (string): url request path
    target_path (string): download destination
juraj-google-style
def convert_sbml_model(model):
    biomass_reactions = set()
    for reaction in model.reactions:
        if reaction.id not in model.limits:
            lower, upper = parse_flux_bounds(reaction)
            if lower is not None or upper is not None:
                model.limits[reaction.id] = reaction.id, lower, upper
        objective = parse_objective_coefficient(reaction)
        if objective is not None and objective != 0:
            biomass_reactions.add(reaction.id)

    if len(biomass_reactions) == 1:
        model.biomass_reaction = next(iter(biomass_reactions))

    convert_model_entries(model)

    if model.extracellular_compartment is None:
        extracellular = detect_extracellular_compartment(model)
        model.extracellular_compartment = extracellular
    convert_exchange_to_compounds(model)

Convert raw SBML model to extended model.

Args:
    model: :class:`NativeModel` obtained from :class:`SBMLReader`.
juraj-google-style
def _VerifyValues(self, pool_func, input_sizes, ksize, strides, padding,
                  expected):
    for data_format in GetTestConfigs():
        self._VerifyOneTest(pool_func, input_sizes, ksize, strides,
                            padding, data_format, expected)

Verifies the output values of the pooling function.

Args:
    pool_func: Function to be called, co.MaxPool, co.AvgPool, or the
        Lua version.
    input_sizes: Input tensor dimensions.
    ksize: The kernel size dimensions
    strides: The stride dimensions
    padding: Padding type.
    expected: An array containing the expected operation outputs.
github-repos
def is_monotonic(neurite, tol):
    for node in neurite.iter_sections():
        sec = node.points
        for point_id in range(len(sec) - 1):
            if sec[point_id + 1][COLS.R] > sec[point_id][COLS.R] + tol:
                return False
        if (node.parent is not None
                and sec[0][COLS.R] > node.parent.points[-1][COLS.R] + tol):
            return False
    return True

Check if neurite tree is monotonic, i.e. each child section has radii
smaller than or equal to its parent's.

Args:
    neurite(Neurite): neurite to operate on
    tol(float): tolerance

Returns:
    True if neurite is monotonic
codesearchnet
def ParseCall(self, parser_mediator, query, row, **unused_kwargs):
    query_hash = hash(query)
    guid = self._GetRowValue(query_hash, row, 'guid')
    is_incoming = self._GetRowValue(query_hash, row, 'is_incoming')
    videostatus = self._GetRowValue(query_hash, row, 'videostatus')

    try:
        aux = guid
        if aux:
            aux_list = aux.split('-')
            src_aux = aux_list[0]
            dst_aux = aux_list[1]
        else:
            src_aux = 'Unknown [no GUID]'
            dst_aux = 'Unknown [no GUID]'
    except IndexError:
        src_aux = 'Unknown [{0:s}]'.format(guid)
        dst_aux = 'Unknown [{0:s}]'.format(guid)

    if is_incoming == '0':
        user_start_call = True
        source = src_aux

        ip_address = self._GetRowValue(query_hash, row, 'ip_address')
        if ip_address:
            destination = '{0:s} <{1:s}>'.format(dst_aux, ip_address)
        else:
            destination = dst_aux
    else:
        user_start_call = False
        source = src_aux
        destination = dst_aux

    call_identifier = self._GetRowValue(query_hash, row, 'id')

    event_data = SkypeCallEventData()
    event_data.dst_call = destination
    event_data.offset = call_identifier
    event_data.query = query
    event_data.src_call = source
    event_data.user_start_call = user_start_call
    event_data.video_conference = videostatus == '3'

    timestamp = self._GetRowValue(query_hash, row, 'try_call')
    event_data.call_type = 'WAITING'
    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(date_time, 'Call from Skype')
    parser_mediator.ProduceEventWithEventData(event, event_data)

    try:
        timestamp = self._GetRowValue(query_hash, row, 'accept_call')
        timestamp = int(timestamp)
    except (ValueError, TypeError):
        timestamp = None

    if timestamp:
        event_data.call_type = 'ACCEPTED'
        date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(date_time,
                                                'Call from Skype')
        parser_mediator.ProduceEventWithEventData(event, event_data)

        try:
            call_duration = self._GetRowValue(query_hash, row,
                                              'call_duration')
            call_duration = int(call_duration)
        except (ValueError, TypeError):
            parser_mediator.ProduceExtractionWarning(
                'unable to determine when call: {0:s} was '
                'finished.'.format(call_identifier))
            call_duration = None

        if call_duration:
            timestamp += call_duration
            event_data.call_type = 'FINISHED'
            date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
            event = time_events.DateTimeValuesEvent(date_time,
                                                    'Call from Skype')
            parser_mediator.ProduceEventWithEventData(event, event_data)

Parses a call.

Args:
    parser_mediator (ParserMediator): mediates interactions between
        parsers and other components, such as storage and dfvfs.
    query (str): query that created the row.
    row (sqlite3.Row): row resulting from query.
codesearchnet
def wait_for(self, timeout=10000, interval=1000,
             asserter=lambda x: x):
    if not callable(asserter):
        raise TypeError('Asserter must be callable.')

    @retry(
        retry_on_exception=lambda ex: isinstance(ex, WebDriverException),
        stop_max_delay=timeout,
        wait_fixed=interval
    )
    def _wait_for(driver):
        asserter(driver)
        return driver

    return _wait_for(self)

Wait until the driver satisfies the given condition.

Support:
    Android iOS Web(WebView)

Args:
    timeout(int): How long we should be retrying stuff.
    interval(int): How long between retries.
    asserter(callable): The asserter func to determine the result.

Returns:
    Return the driver.

Raises:
    WebDriverException.
juraj-google-style
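A usage sketch: note that the asserter must raise a WebDriverException for the retry to trigger (the import path below is an assumption):

from macaca.webdriverexception import WebDriverException  # assumed path

def title_ready(driver):
    if 'Dashboard' not in driver.title:
        raise WebDriverException('title not ready yet')

driver.wait_for(timeout=10000, interval=500, asserter=title_ready)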
def to(self, fmt=None, filename=None):
    from pymatgen.io.xyz import XYZ
    from pymatgen.io.gaussian import GaussianInput
    from pymatgen.io.babel import BabelMolAdaptor

    fmt = '' if fmt is None else fmt.lower()
    fname = os.path.basename(filename or '')
    if fmt == 'xyz' or fnmatch(fname.lower(), '*.xyz*'):
        writer = XYZ(self)
    elif any([fmt == r or fnmatch(fname.lower(), '*.{}*'.format(r))
              for r in ['gjf', 'g03', 'g09', 'com', 'inp']]):
        writer = GaussianInput(self)
    elif (fmt == 'json' or fnmatch(fname, '*.json*')
          or fnmatch(fname, '*.mson*')):
        if filename:
            with zopen(filename, 'wt', encoding='utf8') as f:
                return json.dump(self.as_dict(), f)
        else:
            return json.dumps(self.as_dict())
    elif fmt == 'yaml' or fnmatch(fname, '*.yaml*'):
        import ruamel.yaml as yaml
        if filename:
            with zopen(fname, 'wt', encoding='utf8') as f:
                return yaml.safe_dump(self.as_dict(), f)
        else:
            return yaml.safe_dump(self.as_dict())
    else:
        m = re.search('\\.(pdb|mol|mdl|sdf|sd|ml2|sy2|mol2|cml|mrv)',
                      fname.lower())
        if not fmt and m:
            fmt = m.group(1)
        writer = BabelMolAdaptor(self)
        return writer.write_file(filename, file_format=fmt)

    if filename:
        writer.write_file(filename)
    else:
        return str(writer)

Outputs the molecule to a file or string.

Args:
    fmt (str): Format to output to. Defaults to JSON unless filename is
        provided. If fmt is specified, it overrides whatever the
        filename is. Options include "xyz", "gjf", "g03", "json". If
        you have OpenBabel installed, any of the formats supported by
        OpenBabel. Non-case sensitive.
    filename (str): If provided, output will be written to a file. If
        fmt is not specified, the format is determined from the
        filename. Default is None, i.e. string output.

Returns:
    (str) if filename is None. None otherwise.
codesearchnet
def usufyToCsvExport(d, fPath):
    from pyexcel_io import get_data
    try:
        oldData = {'OSRFramework': get_data(fPath)}
    except:
        oldData = {'OSRFramework': []}
    tabularData = _generateTabularData(d, oldData)
    from pyexcel_io import save_data
    save_data(fPath, tabularData['OSRFramework'])

Workaround to export to a CSV file.

Args:
-----
    d: Data to export.
    fPath: File path for the output file.
codesearchnet
def GetParserAndPluginNames(cls, parser_filter_expression=None):
    parser_and_plugin_names = []
    for parser_name, parser_class in cls.GetParsers(
            parser_filter_expression=parser_filter_expression):
        parser_and_plugin_names.append(parser_name)

        if parser_class.SupportsPlugins():
            for plugin_name, _ in parser_class.GetPlugins():
                parser_and_plugin_names.append(
                    '{0:s}/{1:s}'.format(parser_name, plugin_name))

    return parser_and_plugin_names

Retrieves the parser and parser plugin names.

Args:
    parser_filter_expression (Optional[str]): parser filter expression,
        where None represents all parsers and plugins.

Returns:
    list[str]: parser and parser plugin names.
juraj-google-style
def get(self, statediag, dfaaccepted):
    newstatediag = {}
    newstate = PDAState()
    newstate.id = 'AI,I'
    newstate.type = 1
    newstate.sym = '@wrapping'
    transitions = {}
    transitions[(0, 0)] = [0]
    newstate.trans = transitions
    i = 0
    newstatediag[i] = newstate
    for stateid in statediag:
        state = statediag[stateid]
        if state.type == 2:
            for state2id in dfaaccepted:
                if state.id[1] == state2id:
                    state.trans['AI,I'] = ['@wrapping']
                    break
        i = i + 1
        newstatediag[i] = state
    return newstatediag

- Remove all the POP (type 2) transitions to state 0 (non DFA accepted)
  for symbol @closing.
- Generate the accepted transitions.
- Replace DFA accepted states with a push-pop symbol and two extra
  states.

Args:
    statediag (list): The states of the PDA
    dfaaccepted (list): The list of DFA accepted states

Returns:
    list: A cleaned, smaller list of DFA states
juraj-google-style
def _ParseInternetPasswordRecord(self, parser_mediator, record):
    key = record.get('_key_', None)
    if not key or not key.startswith(b'ssgp'):
        raise errors.ParseError((
            'Unsupported Internet password record key value does not '
            'start with: "ssgp".'))

    protocol_string = codecs.decode('{0:08x}'.format(record['ptcl']), 'hex')
    protocol_string = codecs.decode(protocol_string, 'utf-8')

    event_data = KeychainInternetRecordEventData()
    event_data.account_name = self._ParseBinaryDataAsString(
        parser_mediator, record['acct'])
    event_data.comments = self._ParseBinaryDataAsString(
        parser_mediator, record['crtr'])
    event_data.entry_name = self._ParseBinaryDataAsString(
        parser_mediator, record['PrintName'])
    event_data.protocol = self._PROTOCOL_TRANSLATION_DICT.get(
        protocol_string, protocol_string)
    ssgp_hash = codecs.encode(key[4:], 'hex')
    event_data.ssgp_hash = codecs.decode(ssgp_hash, 'utf-8')
    event_data.text_description = self._ParseBinaryDataAsString(
        parser_mediator, record['desc'])
    event_data.type_protocol = self._ParseBinaryDataAsString(
        parser_mediator, record['atyp'])
    event_data.where = self._ParseBinaryDataAsString(
        parser_mediator, record['srvr'])

    date_time = self._ParseDateTimeValue(parser_mediator, record['cdat'])
    if date_time:
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_CREATION)
        parser_mediator.ProduceEventWithEventData(event, event_data)

    date_time = self._ParseDateTimeValue(parser_mediator, record['mdat'])
    if date_time:
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
        parser_mediator.ProduceEventWithEventData(event, event_data)

Extracts the information from an Internet password record.

Args:
    parser_mediator (ParserMediator): mediates interactions between
        parsers and other components, such as storage and dfvfs.
    record (dict[str, object]): database record.

Raises:
    ParseError: if Internet password record cannot be parsed.
juraj-google-style
def custom_getter(self, activation_dtype=tf.bfloat16):
    def getter_fn(getter, *args, **kwargs):
        requested_dtype = kwargs["dtype"]
        if requested_dtype in (tf.bfloat16, tf.float32):
            kwargs["dtype"] = tf.bfloat16
            kwargs["initializer"] = _EncodingInitializer(
                kwargs["initializer"], self)
            ret = self._decode_with_identity_gradient(
                getter(*args, **kwargs))
            return tf.cast(ret, activation_dtype)
        return getter(*args, **kwargs)

    return getter_fn

A custom getter that uses the encoding for bfloat16 and float32 vars.

When a bfloat16 or float32 variable is requested, an encoded float16
variable is created, which is then decoded and cast to a bfloat16
activation.

Args:
    activation_dtype: a dtype to which to convert the decoded value.

Returns:
    a function.
juraj-google-style
def is_initialised(self):
    if not self.lattice:
        raise AttributeError('Running a simulation needs the lattice '
                             'to be initialised')
    if not self.atoms:
        raise AttributeError('Running a simulation needs the atoms '
                             'to be initialised')
    if not self.number_of_jumps and not self.for_time:
        raise AttributeError('Running a simulation needs '
                             'number_of_jumps or for_time to be set')

Check whether the simulation has been initialised.

Args:
    None

Returns:
    None
juraj-google-style
def _collect_unused(self, start: GridQubit,
                    used: Set[GridQubit]) -> Set[GridQubit]:
    def collect(n: GridQubit, visited: Set[GridQubit]):
        visited.add(n)
        for m in self._c_adj[n]:
            if m not in used and m not in visited:
                collect(m, visited)

    visited = set()
    collect(start, visited)
    return visited

Lists all the qubits that are reachable from given qubit.

Args:
    start: The first qubit for which connectivity should be calculated.
        Might be a member of used set.
    used: Already used qubits, which cannot be used during the
        collection.

Returns:
    Set of qubits that are reachable from starting qubit without
    traversing any of the used qubits.
codesearchnet
def _ReadPaddingDataTypeDefinition(self, definitions_registry,
                                   definition_values, definition_name,
                                   is_member=False):
    if not is_member:
        error_message = 'data type only supported as member'
        raise errors.DefinitionReaderError(definition_name, error_message)

    definition_object = self._ReadDataTypeDefinition(
        definitions_registry, definition_values,
        data_types.PaddingDefinition, definition_name,
        self._SUPPORTED_DEFINITION_VALUES_PADDING)

    alignment_size = definition_values.get('alignment_size', None)
    if not alignment_size:
        error_message = 'missing alignment_size'
        raise errors.DefinitionReaderError(definition_name, error_message)

    try:
        int(alignment_size)
    except ValueError:
        error_message = 'unsupported alignment size attribute: '
        error_message += '{0!s}'.format(alignment_size)
        raise errors.DefinitionReaderError(definition_name, error_message)

    if alignment_size not in (2, 4, 8, 16):
        error_message = 'unsupported alignment size value: {0!s}'.format(
            alignment_size)
        raise errors.DefinitionReaderError(definition_name, error_message)

    definition_object.alignment_size = alignment_size
    return definition_object

Reads a padding data type definition.

Args:
    definitions_registry (DataTypeDefinitionsRegistry): data type
        definitions registry.
    definition_values (dict[str, object]): definition values.
    definition_name (str): name of the definition.
    is_member (Optional[bool]): True if the data type definition is a
        member data type definition.

Returns:
    PaddingDefinition: padding definition.

Raises:
    DefinitionReaderError: if the definitions values are missing or if
    the format is incorrect.
codesearchnet
def _virtual_molecule(self, mol, ilabels, eq_atoms): vmol = ob.OBMol() non_unique_atoms = set([a for g in eq_atoms for a in g]) all_atoms = set(range(1, len(ilabels) + 1)) unique_atom_labels = sorted(all_atoms - non_unique_atoms) for i in unique_atom_labels: orig_idx = ilabels[i-1] oa1 = mol.GetAtom(orig_idx) a1 = vmol.NewAtom() a1.SetAtomicNum(oa1.GetAtomicNum()) a1.SetVector(oa1.GetVector()) if vmol.NumAtoms() < 3: for symm in eq_atoms: c1x, c1y, c1z = self._group_centroid(mol, ilabels, symm) min_distance = float("inf") for i in range(1, vmol.NumAtoms()+1): va = vmol.GetAtom(i) distance = math.sqrt((c1x - va.x())**2 + (c1y - va.y())**2 + (c1z - va.z())**2) if distance < min_distance: min_distance = distance if min_distance > 0.2: a1 = vmol.NewAtom() a1.SetAtomicNum(9) a1.SetVector(c1x, c1y, c1z) return vmol
Create a virtual molecule from the unique atoms and the centroids of the equivalent atoms Args: mol: The molecule. OpenBabel OBMol object ilabels: inchi label map eq_atoms: equivalent atom labels Returns: The virtual molecule
juraj-google-style
def expand(self, pcoll: beam.PCollection[Union[beam.Row, NamedTuple]]) -> beam.PCollection[common_types.InstanceDictType]: return pcoll | beam.Map(lambda x: x._asdict())
Args: pcoll: A PCollection of NamedTuples or Rows. Returns: A PCollection of dictionaries.
github-repos
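A hedged usage sketch of the same transform pattern with vanilla Beam primitives (the pipeline contents are illustrative; assumes apache-beam is installed and the DirectRunner):
import typing

import apache_beam as beam


class Person(typing.NamedTuple):
    name: str
    age: int


with beam.Pipeline() as pipeline:
    _ = (pipeline
         | beam.Create([Person('ada', 36), Person('alan', 41)])
         | beam.Map(lambda row: row._asdict())  # NamedTuple -> instance dict
         | beam.Map(print))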
def get_encoder_from_vocab(vocab_filepath): if not tf.gfile.Exists(vocab_filepath): raise ValueError("Vocab file does not exist: {}.".format(vocab_filepath)) tf.logging.info("Found vocab file: %s", vocab_filepath) encoder = text_encoder.SubwordTextEncoder(vocab_filepath) return encoder
Get encoder from vocab file. If vocab is not found in output dir, it will be copied there by copy_vocab_to_output_dir to clarify the vocab used to generate the data. Args: vocab_filepath: path to vocab, either local or cns Returns: A SubwordTextEncoder vocabulary object. Raises: ValueError: if the vocab file does not exist.
juraj-google-style
def sunset(self, date=None, zenith=None): return (segment.sunset(date, zenith) for segment in self)
Calculate sunset times for locations. Args: date (datetime.date): Calculate sunset for given date zenith (str): Calculate sunset events, or start of twilight times Returns: generator of list of datetime.datetime: The sunset time for each point in each segment
codesearchnet
def directional_poisson_ratio(self, n, m, tol=1e-8): n, m = get_uvec(n), get_uvec(m) if not np.abs(np.dot(n, m)) < tol: raise ValueError("n and m must be orthogonal") v = self.compliance_tensor.einsum_sequence([n]*2 + [m]*2) v *= -1 / self.compliance_tensor.einsum_sequence([n]*4) return v
Calculates the Poisson ratio for a specific direction relative to a second, orthogonal direction Args: n (3-d vector): principal direction m (3-d vector): secondary direction orthogonal to n tol (float): tolerance for testing of orthogonality Returns: float: Poisson ratio for the given pair of directions
juraj-google-style
def service_messages(self, short_name): if (short_name not in self.services): raise ArgumentError('Unknown service name', short_name=short_name) return list(self.services[short_name]['state'].messages)
Get the messages stored for a service. Args: short_name (string): The short name of the service to get messages for Returns: list(ServiceMessage): A list of the ServiceMessages stored for this service
codesearchnet
def collapse_addresses(addresses): i = 0 addrs = [] ips = [] nets = [] for ip in addresses: if isinstance(ip, _BaseAddress): if ips and ips[-1]._version != ip._version: raise TypeError("%s and %s are not of the same version" % ( ip, ips[-1])) ips.append(ip) elif ip._prefixlen == ip._max_prefixlen: if ips and ips[-1]._version != ip._version: raise TypeError("%s and %s are not of the same version" % ( ip, ips[-1])) try: ips.append(ip.ip) except AttributeError: ips.append(ip.network_address) else: if nets and nets[-1]._version != ip._version: raise TypeError("%s and %s are not of the same version" % ( ip, nets[-1])) nets.append(ip) ips = sorted(set(ips)) nets = sorted(set(nets)) while i < len(ips): (first, last) = _find_address_range(ips[i:]) i = ips.index(last) + 1 addrs.extend(summarize_address_range(first, last)) return iter(_collapse_addresses_recursive(sorted( addrs + nets, key=_BaseNetwork._get_networks_key)))
Collapse a list of IP objects. Example: collapse_addresses([IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/25')]) -> [IPv4Network('192.0.2.0/24')] Args: addresses: An iterator of IPv4Network or IPv6Network objects. Returns: An iterator of the collapsed IPv(4|6)Network objects. Raises: TypeError: If passed a list of mixed version objects.
juraj-google-style
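The standard-library ipaddress module ships the same function, which makes for a quick sanity check of the behaviour documented above:
from ipaddress import IPv4Network, collapse_addresses

nets = [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/25')]
print(list(collapse_addresses(nets)))  # [IPv4Network('192.0.2.0/24')]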
def is_flat(neurite, tol, method='tolerance'): ext = principal_direction_extent(neurite.points[:, COLS.XYZ]) assert (method in ('tolerance', 'ratio')), "Method must be one of 'tolerance', 'ratio'" if (method == 'ratio'): sorted_ext = np.sort(ext) return ((sorted_ext[0] / sorted_ext[1]) < float(tol)) return any((ext < float(tol)))
Check if neurite is flat using the given method Args: neurite(Neurite): neurite to operate on tol(float): tolerance method(string): the method of flatness estimation: 'tolerance' returns true if any extent of the tree is smaller than the given tolerance 'ratio' returns true if the ratio of the two smallest extents is smaller than tol, e.g. [1,2,3] -> 1/2 < tol Returns: True if neurite is flat
codesearchnet
def save(self, filename): with open(filename, 'w') as outfile: json.dump(self.to_json(), outfile)
Writes the JSON representation of this graph to the provided filename, such that the graph can be easily reconstructed using Graph(spec=filename). Args: filename (str): Path at which to write out the json file.
juraj-google-style
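A self-contained round-trip sketch of the save pattern above; the stub to_json payload is hypothetical:
import json

class Graph:
    def to_json(self):
        return {'nodes': ['a', 'b'], 'edges': [['a', 'b']]}

    def save(self, filename):
        with open(filename, 'w') as outfile:
            json.dump(self.to_json(), outfile)

Graph().save('/tmp/graph.json')
with open('/tmp/graph.json') as infile:
    print(json.load(infile))  # {'nodes': ['a', 'b'], 'edges': [['a', 'b']]}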
def rename_libtensorflow(srcs_dir: str, version: str): major_version = version.split('.')[0] if is_macos(): shutil.move(os.path.join(srcs_dir, 'libtensorflow_cc.{}.dylib'.format(version)), os.path.join(srcs_dir, 'libtensorflow_cc.{}.dylib'.format(major_version))) shutil.move(os.path.join(srcs_dir, 'libtensorflow_framework.{}.dylib'.format(version)), os.path.join(srcs_dir, 'libtensorflow_framework.{}.dylib'.format(major_version))) else: shutil.move(os.path.join(srcs_dir, 'libtensorflow_cc.so.{}'.format(version)), os.path.join(srcs_dir, 'libtensorflow_cc.so.{}'.format(major_version))) shutil.move(os.path.join(srcs_dir, 'libtensorflow_framework.so.{}'.format(version)), os.path.join(srcs_dir, 'libtensorflow_framework.so.{}'.format(major_version)))
Update libtensorflow_cc file name. Bazel sets full TF version in name but libtensorflow_cc must contain only major. Update accordingly to the platform: e.g. libtensorflow_cc.so.2.15.0 -> libtensorflow_cc.2 Args: srcs_dir: target directory with files. version: Major version to be set.
github-repos
def url(request, json_list, nested, url_name='show_{}', ignore_get=None): if (not ignore_get): ignore_get = [] if isinstance(url_name, str): url_string = str(url_name) url_name = (lambda x: url_string.format(x)) urls = cache.get('proso_urls') if (urls is None): urls = {} else: urls = json_lib.loads(urls) cache_updated = False pass_string = pass_get_parameters_string(request, ignore_get) for json in json_list: if (('object_type' not in json) or ('id' not in json)): continue key = ('show_%s_%s' % (json['object_type'], json['id'])) if (key in urls): json['url'] = urls[key] else: cache_updated = True json['url'] = reverse(url_name(json['object_type']), kwargs={'id': json['id']}) urls[key] = json['url'] json['url'] = append_get_parameters(json['url'], pass_string) if cache_updated: cache.set('proso_urls', json_lib.dumps(urls), CACHE_EXPIRATION)
Enrich the given list of objects, so they have URLs. Args: request (django.http.request.HttpRequest): request which is currently processed json_list (list): list of dicts (JSON objects to be enriched) url_name (str|fun): pattern to create a url name taking object_type ignore_get (list): list of GET parameters which are ignored when the URL is generated Returns: list: list of dicts (enriched JSON objects)
codesearchnet
def getKeywordsForText(self, retina_name, body): resourcePath = '/text/keywords' method = 'POST' queryParams = {} headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'} queryParams['retina_name'] = retina_name postData = body response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams) return response.json()
Get a list of keywords from the text Args: retina_name, str: The retina name (required) body, str: The text to be evaluated (required) Returns: Array[str]
juraj-google-style
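The wrapper above boils down to a single HTTP POST; a hedged equivalent with requests (the endpoint and retina name are hypothetical):
import requests

response = requests.post(
    'https://api.example.com/text/keywords',
    params={'retina_name': 'en_associative'},
    headers={'Accept': 'application/json', 'Content-Type': 'application/json'},
    data='The quick brown fox jumps over the lazy dog.',
)
print(response.json())  # e.g. ['fox', 'dog']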
def _handle_uniqueness(self): def _getattr(u): try: return self._field_values[u] except KeyError: return getattr(self, u) if self._uniques: for u in self._uniques: val = _getattr(u) changed_fields = self.changed_fields(from_db=True) if (self.exist and (not ((u in changed_fields) if (not callable(val)) else ((str(u) + '_id') in changed_fields)))): if (val and (self.objects.filter(**{u: val}).count() > 1)): raise IntegrityError(('Unique mismatch: %s for %s already exists for value: %s' % (u, self.__class__.__name__, val))) elif (val and self.objects.filter(**{u: val}).count()): raise IntegrityError(('Unique mismatch: %s for %s already exists for value: %s' % (u, self.__class__.__name__, val))) if self.Meta.unique_together: changed_fields = self.changed_fields(from_db=True) for uniques in self.Meta.unique_together: vals = dict([(u, _getattr(u)) for u in uniques]) if self.exist: query_is_changed = [] for uni in vals.keys(): if callable(vals[uni]): is_changed = ((str(uni) + '_id') in changed_fields) query_is_changed.append(is_changed) else: is_changed = (uni in changed_fields) query_is_changed.append(is_changed) is_unique_changed = any(query_is_changed) if (not is_unique_changed): if (self.objects.filter(**vals).count() > 1): raise IntegrityError(('Unique together mismatch: %s combination already exists for %s' % (vals, self.__class__.__name__))) elif self.objects.filter(**vals).count(): raise IntegrityError(('Unique together mismatch: %s combination already exists for %s' % (vals, self.__class__.__name__))) elif self.objects.filter(**vals).count(): raise IntegrityError(('Unique together mismatch: %s combination already exists for %s' % (vals, self.__class__.__name__)))
Checks the fields of the Model marked as unique and unique_together on each create and update, and raises IntegrityError if uniqueness is violated. First, looks at the fields marked as "unique". If the Model's unique fields did not change, there is still a record in the db with the same unique field values, so it must be checked whether more than one result violates uniqueness; if so, raise an IntegrityError. Otherwise, when fields marked as unique are among the changed fields, it must be checked whether any violation exists at all, and if so, again raise an IntegrityError. Then, looks at the fields marked as "unique_together" with the same logic. Raises: IntegrityError: if the unique and unique_together checks do not pass
codesearchnet
def from_json_file(cls, json_file: Union[str, os.PathLike]) -> 'PretrainedConfig': config_dict = cls._dict_from_json_file(json_file) return cls(**config_dict)
Instantiates a [`PretrainedConfig`] from the path to a JSON file of parameters. Args: json_file (`str` or `os.PathLike`): Path to the JSON file containing the parameters. Returns: [`PretrainedConfig`]: The configuration object instantiated from that JSON file.
github-repos
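A hedged usage sketch via a concrete subclass from transformers (the file path is hypothetical; any config JSON produced by save_pretrained works):
from transformers import BertConfig

config = BertConfig.from_json_file('/tmp/bert/config.json')
print(config.hidden_size)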
def __set_mutation_type(self, hgvs_string): self.__set_lost_stop_status(hgvs_string) self.__set_lost_start_status(hgvs_string) self.__set_missense_status(hgvs_string) self.__set_indel_status() self.__set_frame_shift_status() self.__set_premature_stop_codon_status(hgvs_string)
Interpret the mutation type (missense, etc.) and set appropriate flags. Args: hgvs_string (str): hgvs syntax with "p." removed
juraj-google-style
def _lob_end_handler_factory(ion_type, action, validate=(lambda c, ctx, action_res: None)): assert ((ion_type is IonType.BLOB) or (ion_type is IonType.CLOB)) @coroutine def lob_end_handler(c, ctx): val = ctx.value prev = c action_res = None if ((c != _CLOSE_BRACE) and (c not in _WHITESPACE)): action_res = action(c, ctx, prev, action_res, True) (c, self) = (yield) trans = ctx.immediate_transition(self) while True: if (c in _WHITESPACE): if (prev == _CLOSE_BRACE): _illegal_character(c, ctx.set_ion_type(ion_type), 'Expected }.') elif (c == _CLOSE_BRACE): if (prev == _CLOSE_BRACE): validate(c, ctx, action_res) break else: action_res = action(c, ctx, prev, action_res, False) prev = c (c, _) = (yield trans) ctx.set_self_delimiting(True) (yield ctx.event_transition(IonThunkEvent, IonEventType.SCALAR, ion_type, _parse_lob(ion_type, val))) return lob_end_handler
Generates handlers for the end of blob or clob values. Args: ion_type (IonType): The type of this lob (either blob or clob). action (callable): Called for each non-whitespace, non-closing brace character encountered before the end of the lob. Accepts the current character's ordinal, the current context, the previous character's ordinal, the result of the previous call to ``action`` (if any), and True if this is the first call to ``action``. Returns any state that will be needed by subsequent calls to ``action``. For blobs, this should validate the character is valid base64; for clobs, this should ensure there are no illegal characters (e.g. comments) between the end of the data and the end of the clob. validate (Optional[callable]): Called once the second closing brace has been found. Accepts the current character's ordinal, the current context, and the result of the last call to ``action``; raises an error if this is not a valid lob value.
codesearchnet
def VerifyStructure(self, parser_mediator, line): structure = self.LOG_LINE try: parsed_structure = structure.parseString(line) except pyparsing.ParseException: logger.debug('Not a XChat scrollback log file') return False try: int(parsed_structure.timestamp, 10) except ValueError: logger.debug('Not a XChat scrollback log file, invalid timestamp string') return False return True
Verify that this file is an XChat scrollback log file. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. line (str): line from a text file. Returns: bool: True if the line was successfully parsed.
juraj-google-style
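The verification hinges on pyparsing's parse-or-raise contract; a minimal sketch of that pattern with a toy grammar:
import pyparsing

log_line = pyparsing.Word(pyparsing.nums)('timestamp') + pyparsing.restOfLine('body')
try:
    parsed = log_line.parseString('1422145930 some chatter')
    int(parsed.timestamp, 10)  # same secondary timestamp check as VerifyStructure
    print('looks like a log line')
except (pyparsing.ParseException, ValueError):
    print('not a log line')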
def _get_mpr_view(self, connection, table): logger.debug( 'Looking for view of the table.\n table: {}'.format(table.vid)) view = self.get_view_name(table) view_exists = self._relation_exists(connection, view) if view_exists: logger.debug( 'View of the table exists.\n table: {}, view: {}' .format(table.vid, view)) return view raise MissingViewError('sqlite database does not have view for {} table.' .format(table.vid))
Finds and returns the view name in the sqlite db represented by the given connection. Args: connection: connection to sqlite db where to look for the view. table (orm.Table): table whose view to look for. Raises: MissingViewError: if the database does not have a view for the given table. Returns: str: name of the database view storing partition data.
juraj-google-style
def __save__(script_name, benchbuild, experiment, projects): from jinja2 import Environment, PackageLoader logs_dir = os.path.dirname(CFG['slurm']['logs'].value) node_command = str(benchbuild["-E", experiment.name, "$_project"]) env = Environment( trim_blocks=True, lstrip_blocks=True, loader=PackageLoader('benchbuild', 'utils/templates')) template = env.get_template('slurm.sh.inc') with open(script_name, 'w') as slurm2: slurm2.write( template.render( config=["export " + x for x in repr(CFG).split('\n')], clean_lockdir=str(CFG["slurm"]["node_dir"]), clean_lockfile=str(CFG["slurm"]["node_dir"]) + \ ".clean-in-progress.lock", cpus=int(CFG['slurm']['cpus_per_task']), exclusive=bool(CFG['slurm']['exclusive']), lockfile=str(CFG['slurm']["node_dir"]) + ".lock", log=local.path(logs_dir) / str(experiment.id), max_running=int(CFG['slurm']['max_running']), name=experiment.name, nice=int(CFG['slurm']['nice']), nice_clean=int(CFG["slurm"]["nice_clean"]), node_command=node_command, no_multithreading=not CFG['slurm']['multithread'], ntasks=1, prefix=str(CFG["slurm"]["node_dir"]), projects=projects, slurm_account=str(CFG["slurm"]["account"]), slurm_partition=str(CFG["slurm"]["partition"]), timelimit=str(CFG['slurm']['timelimit']), ) ) chmod("+x", script_name) if not __verify__(script_name): LOG.error("SLURM script failed verification.") print("SLURM script written to {0}".format(script_name)) return script_name
Dump a bash script that can be given to SLURM. Args: script_name (str): name of the bash script. benchbuild: plumbum command used to run a single project on a node. experiment: the experiment to schedule. projects: list of projects to run.
juraj-google-style
def plot_histograms(self, freq=None, title=None, figsize=(10, 10), **kwargs): if title is None: title = self._get_default_plot_title( freq, 'Return Histogram Matrix') plt.figure() ser = self._get_series(freq).to_returns().dropna() ser.hist(figsize=figsize, **kwargs) return plt.suptitle(title)
Wrapper around pandas' hist. Args: * freq (str): Data frequency used for display purposes. Refer to pandas docs for valid freq strings. * figsize ((x,y)): figure size * title (str): Title if default not appropriate * kwargs: passed to pandas' hist method
juraj-google-style
def get_failed_enrollment_message(cls, users, enrolled_in): failed_emails = [user.email for user in users] return ( 'error', _( 'The following learners could not be enrolled in {enrolled_in}: {user_list}' ).format( enrolled_in=enrolled_in, user_list=', '.join(failed_emails), ) )
Create message for the users who were not able to be enrolled in a course or program. Args: users: An iterable of users who were not successfully enrolled enrolled_in (str): A string identifier for the course or program with which enrollment was attempted Returns: tuple: A 2-tuple containing a message type and message text
juraj-google-style
def stop(pid): if psutil.pid_exists(pid): try: p = psutil.Process(pid) p.kill() except Exception: pass
Shut down a specific process. Args: pid: the pid of the process to shutdown.
juraj-google-style
def __init__(self, selenium): self.selenium = selenium self.window_manager = WindowManager(selenium) self.browser = self.window_manager.windows[0]
Create FoxPuppet object. Args: selenium: (:py:class:`~selenium.webdriver.remote.webdriver.WebDriver`): Firefox WebDriver object.
juraj-google-style
def OpenFileObject(cls, path_spec_object, resolver_context=None): if (not isinstance(path_spec_object, path_spec.PathSpec)): raise TypeError('Unsupported path specification type.') if (resolver_context is None): resolver_context = cls._resolver_context if (path_spec_object.type_indicator == definitions.TYPE_INDICATOR_MOUNT): if path_spec_object.HasParent(): raise errors.PathSpecError('Unsupported mount path specification with parent.') mount_point = getattr(path_spec_object, 'identifier', None) if (not mount_point): raise errors.PathSpecError('Unsupported path specification without mount point identifier.') path_spec_object = mount_manager.MountPointManager.GetMountPoint(mount_point) if (not path_spec_object): raise errors.MountPointError('No such mount point: {0:s}'.format(mount_point)) file_object = resolver_context.GetFileObject(path_spec_object) if (not file_object): resolver_helper = cls._GetResolverHelper(path_spec_object.type_indicator) file_object = resolver_helper.NewFileObject(resolver_context) file_object.open(path_spec=path_spec_object) return file_object
Opens a file-like object defined by path specification. Args: path_spec_object (PathSpec): path specification. resolver_context (Optional[Context]): resolver context, where None represents the built in context which is not multi process safe. Returns: FileIO: file-like object or None if the path specification could not be resolved. Raises: PathSpecError: if the path specification is incorrect. TypeError: if the path specification type is unsupported.
codesearchnet
def sqrt(cls, x: 'TensorFluent') -> 'TensorFluent': return cls._unary_op(x, tf.sqrt, tf.float32)
Returns a TensorFluent for the sqrt function. Args: x: The input fluent. Returns: A TensorFluent wrapping the sqrt function.
codesearchnet
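Underneath, the fluent wraps a plain TensorFlow op; the core call is simply:
import tensorflow as tf

x = tf.constant([1.0, 4.0, 9.0])
print(tf.sqrt(x))  # tf.Tensor([1. 2. 3.], shape=(3,), dtype=float32)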
def __init__(self, decode_module, encode_module, methodName='runTest'): super(EncodeProtoOpTestBase, self).__init__(methodName) self._decode_module = decode_module self._encode_module = encode_module
EncodeProtoOpTestBase initializer. Args: decode_module: a module containing the `decode_proto_op` method encode_module: a module containing the `encode_proto_op` method methodName: the name of the test method (same as for test.TestCase)
github-repos
def sync_firmware(self): serial_no = self.serial_number if self.firmware_newer(): try: self.invalidate_firmware() self.update_firmware() except errors.JLinkException as e: pass res = self.open(serial_no=serial_no) if self.firmware_newer(): raise errors.JLinkException('Failed to sync firmware version.') return res elif self.firmware_outdated(): try: self.update_firmware() except errors.JLinkException as e: pass if self.firmware_outdated(): raise errors.JLinkException('Failed to sync firmware version.') return self.open(serial_no=serial_no) return None
Syncs the emulator's firmware version and the DLL's firmware. This method is useful for ensuring that the firmware running on the J-Link matches the firmware supported by the DLL. Args: self (JLink): the ``JLink`` instance Returns: ``None``
juraj-google-style
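A hedged usage sketch with the pylink package (the serial number is hypothetical):
import pylink

jlink = pylink.JLink()
jlink.open(serial_no=123456789)
jlink.sync_firmware()  # re-opens the connection under the hood if an update was flashed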
def strace_clear_all(self): data = 0 res = self._dll.JLINK_STRACE_Control(enums.JLinkStraceCommand.TRACE_EVENT_CLR_ALL, data) if (res < 0): raise errors.JLinkException('Failed to clear all STRACE events.') return None
Clears all STRACE events. Args: self (JLink): the ``JLink`` instance. Returns: ``None`` Raises: JLinkException: on error.
codesearchnet
def parse_args(argv): parser = make_parser() args = parser.parse_args(argv) t = args.tool_args kythe_args = kythe.Args(corpus=t.kythe_corpus, root=t.kythe_root, path=t.kythe_path, skip_stdlib=t.skip_stdlib) return (args.all_args, kythe_args, args.pytype_opts)
Parse command line args. Arguments: argv: Raw command line args, typically sys.argv[1:] Returns: A tuple of ( parsed_args: argparse.Namespace, kythe_args: kythe.Args, pytype_options: pytype.config.Options)
github-repos
def predict(self, x_test): if self.model: lengths = map(len, x_test) x_test = self.p.transform(x_test) y_pred = self.model.predict(x_test) y_pred = self.p.inverse_transform(y_pred, lengths) return y_pred else: raise OSError('Could not find a model. Call load(dir_path).')
Returns the prediction of the model on the given test data. Args: x_test : array-like, shape = (n_samples, sent_length) Test samples. Returns: y_pred : array-like, shape = (n_samples, sent_length) Prediction labels for x.
juraj-google-style
def is_running(process): if os.name == 'nt': process_list = get_cmd_out(['tasklist', '/v']) return process in process_list else: process_list = get_cmd_out('ps axw | awk \'{print $5}\'') for i in process_list.split('\n'): # Skip the header line and kernel threads (names wrapped in brackets). if i != 'COMMAND' and not i.startswith('['): if i == process: return True elif os.path.basename(i) == process: return True return False
Check if process is running. Check if the given process name is running or not. Note: On a Linux system, kernel threads (like ``kthreadd`` etc.) are excluded. Args: process (str): The name of the process. Returns: bool: Is the process running?
juraj-google-style
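A cross-platform alternative sketch using psutil instead of shelling out (not the helper's actual implementation):
import psutil

def is_running(process):
    # Compare against the short process name reported by the OS.
    for proc in psutil.process_iter(['name']):
        if proc.info['name'] == process:
            return True
    return False

print(is_running(psutil.Process().name()))  # True: the current interpreter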
def mark_all_as_done(self, **kwargs): result = self.gitlab.http_post('/todos/mark_as_done', **kwargs) try: return int(result) except ValueError: return 0
Mark all the todos as done. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabTodoError: If the server failed to perform the request Returns: int: The number of todos marked done
codesearchnet
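A hedged usage sketch with python-gitlab (the server URL and token are hypothetical):
import gitlab

gl = gitlab.Gitlab('https://gitlab.example.com', private_token='TOKEN')
count = gl.todos.mark_all_as_done()
print(count, 'todos marked as done')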