Dataset columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes).
def _fiss_agent_header(headers=None):
    _set_session()
    fiss_headers = {"User-Agent": FISS_USER_AGENT}
    if headers is not None:
        fiss_headers.update(headers)
    return fiss_headers
Return request headers for fiss. Inserts FISS as the User-Agent. Initializes __SESSION if it hasn't been set. Args: headers (dict): Include additional headers as key-value pairs
juraj-google-style
def __init__(self, temperatures, materials): self._table = Table( column_keys=temperatures, rows_mapping=materials )
Create a material stress table. Args: temperatures: A sequence of temperatures. materials: A mapping of material names to sequences of stress values which correspond to the temperatures.
juraj-google-style
def _build_statistics(self, input_batch, use_batch_stats, stat_dtype):
    if self.MOVING_MEAN not in self._initializers:
        self._initializers[self.MOVING_MEAN] = create_mean_initializer()
    self._moving_mean = tf.get_variable(
        'moving_mean',
        dtype=stat_dtype,
        shape=(self._num_channels,),
        collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
                     tf.GraphKeys.GLOBAL_VARIABLES],
        initializer=self._initializers[self.MOVING_MEAN],
        trainable=False)

    if self.MOVING_VARIANCE not in self._initializers:
        self._initializers[self.MOVING_VARIANCE] = create_variance_initializer()
    self._moving_variance = tf.get_variable(
        'moving_variance',
        dtype=stat_dtype,
        shape=(self._num_channels,),
        collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
                     tf.GraphKeys.GLOBAL_VARIABLES],
        initializer=self._initializers[self.MOVING_VARIANCE],
        trainable=False)

    def build_batch_stats():
        'Builds the batch statistics calculation ops.'
        mean, variance = tf.nn.moments(input_batch, self._axis,
                                       keep_dims=True, name='normalize_moments')
        return mean, variance

    def build_moving_stats():
        'Retrieves the moving statistics.'
        input_dtype = input_batch.dtype.base_dtype
        if stat_dtype == input_dtype:
            return (tf.identity(self._moving_mean),
                    tf.identity(self._moving_variance))
        else:
            return (tf.cast(self._moving_mean, input_dtype),
                    tf.cast(self._moving_variance, input_dtype))

    mean, variance = utils.smart_cond(use_batch_stats, build_batch_stats,
                                      build_moving_stats)
    return mean, variance
Builds the statistics part of the graph when using moving variance. Args: input_batch: Input batch Tensor. use_batch_stats: Boolean to indicate if batch statistics should be calculated, otherwise moving averages are returned. stat_dtype: TensorFlow datatype to use for the moving mean and variance. Returns: Tuple of (mean, variance), each of the same datatype as `input_batch`.
codesearchnet
def get_ip_address(domain):
    # The scheme check and prefix were mangled during extraction (everything
    # after "//" was stripped); reconstructed as the usual "prepend http://
    # when no scheme is present" idiom implied by the surviving fragments.
    if '://' not in domain:
        domain = 'http://' + domain
    hostname = urlparse(domain).netloc
    if not hostname:
        raise ValueError("Can't parse hostname!")
    return socket.gethostbyname(hostname)
Get IP address for given `domain`. Try to do smart parsing. Args: domain (str): Domain or URL. Returns: str: IP address. Raises: ValueError: If can't parse the domain.
codesearchnet
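A hedged usage sketch for the get_ip_address entry above; it assumes urlparse and socket are already imported at module level and that DNS resolution is available at run time.

get_ip_address('http://example.com/some/page')  # e.g. '93.184.216.34'
get_ip_address('example.com')                   # scheme is prepended before parsing, same result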
def _find_children_hints_in_while_loop(function_def, nodes_mapping):
    new_nodes = []
    for node in function_def.node_def:
        for i, _ in enumerate(node.input):
            if node.input[i] in nodes_mapping:
                node.input[i] = nodes_mapping[node.input[i]]
        new_nodes.append(_copy.deepcopy(node))
    name_to_seq_num = _extract_topology_sequence_mapping(function_def.node_def)
    children_hints = _find_all_hints_in_nodes(new_nodes)
    children_hints_q = []
    for hint in children_hints.values():
        _, output_names = hint.flattened_inputs_and_outputs()
        seq = name_to_seq_num[output_names[0]]
        for output_name in output_names:
            seq = min(seq, name_to_seq_num[output_name])
        children_hints_q.append((seq, hint))
    children_hints_q.sort(key=lambda tup: tup[0])
    ordered_children_hints = [x[1] for x in children_hints_q]
    return (ordered_children_hints, new_nodes)
Find children hints and all nodes inside the while loop. Args: function_def: Function def of the while loop. nodes_mapping: While loop input_arg : real node name. Returns: Ordered children hints and all re-mapped nodes inside the while loop.
github-repos
def deal_with_changeset_stack_policy(self, fqn, stack_policy): if stack_policy: kwargs = generate_stack_policy_args(stack_policy) kwargs['StackName'] = fqn logger.debug('Setting stack policy on %s.', fqn) self.cloudformation.set_stack_policy(**kwargs)
Set a stack policy when using changesets. ChangeSets don't allow you to set stack policies in the same call to update them. This sets it before executing the changeset if the stack policy is passed in. Args: stack_policy (:class:`stacker.providers.base.Template`): A template object representing a stack policy.
codesearchnet
def vmstat(stat):
    out = subprocess.check_output(['vmstat', '-s'])
    stat = stat.encode('ascii')
    for line in out.split(b'\n'):
        line = line.strip()
        if stat in line:
            return int(line.split(b' ')[0])
    raise ValueError("Can't find {} in 'vmstat' output.".format(stat))
Run vmstat and get a particular statistic. Args: stat: The statistic that we are interested in retrieving. Returns: The parsed output.
codesearchnet
def mimic_adam_with_adafactor(hparams): assert ('adam' in hparams.optimizer) hparams.optimizer = 'adafactor' hparams.optimizer_adafactor_beta1 = hparams.optimizer_adam_beta1 hparams.optimizer_adafactor_beta2 = hparams.optimizer_adam_beta2 hparams.optimizer_adafactor_multiply_by_parameter_scale = False hparams.optimizer_adafactor_factored = False hparams.optimizer_adafactor_clipping_threshold = None hparams.optimizer_adafactor_decay_type = 'adam'
Switch from Adam to Adafactor, approximating the behavior of Adam. Some minor things may be different, like epsilon and beta1 correction. Args: hparams: model hyperparameters where "adam" in hparams.optimizer
codesearchnet
def parse_pv(header):
    order_fit = parse_order_fit(header)

    def parse_with_base(i):
        key_base = 'PV%d_' % i
        pvi_x = [header[key_base + '0']]

        def parse_range(lower, upper):
            for j in range(lower, upper + 1):
                pvi_x.append(header[key_base + str(j)])

        if order_fit >= 1:
            parse_range(1, 3)
        if order_fit >= 2:
            parse_range(4, 6)
        if order_fit >= 3:
            parse_range(7, 10)
        return pvi_x

    return [parse_with_base(1), parse_with_base(2)]
Parses the PV array from an astropy FITS header. Args: header: astropy.io.fits.header.Header The header containing the PV values. Returns: cd: 2d array (list(list(float)) [[PV1_0, PV1_1, ... PV1_N], [PV2_0, PV2_1, ... PV2_N]] Note that N depends on the order of the fit. For example, an order 3 fit goes up to PV?_10.
codesearchnet
def compact_interval_string(value_list):
    if not value_list:
        return ''
    value_list.sort()
    interval_list = []
    curr = []
    for val in value_list:
        if curr and (val > curr[-1] + 1):
            interval_list.append((curr[0], curr[-1]))
            curr = [val]
        else:
            curr.append(val)
    if curr:
        interval_list.append((curr[0], curr[-1]))
    return ','.join([
        '{}-{}'.format(pair[0], pair[1]) if pair[0] != pair[1] else str(pair[0])
        for pair in interval_list
    ])
Compact a list of integers into a comma-separated string of intervals. Args: value_list: A list of sortable integers such as a list of numbers Returns: A compact string representation, such as "1-5,8,12-15"
juraj-google-style
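A quick usage sketch for the compact_interval_string entry above, showing how runs of consecutive integers collapse into ranges.

compact_interval_string([12, 1, 2, 8, 3, 5, 13, 14, 15])  # -> '1-3,5,8,12-15'
compact_interval_string([])                                # -> ''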
def execute(self, sensor_graph, scope_stack): streamer = DataStreamer(self.selector, self.dest, self.report_format, self.auto, report_type=self.report_type, with_other=self.with_other) sensor_graph.add_streamer(streamer)
Execute this statement on the sensor_graph given the current scope tree. This adds a single DataStreamer to the current sensor graph Args: sensor_graph (SensorGraph): The sensor graph that we are building or modifying scope_stack (list(Scope)): A stack of nested scopes that may influence how this statement allocates clocks or other stream resources.
juraj-google-style
def copy_buffer(self, dst, src, size=(- 1), *, read_offset=0, write_offset=0) -> None: self.mglo.copy_buffer(dst.mglo, src.mglo, size, read_offset, write_offset)
Copy buffer content. Args: dst (Buffer): The destination buffer. src (Buffer): The source buffer. size (int): The number of bytes to copy. Keyword Args: read_offset (int): The read offset. write_offset (int): The write offset.
codesearchnet
def convert_nested_time_distributed(weights): return preprocess_weights_for_loading(layer.layer, weights, original_keras_version, original_backend)
Converts layers nested in `TimeDistributed` wrapper. This function uses `preprocess_weights_for_loading()` for converting nested layers. Args: weights: List of weights values (Numpy arrays). Returns: A list of weights values (Numpy arrays).
github-repos
def create_training_target(self, target, run_eagerly=False):
    if self.has_training_target():
        raise ValueError('The training_target field for the _TrainingEndpoint '
                         'instance has already been populated')
    if run_eagerly:
        self.training_target = _TrainingTarget(
            None, feedable=True, skip_target_weights=False)
        return
    if self.should_skip_target():
        self.training_target = _TrainingTarget(None)
    else:
        if target is not None and (not backend.is_placeholder(target)):
            feedable = False
            skip_target_weights = True
        else:
            feedable = True
            skip_target_weights = False
        if target is None:
            target_dtype = losses.LABEL_DTYPES_FOR_LOSSES.get(
                self.loss_fn, backend.dtype(self.output))
            target = backend.placeholder(
                ndim=len(self.shape),
                name=self.output_name + '_target',
                sparse=backend.is_sparse(self.output),
                dtype=target_dtype)
        self.training_target = _TrainingTarget(
            target, feedable=feedable, skip_target_weights=skip_target_weights)
Create training_target instance and update the self.training_target. Note that the input target should just be a tensor or None, and corresponding training target will be created based on the output and loss_fn. Args: target: the target tensor for the current output. Could be None. run_eagerly: boolean, whether the model is in run_eagerly mode. Raises: ValueError if the training_target field for the current instance has already been populated.
github-repos
def reaction_signature(eq, direction=False, stoichiometry=False):
    def compounds_sig(compounds):
        if stoichiometry:
            return tuple(sorted(compounds))
        else:
            return tuple(sorted(compound for compound, _ in compounds))

    left = compounds_sig(eq.left)
    right = compounds_sig(eq.right)
    if left < right:
        reaction_sig = left, right
        direction_sig = eq.direction
    else:
        reaction_sig = right, left
        direction_sig = eq.direction.flipped()

    if direction:
        return reaction_sig, direction_sig
    return reaction_sig
Return unique signature object for :class:`Reaction`. Signature objects are hashable, and compare equal only if the reactions are considered the same according to the specified rules. Args: direction: Include reaction directionality when considering equality. stoichiometry: Include stoichiometry when considering equality.
juraj-google-style
def copy_to(self, container: Container, fn_host: str, fn_container: str) -> None:
    logger.debug('Copying file to container, %s: %s -> %s',
                 container.uid, fn_host, fn_container)
    if not os.path.exists(fn_host):
        logger.error('Failed to copy file [%s] to [%s] in container [%s]: not found.',
                     fn_host, fn_container, container.uid)
        raise FileNotFound(fn_host)
    cmd = "docker cp '{}' '{}:{}'".format(fn_host, container.id, fn_container)
    try:
        subprocess.check_output(cmd, shell=True)
        logger.debug('Copied file to container, %s: %s -> %s',
                     container.uid, fn_host, fn_container)
        r = self.command(container, "sudo chown $(whoami) '{}'".format(fn_container))
        if r.code != 0:
            m = 'failed to update permissions for container file [{}] (exit code: {}): {}'
            m = m.format(fn_container, r.code, r.output)
            raise BugZooException(m)
    except subprocess.CalledProcessError:
        logger.exception('Failed to copy file to container, %s: %s -> %s',
                         container.uid, fn_host, fn_container)
        raise
Copies a file from the host machine to a specified location inside a container. Raises: FileNotFound: if the host file wasn't found. subprocess.CalledProcessError: if the file could not be copied to the container.
codesearchnet
def move(self, delta): pos = self.pos self.pos = (pos[0]+delta[0], pos[1]+delta[1], pos[2]+delta[0], pos[3]+delta[1]) for age in self.nodes: for node in age: node.move(delta)
Move the tree. Args: delta (tupel): The adjustment of the position.
juraj-google-style
def excluded_from_module_rename(module, import_rename_spec): for excluded_prefix in import_rename_spec.excluded_prefixes: if module.startswith(excluded_prefix): return True return False
Check if this module import should not be renamed. Args: module: (string) module name. import_rename_spec: ImportRename instance. Returns: True if this import should not be renamed according to the import_rename_spec.
github-repos
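A minimal sketch for the excluded_from_module_rename entry above; the namedtuple here is a hypothetical stand-in for the real ImportRename spec, which only needs an excluded_prefixes attribute for this call.

from collections import namedtuple

FakeImportRename = namedtuple('FakeImportRename', ['excluded_prefixes'])
spec = FakeImportRename(excluded_prefixes=['tf.compat.v1'])

excluded_from_module_rename('tf.compat.v1.layers', spec)  # True
excluded_from_module_rename('tf.nn', spec)                # False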
def __init__(self, founded_command='_silence_', score=0.0, is_new_command=False): self._founded_command = founded_command self._score = score self._is_new_command = is_new_command
Construct a recognition result. Args: founded_command: A string indicating the word just founded. score: A float representing the confidence of founded word. is_new_command: A boolean indicating if the founded command is a new one against the last one.
github-repos
def parse_services(config, services):
    enabled = 0
    for service in services:
        check_disabled = config.getboolean(service, 'check_disabled')
        if not check_disabled:
            enabled += 1
    return enabled
Parse configuration to return number of enabled service checks. Arguments: config (obj): A configparser object with the configuration of anycast-healthchecker. services (list): A list of section names which holds configuration for each service check Returns: A number (int) of enabled service checks.
codesearchnet
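A usage sketch for the parse_services entry above, assuming a standard-library configparser object with a boolean check_disabled option per service section.

import configparser

config = configparser.ConfigParser()
config.read_string("""
[foo.bar.com]
check_disabled = false
[baz.bar.com]
check_disabled = true
""")

parse_services(config, ['foo.bar.com', 'baz.bar.com'])  # -> 1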
def get_updated(node):
    if isinstance(node, gast.Assign):
        return set.union(*(_get_target(target) for target in node.targets))
    elif isinstance(node, (gast.For, gast.AugAssign)):
        return _get_target(node.target)
    elif isinstance(node, gast.arguments):
        targets = set(arg.id for arg in (node.args + node.kwonlyargs))
        if node.vararg:
            targets.add(node.vararg.id)
        if node.kwarg:
            targets.add(node.kwarg.id)
        return targets
    else:
        return set()
Return the variable names created or mutated by this statement. This function considers assign statements, augmented assign statements, and the targets of for loops, as well as function arguments. For example, `x[0] = 2` will return `x`, `x, y = 3, 4` will return `x` and `y`, `for i in range(x)` will return `i`, etc. Args: node: An AST node Returns: A set of variable names (strings) of all the variables created or mutated.
codesearchnet
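A short illustration of the get_updated entry above; it assumes the module's private _get_target helper (not shown in this row) resolves Name and Tuple targets, as it does in the original source.

import gast

assign = gast.parse('x, y = 1, 2').body[0]
get_updated(assign)                          # -> {'x', 'y'}

loop = gast.parse('for i in range(n):\n    pass').body[0]
get_updated(loop)                            # -> {'i'}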
def __set_data__(self, prop, data):
    if self.data_attr:
        setattr(prop, self.data_attr, data)
    else:
        rm_idxs = []
        for i, val in enumerate(prop):
            if val not in data:
                rm_idxs.append(i)
        for idx in sorted(rm_idxs, reverse=True):
            prop.pop(idx)
        for val in data:
            if val not in prop:
                prop.append(val)
sets the processed data to the appropriated property attribute Args: ----- prop: the property being manipulated data: the list of processed data
juraj-google-style
def delete(filething): t = OggVorbis(filething) filething.fileobj.seek(0) t.delete(filething)
delete(filething) Arguments: filething (filething) Raises: mutagen.MutagenError Remove tags from a file.
juraj-google-style
def run(self): target = getattr(self, '_Thread__target', getattr(self, '_target', None)) args = getattr(self, '_Thread__args', getattr(self, '_args', None)) kwargs = getattr(self, '_Thread__kwargs', getattr(self, '_kwargs', None)) if target is not None: self._return = target(*args, **kwargs) return None
Runs the thread. Args: self (ThreadReturn): the ``ThreadReturn`` instance Returns: ``None``
juraj-google-style
def shutdown(self, ssc=None, grace_secs=0, timeout=259200): logging.info('Stopping TensorFlow nodes') (ps_list, worker_list, eval_list) = ([], [], []) for node in self.cluster_info: (ps_list if (node['job_name'] == 'ps') else (eval_list if (node['job_name'] == 'evaluator') else worker_list)).append(node) if (timeout > 0): def timeout_handler(signum, frame): logging.error('TensorFlow execution timed out, exiting Spark application with error status') self.sc.cancelAllJobs() self.sc.stop() sys.exit(1) signal.signal(signal.SIGALRM, timeout_handler) signal.alarm(timeout) if (ssc is not None): while (not ssc.awaitTerminationOrTimeout(1)): if self.server.done: logging.info('Server done, stopping StreamingContext') ssc.stop(stopSparkContext=False, stopGraceFully=True) break elif (self.input_mode == InputMode.TENSORFLOW): count = 0 while (count < 3): st = self.sc.statusTracker() jobs = st.getActiveJobsIds() if (len(jobs) == 0): break stages = st.getActiveStageIds() for i in stages: si = st.getStageInfo(i) if (si.numActiveTasks == (len(ps_list) + len(eval_list))): count += 1 time.sleep(5) workers = len(worker_list) workerRDD = self.sc.parallelize(range(workers), workers) workerRDD.foreachPartition(TFSparkNode.shutdown(self.cluster_info, self.queues)) time.sleep(grace_secs) if ('error' in tf_status): logging.error('Exiting Spark application with error status.') self.sc.cancelAllJobs() self.sc.stop() sys.exit(1) logging.info('Shutting down cluster') for node in (ps_list + eval_list): addr = node['addr'] authkey = node['authkey'] m = TFManager.connect(addr, authkey) q = m.get_queue('control') q.put(None) q.join() while True: time.sleep(5) st = self.sc.statusTracker() jobs = st.getActiveJobsIds() if (len(jobs) == 0): break
Stops the distributed TensorFlow cluster. For InputMode.SPARK, this will be executed AFTER the `TFCluster.train()` or `TFCluster.inference()` method completes. For InputMode.TENSORFLOW, this will be executed IMMEDIATELY after `TFCluster.run()` and will wait until the TF worker nodes complete. Args: :ssc: *For Streaming applications only*. Spark StreamingContext :grace_secs: Grace period to wait after all executors have completed their tasks before terminating the Spark application, e.g. to allow the chief worker to perform any final/cleanup duties like exporting or evaluating the model. Default is 0. :timeout: Time in seconds to wait for TF cluster to complete before terminating the Spark application. This can be useful if the TF code hangs for any reason. Default is 3 days. Use -1 to disable timeout.
codesearchnet
def to_tensor_shape(spec):
    if spec.ndim is None and spec.shape is None:
        return tensor_shape.TensorShape(None)
    elif spec.shape is not None:
        return tensor_shape.TensorShape(spec.shape)
    else:
        shape = [None] * spec.ndim
        for a in spec.axes:
            shape[a] = spec.axes[a]
        return tensor_shape.TensorShape(shape)
Returns a tf.TensorShape object that matches the shape specifications. If the InputSpec's shape or ndim is defined, this method will return a fully or partially-known shape. Otherwise, the returned TensorShape is None. Args: spec: an InputSpec object. Returns: a tf.TensorShape object
github-repos
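A hedged example for the to_tensor_shape entry above, assuming spec behaves like a Keras InputSpec exposing ndim, shape and axes attributes.

from tensorflow.keras.layers import InputSpec

to_tensor_shape(InputSpec(ndim=3, axes={-1: 32}))    # -> TensorShape([None, None, 32])
to_tensor_shape(InputSpec(shape=(None, 28, 28, 1)))  # -> TensorShape([None, 28, 28, 1])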
def verify_account(self, email_address): request = self._get_request() resp = request.post(self.ACCOUNT_VERIFY_URL, {'email_address': email_address}) return ('account' in resp)
Verify whether a HelloSign Account exists Args: email_address (str): Email address for the account to verify Returns: True or False
codesearchnet
def getValue(self, unit=None): if (unit or self.unit): r = (float((self.value * UnitToValue(self.unit))) / UnitToValue(unit)) return (int(round(r)) if isinstance(self.value, int) else r) return self.value
Return the value of the feature. If the unit is specified and the feature has a unit, the value is converted Args: - unit(str,optional): A unit to convert the current feature value ('B','K','M','G')
codesearchnet
def update_z(self, z, indices=None):
    z = _make_np_bool(z)
    if indices is None:
        if len(self._z) != len(z):
            raise QiskitError("During updating whole z, you can not "
                              "change the number of qubits.")
        self._z = z
    else:
        if not isinstance(indices, list) and not isinstance(indices, np.ndarray):
            indices = [indices]
        for p, idx in enumerate(indices):
            self._z[idx] = z[p]
    return self
Update partial or entire z. Args: z (numpy.ndarray or list): to-be-updated z indices (numpy.ndarray or list or optional): to-be-updated qubit indices Returns: Pauli: self Raises: QiskitError: when updating whole z, the number of qubits must be the same.
juraj-google-style
def ResultCollectionForFID(cls, flow_id): if not isinstance(flow_id, rdfvalue.RDFURN): flow_id = rdfvalue.RDFURN(flow_id) return sequential_collection.GeneralIndexedCollection( flow_id.Add(RESULTS_SUFFIX))
Returns the ResultCollection for the flow with a given flow_id. Args: flow_id: The id of the flow, a RDFURN of the form aff4:/flows/F:123456. Returns: The collection containing the results for the flow identified by the id.
juraj-google-style
def __call__(self, shape, dtype=None): dtype = standardize_dtype(dtype) return ops.ones(shape, dtype=dtype)
Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are supported. If not specified, `keras.backend.floatx()` is used, which default to `float32` unless you configured it otherwise (via `keras.backend.set_floatx(float_dtype)`).
github-repos
def __init__(self, selector, comparison=None, argument=None): super(Constraint, self).__init__() self.selector = selector if comparison and COMPARISON_COMP.match(comparison) is None: raise FiqlObjectException( "'%s' is not a valid FIQL comparison" % comparison) self.comparison = comparison self.argument = argument
Initialize instance of ``Constraint``. Args: selector (string): URL decoded constraint ``selector``. comparison (string, optional): Parsed/mapped ``comparison`` operator. Defaults to ``None``. argument (string, optional): URL decoded constraint ``argument``. Defaults to ``None``. Raises: FiqlObjectException: Not a valid FIQL comparison.
juraj-google-style
def write_transform_artifacts(self, transform_fn, location): return transform_fn | 'Write Transform Artifacts' >> transform_fn_io.WriteTransformFn(location)
Write transform artifacts to the given location. Args: transform_fn: A transform_fn object. location: A location to write the artifacts. Returns: A PCollection of WriteTransformFn writing a TF transform graph.
github-repos
def get_config(self, name, default=_MISSING): value = self._adapter.get_config(name, default) if value is _MISSING: raise ArgumentError("Config value did not exist", name=name) return value
Get a config value from this adapter by name Args: name (string): The name of the config variable default (object): The default value to return if config is not found Returns: object: the value associated with the name Raises: ArgumentError: if the name is not found and no default is supplied
juraj-google-style
def _attempt_slice_retry(self, shard_state, tstate):
    if (shard_state.slice_retries + 1 <
            parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS):
        logging.warning(
            "Slice %s %s failed for the %s of up to %s attempts "
            "(%s of %s taskqueue execution attempts). "
            "Will retry now.",
            tstate.shard_id,
            tstate.slice_id,
            shard_state.slice_retries + 1,
            parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS,
            self.task_retry_count() + 1,
            parameters.config.TASK_MAX_ATTEMPTS)
        sys.exc_clear()
        self._try_free_lease(shard_state, slice_retry=True)
        return self._TASK_DIRECTIVE.RETRY_SLICE

    if parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS > 0:
        logging.warning("Slice attempt %s exceeded %s max attempts.",
                        self.task_retry_count() + 1,
                        parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS)
    return self._TASK_DIRECTIVE.RETRY_SHARD
Attempt to retry this slice. This method may modify shard_state and tstate to prepare for retry or fail. Args: shard_state: model.ShardState for current shard. tstate: model.TransientShardState for current shard. Returns: A _TASK_DIRECTIVE enum. RETRY_SLICE if slice should be retried. RETRY_SHARD if shard retry should be attempted.
juraj-google-style
def _to_proto_sparse_tensor(sparse_tensor, nested_proto, process_leafs, already_processed): already_processed.add(id(sparse_tensor)) nested_proto.named_tuple.name = _SPARSE_TENSOR_NAME for str_key in _SPARSE_TENSOR_FIELD: tensor = getattr(sparse_tensor, str_key) nested_proto.named_tuple.map[str_key].value = process_leafs(tensor)
Serializes a `tf.SparseTensor` into `nested_proto`. Args: sparse_tensor: An instance of `tf.SparseTensor`. nested_proto: A `module_pb2.NestedData` instance to be filled from `sparse_tensor`. process_leafs: A function to be applied to the leaf valued of the nested structure. already_processed: Set of already processed objects (used to avoid infinite recursion).
juraj-google-style
def _html_checker(job_var, interval, status, header, _interval_set=False):
    job_status = job_var.status()
    job_status_name = job_status.name
    job_status_msg = job_status.value
    status.value = header % (job_status_msg)
    while job_status_name not in ['DONE', 'CANCELLED']:
        time.sleep(interval)
        job_status = job_var.status()
        job_status_name = job_status.name
        job_status_msg = job_status.value
        if job_status_name == 'ERROR':
            break
        else:
            if job_status_name == 'QUEUED':
                job_status_msg += ' (%s)' % job_var.queue_position()
                if not _interval_set:
                    interval = max(job_var.queue_position(), 2)
            else:
                if not _interval_set:
                    interval = 2
            status.value = header % (job_status_msg)
    status.value = header % (job_status_msg)
Internal function that updates the status of a HTML job monitor. Args: job_var (BaseJob): The job to keep track of. interval (int): The status check interval status (widget): HTML ipywidget for output ot screen header (str): String representing HTML code for status. _interval_set (bool): Was interval set by user?
juraj-google-style
def _ParseVValueString(self, parser_mediator, data, user_information_descriptor):
    data_start_offset = (
        user_information_descriptor.offset + self._V_VALUE_STRINGS_OFFSET)
    data_end_offset = data_start_offset + user_information_descriptor.size
    descriptor_data = data[data_start_offset:data_end_offset]

    try:
        username = descriptor_data.decode('utf-16-le')
    except (UnicodeDecodeError, UnicodeEncodeError) as exception:
        username = descriptor_data.decode('utf-16-le', errors='replace')
        parser_mediator.ProduceExtractionWarning(
            'unable to decode V value string with error: {0!s}. Characters that '
            'cannot be decoded will be replaced with "?" or "\\ufffd".'.format(
                exception))

    return username
Parses a V value string. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. data (bytes): Windows Registry V value data. user_information_descriptor (user_information_descriptor): V value user information descriptor. Returns: str: string value stored in the Windows Registry V value data.
codesearchnet
def parse_GDS_columns(lines, subsets):
    data = []
    index = []
    for line in lines:
        line = line.rstrip()
        # The startswith() argument was lost to comment stripping during
        # extraction; GDS column description lines begin with "#".
        if line.startswith("#"):
            tmp = __parse_entry(line)
            data.append(tmp[1])
            index.append(tmp[0])

    df = DataFrame(data, index=index, columns=['description'])
    subset_ids = defaultdict(dict)
    for subsetname, subset in iteritems(subsets):
        for expid in subset.metadata["sample_id"][0].split(","):
            try:
                subset_type = subset.get_type()
                subset_ids[subset_type][expid] = \
                    subset.metadata['description'][0]
            except Exception as err:
                logger.error("Error processing subsets: %s for subset %s" % (
                    subset.get_type(), subsetname))
    return df.join(DataFrame(subset_ids))
Parse list of line with columns description from SOFT file of GDS. Args: lines (:obj:`Iterable`): Iterator over the lines. subsets (:obj:`dict` of :obj:`GEOparse.GDSSubset`): Subsets to use. Returns: :obj:`pandas.DataFrame`: Columns description.
juraj-google-style
def send(self): xml_request = self.get_xml_request() if (self.connection._debug == 1): print(xml_request) Debug.warn(('-' * 25)) Debug.warn(self._command) Debug.dump('doc: \n', self._documents) Debug.dump('cont: \n', self._content) Debug.dump('nest cont \n', self._nested_content) Debug.dump('Request: \n', xml_request) response = _handle_response(self.connection._send_request(xml_request), self._command, self.connection.document_id_xpath) return response
Send an XML string version of content through the connection. Returns: Response object.
codesearchnet
def __init__(self, jss): self.jss = jss self.url = "%s/casper.jxml" % self.jss.base_url self.auth = urllib.urlencode({"username": self.jss.user, "password": self.jss.password}) super(Casper, self).__init__(tag="Casper") self.update()
Initialize a Casper object. Args: jss: A JSS object to request the casper page from.
juraj-google-style
def prepare_capstone(syntax=AsmSyntax.att, target=None): if (not HAVE_CAPSTONE): raise NotImplementedError('pwnypack requires capstone to disassemble to AT&T and Intel syntax') if (target is None): target = pwnypack.target.target if (target.arch == pwnypack.target.Target.Arch.x86): if (target.bits is pwnypack.target.Target.Bits.bits_32): md = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_32) else: md = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_64) elif (target.arch == pwnypack.target.Target.Arch.arm): mode = 0 if (target.bits is pwnypack.target.Target.Bits.bits_32): arch = capstone.CS_ARCH_ARM if (target.mode and pwnypack.target.Target.Mode.arm_thumb): mode = capstone.CS_MODE_THUMB else: mode = capstone.CS_MODE_ARM if (target.mode and pwnypack.target.Target.Mode.arm_m_class): mode |= capstone.CS_MODE_MCLASS if (target.mode and pwnypack.target.Target.Mode.arm_v8): mode |= capstone.CS_MODE_V8 else: arch = capstone.CS_ARCH_ARM64 if (target.endian is pwnypack.target.Target.Endian.little): mode |= capstone.CS_MODE_LITTLE_ENDIAN else: mode |= capstone.CS_MODE_BIG_ENDIAN md = capstone.Cs(arch, mode) else: raise NotImplementedError('Only x86 is currently supported.') md.skipdata = True if (syntax is AsmSyntax.att): md.syntax = capstone.CS_OPT_SYNTAX_ATT elif (syntax is AsmSyntax.intel): md.skipdata_setup(('db', None, None)) else: raise NotImplementedError('capstone engine only implements AT&T and Intel syntax.') return md
Prepare a capstone disassembler instance for a given target and syntax. Args: syntax(AsmSyntax): The assembler syntax (Intel or AT&T). target(~pwnypack.target.Target): The target to create a disassembler instance for. The global target is used if this argument is ``None``. Returns: An instance of the capstone disassembler. Raises: NotImplementedError: If the specified target isn't supported.
codesearchnet
def initialize(self, request, response):
    super(TaskQueueHandler, self).initialize(request, response)

    if "X-AppEngine-QueueName" not in self.request.headers:
        logging.error(self.request.headers)
        logging.error("Task queue handler received non-task queue request")
        self.response.set_status(
            403, message="Task queue handler received non-task queue request")
        return

    if self.task_retry_count() + 1 > parameters.config.TASK_MAX_ATTEMPTS:
        logging.error(
            "Task %s has been attempted %s times. Dropping it permanently.",
            self.request.headers["X-AppEngine-TaskName"],
            self.task_retry_count() + 1)
        self._drop_gracefully()
        return

    try:
        self._preprocess()
        self._preprocess_success = True
    except:
        self._preprocess_success = False
        logging.error(
            "Preprocess task %s failed. Dropping it permanently.",
            self.request.headers["X-AppEngine-TaskName"])
        self._drop_gracefully()
Initialize. 1. call webapp init. 2. check request is indeed from taskqueue. 3. check the task has not been retried too many times. 4. run handler specific processing logic. 5. run error handling logic if precessing failed. Args: request: a webapp.Request instance. response: a webapp.Response instance.
juraj-google-style
def _run_function_for_calibration_eager_mode(func: wrap_function.WrappedFunction, representative_dataset: rd.RepresentativeDataset) -> None: _, keyword_args = func.structured_input_signature sample_validator = _create_sample_validator(expected_input_keys=keyword_args.keys()) for sample in map(sample_validator, _log_sample_num_for_calibration(representative_dataset)): func_kwargs = _convert_values_to_tf_tensors(sample) func(**func_kwargs)
Runs the representative dataset through a function for calibration. NOTE: This is intended to be run in eager mode (TF2). Args: func: The function to run the representative samples through. representative_dataset: Representative dataset used for calibration. The input keys and input values of the representative samples should match the keyword arguments of `func`.
github-repos
def register(self, method_name: str, func: Union[def_function.Function, tf_function.ConcreteFunction]): raise NotImplementedError('Please use create_server method to create aconcrete subclass of Server.')
Method for registering tf.function on server. Registered methods can be invoked remotely from clients. Args: method_name: Name of the tf.function. Clients use this method_name to make RPCs. func: A `tf.function` or ConcreteFunction to register.
github-repos
def __deepcopy__(self, memo): self._copy_counter += 1 new_dag = Dag('{}:{}'.format(self._name, self._copy_counter), autostart=self._autostart, queue=self._queue) new_dag._schema = deepcopy(self._schema, memo) return new_dag
Create a copy of the dag object. This method keeps track of the number of copies that have been made. The number is appended to the name of the copy. Args: memo (dict): a dictionary that keeps track of the objects that have already been copied. Returns: Dag: a copy of the dag object
juraj-google-style
def get_index_mapping(index): mappings_dir = get_setting('mappings_dir') filename = ('%s.json' % index) path = os.path.join(mappings_dir, filename) with open(path, 'r') as f: return json.load(f)
Return the JSON mapping file for an index. Mappings are stored as JSON files in the mappings subdirectory of this app. They must be saved as {{index}}.json. Args: index: string, the name of the index to look for.
codesearchnet
def FindCheckMacro(line):
    for macro in _CHECK_MACROS:
        i = line.find(macro)
        if i >= 0:
            matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
            if not matched:
                continue
            return (macro, len(matched.group(1)))
    return (None, -1)
Find a replaceable CHECK-like macro. Args: line: line to search on. Returns: (macro name, start position), or (None, -1) if no replaceable macro is found.
juraj-google-style
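A behavior sketch for the FindCheckMacro entry above, assuming the cpplint-style module globals _CHECK_MACROS (containing 'CHECK', 'DCHECK', ...) and Match (a thin re.match wrapper).

FindCheckMacro('  CHECK(x == 42);')       # -> ('CHECK', 7), macro name and the index of '('
FindCheckMacro('  int check_count = 0;')  # -> (None, -1), no replaceable macro call found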
def _Open(self, path_spec=None, mode='rb'):
    if not path_spec:
        raise ValueError('Missing path specification.')

    file_system = resolver.Resolver.OpenFileSystem(
        path_spec, resolver_context=self._resolver_context)

    file_entry = file_system.GetFileEntryByPathSpec(path_spec)
    if not file_entry:
        file_system.Close()
        raise IOError('Unable to retrieve file entry.')

    self._file_system = file_system
    self._cpio_archive_file = self._file_system.GetCPIOArchiveFile()
    self._cpio_archive_file_entry = file_entry.GetCPIOArchiveFileEntry()
    self._current_offset = 0
Opens the file-like object defined by path specification. Args: path_spec (Optional[PathSpec]): path specification. mode (Optional[str]): file access mode. Raises: AccessError: if the access to open the file was denied. IOError: if the file-like object could not be opened. OSError: if the file-like object could not be opened. PathSpecError: if the path specification is incorrect. ValueError: if the path specification is invalid.
juraj-google-style
def GetTableView(cls, format_type, column_names=None, title=None): view_class = cls._TABLE_VIEW_FORMAT_CLASSES.get(format_type, None) if not view_class: raise ValueError('Unsupported format type: {0:s}'.format(format_type)) return view_class(column_names=column_names, title=title)
Retrieves a table view. Args: format_type (str): table view format type. column_names (Optional[list[str]]): column names. title (Optional[str]): title. Returns: BaseTableView: table view. Raises: ValueError: if the format type is not supported.
juraj-google-style
def _ExtractResponseSummaryFields(document):
    headers = document.childAtPath('Envelope/Header/ResponseHeader')
    body = document.childAtPath('Envelope/Body')
    summary_fields = {}

    if headers is not None:
        summary_fields['requestId'] = headers.getChild('requestId').text
        summary_fields['responseTime'] = headers.getChild('responseTime').text
        service_name = headers.getChild('serviceName')
        if service_name is not None:
            summary_fields['serviceName'] = service_name.text
        method_name = headers.getChild('methodName')
        if method_name is not None:
            summary_fields['methodName'] = method_name.text
        operations = headers.getChild('operations')
        if operations is not None:
            summary_fields['operations'] = operations.text

    if body is not None:
        fault = body.getChild('Fault')
        if fault is not None:
            summary_fields['isFault'] = True
            summary_fields['faultMessage'] = fault.getChild('faultstring').text[:16000]
        else:
            summary_fields['isFault'] = False

    return summary_fields
Extract logging fields from the response's suds.sax.document.Document. Args: document: A suds.sax.document.Document instance containing the parsed API response for a given API request. Returns: A dict mapping logging field names to their corresponding value.
codesearchnet
def __call__(self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]]=None, xpaths: Optional[Union[List[List[int]], List[List[List[int]]]]]=None, node_labels: Optional[Union[List[int], List[List[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: def _is_valid_text_input(t): if isinstance(t, str): return True elif isinstance(t, (list, tuple)): if len(t) == 0: return True elif isinstance(t[0], str): return True elif isinstance(t[0], (list, tuple)): return len(t[0]) == 0 or isinstance(t[0][0], str) else: return False else: return False if text_pair is not None: if not _is_valid_text_input(text): raise ValueError('text input must of type `str` (single example) or `List[str]` (batch of examples). ') if not isinstance(text_pair, (list, tuple)): raise ValueError('Nodes must be of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).') elif not isinstance(text, (list, tuple)): raise ValueError('Nodes must be of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).') if text_pair is not None: is_batched = isinstance(text, (list, tuple)) else: is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)) nodes = text if text_pair is None else text_pair assert xpaths is not None, 'You must provide corresponding xpaths' if is_batched: assert len(nodes) == len(xpaths), 'You must provide nodes and xpaths for an equal amount of examples' for nodes_example, xpaths_example in zip(nodes, xpaths): assert len(nodes_example) == len(xpaths_example), 'You must provide as many nodes as there are xpaths' else: assert len(nodes) == len(xpaths), 'You must provide as many nodes as there are xpaths' if is_batched: if text_pair is not None and len(text) != len(text_pair): raise ValueError(f'batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}.') batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text is_pair = bool(text_pair is not None) return self.batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, xpaths=xpaths, node_labels=node_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) else: return self.encode_plus(text=text, text_pair=text_pair, xpaths=xpaths, node_labels=node_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, 
max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with nodes, xpaths and optional labels. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings (words of a single example or questions of a batch of examples) or a list of list of strings (batch of words). text_pair (`List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence should be a list of strings (pretokenized string). xpaths (`List[List[int]]`, `List[List[List[int]]]`): Node-level xpaths. Each bounding box should be normalized to be on a 0-1000 scale. node_labels (`List[int]`, `List[List[int]]`, *optional*): Node-level integer labels (for token classification tasks).
github-repos
def load_csv(path):
    with open(path) as f:
        line = f.readline().strip()
    X = np.loadtxt(path, delimiter=',',
                   skiprows=0 if is_number(line.split(',')[0]) else 1)
    y = np.array(X[:, 0]).flatten()
    X = X[:, 1:]
    return X, y
Load data from a CSV file. Args: path (str): A path to the CSV format file containing data. dense (boolean): An optional variable indicating if the return matrix should be dense. By default, it is false. Returns: Data matrix X and target vector y
juraj-google-style
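A usage sketch for the load_csv entry above. It assumes the module's is_number helper and that column 0 holds the target; with a header row present, the first cell is non-numeric, so one row is skipped.

with open('/tmp/toy.csv', 'w') as f:  # hypothetical path
    f.write('label,f1,f2\n1,0.5,2.0\n0,1.5,3.0\n')

X, y = load_csv('/tmp/toy.csv')
# X -> array([[0.5, 2.0], [1.5, 3.0]]), y -> array([1.0, 0.0])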
def _loadCSVDataFrame(self):
    if self._filename and os.path.exists(self._filename):
        encoding = self._encodingKey or 'UTF_8'
        try:
            dataFrame = superReadFile(self._filename,
                                      sep=self._delimiter,
                                      first_codec=encoding,
                                      header=self._header)
            dataFrame = dataFrame.apply(fillNoneValues)
            dataFrame = dataFrame.apply(convertTimestamps)
        except Exception as err:
            self.updateStatusBar(str(err))
            print(err)
            return pandas.DataFrame()
        self.updateStatusBar('Preview generated.')
        return dataFrame
    self.updateStatusBar('File could not be read.')
    return pandas.DataFrame()
Loads the given csv file with pandas and generate a new dataframe. The file will be loaded with the configured encoding, delimiter and header.git If any execptions will occur, an empty Dataframe is generated and a message will appear in the status bar. Returns: pandas.DataFrame: A dataframe containing all the available information of the csv file.
codesearchnet
def default_sequence_length(self) -> int: return OnnxConfig.default_fixed_sequence
The default sequence length to use if no other indication Returns: Integer > 0
github-repos
def create_zeros_slot(primary, name, dtype=None, colocate_with_primary=True, *,
                      copy_xla_sharding=False):
    if dtype is None:
        dtype = primary.dtype
    slot_shape = primary.get_shape()
    if slot_shape.is_fully_defined():
        initializer = init_ops.zeros_initializer()
        return create_slot_with_initializer(
            primary, initializer, slot_shape, dtype, name,
            colocate_with_primary=colocate_with_primary,
            copy_xla_sharding=copy_xla_sharding)
    else:
        if isinstance(primary, variables.Variable):
            slot_shape = array_ops.shape(
                cond.cond(variable_v1.is_variable_initialized(primary),
                          primary.read_value,
                          lambda: primary.initial_value))
        else:
            slot_shape = array_ops.shape(primary)
        val = array_ops.zeros(slot_shape, dtype=dtype)
        return create_slot(primary, val, name,
                           colocate_with_primary=colocate_with_primary,
                           copy_xla_sharding=copy_xla_sharding)
Create a slot initialized to 0 with same shape as the primary object. Args: primary: The primary `Variable` or `Tensor`. name: Name to use for the slot variable. dtype: Type of the slot variable. Defaults to the type of `primary`. colocate_with_primary: Boolean. If True the slot is located on the same device as `primary`. copy_xla_sharding: Boolean. If True also copies XLA sharding from primary. Returns: A `Variable` object.
github-repos
def _update_object(object_key: str, event: Event): events_list_key = _keys.events_list(object_key) events_data_key = _keys.events_data(object_key) event_dict = deepcopy(event.config) event_dict.pop('id') DB.append_to_list(events_list_key, event.id, pipeline=True) DB.set_hash_value(events_data_key, event.id, json.dumps(event_dict), pipeline=True)
Update the events list and events data for the object. - Adds the event Id to the list of events for the object. - Adds the event data to the hash of object event data keyed by event id. Args: object_key (str): Key of the object being updated. event (Event): Event object
codesearchnet
def get_coordination_numbers(d):
    structure = Structure.from_dict(d["output"]["crystal"])
    f = VoronoiNN()
    cn = []
    for i, s in enumerate(structure.sites):
        try:
            n = f.get_cn(structure, i)
            number = int(round(n))
            cn.append({"site": s.as_dict(), "coordination": number})
        except Exception:
            logger.error("Unable to parse coordination errors")
    return cn
Helper method to get the coordination number of all sites in the final structure from a run. Args: d: Run dict generated by VaspToDbTaskDrone. Returns: Coordination numbers as a list of dict of [{"site": site_dict, "coordination": number}, ...].
juraj-google-style
def _static_value_provider_of(value_type): def _f(value): _f.__name__ = value_type.__name__ return StaticValueProvider(value_type, value) return _f
Helper function to plug a ValueProvider into argparse. Args: value_type: the type of the value. Since the type param of argparse's add_argument will always be ValueProvider, we need to preserve the type of the actual value. Returns: A partially constructed StaticValueProvider in the form of a function.
github-repos
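A hedged sketch of how the _static_value_provider_of entry above plugs into argparse, mirroring how Beam's PipelineOptions wires value-provider arguments.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--count', type=_static_value_provider_of(int), default=None)

args = parser.parse_args(['--count', '3'])
# args.count is a StaticValueProvider wrapping int; args.count.get() -> 3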
def get_foreign_id(self, idspace='musicbrainz', cache=True): if not (cache and ('foreign_ids' in self.cache) and filter(lambda d: d.get('catalog') == idspace, self.cache['foreign_ids'])): response = self.get_attribute('profile', bucket=['id:'+idspace]) foreign_ids = response['artist'].get("foreign_ids", []) self.cache['foreign_ids'] = self.cache.get('foreign_ids', []) + foreign_ids cval = filter(lambda d: d.get('catalog') == util.map_idspace(idspace), self.cache.get('foreign_ids')) return cval[0].get('foreign_id') if cval else None
Get the foreign id for this artist for a specific id space Args: Kwargs: idspace (str): A string indicating the idspace to fetch a foreign id for. Returns: A foreign ID string Example: >>> a = artist.Artist('fabulous') >>> a.get_foreign_id('7digital') u'7digital:artist:186042' >>>
juraj-google-style
def list_files(base_path, ext=None):
    if not os.path.isdir(base_path):
        raise ValueError("Path does not exist: %s" % base_path)
    files = []
    for entry in os.listdir(base_path):
        if os.path.isfile(os.path.join(base_path, entry)):
            _, entry_ext = os.path.splitext(entry)
            entry_ext = entry_ext.lstrip('.')
            if (ext is None) or \
               (isinstance(ext, str) and entry_ext == ext) or \
               (isinstance(ext, list) and entry_ext in ext):
                files.append(entry)
    return files
Lists all of the files in the given base directory, optionally only including whose extension(s) match the ext string/list of strings. This is non-recursive. Args: base_path: The directory in which to search. ext: The extension(s) to match in the given directory. If None, this matches all file extensions. Returns: A list of filenames relative to the given base path.
juraj-google-style
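A usage sketch for the list_files entry above; the directory contents are hypothetical, and subdirectories are excluded because only regular files are returned.

# Given /tmp/demo containing a.py, b.txt, notes.md and a subdirectory build/
list_files('/tmp/demo')                    # -> ['a.py', 'b.txt', 'notes.md'] (order per os.listdir)
list_files('/tmp/demo', ext='py')          # -> ['a.py']
list_files('/tmp/demo', ext=['py', 'md'])  # -> ['a.py', 'notes.md']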
def _save_model_and_copy_assets(exported_model: exported_model_pb2.ExportedModel, src_saved_model_path: str, dst_saved_model_path: str, signature_def_map: Mapping[str, meta_graph_pb2.SignatureDef], tags: Collection[str]) -> bool: save_model.save_model_v1(exported_model.graph_def, dst_saved_model_path, signature_def_map, tags, init_op_name=exported_model.init_node_name, saver_def=_get_saver_def_or_none(exported_model), checkpoint_dir=exported_model.checkpoint_dir, function_aliases=exported_model.function_aliases, asset_file_defs=exported_model.asset_file_defs) _copy_assets(src_saved_model_path, dst_saved_model_path) return True
Saves the model and copies the assets from the source model. Args: exported_model: ExportedModel to save. src_saved_model_path: Path to the source SavedModel. This will be used to copy the asset files to `dst_saved_model_path`. dst_saved_model_path: Destination path to save the exported model. signature_def_map: Signature key -> SignatureDef mapping. tags: Tags to attach to the saved MetaGraphDef. Returns: `True` upon successfully saving the model.
github-repos
def __init__(self, checkpoint_dir: Text, save_secs: Optional[int]=None, save_steps: Optional[int]=None, saver: Optional[saver_lib.Saver]=None, checkpoint_basename: Text='model.ckpt', scaffold: Optional[monitored_session.Scaffold]=None, listeners: Optional[List[basic_session_run_hooks.CheckpointSaverListener]]=None): save_path = os.path.join(checkpoint_dir, checkpoint_basename) logging.info('Create AsyncCheckpointSaverHook saving to path\n%s', save_path) if listeners: logging.info(' with %d listener(s).', len(listeners)) if saver is not None and scaffold is not None: raise ValueError('You cannot provide both saver and scaffold.') self._saver = saver self._save_thread = None self._write_graph_thread = None self._checkpoint_dir = checkpoint_dir self._save_path = save_path self._scaffold = scaffold self._timer = basic_session_run_hooks.SecondOrStepTimer(every_secs=save_secs, every_steps=save_steps) self._listeners = listeners or [] self._steps_per_run = 1 self._summary_writer = None self._global_step_tensor = None self._last_checkpoint_step = None global _END_TIME_OF_LAST_WRITE with _END_TIME_OF_LAST_WRITE_LOCK: if _END_TIME_OF_LAST_WRITE is None: _END_TIME_OF_LAST_WRITE = time.time()
Initializes a `CheckpointSaverHook`. Args: checkpoint_dir: `str`, base directory for the checkpoint files. save_secs: `int`, save every N secs. save_steps: `int`, save every N steps. saver: `Saver` object, used for saving. checkpoint_basename: `str`, base name for the checkpoint files. scaffold: `Scaffold`, use to get saver object. listeners: List of `CheckpointSaverListener` subclass instances. Used for callbacks that run immediately before or after this hook saves the checkpoint. Raises: ValueError: One of `save_steps` or `save_secs` should be set. ValueError: At most one of `saver` or `scaffold` should be set.
github-repos
def helper_delete(access_token, oid, path): full_path = ''.join([path, "('", oid, "')"]) full_path_encoded = urllib.parse.quote(full_path, safe='') endpoint = ''.join([ams_rest_endpoint, full_path_encoded]) return do_ams_delete(endpoint, full_path_encoded, access_token)
Helper Function to delete a Object at a URL path. Args: access_token (str): A valid Azure authentication token. oid (str): An OID. path (str): A URL Path. Returns: HTTP response. JSON body.
codesearchnet
def to(self, device: Union[str, 'torch.device'], *, non_blocking: bool=False) -> 'BatchEncoding': requires_backends(self, ['torch']) import torch if isinstance(device, str) or is_torch_device(device) or isinstance(device, int): self.data = {k: v.to(device=device, non_blocking=non_blocking) if isinstance(v, torch.Tensor) else v for k, v in self.data.items()} else: logger.warning(f'Attempting to cast a BatchEncoding to type {str(device)}. This is not supported.') return self
Send all values to device by calling `v.to(device, non_blocking=non_blocking)` (PyTorch only). Args: device (`str` or `torch.device`): The device to put the tensors on. non_blocking (`bool`): Whether to perform the copy asynchronously. Returns: [`BatchEncoding`]: The same instance after modification.
github-repos
def format(self, template='{basename}{range}{padding}{extension}'): inverted = ((self.invertedFrameRange() or '') if ('{inverted}' in template) else '') return template.format(basename=self.basename(), extension=self.extension(), start=self.start(), end=self.end(), length=len(self), padding=self.padding(), range=(self.frameRange() or ''), inverted=inverted, dirname=self.dirname())
Return the file sequence as a formatted string according to the given template. Utilizes the python string format syntax. Available keys include: * basename - the basename of the sequence. * extension - the file extension of the sequence. * start - the start frame. * end - the end frame. * length - the length of the frame range. * padding - the detecting amount of padding. * inverted - the inverted frame range. (returns "" if none) * dirname - the directory name. If asking for the inverted range value, and the new inverted range exceeded :const:`fileseq.constants.MAX_FRAME_SIZE`, a ``MaxSizeException`` will be raised. Args: template (str): Returns: str: Raises: :class:`fileseq.exceptions.MaxSizeException`: If frame size exceeds :const:`fileseq.constants.MAX_FRAME_SIZE`
codesearchnet
def chart_type(self, value): if value not in self._allowed_charts: raise ValueError("Not a valid chart type") self.options["chart_type"] = value
Set the MetricsGraphics chart type. Allowed charts are: line, histogram, point, and bar Args: value (str): chart type. Raises: ValueError: Not a valid chart type.
juraj-google-style
def _init_sampler(tc, init, num): def func(): with tc.test_session(): return init([num]).eval() return func
Returns a func to generate a random tensor of shape [num]. Args: tc: An instance of TensorFlowTestCase. init: An Initializer that generates a tensor of a given shape num: Size of 1D tensor to create. Returns: Function to generate a random tensor.
github-repos
def dice_loss(inputs, targets, num_boxes): inputs = inputs.sigmoid() inputs = inputs.flatten(1) numerator = 2 * (inputs * targets).sum(1) denominator = inputs.sum(-1) + targets.sum(-1) loss = 1 - (numerator + 1) / (denominator + 1) return loss.sum() / num_boxes
Compute the DICE loss, similar to generalized IOU for masks Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class).
github-repos
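A small PyTorch sketch exercising the dice_loss entry above with random logits and binary mask targets.

import torch

logits = torch.randn(2, 16)                     # 2 predicted masks, flattened
targets = torch.randint(0, 2, (2, 16)).float()  # matching binary ground-truth masks

loss = dice_loss(logits, targets, num_boxes=2)  # scalar tensor, averaged over the 2 boxes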
def build(cls, **kwargs): return cls.add(cls.new(**kwargs), commit=False)
Similar to create. But the transaction is not committed Args: **kwargs : The keyword arguments for the constructor Returns: A model instance which has been added to db session. But session transaction has not been committed yet.
juraj-google-style
def Delete(self, queue, tasks, mutation_pool=None): if queue is None: return if mutation_pool is None: raise ValueError("Mutation pool can't be none.") mutation_pool.QueueDeleteTasks(queue, tasks)
Removes the tasks from the queue. Note that tasks can already have been removed. It is not an error to re-delete an already deleted task. Args: queue: A queue to clear. tasks: A list of tasks to remove. Tasks may be Task() instances or integers representing the task_id. mutation_pool: A MutationPool object to schedule deletions on. Raises: ValueError: Mutation pool was not passed in.
juraj-google-style
def __init__(self, key_dtype, value_dtype): self._key_dtype = dtypes.as_dtype(key_dtype) self._value_dtype = dtypes.as_dtype(value_dtype)
Construct a table initializer object. Args: key_dtype: Type of the table keys. value_dtype: Type of the table values.
github-repos
def WriteBlobsWithUnknownHashes( self, blobs_data): blobs_ids = [rdf_objects.BlobID.FromBlobData(d) for d in blobs_data] self.WriteBlobs(dict(zip(blobs_ids, blobs_data))) return blobs_ids
Calculates hash ids and writes contents of given data blobs. Args: blobs_data: An iterable of bytes. Returns: A list of rdf_objects.BlobID objects with each blob id corresponding to an element in the original blobs_data argument.
juraj-google-style
def MergeAllSummaries(period=0, run_alone=False, key=None): if (key is None): key = tf.GraphKeys.SUMMARIES period = int(period) if run_alone: return MergeAllSummaries_RunAlone(period, key) else: return MergeAllSummaries_RunWithOp(period, key)
This callback is enabled by default. Evaluate all summaries by ``tf.summary.merge_all``, and write them to logs. Args: period (int): by default the callback summarizes once every epoch. This option (if not set to 0) makes it additionally summarize every ``period`` steps. run_alone (bool): whether to evaluate the summaries alone. If True, summaries will be evaluated after each epoch alone. If False, summaries will be evaluated together with the `sess.run` calls, in the last step of each epoch. For :class:`SimpleTrainer`, it needs to be False because summary may depend on inputs. key (str): the collection of summary tensors. Same as in ``tf.summary.merge_all``. Default is ``tf.GraphKeys.SUMMARIES``.
codesearchnet
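A hedged sketch of how such a callback is typically registered in a tensorpack training configuration; the surrounding setup is assumed and not shown in the source:

# Summarize once per epoch, plus every 100 steps, evaluated together with the sess.run calls.
callbacks = [
    MergeAllSummaries(period=100, run_alone=False),
]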
def _CheckFileEntryType(self, file_entry):
    if not self._file_entry_types:
        return None
    return (
        self._CheckIsDevice(file_entry) or self._CheckIsDirectory(file_entry) or
        self._CheckIsFile(file_entry) or self._CheckIsLink(file_entry) or
        self._CheckIsPipe(file_entry) or self._CheckIsSocket(file_entry))
Checks the file entry against the file entry type find specifications. Args: file_entry (FileEntry): file entry. Returns: bool: True if the file entry matches the find specification, False if not, or None if no file entry type specification is defined.
juraj-google-style
def __init__(self, cell_ctor, *args, **kwargs):
    super(RNNCellWrapper, self).__init__(
        name=kwargs.get("name"),
        custom_getter=kwargs.pop("custom_getter", None))
    with self._enter_variable_scope():
        self._cell = cell_ctor(*args, **kwargs)
Constructs the cell, within this module's variable scope. Args: cell_ctor: Callable that instantiates a `tf.contrib.rnn.RNNCell`. *args: Arguments to pass to `cell_ctor`. **kwargs: Keyword arguments to pass to `cell_ctor`. If `name` is provided, it is passed to `RNNCore.__init__` as well. If `custom_getter` is provided, it is passed to `RNNCore.__init__` but not to `cell_ctor`.
juraj-google-style
def _GetTimestamps(self, olecf_item):
    if (not olecf_item):
        return (None, None)
    try:
        creation_time = olecf_item.get_creation_time_as_integer()
    except OverflowError as exception:
        logger.warning(
            'Unable to read the creation time with error: {0!s}'.format(exception))
        creation_time = 0
    try:
        modification_time = olecf_item.get_modification_time_as_integer()
    except OverflowError as exception:
        logger.warning(
            'Unable to read the modification time with error: {0!s}'.format(exception))
        modification_time = 0
    if ((not creation_time) and (not modification_time)):
        return (None, None)
    if (creation_time == 18446744073709551615):
        creation_time = 0
    return (creation_time, modification_time)
Retrieves the timestamps from an OLECF item. Args: olecf_item (pyolecf.item): OLECF item. Returns: tuple[int, int]: creation and modification FILETIME timestamp.
codesearchnet
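For context, the returned integers are raw FILETIME values (100-nanosecond intervals since 1601-01-01 UTC), which is also why the all-ones 64-bit value 18446744073709551615 is treated as "not set". A small stand-alone conversion sketch, not part of the original module:

import datetime

def filetime_to_datetime(filetime):
    # FILETIME counts 100-nanosecond intervals since 1601-01-01 00:00:00 UTC.
    epoch = datetime.datetime(1601, 1, 1)
    return epoch + datetime.timedelta(microseconds=filetime // 10)

print(filetime_to_datetime(132537600000000000))  # an arbitrary example value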
def design_stat_heating(self, value="Heating"):
    if value is not None:
        try:
            value = str(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type str '
                'for field `design_stat_heating`'.format(value))
        if ',' in value:
            raise ValueError('value should not contain a comma '
                             'for field `design_stat_heating`')
        vals = set()
        vals.add("Heating")
        if value not in vals:
            raise ValueError('value {} is not an accepted value for '
                             'field `design_stat_heating`'.format(value))
    self._design_stat_heating = value
Corresponds to IDD Field `design_stat_heating` Args: value (str): value for IDD Field `design_stat_heating` Accepted values are: - Heating Default value: Heating if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def unpack(self, buff=None, offset=0):
    length = UBInt16()
    length.unpack(buff, offset)
    super().unpack(buff[:offset+length.value], offset)
Unpack *buff* into this object. This method will convert a binary data into a readable value according to the attribute format. Args: buff (bytes): Binary buffer. offset (int): Where to begin unpacking. Raises: :exc:`~.exceptions.UnpackException`: If unpack fails.
juraj-google-style
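The pattern here is a length-prefixed field: read a 16-bit length first, then restrict unpacking to that many bytes. A generic stand-alone illustration with the struct module, not the python-openflow API itself:

import struct

buff = struct.pack('!H5s', 5, b'hello') + b'trailing bytes'
(length,) = struct.unpack_from('!H', buff, 0)
payload = buff[2:2 + length]  # only the declared length is consumed
print(length, payload)        # 5 b'hello'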
def _generate_comparator(cls, field_names):
    field_names = list(field_names)
    reverses = ([1] * len(field_names))
    for (i, field_name) in enumerate(field_names):
        if (field_name[0] == '-'):
            reverses[i] = (- 1)
            field_names[i] = field_name[1:]
    field_names = [f.replace(LOOKUP_SEP, '.') for f in field_names]

    def comparator(i1, i2):
        v1 = attrgetter(*field_names)(i1)
        v2 = attrgetter(*field_names)(i2)
        if (len(field_names) == 1):
            return (cls._cmp(v1, v2) * reverses[0])
        order = multiply_iterables(list(map(cls._cmp, v1, v2)), reverses)
        try:
            return next(dropwhile(__not__, order))
        except StopIteration:
            return 0

    return comparator
Construct a comparator function based on the field names. The comparator returns the first non-zero comparison value. Inputs: field_names (iterable of strings): The field names to sort on. Returns: A comparator function.
codesearchnet
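A self-contained sketch of how such a comparator is usually consumed, via functools.cmp_to_key; the Person records and the descending-age ordering are invented for illustration:

from collections import namedtuple
from functools import cmp_to_key
from operator import attrgetter

Person = namedtuple('Person', ['name', 'age'])
people = [Person('bob', 40), Person('alice', 40), Person('carol', 25)]

# Same idea as above: compare field by field, honor a per-field sign
# (here '-age' then 'name'), and return the first non-zero comparison.
def comparator(a, b):
    for field, sign in (('age', -1), ('name', 1)):
        v1, v2 = attrgetter(field)(a), attrgetter(field)(b)
        result = ((v1 > v2) - (v1 < v2)) * sign
        if result:
            return result
    return 0

print(sorted(people, key=cmp_to_key(comparator)))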
def get_nmr_prize_pool(self, round_num=0, tournament=1):
    tournaments = self.get_competitions(tournament)
    tournaments.sort(key=(lambda t: t['number']))
    if (round_num == 0):
        t = tournaments[(- 1)]
    else:
        tournaments = [t for t in tournaments if (t['number'] == round_num)]
        if (len(tournaments) == 0):
            raise ValueError('invalid round number')
        t = tournaments[0]
    return t['prizePoolNmr']
Get NMR prize pool for the given round and tournament. Args: round_num (int, optional): The round you are interested in, defaults to current round. tournament (int, optional): ID of the tournament, defaults to 1. Returns: decimal.Decimal: prize pool in NMR Raises: ValueError: in case of invalid round number
codesearchnet
def pinch(self, direction='in', percent=0.6, duration=2.0, dead_zone=0.1):
    if (direction not in ('in', 'out')):
        raise ValueError('Argument `direction` should be one of "in" or "out". Got {}'.format(repr(direction)))
    if (dead_zone >= percent):
        raise ValueError('Argument `dead_zone` should not be greater than `percent`. dead_zoon={}, percent={}'.format(repr(dead_zone), repr(percent)))
    (w, h) = self.get_size()
    (x, y) = self.get_position()
    tracks = make_pinching(direction, [x, y], [w, h], percent, dead_zone, duration)
    speed = (((math.sqrt((w * h)) * (percent - dead_zone)) / 2) / duration)
    ret = self.poco.apply_motion_tracks(tracks, accuracy=(speed * 0.03))
    return ret
Squeeze or expand two fingers on this UI element with the given motion range and duration. Args: direction (:py:obj:`str`): pinching direction, only "in" or "out"; "in" for squeezing, "out" for expanding. percent (:py:obj:`float`): squeezing range from or expanding range to of the bounds of the UI. duration (:py:obj:`float`): time interval in which the action is performed. dead_zone (:py:obj:`float`): pinching inner circle radius; should not be greater than ``percent``. Raises: PocoNoSuchNodeException: raised when the UI element does not exist
codesearchnet
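A hedged usage sketch through a Poco UI proxy; the selector string is a placeholder:

# Squeeze inward over 60% of the element's bounds in one second.
poco('photo_view').pinch(direction='in', percent=0.6, duration=1.0)
# Expand outward instead.
poco('photo_view').pinch(direction='out', percent=0.8)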
def delete_nsg_rule(access_token, subscription_id, resource_group, nsg_name, nsg_rule_name):
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/resourceGroups/', resource_group,
                        '/providers/Microsoft.Network/networkSecurityGroups/', nsg_name,
                        '/securityRules/', nsg_rule_name,
                        '?api-version=', NETWORK_API])
    return do_delete(endpoint, access_token)
Delete network security group rule. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. nsg_name (str): Name of the Network Security Group. nsg_rule_name (str): Name of the NSG rule. Returns: HTTP response.
codesearchnet
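A hedged call sketch; the identifiers are placeholders, the access token is assumed to come from the library's authentication helpers, and the return value is assumed to behave like a requests response:

response = delete_nsg_rule(
    access_token,  # obtained elsewhere via the library's auth helpers
    subscription_id='00000000-0000-0000-0000-000000000000',
    resource_group='my-rg',
    nsg_name='my-nsg',
    nsg_rule_name='allow-ssh')
print(response.status_code)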
def __init__(self,
             runner_capabilities: FrozenSet[str],
             process_bundle_descriptor: beam_fn_api_pb2.ProcessBundleDescriptor,
             state_handler: sdk_worker.CachingStateHandler,
             data_channel_factory: data_plane.DataChannelFactory,
             data_sampler: Optional[data_sampler.DataSampler]=None) -> None:
    self.runner_capabilities = runner_capabilities
    self.process_bundle_descriptor = process_bundle_descriptor
    self.state_handler = state_handler
    self.data_channel_factory = data_channel_factory
    self.data_sampler = data_sampler
    self.current_instruction_id: Optional[str] = None
    self.consuming_received_data = False
    _verify_descriptor_created_in_a_compatible_env(process_bundle_descriptor)
    if self.process_bundle_descriptor.timer_api_service_descriptor.url:
        self.timer_data_channel = data_channel_factory.create_data_channel_from_url(
            self.process_bundle_descriptor.timer_api_service_descriptor.url)
    else:
        self.timer_data_channel = None
    self.timers_info: Dict[Tuple[str, str], TimerInfo] = {}
    self.counter_factory = counters.CounterFactory()
    self.state_sampler = statesampler.StateSampler(
        'fnapi-step-%s' % self.process_bundle_descriptor.id, self.counter_factory)
    self.ops = self.create_execution_tree(self.process_bundle_descriptor)
    for op in reversed(self.ops.values()):
        op.setup(self.data_sampler)
    self.splitting_lock = threading.Lock()
Initialize a bundle processor. Args: runner_capabilities (``FrozenSet[str]``): The set of capabilities of the runner with which we will be interacting. process_bundle_descriptor (``beam_fn_api_pb2.ProcessBundleDescriptor``): a description of the stage that this ``BundleProcessor`` is to execute. state_handler (CachingStateHandler). data_channel_factory (``data_plane.DataChannelFactory``).
github-repos
def search(self, patterns, start=30, limit=1000, include_category=False):
    api_name = 'opendns-patterns'
    fmt_url_path = u'search/{0}'
    start = '-{0}days'.format(start)
    include_category = str(include_category).lower()
    query_params = {
        'start': start,
        'limit': limit,
        'includecategory': include_category,
    }
    return self._multi_get(api_name, fmt_url_path, patterns, query_params)
Performs pattern searches against the Investigate database. Args: patterns: An enumerable of RegEx domain patterns to search for start: How far back results extend from in days (max is 30) limit: Number of results to show (max is 1000) include_category: Include OpenDNS security categories Returns: An enumerable of matching domain strings
juraj-google-style
def verify(self, message, signature):
    message = _helpers._to_bytes(message, encoding='utf-8')
    return PKCS1_v1_5.new(self._pubkey).verify(
        SHA256.new(message), signature)
Verifies a message against a signature. Args: message: string or bytes, The message to verify. If string, will be encoded to bytes as utf-8. signature: string or bytes, The signature on the message. Returns: True if message was signed by the private key associated with the public key that this object was constructed with.
juraj-google-style
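A self-contained round trip with the PyCrypto primitives this verifier builds on — a sketch of the underlying mechanism, not the library's own API:

from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5

key = RSA.generate(2048)
message = b'payload to protect'
signature = PKCS1_v1_5.new(key).sign(SHA256.new(message))

# Verification mirrors the method above: hash the message, then check the signature.
print(PKCS1_v1_5.new(key.publickey()).verify(SHA256.new(message), signature))  # True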
def acquire(self, key: Text, constructor_fn: Callable[[], Any], tag: Any=None) -> Any:
    with self._lock:
        control_block = self._cache_map.get(key)
        if control_block is None:
            control_block = _SharedControlBlock()
            self._cache_map[key] = control_block
    result = control_block.acquire(constructor_fn, tag)
    with self._lock:
        self._keepalive = (key, result)
    return result
Acquire a reference to a Shared object. Args: key: the key to the shared object constructor_fn: function that initialises / constructs the object if not present in the cache. This function should take no arguments. It should return an initialised object, or None if the object could not be initialised / constructed. tag: an optional identifier to store with the cached object. If subsequent calls to acquire use different tags, the object will be reloaded rather than returned from cache. Returns: A reference to the initialised object, either from the cache, or newly-constructed.
github-repos
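A hedged sketch of the usual calling pattern through Beam's public Shared wrapper, which routes through an acquire like the one above so a heavyweight object is constructed once per process and reused across bundles; the constructor body is a placeholder:

from apache_beam.utils import shared

def load_lookup_table():
    # Expensive construction, e.g. loading a large model or lookup table.
    return {'answer': 42}

shared_handle = shared.Shared()
table = shared_handle.acquire(load_lookup_table)
print(table['answer'])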
def connect(portname, baudrate):
    global SERPORT
    try:
        SERPORT = serial.Serial(portname, baudrate, timeout=0.1)
    except:
        raise HerkulexError('could not open the serial port')
Connect to the Herkulex bus. Connects to the serial port to which the Herkulex servos are attached. Args: portname (str): The serial port name. baudrate (int): The serial port baudrate. Raises: SerialException: Error occurred while opening the serial port
codesearchnet
def __init__(self, value=b''):
    super(CertificateValue, self).__init__(value, Tags.CERTIFICATE_VALUE)
Construct a CertificateValue byte string. Args: value (bytes): A byte string (e.g., b'\x00\x01...') containing the certificate bytes to store. Optional, defaults to the empty byte string.
juraj-google-style
def make_view(controller, context, data):
    if isinstance(data, BlockModel):
        view = _make_view_subclass(Block, controller, context, data)
    elif isinstance(data, AttributeModel):
        view = Attribute(controller, context, data)
    elif isinstance(data, MethodModel):
        view = Method(controller, context, data)
    elif isinstance(data, Model):
        view = _make_view_subclass(View, controller, context, data)
    elif isinstance(data, dict):
        d = OrderedDict()
        for (k, v) in data.items():
            d[k] = make_view(controller, context, v)
        view = d
    elif isinstance(data, list):
        view = [make_view(controller, context, x) for x in data]
    else:
        view = data
    return view
Make a View subclass containing properties specific for given data Args: controller (Controller): The child controller that hosts the data context (Context): The context the parent has made that the View should use for manipulating the data data (Model): The actual data that context will be manipulating Returns: View: A View subclass instance that provides a user-focused API to the given data
codesearchnet
def _create_dag_op(self, name, params, qargs):
    if (name == 'u0'): op_class = U0Gate
    elif (name == 'u1'): op_class = U1Gate
    elif (name == 'u2'): op_class = U2Gate
    elif (name == 'u3'): op_class = U3Gate
    elif (name == 'x'): op_class = XGate
    elif (name == 'y'): op_class = YGate
    elif (name == 'z'): op_class = ZGate
    elif (name == 't'): op_class = TGate
    elif (name == 'tdg'): op_class = TdgGate
    elif (name == 's'): op_class = SGate
    elif (name == 'sdg'): op_class = SdgGate
    elif (name == 'swap'): op_class = SwapGate
    elif (name == 'rx'): op_class = RXGate
    elif (name == 'ry'): op_class = RYGate
    elif (name == 'rz'): op_class = RZGate
    elif (name == 'rzz'): op_class = RZZGate
    elif (name == 'id'): op_class = IdGate
    elif (name == 'h'): op_class = HGate
    elif (name == 'cx'): op_class = CnotGate
    elif (name == 'cy'): op_class = CyGate
    elif (name == 'cz'): op_class = CzGate
    elif (name == 'ch'): op_class = CHGate
    elif (name == 'crz'): op_class = CrzGate
    elif (name == 'cu1'): op_class = Cu1Gate
    elif (name == 'cu3'): op_class = Cu3Gate
    elif (name == 'ccx'): op_class = ToffoliGate
    elif (name == 'cswap'): op_class = FredkinGate
    else:
        raise QiskitError(('unknown operation for ast node name %s' % name))
    op = op_class(*params)
    self.dag.apply_operation_back(op, qargs, [], condition=self.condition)
Create a DAG node out of a parsed AST op node. Args: name (str): operation name to apply to the dag. params (list): op parameters qargs (list(QuantumRegister, int)): qubits to attach to Raises: QiskitError: if encountering a non-basis opaque gate
codesearchnet
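The long elif chain is effectively a name-to-class lookup, so a dictionary can express the same dispatch more compactly. A sketch of that alternative (abbreviated table, reusing the gate classes named above):

_GATE_CLASSES = {
    'x': XGate, 'y': YGate, 'z': ZGate, 'h': HGate,
    'cx': CnotGate, 'ccx': ToffoliGate, 'cswap': FredkinGate,
    # ... remaining basis gates exactly as listed in the chain above
}

def _lookup_gate(name):
    try:
        return _GATE_CLASSES[name]
    except KeyError:
        raise QiskitError('unknown operation for ast node name %s' % name)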
def process_files(self, path, recursive=False):
    self._logger.info('Processing files in "%s"', path)
    for (path, file) in files_generator(path, recursive):
        if (not file.endswith(BATCH_EXTENSION)):
            self.process_file(os.path.join(path, file))
Apply normalizations over all files in the given directory. Iterate over all files in a given directory. Normalizations will be applied to each file, storing the result in a new file. The extension for the new file will be the one defined in BATCH_EXTENSION. Args: path: Path to the directory. recursive: Whether to find files recursively or not.
codesearchnet
def encode_csv(data_dict, column_names):
    import csv
    import six
    values = [str(data_dict[x]) for x in column_names]
    str_buff = six.StringIO()
    writer = csv.writer(str_buff, lineterminator='')
    writer.writerow(values)
    return str_buff.getvalue()
Builds a csv string. Args: data_dict: dict of {column_name: 1 value} column_names: list of column names Returns: A csv string version of data_dict
juraj-google-style
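A worked example of the helper's behavior; the row values are invented:

row = {'name': 'ada', 'score': 0.97, 'id': 7}
print(encode_csv(row, ['id', 'name', 'score']))  # -> 7,ada,0.97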
def AddBasicOptions(self, argument_group):
    version_string = self.GetVersionInformation()
    argument_group.add_argument(
        '-h', '--help', action='help',
        help='Show this help message and exit.')
    argument_group.add_argument(
        '--troubles', dest='show_troubleshooting', action='store_true',
        default=False, help='Show troubleshooting information.')
    argument_group.add_argument(
        '-V', '--version', dest='version', action='version',
        version=version_string, help='Show the version information.')
Adds the basic options to the argument group. Args: argument_group (argparse._ArgumentGroup): argparse argument group.
juraj-google-style
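A stand-alone argparse sketch showing the same grouping pattern; the tool name and version string are hypothetical, not the original CLI:

import argparse

parser = argparse.ArgumentParser(prog='example-tool', add_help=False)
group = parser.add_argument_group('basic options')
group.add_argument('-h', '--help', action='help', help='Show this help message and exit.')
group.add_argument(
    '-V', '--version', action='version', version='example-tool 1.0',
    help='Show the version information.')
parser.parse_args(['--version'])  # prints "example-tool 1.0" and exits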
def interact(self, banner=None):
    sys.ps1 = getattr(sys, 'ps1', '>>> ')
    sys.ps2 = getattr(sys, 'ps2', '... ')
    if banner is None:
        print ('Pyringe (Python %s.%s.%s) on %s\n%s' %
               (sys.version_info.major, sys.version_info.minor,
                sys.version_info.micro, sys.platform, _WELCOME_MSG))
    else:
        print banner
    more = False
    while True:
        try:
            if more:
                prompt = sys.ps2
            else:
                prompt = self.StatusLine() + '\n' + sys.ps1
            try:
                line = self.raw_input(prompt)
            except EOFError:
                print ''
                break
            else:
                more = self.push(line)
        except KeyboardInterrupt:
            print '\nKeyboardInterrupt'
            self.resetbuffer()
            more = False
Closely emulate the interactive Python console. This method overwrites its superclass' method to specify a different help text and to enable proper handling of the debugger status line. Args: banner: Text to be displayed on interpreter startup.
juraj-google-style
def get_pyof_version(module_fullname):
    ver_module_re = re.compile('(pyof\\.)(v0x\\d+)(\\..*)')
    matched = ver_module_re.match(module_fullname)
    if matched:
        version = matched.group(2)
        return version
    return None
Get the module pyof version based on the module fullname. Args: module_fullname (str): The fullname of the module (e.g.: pyof.v0x01.common.header) Returns: str: openflow version. The openflow version, on the format 'v0x0?' if any. Or None if there isn't a version on the fullname.
codesearchnet
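A quick, runnable illustration of the regular expression at work, mirroring the function above with the standard library only:

import re

ver_module_re = re.compile(r'(pyof\.)(v0x\d+)(\..*)')
print(ver_module_re.match('pyof.v0x01.common.header').group(2))  # v0x01
print(ver_module_re.match('pyof.foundation.base'))               # None: no version segment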
def traverse_postorder(self, leaves=True, internal=True):
    s1 = deque(); s2 = deque(); s1.append(self)
    while len(s1) != 0:
        n = s1.pop(); s2.append(n); s1.extend(n.children)
    while len(s2) != 0:
        n = s2.pop()
        if (leaves and n.is_leaf()) or (internal and not n.is_leaf()):
            yield n
Perform a postorder traversal starting at this ``Node`` object Args: ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False`` ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
juraj-google-style
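A self-contained sketch of the same two-stack postorder idea on a toy node class; the class and labels are illustrative, not treeswift's API:

from collections import deque

class Node:
    def __init__(self, label, children=None):
        self.label = label
        self.children = children or []

root = Node('r', [Node('a', [Node('c'), Node('d')]), Node('b')])

s1, s2 = deque([root]), deque()
while s1:
    n = s1.pop()
    s2.append(n)
    s1.extend(n.children)
while s2:
    print(s2.pop().label)  # children always come out before their parent: c d a b r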
def execute_log(args, root_dir):
    if args.get('keys'):
        config_dir = os.path.join(root_dir, '.config/pueue')
        queue_path = os.path.join(config_dir, 'queue')
        if os.path.exists(queue_path):
            queue_file = open(queue_path, 'rb')
            try:
                queue = pickle.load(queue_file)
            except Exception:
                print('Queue log file seems to be corrupted. Aborting.')
                return
            queue_file.close()
        else:
            print('There is no queue log file. Aborting.')
            return
        for key in args.get('keys'):
            if (queue.get(key) and (queue[key]['status'] in ['failed', 'done'])):
                entry = queue[key]
                print('Log of entry: {}'.format(key))
                print('Returncode: {}'.format(entry['returncode']))
                print('Command: {}'.format(entry['command']))
                print('Path: {}'.format(entry['path']))
                print('Start: {}, End: {} \n'.format(entry['start'], entry['end']))
                if (len(entry['stderr']) > 0):
                    print((Color('{autored}Stderr output: {/autored}\n ') + entry['stderr']))
                if (len(entry['stdout']) > 0):
                    print((Color('{autogreen}Stdout output: {/autogreen}\n ') + entry['stdout']))
            else:
                print('No finished process with key {}.'.format(key))
    else:
        log_path = os.path.join(root_dir, '.local/share/pueue/queue.log')
        log_file = open(log_path, 'r')
        print(log_file.read())
Print the current log file. Args: args['keys'] (int): If given, we only look at the specified processes. root_dir (string): The path to the root directory the daemon is running in.
codesearchnet
def pack(self, tensors: Sequence[Any], layout: layout_lib.Layout) -> Any:
    if not context.executing_eagerly():
        raise RuntimeError('`pack` must be called eagerly.')
    self._register_mesh(layout.mesh)
    with ops.device(self.name):
        if all((isinstance(t, sparse_tensor.SparseTensor) for t in tensors)):
            if not all((t.shape == tensors[0].shape for t in tensors)):
                raise TypeError('All input SparseTensors to Pack must be same shape.')
            is_sparse = True
            tensors = [t.indices for t in tensors] + [t.values for t in tensors] + [
                ops.convert_to_tensor(t.shape, dtype=dtypes.int64) for t in tensors]
        elif any((isinstance(t, sparse_tensor.SparseTensor) for t in tensors)):
            raise TypeError('Cannot Pack SparseTensors with Tensors.')
        else:
            is_sparse = False
        try:
            return _pywrap_dtensor_device.Pack(
                context.context()._handle, tensors, layout.to_string(),
                self._device_info, is_sparse)
        except core._NotOkStatusException as e:
            raise core._status_to_exception(e) from None
Packs tensors into a DTensor handle on this DTensor device. Packing and unpacking are inverse operations: ``` * unpack(pack(tensors)) == tensors * pack(unpack(dtensor)) == dtensor ``` Refer to `dtensor.pack` for more information. Args: tensors: The list of tensors to pack into a DTensor. layout: The layout of the DTensor to be created. Returns: A DTensor created from the individual component tensors. Raises: RuntimeError: When not called eagerly.
github-repos
def generate_output_header(self, query_type='RDAP'):
    output = '\n{0}{1}{2} query for {3}:{4}\n\n'.format(
        ANSI['ul'], ANSI['b'], query_type, self.obj.address_str, ANSI['end'])
    return output
The function for generating the CLI output header. Args: query_type (:obj:`str`): The IPWhois query type. Defaults to 'RDAP'. Returns: str: The generated output.
codesearchnet