Columns: code (string, lengths 20–4.93k), docstring (string, lengths 33–1.27k), source (string, 3 classes).
def _decorate_ast_reference_data_types(node: _ast.AbstractSyntaxTree) -> None:
    if isinstance(node, _ast.Identifier) and node.value == 'reference':
        node.data_type = unittest.mock.Mock(
            spec=_fhir_path_data_types.StructureDataType, element_type='Reference')
    for child in node.children or ():
        _decorate_ast_reference_data_types(child)
Adds data types for reference nodes. Sets the data_type for any identifier node named 'reference' to that of a Reference type. Args: node: The root node of the AST to modify.
github-repos
def _use_cache(self, key, options=None):
    flag = ContextOptions.use_cache(options)
    if flag is None:
        flag = self._cache_policy(key)
    if flag is None:
        flag = ContextOptions.use_cache(self._conn.config)
    if flag is None:
        flag = True
    return flag
Return whether to use the context cache for this key. Args: key: Key instance. options: ContextOptions instance, or None. Returns: True if the key should be cached, False otherwise.
codesearchnet
def less(x1, x2):
    if any_symbolic_tensors((x1, x2)):
        return Less().symbolic_call(x1, x2)
    return backend.numpy.less(x1, x2)
Return the truth value of `x1 < x2` element-wise. Args: x1: First input tensor. x2: Second input tensor. Returns: Output tensor, element-wise comparison of `x1` and `x2`.
github-repos
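As a usage note for the record above: assuming this `less` is the Keras 3 `keras.ops.less` wrapper (an assumption; the record itself does not name the package), a minimal call looks like this.

import numpy as np
import keras

x1 = np.array([1, 3, 5])
x2 = np.array([2, 3, 4])
# Element-wise x1 < x2 -> [True, False, False]
print(keras.ops.less(x1, x2))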
def gather(values, index, name='segmented_gather'): return tf.gather(values, index.indices, batch_dims=index.batch_dims, name=name)
Gathers from `values` using the index map. For each element in the domain of the index map this operation looks up a value for that index in `values`. Two elements from the same segment always get assigned the same value. Args: values: [B1, ..., Bn, num_segments, V1, ...] Tensor with segment values. index: [B1, ..., Bn, I1, ..., Ik] IndexMap. name: Name for the TensorFlow operation. Returns: [B1, ..., Bn, I1, ..., Ik, V1, ...] Tensor with the gathered values.
github-repos
def keep_file(self, task, response, min_size=None, max_size=None):
    try:
        img = Image.open(BytesIO(response.content))
    except (IOError, OSError):
        return False
    task['img_size'] = img.size
    if min_size and not self._size_gt(img.size, min_size):
        return False
    if max_size and not self._size_lt(img.size, max_size):
        return False
    return True
Decide whether to keep the image. Compare image size with ``min_size`` and ``max_size`` to decide. Args: response (Response): response of requests. min_size (tuple or None): minimum size of required images. max_size (tuple or None): maximum size of required images. Returns: bool: whether to keep the image.
codesearchnet
def MapByteStream( self, byte_stream, byte_offset=0, context=None, **unused_kwargs): context_state = getattr(context, 'state', {}) size_hints = context_state.get('size_hints', {}) elements_data_size = self._CalculateElementsDataSize(context) if elements_data_size is not None: self._CheckByteStreamSize(byte_stream, byte_offset, elements_data_size) elif not self._HasElementsTerminator(): raise errors.MappingError( 'Unable to determine elements data size and missing elements ' 'terminator') else: byte_stream_size = len(byte_stream) element_byte_size = self._element_data_type_definition.GetByteSize() elements_data_offset = byte_offset next_elements_data_offset = elements_data_offset + element_byte_size elements_terminator = self._data_type_definition.elements_terminator element_value = byte_stream[ elements_data_offset:next_elements_data_offset] while byte_stream[elements_data_offset:]: elements_data_offset = next_elements_data_offset if element_value == elements_terminator: elements_data_size = elements_data_offset - byte_offset break next_elements_data_offset += element_byte_size element_value = byte_stream[ elements_data_offset:next_elements_data_offset] if element_value != elements_terminator: size_hints[self._data_type_definition.name] = DataTypeMapSizeHint( byte_stream_size - byte_offset) context_state['size_hints'] = size_hints error_string = ( 'Unable to read: {0:s} from byte stream at offset: {1:d} ' 'with error: unable to find elements terminator').format( self._data_type_definition.name, byte_offset) raise errors.ByteStreamTooSmallError(error_string) if context: context.byte_size = elements_data_size size_hints[self._data_type_definition.name] = DataTypeMapSizeHint( elements_data_size, is_complete=True) context_state['size_hints'] = size_hints return byte_stream[byte_offset:byte_offset + elements_data_size]
Maps the data type on a byte stream. Args: byte_stream (bytes): byte stream. byte_offset (Optional[int]): offset into the byte stream where to start. context (Optional[DataTypeMapContext]): data type map context. Returns: tuple[object, ...]: mapped values. Raises: MappingError: if the data type definition cannot be mapped on the byte stream.
juraj-google-style
def _fqdn(o, oset=True, recheck=False, pmodule=None): if id(o) in _set_failures or o is None: return None if recheck or not _safe_hasattr(o, "__fqdn__"): import inspect if not hasattr(o, "__name__"): msg.warn("Skipped object {}: no __name__ attribute.".format(o), 3) return result = None if hasattr(o, "__acornext__") and o.__acornext__ is not None: otarget = o.__acornext__ else: otarget = o omod = _safe_getmodule(otarget) or pmodule if (omod is None and hasattr(otarget, "__objclass__") and otarget.__objclass__ is not None): omod = _safe_getmodule(otarget.__objclass__) parts = ("<unknown>" if omod is None else omod.__name__, otarget.__objclass__.__name__, otarget.__name__) result = "{}.{}.{}".format(*parts) elif (omod is None and hasattr(otarget, "__class__") and otarget.__class__ is not None): omod = _safe_getmodule(otarget.__class__) parts = ("<unknown>" if omod is None else omod.__name__, otarget.__class__.__name__, otarget.__name__) result = "{}.{}.{}".format(*parts) elif omod is not otarget: parts = (_fqdn(omod, False), otarget.__name__) result = "{}.{}".format(*parts) else: result = otarget.__name__ if oset: _safe_setattr(o, "__fqdn__", result) return result if _safe_hasattr(o, "__fqdn__"): return o.__fqdn__
Returns the fully qualified name of the object. Args: o (type): instance of the object's type. oset (bool): when True, the fqdn will also be set on the object as attribute `__fqdn__`. recheck (bool): for sub-classes, sometimes the super class has already had its __fqdn__ attribute set; in that case, we want to recheck the object's name. This usually only gets used during object extension.
juraj-google-style
def __init__(self, locations=None, separation_char=os.sep):
    super(LocationDescriptor, self).__init__()
    self._separation_char = separation_char
    if isinstance(locations, list):
        self._locations_list = list(locations)
    elif isinstance(locations, str) or isinstance(locations, unicode):
        self._locations_list = locations.split(self._separation_char)
    elif locations is None:
        self._locations_list = list()
    else:
        raise TypeError("Argument in constructor not recognized.")
Constructor. Args: locations: Can be either a string with sub-strings joined by the separation character or a list of strings, each giving a location. separation_char: Separation character in the location string. Raises: TypeError: if argument is not recognized as either a string, a list of strings or ``None``. Notes: Empty :class:`LocationDescriptor`s **are** allowed and empty locations are also allowed.
juraj-google-style
def failure_message(description, options):
    message = 'expected to find {}'.format(description)
    if options['count'] is not None:
        message += ' {count} {times}'.format(
            count=options['count'], times=declension('time', 'times', options['count']))
    elif options['between'] is not None:
        between = options['between']
        if between:
            first, last = between[0], between[-1]
        else:
            first, last = None, None
        message += ' between {first} and {last} times'.format(first=first, last=last)
    elif options['maximum'] is not None:
        message += ' at most {maximum} {times}'.format(
            maximum=options['maximum'], times=declension('time', 'times', options['maximum']))
    elif options['minimum'] is not None:
        message += ' at least {minimum} {times}'.format(
            minimum=options['minimum'], times=declension('time', 'times', options['minimum']))
    return message
Returns a expectation failure message for the given query description. Args: description (str): A description of the failed query. options (Dict[str, Any]): The query options. Returns: str: A message describing the failure.
codesearchnet
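A quick illustration of how `failure_message` above composes its text; `declension` is stubbed here because the real pluralization helper is not part of the record:

def declension(singular, plural, count):
    # stand-in for the library's pluralization helper
    return singular if count == 1 else plural

options = {'count': 2, 'between': None, 'maximum': None, 'minimum': None}
print(failure_message('css "a.link"', options))
# expected to find css "a.link" 2 times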
def find_response_component(self, api_id=None, signature_id=None):
    if not api_id and not signature_id:
        raise ValueError('At least one of api_id and signature_id is required')
    components = list()
    if self.response_data:
        for component in self.response_data:
            if (api_id and component['api_id'] == api_id) or (
                    signature_id and component['signature_id'] == signature_id):
                components.append(component)
    return components
Find one or many response components. Args: api_id (str): Api id associated with the component(s) to be retrieved. signature_id (str): Signature id associated with the component(s) to be retrieved. Returns: A list of dictionaries containing component data
juraj-google-style
class ZoeDepthFeatureFusionLayer(nn.Module): def __init__(self, config, align_corners=True): super().__init__() self.align_corners = align_corners self.projection = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=1, bias=True) self.residual_layer1 = ZoeDepthPreActResidualLayer(config) self.residual_layer2 = ZoeDepthPreActResidualLayer(config) def forward(self, hidden_state, residual=None): if residual is not None: if hidden_state.shape != residual.shape: residual = nn.functional.interpolate(residual, size=(hidden_state.shape[2], hidden_state.shape[3]), mode='bilinear', align_corners=False) hidden_state = hidden_state + self.residual_layer1(residual) hidden_state = self.residual_layer2(hidden_state) hidden_state = nn.functional.interpolate(hidden_state, scale_factor=2, mode='bilinear', align_corners=self.align_corners) hidden_state = self.projection(hidden_state) return hidden_state
Feature fusion layer, merges feature maps from different stages. Args: config (`[ZoeDepthConfig]`): Model configuration class defining the model architecture. align_corners (`bool`, *optional*, defaults to `True`): The align_corner setting for bilinear upsample.
github-repos
def call_backend(self, orig_request, start_response): method_config, params = self.lookup_rest_method(orig_request) if not method_config: cors_handler = self._create_cors_handler(orig_request) return util.send_wsgi_not_found_response(start_response, cors_handler=cors_handler) transformed_request = self.transform_request( orig_request, params, method_config) discovery = discovery_service.DiscoveryService( self.config_manager, self._backend) discovery_response = discovery.handle_discovery_request( transformed_request.path, transformed_request, start_response) if discovery_response: return discovery_response url = transformed_request.base_path + transformed_request.path transformed_request.headers['Content-Type'] = 'application/json' transformed_environ = self.prepare_backend_environ( orig_request.server, 'POST', url, transformed_request.headers.items(), transformed_request.body, transformed_request.source_ip, orig_request.port) with util.StartResponseProxy() as start_response_proxy: body_iter = self._backend(transformed_environ, start_response_proxy.Proxy) status = start_response_proxy.response_status headers = start_response_proxy.response_headers body = start_response_proxy.response_body if not body: body = ''.join(body_iter) return self.handle_backend_response(orig_request, transformed_request, status, headers, body, method_config, start_response)
Generate API call (from earlier-saved request). This calls start_response and returns the response body. Args: orig_request: An ApiRequest, the original request from the user. start_response: A function with semantics defined in PEP-333. Returns: A string containing the response body.
juraj-google-style
def wrap_with_monitor(env, video_dir): env = ExtendToEvenDimentions(env) env = RenderObservations(env) env = gym.wrappers.Monitor(env, video_dir, force=True, video_callable=lambda idx: True, write_upon_reset=True) return env
Wrap environment with gym.Monitor. Video recording provided by Monitor requires 1) both height and width of observation to be even numbers. 2) rendering of environment Args: env: environment. video_dir: video directory. Returns: wrapped environment.
juraj-google-style
def getQueryEngineDescription(self, queryEngine, **kwargs): response = self.getQueryEngineDescriptionResponse(queryEngine, **kwargs) return self._read_dataone_type_response(response, 'QueryEngineDescription')
See Also: getQueryEngineDescriptionResponse() Args: queryEngine: **kwargs: Returns:
juraj-google-style
def run_suite(test_classes, argv=None): parser = argparse.ArgumentParser(description='Mobly Suite Executable.') parser.add_argument( '-c', '--config', nargs=1, type=str, required=True, metavar='<PATH>', help='Path to the test configuration file.') parser.add_argument( '--tests', '--test_case', nargs='+', type=str, metavar='[ClassA[.test_a] ClassB[.test_b] ...]', help='A list of test classes and optional tests to execute.') if not argv: argv = sys.argv[1:] args = parser.parse_args(argv) test_configs = config_parser.load_test_config_file(args.config[0]) for test_class in test_classes: if not issubclass(test_class, base_test.BaseTestClass): logging.error('Test class %s does not extend ' 'mobly.base_test.BaseTestClass', test_class) sys.exit(1) selected_tests = compute_selected_tests(test_classes, args.tests) ok = True for config in test_configs: runner = test_runner.TestRunner(config.log_path, config.test_bed_name) for (test_class, tests) in selected_tests.items(): runner.add_test_class(config, test_class, tests) try: runner.run() ok = runner.results.is_all_pass and ok except signals.TestAbortAll: pass except: logging.exception('Exception when executing %s.', config.test_bed_name) ok = False if not ok: sys.exit(1)
Executes multiple test classes as a suite. This is the default entry point for running a test suite script file directly. Args: test_classes: List of python classes containing Mobly tests. argv: A list that is then parsed as cli args. If None, defaults to cli input.
juraj-google-style
def update(self, value): with tf.name_scope((self._name + '/update')): if (value.shape.ndims == self._mean.shape.ndims): value = value[(None, ...)] count = tf.shape(value)[0] with tf.control_dependencies([self._count.assign_add(count)]): step = tf.cast(self._count, tf.float32) mean_delta = tf.reduce_sum((value - self._mean[(None, ...)]), 0) new_mean = (self._mean + (mean_delta / step)) new_mean = tf.cond((self._count > 1), (lambda : new_mean), (lambda : value[0])) var_delta = ((value - self._mean[(None, ...)]) * (value - new_mean[(None, ...)])) new_var_sum = (self._var_sum + tf.reduce_sum(var_delta, 0)) with tf.control_dependencies([new_mean, new_var_sum]): update = (self._mean.assign(new_mean), self._var_sum.assign(new_var_sum)) with tf.control_dependencies(update): if (value.shape.ndims == 1): value = tf.reduce_mean(value) return self._summary('value', tf.reduce_mean(value))
Update the mean and variance estimates. Args: value: Batch or single value tensor. Returns: Summary tensor.
codesearchnet
def process(self, element): import apache_beam as beam import six import tensorflow as tf tf.logging.set_verbosity(tf.logging.ERROR) try: clean_element = [] for line in element: clean_element.append(line.rstrip()) batch_result = self._session.run( fetches=self._transformed_features, feed_dict={self._input_placeholder_tensor: clean_element}) for i in range(len(clean_element)): transformed_features = {} for name, value in six.iteritems(batch_result): if isinstance(value, tf.SparseTensorValue): batch_i_indices = value.indices[:, 0] == i batch_i_values = value.values[batch_i_indices] transformed_features[name] = batch_i_values.tolist() else: transformed_features[name] = value[i].tolist() yield transformed_features except Exception as e: yield beam.pvalue.TaggedOutput('errors', (str(e), element))
Run the transformation graph on batched input data Args: element: list of csv strings, representing one batch input to the TF graph. Returns: dict containing the transformed data. Results are un-batched. Sparse tensors are converted to lists.
juraj-google-style
def GetDecompressor(cls, compression_method):
    compression_method = compression_method.lower()
    decompressor = cls._decompressors.get(compression_method, None)
    if not decompressor:
        return None
    return decompressor()
Retrieves the decompressor object for a specific compression method. Args: compression_method (str): compression method identifier. Returns: Decompressor: decompressor or None if the compression method does not exists.
codesearchnet
def save_data(self, filename):
    with zopen(filename, 'wt') as f:
        json.dump(list(self._data), f, cls=MontyEncoder)
Save the assimilated data to a file. Args: filename (str): filename to save the assimilated data to. Note that if the filename ends with gz or bz2, the relevant gzip or bz2 compression will be applied.
codesearchnet
def SetInputSourceConfiguration(self, configuration):
    mount_path = configuration.mount_path
    if mount_path and mount_path.endswith(os.sep):
        mount_path = mount_path[:-1]
    self._mount_path = mount_path
Sets the input source configuration settings. Args: configuration (InputSourceConfiguration): input source configuration.
codesearchnet
def init_from_adversarial_batches_write_to_datastore(self, submissions, adv_batches): idx = 0 for s_id in iterkeys(submissions.defenses): for adv_id in iterkeys(adv_batches.data): class_batch_id = CLASSIFICATION_BATCH_ID_PATTERN.format(idx) idx += 1 self.data[class_batch_id] = { 'adversarial_batch_id': adv_id, 'submission_id': s_id, 'result_path': os.path.join( self._round_name, CLASSIFICATION_BATCHES_SUBDIR, s_id + '_' + adv_id + '.csv') } client = self._datastore_client with client.no_transact_batch() as batch: for key, value in iteritems(self.data): entity = client.entity(client.key(KIND_CLASSIFICATION_BATCH, key)) entity.update(value) batch.put(entity)
Populates data from adversarial batches and writes to datastore. Args: submissions: instance of CompetitionSubmissions adv_batches: instance of AdversarialBatches
juraj-google-style
def _zeo_key(self, key, new_type=OOBTree):
    zeo_key = self.zeo.get(key, None)
    if zeo_key is None:
        zeo_key = new_type()
        self.zeo[key] = zeo_key
    return zeo_key
Get key from the :attr:`zeo` database root. If the key doesn't exist, create it by calling `new_type` argument. Args: key (str): Key in the root dict. new_type (func/obj): Object/function returning the new instance. Returns: obj: Stored object, or `new_type`.
juraj-google-style
def reindex_similar(self, other, n_sphere=4): def make_subset_similar(m1, subset1, m2, subset2, index_dct): 'Changes index_dct INPLACE' coords = ['x', 'y', 'z'] index1 = list(subset1) for m1_i in index1: dist_m2_to_m1_i = m2.get_distance_to(m1.loc[(m1_i, coords)], subset2, sort=True) m2_i = dist_m2_to_m1_i.index[0] dist_new = dist_m2_to_m1_i.loc[(m2_i, 'distance')] m2_pos_i = dist_m2_to_m1_i.loc[(m2_i, coords)] counter = itertools.count() found = False while (not found): if (m2_i in index_dct.keys()): old_m1_pos = m1.loc[(index_dct[m2_i], coords)] if (dist_new < np.linalg.norm((m2_pos_i - old_m1_pos))): index1.append(index_dct[m2_i]) index_dct[m2_i] = m1_i found = True else: m2_i = dist_m2_to_m1_i.index[next(counter)] dist_new = dist_m2_to_m1_i.loc[(m2_i, 'distance')] m2_pos_i = dist_m2_to_m1_i.loc[(m2_i, coords)] else: index_dct[m2_i] = m1_i found = True return index_dct molecule1 = self.copy() molecule2 = other.copy() partition1 = molecule1.partition_chem_env(n_sphere) partition2 = molecule2.partition_chem_env(n_sphere) index_dct = {} for key in partition1: message = 'You have chemically different molecules, regarding the topology of their connectivity.' assert (len(partition1[key]) == len(partition2[key])), message index_dct = make_subset_similar(molecule1, partition1[key], molecule2, partition2[key], index_dct) molecule2.index = [index_dct[i] for i in molecule2.index] return molecule2.loc[molecule1.index]
Reindex ``other`` to be similarly indexed as ``self``. Returns a reindexed copy of ``other`` that minimizes the distance for each atom to itself in the same chemical environment from ``self`` to ``other``. Read more about the definition of the chemical environment in :func:`Cartesian.partition_chem_env` .. note:: It is necessary to align ``self`` and other before applying this method. This can be done via :meth:`~Cartesian.align`. .. note:: It is probably necessary to improve the result using :meth:`~Cartesian.change_numbering()`. Args: other (Cartesian): n_sphere (int): Wrapper around the argument for :meth:`~Cartesian.partition_chem_env`. Returns: Cartesian: Reindexed version of other
codesearchnet
def exportGurobiModel(self, gurobiDriver='gurobi', verbose=False): from gurobipy import GRB, read from tempfile import mkdtemp from shutil import rmtree from os import path import sys if (sys.version_info > (3, 0)): from io import StringIO else: from io import BytesIO as StringIO tmp_dir = mkdtemp() model_file = path.join(tmp_dir, 'model.mps') previous = { 'solver': self.getOption('solver') or '', 'gurobi_auxfiles': self.getOption('auxfiles') or '', 'gurobi_options': self.getOption('gurobi_options') or '', } temporary = { 'solver': gurobiDriver, 'gurobi_auxfiles': 'rc', 'gurobi_options': .format(model_file) } for option in temporary: self.setOption(option, temporary[option]) output = self.getOutput('solve;') if not path.isfile(model_file): raise RuntimeError(output) for option in previous: self.setOption(option, previous[option]) text_trap = StringIO() stdout = sys.stdout sys.stdout = text_trap model = read(model_file) sys.stdout = stdout if verbose: print(text_trap.getvalue()) if model_file.endswith('.mps'): if not self.getCurrentObjective().minimization(): model.ModelSense = GRB.MAXIMIZE model.setObjective(- model.getObjective()) model.update() rmtree(tmp_dir) return model
Export the model to Gurobi as a gurobipy.Model object. Args: gurobiDriver: The name or the path of the Gurobi solver driver. verbose: Whether should generate verbose output. Returns: A :class:`gurobipy.Model` object with the model loaded.
juraj-google-style
def write_config_file(config_instance, appdirs=DEFAULT_APPDIRS, file_name=DEFAULT_CONFIG_FILENAME): path = get_config_path(appdirs, file_name) with open(path, 'w') as fobj: config_instance.write(fobj) return config_instance
Write a ConfigParser instance to file at the correct location. Args: config_instance: Config instance to safe to file. appdirs (HamsterAppDirs, optional): ``HamsterAppDirs`` instance storing app/user specific path information. file_name (text_type, optional): Name of the config file. Defaults to ``DEFAULT_CONFIG_FILENAME``. Returns: SafeConfigParser: Instance written to file.
codesearchnet
def url_assembler(query_string, no_redirect=0, no_html=0, skip_disambig=0):
    params = [('q', query_string.encode('utf-8')), ('format', 'json')]
    if no_redirect:
        params.append(('no_redirect', 1))
    if no_html:
        params.append(('no_html', 1))
    if skip_disambig:
        params.append(('skip_disambig', 1))
    return '/?' + urlencode(params)
Assembler of parameters for building request query. Args: query_string: Query to be passed to DuckDuckGo API. no_redirect: Skip HTTP redirects (for !bang commands). Default - False. no_html: Remove HTML from text, e.g. bold and italics. Default - False. skip_disambig: Skip disambiguation (D) Type. Default - False. Returns: A “percent-encoded” string which is used as a part of the query.
codesearchnet
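For reference, a minimal sketch of calling `url_assembler` above, assuming `urlencode` comes from `urllib.parse`:

from urllib.parse import urlencode  # assumption: source of urlencode

print(url_assembler('python lib', no_html=1))
# /?q=python+lib&format=json&no_html=1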
def submit_files(self, halt_on_error=True): if self.halt_on_file_error is not None: halt_on_error = self.halt_on_file_error upload_status = [] for xid, content_data in self._files.items(): del self._files[xid] status = True if self.debug and xid in self.saved_xids: self.tcex.log.debug('skipping previously saved file {}.'.format(xid)) continue content = content_data.get('fileContent') if callable(content): content = content_data.get('fileContent')(xid) if content is None: upload_status.append({'uploaded': False, 'xid': xid}) self.tcex.log.warning('File content was null for xid {}.'.format(xid)) continue if content_data.get('type') == 'Document': api_branch = 'documents' elif content_data.get('type') == 'Report': api_branch = 'reports' url = '/v2/groups/{}/{}/upload'.format(api_branch, xid) headers = {'Content-Type': 'application/octet-stream'} params = {'owner': self._owner} r = self.submit_file_content('POST', url, content, headers, params, halt_on_error) if r.status_code == 401: self.tcex.log.info('Received 401 status code using POST. Trying PUT to update.') r = self.submit_file_content('PUT', url, content, headers, params, halt_on_error) self.tcex.log.debug('{} Upload URL: {}.'.format(content_data.get('type'), r.url)) if not r.ok: status = False self.tcex.handle_error(585, [r.status_code, r.text], halt_on_error) elif self.debug: self.saved_xids.append(xid) self.tcex.log.info('Status {} for file upload with xid {}.'.format(r.status_code, xid)) upload_status.append({'uploaded': status, 'xid': xid}) return upload_status
Submit Files for Documents and Reports to ThreatConnect API. Critical Errors * There is insufficient document storage allocated to this account. Args: halt_on_error (bool, default:True): If True any exception will raise an error. Returns: dict: The upload status for each xid.
juraj-google-style
def _ParseFieldsMetadata(self, structure): fields = structure.fields.split(' ') log_line_structure = pyparsing.Empty() if ((fields[0] == 'date') and (fields[1] == 'time')): log_line_structure += self.DATE_TIME.setResultsName('date_time') fields = fields[2:] for member in fields: log_line_structure += self._LOG_LINE_STRUCTURES.get(member, self.URI) updated_structures = [] for line_structure in self._line_structures: if (line_structure[0] != 'logline'): updated_structures.append(line_structure) updated_structures.append(('logline', log_line_structure)) self._line_structures = updated_structures
Parses the fields metadata and updates the log line definition to match. Args: structure (pyparsing.ParseResults): structure parsed from the log file.
codesearchnet
def str_to_inet(address):
    try:
        return socket.inet_pton(socket.AF_INET, address)
    except socket.error:
        return socket.inet_pton(socket.AF_INET6, address)
Convert an a string IP address to a inet struct Args: address (str): String representation of address Returns: inet: Inet network address
juraj-google-style
def torch_equals_ignore_index(tensor, tensor_other, ignore_index=None):
    if ignore_index is not None:
        assert tensor.size() == tensor_other.size()
        mask_arr = tensor.ne(ignore_index)
        tensor = tensor.masked_select(mask_arr)
        tensor_other = tensor_other.masked_select(mask_arr)
    return torch.equal(tensor, tensor_other)
Compute ``torch.equal`` with the optional mask parameter. Args: ignore_index (int, optional): Specifies a ``tensor`` index that is ignored. Returns: (bool) Returns ``True`` if target and prediction are equal.
juraj-google-style
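A small usage sketch for `torch_equals_ignore_index` above, treating 0 as a padding index:

import torch

target = torch.tensor([1, 2, 0, 0])      # 0 used as padding
prediction = torch.tensor([1, 2, 9, 9])
print(torch_equals_ignore_index(target, prediction, ignore_index=0))  # True
print(torch_equals_ignore_index(target, prediction))                  # False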
def read(cls, five9, external_id):
    results = cls.search(five9, {cls.__uid_field__: external_id})
    if not results:
        return None
    return results[0]
Return a record singleton for the ID. Args: five9 (five9.Five9): The authenticated Five9 remote. external_id (mixed): The identified on Five9. This should be the value that is in the ``__uid_field__`` field on the record. Returns: BaseModel: The record, if found. Otherwise ``None``
juraj-google-style
def mach53(msg):
    d = hex2bin(data(msg))
    if d[23] == '0':
        return None
    mach = bin2int(d[24:33]) * 0.008
    return round(mach, 3)
Mach number, BDS 5,3 message Args: msg (String): 28 bytes hexadecimal message Returns: float: Mach number
codesearchnet
def build_avatar_url(jid):
    digest = md5(str(jid).encode("utf-8")).hexdigest()
    # The return URL was truncated at "http:" in the source dump; the gravatar
    # path below is a reconstruction of the usual avatar URL pattern.
    return "http://www.gravatar.com/avatar/{md5}".format(md5=digest)
Static method to build a gravatar url with the agent's JID Args: jid (aioxmpp.JID): an XMPP identifier Returns: str: an URL for the gravatar
juraj-google-style
def shift_and_pad(tensor, shift, axis=0): shape = tensor.shape rank = len(shape) assert 0 <= abs(axis) < rank length = int(shape[axis]) assert 0 <= abs(shift) < length paddings = [(0, 0)] * rank begin = [0] * rank size = [-1] * rank if shift > 0: paddings[axis] = (shift, 0) size[axis] = length - shift elif shift < 0: paddings[axis] = (0, -shift) begin[axis] = -shift ret = tf.pad(tf.slice(tensor, begin, size), paddings) return ret
Shifts and pads with zero along an axis. Example: shift_and_pad([1, 2, 3, 4], 2) --> [0, 0, 1, 2] shift_and_pad([1, 2, 3, 4], -2) --> [3, 4, 0, 0] Args: tensor: Tensor; to be shifted and padded. shift: int; number of positions to shift by. axis: int; along which axis to shift and pad. Returns: A Tensor with the same shape as the input tensor.
juraj-google-style
def _validate_paths(self, settings, name, value): return [self._validate_path(settings, name, item) for item in value]
Apply ``SettingsPostProcessor._validate_path`` to each element in list. Args: settings (dict): Current settings. name (str): Setting name. value (list): List of paths to patch. Raises: boussole.exceptions.SettingsInvalidError: Once a path does not exists. Returns: list: Validated paths.
juraj-google-style
def _unknown_args(self, args):
    for u in args:
        self.tcex.log.warning(u'Unsupported arg found ({}).'.format(u))
Log argparser unknown arguments. Args: args (list): List of unknown arguments
juraj-google-style
def InitPathInfos(self, client_id, path_infos):
    self.ClearPathHistory(client_id, path_infos)
    self.WritePathInfos(client_id, path_infos)
Initializes a collection of path info records for a client. Unlike `WritePathInfo`, this method clears stat and hash histories of paths associated with path info records. This method is intended to be used only in the data migration scripts. Args: client_id: A client identifier for which the paths are to be initialized. path_infos: A list of `rdf_objects.PathInfo` objects to write.
juraj-google-style
async def find_user(cls, config: Config, user: str) -> Tuple[(str, str)]: with open(config.users_file, 'r') as users_file: for line in users_file: (this_user, user_dir, password) = line.split(':', 2) if (user == this_user): return (password.rstrip('\r\n'), (user_dir or user)) raise InvalidAuth()
If the given user ID exists, return its expected password and mailbox path. Override this method to implement custom login logic. Args: config: The maildir config object. user: The expected user ID. Raises: InvalidAuth: The user ID was not valid.
codesearchnet
def write_to_fil(self, filename_out, *args, **kwargs):
    t0 = time.time()
    self.__update_header()
    if self.container.isheavy():
        self.__write_to_fil_heavy(filename_out)
    else:
        self.__write_to_fil_light(filename_out)
    t1 = time.time()
    logger.info('Conversion time: %2.2fsec' % (t1 - t0))
Write data to .fil file. It check the file size then decides how to write the file. Args: filename_out (str): Name of output file
juraj-google-style
def AsDict(self, dt=True): data = {} if self.name: data['name'] = self.name data['mlkshk_url'] = self.mlkshk_url if self.profile_image_url: data['profile_image_url'] = self.profile_image_url if self.id: data['id'] = self.id if self.about: data['about'] = self.about if self.website: data['website'] = self.website if self.shakes: data['shakes'] = [shk.AsDict(dt=dt) for shk in self.shakes] data['shake_count'] = self.shake_count return data
A dict representation of this User instance. The return value uses the same key names as the JSON representation. Args: dt (bool): If True, return dates as python datetime objects. If False, return dates as ISO strings. Return: A dict representing this User instance
juraj-google-style
def _use_cache(self, key, options=None): flag = ContextOptions.use_cache(options) if flag is None: flag = self._cache_policy(key) if flag is None: flag = ContextOptions.use_cache(self._conn.config) if flag is None: flag = True return flag
Return whether to use the context cache for this key. Args: key: Key instance. options: ContextOptions instance, or None. Returns: True if the key should be cached, False otherwise.
juraj-google-style
def run_suite(test_classes, argv=None): parser = argparse.ArgumentParser(description='Mobly Suite Executable.') parser.add_argument('-c', '--config', nargs=1, type=str, required=True, metavar='<PATH>', help='Path to the test configuration file.') parser.add_argument('--tests', '--test_case', nargs='+', type=str, metavar='[ClassA[.test_a] ClassB[.test_b] ...]', help='A list of test classes and optional tests to execute.') if (not argv): argv = sys.argv[1:] args = parser.parse_args(argv) test_configs = config_parser.load_test_config_file(args.config[0]) for test_class in test_classes: if (not issubclass(test_class, base_test.BaseTestClass)): logging.error('Test class %s does not extend mobly.base_test.BaseTestClass', test_class) sys.exit(1) selected_tests = compute_selected_tests(test_classes, args.tests) ok = True for config in test_configs: runner = test_runner.TestRunner(config.log_path, config.test_bed_name) for (test_class, tests) in selected_tests.items(): runner.add_test_class(config, test_class, tests) try: runner.run() ok = (runner.results.is_all_pass and ok) except signals.TestAbortAll: pass except: logging.exception('Exception when executing %s.', config.test_bed_name) ok = False if (not ok): sys.exit(1)
Executes multiple test classes as a suite. This is the default entry point for running a test suite script file directly. Args: test_classes: List of python classes containing Mobly tests. argv: A list that is then parsed as cli args. If None, defaults to cli input.
codesearchnet
def __init__(self, export_dir): self._export_dir = export_dir self._saved_model = loader.parse_saved_model(export_dir)
Creates an MethodNameUpdater object. Args: export_dir: Directory containing the SavedModel files. Raises: IOError: If the saved model file does not exist, or cannot be successfully parsed.
github-repos
def assert_cardinality(expected_cardinality):
    def _apply_fn(dataset):
        return _AssertCardinalityDataset(dataset, expected_cardinality)
    return _apply_fn
Asserts the cardinality of the input dataset. NOTE: The following assumes that "examples.tfrecord" contains 42 records. >>> dataset = tf.data.TFRecordDataset("examples.tfrecord") >>> cardinality = tf.data.experimental.cardinality(dataset) >>> print((cardinality == tf.data.experimental.UNKNOWN_CARDINALITY).numpy()) True >>> dataset = dataset.apply(tf.data.experimental.assert_cardinality(42)) >>> print(tf.data.experimental.cardinality(dataset).numpy()) 42 Args: expected_cardinality: The expected cardinality of the input dataset. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`. Raises: FailedPreconditionError: The assertion is checked at runtime (when iterating the dataset) and an error is raised if the actual and expected cardinality differ.
github-repos
def __init__(self, app_or_name, registry=None): if isinstance(app_or_name, flask.Flask): self.app = app_or_name else: self.app = flask.Flask(app_or_name) self.app.wsgi_app = ProxyFix(self.app.wsgi_app) self.blueprint = flask.Blueprint( "gourde", __name__, template_folder="templates" ) self.host = "0.0.0.0" self.port = 8080 self.debug = False self.log_level = None self.twisted = False self.gunicorn = False self.threads = None self.metrics = None self.is_setup = False self.setup_blueprint() self.setup_prometheus(registry) self.setup_sentry(sentry_dsn=None)
Build a new Gourde. Args: Either a flask.Flask or the name of the calling module.
juraj-google-style
def dbmin05years(self, value=None):
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `dbmin05years`'.format(value))
    self._dbmin05years = value
Corresponds to IDD Field `dbmin05years` 5-year return period values for minimum extreme dry-bulb temperature Args: value (float): value for IDD Field `dbmin05years` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def write_hex(fout, buf, offset, width=16):
    skipped_zeroes = 0
    for i, chunk in enumerate(chunk_iter(buf, width)):
        # collapse runs of all-zero rows
        if chunk == (b"\x00" * width):
            skipped_zeroes += 1
            continue
        elif skipped_zeroes != 0:
            fout.write(" -- skipped zeroes: {}\n".format(skipped_zeroes))
            skipped_zeroes = 0
        fout.write("{:016x} ".format(i * width + offset))
        column = " ".join([" ".join(["{:02x}".format(c) for c in subchunk])
                           for subchunk in chunk_iter(chunk, 8)])
        # expected column width: 2 hex chars per byte, separators between bytes
        # and between 8-byte groups; the group term was garbled in the dump and
        # is reconstructed here as (width // 8) - 1
        w = width * 2 + (width - 1) + ((width // 8) - 1)
        if len(column) != w:
            column += " " * (w - len(column))
        fout.write(column)
        fout.write(" |")
        for c in chunk:
            if c in PRINTABLE_CHARS:
                fout.write(chr(c))
            else:
                fout.write(".")
        if len(chunk) < width:
            fout.write(" " * (width - len(chunk)))
        fout.write("|")
        fout.write("\n")
Write the content of 'buf' out in a hexdump style Args: fout: file object to write to buf: the buffer to be pretty printed offset: the starting offset of the buffer width: how many bytes should be displayed per row
juraj-google-style
def parse_headers(obj): if isinstance(obj, basestring): obj = cStringIO.StringIO(obj) hdrs = [] for line in obj: hdr = parse_header(line) if not hdr: break if isinstance(hdr, basestring): if not hdrs: raise ValueError("First header is a continuation") hdrs[-1] = (hdrs[-1][0], hdrs[-1][1] + hdr) continue hdrs.append(hdr) return iodict.IODict(hdrs)
Parse a string a iterable object (including file like objects) to a python dictionary. Args: obj: An iterable object including file-like objects. Returns: An dictionary of headers. If a header is repeated then the last value for that header is given. Raises: ValueError: If the first line is a continuation line or the headers cannot be parsed.
juraj-google-style
def endswith(self, search_str):
    for entry in reversed(list(open(self._jrnl_file, 'r'))[-5:]):
        if search_str in entry:
            return True
    return False
Check whether the provided string exists in Journal file. Only checks the last 5 lines of the journal file. This method is usually used when tracking a journal from an active Revit session. Args: search_str (str): string to search for Returns: bool: if True the search string is found
codesearchnet
def ParseFileObject(self, parser_mediator, file_object): file_offset = file_object.get_offset() file_size = file_object.get_size() while (file_offset < file_size): try: self._ParseRecord(parser_mediator, file_object) except errors.ParseError as exception: if (file_offset == 0): raise errors.UnableToParseFile('Unable to parse first event record with error: {0!s}'.format(exception)) file_offset = file_object.get_offset()
Parses a BSM file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): a file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
codesearchnet
def calculate_weights(correlation_matrix, min_wt):
    np.fill_diagonal(correlation_matrix.values, np.nan)
    correlation_matrix = correlation_matrix.clip(lower=0)
    raw_weights = correlation_matrix.mean(axis=1)
    raw_weights = raw_weights.clip(lower=min_wt)
    weights = raw_weights / sum(raw_weights)
    return raw_weights.round(rounding_precision), weights.round(rounding_precision)
Calculate a weight for each profile based on its correlation to other replicates. Negative correlations are clipped to 0, and weights are clipped to be min_wt at the least. Args: correlation_matrix (pandas df): Correlations between all replicates min_wt (float): Minimum raw weight when calculating weighted average Returns: raw weights (pandas series): Mean correlation to other replicates weights (pandas series): raw_weights normalized such that they add to 1
codesearchnet
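A minimal sketch of `calculate_weights` above on a 3x3 correlation matrix; `rounding_precision` is a module-level constant in the original library and is defined here only for illustration:

import numpy as np
import pandas as pd

rounding_precision = 4  # assumption: module-level constant in the original code

corr = pd.DataFrame(
    [[1.0, 0.9, 0.1],
     [0.9, 1.0, -0.2],
     [0.1, -0.2, 1.0]],
    index=['r1', 'r2', 'r3'], columns=['r1', 'r2', 'r3'])
raw, wt = calculate_weights(corr.copy(), min_wt=0.01)
print(wt.sum())  # 1.0 -- weights are normalized to add to 1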
def wrap_or_unwrap(value): if isinstance(value, NoDependency): return value.value if isinstance(value, base.Trackable): return value elif type(value) == dict: return _DictWrapper(value) elif type(value) == collections.OrderedDict: return _DictWrapper(value) elif type(value) == list: return ListWrapper(value) elif isinstance(value, tuple) and _should_wrap_tuple(value): return _TupleWrapper(value) else: return value
Wraps input value into trackable data structures. This is mostly useful for containers like list, dict, etc, which could contain trackable objects in it. Wrapped data structure will be tracked when associated with a `tf.Module`, so that save model/checkpoint can properly track the dependency. It will also unwrap NoDependency objects. Args: value: the input object to be wrapped. Returns: Wrapped trackable data structure.
github-repos
def load(self, filepath, file_encoding=None):
    with open(filepath, encoding=file_encoding) as inf:
        for line in inf:
            current_line = str(line).strip()
            if current_line.startswith('@prefix'):
                self._add_ttl_ns(current_line.replace('\n', ''))
            elif len(current_line) > 10:
                break
    self.__make_dicts__
Reads the the beginning of a turtle file and sets the prefix's used in that file and sets the prefix attribute Args: filepath: the path to the turtle file file_encoding: specify a specific encoding if necessary
codesearchnet
def load(path):
    with open(path, "r") as fobj:
        analytics = Analytics(info=json.load(fobj))
    os.unlink(path)
    return analytics
Loads analytics report from json file specified by path. Args: path (str): path to json file with analytics report.
juraj-google-style
def set_metadata(self, key: str, value: Any, per_trial: bool=True) -> None:
Sets metadata for current trial or current sampling. Metadata can be used in two use cases: * Worker processes that co-work on the same trial can use meta-data to communicate with each other. * Worker use metadata as a persistent store to save information for current trial, which can be retrieved via `poll_result` method later. Args: key: A string as key to metadata. value: A value that can be serialized by `pg.to_json_str`. per_trial: If True, the key is set per current trial. Otherwise, it is set per current sampling loop.
github-repos
def _TerminateProcess(self, process):
    pid = process.pid
    logger.warning('Terminating process: (PID: {0:d}).'.format(pid))
    process.terminate()
    process.join(timeout=self._PROCESS_JOIN_TIMEOUT)
    if process.is_alive():
        logger.warning('Killing process: (PID: {0:d}).'.format(pid))
        self._KillProcess(pid)
Terminate a process. Args: process (MultiProcessBaseProcess): process to terminate.
codesearchnet
def update_state(world): world_size = len(world) def wrap(index): 'Wrap an index around the other end of the array' return (index % world_size) for x in range(world_size): for y in range(world_size): if (not world[x][y].allow_change.get()): continue live_neighbor_count = sum([world[wrap(x)][wrap((y + 1))].value, world[wrap((x + 1))][wrap((y + 1))].value, world[wrap((x + 1))][wrap(y)].value, world[wrap((x + 1))][wrap((y - 1))].value, world[wrap(x)][wrap((y - 1))].value, world[wrap((x - 1))][wrap((y - 1))].value, world[wrap((x - 1))][wrap(y)].value, world[wrap((x - 1))][wrap((y + 1))].value]) if world[x][y].value: if (not ((live_neighbor_count == 2) or (live_neighbor_count == 3))): world[x][y].value = False elif (live_neighbor_count == 3): world[x][y].value = True
Increment the world state, determining which cells live, die, or appear. Args: world (list[list]): A square matrix of cells Returns: None
codesearchnet
def delete(self, record_id):
    record_url = self.record_url(record_id)
    return self._delete(record_url)
Deletes a record by its id >>> record = airtable.match('Employee Id', 'DD13332454') >>> airtable.delete(record['id']) Args: record_id(``str``): Airtable record id Returns: record (``dict``): Deleted Record
juraj-google-style
def _resolve_subkeys(key, separator='.'):
    subkey = None
    if separator in key:
        index = key.index(separator)
        subkey = key[index + 1:]
        key = key[:index]
    return key, subkey
Given a key which may actually be a nested key, return the top level key and any nested subkeys as separate values. Args: key (str): A string that may or may not contain the separator. separator (str): The namespace separator. Defaults to `.`. Returns: Tuple[str, str]: The key and subkey(s).
juraj-google-style
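Example behavior of `_resolve_subkeys` above (only the first separator splits):

print(_resolve_subkeys('retry.initial_delay'))  # ('retry', 'initial_delay')
print(_resolve_subkeys('timeout'))              # ('timeout', None)
print(_resolve_subkeys('a.b.c'))                # ('a', 'b.c')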
def add_evaluation_step(result_tensor, ground_truth_tensor):
    with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction'):
            prediction = tf.argmax(result_tensor, 1)
            correct_prediction = tf.equal(prediction, ground_truth_tensor)
        with tf.name_scope('accuracy'):
            evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar('accuracy', evaluation_step)
    return evaluation_step, prediction
Inserts the operations we need to evaluate the accuracy of our results. Args: result_tensor: The new final node that produces results. ground_truth_tensor: The node we feed ground truth data into. Returns: Tuple of (evaluation step, prediction).
codesearchnet
def AddWeight(self, path_segment_index, weight):
    if path_segment_index not in self._weight_per_index:
        raise ValueError('Path segment index not set.')
    self._weight_per_index[path_segment_index] += weight
    if weight not in self._indexes_per_weight:
        self._indexes_per_weight[weight] = []
    self._indexes_per_weight[weight].append(path_segment_index)
Adds a weight for a specific path segment index. Args: path_segment_index: an integer containing the path segment index. weight: an integer containing the weight. Raises: ValueError: if the path segment weights do not contain the path segment index.
juraj-google-style
def softmax_cross_entropy_one_hot(logits, labels, weights_fn=None):
    with tf.variable_scope('softmax_cross_entropy_one_hot', values=[logits, labels]):
        del weights_fn
        cross_entropy = tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits)
        return cross_entropy, tf.constant(1.0)
Calculate softmax cross entropy given one-hot labels and logits. Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: cross-entropy (scalar), weights
codesearchnet
def var( self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs ): axis = self._get_axis_number(axis) if axis is not None else 0 if numeric_only is not None and not numeric_only: self._validate_dtypes(numeric_only=True) return self._reduce_dimension( self._query_compiler.var( axis=axis, skipna=skipna, level=level, ddof=ddof, numeric_only=numeric_only, **kwargs ) )
Computes variance across the DataFrame. Args: axis (int): The axis to take the variance on. skipna (bool): True to skip NA values, false otherwise. ddof (int): degrees of freedom Returns: The variance of the DataFrame.
juraj-google-style
def first_seen(self, first_seen):
    if not self.can_update():
        self._tcex.handle_error(910, [self.type])
    first_seen = self._utils.format_datetime(first_seen, date_format='%Y-%m-%dT%H:%M:%SZ')
    self._data['firstSeen'] = first_seen
    request = {'firstSeen': first_seen}
    return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request)
Updates the campaign with the new first_seen date. Args: first_seen: The first_seen date. Converted to %Y-%m-%dT%H:%M:%SZ date format Returns:
codesearchnet
def fstat(self, file_des):
    file_object = self.filesystem.get_open_file(file_des).get_object()
    return file_object.stat_result.copy()
Return the os.stat-like tuple for the FakeFile object of file_des. Args: file_des: The file descriptor of filesystem object to retrieve. Returns: The FakeStatResult object corresponding to entry_path. Raises: OSError: if the filesystem object doesn't exist.
codesearchnet
def sparsemax(x, axis=-1):
    if any_symbolic_tensors((x,)):
        return Sparsemax(axis).symbolic_call(x)
    return backend.nn.sparsemax(x, axis=axis)
Sparsemax activation function. For each batch `i`, and class `j`, sparsemax activation function is defined as: `sparsemax(x)[i, j] = max(x[i, j] - τ(x[i, :]), 0).` Args: x: Input tensor. axis: `int`, axis along which the sparsemax operation is applied. Returns: A tensor, output of sparsemax transformation. Has the same type and shape as `x`. Example: >>> x = np.array([-1., 0., 1.]) >>> x_sparsemax = keras.ops.sparsemax(x) >>> print(x_sparsemax) array([0., 0., 1.], shape=(3,), dtype=float64)
github-repos
def post_op(self, id: str, path_data: Union[dict, None], post_data: Any) -> dict:
    path = self._get_path_for_op_id(id)
    return self.post_path(path, path_data, post_data)
Modifies the ESI by looking up an operation id. Args: path: raw ESI URL path path_data: data to format the path with (can be None) post_data: data to send to ESI Returns: ESI data
codesearchnet
def rotate(p, rad, o=(0, 0)):
    v = vector(o, p)
    fx = lambda x, y, d: x * cos(d) - y * sin(d)
    fy = lambda x, y, d: x * sin(d) + y * cos(d)
    rv = fx(v[0], v[1], rad), fy(v[0], v[1], rad)
    return translate(rv, o)
rotate vector Args: p: point (x, y) rad: angle(radian) o: origin (x, y)
juraj-google-style
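A runnable sketch of `rotate` above; the `vector` and `translate` helpers are minimal stand-ins (assumptions, not part of the record):

from math import cos, sin, pi

def vector(o, p):
    # stand-in: vector from origin o to point p
    return (p[0] - o[0], p[1] - o[1])

def translate(v, o):
    # stand-in: move vector v back by origin o
    return (v[0] + o[0], v[1] + o[1])

x, y = rotate((1, 0), pi / 2)
print(round(x, 9), round(y, 9))  # 0.0 1.0 -- 90-degree rotation about the origin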
async def process_message(self, message, wait=True): to_check = deque([self._waiters]) ignored = True while (len(to_check) > 0): context = to_check.popleft() waiters = context.get(OperationManager._LEAF, []) for waiter in waiters: if isinstance(waiter, asyncio.Future): waiter.set_result(message) else: try: (await _wait_or_launch(self._loop, waiter, message, wait)) except: self._logger.warning('Error calling every_match callback, callback=%s, message=%s', waiter, message, exc_info=True) ignored = False for key in context: if (key is OperationManager._LEAF): continue message_val = _get_key(message, key) if (message_val is _MISSING): continue next_level = context[key] if (message_val in next_level): to_check.append(next_level[message_val]) return (not ignored)
Process a message to see if it wakes any waiters. This will check waiters registered to see if they match the given message. If so, they are awoken and passed the message. All matching waiters will be woken. This method returns False if the message matched no waiters so it was ignored. Normally you want to use wait=True (the default behavior) to guarantee that all callbacks have finished before this method returns. However, sometimes that can cause a deadlock if those callbacks would themselves invoke behavior that requires whatever is waiting for this method to be alive. In that case you can pass wait=False to ensure that the caller of this method does not block. Args: message (dict or object): The message that we should process wait (bool): Whether to block until all callbacks have finished or to return once the callbacks have been launched. Returns: bool: True if at least one waiter matched, otherwise False.
codesearchnet
def get_statuses(self, batch_ids):
    with self._lock:
        return {b: self.get_status(b) for b in batch_ids}
Returns a statuses dict for the requested batches. Args: batch_ids (list of str): The ids of the batches to get statuses for Returns: dict: A dict with keys of batch ids, and values of status enums
codesearchnet
def supported_cache_type(types):
    if isinstance(types, str):
        types = [typ.strip() for typ in types.split(",")]
    for typ in types:
        if typ not in ["reflink", "hardlink", "symlink", "copy"]:
            return False
    return True
Checks if link type config option has a valid value. Args: types (list/string): type(s) of links that dvc should try out.
juraj-google-style
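Expected behavior of `supported_cache_type` above:

print(supported_cache_type("reflink, hardlink"))  # True -- comma-separated string accepted
print(supported_cache_type(["symlink", "copy"]))  # True
print(supported_cache_type("rsync"))              # False -- unknown link type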
def _gen_sentence(self, assetid_body_tuple):
    asset_id, body = assetid_body_tuple
    text = self._process(body)
    sentence = LabeledSentence(text, labels=['DOC_%s' % str(asset_id)])
    return sentence
Takes an assetid_body_tuple and returns a Doc2Vec LabeledSentence Args: assetid_body_tuple (tuple): (assetid, bodytext) pair
juraj-google-style
def _run_submission(self, metadata): if self._use_gpu: docker_binary = 'nvidia-docker' container_name = metadata['container_gpu'] else: docker_binary = 'docker' container_name = metadata['container'] if metadata['type'] == 'defense': cmd = [docker_binary, 'run', '--network=none', '-m=24g', '-v', '{0}:/input_images:ro'.format(self._sample_input_dir), '-v', '{0}:/output_data'.format(self._sample_output_dir), '-v', '{0}:/code'.format(self._extracted_submission_dir), '-w', '/code', container_name, './' + metadata['entry_point'], '/input_images', '/output_data/result.csv'] else: epsilon = np.random.choice(ALLOWED_EPS) cmd = [docker_binary, 'run', '--network=none', '-m=24g', '-v', '{0}:/input_images:ro'.format(self._sample_input_dir), '-v', '{0}:/output_images'.format(self._sample_output_dir), '-v', '{0}:/code'.format(self._extracted_submission_dir), '-w', '/code', container_name, './' + metadata['entry_point'], '/input_images', '/output_images', str(epsilon)] logging.info('Command to run submission: %s', ' '.join(cmd)) return shell_call(cmd)
Runs submission inside Docker container. Args: metadata: dictionary with submission metadata Returns: True if status code of Docker command was success (i.e. zero), False otherwise.
juraj-google-style
def update_qos_aggregated_configuration(self, qos_configuration, timeout=-1):
    uri = "{}{}".format(self.data["uri"], self.QOS_AGGREGATED_CONFIGURATION)
    return self._helper.update(qos_configuration, uri=uri, timeout=timeout)
Updates the QoS aggregated configuration for the logical interconnect. Args: qos_configuration: QOS configuration. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stops waiting for its completion. Returns: dict: Logical Interconnect.
juraj-google-style
def _decode_socket_response_bytes(self, response):
    try:
        return str(response, encoding='utf8')
    except UnicodeError:
        self.log.error('Failed to decode socket response bytes using encoding utf8: %s', response)
        raise
Returns a string decoded from the socket response bytes. Args: response: bytes, the response to be decoded. Returns: The string decoded from the given bytes. Raises: UnicodeError: if failed to decode the given bytes using encoding utf8.
github-repos
def random(cls, num_qubits, seed=None):
    if seed is not None:
        np.random.seed(seed)
    z = np.random.randint(2, size=num_qubits).astype(np.bool)
    x = np.random.randint(2, size=num_qubits).astype(np.bool)
    return cls(z, x)
Return a random Pauli on number of qubits. Args: num_qubits (int): the number of qubits seed (int): Optional. To set a random seed. Returns: Pauli: the random pauli
codesearchnet
def to_csv(self): header = [] component_header = [] for row in self: for j in row.__dict__.keys(): if (j == '_colour'): j = 'colour' header.append(j) for k in row.component.__dict__.keys(): component_header.append(k) header = set(header) component_header = set(component_header) header.remove('component') header_row = '' if ('colour' in header): header_row += 'colour,' header.remove('colour') has_colour = True for item in header: header_row += (item + ',') for item in component_header: header_row += (('component ' + item) + ',') result = (header_row.strip(',') + '\n') for row in self: if has_colour: result += (row.__dict__.get('_colour', '') + ',') for item in header: result += (str(row.__dict__.get(item, '')) + ',') for item in component_header: result += (str(row.component.__dict__.get(item, '')) + ',') result += '\n' return result
Renders a legend as a CSV string. No arguments. Returns: str: The legend as a CSV.
codesearchnet
def add_output(self, *args, **kwargs): return self._outputs.add(*args, **kwargs)
Add a wrapped output argument to the hint. Args: *args: The output tensor. **kwargs: "name" label "tag" a tag to group multiple arguments that will be aggregated. I.e. a string like 'cool_input'. Basically multiple inputs can be added to the same hint for parallel operations that will eventually be combined. An example would be static_rnn which creates multiple copies of state or inputs. "aggregate" aggregation strategy that is valid only for tag non None. Acceptable values are OpHint.AGGREGATE_FIRST, OpHint.AGGREGATE_LAST, and OpHint.AGGREGATE_STACK. "index_override" The global index to use. This corresponds to the argument order in the final stub that will be generated. Returns: The wrapped output tensor.
github-repos
def usufyToGmlExport(d, fPath): try: oldData=nx.read_gml(fPath) except UnicodeDecodeError as e: print("UnicodeDecodeError:\t" + str(e)) print("Something went wrong when reading the .gml file relating to the decoding of UNICODE.") import time as time fPath+="_" +str(time.time()) print("To avoid losing data, the output file will be renamed to use the timestamp as:\n" + fPath + "_" + str(time.time())) print() oldData = nx.Graph() except Exception as e: oldData = nx.Graph() newGraph = _generateGraphData(d, oldData) nx.write_gml(newGraph,fPath)
Workaround to export data to a .gml file. Args: ----- d: Data to export. fPath: File path for the output file.
juraj-google-style
def _build(self, inputs, prev_state): next_state = self._model(prev_state) return (next_state, next_state)
Connects the ModelRNN module into the graph. If this is not the first time the module has been connected to the graph, the Tensors provided as inputs and prev_state must have the same final dimension, in order for the existing variables to be the correct size for their corresponding multiplications. The batch size may differ for each connection. Args: inputs: Tensor input to the ModelRNN (ignored). prev_state: Tensor of size `model.output_size`. Returns: output: Tensor of size `model.output_size`. next_state: Tensor of size `model.output_size`.
codesearchnet
def to_proj4(self, as_dict=False): string = "%s" % self.proj.to_proj4() string += " %s" % self.geogcs.to_proj4(toplevel=False) string += " " + " ".join(param.to_proj4() for param in self.params) string += " %s" % self.unit.to_proj4() string += " +axis=" + self.twin_ax[0].proj4 + self.twin_ax[1].proj4 + "u" string += " +no_defs" if as_dict: return dict([ entry.lstrip('+').split('=') for entry in string.split() if entry != "+no_defs" ]) else: return string
Returns the CS as a proj4 formatted string or dict. Arguments: - **as_dict** (optional): If True, returns the proj4 string as a dict (defaults to False).
juraj-google-style
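The `as_dict=True` branch of `to_proj4` above is a plain tokenisation of the proj4 string it built; the parsing step can be exercised in isolation on a hand-written string (the string below is illustrative, not produced by this class).

proj4 = "+proj=tmerc +lat_0=0 +lon_0=9 +k=0.9996 +x_0=500000 +axis=enu +no_defs"
parsed = dict(
    entry.lstrip('+').split('=')
    for entry in proj4.split()
    if entry != "+no_defs"
)
# {'proj': 'tmerc', 'lat_0': '0', 'lon_0': '9', 'k': '0.9996', 'x_0': '500000', 'axis': 'enu'}

Note that this parse assumes every remaining token is key=value; a bare flag other than +no_defs would make the split yield a single element and break the dict construction.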
def create_opengl_context(surface_size=(640, 480)): egl_display = egl.eglGetDisplay(egl.EGL_DEFAULT_DISPLAY) (major, minor) = (egl.EGLint(), egl.EGLint()) egl.eglInitialize(egl_display, pointer(major), pointer(minor)) config_attribs = [egl.EGL_SURFACE_TYPE, egl.EGL_PBUFFER_BIT, egl.EGL_BLUE_SIZE, 8, egl.EGL_GREEN_SIZE, 8, egl.EGL_RED_SIZE, 8, egl.EGL_DEPTH_SIZE, 24, egl.EGL_RENDERABLE_TYPE, egl.EGL_OPENGL_BIT, egl.EGL_NONE] config_attribs = (egl.EGLint * len(config_attribs))(*config_attribs) num_configs = egl.EGLint() egl_cfg = egl.EGLConfig() egl.eglChooseConfig(egl_display, config_attribs, pointer(egl_cfg), 1, pointer(num_configs)) (width, height) = surface_size pbuffer_attribs = [egl.EGL_WIDTH, width, egl.EGL_HEIGHT, height, egl.EGL_NONE] pbuffer_attribs = (egl.EGLint * len(pbuffer_attribs))(*pbuffer_attribs) egl_surf = egl.eglCreatePbufferSurface(egl_display, egl_cfg, pbuffer_attribs) egl.eglBindAPI(egl.EGL_OPENGL_API) egl_context = egl.eglCreateContext(egl_display, egl_cfg, egl.EGL_NO_CONTEXT, None) egl.eglMakeCurrent(egl_display, egl_surf, egl_surf, egl_context)
Create offscreen OpenGL context and make it current. Users are expected to directly use EGL API in case more advanced context management is required. Args: surface_size: (width, height), size of the offscreen rendering surface.
codesearchnet
def GetNumberOfRows(self, table_name): if not self._connection: raise IOError('Not opened.') self._cursor.execute(self._NUMBER_OF_ROWS_QUERY.format(table_name)) row = self._cursor.fetchone() if not row: raise IOError( 'Unable to retrieve number of rows of table: {0:s}'.format( table_name)) number_of_rows = row[0] if isinstance(number_of_rows, py2to3.STRING_TYPES): try: number_of_rows = int(number_of_rows, 10) except ValueError as exception: raise IOError(( 'Unable to determine number of rows of table: {0:s} ' 'with error: {1!s}').format(table_name, exception)) return number_of_rows
Retrieves the number of rows in the table. Args: table_name (str): name of the table. Returns: int: number of rows. Raises: IOError: if the file-like object has not been opened. OSError: if the file-like object has not been opened.
juraj-google-style
def upload_file(self, url, file, callback=None, extra_headers={}): extra_headers = extra_headers.copy() response = None if (os.stat(file.name).st_size == 0): raise CommError(('%s is an empty file' % file.name)) try: progress = Progress(file, callback=callback) response = requests.put(url, data=progress, headers=extra_headers) response.raise_for_status() except requests.exceptions.RequestException as e: total = progress.len status = self._status_request(url, total) if (status.status_code in (308, 408, 500, 502, 503, 504)): util.sentry_reraise(retry.TransientException(exc=e)) else: util.sentry_reraise(e) return response
Uploads a file to W&B with failure resumption Args: url (str): The url to upload to file (str): The path to the file you want to upload callback (:obj:`func`, optional): A callback which is passed the number of bytes uploaded since the last time it was called, used to report progress Returns: The requests library response object
codesearchnet
def get_switch_macs(self, switch_ip=None, node=None, vlan=None, mac=None, port=None, verbose=0):
    if (switch_ip is None):
        if (node is None):
            raise Exception('get_switch_macs() requires switch_ip or node parameter')
        switch_ip = node.get_ipaddr()
    mac_obj = natlas_mac(self.config)
    if (vlan is None):
        macs = mac_obj.get_macs(switch_ip, verbose)
    else:
        macs = mac_obj.get_macs_for_vlan(switch_ip, vlan, verbose)
    if ((mac is None) and (port is None)):
        return (macs if macs else [])
    ret = []
    for m in macs:
        if (mac is not None):
            if (re.match(mac, m.mac) is None):
                continue
        if (port is not None):
            if (re.match(port, m.port) is None):
                continue
        ret.append(m)
    return ret
Get the CAM table from a switch. Args: switch_ip: IP address of the device. node: natlas_node from new_node(). vlan: Filter results by VLAN. mac: Filter results by MAC address (regex). port: Filter results by port (regex). verbose: Display progress to stdout. Either switch_ip or node is required. Returns: Array of natlas_mac objects.
codesearchnet
def dict_get_path(data, path, default=None): keys = path.split('.') for k in keys: if (type(data) == list): found = False for item in data: name = item.get('name', item.get('type')) if (name == k): found = True data = item break if (not found): return default elif (type(data) == dict): if (k in data): data = data[k] else: return default else: return default return data
Returns the value inside nested structure of data located at period delimited path When traversing a list, as long as that list is containing objects of type dict, items in that list will have their "name" and "type" values tested against the current key in the path. Args: data (dict or list): data to traverse path (str): '.' delimited string Kwargs: default: value to return if path does not exist
codesearchnet
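Usage sketch for `dict_get_path` above, showing the list traversal keyed on the items' "name" field (the data is made up for the example).

data = {
    'server': {
        'interfaces': [
            {'name': 'eth0', 'mtu': 1500},
            {'name': 'eth1', 'mtu': 9000},
        ]
    }
}
dict_get_path(data, 'server.interfaces.eth1.mtu')         # -> 9000
dict_get_path(data, 'server.interfaces.eth2.mtu', 'n/a')  # -> 'n/a' (missing item falls back to default)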
def import_tracks(self, import_tracks): if isinstance(import_tracks, tracks.Track): import_tracks = [import_tracks] idx_mapping = {} for track in import_tracks: idx_mapping[track.idx] = track if track.idx in self._tracks.keys(): track.idx = naming.index_name_if_in_list(track.idx, self._tracks.keys()) self._tracks[track.idx] = track return idx_mapping
Add the given tracks/track to the corpus. If any of the given track-ids already exists, a suffix is appended so it is unique. Args: import_tracks (list): Either a list of or a single :py:class:`audiomate.tracks.Track`. Returns: dict: A dictionary containing track-idx mappings (old-track-idx/track-instance). If a track is imported whose idx already exists, this mapping can be used to look up the new idx.
juraj-google-style
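Hedged usage sketch for `import_tracks` above; the `Corpus` and `FileTrack` names are assumptions based on the audiomate docstring and may not match the installed version exactly.

import audiomate
from audiomate import tracks

corpus = audiomate.Corpus()
a = tracks.FileTrack('utt-1', '/data/audio/a.wav')
b = tracks.FileTrack('utt-1', '/data/audio/b.wav')  # deliberately clashing idx

mapping = corpus.import_tracks([a, b])
# b receives a suffixed idx so both tracks coexist; the mapping is keyed by the
# original idx and points at the (possibly renamed) track instance.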
def _set_save_spec(self, inputs, args=None, kwargs=None): if self._saved_model_inputs_spec is not None: return inputs_spec = tree.map_structure(tf_utils.get_tensor_spec, inputs) args_spec = tree.map_structure(tf_utils.get_tensor_spec, args or []) kwargs_spec = {} for key, kwarg in kwargs.items(): flat_kwarg = tree.flatten(kwarg) flat_specs = [tf_utils.get_tensor_spec(x) for x in flat_kwarg] if any((s is None for s in flat_specs)): continue kwargs_spec[key] = tree.pack_sequence_as(kwarg, flat_specs) self._saved_model_inputs_spec = inputs_spec self._saved_model_arg_spec = ([inputs_spec] + list(args_spec), kwargs_spec)
Defines the save spec so that serialization can trace layer calls. The TensorSpecs of the call function `inputs`, `args`, and `kwargs` are saved into a tuple of `([inputs] + args, kwargs)`. Args: inputs: possibly nested inputs passed into the call function. args: a list of positional arguments passed into call. kwargs: a dictionary of keyword arguments passed into call.
github-repos
def version(self):
    version = int(self._dll.JLINKARM_GetDLLVersion())
    major = version // 10000
    minor = (version // 100) % 100
    rev = version % 100
    rev = '' if rev == 0 else chr(rev + ord('a') - 1)
    return '%d.%02d%s' % (major, minor, rev)
Returns the device's version. The device's version is returned as a string of the format: M.mr where ``M`` is major number, ``m`` is minor number, and ``r`` is revision character. Args: self (JLink): the ``JLink`` instance Returns: Device version string.
juraj-google-style
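The packed-version arithmetic in `version` above can be walked through standalone (62216 is a made-up packed value, not a real DLL version).

version = 62216
major = version // 10000                            # 6
minor = (version // 100) % 100                      # 22
rev = version % 100                                 # 16
rev = '' if rev == 0 else chr(rev + ord('a') - 1)   # 'p'
'%d.%02d%s' % (major, minor, rev)                   # -> '6.22p'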
def __init__(self, board_name, https=False, session=None):
    self._board_name = board_name
    self._https = https
    self._protocol = 'https://' if https else 'http://'
    self._url = Url(board_name=board_name, https=self._https)
    self._requests_session = session or requests.session()
    self._requests_session.headers['User-Agent'] = 'py-4chan/%s' % __version__
    self._thread_cache = {}
Creates a :mod:`basc_py4chan.Board` object. Args: board_name (string): Name of the board, such as "tg" or "etc". https (bool): Whether to use a secure connection to 4chan. session: Existing requests.session object to use instead of our current one.
juraj-google-style
def start(self): return (self.first.lineno, self.first.column)
The start of the logical line. Returns: A tuple of the starting line number and column.
github-repos
def merge_lists(*args): out = {} for contacts in filter(None, args): for contact in contacts: out[contact.value] = contact return list(out.values())
Merge an arbitrary number of lists into a single list and dedupe it Args: *args: Two or more lists Returns: A deduped merged list of all the provided lists as a single list
codesearchnet
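Usage sketch for `merge_lists` above; the namedtuple is a stand-in for whatever contact objects the real lists hold, since the only requirement the code imposes is a `.value` attribute.

from collections import namedtuple

Contact = namedtuple('Contact', ['value', 'label'])

work = [Contact('alice@example.com', 'work')]
home = [Contact('alice@example.com', 'home'), Contact('bob@example.com', 'home')]

merged = merge_lists(work, home, None)
# -> 2 contacts; the duplicate alice entry keeps the last-seen ('home') version,
#    and the None argument is dropped by filter(None, args).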
def crop_image_to_patches(self, images: np.ndarray, min_patches: int, max_patches: int, use_thumbnail: bool=True, patch_size: Optional[Union[Tuple, int, dict]]=None, data_format: ChannelDimension=None):
    if data_format is None:
        data_format = infer_channel_dimension_format(images)
    images = to_channel_dimension_format(images, ChannelDimension.FIRST, data_format)
    patch_size_height, patch_size_width = (patch_size['height'], patch_size['width'])
    original_height, original_width = images.shape[-2:]
    num_columns, num_rows = get_optimal_tiled_canvas((original_height, original_width), (patch_size_height, patch_size_width), min_patches, max_patches)
    target_width = patch_size_width * num_columns
    target_height = patch_size_height * num_rows
    num_blocks = num_columns * num_rows
    resized_image = self.resize(images, {'height': target_height, 'width': target_width}, data_format=ChannelDimension.FIRST, input_data_format=ChannelDimension.FIRST)
    processed_images = []
    for i in range(num_blocks):
        column = i % num_columns
        row = i // num_columns
        box = (column * patch_size_width, row * patch_size_height, (column + 1) * patch_size_width, (row + 1) * patch_size_height)
        patch_image = resized_image[..., box[1]:box[3], box[0]:box[2]]
        patch_image = to_channel_dimension_format(patch_image, data_format, ChannelDimension.FIRST)
        processed_images.append(patch_image)
    if use_thumbnail and len(processed_images) != 1:
        thumbnail_img = self.resize(images, patch_size, data_format=data_format, input_data_format=ChannelDimension.FIRST)
        processed_images.append(thumbnail_img)
    return processed_images
Crop the image to patches and return a list of cropped images. The number of patches and their grid arrangement are determined by the original image size, the target patch size and the minimum and maximum number of patches. The aspect ratio of the patches grid is chosen to be the closest to the original image aspect ratio. Args: images (`np.ndarray`): The image to be cropped. min_patches (`int`): The minimum number of patches to be extracted from the image. max_patches (`int`): The maximum number of patches to be extracted from the image. use_thumbnail (`bool`, *optional*, defaults to `True`): Whether to add a thumbnail image to the list of cropped patches. patch_size (`int`, `Tuple[int, int]`, `dict`, *optional*): The size of the output patches. data_format (`ChannelDimension`, *optional*): The format of the image data. If `None`, the format is inferred from the input image. Returns: List[`PIL.Image.Image`] or List[np.ndarray]: The list of cropped images.
github-repos
def _commit(self): if (not self.in_progress): raise ValueError(_CANT_COMMIT) commit_response = _commit_with_retry(self._client, self._write_pbs, self._id) self._clean_up() return list(commit_response.write_results)
Transactionally commit the changes accumulated. Returns: List[google.cloud.proto.firestore.v1beta1.\ write_pb2.WriteResult, ...]: The write results corresponding to the changes committed, returned in the same order as the changes were applied to this transaction. A write result contains an ``update_time`` field. Raises: ValueError: If no transaction is in progress.
codesearchnet
def final_energy_from_outcar( filename='OUTCAR' ):
    with open( filename ) as f:
        outcar = f.read()
    energy_re = re.compile( r"energy\(sigma->0\) =\s+([-\d\.]+)" )
    energy = float( energy_re.findall( outcar )[-1] )
    return energy
Finds and returns the energy from a VASP OUTCAR file, by searching for the last `energy(sigma->0)` entry. Args: filename (Str, optional): OUTCAR filename. Defaults to 'OUTCAR'. Returns: (Float): The last energy read from the OUTCAR file.
juraj-google-style
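A self-contained check of the regex in `final_energy_from_outcar` above, using a fabricated OUTCAR excerpt (the value is made up).

import re

outcar = (
    "  free  energy   TOTEN  =       -53.21735672 eV\n"
    "  energy  without entropy=      -53.21735672"
    "  energy(sigma->0) =      -53.21735672\n"
)
energy_re = re.compile(r"energy\(sigma->0\) =\s+([-\d\.]+)")
float(energy_re.findall(outcar)[-1])   # -> -53.21735672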
def FindEnumTypeByName(self, full_name): full_name = _NormalizeFullyQualifiedName(full_name) if (full_name not in self._enum_descriptors): self._FindFileContainingSymbolInDb(full_name) return self._enum_descriptors[full_name]
Loads the named enum descriptor from the pool. Args: full_name: The full name of the enum descriptor to load. Returns: The enum descriptor for the named type. Raises: KeyError: if the enum cannot be found in the pool.
codesearchnet
def _control_dependencies_for_inputs(self, input_ops) -> list[Operation]: ret = [] for controller in self._control_dependencies_stack: dominated = False for op in input_ops: if controller.op_in_group(op): dominated = True break if not dominated: ret.extend((c for c in controller.control_inputs if c not in input_ops)) return ret
For an op that takes `input_ops` as inputs, compute control inputs. The returned control dependencies should yield an execution that is equivalent to adding all control inputs in self._control_dependencies_stack to a newly created op. However, this function attempts to prune the returned control dependencies by observing that nodes created within the same `with control_dependencies(...):` block may have data dependencies that make the explicit approach redundant. Args: input_ops: The data input ops for an op to be created. Returns: A list of control inputs for the op to be created.
github-repos
def ensuredir(path_, verbose=None, info=False, mode=1023): if (verbose is None): verbose = VERYVERBOSE if isinstance(path_, (list, tuple)): path_ = join(*path_) if (HAVE_PATHLIB and isinstance(path_, pathlib.Path)): path_ = str(path_) if (not checkpath(path_, verbose=verbose, info=info)): if verbose: print(('[util_path] mkdir(%r)' % path_)) try: os.makedirs(normpath(path_), mode=mode) except OSError as ex: util_dbg.printex(ex, 'check that the longest existing path is not a bad windows symlink.', keys=['path_']) raise return path_
r""" Ensures that directory will exist. creates new dir with sticky bits by default Args: path (str): dpath to ensure. Can also be a tuple to send to join info (bool): if True prints extra information mode (int): octal mode of directory (default 0o1777) Returns: str: path - the ensured directory
codesearchnet
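Usage sketch for `ensuredir` above; it assumes the function (and the `checkpath` helper it relies on) is importable from the same utool-style module.

import tempfile

base = tempfile.mkdtemp()
cache_dir = ensuredir((base, 'cache', 'models'))  # tuple form is joined internally
same_dir = ensuredir(cache_dir)                   # already exists, returned unchanged
assert cache_dir == same_dir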
def create_connection(port=_PORT_, timeout=_TIMEOUT_, restart=False): if (_CON_SYM_ in globals()): if (not isinstance(globals()[_CON_SYM_], pdblp.BCon)): del globals()[_CON_SYM_] if ((_CON_SYM_ in globals()) and (not restart)): con = globals()[_CON_SYM_] if getattr(con, '_session').start(): con.start() return (con, False) else: con = pdblp.BCon(port=port, timeout=timeout) globals()[_CON_SYM_] = con con.start() return (con, True)
Create Bloomberg connection Returns: (Bloomberg connection, if connection is new)
codesearchnet
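Hedged usage sketch for `create_connection` above; it assumes pdblp is installed and a Bloomberg terminal is reachable locally, which the snippet itself cannot guarantee.

con, is_new = create_connection()               # first call starts and caches the BCon
con_again, reopened = create_connection()       # cached instance is reused
assert con is con_again and reopened is False
fresh_con, _ = create_connection(restart=True)  # force a brand-new connection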
def reconstruct_feature_maps(hidden_state: torch.Tensor, batch_size: int, padding: int, output_size: Tuple[float, float]) -> torch.Tensor: features = reshape_features(hidden_state) features = merge_patches(features, batch_size=batch_size, padding=padding) features = F.interpolate(features, size=output_size, mode='bilinear', align_corners=False) return features
Reconstructs feature maps from the hidden state produced by any of the encoder. Converts the hidden state of shape `(n_patches_per_batch * batch_size, seq_len, hidden_size)` to feature maps of shape `(batch_size, hidden_size, output_size[0], output_size[1])`. Args: hidden_state (torch.Tensor): Input tensor of shape `(n_patches_per_batch * batch_size, seq_len, hidden_size)` representing the encoded patches. batch_size (int): The number of samples in a batch. padding (int): The amount of padding to be removed when merging patches. output_size (Tuple[float, float]): The desired output size for the feature maps, specified as `(height, width)`. Returns: torch.Tensor: Reconstructed feature maps of shape `(batch_size, hidden_size, output_size[0], output_size[1])`.
github-repos