Columns: code (string, 20–4.93k characters), docstring (string, 33–1.27k characters), source (string, 3 classes)
def round(self, decimals=0, *args, **kwargs): return self.__constructor__( query_compiler=self._query_compiler.round(decimals=decimals, **kwargs) )
Round each element in the DataFrame. Args: decimals: The number of decimals to round to. Returns: A new DataFrame.
juraj-google-style
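A minimal usage sketch of the round wrapper above (assuming a pandas-compatible DataFrame such as Modin's; plain pandas is used here as a stand-in):

import pandas as pd  # stand-in for the DataFrame implementation defined above
df = pd.DataFrame({'a': [1.234, 5.678], 'b': [0.051, 2.449]})
print(df.round(decimals=1))  # each element rounded to one decimal place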
def is_type_or_profile_of(url: str, message_or_descriptor: annotation_utils.MessageOrDescriptorBase) -> bool: return is_type(url, message_or_descriptor) or is_profile_of(url, message_or_descriptor)
Whether message_or_descriptor is of type url *or* is a profile of url. Args: url: The FHIR structure definition URL to compare against. message_or_descriptor: The Message or Descriptor to examine. Returns: True if message_or_descriptor has a structure definition URL of url, or if it is a profile with a base structure definition URL of url.
github-repos
def GetUpdates(self, source, url, since): proto = url.split(':')[0] if proto not in ('http', 'https'): raise error.ConfigurationError('Unsupported protocol %s' % proto) conn = source.conn conn.setopt(pycurl.OPT_FILETIME, 1) conn.setopt(pycurl.ENCODING, 'bzip2, gzip') if since is not None: conn.setopt(pycurl.TIMEVALUE, int(since)) conn.setopt(pycurl.TIMECONDITION, pycurl.TIMECONDITION_IFMODSINCE) retry_count = 0 resp_code = 500 while retry_count < source.conf['retry_max']: try: source.log.debug('fetching %s', url) resp_code, headers, body_bytes = curl.CurlFetch(url, conn, self.log) self.log.debug('response code: %s', resp_code) finally: if resp_code < 400: if resp_code == 304: return [] if resp_code == 200: break retry_count += 1 self.log.warning('Failed connection: attempt #%s.', retry_count) if retry_count == source.conf['retry_max']: self.log.debug('max retries hit') raise error.SourceUnavailable('Max retries exceeded.') time.sleep(source.conf['retry_delay']) headers = headers.split('\r\n') last_modified = conn.getinfo(pycurl.INFO_FILETIME) self.log.debug('last modified: %s', last_modified) if last_modified == -1: for header in headers: if header.lower().startswith('last-modified'): self.log.debug('%s', header) http_ts_string = header[header.find(':') + 1:].strip() last_modified = self.FromHttpToTimestamp(http_ts_string) break else: http_ts_string = '' else: http_ts_string = self.FromTimestampToHttp(last_modified) self.log.debug('Last-modified is: %s', http_ts_string) try: body_bytes = bz2.decompress(body_bytes) self.log.debug('bzip encoding found') except IOError: self.log.debug('bzip encoding not found') response = StringIO(body_bytes.decode('utf-8')) data_map = self.GetMap(cache_info=response) if http_ts_string: http_ts = self.FromHttpToTimestamp(http_ts_string) self.log.debug('setting last modified to: %s', http_ts) data_map.SetModifyTimestamp(http_ts) return data_map
Get updates from a source. Args: source: A data source url: url to the data we want since: a timestamp representing the last change (None to force-get) Returns: A tuple containing the map of updates and a maximum timestamp Raises: ValueError: an object in the source map is malformed ConfigurationError: if the URL scheme is not a supported protocol
github-repos
def GetCampaignFeeds(client, feed, placeholder_type): campaign_feed_service = client.GetService('CampaignFeedService', 'v201809') campaign_feeds = [] more_pages = True selector = {'fields': ['CampaignId', 'MatchingFunction', 'PlaceholderTypes'], 'predicates': [{'field': 'Status', 'operator': 'EQUALS', 'values': ['ENABLED']}, {'field': 'FeedId', 'operator': 'EQUALS', 'values': [feed['id']]}, {'field': 'PlaceholderTypes', 'operator': 'CONTAINS_ANY', 'values': [placeholder_type]}], 'paging': {'startIndex': 0, 'numberResults': PAGE_SIZE}} while more_pages: page = campaign_feed_service.get(selector) if ('entries' in page): campaign_feeds.extend(page['entries']) selector['paging']['startIndex'] += PAGE_SIZE more_pages = (selector['paging']['startIndex'] < int(page['totalNumEntries'])) return campaign_feeds
Get a list of Feed Item Ids used by a campaign via a given Campaign Feed. Args: client: an AdWordsClient instance. feed: a Campaign Feed. placeholder_type: the Placeholder Type. Returns: A list of Feed Item Ids.
codesearchnet
def get_members(self, retrieve=False): if self.exists and hasattr(self.rdf.triples, 'pcdm') and hasattr(self.rdf.triples.pcdm, 'hasMember'): members = [ self.repo.parse_uri(uri) for uri in self.rdf.triples.pcdm.hasMember ] return members else: return []
get pcdm:hasMember for this resource Args: retrieve (bool): if True, issue .refresh() on resource thereby confirming existence and retrieving payload
juraj-google-style
def __init__(self, parent=None, **kwargs): if not parent: raise ValueError('Missing parent value.') super(QCOWPathSpec, self).__init__(parent=parent, **kwargs)
Initializes a path specification. Note that the QCOW path specification must have a parent. Args: parent (Optional[PathSpec]): parent path specification. Raises: ValueError: when parent is not set.
juraj-google-style
def _ScheduleTasks(self, storage_writer): logger.debug('Task scheduler started') self._status = definitions.STATUS_INDICATOR_RUNNING event_source_heap = _EventSourceHeap() self._FillEventSourceHeap( storage_writer, event_source_heap, start_with_first=True) event_source = event_source_heap.PopEventSource() task = None while event_source or self._task_manager.HasPendingTasks(): if self._abort: break try: if not task: task = self._task_manager.CreateRetryTask() if not task and event_source: task = self._task_manager.CreateTask(self._session_identifier) task.file_entry_type = event_source.file_entry_type task.path_spec = event_source.path_spec event_source = None self._number_of_consumed_sources += 1 if self._guppy_memory_profiler: self._guppy_memory_profiler.Sample() if task: if self._ScheduleTask(task): logger.debug( 'Scheduled task {0:s} for path specification {1:s}'.format( task.identifier, task.path_spec.comparable)) self._task_manager.SampleTaskStatus(task, 'scheduled') task = None else: self._task_manager.SampleTaskStatus(task, 'schedule_attempted') self._MergeTaskStorage(storage_writer) if not event_source_heap.IsFull(): self._FillEventSourceHeap(storage_writer, event_source_heap) if not task and not event_source: event_source = event_source_heap.PopEventSource() except KeyboardInterrupt: self._abort = True self._processing_status.aborted = True if self._status_update_callback: self._status_update_callback(self._processing_status) for task in self._task_manager.GetFailedTasks(): warning = warnings.ExtractionWarning( message='Worker failed to process path specification', path_spec=task.path_spec) self._storage_writer.AddWarning(warning) self._processing_status.error_path_specs.append(task.path_spec) self._status = definitions.STATUS_INDICATOR_IDLE if self._abort: logger.debug('Task scheduler aborted') else: logger.debug('Task scheduler stopped')
Schedules tasks. Args: storage_writer (StorageWriter): storage writer for a session storage.
juraj-google-style
def grepPDF(self, path): with open(path, 'rb') as pdf_file_obj: match = set() text = '' pdf_reader = PyPDF2.PdfFileReader(pdf_file_obj) pages = pdf_reader.numPages for page in range(pages): page_obj = pdf_reader.getPage(page) text += '\n' + page_obj.extractText() match.update(set(x.lower() for x in re.findall( self._keywords, text, re.IGNORECASE))) return match
Parse PDF files text content for keywords. Args: path: PDF file path. Returns: match: set of unique occurrences of every match.
juraj-google-style
def _get_vcap_services(vcap_services=None): vcap_services = (vcap_services or os.environ.get('VCAP_SERVICES')) if (not vcap_services): raise ValueError("VCAP_SERVICES information must be supplied as a parameter or as environment variable 'VCAP_SERVICES'") if isinstance(vcap_services, dict): return vcap_services try: vcap_services = json.loads(vcap_services) except json.JSONDecodeError: try: with open(vcap_services) as vcap_json_data: vcap_services = json.load(vcap_json_data) except: raise ValueError('VCAP_SERVICES information is not JSON or a file containing JSON:', vcap_services) return vcap_services
Retrieves the VCAP Services information from the `ConfigParams.VCAP_SERVICES` field in the config object. If `vcap_services` is not specified, it takes the information from VCAP_SERVICES environment variable. Args: vcap_services (str): Try to parse as a JSON string, otherwise, try open it as a file. vcap_services (dict): Return the dict as is. Returns: dict: A dict representation of the VCAP Services information. Raises: ValueError: * if `vcap_services` nor VCAP_SERVICES environment variable are specified. * cannot parse `vcap_services` as a JSON string nor as a filename.
codesearchnet
def recv_task_request_from_workers(self): info = MPI.Status() comm.recv(source=MPI.ANY_SOURCE, tag=TASK_REQUEST_TAG, status=info) worker_rank = info.Get_source() logger.info('Received task request from worker:{}'.format(worker_rank)) return worker_rank
Receives 1 task request from the MPI comm. Returns: worker_rank: rank id of the requesting worker
codesearchnet
def __init__(self, encoding='utf-8'): super(OutputWriter, self).__init__() self._encoding = encoding self._errors = 'strict'
Initializes an output writer. Args: encoding (Optional[str]): input encoding.
juraj-google-style
def assert_rank_at_least(x, rank, data=None, summarize=None, message=None, name=None): with ops.name_scope(name, 'assert_rank_at_least', (x, rank) + tuple(data or [])): x = ops.convert_to_tensor(x, name='x') rank = ops.convert_to_tensor(rank, name='rank') message = _message_prefix(message) static_condition = lambda actual_rank, given_rank: actual_rank >= given_rank dynamic_condition = math_ops.greater_equal if context.executing_eagerly(): name = '' else: name = x.name if data is None: data = [message, 'Tensor %s must have rank at least' % name, rank, 'Received shape: ', array_ops.shape(x)] try: assert_op = _assert_rank_condition(x, rank, static_condition, dynamic_condition, data, summarize) except ValueError as e: if e.args[0] == 'Static rank condition failed': raise ValueError('%sTensor %s must have rank at least %d. Received rank %d, shape %s' % (message, name, e.args[2], e.args[1], x.get_shape())) else: raise return assert_op
Assert `x` has rank equal to `rank` or higher. Example of adding a dependency to an operation: ```python with tf.control_dependencies([tf.compat.v1.assert_rank_at_least(x, 2)]): output = tf.reduce_sum(x) ``` Args: x: Numeric `Tensor`. rank: Scalar `Tensor`. data: The tensors to print out if the condition is False. Defaults to error message and first few entries of `x`. summarize: Print this many entries of each tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_rank_at_least". Returns: Op raising `InvalidArgumentError` unless `x` has specified rank or higher. If static checks determine `x` has correct rank, a `no_op` is returned. Raises: ValueError: If static checks determine `x` has wrong rank.
github-repos
def _info_to_string(info): for key in _TENSORBOARD_INFO_FIELDS: field_type = _TENSORBOARD_INFO_FIELDS[key] if not isinstance(getattr(info, key), field_type.runtime_type): raise ValueError( "expected %r of type %s, but found: %r" % (key, field_type.runtime_type, getattr(info, key)) ) if info.version != version.VERSION: raise ValueError( "expected 'version' to be %r, but found: %r" % (version.VERSION, info.version) ) json_value = { k: _TENSORBOARD_INFO_FIELDS[k].serialize(getattr(info, k)) for k in _TENSORBOARD_INFO_FIELDS } return json.dumps(json_value, sort_keys=True, indent=4)
Convert a `TensorBoardInfo` to string form to be stored on disk. The format returned by this function is opaque and should only be interpreted by `_info_from_string`. Args: info: A valid `TensorBoardInfo` object. Raises: ValueError: If any field on `info` is not of the correct type. Returns: A string representation of the provided `TensorBoardInfo`.
juraj-google-style
def categorize(values, categories, default=None): uniq_cats = list(unique_iterator(values)) cats = [] for c in values: if isinstance(categories, list): cat_ind = uniq_cats.index(c) if (cat_ind < len(categories)): cat = categories[cat_ind] else: cat = default else: cat = categories.get(c, default) cats.append(cat) return np.asarray(cats)
Maps discrete values to supplied categories. Replaces discrete values in input array with a fixed set of categories defined either as a list or dictionary. Args: values: Array of values to be categorized categories: List or dict of categories to map inputs to default: Default value to assign if value not in categories Returns: Array of categorized values
codesearchnet
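A small sketch of how categorize behaves (assuming numpy and the unique_iterator helper from the same module are available; the input values below are illustrative):

import numpy as np
values = np.array(['b', 'a', 'b', 'c'])
# list form: categories are assigned by order of first appearance, unmatched values get the default
print(categorize(values, ['B', 'A'], default='other'))            # ['B' 'A' 'B' 'other']
# dict form: explicit mapping with a default for missing keys
print(categorize(values, {'a': 'A', 'b': 'B'}, default='other'))  # ['B' 'A' 'B' 'other']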
def AddFilterOptions(self, argument_group): names = ['artifact_filters', 'date_filters', 'filter_file'] helpers_manager.ArgumentHelperManager.AddCommandLineArguments(argument_group, names=names) argument_group.add_argument('-x', '--extensions', dest='extensions_string', action='store', type=str, metavar='EXTENSIONS', help='Filter on file name extensions. This option accepts multiple comma separated values e.g. "csv,docx,pst".') argument_group.add_argument('--names', dest='names_string', action='store', type=str, metavar='NAMES', help='Filter on file names. This option accepts a comma separated string denoting all file names, e.g. --names "NTUSER.DAT,UsrClass.dat".') argument_group.add_argument('--signatures', dest='signature_identifiers', action='store', type=str, metavar='IDENTIFIERS', help='Filter on file format signature identifiers. This option accepts multiple comma separated values e.g. "esedb,lnk". Use "list" to show an overview of the supported file format signatures.')
Adds the filter options to the argument group. Args: argument_group (argparse._ArgumentGroup): argparse argument group.
codesearchnet
def load_from_dict(self, dictionary, _override=True, _allow_undeclared=False): undeclared_keys = [] for (key, value) in self._modules['six'].iteritems(dictionary): if ((key not in self._declarations) and (not _allow_undeclared)): undeclared_keys.append(key) continue if (key in self._loaded_values): if _override: self._logger.info('Overriding previously loaded value for %s (%s) with value: %s', key, self._loaded_values[key], value) else: self._logger.info('Ignoring new value (%s), keeping previous value for %s: %s', value, key, self._loaded_values[key]) continue key = (key.decode() if isinstance(key, bytes) else key) value = (value.decode() if isinstance(value, bytes) else value) self._loaded_values[key] = value if undeclared_keys: self._logger.warning('Ignoring undeclared configuration keys: %s', undeclared_keys)
Loads the config with values from a dictionary instead of a file. This is meant for testing and bin purposes and shouldn't be used in most applications. Args: dictionary: The dictionary containing config keys/values to update. _override: If True, new values will override previous values. _allow_undeclared: If True, silently load undeclared keys, otherwise warn and ignore the value. Typically used for loading config files before declarations have been evaluated.
codesearchnet
def sni2route(self, sni: SchemaNodeId, sctx: SchemaContext) -> SchemaRoute: nlist = sni.split('/') res = [] for qn in (nlist[1:] if (sni[0] == '/') else nlist): res.append(self.translate_node_id(qn, sctx)) return res
Translate schema node identifier to a schema route. Args: sni: Schema node identifier (absolute or relative). sctx: Schema context. Raises: ModuleNotRegistered: If `mid` is not registered in the data model. UnknownPrefix: If a prefix specified in `sni` is not declared.
codesearchnet
def fold(*vals): vals = [v for v in vals if v is not None] if not vals: return None return min(vals), max(vals)
Initialize a new (min,max) tuple interval from values. Args: *vals ([int,...]): A list of values (or Nones) Returns: ((int,int)): A (min,max) interval tuple or None
juraj-google-style
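A quick illustration of fold, which drops None values and collapses the rest into a (min, max) tuple:

print(fold(3, None, 7, 5))   # (3, 7)
print(fold(None, None))      # None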
def na_if(series, *values): series = pd.Series(series) series[series.isin(values)] = np.nan return series
If values in a series match a specified value, change them to `np.nan`. Args: series: Series or vector, often symbolic. *values: Value(s) to convert to `np.nan` in the series.
juraj-google-style
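A short usage sketch of na_if (assuming pandas and numpy are imported as in the function body):

import numpy as np
import pandas as pd
s = pd.Series([1, 2, 99, 4])
print(na_if(s, 99))   # the 99 entry is replaced with NaN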
def register_task(self, input, deps=None, manager=None, task_class=None, append=False): if (not append): work = Work(manager=manager) elif (not self.works): work = Work(manager=manager) append = False else: work = self.works[(- 1)] task = work.register(input, deps=deps, task_class=task_class) if (not append): self.register_work(work) return work
Utility function that generates a `Work` made of a single task Args: input: :class:`AbinitInput` deps: List of :class:`Dependency` objects specifying the dependency of this node. An empty list of deps implies that this node has no dependencies. manager: The :class:`TaskManager` responsible for the submission of the task. If manager is None, we use the :class:`TaskManager` specified during the creation of the work. task_class: Task subclass to instantiate. Default: :class:`AbinitTask` append: If True, the task is added to the last work (a new Work is created if flow is empty) Returns: The generated :class:`Work` for the task, work[0] is the actual task.
codesearchnet
def PushEventSource(self, event_source): if event_source.file_entry_type == ( dfvfs_definitions.FILE_ENTRY_TYPE_DIRECTORY): weight = 1 else: weight = 100 heap_values = (weight, time.time(), event_source) heapq.heappush(self._heap, heap_values)
Pushes an event source onto the heap. Args: event_source (EventSource): event source.
juraj-google-style
def _exception_for(self, code): if (code in self.errors): return self.errors[code] elif (500 <= code < 599): return exceptions.RemoteServerError else: return exceptions.UnknownError
Return the exception class suitable for the specified HTTP status code. Raises: UnknownError: The HTTP status code is not one of the knowns.
codesearchnet
def _alter_code(code, **attrs): PyCode_New = ctypes.pythonapi.PyCode_New PyCode_New.argtypes = ( ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.py_object, ctypes.py_object, ctypes.py_object, ctypes.py_object, ctypes.py_object, ctypes.py_object, ctypes.py_object, ctypes.py_object, ctypes.c_int, ctypes.py_object) PyCode_New.restype = ctypes.py_object args = [ [code.co_argcount, 'co_argcount'], [code.co_kwonlyargcount, 'co_kwonlyargcount'], [code.co_nlocals, 'co_nlocals'], [code.co_stacksize, 'co_stacksize'], [code.co_flags, 'co_flags'], [code.co_code, 'co_code'], [code.co_consts, 'co_consts'], [code.co_names, 'co_names'], [code.co_varnames, 'co_varnames'], [code.co_freevars, 'co_freevars'], [code.co_cellvars, 'co_cellvars'], [code.co_filename, 'co_filename'], [code.co_name, 'co_name'], [code.co_firstlineno, 'co_firstlineno'], [code.co_lnotab, 'co_lnotab']] for arg in args: if arg[1] in attrs: arg[0] = attrs[arg[1]] return PyCode_New( args[0][0], args[1][0], args[2][0], args[3][0], args[4][0], args[5][0], args[6][0], args[7][0], args[8][0], args[9][0], args[10][0], args[11][0], args[12][0], args[13][0], args[14][0])
Create a new code object by altering some of ``code`` attributes Args: code: code object attrs: a mapping of names of code object attrs to their values
juraj-google-style
def save(self, path_info, checksum): assert (path_info['scheme'] == 'local') assert (checksum is not None) path = path_info['path'] assert os.path.exists(path) (actual_mtime, actual_size) = get_mtime_and_size(path) actual_inode = get_inode(path) existing_record = self.get_state_record_for_inode(actual_inode) if (not existing_record): self._insert_new_state_record(path, actual_inode, actual_mtime, actual_size, checksum) return self._update_state_for_path_changed(path, actual_inode, actual_mtime, actual_size, checksum)
Save checksum for the specified path info. Args: path_info (dict): path_info to save checksum for. checksum (str): checksum to save.
codesearchnet
def gaussian_deriv(times: np.ndarray, amp: complex, center: float, sigma: float, ret_gaussian: bool=False) -> np.ndarray: (gauss, x) = gaussian(times, amp=amp, center=center, sigma=sigma, ret_x=True) gauss_deriv = (((- x) / sigma) * gauss) if ret_gaussian: return (gauss_deriv, gauss) return gauss_deriv
Continuous unnormalized gaussian derivative pulse. Args: times: Times to output pulse for. amp: Pulse amplitude at `center`. center: Center (mean) of pulse. sigma: Width (standard deviation) of pulse. ret_gaussian: Return gaussian with which derivative was taken with.
codesearchnet
def all(self, data={}, **kwargs): return super(Order, self).all(data, **kwargs)
Fetch all Order entities Returns: Dictionary of Order data
codesearchnet
def find_documents(self, sentence, limit=None, must_sort=True, search_type='fuzzy'): sentence = sentence.strip() sentence = strip_accents(sentence) if (sentence == u''): return self.get_all_docs() result_list_list = [] total_results = 0 for query_parser in self.search_param_list[search_type]: query = query_parser['query_parser'].parse(sentence) sortedby = None if (must_sort and ('sortedby' in query_parser)): sortedby = query_parser['sortedby'] if sortedby: results = self.__searcher.search(query, limit=limit, sortedby=sortedby) else: results = self.__searcher.search(query, limit=limit) results = [(result['docid'], result['doctype']) for result in results] result_list_list.append(results) total_results += len(results) if ((not must_sort) and (total_results >= limit)): break docs = set() for result_intermediate in result_list_list: for result in result_intermediate: doc = self._docs_by_id.get(result[0]) if (doc is None): continue docs.add(doc) docs = [d for d in docs] if ((not must_sort) and (limit is not None)): docs = docs[:limit] return docs
Returns all the documents matching the given keywords. Arguments: sentence --- the query sentence Returns: An array of documents (doc objects)
codesearchnet
def contains_peroxide(structure, relative_cutoff=1.1): ox_type = oxide_type(structure, relative_cutoff) if (ox_type == 'peroxide'): return True else: return False
Determines if a structure contains peroxide anions. Args: structure (Structure): Input structure. relative_cutoff: The peroxide bond distance is 1.49 Angstrom. Relative_cutoff * 1.49 stipulates the maximum distance two O atoms must be to each other to be considered a peroxide. Returns: Boolean indicating if structure contains a peroxide anion.
codesearchnet
def safe_group_name(group_name, group_max_length=100, ellipsis=True): ellipsis_value = '' if ellipsis: ellipsis_value = ' ...' if group_name is not None and len(group_name) > group_max_length: group_name_array = group_name.split(' ') group_name = '' for word in group_name_array: word = u'{}'.format(word) if (len(group_name) + len(word) + len(ellipsis_value)) >= group_max_length: group_name = '{}{}'.format(group_name, ellipsis_value) group_name = group_name.lstrip(' ') break group_name += ' {}'.format(word) return group_name
Truncate group name to match limit breaking on space and optionally add an ellipsis. .. note:: Currently the ThreatConnect group name limit is 100 characters. Args: group_name (string): The raw group name to be truncated. group_max_length (int): The max length of the group name. ellipsis (boolean): If true the truncated name will have '...' appended. Returns: (string): The truncated group name with optional ellipsis.
juraj-google-style
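A brief sketch of the truncation behaviour (the group name below is illustrative):

name = 'a very long group name that will certainly exceed the limit'
print(safe_group_name(name, group_max_length=30))                  # truncated on a word boundary, ends with ' ...'
print(safe_group_name(name, group_max_length=30, ellipsis=False))  # truncated on a word boundary, no ellipsis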
def list_group_members(self, name): return self.service.list_group_members( name, self.url_prefix, self.auth, self.session, self.session_send_opts)
Get the members of a group (does not include maintainers). Args: name (string): Name of group to query. Returns: (list[string]): List of member names. Raises: requests.HTTPError on failure.
juraj-google-style
def transformer_encode(encoder_function, inputs, target_space, hparams, attention_weights=None, features=None, losses=None, **kwargs): inputs = common_layers.flatten4d3d(inputs) (encoder_input, self_attention_bias, encoder_decoder_attention_bias) = transformer_prepare_encoder(inputs, target_space, hparams, features=features) mlperf_log.transformer_print(key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT, value=hparams.layer_prepostprocess_dropout, hparams=hparams) encoder_input = tf.nn.dropout(encoder_input, (1.0 - hparams.layer_prepostprocess_dropout)) attn_bias_for_padding = None if hparams.unidirectional_encoder: attn_bias_for_padding = encoder_decoder_attention_bias encoder_output = encoder_function(encoder_input, self_attention_bias, hparams, nonpadding=features_to_nonpadding(features, 'inputs'), save_weights_to=attention_weights, make_image_summary=(not common_layers.is_xla_compiled()), losses=losses, attn_bias_for_padding=attn_bias_for_padding, **kwargs) return (encoder_output, encoder_decoder_attention_bias)
Encode transformer inputs. Args: encoder_function: the encoder function inputs: Transformer inputs [batch_size, input_length, 1, hidden_dim] which will be flattened along the two spatial dimensions. target_space: scalar, target space ID. hparams: hyperparameters for model. attention_weights: weight to store attention to. features: optionally pass the entire features dictionary as well. This is needed now for "packed" datasets. losses: optional list onto which to append extra training losses **kwargs: additional arguments to pass to encoder_function Returns: Tuple of: encoder_output: Encoder representation. [batch_size, input_length, hidden_dim] encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder attention. [batch_size, input_length]
codesearchnet
def canonicalize(d, default=None): if isinstance(d, context.LogicalDevice): d = tf_device.DeviceSpec.from_string(d.name) else: d = tf_device.DeviceSpec.from_string(d) assert d.device_type is None or d.device_type == d.device_type.upper(), "Device type '%s' must be all-caps." % (d.device_type,) result = tf_device.DeviceSpec(replica=0, task=0, device_type='CPU', device_index=0) if ops.executing_eagerly_outside_functions(): host_cpu = tf_device.DeviceSpec.from_string(config.list_logical_devices('CPU')[0].name) if host_cpu.job: result = result.make_merged_spec(host_cpu) else: result = result.replace(job='localhost') if default: result = result.make_merged_spec(tf_device.DeviceSpec.from_string(default)) result = result.make_merged_spec(d) return result.to_string()
Canonicalize device string. If d has missing components, the rest would be deduced from the `default` argument or from '/replica:0/task:0/device:CPU:0'. For example: If d = '/cpu:0', default='/job:worker/task:1', it returns '/job:worker/replica:0/task:1/device:CPU:0'. If d = '/cpu:0', default='/job:worker', it returns '/job:worker/replica:0/task:0/device:CPU:0'. If d = '/gpu:0', default=None, it returns '/replica:0/task:0/device:GPU:0'. Note: This uses "job:localhost" as the default if executing eagerly. Args: d: a device string or tf.config.LogicalDevice default: a string for default device if d doesn't have all components. Returns: a canonicalized device string.
github-repos
def directed_tripartition_indices(N): result = [] if N <= 0: return result base = [0, 1, 2] for key in product(base, repeat=N): part = [[], [], []] for i, location in enumerate(key): part[location].append(i) result.append(tuple(tuple(p) for p in part)) return result
Return indices for directed tripartitions of a sequence. Args: N (int): The length of the sequence. Returns: list[tuple]: A list of tuples containing the indices for each partition. Example: >>> N = 1 >>> directed_tripartition_indices(N) [((0,), (), ()), ((), (0,), ()), ((), (), (0,))]
juraj-google-style
def add_offset(self, offset): self.offset += offset try: self._counterpart.add_offset(offset) except AttributeError: pass
Add specified value to the offset of a binary quadratic model. Args: offset (number): Value to be added to the constant energy offset of the binary quadratic model. Examples: This example creates an Ising model with an offset of -0.5 and then adds to it. >>> import dimod ... >>> bqm = dimod.BinaryQuadraticModel({0: 0.0, 1: 0.0}, {(0, 1): 0.5}, -0.5, dimod.SPIN) >>> bqm.add_offset(1.0) >>> bqm.offset 0.5
codesearchnet
def get_dataset(categories: list, split: str='train'): labels = ['sadness', 'joy', 'love', 'anger', 'fear', 'surprise'] label_map = {class_name: class_id for class_id, class_name in enumerate(labels)} labels_subset = np.array([label_map[class_name] for class_name in categories]) emotion_dataset = load_dataset('emotion', download_mode='force_redownload') X, y = (np.array(emotion_dataset[split]['text']), np.array(emotion_dataset[split]['label'])) subclass_idxs = [idx for idx, label in enumerate(y) if label in labels_subset] X_subset, y_subset = (X[subclass_idxs], y[subclass_idxs]) return (X_subset.tolist(), y_subset.tolist())
Takes a list of categories and a split (train/test/dev) and returns the corresponding subset of the dataset Args: categories (list): list of emotion categories to use split (str): The split of the dataset to use. Can be either "train", "dev", or "test". Defaults to train Returns: A list of text and a list of labels
github-repos
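A usage sketch of get_dataset (assuming load_dataset and numpy are imported as in the function above; note this downloads the HuggingFace 'emotion' dataset, so it needs network access, and the category choice is illustrative):

X_train, y_train = get_dataset(['joy', 'anger'], split='train')
print(len(X_train), y_train[:5])  # number of texts and the first few integer labels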
def sanger_ordered(self, institute_id=None, user_id=None): query = {'$match': { '$and': [ {'verb': 'sanger'}, ], }} if institute_id: query['$match']['$and'].append({'institute': institute_id}) if user_id: query['$match']['$and'].append({'user_id': user_id}) results = self.event_collection.aggregate([ query, {'$group': { '_id': "$case", 'vars': {'$addToSet' : '$variant_id'} }} ]) sanger_ordered = [item for item in results] return sanger_ordered
Get all variants with validations ever ordered. Args: institute_id(str) : The id of an institute user_id(str) : The id of an user Returns: sanger_ordered(list) : a list of dictionaries, each with "case_id" as keys and list of variant ids as values
juraj-google-style
def _split_cell(cell, module): lines = cell.split('\n') code = None last_def = (- 1) name = None define_wild_re = re.compile('^DEFINE\\s+.*$', re.IGNORECASE) define_re = re.compile('^DEFINE\\s+QUERY\\s+([A-Z]\\w*)\\s*?(.*)$', re.IGNORECASE) select_re = re.compile('^SELECT\\s*.*$', re.IGNORECASE) standard_sql_re = re.compile('^(CREATE|WITH|INSERT|DELETE|UPDATE)\\s*.*$', re.IGNORECASE) for (i, line) in enumerate(lines): define_match = define_re.match(line) select_match = select_re.match(line) standard_sql_match = standard_sql_re.match(line) if i: prior_content = ''.join(lines[:i]).strip() if select_match: select_match = ((len(prior_content) == 0) or ((prior_content[(- 1)] != '(') and (not standard_sql_re.match(prior_content)))) if standard_sql_match: standard_sql_match = ((len(prior_content) == 0) or (not standard_sql_re.match(prior_content))) if (define_match or select_match or standard_sql_match): if (code is None): code = '\n'.join(lines[:i]).strip() if len(code): code += '\n' elif (last_def >= 0): query = '\n'.join([line for line in lines[last_def:i] if len(line)]).strip() if (select_match and (name != datalab.data._utils._SQL_MODULE_MAIN) and (len(query) == 0)): continue statement = datalab.data.SqlStatement(query, module) module.__dict__[name] = statement module.__dict__[datalab.data._utils._SQL_MODULE_LAST] = statement if define_match: name = define_match.group(1) lines[i] = define_match.group(2) else: name = datalab.data._utils._SQL_MODULE_MAIN last_def = i else: define_wild_match = define_wild_re.match(line) if define_wild_match: raise Exception('Expected "DEFINE QUERY <name>"') if (last_def >= 0): query = '\n'.join([line for line in lines[last_def:] if len(line)]).strip() statement = datalab.data.SqlStatement(query, module) module.__dict__[name] = statement module.__dict__[datalab.data._utils._SQL_MODULE_LAST] = statement if (code is None): code = '' module.__dict__[datalab.data._utils._SQL_MODULE_ARGPARSE] = _arguments(code, module) return module.__dict__.get(datalab.data._utils._SQL_MODULE_LAST, None)
Split a hybrid %%sql cell into the Python code and the queries. Populates a module with the queries. Args: cell: the contents of the %%sql cell. module: the module that the contents will populate. Returns: The default (last) query for the module.
codesearchnet
def email_results(results, host, mail_from, mail_to, port=0, ssl=False, user=None, password=None, subject=None, attachment_filename=None, message=None, ssl_context=None): logging.debug('Emailing report to: {0}'.format(','.join(mail_to))) date_string = datetime.now().strftime('%Y-%m-%d') if attachment_filename: if (not attachment_filename.lower().endswith('.zip')): attachment_filename += '.zip' filename = attachment_filename else: filename = 'DMARC-{0}.zip'.format(date_string) assert isinstance(mail_to, list) msg = MIMEMultipart() msg['From'] = mail_from msg['To'] = ', '.join(mail_to) msg['Date'] = email.utils.formatdate(localtime=True) msg['Subject'] = (subject or 'DMARC results for {0}'.format(date_string)) text = (message or 'Please see the attached zip file\n') msg.attach(MIMEText(text)) zip_bytes = get_report_zip(results) part = MIMEApplication(zip_bytes, Name=filename) part['Content-Disposition'] = 'attachment; filename="{0}"'.format(filename) msg.attach(part) try: if (ssl_context is None): ssl_context = create_default_context() if ssl: server = smtplib.SMTP_SSL(host, port=port, context=ssl_context) server.connect(host, port) server.ehlo_or_helo_if_needed() else: server = smtplib.SMTP(host, port=port) server.connect(host, port) server.ehlo_or_helo_if_needed() if server.has_extn('starttls'): server.starttls(context=ssl_context) server.ehlo() else: logger.warning('SMTP server does not support STARTTLS. Proceeding in plain text!') if (user and password): server.login(user, password) server.sendmail(mail_from, mail_to, msg.as_string()) except smtplib.SMTPException as error: error = error.__str__().lstrip("b'").rstrip("'").rstrip('.') raise SMTPError(error) except socket.gaierror: raise SMTPError('DNS resolution failed') except ConnectionRefusedError: raise SMTPError('Connection refused') except ConnectionResetError: raise SMTPError('Connection reset') except ConnectionAbortedError: raise SMTPError('Connection aborted') except TimeoutError: raise SMTPError('Connection timed out') except SSLError as error: raise SMTPError('SSL error: {0}'.format(error.__str__())) except CertificateError as error: raise SMTPError('Certificate error: {0}'.format(error.__str__()))
Emails parsing results as a zip file Args: results (OrderedDict): Parsing results host: Mail server hostname or IP address mail_from: The value of the message from header mail_to : A list of addresses to mail to port (int): Port to use ssl (bool): Require a SSL connection from the start user: An optional username password: An optional password subject: Overrides the default message subject attachment_filename: Override the default attachment filename message: Override the default plain text body ssl_context: SSL context options
codesearchnet
def _to_qasm_output(self, header: Optional[str]=None, precision: int=10, qubit_order: ops.QubitOrderOrList=ops.QubitOrder.DEFAULT) -> QasmOutput: if (header is None): header = 'Generated from Cirq v{}'.format(cirq._version.__version__) qubits = ops.QubitOrder.as_qubit_order(qubit_order).order_for(self.all_qubits()) return QasmOutput(operations=self.all_operations(), qubits=qubits, header=header, precision=precision, version='2.0')
Returns a QASM object equivalent to the circuit. Args: header: A multi-line string that is placed in a comment at the top of the QASM. Defaults to a cirq version specifier. precision: Number of digits to use when representing numbers. qubit_order: Determines how qubits are ordered in the QASM register.
codesearchnet
def show_history(self, status=None, nids=None, full_history=False, metadata=False): nrows, ncols = get_terminal_size() works_done = [] for task in self.iflat_tasks(status=status, nids=nids): work = task.work if work not in works_done: works_done.append(work) if work.history or full_history: cprint(make_banner(str(work), width=ncols, mark="="), **work.status.color_opts) print(work.history.to_string(metadata=metadata)) if task.history or full_history: cprint(make_banner(str(task), width=ncols, mark="="), **task.status.color_opts) print(task.history.to_string(metadata=metadata)) if self.history or full_history: cprint(make_banner(str(self), width=ncols, mark="="), **self.status.color_opts) print(self.history.to_string(metadata=metadata))
Print the history of the flow to stdout. Args: status: if not None, only the tasks with this status are select full_history: Print full info set, including nodes with an empty history. nids: optional list of node identifiers used to filter the tasks. metadata: print history metadata (experimental)
juraj-google-style
def class_from_typename(cls, type_name: str) -> Optional[Type['JSONConvertible']]: return cls._TYPE_REGISTRY.class_from_typename(type_name)
Gets the class for a registered type name. Args: type_name: A string as the global unique type identifier for requested class. Returns: A type object if registered, otherwise None.
github-repos
def __call__(self, images: Union[str, List[str], 'Image.Image', List['Image.Image']], **kwargs: Any) -> Union['Image.Image', List['Image.Image']]: return super().__call__(images, **kwargs)
Transform the image(s) passed as inputs. Args: images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): The pipeline handles three types of images: - A string containing a http link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images, which must then be passed as a string. Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL images. timeout (`float`, *optional*, defaults to None): The maximum time in seconds to wait for fetching images from the web. If None, no timeout is used and the call may block forever. Return: An image (Image.Image) or a list of images (List["Image.Image"]) containing result(s). If the input is a single image, the return will be also a single image, if the input is a list of several images, it will return a list of transformed images.
github-repos
def timed_operation(msg, log_start=False): assert len(msg) if log_start: logger.info('Start {} ...'.format(msg)) start = timer() (yield) msg = (msg[0].upper() + msg[1:]) logger.info('{} finished, time:{:.4f} sec.'.format(msg, (timer() - start)))
Surround a context with a timer. Args: msg(str): the log to print. log_start(bool): whether to print also at the beginning. Example: .. code-block:: python with timed_operation('Good Stuff'): time.sleep(1) Will print: .. code-block:: python Good stuff finished, time:1sec.
codesearchnet
def send_log_message(self, message: LogMessage) -> None: self.send_log_messages([message])
Sends the log message to BigQuery. Args: * message: LogMessage dictionary Returns: * None Raises: * RuntimeError: if BigQuery insert fails
github-repos
def add_mapped_chain_ids(self, mapped_chains): mapped_chains = ssbio.utils.force_list(mapped_chains) for c in mapped_chains: if c not in self.mapped_chains: self.mapped_chains.append(c) log.debug('{}: added to list of mapped chains'.format(c)) else: log.debug('{}: chain already in list of mapped chains, not adding'.format(c))
Add chains by ID into the mapped_chains attribute Args: mapped_chains (str, list): Chain ID or list of IDs
juraj-google-style
def get_all(cls, account=None, location=None, include_disabled=False): qry = db.Resource.filter((Resource.resource_type_id == ResourceType.get(cls.resource_type).resource_type_id)) if account: qry = qry.filter((Resource.account_id == account.account_id)) if (not include_disabled): qry = qry.join(Account, (Resource.account_id == Account.account_id)).filter((Account.enabled == 1)) if location: qry = qry.filter((Resource.location == location)) return {res.resource_id: cls(res) for res in qry.all()}
Returns a list of all resources for a given account, location and resource type. Attributes: account (:obj:`Account`): Account owning the resources location (`str`): Location of the resources to return (region) include_disabled (`bool`): Include resources from disabled accounts (default: False) Returns: list of resource objects
codesearchnet
def from_ase_atoms(cls, atoms): return cls(atoms=atoms.get_chemical_symbols(), coords=atoms.positions)
Create an instance of the own class from an ASE Atoms object. Args: atoms (:class:`ase.atoms.Atoms`): Returns: Cartesian:
codesearchnet
def unpackStruct(self, data, def_buf): struct_str = '=' for fld in def_buf: if (not def_buf[fld][MeterData.CalculatedFlag]): struct_str = ((struct_str + str(def_buf[fld][MeterData.SizeValue])) + 's') if (len(data) == 255): contents = struct.unpack(struct_str, str(data)) else: self.writeCmdMsg(('Length error. Len() size = ' + str(len(data)))) contents = () return contents
Wrapper for struct.unpack with SerialBlock buffer definitions. Args: data (str): Implicit cast bytes to str, serial port return. def_buf (SerialBlock): Block object holding field lengths. Returns: tuple: parsed result of struct.unpack() with field definitions.
codesearchnet
def users(self, institute=None): query = {} if institute: LOG.info("Fetching all users from institute %s", institute) query = {'institutes': {'$in': [institute]}} else: LOG.info("Fetching all users") res = self.user_collection.find(query) return res
Return all users from the database Args: institute(str): A institute_id Returns: res(pymongo.Cursor): A cursor with users
juraj-google-style
def create(self, name): return Bucket(name, context=self._context).create(self._project_id)
Creates a new bucket. Args: name: a unique name for the new bucket. Returns: The newly created bucket. Raises: Exception if there was an error creating the bucket.
codesearchnet
def _parse_session_run_index(self, event): metadata_string = event.log_message.message try: metadata = json.loads(metadata_string) except ValueError as e: logger.error("Could not decode metadata string '%s' for step value: %s", metadata_string, e) return constants.SENTINEL_FOR_UNDETERMINED_STEP try: return metadata['session_run_index'] except KeyError: logger.error('The session_run_index is missing from the metadata: %s', metadata_string) return constants.SENTINEL_FOR_UNDETERMINED_STEP
Parses the session_run_index value from the event proto. Args: event: The event with metadata that contains the session_run_index. Returns: The int session_run_index value. Or constants.SENTINEL_FOR_UNDETERMINED_STEP if it could not be determined.
codesearchnet
def copy(self, source_file_names, destination_file_names): raise NotImplementedError
Recursively copy the file tree from the source to the destination Args: source_file_names: list of source file objects that needs to be copied destination_file_names: list of destination of the new object Raises: ``BeamIOError``: if any of the copy operations fail
github-repos
def add_virtual_loss(self, up_to): self.losses_applied += 1 loss = self.position.to_play self.W += loss if self.parent is None or self is up_to: return self.parent.add_virtual_loss(up_to)
Propagate a virtual loss up to the root node. Args: up_to: The node to propagate until. (Keep track of this! You'll need it to reverse the virtual loss later.)
juraj-google-style
def AddArg(self, arg): self.args.append(arg) if len(self.args) > self.number_of_args: raise ParseError("Too many args for this expression.") elif len(self.args) == self.number_of_args: return True return False
Adds a new arg to this expression. Args: arg: The argument to add (string). Returns: True if this arg is the last arg, False otherwise. Raises: ParseError: If there are too many args.
juraj-google-style
def output(self): if not self._inbound_nodes: raise AttributeError('Layer ' + self.name + ' has no inbound nodes.') return self._get_node_attribute_at_index(0, 'output_tensors', 'output')
Retrieves the output tensor(s) of a layer. Only applicable if the layer has exactly one output, i.e. if it is connected to one incoming layer. Returns: Output tensor or list of output tensors. Raises: AttributeError: if the layer is connected to more than one incoming layers. RuntimeError: if called in Eager mode.
github-repos
def set_s3_prefix(self, region, name): ct = self.session.client('cloudtrail', region_name=region) ct.update_trail(Name=name, S3KeyPrefix=self.account.account_name) auditlog( event='cloudtrail.set_s3_prefix', actor=self.ns, data={ 'account': self.account.account_name, 'region': region } ) self.log.info('Updated S3KeyPrefix to {0} for {0}/{1}'.format( self.account.account_name, region ))
Sets the S3 prefix for a CloudTrail Trail Args: region (`str`): Name of the AWS region name (`str`): Name of the CloudTrail Trail Returns: `None`
juraj-google-style
def webhook(self, webhook_url): if (not webhook_url): raise Exception('Url can not be None') matcher = re.match(self.__webhook_url_format, webhook_url) if (not matcher): raise Exception(('Invalid url format, looking for: ' + self.__webhook_url_format)) self.api_keys(int(matcher.group(1)), matcher.group(2))
Load object with webhook_url Args: webhook_url (str): full webhook url given by Discord 'create webhook' func
codesearchnet
def map_exp_ids(self, exp): names = self.exp_feature_names if (self.discretized_feature_names is not None): names = self.discretized_feature_names return [(names[x[0]], x[1]) for x in exp]
Maps ids to feature names. Args: exp: list of tuples [(id, weight), (id,weight)] Returns: list of tuples (feature_name, weight)
codesearchnet
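The mapping itself is just an id-to-name lookup over the explanation tuples; a standalone sketch with hypothetical feature names standing in for self.exp_feature_names:

names = ['age', 'income', 'score']      # stand-in for self.exp_feature_names
exp = [(1, -0.42), (0, 0.17)]           # (feature id, weight) pairs
print([(names[i], w) for i, w in exp])  # [('income', -0.42), ('age', 0.17)]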
def get_oligomeric_state(swiss_model_path): oligo_info = {} with open(swiss_model_path, 'r') as f: for line in f: if line.startswith('REMARK 3 MODEL INFORMATION'): break for i in range(10): line = f.readline() if ('ENGIN' in line): oligo_info['ENGIN'] = line.rstrip().split(' ')[(- 1)] elif ('OSTAT' in line): oligo_info['OSTAT'] = line.rstrip().split(' ')[(- 1)] elif ('OSRSN' in line): oligo_info['OSRSN'] = line.rstrip().split(' ')[(- 1)] elif ('QSPRD' in line): oligo_info['QSPRD'] = line.rstrip().split(' ')[(- 1)] elif ('GMQE' in line): oligo_info['GMQE'] = line.rstrip().split(' ')[(- 1)] elif ('QMN4' in line): oligo_info['QMN4'] = line.rstrip().split(' ')[(- 1)] elif ('MODT' in line): oligo_info['MODT'] = line.rstrip().split(' ')[(- 1)] return oligo_info
Parse the oligomeric prediction in a SWISS-MODEL repository file As of 2018-02-26, works on all E. coli models. Untested on other pre-made organism models. Args: swiss_model_path (str): Path to SWISS-MODEL PDB file Returns: dict: Information parsed about the oligomeric state
codesearchnet
def daemon(args): if os.environ.get(DVC_DAEMON): logger.debug("skipping launching a new daemon.") return cmd = [sys.executable] if not is_binary(): cmd += ["-m", "dvc"] cmd += ["daemon", "-q"] + args env = fix_env() file_path = os.path.abspath(inspect.stack()[0][1]) env[cast_bytes_py2("PYTHONPATH")] = cast_bytes_py2( os.path.dirname(os.path.dirname(file_path)) ) env[cast_bytes_py2(DVC_DAEMON)] = cast_bytes_py2("1") _spawn(cmd, env)
Launch a `dvc daemon` command in a detached process. Args: args (list): list of arguments to append to `dvc daemon` command.
juraj-google-style
def __init__(self, tcex): self.tcex = tcex self._db = None self._out_variables = None self._out_variables_type = None self.output_data = {} self._variable_match = re.compile(r'^{}$'.format(self._variable_pattern)) self._variable_parse = re.compile(self._variable_pattern) self._vars_keyvalue_embedded = re.compile( r'(?:\"\:\s?)[^\"]?{}'.format(self._variable_pattern) )
Initialize the Class properties. Args: tcex (object): Instance of TcEx.
juraj-google-style
def get_models(module: types.ModuleType, include_pretrained: bool=False) -> List[Tuple[str, type]]: models = [] model_classes = (transformers.PreTrainedModel, transformers.TFPreTrainedModel, transformers.FlaxPreTrainedModel) for attr_name in dir(module): if not include_pretrained and ('Pretrained' in attr_name or 'PreTrained' in attr_name): continue attr = getattr(module, attr_name) if isinstance(attr, type) and issubclass(attr, model_classes) and (attr.__module__ == module.__name__): models.append((attr_name, attr)) return models
Get the objects in a module that are models. Args: module (`types.ModuleType`): The module from which we are extracting models. include_pretrained (`bool`, *optional*, defaults to `False`): Whether or not to include the `PreTrainedModel` subclass (like `BertPreTrainedModel`) or not. Returns: List[Tuple[str, type]]: List of models as tuples (class name, actual class).
github-repos
def remove_root(self, model, setter=None): if (model not in self._roots): return self._push_all_models_freeze() try: self._roots.remove(model) finally: self._pop_all_models_freeze() self._trigger_on_change(RootRemovedEvent(self, model, setter))
Remove a model as root model from this Document. Changes to this model may still trigger ``on_change`` callbacks on this document, if the model is still referred to by other root models. Args: model (Model) : The model to remove as a root of this document. setter (ClientSession or ServerSession or None, optional) : This is used to prevent "boomerang" updates to Bokeh apps. (default: None) In the context of a Bokeh server application, incoming updates to properties will be annotated with the session that is doing the updating. This value is propagated through any subsequent change notifications that the update triggers. The session can compare the event setter to itself, and suppress any updates that originate from itself.
codesearchnet
def wrap_deepmind(env, dim=84, framestack=True): env = MonitorEnv(env) env = NoopResetEnv(env, noop_max=30) if "NoFrameskip" in env.spec.id: env = MaxAndSkipEnv(env, skip=4) env = EpisodicLifeEnv(env) if "FIRE" in env.unwrapped.get_action_meanings(): env = FireResetEnv(env) env = WarpFrame(env, dim) if framestack: env = FrameStack(env, 4) return env
Configure environment for DeepMind-style Atari. Note that we assume reward clipping is done outside the wrapper. Args: dim (int): Dimension to resize observations to (dim x dim). framestack (bool): Whether to framestack observations.
juraj-google-style
def parse_s3_url(url): parsed_url = urlparse(url) if parsed_url.scheme != "s3": raise ValueError("Expecting 's3' scheme, got: {} in {}".format(parsed_url.scheme, url)) return parsed_url.netloc, parsed_url.path.lstrip('/')
Returns an (s3 bucket, key name/prefix) tuple from a url with an s3 scheme Args: url (str): Returns: tuple: A tuple containing: str: S3 bucket name str: S3 key
juraj-google-style
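A quick usage sketch of parse_s3_url (assuming urlparse is imported from urllib.parse, as the function body expects):

bucket, key = parse_s3_url('s3://my-bucket/path/to/object.txt')
print(bucket, key)   # my-bucket path/to/object.txt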
def _parse_command(self, command): command = command.strip() if not command: return ('', [], None) command_items = command_parser.parse_command(command) command_items, output_file_path = command_parser.extract_output_file_path(command_items) return (command_items[0], command_items[1:], output_file_path)
Parse a command string into prefix and arguments. Args: command: (str) Command string to be parsed. Returns: prefix: (str) The command prefix. args: (list of str) The command arguments (i.e., not including the prefix). output_file_path: (str or None) The path to save the screen output to (if any).
github-repos
def _freezeModel(self, func): root = autotrackable.AutoTrackable() root.f = func input_func = root.f.get_concrete_function() output_func = convert_to_constants.convert_var_to_const_function_in_v1(input_func, lower_control_flow=False) return (root, output_func)
Freezes the function. Args: func: Function. Returns: root: AutoTrackable object with original ConcreteFunction. output_func: frozen ConcreteFunction.
github-repos
def AddLabels(self, labels): for label in labels: if not self._VALID_LABEL_REGEX.match(label): raise ValueError(( 'Unsupported label: "{0:s}". A label must only consist of ' 'alphanumeric characters or underscores.').format(label)) for label in labels: if label not in self.labels: self.labels.append(label)
Adds labels to the event tag. Args: labels (list[str]): labels. Raises: ValueError: if a label is malformed.
juraj-google-style
def heightmap_multiply_hm(hm1: np.ndarray, hm2: np.ndarray, hm3: np.ndarray) -> None: hm3[:] = (hm1[:] * hm2[:])
Multiplies two heightmap's together and stores the result in ``hm3``. Args: hm1 (numpy.ndarray): The first heightmap. hm2 (numpy.ndarray): The second heightmap to multiply with the first. hm3 (numpy.ndarray): A destination heightmap to store the result. .. deprecated:: 2.0 Do ``hm3[:] = hm1[:] * hm2[:]`` instead. Alternatively you can do ``HeightMap(hm1.array[:] * hm2.array[:])``.
codesearchnet
def integer_based_slice(self, ts): if isinstance(ts, slice): try: start = Seconds(0) if ts.start is None else ts.start if start < Seconds(0): start = self.end + start stop = self.end if ts.stop is None else ts.stop if stop < Seconds(0): stop = self.end + stop duration = stop - start ts = TimeSlice(start=start, duration=duration) except (ValueError, TypeError): pass if not isinstance(ts, TimeSlice): return ts diff = self.duration - self.frequency start_index = \ max(0, np.floor((ts.start - diff) / self.frequency)) end = self.end if ts.duration is None else ts.end ratio = np.round(end / self.frequency, 2) stop_index = np.ceil(ratio) return slice(int(start_index), int(stop_index))
Transform a :class:`TimeSlice` into integer indices that numpy can work with Args: ts (slice, TimeSlice): the time slice to translate into integer indices
juraj-google-style
def add_embedded_campaign(self, id, collection, campaign, confidence, analyst, date, description): if type(id) is not ObjectId: id = ObjectId(id) obj = getattr(self.db, collection) result = obj.find({'_id': id, 'campaign.name': campaign}) if result.count() > 0: return else: log.debug('Adding campaign to set: {}'.format(campaign)) campaign_obj = { 'analyst': analyst, 'confidence': confidence, 'date': date, 'description': description, 'name': campaign } result = obj.update( {'_id': id}, {'$push': {'campaign': campaign_obj}} ) return result
Adds an embedded campaign to the TLO. Args: id: the CRITs object id of the TLO collection: The db collection. See main class documentation. campaign: The campaign to assign. confidence: The campaign confidence analyst: The analyst making the assignment date: The date of the assignment description: A description Returns: The resulting mongo object
juraj-google-style
def m_to_inches(value): if value is None: return None return value * 39.37
Converts distance in meters to inches Args: value: floating point representing the distance in meters Returns: distance in inches
github-repos
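A one-line check of the conversion (1 metre is about 39.37 inches):

print(m_to_inches(1.0))   # ~39.37
print(m_to_inches(None))  # None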
def lat_id(self, line): if self.grid == 'WAC': lat = ((1 + self.LINE_PROJECTION_OFFSET - line) * self.MAP_SCALE * 1e-3 / self.A_AXIS_RADIUS) return lat * 180 / np.pi else: lat = float(self.CENTER_LATITUDE) - \ (line - float(self.LINE_PROJECTION_OFFSET) - 1)\ / float(self.MAP_RESOLUTION) return lat
Return the corresponding latitude Args: line (int): Line number Returns: Corresponding latitude in degrees
juraj-google-style
def _ProcessGRRMessages(self, fs_client_id, grr_messages): grr_client_id = fleetspeak_utils.FleetspeakIDToGRRID(fs_client_id) for grr_message in grr_messages: grr_message.source = grr_client_id grr_message.auth_state = rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED client_is_new = self.frontend.EnrolFleetspeakClient(client_id=grr_client_id) if ((not client_is_new) and data_store.RelationalDBEnabled()): data_store.REL_DB.WriteClientMetadata(grr_client_id, last_ping=rdfvalue.RDFDatetime.Now()) self.frontend.ReceiveMessages(client_id=grr_client_id, messages=grr_messages)
Handles messages from GRR clients received via Fleetspeak. This method updates the last-ping timestamp of the client before beginning processing. Args: fs_client_id: The Fleetspeak client-id for the client. grr_messages: An Iterable of GrrMessages.
codesearchnet
def add_arguments(self, parser): group = parser.add_mutually_exclusive_group(required=True) group.add_argument('-d', '--downgrade', action='store_true', help='downgrade the J-Link firmware') group.add_argument('-u', '--upgrade', action='store_true', help='upgrade the J-Link firmware') return self.add_common_arguments(parser, False)
Adds the arguments for the firmware command. Args: self (FirmwareCommand): the ``FirmwareCommand`` instance parser (argparse.ArgumentParser): parser to add the commands to Returns: ``None``
codesearchnet
def __init__( self, sites ): self.sites = set( sites ) self.neighbours = set() for s in self.sites: self.neighbours.update( s.p_neighbours ) self.neighbours = self.neighbours.difference( self.sites )
Initialise a Cluster instance. Args: sites (List(Site)): The list of sites that make up the cluster. Returns: None
juraj-google-style
def is_allowlisted(o, check_call_override=True, allow_namedtuple_subclass=False): if isinstance(o, functools.partial): m = functools else: m = tf_inspect.getmodule(o) if hasattr(m, '__name__'): for rule in config.CONVERSION_RULES: action = rule.get_action(m) if action == config.Action.CONVERT: logging.log(2, 'Not allowed: %s: %s', o, rule) return False elif action == config.Action.DO_NOT_CONVERT: logging.log(2, 'Allowlisted: %s: %s', o, rule) return True if hasattr(o, '__code__') and tf_inspect.isgeneratorfunction(o): logging.log(2, 'Allowlisted: %s: generator functions are not converted', o) return True if check_call_override and (not tf_inspect.isclass(o)) and hasattr(o, '__call__'): if type(o) != type(o.__call__) and is_allowlisted(o.__call__): logging.log(2, 'Allowlisted: %s: object __call__ allowed', o) return True owner_class = None if tf_inspect.ismethod(o): owner_class = inspect_utils.getmethodclass(o) if owner_class is tf_method_target.TfMethodTarget: owner_class = o.__self__.target_class if owner_class is not None: if issubclass(owner_class, unittest.TestCase): logging.log(2, 'Allowlisted: %s: method of TestCase subclass', o) return True owner_class = inspect_utils.getdefiningclass(o, owner_class) if is_allowlisted(owner_class, check_call_override=False, allow_namedtuple_subclass=True): logging.log(2, 'Allowlisted: %s: owner is allowed %s', o, owner_class) return True if inspect_utils.isnamedtuple(o): if allow_namedtuple_subclass: if not any((inspect_utils.isnamedtuple(base) for base in o.__bases__)): logging.log(2, 'Allowlisted: %s: named tuple', o) return True else: logging.log(2, 'Allowlisted: %s: named tuple or subclass', o) return True logging.log(2, 'Not allowed: %s: default rule', o) return False
Checks whether an entity is allowed for use in graph mode.

Examples of allowed entities include all members of the tensorflow
package.

Args:
    o: A Python entity.
    check_call_override: Reserved for internal use. When set to `False`, it
        disables the rule according to which classes are allowed if their
        __call__ method is allowed.
    allow_namedtuple_subclass: Reserved for internal use. When `True`,
        namedtuple subclasses are not allowed.

Returns:
    Boolean
github-repos
def equals(self, other):
    if (not isinstance(other, self.__class__)):
        return False
    else:
        return (self.properties_with_values() == other.properties_with_values())
Structural equality of models.

Args:
    other (HasProps) : the other instance to compare to

Returns:
    True, if properties are structurally equal, otherwise False
codesearchnet
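Conceptually this is dictionary equality over the models' property values; a toy illustration follows, where HasProps is an invented minimal stand-in rather than the real class.

```python
class HasProps:
    # Invented stand-in: properties are plain instance attributes.
    def properties_with_values(self):
        return dict(vars(self))

    def equals(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.properties_with_values() == other.properties_with_values()

class Circle(HasProps):
    def __init__(self, radius, color):
        self.radius, self.color = radius, color

print(Circle(1, 'red').equals(Circle(1, 'red')))  # True
print(Circle(1, 'red').equals(Circle(2, 'red')))  # False
```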
def set_options(cls, pipeline_options):
    cls._pipeline_options = pipeline_options
Set filesystem options.

Args:
    pipeline_options: Instance of ``PipelineOptions``.
github-repos
def __init__(self, xid=None, port=None):
    super().__init__(xid)
    self.port = port
Create a QueueGetConfigRequest with the optional parameters below.

Args:
    xid (int): xid of OpenFlow header
    port (:class:`~.common.port.PortNo`): Target port for the query.
juraj-google-style
def _project_TH3(self, hist: Hist) -> Any:
    if len(self.projection_axes) < 1 or len(self.projection_axes) > 2:
        raise ValueError(len(self.projection_axes), "Invalid number of axes")
    projection_axis_name = ""
    for axis in self.projection_axes:
        proj_axis_name = axis.axis_type.name[:1]
        if proj_axis_name not in ["x", "y", "z"]:
            raise ValueError(f"Projection axis name {proj_axis_name} is not 'x', 'y', or 'z'. Please check your configuration.")
        projection_axis_name += proj_axis_name
    if len(self.projection_axes) == 2:
        projection_axis_name = projection_axis_name[::-1]
    logger.info(f"Projecting onto axes \"{projection_axis_name}\" from hist {hist.GetName()}")
    projected_hist = hist.Project3D(projection_axis_name)
    return projected_hist
Perform the actual TH3 -> TH1 projection.

This projection could be to 1D or 2D.

Args:
    hist (ROOT.TH3): Histogram from which the projections should be performed.

Returns:
    ROOT.TH1: The projected histogram.
juraj-google-style
def energy_at_conditions(self, pH, V):
    return self.energy + self.npH * PREFAC * pH + self.nPhi * V
Get free energy for a given pH and V.

Args:
    pH (float): pH at which to evaluate free energy
    V (float): voltage at which to evaluate free energy

Returns:
    free energy at conditions
juraj-google-style
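As a rough illustration, the linear pH/V dependence above can be evaluated on its own; the coefficients below are invented numbers and PREFAC is assumed to be the Nernst slope (about 0.0591 V per pH unit at 298 K), so treat this as a sketch rather than the library's implementation.

```python
PREFAC = 0.0591  # assumption: RT*ln(10)/F at 298 K, in volts per pH unit

def energy_at_conditions(energy, npH, nPhi, pH, V):
    # G(pH, V) = G0 + npH * PREFAC * pH + nPhi * V
    return energy + npH * PREFAC * pH + nPhi * V

# Hypothetical entry with G0 = -1.2 eV, npH = 2, nPhi = 1.
print(energy_at_conditions(energy=-1.2, npH=2, nPhi=1, pH=7.0, V=0.5))  # ~0.1274
```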
def take_parenting(self, inst):
    if self is inst:
        return
    for decl in inst.declarations:
        decl.parent = self
        self.declarations.append(decl)
    inst.declarations = []
Takes parenting from inst and transfers it to self.

Args:
    inst (namespace_t): a namespace declaration
juraj-google-style
def expression(self, previous_precedence=0):
    lhs = self.atom()
    return self.operator(lhs, previous_precedence)
An expression is an atom or an infix expression.

Grammar (sort of, actually a precedence-climbing parser):

    expression = atom [ binary_operator expression ] .

Args:
    previous_precedence: What operator precedence should we start with?
codesearchnet
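A minimal, self-contained sketch of the precedence-climbing idea the docstring alludes to; this is not the project's actual atom()/operator() implementation, just an illustration over integer arithmetic.

```python
import re

TOKENS = re.compile(r'\d+|[-+*/()]')
PRECEDENCE = {'+': 1, '-': 1, '*': 2, '/': 2}

def parse(text):
    tokens = TOKENS.findall(text)
    pos = [0]

    def atom():
        tok = tokens[pos[0]]
        pos[0] += 1
        if tok == '(':
            value = expression(0)
            pos[0] += 1  # consume ')'
            return value
        return int(tok)

    def expression(previous_precedence=0):
        # An expression is an atom followed by zero or more operator/operand pairs,
        # consumed only while their precedence exceeds the caller's.
        lhs = atom()
        while pos[0] < len(tokens) and tokens[pos[0]] in PRECEDENCE:
            op = tokens[pos[0]]
            prec = PRECEDENCE[op]
            if prec <= previous_precedence:
                break
            pos[0] += 1
            rhs = expression(prec)
            lhs = {'+': lhs + rhs, '-': lhs - rhs,
                   '*': lhs * rhs, '/': lhs / rhs}[op]
        return lhs

    return expression(0)

print(parse('1 + 2 * 3'))    # 7
print(parse('(1 + 2) * 3'))  # 9
```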
def dict_to_attributes_code(dict_):
    lines = []
    for (key, value) in dict_.iteritems():
        if isinstance(value, dict):
            txt = dict_to_attributes_code(value)
            lines_ = txt.split('\n')
            for line in lines_:
                if (not line.startswith(' ')):
                    line = ('%s.%s' % (key, line))
                lines.append(line)
        else:
            value_txt = pformat(value)
            if ('\n' in value_txt):
                lines.append(('%s = \\' % key))
                value_txt = indent(value_txt)
                lines.extend(value_txt.split('\n'))
            else:
                line = ('%s = %s' % (key, value_txt))
                lines.append(line)
    return '\n'.join(lines)
Given a nested dict, generate a python code equivalent.

Example:
    >>> d = {'foo': 'bah', 'colors': {'red': 1, 'blue': 2}}
    >>> print dict_to_attributes_code(d)
    foo = 'bah'
    colors.red = 1
    colors.blue = 2

Returns:
    str.
codesearchnet
def get_asides(self, block):
    aside_instances = [
        self.get_aside_of_type(block, aside_type)
        for aside_type in self.applicable_aside_types(block)
    ]
    return [
        aside_instance for aside_instance in aside_instances
        if aside_instance.should_apply_to_block(block)
    ]
Return instances for all of the asides that will decorate this `block`.

Arguments:
    block (:class:`.XBlock`): The block to retrieve asides for.

Returns:
    List of XBlockAside instances
juraj-google-style
def _get_op_control_flow_context(self, op):
    op_control_flow_context = op._control_flow_context
    if control_flow_util.IsLoopExit(op):
        op_control_flow_context = op_control_flow_context.outer_context
    return op_control_flow_context
Returns the control flow context of the given op.

Args:
    op: tf.Operation for which the control flow context is requested.

Returns:
    op_control_flow_context: the control flow context of the given op. If the
        operation type is LoopExit, returns the outer control flow context.
github-repos
def xml(self, xml):
    self._request.xml = xml
    self.add_matcher(matcher('XMLMatcher', xml))
Defines an XML body value to match.

Arguments:
    xml (str|regex): body XML to match.

Returns:
    self: current Mock instance.
juraj-google-style
def __SetDefaultUploadStrategy(self, upload_config, http_request):
    if (upload_config.resumable_path is None):
        self.strategy = SIMPLE_UPLOAD
    if (self.strategy is not None):
        return
    strategy = SIMPLE_UPLOAD
    if ((self.total_size is not None) and (self.total_size > _RESUMABLE_UPLOAD_THRESHOLD)):
        strategy = RESUMABLE_UPLOAD
    if (http_request.body and (not upload_config.simple_multipart)):
        strategy = RESUMABLE_UPLOAD
    if (not upload_config.simple_path):
        strategy = RESUMABLE_UPLOAD
    self.strategy = strategy
Determine and set the default upload strategy for this upload.

We generally prefer simple or multipart, unless we're forced to use
resumable. This happens when any of (1) the upload is too large, (2) the
simple endpoint doesn't support multipart requests and we have metadata, or
(3) there is no simple upload endpoint.

Args:
    upload_config: Configuration for the upload endpoint.
    http_request: The associated http request.

Returns:
    None.
codesearchnet
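Stripped of the class plumbing, the decision rule above is a small predicate; the standalone restatement below uses an invented size threshold and flag names purely for illustration.

```python
SIMPLE_UPLOAD = 'simple'
RESUMABLE_UPLOAD = 'resumable'
_RESUMABLE_UPLOAD_THRESHOLD = 5 * 1024 * 1024  # assumption: 5 MiB cutoff

def choose_upload_strategy(total_size, has_body, simple_multipart, simple_path):
    # Prefer the simple/multipart endpoint unless something forces resumable.
    if total_size is not None and total_size > _RESUMABLE_UPLOAD_THRESHOLD:
        return RESUMABLE_UPLOAD  # (1) upload too large
    if has_body and not simple_multipart:
        return RESUMABLE_UPLOAD  # (2) metadata present but no multipart support
    if not simple_path:
        return RESUMABLE_UPLOAD  # (3) no simple upload endpoint at all
    return SIMPLE_UPLOAD

print(choose_upload_strategy(1024, has_body=False, simple_multipart=True, simple_path='/upload'))
# simple
```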
def jump(self):
    potential_jumps = self.potential_jumps()
    if (not potential_jumps):
        raise BlockedLatticeError('No moves are possible in this lattice')
    all_transitions = transitions.Transitions(self.potential_jumps())
    random_jump = all_transitions.random()
    delta_t = all_transitions.time_to_jump()
    self.time += delta_t
    self.update_site_occupation_times(delta_t)
    self.update(random_jump)
    return all_transitions.time_to_jump()
Select a jump at random from all potential jumps, then update the lattice state.

Args:
    None

Returns:
    (float): The timestep associated with this jump.
codesearchnet
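For orientation, here is a hedged sketch of the kinetic Monte Carlo bookkeeping implied above; the real Transitions class is not shown, so this standalone version simply assumes Gillespie-style rate-proportional selection with exponential waiting times.

```python
import math
import random

def choose_jump(rates):
    """Pick a jump index with probability proportional to its rate,
    and draw the exponential waiting time until it occurs."""
    total = sum(rates)
    if total <= 0.0:
        raise ValueError('No moves are possible')  # analogue of BlockedLatticeError
    # Rate-proportional (roulette-wheel) selection.
    r = random.uniform(0.0, total)
    cumulative = 0.0
    chosen = len(rates) - 1
    for i, rate in enumerate(rates):
        cumulative += rate
        if r <= cumulative:
            chosen = i
            break
    # Exponential waiting time for a Poisson process with total rate `total`.
    delta_t = -math.log(1.0 - random.random()) / total
    return chosen, delta_t

index, dt = choose_jump([0.5, 1.0, 2.0])
print(index, dt)
```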
def _find_and_replace(text, start_string, end_string, replace_fn):
    ret = u''
    current_pos = 0
    while True:
        start_pos = text.find(start_string, current_pos)
        if (start_pos == (- 1)):
            ret += text[current_pos:]
            break
        ret += text[current_pos:start_pos]
        end_pos = text.find(end_string, (start_pos + len(start_string)))
        if (end_pos == (- 1)):
            break
        ret += replace_fn(text[(start_pos + len(start_string)):end_pos])
        current_pos = (end_pos + len(end_string))
    return ret
Remove everything found between instances of start_string and end_string.

Replace each such instance with replace_fn(removed_text), e.g.:

    _find_and_replace(u"the [[fat]] cat [[sat]]", u"[[", u"]]", lambda x: x)
    = u"the fat cat sat"

Args:
    text: a unicode string
    start_string: a unicode string
    end_string: a unicode string
    replace_fn: a unary function from unicode string to unicode string

Returns:
    a string
codesearchnet
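Since the helper above is pure Python with no external dependencies, a quick usage check is easy to run against the function exactly as defined.

```python
# Assuming _find_and_replace is available as defined above.
text = u'the [[fat]] cat [[sat]] on the [[mat'
print(_find_and_replace(text, u'[[', u']]', lambda x: x.upper()))
# -> 'the FAT cat SAT on the '
# The unterminated '[[mat' tail is dropped once no closing ']]' is found.
```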
def __init__(self, call_collection, call_fn, name, input_signature):
    self.call_collection = call_collection
    self.input_signature = input_signature
    self.wrapped_call = def_function.function(
        layer_call_wrapper(call_collection, call_fn, name),
        input_signature=input_signature)
    self.original_layer_call = call_collection.layer_call_method
Initializes a LayerCall object.

Args:
    call_collection: a LayerCallCollection, which contains the other layer
        call functions (e.g. call_with_conditional_losses, call). These
        functions should be traced with the same arguments.
    call_fn: A call function.
    name: Name of the call function.
    input_signature: Input signature of call_fn (can be None).
github-repos
def write_updates_to_csv(self, updates):
    with open(self._csv_file_name, 'w') as csvfile:
        csvwriter = self.csv_writer(csvfile)
        csvwriter.writerow(CSV_COLUMN_HEADERS)
        for update in updates:
            row = [
                update.name,
                update.current_version,
                update.new_version,
                update.prelease,
            ]
            csvwriter.writerow(row)
Given a list of updates, write the updates out to the provided CSV file.

Args:
    updates (list): List of Update objects.
juraj-google-style
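A rough standalone equivalent using only the standard library; the Update namedtuple and CSV_COLUMN_HEADERS below are stand-ins for whatever the surrounding class actually defines.

```python
import csv
from collections import namedtuple

# Hypothetical stand-ins for the class attributes used above.
Update = namedtuple('Update', 'name current_version new_version prelease')
CSV_COLUMN_HEADERS = ['name', 'current_version', 'new_version', 'prelease']

def write_updates_to_csv(csv_file_name, updates):
    with open(csv_file_name, 'w', newline='') as csvfile:
        csvwriter = csv.writer(csvfile)
        csvwriter.writerow(CSV_COLUMN_HEADERS)
        for update in updates:
            csvwriter.writerow([update.name, update.current_version,
                                update.new_version, update.prelease])

write_updates_to_csv('updates.csv', [Update('requests', '2.30.0', '2.31.0', False)])
```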
def list(self, **kwargs):
    path = self._get_path('movie_list')
    response = self._GET(path, kwargs)
    self._set_attrs_to_values(response)
    return response
Get the list of supported certifications for movies.

Returns:
    A dict representation of the JSON returned from the API.
codesearchnet
def release_dates(self, **kwargs):
    path = self._get_id_path('release_dates')
    response = self._GET(path, kwargs)
    self._set_attrs_to_values(response)
    return response
Get the release dates and certification for a specific movie id.

Args:
    append_to_response: (optional) Comma separated, any movie method.

Returns:
    A dict representation of the JSON returned from the API.
juraj-google-style
def Update(self, request, global_params=None):
    config = self.GetMethodConfig('Update')
    return self._RunMethod(config, request, global_params=global_params)
Updates information in an existing dataset. The update method replaces the
entire dataset resource, whereas the patch method only replaces fields that
are provided in the submitted dataset resource.

Args:
    request: (BigqueryDatasetsUpdateRequest) input message
    global_params: (StandardQueryParameters, default: None) global arguments

Returns:
    (Dataset) The response message.
github-repos
def _ParseDocstringArgSpec(doc):
    match = re.search('^\\w+\\(.*\\)', doc)
    args_spec = _GenerateArgsSpec(doc)
    if not match or args_spec is None:
        raise ValueError(f'Failed to parse argspec from docstring: {doc}')
    output_string = f'args=[{args_spec}], varargs=None, keywords=None, defaults=None'
    return output_string
Get an ArgSpec string from a method docstring.

This method is used to generate argspec for C extension functions that
follow pybind11 DocString format function signature. For example:
`foo_function(a: int, b: string) -> None...`

Args:
    doc: A python string which starts with function signature.

Returns:
    string: an argspec string representation if successful.

Raises:
    ValueError: Raised when failed to parse the input docstring.
github-repos
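The _GenerateArgsSpec helper is not shown above; a rough guess at the kind of regex extraction it might perform on a pybind11-style signature line follows. This is purely illustrative and not the actual TensorFlow helper.

```python
import re

def generate_args_spec(doc):
    """Illustrative only: pull argument names out of a leading
    `name(a: int, b: string) -> ...` signature line."""
    match = re.search(r'^\w+\((.*?)\)', doc)
    if match is None:
        return None
    params = [p.strip() for p in match.group(1).split(',') if p.strip()]
    names = [p.split(':')[0].strip() for p in params]
    return ', '.join(f"'{name}'" for name in names)

doc = 'foo_function(a: int, b: string) -> None\n\nDoes something.'
args_spec = generate_args_spec(doc)
print(f'args=[{args_spec}], varargs=None, keywords=None, defaults=None')
# args=['a', 'b'], varargs=None, keywords=None, defaults=None
```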
def combine_regions(self, input_region_labels, output_region_label, verbose=True):
    if isinstance(input_region_labels, str):
        input_region_labels = [input_region_labels]
    bad_regions = set(input_region_labels) - set(self.regions)
    if len(bad_regions) > 0:
        raise ValueError("Error regions(s) " + str(bad_regions) + " are not in the data.")
    data = self.copy()
    if len(input_region_labels) == 0:
        return data

    def _swap_in(d, inputs, output):
        overlap = set(d.keys()).intersection(inputs)
        if len(overlap) == 0:
            return d
        keepers = [(k, v) for k, v in d.items() if k not in inputs]
        return dict(keepers + [(output_region_label, sum([d[x] for x in overlap]))])

    data['regions'] = data.apply(lambda x:
        _swap_in(x['regions'], input_region_labels, output_region_label), 1)
    data.loc[data['region_label'].isin(input_region_labels), 'region_label'] = output_region_label
    return data
Combine/rename one or more input regions to a single output region.

Args:
    input_region_labels (list): A str name or list of names to combine
    output_region_label (str): The region name to assign to the combined regions
    verbose (bool): output more details

Returns:
    CellDataFrame: The CellDataFrame modified.
juraj-google-style
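The core dict-merging step can be exercised on plain pandas data; the sketch below assumes a toy frame with the same 'regions' and 'region_label' columns rather than the real CellDataFrame.

```python
import pandas as pd

def swap_in(d, inputs, output):
    # Merge the counts of any matching input regions into a single output key.
    overlap = set(d.keys()) & set(inputs)
    if not overlap:
        return d
    keepers = {k: v for k, v in d.items() if k not in inputs}
    keepers[output] = sum(d[k] for k in overlap)
    return keepers

df = pd.DataFrame({
    'region_label': ['cortex', 'medulla'],
    'regions': [{'cortex': 120, 'medulla': 30}, {'cortex': 5, 'medulla': 80}],
})
inputs, output = ['cortex', 'medulla'], 'kidney'
df['regions'] = df['regions'].apply(lambda d: swap_in(d, inputs, output))
df.loc[df['region_label'].isin(inputs), 'region_label'] = output
print(df)
```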
def _CheckSQLite3(verbose_output=True):
    module_name = 'pysqlite2.dbapi2'
    minimum_version = '3.7.8'
    module_object = _ImportPythonModule(module_name)
    if not module_object:
        module_name = 'sqlite3'
        module_object = _ImportPythonModule(module_name)
        if not module_object:
            print('[FAILURE]\tmissing: {0:s}.'.format(module_name))
            return False
    module_version = getattr(module_object, 'sqlite_version', None)
    if not module_version:
        return False
    module_version_map = list(
        map(int, _VERSION_SPLIT_REGEX.split(module_version)))
    minimum_version_map = list(
        map(int, _VERSION_SPLIT_REGEX.split(minimum_version)))
    if module_version_map < minimum_version_map:
        print((
            '[FAILURE]\t{0:s} version: {1!s} is too old, {2!s} or later '
            'required.').format(module_name, module_version, minimum_version))
        return False
    if verbose_output:
        print('[OK]\t\t{0:s} version: {1!s}'.format(module_name, module_version))
    return True
Checks the availability of sqlite3.

Args:
    verbose_output (Optional[bool]): True if output should be verbose.

Returns:
    bool: True if the sqlite3 Python module is available, False otherwise.
juraj-google-style
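The version check above boils down to comparing lists of integers; a minimal standalone illustration follows, with an assumed split pattern standing in for the module's _VERSION_SPLIT_REGEX.

```python
import re

_VERSION_SPLIT_REGEX = re.compile(r'\.|\-')  # assumption: split on dots or dashes

def is_version_sufficient(module_version, minimum_version):
    module_map = list(map(int, _VERSION_SPLIT_REGEX.split(module_version)))
    minimum_map = list(map(int, _VERSION_SPLIT_REGEX.split(minimum_version)))
    # Python compares lists element-wise, so [3, 7, 11] >= [3, 7, 8].
    return module_map >= minimum_map

print(is_version_sufficient('3.7.11', '3.7.8'))  # True
print(is_version_sufficient('3.6.0', '3.7.8'))   # False
```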
def wait_for_model_package(self, model_package_name, poll=5):
    desc = _wait_until(lambda: _create_model_package_status(self.sagemaker_client, model_package_name), poll)
    status = desc['ModelPackageStatus']
    if status != 'Completed':
        reason = desc.get('FailureReason', None)
        raise ValueError('Error creating model package {}: {} Reason: {}'.format(
            model_package_name, status, reason))
    return desc
Wait for an Amazon SageMaker model package creation to complete.

Args:
    model_package_name (str): Name of the model package to wait for.
    poll (int): Polling interval in seconds (default: 5).

Returns:
    dict: Return value from the ``DescribeModelPackage`` API.
juraj-google-style
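_wait_until and _create_model_package_status are helpers from elsewhere in the SDK and are not shown here; a generic polling loop in the same spirit might look like the sketch below, which is illustrative only and not the boto3/SageMaker implementation.

```python
import time

def wait_until(get_status, poll=5, in_progress_statuses=('InProgress', 'Pending')):
    """Poll `get_status()` until it returns a description whose status is terminal."""
    while True:
        desc = get_status()
        if desc['ModelPackageStatus'] not in in_progress_statuses:
            return desc
        time.sleep(poll)

# Hypothetical usage with a fake status source that completes on the third poll.
_calls = iter(['InProgress', 'InProgress', 'Completed'])
desc = wait_until(lambda: {'ModelPackageStatus': next(_calls)}, poll=0)
print(desc)  # {'ModelPackageStatus': 'Completed'}
```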