Columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes)
def cancel(self, consumers): for consumer in consumers: del self._consumers[consumer.queue] protocol = yield self.when_connected() yield protocol.cancel(consumer)
Cancel a consumer that was previously started with consume. Args: consumers (list of fedora_messaging.api.Consumer): The consumers to cancel.
juraj-google-style
def read_float(self, little_endian=True): if little_endian: endian = '<' else: endian = '>' return self.unpack(('%sf' % endian), 4)
Read 4 bytes as a float value from the stream. Args: little_endian (bool): specify the endianness. Defaults to little endian. Returns: float: the value read from the stream.
codesearchnet
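An illustrative aside, not part of the dataset: the same little-endian float read can be done with the standard-library struct module (the entry above delegates to a self.unpack helper on its stream class, which is assumed here).

import struct

# Unpack 4 bytes as a little-endian float ('<f'); '>f' would be big endian,
# mirroring the endian switch in the entry above.
data = struct.pack('<f', 3.14)
value = struct.unpack('<f', data)[0]
print(round(value, 2))  # 3.14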
def _process_new(self, feed_item): lp = self.landing_page_dao.get(feed_item, required=True) feed_item[FieldMap.CAMPAIGN_LANDING_PAGE_ID] = lp['id'] feed_item[FieldMap.CAMPAIGN_LANDING_PAGE_NAME] = lp['name'] return {'advertiserId': feed_item.get(FieldMap.ADVERTISER_ID, None), 'name': feed_item.get(FieldMap.CAMPAIGN_NAME, None), 'startDate': StringExtensions.convertDateTimeStrToDateStr(feed_item.get(FieldMap.CAMPAIGN_START_DATE, None)), 'endDate': StringExtensions.convertDateTimeStrToDateStr(feed_item.get(FieldMap.CAMPAIGN_END_DATE, None)), 'defaultLandingPageId': lp['id']}
Creates a new campaign DCM object from a feed item representing a campaign from the Bulkdozer feed. This function simply creates the object to be inserted later by the BaseDAO object. Args: feed_item: Feed item representing the campaign from the Bulkdozer feed. Returns: A campaign object ready to be inserted in DCM through the API.
github-repos
def update_(self, sct_dict, conf_arg=True): for (opt, val) in sct_dict.items(): if (opt not in self.def_): continue if ((not conf_arg) or self.def_[opt].conf_arg): self[opt] = val
Update values of configuration section with dict. Args: sct_dict (dict): dict indexed with option names. Undefined options are discarded. conf_arg (bool): if True, only options that can be set in a config file are updated.
codesearchnet
def destroy_record(client=None, found_record=None, record='', zone_id=''): LOG.debug('Found DNS record: %s', found_record) if (found_record['Name'].strip('.') == record): dns_json = get_template(template_file='destroy/destroy_dns.json.j2', record=json.dumps(found_record)) dns_dict = json.loads(dns_json) client.change_resource_record_sets(HostedZoneId=zone_id, ChangeBatch=dns_dict) LOG.info('Destroyed "%s" in %s', found_record['Name'], zone_id) else: LOG.info('DNS record "%s" missing from %s.', record, zone_id) LOG.debug("Found someone else's record: %s", found_record['Name']) return True
Destroy an individual DNS record. Args: client (botocore.client.Route53): Route 53 boto3 client. found_record (dict): Route 53 record set:: {'Name': 'unicorn.forrest.dev.example.com.', 'ResourceRecords': [{'Value': 'internal-unicornforrest-1777489395.us-east-1.elb.amazonaws.com' }], 'TTL': 60, 'Type': 'CNAME'} record (str): Application DNS record name. e.g. zone_id (str): Route 53 Hosted Zone ID, e.g. /hostedzone/ZSVGJWJ979WQD. Returns: bool: True upon successful completion.
codesearchnet
def read_from(fpath, verbose=None, aslines=False, strict=True, n=None, errors='replace'): if (n is None): n = __READ_TAIL_N__ verbose = _rectify_verb_read(verbose) if verbose: print(('[util_io] * Reading text file: %r ' % util_path.tail(fpath, n=n))) try: if (not util_path.checkpath(fpath, verbose=verbose, n=n)): raise IOError('[io] * FILE DOES NOT EXIST!') with open(fpath, 'rb') as file_: if aslines: if six.PY2: text = [line.decode('utf8', errors=errors) for line in file_.readlines()] else: text = [line.decode('utf8', errors=errors) for line in file_.readlines()] elif six.PY2: text = file_.read().decode('utf8', errors=errors) else: text = file_.read().decode('utf8', errors=errors) return text except IOError as ex: from utool import util_dbg if (verbose or strict): util_dbg.printex(ex, (' * Error reading fpath=%r' % util_path.tail(fpath, n=n)), '[io]') if strict: raise
r""" Reads text from a file. Automatically returns utf8. Args: fpath (str): file path aslines (bool): if True returns list of lines verbose (bool): verbosity flag Returns: str: text from fpath (this is unicode) Ignore: x = b'''/whaleshark_003_fors\xc3\xb8g.wmv" />\r\n''' ut.writeto('foo.txt', x) y = ut.readfrom('foo.txt') y.encode('utf8') == x
codesearchnet
def get_event(self, event_key): event = self.event_key_map.get(event_key) if event: return event self.logger.error('Event "%s" is not in datafile.' % event_key) self.error_handler.handle_error(exceptions.InvalidEventException(enums.Errors.INVALID_EVENT_KEY_ERROR)) return None
Get event for the provided event key. Args: event_key: Event key for which event is to be determined. Returns: Event corresponding to the provided event key.
juraj-google-style
def machine_op(self, operation): operations = {'feed2start': 1, 'feedone': 2, 'cut': 3 } if operation in operations: self.send('^'+'O'+'P'+chr(operations[operation])) else: raise RuntimeError('Invalid operation.')
Perform machine operations. Args: operation: which operation you would like to perform. Returns: None Raises: RuntimeError: Invalid operation
juraj-google-style
def get_first_model_with_resource_name(cls, resource_name): models = cls.get_models_with_resource_name(resource_name) if len(models) > 0: return models[0] return None
Get the first model corresponding to a resource_name Args: resource_name: the resource name
juraj-google-style
def config_init(config_file, json_config_obj, config_dirname=None): HOME = os.environ['HOME'] if config_dirname: dir_path = HOME + '/' + config_dirname if not os.path.exists(dir_path): os.mkdir(dir_path) os.chmod(dir_path, 0o755) else: dir_path = HOME r = export_json_object( dict_obj=json_config_obj, filename=dir_path + '/' + config_file ) return r
Summary: Creates local config from JSON seed template Args: :config_file (str): filesystem object containing json dict of config values :json_config_obj (json): data to be written to config_file :config_dirname (str): dir name containing config_file Returns: TYPE: bool, Success | Failure
juraj-google-style
def profile_df(df): return IPython.core.display.HTML( pandas_profiling.ProfileReport(df).html.replace('bootstrap', 'nonexistent'))
Generate a profile of data in a dataframe. Args: df: the Pandas dataframe.
juraj-google-style
class EncodecEncoderOutput(ModelOutput): audio_codes: Optional[torch.LongTensor] = None audio_scales: Optional[torch.FloatTensor] = None
Args: audio_codes (`torch.LongTensor` of shape `(batch_size, nb_chunks, chunk_length)`, *optional*): Discrete code embeddings computed using `model.encode`. audio_scales (`torch.Tensor` of shape `(batch_size, nb_chunks)`, *optional*): Scaling factor for each `audio_codes` input. This is used to unscale each chunk of audio when decoding.
github-repos
def run_simulations(self, param_list, show_progress=True): if (self.runner is None): raise Exception('No runner was ever specified for this CampaignManager.') if (param_list == []): return desired_params = self.db.get_params() for p in param_list: passed = list(p.keys()) available = (['RngRun'] + desired_params) if (set(passed) != set(available)): raise ValueError(('Specified parameter combination does not match the supported parameters:\nPassed: %s\nSupported: %s' % (sorted(passed), sorted(available)))) if self.check_repo: self.check_repo_ok() self.runner.configure_and_build(skip_configuration=True) shuffle(param_list) results = self.runner.run_simulations(param_list, self.db.get_data_dir()) if show_progress: result_generator = tqdm(results, total=len(param_list), unit='simulation', desc='Running simulations') else: result_generator = results for result in result_generator: self.db.insert_result(result)
Run several simulations specified by a list of parameter combinations. Note: this function does not verify whether we already have the required simulations in the database - it just runs all the parameter combinations that are specified in the list. Args: param_list (list): list of parameter combinations to execute. Items of this list are dictionaries, with one key for each parameter, and a value specifying the parameter value (which can be either a string or a number). show_progress (bool): whether or not to show a progress bar with percentage and expected remaining time.
codesearchnet
def build_hlo_module(root: testlib_base.HloInstruction, *instructions: testlib_base.HloInstruction, extra_computations: Sequence[testlib_base.HloComputation] | None=None) -> tuple[testlib_base.HloModule, testlib_base.BufferAssignment]: hlo_module = testlib_base.HloModule(root.name()) hlo_module.add_entry_computation(testlib_base.build_hlo_computation(root, *instructions)) if extra_computations is not None: for computation in extra_computations: hlo_module.add_computation(computation) return annotate_hlo_module(hlo_module)
Builds an HLO module from a root instruction and its dependencies. Args: root: The root instruction of the module. *instructions: The instructions that are dependencies of the root instruction. extra_computations: Any extra computations that should be added to the module. Returns: A tuple containing the HLO module and its buffer assignment.
github-repos
def to_pil_image(image: Union[np.ndarray, 'PIL.Image.Image', 'torch.Tensor', 'tf.Tensor', 'jnp.ndarray'], do_rescale: Optional[bool]=None, image_mode: Optional[str]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> 'PIL.Image.Image': requires_backends(to_pil_image, ['vision']) if isinstance(image, PIL.Image.Image): return image if is_torch_tensor(image) or is_tf_tensor(image): image = image.numpy() elif is_jax_tensor(image): image = np.array(image) elif not isinstance(image, np.ndarray): raise ValueError(f'Input image type not supported: {type(image)}') image = to_channel_dimension_format(image, ChannelDimension.LAST, input_data_format) image = np.squeeze(image, axis=-1) if image.shape[-1] == 1 else image do_rescale = _rescale_for_pil_conversion(image) if do_rescale is None else do_rescale if do_rescale: image = rescale(image, 255) image = image.astype(np.uint8) return PIL.Image.fromarray(image, mode=image_mode)
Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if needed. Args: image (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor` or `tf.Tensor`): The image to convert to the `PIL.Image` format. do_rescale (`bool`, *optional*): Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will default to `True` if the image type is a floating type and casting to `int` would result in a loss of precision, and `False` otherwise. image_mode (`str`, *optional*): The mode to use for the PIL image. If unset, will use the default mode for the input image type. input_data_format (`ChannelDimension`, *optional*): The channel dimension format of the input image. If unset, will use the inferred format from the input. Returns: `PIL.Image.Image`: The converted image.
github-repos
def decode(self, encoded): if self.enforce_reversible: self.enforce_reversible = False if self.encode(self.decode(encoded)) != encoded: raise ValueError('Decoding is not reversible for "%s"' % encoded) self.enforce_reversible = True return encoded
Decodes an object. Args: encoded (object): Encoded object. Returns: object: Object decoded.
juraj-google-style
def _set_update(self): try: self._updateStack = False stack_name = self._config.get('environment', {}).get('stack_name', None) response = self._cloudFormation.describe_stacks(StackName=stack_name) stack = response['Stacks'][0] if (stack['StackStatus'] == 'ROLLBACK_COMPLETE'): logging.info('stack is in ROLLBACK_COMPLETE status and should be deleted') del_stack_resp = self._cloudFormation.delete_stack(StackName=stack_name) logging.info('delete started for stack: {}'.format(stack_name)) logging.debug('delete_stack returned: {}'.format(json.dumps(del_stack_resp, indent=4))) stack_delete = self.poll_stack() if (not stack_delete): return False if (stack['StackStatus'] in ['CREATE_COMPLETE', 'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_COMPLETE']): self._updateStack = True except: self._updateStack = False logging.info(('update_stack: ' + str(self._updateStack))) return True
Determine if we are creating a new stack or updating an existing one. The update member is set as you would expect at the end of this query. Args: None Returns: True
codesearchnet
def __init__( self, deconvolution_layer_list, opt_params=None, learning_rate=1e-05, verbose_mode=False ): for deconvolution_layer in deconvolution_layer_list: if isinstance(deconvolution_layer, DeconvolutionLayer) is False: raise TypeError() if opt_params is None: opt_params = Adam() opt_params.dropout_rate = 0.0 if isinstance(opt_params, OptParams) is False: raise TypeError() logger = getLogger("pydbm") handler = StreamHandler() if verbose_mode is True: handler.setLevel(DEBUG) logger.setLevel(DEBUG) else: handler.setLevel(ERROR) logger.setLevel(ERROR) logger.addHandler(handler) self.__deconvolution_layer_list = deconvolution_layer_list self.__learning_rate = learning_rate self.__attenuate_epoch = 50 self.__opt_params = opt_params self.__logger = logger
Init. Args: deconvolution_layer_list: `list` of `DeconvolutionLayer`. opt_params: is-a `OptParams`. If `None`, this value will be `Adam`. learning_rate: Learning rate. verbose_mode: Verbose mode or not.
juraj-google-style
def indices2nodes(self, indices): if set(indices) - set(self.node_indices): raise ValueError( "`indices` must be a subset of the Subsystem's indices.") return tuple(self._index2node[n] for n in indices)
Return |Nodes| for these indices. Args: indices (tuple[int]): The indices in question. Returns: tuple[Node]: The |Node| objects corresponding to these indices. Raises: ValueError: If requested indices are not in the subsystem.
juraj-google-style
def enable_tracing(self): if (not self.connected): raise HardwareError('Cannot enable tracing if we are not in a connected state') if (self._traces is not None): _clear_queue(self._traces) return self._traces self._traces = queue.Queue() self._loop.run_coroutine(self.adapter.open_interface(0, 'tracing')) return self._traces
Open the tracing interface and accumulate traces in a queue. This method is safe to call multiple times in a single device connection. There is no way to check if the tracing interface is opened or to close it once it is opened (apart from disconnecting from the device). The first time this method is called, it will open the tracing interface and return a queue that will be filled asynchronously with reports as they are received. Subsequent calls will just empty the queue and return the same queue without interacting with the device at all. Returns: queue.Queue: A queue that will be filled with trace data from the device. The trace data will be in disjoint bytes objects in the queue
codesearchnet
def load_local(self, state, name): var = self.block_env.get_local(self.frame.current_block, name) if self.ctx.options.strict_undefined_checks and self.ctx.python_version >= (3, 10) and (not var): raise KeyError() return self.load_from(state, self.frame.f_locals, name)
Called when a local is loaded onto the stack. Uses the name to retrieve the value from the current locals(). Args: state: The current VM state. name: Name of the local Returns: A tuple of the state and the value (cfg.Variable) Raises: KeyError: If the name is determined to be undefined
github-repos
def AddDescriptor(self, desc): if (not isinstance(desc, descriptor.Descriptor)): raise TypeError('Expected instance of descriptor.Descriptor.') self._descriptors[desc.full_name] = desc self._AddFileDescriptor(desc.file)
Adds a Descriptor to the pool, non-recursively. If the Descriptor contains nested messages or enums, the caller must explicitly register them. This method also registers the FileDescriptor associated with the message. Args: desc: A Descriptor.
codesearchnet
def vgg_layer(inputs, nout, kernel_size=3, activation=tf.nn.leaky_relu, padding='SAME', is_training=True, has_batchnorm=False, scope=None): with tf.variable_scope(scope): net = tfl.conv2d(inputs, nout, kernel_size=kernel_size, padding=padding, activation=None, name='conv') if has_batchnorm: net = tfl.batch_normalization(net, training=is_training, name='bn') net = activation(net) return net
A layer of VGG network with batch norm. Args: inputs: image tensor nout: number of output channels kernel_size: size of the kernel activation: activation function padding: padding of the image is_training: whether it is training mode or not has_batchnorm: whether batchnorm is applied or not scope: variable scope of the op Returns: net: output of layer
codesearchnet
def put_archive(self, path, data): return self.client.api.put_archive(self.id, path, data)
Insert a file or folder in this container using a tar archive as source. Args: path (str): Path inside the container where the file(s) will be extracted. Must exist. data (bytes): tar data to be extracted Returns: (bool): True if the call succeeds. Raises: :py:class:`~docker.errors.APIError` If an error occurs.
juraj-google-style
def _set_dacl_inheritance(path, objectType, inheritance=True, copy=True, clear=False): ret = {'result': False, 'comment': '', 'changes': {}} if path: try: sd = win32security.GetNamedSecurityInfo(path, objectType, win32security.DACL_SECURITY_INFORMATION) tdacl = sd.GetSecurityDescriptorDacl() if inheritance: if clear: counter = 0 removedAces = [] while counter < tdacl.GetAceCount(): tAce = tdacl.GetAce(counter) if (tAce[0][1] & win32security.INHERITED_ACE) != win32security.INHERITED_ACE: tdacl.DeleteAce(counter) removedAces.append(_ace_to_text(tAce, objectType)) else: counter = counter + 1 if removedAces: ret['changes']['Removed ACEs'] = removedAces else: ret['changes']['Non-Inherited ACEs'] = 'Left in the DACL' win32security.SetNamedSecurityInfo( path, objectType, win32security.DACL_SECURITY_INFORMATION | win32security.UNPROTECTED_DACL_SECURITY_INFORMATION, None, None, tdacl, None) ret['changes']['Inheritance'] = 'Enabled' else: if not copy: counter = 0 inheritedAcesRemoved = [] while counter < tdacl.GetAceCount(): tAce = tdacl.GetAce(counter) if (tAce[0][1] & win32security.INHERITED_ACE) == win32security.INHERITED_ACE: tdacl.DeleteAce(counter) inheritedAcesRemoved.append(_ace_to_text(tAce, objectType)) else: counter = counter + 1 if inheritedAcesRemoved: ret['changes']['Removed ACEs'] = inheritedAcesRemoved else: ret['changes']['Previously Inherited ACEs'] = 'Copied to the DACL' win32security.SetNamedSecurityInfo( path, objectType, win32security.DACL_SECURITY_INFORMATION | win32security.PROTECTED_DACL_SECURITY_INFORMATION, None, None, tdacl, None) ret['changes']['Inheritance'] = 'Disabled' ret['result'] = True except Exception as e: ret['result'] = False ret['comment'] = 'Error attempting to set the inheritance. The error was {0}.'.format(e) return ret
helper function to set the inheritance Args: path (str): The path to the object objectType (str): The type of object inheritance (bool): True enables inheritance, False disables copy (bool): Copy inherited ACEs to the DACL before disabling inheritance clear (bool): Remove non-inherited ACEs from the DACL
juraj-google-style
def get_index(fn, cols, names, sep): if (not has_index(fn)): return generate_index(fn, cols, names, sep) file_index = read_index(get_index_fn(fn)) if (len((set(names) - (set(file_index.columns) - {'seek'}))) != 0): raise ValueError('{}: missing index columns: reindex'.format(fn)) if ('seek' not in file_index.columns): raise ValueError('{}: invalid index: reindex'.format(fn)) return file_index
Restores the index for a given file. Args: fn (str): the name of the file. cols (list): a list containing column to keep (as int). names (list): the name corresponding to the column to keep (as str). sep (str): the field separator. Returns: pandas.DataFrame: the index. If the index doesn't exist for the file, it is first created.
codesearchnet
def deploy(target): if (not os.getenv(CIRCLECI_ENV_VAR)): raise EnvironmentError('Must be on CircleCI to run this script') current_branch = os.getenv('CIRCLE_BRANCH') if ((target == 'PROD') and (current_branch != 'master')): raise EnvironmentError('Refusing to deploy to production from branch {current_branch!r}. Production deploys can only be made from master.'.format(current_branch=current_branch)) if (target in ('PROD', 'TEST')): pypi_username = os.getenv('{target}_PYPI_USERNAME'.format(target=target)) pypi_password = os.getenv('{target}_PYPI_PASSWORD'.format(target=target)) else: raise ValueError("Deploy target must be 'PROD' or 'TEST', got {target!r}.".format(target=target)) if (not (pypi_username and pypi_password)): raise EnvironmentError("Missing '{target}_PYPI_USERNAME' and/or '{target}_PYPI_PASSWORD' environment variables. These are required to push to PyPI.".format(target=target)) os.environ['TWINE_USERNAME'] = pypi_username os.environ['TWINE_PASSWORD'] = pypi_password _shell('git config --global user.email "oss@cloverhealth.com"') _shell('git config --global user.name "Circle CI"') _shell('git config push.default current') ret = _shell('make version', stdout=subprocess.PIPE) version = ret.stdout.decode('utf-8').strip() print('Deploying version {version!r}...'.format(version=version)) _shell('git tag -f -a {version} -m "Version {version}"'.format(version=version)) _shell('sed -i.bak "s/^__version__ = .*/__version__ = {version!r}/" */version.py'.format(version=version)) _shell('python setup.py sdist bdist_wheel') _shell('git add ChangeLog AUTHORS */version.py') _shell('git commit --no-verify -m "Merge autogenerated files [skip ci]"') _pypi_push('dist') _shell('git push --follow-tags') print('Deployment complete. Latest version is {version}.'.format(version=version))
Deploys the package and documentation. Proceeds in the following steps: 1. Ensures proper environment variables are set and checks that we are on Circle CI 2. Tags the repository with the new version 3. Creates a standard distribution and a wheel 4. Updates version.py to have the proper version 5. Commits the ChangeLog, AUTHORS, and version.py file 6. Pushes to PyPI 7. Pushes the tags and newly committed files Raises: `EnvironmentError`: - Not running on CircleCI - `*_PYPI_USERNAME` and/or `*_PYPI_PASSWORD` environment variables are missing - Attempting to deploy to production from a branch that isn't master
codesearchnet
def __init__(self, date=None, year=None, season=None, day_of_season=None, *args, **kwargs): if year is not None and season is not None and \ day_of_season is not None: date = (datetime.datetime(year=year - 1166, month=1, day=1) + datetime.timedelta(days=(season * 73) + day_of_season - 1)) elif date is None or not hasattr(date, "timetuple"): date = datetime.date.today() self.date = date time_tuple = self.date.timetuple() year = time_tuple.tm_year self.year = year + 1166 day_of_year = time_tuple.tm_yday - 1 if is_leap_year(year) and day_of_year > 59: day_of_year -= 1 self.day_of_week = day_of_year % 5 self.day_of_season = day_of_year % 73 + 1 self.season = int(day_of_year / 73) if is_leap_year(year) and time_tuple.tm_yday == 60: self.holiday = "St. Tib's Day" self.day_of_week = None self.day_of_season = None self.season = None elif self.day_of_season == 5: self.holiday = self.HOLIDAYS["apostle"][self.season] elif self.day_of_season == 50: self.holiday = self.HOLIDAYS["seasonal"][self.season] else: self.holiday = None super(DDate, self).__init__(*args, **kwargs)
Discordian date setup and mangling. Note: year, season and day_of_season are all required if any are used Args: date: optional date object with a timetuple method, or uses today year: optional integer discordian year to create from season: optional integer discodian season to create from day_of_season: optional int discordian day of season to create from
juraj-google-style
def get_geno_marker(self, marker, return_index=False): if (self._mode != 'r'): raise UnsupportedOperation("not available in 'w' mode") if (marker not in self._bim.index): raise ValueError('{}: marker not in BIM'.format(marker)) seek_index = self._bim.loc[(marker, 'i')] self.seek(seek_index) if return_index: return (self._read_current_marker(), seek_index) return self._read_current_marker()
Gets the genotypes for a given marker. Args: marker (str): The name of the marker. return_index (bool): Whether to return the marker's index or not. Returns: numpy.ndarray: The genotypes of the marker (additive format).
codesearchnet
def make_directory_writable(dirname): retval = shell_call(['docker', 'run', '-v', '{0}:/output_dir'.format(dirname), 'busybox:1.27.2', 'chmod', '-R', 'a+rwx', '/output_dir']) if (not retval): logging.error('Failed to change permissions on directory: %s', dirname) return retval
Makes directory readable and writable by everybody. Args: dirname: name of the directory Returns: True if the operation was successful. If you run something inside a Docker container and it writes files, then these files will be written as the root user with restricted permissions. So to be able to read/modify these files outside of Docker you have to change their permissions to be world readable and writable.
codesearchnet
def bind_sockets(address, port): ss = netutil.bind_sockets(port=port or 0, address=address) assert len(ss) ports = {s.getsockname()[1] for s in ss} assert len(ports) == 1, "Multiple ports assigned??" actual_port = ports.pop() if port: assert actual_port == port return ss, actual_port
Bind a socket to a port on an address. Args: address (str) : An address to bind a port on, e.g. ``"localhost"`` port (int) : A port number to bind. Pass 0 to have the OS automatically choose a free port. This function returns a 2-tuple with the new socket as the first element, and the port that was bound as the second. (Useful when passing 0 as a port number to bind any free port.) Returns: (socket, port)
juraj-google-style
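A plain-socket sketch of the same idea, assuming only the standard library (the entry above goes through tornado.netutil): bind port 0 and read back the port the OS actually assigned.

import socket

# Bind an ephemeral port and recover the assigned port number.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('localhost', 0))
actual_port = s.getsockname()[1]
print(actual_port)  # whatever free port the OS chose
s.close()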
def getitem_row_array(self, key): key = list(key) def getitem(df, internal_indices=[]): return df.iloc[internal_indices] result = self.data.apply_func_to_select_indices( 1, getitem, key, keep_remaining=False ) new_index = self.index[key] return self.__constructor__(result, new_index, self.columns, self._dtype_cache)
Get row data for target labels. Args: key: Target numeric indices by which to retrieve data. Returns: A new QueryCompiler.
juraj-google-style
def load_file(file_path, credentials=None): if file_path.startswith('gs://'): return _load_file_from_gcs(file_path, credentials) else: return open(file_path, 'r')
Load a file from either local or gcs. Args: file_path: The target file path, which should have the prefix 'gs://' if to be loaded from gcs. credentials: Optional credential to be used to load the file from gcs. Returns: A python File object if loading file from local or a StringIO object if loading from gcs.
juraj-google-style
def format_filter_value(self, element, value): format_func = self.allowed_filter.get(element) return format_func(value)
Calls the specific function to format value, depending on the given element. Arguments: element (string): The element of the VT to be formatted. value (dictionary): The element value. Returns: Returns a formatted value.
juraj-google-style
def loads(s, single=False): es = deserialize(s) if single: return next(es) return es
Deserialize :class:`Eds` string representations Args: s (str): Eds string single (bool): if `True`, only return the first Xmrs object Returns: a generator of :class:`Eds` objects (unless the *single* option is `True`)
juraj-google-style
def del_node(self, node): for node_ in self.values(): if node in node_: node_.pop(node) return bool(self.pop(node))
Removes a **node object** from the ``DictGraph``. Returns ``True`` if a **node object** has been removed. If the **node object** is not in the ``DictGraph`` raises a ``KeyError``. Arguments: - node(``object``) **node object** to be removed. Any hashable Python ``object``.
juraj-google-style
def _write(self, file_prefix, options=None): if options and options.experimental_enable_async_checkpoint: self._checkpoint_options = options if checkpoint_context.in_preemption_save_context(): if self._async_checkpointer_impl is not None: self._async_checkpointer_impl.sync() logging.warning('Switching to regular sync checkpoint for preemption checkpoint.') elif context.executing_eagerly(): return self._async_checkpointer()._write(file_prefix, options) else: logging.warning('Saving async checkpoint in graph mode is currently not supported; switching to regular sync checkpoint instead.') start_time = time.time() options = options or checkpoint_options.CheckpointOptions() output = self._saver.save(file_prefix=file_prefix, options=options) output = _convert_file_name_tensor_to_string(output) if options.experimental_write_callbacks: _execute_callbacks(options.experimental_write_callbacks, output) if context.executing_eagerly(): context.async_wait() end_time = time.time() if not checkpoint_context.in_async_metrics_context(): metrics.AddCheckpointWriteDuration(api_label=_CHECKPOINT_V2, microseconds=_get_duration_microseconds(start_time, end_time)) global _END_TIME_OF_LAST_WRITE with _END_TIME_OF_LAST_WRITE_LOCK: if not checkpoint_context.in_async_metrics_context(): metrics.AddTrainingTimeSaved(api_label=_CHECKPOINT_V2, microseconds=_get_duration_microseconds(_END_TIME_OF_LAST_WRITE, end_time)) if checkpoint_context.in_preemption_save_context(): _preemption_checkpoint_saved_time_usecs.get_cell().increase_by(_get_duration_microseconds(_END_TIME_OF_LAST_WRITE, end_time)) _END_TIME_OF_LAST_WRITE = end_time metrics.RecordCheckpointSize(api_label=_CHECKPOINT_V2, filesize=_get_checkpoint_size(output)) return output
Internal method that implements Checkpoint.write(). Args: file_prefix: A prefix to use for the checkpoint filenames (/path/to/directory/and_a_prefix). options: Optional `tf.train.CheckpointOptions` object. Returns: The full path to the checkpoint (i.e. `file_prefix`).
github-repos
def _query(cls, *args, **kwds): if 'distinct' in kwds: if 'group_by' in kwds: raise TypeError( 'cannot use distinct= and group_by= at the same time') projection = kwds.get('projection') if not projection: raise TypeError( 'cannot use distinct= without projection=') if kwds.pop('distinct'): kwds['group_by'] = projection from .query import Query qry = Query(kind=cls._get_kind(), **kwds) qry = qry.filter(*cls._default_filters()) qry = qry.filter(*args) return qry
Create a Query object for this class. Args: distinct: Optional bool, shorthand for group_by = projection. *args: Used to apply an initial filter. **kwds: are passed to the Query() constructor. Returns: A Query object.
juraj-google-style
def ShlexSplit(string): precondition.AssertType(string, Text) if PY2: string = string.encode('utf-8') parts = shlex.split(string) if PY2: parts = [part.decode('utf-8') for part in parts] return parts
A wrapper for `shlex.split` that works with unicode objects. Args: string: A unicode string to split. Returns: A list of unicode strings representing parts of the input string.
codesearchnet
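For context, a short sketch: on Python 3, shlex.split already handles unicode directly, which is all the wrapper above adds on top of the Python 2 encode/decode round trip.

import shlex

# shlex.split on a unicode string works out of the box on Python 3.
print(shlex.split('ls -la "my documents"'))  # ['ls', '-la', 'my documents']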
def __init__(self, autoconnect=True, password=None, db=0, **connection_kwargs): if 'read_callback' in connection_kwargs or \ 'close_callback' in connection_kwargs: raise Exception("read_callback and close_callback are not allowed " "to be used here.") self.connection_kwargs = connection_kwargs self.autoconnect = autoconnect self.password = password self.db = db self.__connection = None self.subscribed = False self.__connection = None self.__reader = None self.__callback_queue = None self._condition = tornado.locks.Condition() self._reply_list = None
Constructor. Args: autoconnect (boolean): True if the client is in autoconnect mode (and in autoreconnection mode) (default True). password (string): the password to authenticate with. db (int): database number. **connection_kwargs: :class:`Connection` object kwargs.
juraj-google-style
def unzip_file(source_file, dest_dir=None, mkdir=False): if (dest_dir is None): (dest_dir, fname) = os.path.split(source_file) elif (not os.path.isdir(dest_dir)): if mkdir: preparedir(dest_dir) else: created = preparedir(dest_dir, False) if (not created): raise ValueError(('Failed to find %s.' % dest_dir)) with zipfile.ZipFile(source_file) as zf: for member in zf.infolist(): words = member.filename.split('\\') for word in words[:(- 1)]: (drive, word) = os.path.splitdrive(word) (head, word) = os.path.split(word) if (word in (os.curdir, os.pardir, '')): continue dest_dir = os.path.join(dest_dir, word) zf.extract(member, dest_dir)
Unzip a compressed file. Args: source_file: Full path to a valid compressed file (e.g. c:/ladybug/testPts.zip) dest_dir: Target folder to extract to (e.g. c:/ladybug). Default is set to the same directory as the source file. mkdir: Set to True to create the directory if doesn't exist (Default: False)
codesearchnet
def beautify(self, string): if not string: return string string, phrases = self.parse(string) if not phrases: return string if not self.positional and not self.always: raise errors.ArgumentError("Found phrases, but no styles " "were supplied!") return self.stringify(string, phrases)
Wraps together all actions needed to beautify a string, i.e. parse the string and then stringify the phrases (replace tags with formatting codes). Arguments: string (str): The string to beautify/parse. Returns: The parsed, stringified and ultimately beautified string. Raises: errors.ArgumentError if phrases were found, but not a single style (flag combination) was supplied.
juraj-google-style
def Validate(self, sections=None, parameters=None): if isinstance(sections, string_types): sections = [sections] if (sections is None): sections = [] if (parameters is None): parameters = [] validation_errors = {} for section in sections: for descriptor in self.type_infos: if descriptor.name.startswith((section + '.')): try: self.Get(descriptor.name) except (Error, ValueError) as e: validation_errors[descriptor.name] = e for parameter in parameters: for descriptor in self.type_infos: if (parameter == descriptor.name): try: self.Get(descriptor.name) except (Error, ValueError) as e: validation_errors[descriptor.name] = e return validation_errors
Validate sections or individual parameters. The GRR configuration file contains several sections, used by different components. Many of these components don't care about other sections. This method allows a component to declare in advance what sections and parameters it cares about, and have these validated. Args: sections: A list of sections to validate. All parameters within the section are validated. parameters: A list of specific parameters (in the format section.name) to validate. Returns: dict of {parameter: Exception}, where parameter is a section.name string.
codesearchnet
def _generate_latex_source(circuit, filename=None, scale=0.7, style=None, reverse_bits=False, plot_barriers=True, justify=None): (qregs, cregs, ops) = utils._get_layered_instructions(circuit, reverse_bits=reverse_bits, justify=justify) qcimg = _latex.QCircuitImage(qregs, cregs, ops, scale, style=style, plot_barriers=plot_barriers, reverse_bits=reverse_bits) latex = qcimg.latex() if filename: with open(filename, 'w') as latex_file: latex_file.write(latex) return latex
Convert QuantumCircuit to LaTeX string. Args: circuit (QuantumCircuit): input circuit scale (float): image scaling filename (str): optional filename to write latex style (dict or str): dictionary of style or file name of style file reverse_bits (bool): When set to True reverse the bit order inside registers for the output visualization. plot_barriers (bool): Enable/disable drawing barriers in the output circuit. Defaults to True. justify (str) : `left`, `right` or `none`. Defaults to `left`. Says how the circuit should be justified. Returns: str: Latex string appropriate for writing to file.
codesearchnet
def generate_stack_policy_args(stack_policy=None): args = {} if stack_policy: logger.debug("Stack has a stack policy") if stack_policy.url: raise NotImplementedError else: args["StackPolicyBody"] = stack_policy.body return args
Converts a stack policy object into keyword args. Args: stack_policy (:class:`stacker.providers.base.Template`): A template object representing a stack policy. Returns: dict: A dictionary of keyword arguments to be used elsewhere.
juraj-google-style
def _find_channel_index(data_format): for (i, c) in enumerate(data_format): if (c == 'C'): return i raise ValueError('data_format requires a channel dimension. Got: {}'.format(data_format))
Returns the index of the channel dimension. Args: data_format: A string of characters corresponding to Tensor dimensionality. Returns: channel_index: An integer indicating the channel dimension. Raises: ValueError: If no channel dimension was found.
codesearchnet
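A standalone sketch of the channel-index lookup above, with the leading underscore dropped for the example:

def find_channel_index(data_format):
    # Return the position of the 'C' (channel) axis in a data-format string.
    for i, c in enumerate(data_format):
        if c == 'C':
            return i
    raise ValueError('data_format requires a channel dimension. Got: {}'.format(data_format))

print(find_channel_index('NHWC'))  # 3
print(find_channel_index('NCHW'))  # 1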
def format(self, *args, **kwargs): inplace = kwargs.pop("inplace", False) if not inplace: return str(self).format(*args, **kwargs) self._lines = str(self).format(*args, **kwargs).splitlines()
Format the string representation of the editor. Args: inplace (bool): If True, overwrite editor's contents with formatted contents
juraj-google-style
def get_snmp_configuration(self): uri = '{}{}'.format(self.data['uri'], self.SNMP_CONFIGURATION_PATH) return self._helper.do_get(uri)
Gets the SNMP configuration for a logical interconnect. Returns: dict: SNMP configuration.
codesearchnet
def noisy_moment(self, moment: 'cirq.Moment', system_qubits: Sequence['cirq.Qid']) -> 'cirq.OP_TREE': if (not hasattr(self.noisy_moments, '_not_overridden')): return self.noisy_moments([moment], system_qubits) if (not hasattr(self.noisy_operation, '_not_overridden')): return [self.noisy_operation(op) for op in moment] assert False, 'Should be unreachable.'
Adds noise to the operations from a moment. Args: moment: The moment to add noise to. system_qubits: A list of all qubits in the system. Returns: An OP_TREE corresponding to the noisy operations for the moment.
codesearchnet
def assert_no_new_tensors(f: _F) -> _F: def decorator(self: 'TensorFlowTestCase', **kwargs): def _is_tensorflow_object(obj) -> bool: try: return isinstance(obj, (tensor_lib.Tensor, variables.Variable, tensor_shape.Dimension, tensor_shape.TensorShape)) except (ReferenceError, AttributeError): return False tensors_before = set((id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))) outside_executed_eagerly = cast(bool, context.executing_eagerly()) outside_graph_key = ops.get_default_graph()._graph_key with ops.Graph().as_default(): ops.get_default_graph()._graph_key = outside_graph_key if outside_executed_eagerly: with context.eager_mode(): result = f(self, **kwargs) else: result = f(self, **kwargs) context.context()._clear_caches() gc.collect() tensors_after = [obj for obj in gc.get_objects() if _is_tensorflow_object(obj) and id(obj) not in tensors_before] if tensors_after: raise AssertionError('%d Tensors not deallocated after test: %s' % (len(tensors_after), str(tensors_after))) return result return tf_decorator.make_decorator(f, decorator)
Decorator for asserting that no new Tensors persist after a test. Mainly useful for checking that code using the Python C API has correctly manipulated reference counts. Clears the caches that it knows about, runs the garbage collector, then checks that there are no Tensor or Tensor-like objects still around. This includes Tensors to which something still has a reference (e.g. from missing Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one of the objects has __del__ defined). Args: f: The test case to run. Returns: The decorated test case.
github-repos
def headers_present(self, headers): headers = {name: re.compile('(.*)') for name in headers} self.add_matcher(matcher('HeadersMatcher', headers))
Defines a list of headers that must be present in the outgoing request in order to satisfy the matcher, no matter what values the headers hold. Header keys are case insensitive. Arguments: headers (list|tuple): header keys to match. Returns: self: current Mock instance. Example:: (pook.get('server.com/api') .headers_present(['content-type', 'Authorization']))
juraj-google-style
def create_row_token_type_ids_from_sequences(self, query_ids: List[int], table_values: List[TableValue]) -> List[int]: table_row_ids = list(zip(*table_values))[2] if table_values else [] return [0] * (1 + len(query_ids) + 1) + list(table_row_ids)
Creates the row token type IDs according to the query token IDs and a list of table values. Args: query_ids (`List[int]`): list of token IDs corresponding to the query. table_values (`List[TableValue]`): list of table values, which are named tuples containing the token value, the column ID and the row ID of said token. Returns: `List[int]`: List of ints containing the row token type IDs values.
github-repos
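An illustrative sketch of the row-ID layout, using a hypothetical TableValue namedtuple as a stand-in for the real named tuple (the field names here are assumptions): the [CLS] token, the query tokens and the [SEP] token all get row ID 0, while table tokens keep their own row IDs.

from collections import namedtuple

TableValue = namedtuple('TableValue', ['token_id', 'column_id', 'row_id'])  # hypothetical stand-in

query_ids = [101, 102, 103]
table_values = [TableValue(5, 1, 1), TableValue(6, 2, 1), TableValue(7, 1, 2)]

table_row_ids = list(zip(*table_values))[2]
row_token_type_ids = [0] * (1 + len(query_ids) + 1) + list(table_row_ids)
print(row_token_type_ids)  # [0, 0, 0, 0, 0, 1, 1, 2]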
def from_string(string_data, file_format="xyz"): mols = pb.readstring(str(file_format), str(string_data)) return BabelMolAdaptor(mols.OBMol)
Uses OpenBabel to read a molecule from a string in all supported formats. Args: string_data: String containing molecule data. file_format: String specifying any OpenBabel supported formats. Returns: BabelMolAdaptor object
juraj-google-style
def detect_builtin_shadowing_definitions(self, contract): result = [] for function in contract.functions: if (function.contract == contract): if self.is_builtin_symbol(function.name): result.append((self.SHADOWING_FUNCTION, function, None)) result += self.detect_builtin_shadowing_locals(function) for modifier in contract.modifiers: if (modifier.contract == contract): if self.is_builtin_symbol(modifier.name): result.append((self.SHADOWING_MODIFIER, modifier, None)) result += self.detect_builtin_shadowing_locals(modifier) for variable in contract.variables: if (variable.contract == contract): if self.is_builtin_symbol(variable.name): result.append((self.SHADOWING_STATE_VARIABLE, variable, None)) for event in contract.events: if (event.contract == contract): if self.is_builtin_symbol(event.name): result.append((self.SHADOWING_EVENT, event, None)) return result
Detects if functions, access modifiers, events, state variables, or local variables are named after built-in symbols. Any such definitions are returned in a list. Returns: list of tuple: (type, definition, [local variable parent])
codesearchnet
def _fit(self, col): column = col[self.col_name].replace({np.nan: np.inf}) frequencies = column.groupby(column).count().rename({np.inf: None}).to_dict() start = 0 end = 0 num_vals = len(col) for val in frequencies: prob = frequencies[val] / num_vals end = start + prob interval = (start, end) mean = np.mean(interval) std = prob / 6 self.probability_map[val] = (interval, mean, std) start = end
Create a map of the empirical probability for each category. Args: col(pandas.DataFrame): Data to transform.
juraj-google-style
def multi(self, **kwargs): path = self._get_path('multi') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
Search the movie, tv show and person collections with a single query. Args: query: CGI escaped string. page: (optional) Minimum value of 1. Expected value is an integer. language: (optional) ISO 639-1 code. include_adult: (optional) Toggle the inclusion of adult titles. Expected value is True or False. Returns: A dict representation of the JSON returned from the API.
juraj-google-style
def conv(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]: scale = [1.0] * self.out_channel_size offset = [0.5] * self.out_channel_size mean, variance = (scale, offset) out = nn_ops.conv2d(input_tensor, self.filters, strides=strides, dilations=dilations, padding=padding, data_format='NHWC') if has_bias: out = nn_ops.bias_add(out, self.bias, data_format='NHWC') if has_batch_norm: out, _, _, _, _, _ = nn_ops.fused_batch_norm_v3(out, scale, offset, mean, variance, is_training=False) if activation_fn is not None: out = activation_fn(out) return {'output': out}
Performs a 2D convolution operation. Args: input_tensor: Input tensor to perform convolution on. Returns: A map of: output key -> output result.
github-repos
def getWhoisInfo(domain): new = [] try: emails = {} emails['type'] = 'i3visio.alias' emails['value'] = str(domain.split('.')[0]) emails['attributes'] = [] new.append(emails) except: pass info = whois.whois(domain) if (info.status == None): raise Exception((('UnknownDomainError: ' + domain) + ' could not be resolved.')) try: emails = {} emails['type'] = 'i3visio.email' if (type(info.emails) is not list): aux = [info.emails] emails['value'] = json.dumps(aux) else: emails['value'] = json.dumps(info.emails) emails['attributes'] = [] new.append(emails) except: pass try: tmp = {} tmp['type'] = 'i3visio.location.country' tmp['value'] = str(info.country) tmp['attributes'] = [] new.append(tmp) except: pass try: tmp = {} tmp['type'] = 'i3visio.registrar' tmp['value'] = str(info.registrar) tmp['attributes'] = [] new.append(tmp) except: pass try: tmp = {} tmp['type'] = 'i3visio.fullname' try: tmp['value'] = str(info.name) except: tmp['value'] = info.name tmp['attributes'] = [] new.append(tmp) except: pass return new
Method that tries to recover the whois info from a domain. Args: ----- domain: The domain to verify. Returns: -------- dict: A dictionary containing the result as an i3visio entity with its `value`, `type` and `attributes`.
codesearchnet
def func(self, w, *args): x0 = args[0] x1 = args[1] n0 = x0.shape[0] n1 = x1.shape[0] n = max(n0, n1) * 10 idx0 = np.random.choice(range(n0), size=n) idx1 = np.random.choice(range(n1), size=n) b0 = np.ones((n0, 1)) b1 = np.ones((n1, 1)) i1 = self.i + 1 h = self.h h1 = h + 1 if sparse.issparse(x0): p0 = np.hstack((sigm(sparse.hstack((x0, b0)).dot(w[:-h1].reshape( i1, h))), b0)).dot(w[-h1:].reshape(h1, 1)) p1 = np.hstack((sigm(sparse.hstack((x1, b1)).dot(w[:-h1].reshape( i1, h))), b1)).dot(w[-h1:].reshape(h1, 1)) else: p0 = np.hstack((sigm(np.hstack((x0, b0)).dot(w[:-h1].reshape( i1, h))), b0)).dot(w[-h1:].reshape(h1, 1)) p1 = np.hstack((sigm(np.hstack((x1, b1)).dot(w[:-h1].reshape( i1, h))), b1)).dot(w[-h1:].reshape(h1, 1)) p0 = p0[idx0] p1 = p1[idx1] return .5 * (sum((1 - p1 + p0) ** 2) / n + self.l1 * sum(w[:-h1] ** 2) / (i1 * h) + self.l2 * sum(w[-h1:] ** 2) / h1)
Return the costs of the neural network for predictions. Args: w (array of float): weight vectors such that: w[:-h1] -- weights between the input and h layers w[-h1:] -- weights between the h and output layers args: features (args[0]) and target (args[1]) Returns: combined cost of RMSE, L1, and L2 regularization
juraj-google-style
def get_gains_losses(changes): res = {'gains': [], 'losses': []} for change in changes: if change > 0: res['gains'].append(change) else: res['losses'].append(change * -1) logger.debug('Gains: {0}'.format(res['gains'])) logger.debug('Losses: {0}'.format(res['losses'])) return res
Categorizes changes into gains and losses Args: changes: List of floats of price changes between entries in JSON. Returns: Dict of changes with keys 'gains' and 'losses'. All values are positive.
juraj-google-style
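A quick, self-contained restatement of the categorisation above (logging omitted), with a worked call:

def get_gains_losses(changes):
    # Split price changes into positive gains and sign-flipped losses.
    res = {'gains': [], 'losses': []}
    for change in changes:
        if change > 0:
            res['gains'].append(change)
        else:
            res['losses'].append(-change)
    return res

print(get_gains_losses([1.5, -0.5, 2.0, -1.0]))
# {'gains': [1.5, 2.0], 'losses': [0.5, 1.0]}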
def create_pipeline_stage(self, pipeline_key, name, **kwargs): if not (pipeline_key and name): return requests.codes.bad_request, None uri = '/'.join([ self.api_uri, self.pipelines_suffix, pipeline_key, self.stages_suffix]) kwargs.update({'name':name}) new_box = StreakStage(**kwargs) code, data = self._req('put', uri, new_box.to_dict(rw = True)) return code, data
Creates a pipeline stage with the provided attributes. Args: name: required name string. kwargs: {..} see StreakStage object for details. Returns: (status code, stage dict)
juraj-google-style
def _format_subscripts(self, subscripts, value, limit=10, indent=2): lines = [] subscripts = np.transpose(subscripts) prefix = ' ' * indent if np.ndim(value) == 0: return [prefix + '[0] : ' + str(value)] for subscript in itertools.islice(subscripts, limit): lines.append(prefix + str(subscript) + ' : ' + str(value[tuple(subscript)])) if len(subscripts) > limit: lines.append(prefix + '...') return lines
Generate a summary of ndarray subscripts as a list of str. If limit == N, this method will print up to the first N subscripts on separate lines. A line of ellipses (...) will be appended at the end if the number of subscripts exceeds N. Args: subscripts: The tensor (np.ndarray) subscripts, of the same format as np_where()'s return value, i.e., a tuple of arrays with each array corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])). value: (np.ndarray) value of the tensor. limit: (int) The maximum number of indices to print. indent: (int) Number of characters to indent at the beginning of each line. Returns: (list of str) the multi-line representation of the subscripts and values, potentially with omission at the end.
github-repos
def temporal_padding(x, padding=(1, 1)): assert len(padding) == 2 pattern = [[0, 0], [padding[0], padding[1]], [0, 0]] return array_ops.pad(x, pattern)
Pads the middle dimension of a 3D tensor. Args: x: Tensor or variable. padding: Tuple of 2 integers, how many zeros to add at the start and end of dim 1. Returns: A padded 3D tensor.
github-repos
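As an aside, a NumPy analogue of the padding pattern above: pad only the middle (time) axis of a 3D array.

import numpy as np

x = np.ones((2, 3, 4))
# One step of zero padding before and after along axis 1 only.
padded = np.pad(x, [[0, 0], [1, 1], [0, 0]])
print(padded.shape)  # (2, 5, 4)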
def nodes(self, device_name=None): if not self._debug_graphs: raise LookupError('No partition graphs have been loaded.') if device_name is None: nodes = [] for device_name in self._debug_graphs: nodes.extend(self._debug_graphs[device_name].node_inputs.keys()) return nodes else: if device_name not in self._debug_graphs: raise ValueError('Invalid device name: %s' % device_name) return self._debug_graphs[device_name].node_inputs.keys()
Get a list of all nodes from the partition graphs. Args: device_name: (`str`) name of device. If None, all nodes from all available devices will be included. Returns: All nodes' names, as a list of str. Raises: LookupError: If no partition graphs have been loaded. ValueError: If specified node name does not exist.
github-repos
def detect_timezone(): if (sys.platform == 'win32'): tz = _detect_timezone_windows() if (tz is not None): return tz tz = _detect_timezone_environ() if (tz is not None): return tz tz = _detect_timezone_etc_timezone() if (tz is not None): return tz tz = _detect_timezone_etc_localtime() if (tz is not None): return tz warnings.warn("Had to fall back to worst detection method (the 'PHP' method).") tz = _detect_timezone_php() if (tz is not None): return tz raise pytz.UnknownTimeZoneError('Unable to detect your timezone!')
Try and detect the timezone that Python is currently running in. We have a bunch of different methods for trying to figure this out (listed in order they are attempted). * In windows, use win32timezone.TimeZoneInfo.local() * Try TZ environment variable. * Try and find /etc/timezone file (with timezone name). * Try and find /etc/localtime file (with timezone data). * Try and match a TZ to the current dst/offset/shortname. Returns: The detected local timezone as a tzinfo object Raises: pytz.UnknownTimeZoneError: If it was unable to detect a timezone.
codesearchnet
def convert_mass_to_atomic_fractions(mass_fractions): atomic_fractions = {} for z, mass_fraction in mass_fractions.items(): atomic_fractions[z] = mass_fraction / pyxray.element_atomic_weight(z) total_fraction = sum(atomic_fractions.values()) for z, fraction in atomic_fractions.items(): try: atomic_fractions[z] = fraction / total_fraction except ZeroDivisionError: atomic_fractions[z] = 0.0 return atomic_fractions
Converts a mass fraction :class:`dict` to an atomic fraction :class:`dict`. Args: mass_fractions (dict): mass fraction :class:`dict`. The composition is specified by a dictionary. The keys are atomic numbers and the values weight fractions. No wildcard are accepted.
juraj-google-style
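A standalone sketch of the same conversion with hard-coded atomic weights (the entry above looks them up via pyxray.element_atomic_weight); the weights below are standard values for Fe and C.

ATOMIC_WEIGHTS = {26: 55.845, 6: 12.011}  # Fe, C

def mass_to_atomic_fractions(mass_fractions):
    # Divide each mass fraction by its atomic weight, then renormalise.
    atomic = {z: wf / ATOMIC_WEIGHTS[z] for z, wf in mass_fractions.items()}
    total = sum(atomic.values())
    return {z: (f / total if total else 0.0) for z, f in atomic.items()}

print(mass_to_atomic_fractions({26: 0.98, 6: 0.02}))
# roughly 0.91 atomic fraction Fe and 0.09 C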
def parse_date(date_string, ignoretz=True): try: return parser.parse(date_string, ignoretz=ignoretz) except TypeError: return None
Parse a string as a date. If the string fails to parse, `None` will be returned instead >>> parse_date('2017-08-15T18:24:31') datetime.datetime(2017, 8, 15, 18, 24, 31) Args: date_string (`str`): Date in string format to parse ignoretz (`bool`): If set ``True``, ignore time zones and return a naive :class:`datetime` object. Returns: `datetime`, `None`
codesearchnet
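A usage sketch of the same pattern, assuming python-dateutil is installed; like the entry above, it only catches TypeError.

from dateutil import parser

def parse_date(date_string, ignoretz=True):
    # Return a (naive, if ignoretz) datetime, or None when parsing fails.
    try:
        return parser.parse(date_string, ignoretz=ignoretz)
    except TypeError:
        return None

print(parse_date('2017-08-15T18:24:31'))  # 2017-08-15 18:24:31
print(parse_date(None))                   # None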
def _ReadTableHeader(self, file_object, table_header_offset): data_type_map = self._GetDataTypeMap('keychain_table_header') table_header, _ = self._ReadStructureFromFileObject( file_object, table_header_offset, data_type_map) return table_header
Reads the table header. Args: file_object (file): file-like object. table_header_offset (int): offset of the tables header relative to the start of the file. Returns: keychain_table_header: table header. Raises: ParseError: if the table header cannot be read.
juraj-google-style
def argmin(input_, key=None): if isinstance(input_, dict): return list(input_.keys())[argmin(list(input_.values()), key=key)] else: if key is None: def _key(item): return item[1] else: def _key(item): return key(item[1]) return min(enumerate(input_), key=_key)[0]
Returns index / key of the item with the smallest value. Args: input_ (dict or list): Note: a[argmin(a, key=key)] == min(a, key=key)
juraj-google-style
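A compact, self-contained restatement of the helper above, with calls showing the list and dict behaviours:

def argmin(input_, key=None):
    # Index (for sequences) or key (for dicts) of the smallest value.
    if isinstance(input_, dict):
        return list(input_.keys())[argmin(list(input_.values()), key=key)]
    _key = (lambda item: item[1]) if key is None else (lambda item: key(item[1]))
    return min(enumerate(input_), key=_key)[0]

print(argmin([3.2, 1.5, 4.8]))   # 1
print(argmin({'a': 3, 'b': 1}))  # 'b'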
def from_config(cls, config): return cls(**config)
Creates a layer from its config. This method is the reverse of `get_config`, capable of instantiating the same layer from the config dictionary. It does not handle layer connectivity (handled by Network), nor weights (handled by `set_weights`). Args: config: A Python dictionary, typically the output of get_config. Returns: A layer instance.
github-repos
def bitwise_xor(x, y): if any_symbolic_tensors((x, y)): return BitwiseXor().symbolic_call(x, y) return backend.numpy.bitwise_xor(x, y)
Compute the bit-wise XOR of two arrays element-wise. Computes the bit-wise XOR of the underlying binary representation of the integers in the input arrays. This ufunc implements the C/Python operator `^`. Args: x: Input integer tensor. y: Input integer tensor. Returns: Result tensor.
github-repos
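For illustration only, a NumPy stand-in showing the element-wise XOR semantics (the entry above dispatches to the Keras backend):

import numpy as np

a = np.array([0b1010, 0b1100], dtype=np.int32)
b = np.array([0b0110, 0b1010], dtype=np.int32)
print(np.bitwise_xor(a, b))  # [12  6], i.e. [0b1100, 0b0110]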
def insert(self, entity_id, property_uri, value): if (not entity_id.startswith('http')): entity_uri = urllib.parse.urljoin(self.base_url, entity_id) else: entity_uri = entity_id if entity_uri.endswith('/'): entity_uri = entity_uri[:(- 1)] if (not entity_id.endswith('fcr:metadata')): entity_uri = '/'.join([entity_uri, 'fcr:metadata']) if (not self.exists(entity_id)): self.create(entity_id) sparql_template = Template('$prefix\n INSERT DATA {\n <$entity> $prop_uri $value_str ;\n }') sparql = sparql_template.substitute(prefix=build_prefixes(self.namespaces), entity=entity_uri, prop_uri=property_uri, value_str=self.__value_format__(value)) update_request = urllib.request.Request(entity_uri, data=sparql.encode(), method='PATCH', headers={'Content-Type': 'application/sparql-update'}) try: response = urllib.request.urlopen(update_request) except urllib.error.HTTPError: print('Error trying patch {}, sparql=\n{}'.format(entity_uri, sparql)) return False if (response.code < 400): return True return False
Method inserts a new entity's property in Fedora4 Repository Args: entity_id(string): Unique ID of Fedora object property_uri(string): URI of property value: Value of the property, can be literal or URI reference Returns: boolean: True if successful changed in Fedora, False otherwise
codesearchnet
def setKeepAliveTimeOut(self, iTimeOut): print '%s call setKeepAliveTimeOut' % self.port print iTimeOut try: cmd = WPANCTL_CMD + 'setprop NCP:SleepyPollInterval %s' % str(iTimeOut*1000) print cmd return self.__sendCommand(cmd)[0] != 'Fail' except Exception, e: ModuleHelper.WriteIntoDebugLogger('setKeepAliveTimeOut() Error: ' + str(e))
Set the keep-alive timeout for the device (this has been deprecated) and also set the SED polling rate. Args: iTimeOut: data poll period for the sleepy end device Returns: True: successfully set the data poll period for the SED False: failed to set the data poll period for the SED
juraj-google-style
def _normalize_angle(angle, range, step): while angle <= range[0]: angle += step while angle >= range[1]: angle -= step return angle
Finds an angle that matches the given one modulo step. Increments and decrements the given value with a given step. Args: angle: the angle to normalize. range: a 2-tuple of min and max target values. step: tuning step. Returns: Normalized value within a given range.
juraj-google-style
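A standalone sketch of the normalisation above, renamed and with range_ used to avoid shadowing the builtin:

def normalize_angle(angle, range_, step):
    # Shift the angle by whole steps until it falls strictly inside range_.
    while angle <= range_[0]:
        angle += step
    while angle >= range_[1]:
        angle -= step
    return angle

print(normalize_angle(370.0, (-180.0, 180.0), 360.0))   # 10.0
print(normalize_angle(-270.0, (-180.0, 180.0), 360.0))  # 90.0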
def register_agent(self, host, sweep_id=None, project_name=None): mutation = gql() if project_name is None: project_name = self.settings('project') def no_retry_400(e): if not isinstance(e, requests.HTTPError): return True if e.response.status_code != 400: return True body = json.loads(e.response.content) raise UsageError(body['errors'][0]['message']) response = self.gql(mutation, variable_values={ 'host': host, 'entityName': self.settings("entity"), 'projectName': project_name, 'sweep': sweep_id}, check_retry_fn=no_retry_400) return response['createAgent']['agent']
Register a new agent. Args: host (str): hostname. sweep_id (str): sweep id. project_name (str): project that contains the sweep.
juraj-google-style
def _next_file(self):
    while True:
        if self._bucket_iter:
            try:
                return self._bucket_iter.next().filename
            except StopIteration:
                self._bucket_iter = None
                self._bucket = None
        if self._index >= len(self._filenames):
            return
        filename = self._filenames[self._index]
        self._index += 1
        if self._delimiter is None or not filename.endswith(self._delimiter):
            return filename
        self._bucket = cloudstorage.listbucket(filename, delimiter=self._delimiter)
        self._bucket_iter = iter(self._bucket)
Find next filename. self._filenames may need to be expanded via listbucket. Returns: None if no more file is left. Filename otherwise.
codesearchnet
def CalculateForecastStats(matched, available, possible=None):
    if matched > 0:
        available_percent = (float(available) / matched) * 100.0
    else:
        available_percent = 0
    if possible is not None:
        if matched > 0:
            possible_percent = (possible / float(matched)) * 100.0
        else:
            possible_percent = 0
    else:
        possible_percent = None
    return (available_percent, possible_percent)
Calculate forecast percentage stats. Args: matched: The number of matched impressions. available: The number of available impressions. possible: The optional number of possible impressions. Returns: The percentage of impressions that are available and possible.
codesearchnet
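The percentage math above is easy to verify by hand. Assuming the function from the row above is importable, the impression counts below are made-up examples:
# 150 available out of 200 matched -> 75%; 300 possible out of 200 matched -> 150%
available_pct, possible_pct = CalculateForecastStats(matched=200, available=150, possible=300)
print(available_pct)   # 75.0
print(possible_pct)    # 150.0
print(CalculateForecastStats(matched=200, available=150))   # (75.0, None)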
def stats(self):
    per_utt_stats = self.stats_per_utterance()
    return stats.DataStats.concatenate(per_utt_stats.values())
Return statistics calculated over all samples of all utterances in the corpus.

Returns:
    DataStats: A DataStats object containing statistics over all samples in the corpus.
codesearchnet
def safe_date(self, x):
    t = x[self.col_name]
    if np.isnan(t):
        return t
    elif np.isposinf(t):
        t = sys.maxsize
    elif np.isneginf(t):
        t = -sys.maxsize
    tmp = time.localtime(float(t) / 1e9)
    return time.strftime(self.date_format, tmp)
Transform x[self.col_name] into a date string. Args: x(dict like / pandas.Series): Row containing data to cast safely. Returns: str
juraj-google-style
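The row above converts nanosecond epoch values into formatted date strings. A self-contained sketch of the same `localtime` + `strftime` path, with a made-up timestamp and format:
import time

t_ns = 1262304000 * 1e9                    # 2010-01-01 00:00:00 UTC, expressed in nanoseconds
tmp = time.localtime(float(t_ns) / 1e9)
print(time.strftime('%Y-%m-%d', tmp))      # '2010-01-01' (or the local-time equivalent)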
def memory_read(self, addr, num_units, zone=None, nbits=None):
    buf_size = num_units
    buf = None
    access = 0
    if nbits is None:
        buf = (ctypes.c_uint8 * buf_size)()
        access = 0
    elif nbits == 8:
        buf = (ctypes.c_uint8 * buf_size)()
        access = 1
    elif nbits == 16:
        buf = (ctypes.c_uint16 * buf_size)()
        access = 2
        buf_size = buf_size * access
    elif nbits == 32:
        buf = (ctypes.c_uint32 * buf_size)()
        access = 4
        buf_size = buf_size * access
    else:
        raise ValueError('Given bit size is invalid: %s' % nbits)
    args = [addr, buf_size, buf, access]
    method = self._dll.JLINKARM_ReadMemEx
    if zone is not None:
        method = self._dll.JLINKARM_ReadMemZonedEx
        args.append(zone.encode())
    units_read = method(*args)
    if units_read < 0:
        raise errors.JLinkReadException(units_read)
    return buf[:units_read]
Reads memory from a target system or specific memory zone.

The optional ``zone`` specifies a memory zone to access to read from, e.g. ``IDATA``, ``DDATA``, or ``CODE``.

The given number of bits, if provided, must be either ``8``, ``16``, or ``32``. If not provided, always reads ``num_units`` bytes.

Args:
    self (JLink): the ``JLink`` instance
    addr (int): start address to read from
    num_units (int): number of units to read
    zone (str): optional memory zone name to access
    nbits (int): number of bits to use for each unit

Returns:
    List of units read from the target system.

Raises:
    JLinkException: if memory could not be read.
    ValueError: if ``nbits`` is not ``None``, and not in ``8``, ``16``, or ``32``.
codesearchnet
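A hedged usage sketch for the read above. It assumes a pylink-style ``JLink`` object that has already been opened and connected; the device name and RAM address are placeholders, not values from the source:
import pylink   # assumes the pylink-square package

jlink = pylink.JLink()
jlink.open()                      # attach to the first connected probe
jlink.connect('STM32F407VE')      # placeholder device name
halfwords = jlink.memory_read(0x20000000, 8, nbits=16)   # eight 16-bit units from RAM
print([hex(v) for v in halfwords])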
def upload_metric(self, dataset_name, table_name, run_id):
    expected_file = os.path.join(
        self._logging_dir, logger.METRIC_LOG_FILE_NAME)
    with tf.gfile.GFile(expected_file) as f:
        lines = f.readlines()
        metrics = []
        for line in filter(lambda l: l.strip(), lines):
            metric = json.loads(line)
            metric["run_id"] = run_id
            metrics.append(metric)
        table_ref = self._bq_client.dataset(dataset_name).table(table_name)
        errors = self._bq_client.insert_rows_json(table_ref, metrics)
        if errors:
            tf.logging.error(
                "Failed to upload benchmark info to bigquery: {}".format(errors))
Upload metric information to Bigquery.

Args:
    dataset_name: string, the name of bigquery dataset where the data will be uploaded.
    table_name: string, the name of bigquery table under the dataset where the metric data will be uploaded. This is different from the benchmark_run table.
    run_id: string, a unique ID that will be attached to the data, usually this is a UUID4 format. This should be the same as the benchmark run_id.
juraj-google-style
def storage_volume_attachments(self):
    if not self.__storage_volume_attachments:
        self.__storage_volume_attachments = StorageVolumeAttachments(self.__connection)
    return self.__storage_volume_attachments
Gets the StorageVolumeAttachments API client. Returns: StorageVolumeAttachments:
codesearchnet
def market_if_touched(self, accountID, **kwargs):
    return self.create(
        accountID,
        order=MarketIfTouchedOrderRequest(**kwargs)
    )
Shortcut to create a MarketIfTouched Order in an Account Args: accountID : The ID of the Account kwargs : The arguments to create a MarketIfTouchedOrderRequest Returns: v20.response.Response containing the results from submitting the request
juraj-google-style
def pack_container(in_container, out_file):
    container_filename = local.path(out_file).basename
    out_container = local.cwd / "container-out" / container_filename
    out_dir = out_container.dirname

    with local.cwd(in_container):
        tar("cjf", out_container, ".")

    c_hash = download.update_hash(out_container)
    if out_dir.exists():
        mkdir("-p", out_dir)
    mv(out_container, out_file)
    mv(out_container + ".hash", out_file + ".hash")

    new_container = {"path": out_file, "hash": str(c_hash)}
    CFG["container"]["known"] += new_container
Pack a container image into a .tar.bz2 archive. Args: in_container (str): Path string to the container image. out_file (str): Output file name.
juraj-google-style
def get_course_details(self, course_id):
    try:
        return self.client.course(course_id).get()
    except (SlumberBaseException, ConnectionError, Timeout) as exc:
        LOGGER.exception(
            'Failed to retrieve course enrollment details for course [%s] due to: [%s]',
            course_id, str(exc)
        )
        return {}
Query the Enrollment API for the course details of the given course_id. Args: course_id (str): The string value of the course's unique identifier Returns: dict: A dictionary containing details about the course, in an enrollment context (allowed modes, etc.)
codesearchnet
class ClvpProcessor(ProcessorMixin):
    feature_extractor_class = 'ClvpFeatureExtractor'
    tokenizer_class = 'ClvpTokenizer'
    model_input_names = ['input_ids', 'input_features', 'attention_mask']

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        raw_speech = kwargs.pop('raw_speech', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if raw_speech is None and text is None:
            raise ValueError('You need to specify either an `raw_speech` or `text` input to process.')
        if raw_speech is not None:
            inputs = self.feature_extractor(raw_speech, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif raw_speech is None:
            return encodings
        else:
            inputs['input_ids'] = encodings['input_ids']
            inputs['attention_mask'] = encodings['attention_mask']
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
Constructs a CLVP processor which wraps a CLVP Feature Extractor and a CLVP Tokenizer into a single processor.

[`ClvpProcessor`] offers all the functionalities of [`ClvpFeatureExtractor`] and [`ClvpTokenizer`]. See the [`~ClvpProcessor.__call__`], [`~ClvpProcessor.decode`] and [`~ClvpProcessor.batch_decode`] for more information.

Args:
    feature_extractor (`ClvpFeatureExtractor`): An instance of [`ClvpFeatureExtractor`]. The feature extractor is a required input.
    tokenizer (`ClvpTokenizer`): An instance of [`ClvpTokenizer`]. The tokenizer is a required input.
github-repos
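A hedged sketch of how the processor above is typically driven; the checkpoint id and the random audio are placeholders, and a recent `transformers` install is assumed:
import numpy as np
from transformers import ClvpProcessor

processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")   # placeholder checkpoint id
speech = np.random.randn(22050).astype(np.float32)              # one second of fake audio

inputs = processor(raw_speech=speech, sampling_rate=22050, text="hello world", return_tensors="pt")
# input_features come from the feature extractor; input_ids/attention_mask from the tokenizer
print(sorted(inputs.keys()))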
def append(self, node):
    if not isinstance(node, grammar.STATEMENTS):
        raise ValueError
    self.to_append[-1].append(node)
Append a statement to the current statement. Note that with multiple calls to append, the last statement appended ends up at the bottom.

Args:
    node: The statement to append.

Raises:
    ValueError: If the given node is not a statement.
codesearchnet
def set_params(self, **params):
    if 'bias' in params.keys():
        self.intercept_ = params['bias']
    if 'weights' in params.keys():
        self.coef_ = params['weights']
    for key in params.keys():
        if 'b_' == key[:2]:
            self.B[int(key[2:])] = params[key]
    return self
Set the parameters of the estimator.

Args:
    bias (array-like): bias of the estimator. Also known as the intercept in a linear model.
    weights (array-like): weights of the features. Also known as coefficients.
    NER biases (array-like): NER entities inferring column position on X and bias value. Ex: `b_4=10, b_5=6`.

Example:
    >>> cls = VTT()
    >>> cls.set_params(b_4=10, b_5=6, b_6=8)
codesearchnet
def cidr_check(cidr, return_cidr=True):
    try:
        if int(cidr) < 0 or int(cidr) > 32:
            good_cidr = False
        else:
            good_cidr = True
        if return_cidr:
            while not good_cidr:
                print("Sorry the CIDR value %s is not a valid value must be a value of 0 to 32. Please try again." % (cidr,))
                cidr = input("What is the mask for in CIDR format?: ")
                if int(cidr) < 0 or int(cidr) > 32:
                    good_cidr = False
                else:
                    good_cidr = True
            return cidr
        elif not return_cidr:
            return good_cidr
    except ValueError:
        LOGGER.critical('Function cidr_check expected a number but got {item}'.format(item=cidr))
        raise ValueError("The input needs to be a number!!")
Function to verify that a CIDR value is valid.

Args:
    cidr: CIDR value, 0 to 32.
    return_cidr: If True, returns a (re-prompted) CIDR value; if False, returns True or False.

Returns:
    See return_cidr for the return options.
juraj-google-style
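The validation above can be exercised non-interactively by passing `return_cidr=False`. This sketch assumes the function and its module-level `LOGGER` are importable from the surrounding module:
print(cidr_check(24, return_cidr=False))   # True  - /24 is a valid mask length
print(cidr_check(40, return_cidr=False))   # False - outside 0..32
try:
    cidr_check('abc', return_cidr=False)
except ValueError as err:
    print(err)                             # "The input needs to be a number!!"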
def _unconstrained_to_raw_svi(unconstrained_parameters):
    b = tf.math.exp(unconstrained_parameters[..., 1])
    rho = 2 * tf.math.sigmoid(unconstrained_parameters[..., 2]) - 1
    m = unconstrained_parameters[..., 3]
    sigma = tf.math.exp(unconstrained_parameters[..., 4])
    a = tf.math.exp(unconstrained_parameters[..., 0]) - b * sigma * tf.math.sqrt(1 - rho ** 2)
    return tf.transpose([a, b, rho, m, sigma])
Converts unconstrained optimization parameters to raw SVI ones.

Performs the inverse transformation of the internal unconstrained model parameters into the standard raw SVI parameters `a, b, rho, m, sigma`.

Args:
    unconstrained_parameters: A rank 2 real `Tensor` of shape [batch_size, 5], representing the unconstrained parameters used in the internal optimization of the SVI model.

Returns:
    A rank 2 real `Tensor` of shape [batch_size, 5], representing the raw SVI parameters `a, b, rho, m, sigma`.
github-repos
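A small check of the mapping above, assuming TensorFlow is available and the function is in scope. With all-zero inputs it confirms the intended constraints: `b > 0`, `rho` in (-1, 1), `sigma > 0`:
import tensorflow as tf

unconstrained = tf.zeros([3, 5], dtype=tf.float64)      # arbitrary batch of 3 parameter vectors
raw = _unconstrained_to_raw_svi(unconstrained)          # shape [3, 5]: columns a, b, rho, m, sigma
a, b, rho, m, sigma = tf.unstack(raw, axis=-1)
print(b.numpy(), rho.numpy(), sigma.numpy())            # b == 1, rho == 0, sigma == 1 for zero inputs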
def _GetMemberDataTypeMaps(self, data_type_definition, data_type_map_cache):
    if not data_type_definition:
        raise errors.FormatError('Missing data type definition')

    members = getattr(data_type_definition, 'members', None)
    if not members:
        raise errors.FormatError('Invalid data type definition missing members')

    data_type_maps = []
    members_data_size = 0

    for member_definition in members:
        if isinstance(member_definition, data_types.MemberDataTypeDefinition):
            member_definition = member_definition.member_data_type_definition

        if (data_type_definition.byte_order != definitions.BYTE_ORDER_NATIVE and
                member_definition.byte_order == definitions.BYTE_ORDER_NATIVE):
            member_definition = copy.copy(member_definition)
            member_definition.name = '_{0:s}_{1:s}'.format(
                data_type_definition.name, member_definition.name)
            member_definition.byte_order = data_type_definition.byte_order

        if member_definition.name not in data_type_map_cache:
            data_type_map = DataTypeMapFactory.CreateDataTypeMapByType(member_definition)
            data_type_map_cache[member_definition.name] = data_type_map

        data_type_map = data_type_map_cache[member_definition.name]
        if members_data_size is not None:
            if not isinstance(member_definition, data_types.PaddingDefinition):
                byte_size = member_definition.GetByteSize()
            else:
                _, byte_size = divmod(members_data_size, member_definition.alignment_size)
                if byte_size > 0:
                    byte_size = member_definition.alignment_size - byte_size
                data_type_map.byte_size = byte_size

            if byte_size is None:
                members_data_size = None
            else:
                members_data_size += byte_size

        data_type_maps.append(data_type_map)

    return data_type_maps
Retrieves the member data type maps.

Args:
    data_type_definition (DataTypeDefinition): data type definition.
    data_type_map_cache (dict[str, DataTypeMap]): cached data type maps.

Returns:
    list[DataTypeMap]: member data type maps.

Raises:
    FormatError: if the data type maps cannot be determined from the data type definition.
codesearchnet
def maybe_add_training_arg(original_call, wrapped_call, expects_training_arg, default_training_value):
    if not expects_training_arg:
        return (wrapped_call, None)

    def wrap_with_training_arg(*args, **kwargs):
        training_arg_index = get_training_arg_index(original_call)
        training = get_training_arg(training_arg_index, args, kwargs)
        if training is None:
            training = default_training_value or K.learning_phase()
        args = list(args)
        kwargs = kwargs.copy()

        def replace_training_and_call(training):
            set_training_arg(training, training_arg_index, args, kwargs)
            return wrapped_call(*args, **kwargs)

        return control_flow_util.smart_cond(
            training,
            lambda: replace_training_and_call(True),
            lambda: replace_training_and_call(False))

    arg_spec = tf_inspect.getfullargspec(original_call)
    defaults = list(arg_spec.defaults) if arg_spec.defaults is not None else []
    kwonlyargs = arg_spec.kwonlyargs
    kwonlydefaults = arg_spec.kwonlydefaults or {}
    if 'training' not in arg_spec.args:
        kwonlyargs.append('training')
        kwonlydefaults['training'] = default_training_value
    else:
        index = arg_spec.args.index('training')
        training_default_index = len(arg_spec.args) - index
        if (arg_spec.defaults and
                len(arg_spec.defaults) >= training_default_index and
                defaults[-training_default_index] is None):
            defaults[-training_default_index] = default_training_value

    decorator_argspec = tf_inspect.FullArgSpec(
        args=arg_spec.args,
        varargs=arg_spec.varargs,
        varkw=arg_spec.varkw,
        defaults=defaults,
        kwonlyargs=kwonlyargs,
        kwonlydefaults=kwonlydefaults,
        annotations=arg_spec.annotations)
    return (wrap_with_training_arg, decorator_argspec)
Decorates `call` and optionally adds a training argument.

If a layer expects a training argument, this function ensures that 'training' is present in the layer args or kwonly args, with the default training value.

Args:
    original_call: Original call function.
    wrapped_call: Wrapped call function.
    expects_training_arg: Whether to include 'training' argument.
    default_training_value: Default value of the training kwarg to include in the arg spec. If `None`, the default is `K.learning_phase()`.

Returns:
    Tuple of (
        function that calls `wrapped_call` and sets the training arg,
        Argspec of returned function or `None` if the argspec is unchanged)
github-repos
def Deserialize(self, reader):
    self.HashStart = reader.ReadSerializableArray('neocore.UInt256.UInt256')
    self.HashStop = reader.ReadUInt256()
Deserialize full object. Args: reader (neo.IO.BinaryReader):
juraj-google-style
def _exclude_denylisted_ops(self, node_names):
    return [node_name for node_name in node_names
            if self._debug_dump.node_op_type(debug_graphs.get_node_name(node_name))
            not in self._GRAPH_STRUCT_OP_TYPE_DENYLIST]
Exclude all nodes whose op types are in _GRAPH_STRUCT_OP_TYPE_DENYLIST. Args: node_names: An iterable of node or graph element names. Returns: A list of node names that are not denylisted.
github-repos
def send_message(host, data, timeout=None, properties=None):
    channel = _get_channel(host, timeout)

    if not properties:
        properties = pika.BasicProperties(
            content_type='application/json',
            delivery_mode=2,
            headers={'UUID': str(uuid.uuid4())})

    parameters = settings.get_amqp_settings()[host]
    channel.basic_publish(
        exchange=parameters['exchange'],
        routing_key=parameters['in_key'],
        properties=properties,
        body=data)
Send message to given `host`. Args: host (str): Specified host: aleph/ftp/whatever available host. data (str): JSON data. timeout (int, default None): How much time wait for connection.
codesearchnet
def get_discovery_doc(self, services, hostname=None):
    if not isinstance(services, (tuple, list)):
        services = [services]
    util.check_list_type(services, remote._ServiceClass, 'services', allow_none=False)
    return self.__discovery_doc_descriptor(services, hostname=hostname)
JSON dict description of a protorpc.remote.Service in discovery format. Args: services: Either a single protorpc.remote.Service or a list of them that implements an api/version. hostname: string, Hostname of the API, to override the value set on the current service. Defaults to None. Returns: dict, The discovery document as a JSON dict.
codesearchnet
def add(self, label):
    label.label_list = self
    self.label_tree.addi(label.start, label.end, label)
Add a label to the end of the list. Args: label (Label): The label to add.
codesearchnet
def ForceRemoveFileObject(self, path_spec):
    cache_value = self._file_object_cache.GetCacheValue(path_spec.comparable)
    if not cache_value:
        return False
    while not cache_value.IsDereferenced():
        cache_value.vfs_object.close()
    return True
Forces the removal of a file-like object based on a path specification. Args: path_spec (PathSpec): path specification. Returns: bool: True if the file-like object was cached.
codesearchnet
def five_crop(img, size):
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    else:
        assert len(size) == 2, 'Please provide only two dimensions (h, w) for size.'

    w, h = img.size
    crop_h, crop_w = size
    if crop_w > w or crop_h > h:
        raise ValueError('Requested crop size {} is bigger than input size {}'.format(size, (h, w)))

    tl = img.crop((0, 0, crop_w, crop_h))
    tr = img.crop((w - crop_w, 0, w, crop_h))
    bl = img.crop((0, h - crop_h, crop_w, h))
    br = img.crop((w - crop_w, h - crop_h, w, h))
    center = center_crop(img, (crop_h, crop_w))
    return (tl, tr, bl, br, center)
Crop the given PIL Image into four corners and the central crop.

.. Note::
    This transform returns a tuple of images and there may be a mismatch in the number of inputs and targets your ``Dataset`` returns.

Args:
    size (sequence or int): Desired output size of the crop. If size is an int instead of sequence like (h, w), a square crop (size, size) is made.

Returns:
    tuple: tuple (tl, tr, bl, br, center)
        Corresponding top left, top right, bottom left, bottom right and center crop.
codesearchnet
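A hedged usage sketch for the five-crop helper above. It assumes a torchvision install, where the same helper is importable from `torchvision.transforms.functional`; the blank image and crop size are arbitrary:
from PIL import Image
from torchvision.transforms.functional import five_crop   # standard import path for this helper

img = Image.new('RGB', (256, 256))
tl, tr, bl, br, center = five_crop(img, 224)
print([c.size for c in (tl, tr, bl, br, center)])          # five (224, 224) crops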
def run_calibration(self, saved_model_path: str, signature_keys: list[str], tags: set[str],
                    force_graph_mode_calibration: bool,
                    representative_dataset_file_map_serialized: dict[str, bytes]) -> Optional[bool]:
    dataset_file_map = {}
    for signature_key, dataset_file_serialized in representative_dataset_file_map_serialized.items():
        dataset_file_map[signature_key] = quantization_options_pb2.RepresentativeDatasetFile.FromString(
            dataset_file_serialized)
    return _call_and_return_none_on_error(
        func=functools.partial(_run_calibration, saved_model_path, signature_keys, tags,
                               force_graph_mode_calibration, dataset_file_map),
        error_msg=f'Failed to run calibration on model "{saved_model_path}", signature_keys: {signature_keys}, tags: {tags}.')
Runs calibration and adds calibration statistics to the exported model.

Args:
    saved_model_path: Path to the SavedModel to run calibration.
    signature_keys: List of signature keys corresponding to SignatureDefs to run calibration on.
    tags: A set of tags that identify the MetaGraphDef.
    force_graph_mode_calibration: If True, runs the calibration in graph mode.
    representative_dataset_file_map_serialized: Signature key -> `RepresentativeDatasetFile` mapping for running the calibration step. Each dataset file stores the representative dataset for the function matching the signature key.

Returns:
    The error message if the function raises an exception, `None` otherwise.
github-repos