Columns: code (string, lengths 20–4.93k) · docstring (string, lengths 33–1.27k) · source (string, 3 classes)
def create_korobov_samples(order, dim, base=17797):
    values = numpy.empty(dim)
    values[0] = 1
    for idx in range(1, dim):
        values[idx] = (base * values[idx - 1]) % (order + 1)
    grid = numpy.mgrid[:dim, :order + 1]
    out = values[grid[0]] * (grid[1] + 1) / (order + 1.0) % 1.0
    return out[:, :order]
Create Korobov lattice samples. Args: order (int): The order of the Korobov lattice. Defines the number of samples. dim (int): The number of dimensions in the output. base (int): The number base used to calculate the distribution of values. Returns (numpy.ndarray): Korobov lattice with ``shape == (dim, order)``
codesearchnet
def action(elem, doc): if isinstance(elem, pf.CodeBlock): doc.listings_counter += 1 elems = [elem] if 'hide' not in elem.classes else [] if 'file' in elem.attributes: elem.text = read_file(elem.attributes['file']) filename = trimpath(elem.attributes) prefix = pf.Emph(pf.Str('File:')) if 'exec' in elem.classes: if 'interactive' in elem.classes or elem.text[:4] == '>>> ': elem.text = execute_interactive_code(elem, doc) else: result = execute_code_block(elem, doc) if 'hideimports' in elem.classes: elem.text = remove_import_statements(elem.text) if 'plt' in elem.attributes or 'plt' in elem.classes: doc.plot_found = True result = maybe_center_plot(result) block = pf.RawBlock(result, format='latex') else: block = pf.CodeBlock(result, classes=['changelog']) elems += [pf.Para(pf.Emph(pf.Str('Output:'))), block] if 'lines' in elem.attributes: elem.text = filter_lines(elem.text, elem.attributes['lines']) label = elem.attributes.get('label', f'cl:{doc.listings_counter}') if 'caption' in elem.attributes.keys(): doc.caption_found = True cap = pf.convert_text(elem.attributes['caption'], output_format='latex') if 'shortcaption' in elem.attributes.keys(): shortcap = pf.convert_text(elem.attributes['shortcaption'], output_format='latex') else: shortcap = cap if 'file' in elem.attributes.keys(): cap += pf.convert_text(f' (`{filename}`)', output_format='latex') elems = make_codelisting(elems, cap, label, shortcaption=shortcap, above='capbelow' not in elem.classes) elif 'caption' in elem.classes: doc.caption_found = True cap = '' if 'file' in elem.attributes.keys(): cap = pf.convert_text(f'`{filename}`', output_format='latex') elems = make_codelisting(elems, cap, label, above='capbelow' not in elem.classes) else: if 'file' in elem.attributes.keys(): elems.insert(0, pf.Para(prefix, pf.Space, pf.Code(filename))) return elems
Processes pf.CodeBlocks. For details and a specification of how each command should behave, check the example files (especially the md and pdf)! Args: elem: The element to process. doc: The document. Returns: A changed element or None.
juraj-google-style
def list_tensors(self, args, screen_info=None): _ = screen_info parsed = self._arg_parsers['list_tensors'].parse_args(args) output = [] filter_strs = [] if parsed.op_type_filter: op_type_regex = re.compile(parsed.op_type_filter) filter_strs.append('Op type regex filter: "%s"' % parsed.op_type_filter) else: op_type_regex = None if parsed.node_name_filter: node_name_regex = re.compile(parsed.node_name_filter) filter_strs.append('Node name regex filter: "%s"' % parsed.node_name_filter) else: node_name_regex = None output = debugger_cli_common.RichTextLines(filter_strs) output.append('') if parsed.tensor_filter: try: filter_callable = self.get_tensor_filter(parsed.tensor_filter) except ValueError: output = cli_shared.error('There is no tensor filter named "%s".' % parsed.tensor_filter) _add_main_menu(output, node_name=None, enable_list_tensors=False) return output data_to_show = self._debug_dump.find(filter_callable, exclude_node_names=parsed.filter_exclude_node_names) else: if parsed.filter_exclude_node_names: raise ValueError('The flag --filter_exclude_node_names is valid only when the flag -f or --tensor_filter is used.') data_to_show = self._debug_dump.dumped_tensor_data max_timestamp_width, max_dump_size_width, max_op_type_width = self._measure_tensor_list_column_widths(data_to_show) data_to_show = self._sort_dump_data_by(data_to_show, parsed.sort_by, parsed.reverse) output.extend(self._tensor_list_column_heads(parsed, max_timestamp_width, max_dump_size_width, max_op_type_width)) dump_count = 0 for dump in data_to_show: if node_name_regex and (not node_name_regex.match(dump.node_name)): continue if op_type_regex: op_type = self._debug_dump.node_op_type(dump.node_name) if not op_type_regex.match(op_type): continue rel_time = (dump.timestamp - self._debug_dump.t0) / 1000.0 dump_size_str = cli_shared.bytes_to_readable_str(dump.dump_size_bytes) dumped_tensor_name = '%s:%d' % (dump.node_name, dump.output_slot) op_type = self._debug_dump.node_op_type(dump.node_name) line = '[%.3f]' % rel_time line += ' ' * (max_timestamp_width - len(line)) line += dump_size_str line += ' ' * (max_timestamp_width + max_dump_size_width - len(line)) line += op_type line += ' ' * (max_timestamp_width + max_dump_size_width + max_op_type_width - len(line)) line += dumped_tensor_name output.append(line, font_attr_segs=[(len(line) - len(dumped_tensor_name), len(line), debugger_cli_common.MenuItem('', 'pt %s' % dumped_tensor_name))]) dump_count += 1 if parsed.tensor_filter: output.prepend(['%d dumped tensor(s) passing filter "%s":' % (dump_count, parsed.tensor_filter)]) else: output.prepend(['%d dumped tensor(s):' % dump_count]) _add_main_menu(output, node_name=None, enable_list_tensors=False) return output
Command handler for list_tensors. List tensors dumped during debugged Session.run() call. Args: args: Command-line arguments, excluding the command prefix, as a list of str. screen_info: Optional dict input containing screen information such as cols. Returns: Output text lines as a RichTextLines object. Raises: ValueError: If `--filter_exclude_node_names` is used without `-f` or `--tensor_filter` being used.
github-repos
def _get_language_modeling_inputs(filename, delimiter="\n", repeat=1, append_space_to_final_punctionation=True): with tf.gfile.Open(filename) as f: text = f.read() inputs = text.split(delimiter) if not inputs[-1]: inputs.pop() inputs *= repeat if append_space_to_final_punctionation: inputs = [ s + " " if s and s[-1] in string.punctuation else s for s in inputs] return inputs
Read a file of partial texts to continue. The purpose of append_space_to_final_punctionation is that SubwordTokenizer groups punctuation and the ensuing space in the same token. Adding a space causes the token to be completed. Args: filename: a string delimiter: a string repeat: an integer - we repeat the entire file that many times. append_space_to_final_punctionation: a boolean Returns: a list of strings
juraj-google-style
def CollectFromKnowledgeBase(cls, knowledge_base): for preprocess_plugin in cls._knowledge_base_plugins.values(): logger.debug('Running knowledge base preprocessor plugin: {0:s}'.format( preprocess_plugin.__class__.__name__)) try: preprocess_plugin.Collect(knowledge_base) except errors.PreProcessFail as exception: logger.warning( 'Unable to collect knowledge base value with error: {0!s}'.format( exception))
Collects values into the knowledge base using the registered preprocessor plugins. Args: knowledge_base (KnowledgeBase): knowledge base to fill with preprocessing information.
juraj-google-style
def scale(reader, writer, column, start, stop, multiple):
    for i, row in enumerate(reader):
        if i >= start and i <= stop:
            row[column] = type(multiple)(row[column]) * multiple
        writer.appendRecord(row)
Multiplies a value over a range of rows. Args: reader: A FileRecordStream object with input data. writer: A FileRecordStream object to write output data to. column: The column of data to modify. start: The first row in the range to modify. stop: The last row in the range to modify. multiple: The value to scale/multiply by.
codesearchnet
def combine_samples(self, md5_list, filename, type_tag):
    total_bytes = ''
    for md5 in md5_list:
        total_bytes += self.get_sample(md5)['sample']['raw_bytes']
        self.remove_sample(md5)
    return self.store_sample(total_bytes, filename, type_tag)
Combine samples together. This may have various use cases; the most significant is when a number of sample 'chunks' have been uploaded and now need to be combined into a single sample. Args: md5_list: The list of md5s to combine, order matters! filename: name of the file (used purely as metadata, not for lookup) type_tag: ('exe','pcap','pdf','json','swf', or ...) Returns: the computed md5 of the combined samples
codesearchnet
def get_stdout(self, workflow_id, task_id):
    url = '%(wf_url)s/%(wf_id)s/tasks/%(task_id)s/stdout' % {
        'wf_url': self.workflows_url, 'wf_id': workflow_id, 'task_id': task_id}
    r = self.gbdx_connection.get(url)
    r.raise_for_status()
    return r.text
Get stdout for a particular task. Args: workflow_id (str): Workflow id. task_id (str): Task id. Returns: Stdout of the task (string).
codesearchnet
def convert_maxpool3(params, w_name, scope_name, inputs, layers, weights, names): print('Converting pooling ...') if (names == 'short'): tf_name = ('P' + random_string(7)) elif (names == 'keep'): tf_name = w_name else: tf_name = (w_name + str(random.random())) if ('kernel_shape' in params): (height, width, depth) = params['kernel_shape'] else: (height, width, depth) = params['kernel_size'] if ('strides' in params): (stride_height, stride_width, stride_depth) = params['strides'] else: (stride_height, stride_width, stride_depth) = params['stride'] if ('pads' in params): (padding_h, padding_w, padding_d, _, _) = params['pads'] else: (padding_h, padding_w, padding_d) = params['padding'] input_name = inputs[0] if ((padding_h > 0) and (padding_w > 0) and (padding_d > 0)): padding_name = (tf_name + '_pad') padding_layer = keras.layers.ZeroPadding3D(padding=(padding_h, padding_w, padding_d), name=padding_name) layers[padding_name] = padding_layer(layers[inputs[0]]) input_name = padding_name pooling = keras.layers.MaxPooling3D(pool_size=(height, width, depth), strides=(stride_height, stride_width, stride_depth), padding='valid', name=tf_name) layers[scope_name] = pooling(layers[input_name])
Convert 3d Max pooling. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
codesearchnet
def from_file(cls, weafile, timestep=1, is_leap_year=False): assert os.path.isfile(weafile), 'Failed to find {}'.format(weafile) location = Location() with open(weafile, readmode) as weaf: first_line = weaf.readline() assert first_line.startswith('place'), 'Failed to find place in header. {} is not a valid wea file.'.format(weafile) location.city = ' '.join(first_line.split()[1:]) location.latitude = float(weaf.readline().split()[(- 1)]) location.longitude = (- float(weaf.readline().split()[(- 1)])) location.time_zone = ((- int(weaf.readline().split()[(- 1)])) / 15) location.elevation = float(weaf.readline().split()[(- 1)]) weaf.readline() direct_normal_irradiance = [] diffuse_horizontal_irradiance = [] for line in weaf: (dirn, difh) = [int(v) for v in line.split()[(- 2):]] direct_normal_irradiance.append(dirn) diffuse_horizontal_irradiance.append(difh) return cls.from_values(location, direct_normal_irradiance, diffuse_horizontal_irradiance, timestep, is_leap_year)
Create wea object from a wea file. Args: weafile: Full path to wea file. timestep: An optional integer to set the number of time steps per hour. Default is 1 for one value per hour. If the wea file has a time step smaller than an hour, adjust this input accordingly. is_leap_year: A boolean to indicate if values are representing a leap year. Default is False.
codesearchnet
def operate_multi(self, points):
    points = np.array(points)
    affine_points = np.concatenate(
        [points, np.ones(points.shape[:-1] + (1,))], axis=-1)
    return np.inner(affine_points, self.affine_matrix)[..., :-1]
Apply the operation on a list of points. Args: points: List of Cartesian coordinates Returns: Numpy array of coordinates after operation
codesearchnet
def get_variant_by_name(self, name): try: geno, i = self.bed.get_geno_marker(name, return_index=True) except ValueError: if name in self.bed.get_duplicated_markers(): return [ self.get_variant_by_name(dup_name).pop() for dup_name in self.bed.get_duplicated_markers()[name] ] else: logging.variant_name_not_found(name) return [] else: info = self.bim.iloc[i, :] return [Genotypes( Variant(info.name, CHROM_INT_TO_STR[info.chrom], info.pos, [info.a1, info.a2]), self._normalize_missing(geno), reference=info.a2, coded=info.a1, multiallelic=info.multiallelic, )]
Get the genotype of a marker using its name. Args: name (str): The name of the marker. Returns: list: A list of Genotypes (only one for PyPlink, see note below). Note ==== From PyPlink version 1.3.2 and onwards, each name is unique in the dataset. Hence, we can use the 'get_geno_marker' function and be sure only one variant is returned.
juraj-google-style
def wait_running(self, timeout=None):
    flag = self._running.wait(timeout)
    if flag is False:
        raise TimeoutExpiredError('Timeout waiting for thread to start running')
Wait for the thread to pass control to its routine. Args: timeout (float): The maximum amount of time to wait
codesearchnet
def __init__(self, status=enums.ResultStatus.OPERATION_FAILED, reason=enums.ResultReason.GENERAL_FAILURE, message='A general failure occurred.'): super(KmipError, self).__init__(message) self.status = status self.reason = reason
Create a KmipError exception. Args: status (ResultStatus): An enumeration detailing the result outcome. reason (ResultReason): An enumeration giving the status rationale. message (string): A string containing more information about the error.
juraj-google-style
def validate(data): text = data.get('text') if ((not isinstance(text, _string_types)) or (len(text) == 0)): raise ValueError('text field is required and should not be empty') if (('markdown' in data) and (not (type(data['markdown']) is bool))): raise ValueError('markdown field should be bool') if ('attachments' in data): if (not isinstance(data['attachments'], (list, tuple))): raise ValueError('attachments field should be list or tuple') for attachment in data['attachments']: if (('text' not in attachment) and ('title' not in attachment)): raise ValueError('text or title is required in attachment') return True
Validates incoming data Args: data(dict): the incoming data Returns: True if the data is valid Raises: ValueError: the data is not valid
codesearchnet
def refreshSkypeToken(self):
    self.tokens['skype'], self.tokenExpiry['skype'] = SkypeRefreshAuthProvider(self).auth(self.tokens['skype'])
    self.getRegToken()
Take the existing Skype token and refresh it, to extend the expiry time without other credentials. Raises: .SkypeAuthException: if the login request is rejected .SkypeApiException: if the login form can't be processed
codesearchnet
def generate(self, text):
    if not text:
        raise Exception("No text to speak")
    if len(text) >= self.MAX_CHARS:
        raise Exception("Number of characters must be less than 2000")
    params = self.__params.copy()
    params["text"] = text
    self._data = requests.get(self.TTS_URL, params=params, stream=False).iter_content()
Try to get the generated file. Args: text: The text that you want to generate.
juraj-google-style
def check_filepath(self, path, filename):
    settings_path = os.path.join(path, filename)
    if not os.path.exists(settings_path) or not os.path.isfile(settings_path):
        msg = 'Unable to find settings file: {}'
        raise SettingsBackendError(msg.format(settings_path))
    return settings_path
Check and return the final filepath to settings Args: path (str): Directory path where to search for settings file. filename (str): Filename to use to search for settings file. Raises: boussole.exceptions.SettingsBackendError: If determined filepath does not exists or is a directory. Returns: string: Settings file path, joining given path and filename.
codesearchnet
def is_subset(self, other):
    if isinstance(other, _basebag):
        for elem, count in self.counts():
            if not count <= other.count(elem):
                return False
    else:
        for elem in self:
            if self.count(elem) > 1 or elem not in other:
                return False
    return True
Check that every element in self has a count <= in other. Args: other (Set)
codesearchnet
def grid_reload_from_ids(oargrid_jobids):
    gk = get_api_client()
    jobs = []
    for site, job_id in oargrid_jobids:
        jobs.append(gk.sites[site].jobs[job_id])
    return jobs
Reload all running or pending jobs of Grid'5000 from their ids Args: oargrid_jobids (list): list of ``(site, oar_jobid)`` identifying the jobs on each site Returns: The list of python-grid5000 jobs retrieved
codesearchnet
def EncryptPrivateKey(self, decrypted):
    aes = AES.new(self._master_key, AES.MODE_CBC, self._iv)
    return aes.encrypt(decrypted)
Encrypt the provided plaintext with the initialized private key. Args: decrypted (byte string): the plaintext to be encrypted. Returns: bytes: the ciphertext.
juraj-google-style
def vocabulary_size(self):
    if tf.executing_eagerly():
        return int(self.lookup_table.size().numpy()) + self._token_start_index()
    else:
        return self.lookup_table.size() + self._token_start_index()
Gets the current size of the layer's vocabulary. Returns: The integer size of the vocabulary, including optional mask and oov indices.
github-repos
def build_current_graph(): graph = SQLStateGraph() for (app_name, config) in apps.app_configs.items(): try: module = import_module('.'.join((config.module.__name__, SQL_CONFIG_MODULE))) sql_items = module.sql_items except (ImportError, AttributeError): continue for sql_item in sql_items: graph.add_node((app_name, sql_item.name), sql_item) for dep in sql_item.dependencies: graph.add_lazy_dependency((app_name, sql_item.name), dep) graph.build_graph() return graph
Read current state of SQL items from the current project state. Returns: (SQLStateGraph) Current project state graph.
codesearchnet
def rename(self, source_file_names, destination_file_names): err_msg = 'source_file_names and destination_file_names should be equal in length' assert len(source_file_names) == len(destination_file_names), err_msg gcs_batches = [] gcs_current_batch = [] for src, dest in zip(source_file_names, destination_file_names): gcs_current_batch.append((src, dest)) if len(gcs_current_batch) == self.CHUNK_SIZE: gcs_batches.append(gcs_current_batch) gcs_current_batch = [] if gcs_current_batch: gcs_batches.append(gcs_current_batch) exceptions = {} for batch in gcs_batches: copy_statuses = self._gcsIO().copy_batch(batch) copy_succeeded = {} delete_targets = [] for src, dest, exception in copy_statuses: if exception: exceptions[src, dest] = exception else: copy_succeeded[src] = dest delete_targets.append(src) delete_statuses = self._gcsIO().delete_batch(delete_targets) for src, exception in delete_statuses: if exception: dest = copy_succeeded[src] exceptions[src, dest] = exception if exceptions: raise BeamIOError('Rename operation failed', exceptions)
Rename the files at the source list to the destination list. Source and destination lists should be of the same size. Args: source_file_names: List of file paths that need to be moved destination_file_names: List of destination_file_names for the files Raises: ``BeamIOError``: if any of the rename operations fail
github-repos
def _get_imports_for_module(module: str, output_package: str, symbols_by_module: Mapping[str, set[_Entrypoint]], generated_imports_by_module: Mapping[str, set[str]], file_prefixes_to_strip: Sequence[str], module_prefix: str, use_lazy_loading: bool, subpackage_rewrite: Optional[str]) -> str: content = '' symbol_imports = list(symbols_by_module[module]) symbol_imports = sorted(symbol_imports, key=lambda s: f'{s.exported_symbol.file_name}:{s.name}') generated_imports = sorted(generated_imports_by_module[module]) for imp in generated_imports: if subpackage_rewrite: imp = imp.replace(output_package, subpackage_rewrite) last_dot = imp.rfind('.') if use_lazy_loading: content += f" '{imp[last_dot + 1:]}': ('', '{imp}'),\n" else: content += f'from {imp[:last_dot]} import {imp[last_dot + 1:]}\n' for s in symbol_imports: content += f'{s.get_import(file_prefixes_to_strip, module_prefix, use_lazy_loading=use_lazy_loading)}\n' return content
Returns the imports for a module. Args: module: The module to get imports for. output_package: The package to use for the imports. symbols_by_module: The symbols that should be exposed by each module. generated_imports_by_module: The sub-modules that should be exposed by each module. file_prefixes_to_strip: The prefixes to strip from the file names of the imports. module_prefix: A prefix to add to the non-generated imports. use_lazy_loading: Whether to use lazy loading or not. subpackage_rewrite: The subpackage to use for the imports.
github-repos
def parse_gptl(file_path, var_list): timing_result = dict() if os.path.isfile(file_path): with open(file_path, 'r') as f: for var in var_list: for line in f: if var in line: timing_result[var] = float(line.split()[4])/int(line.split()[2]) break return timing_result
Read a GPTL timing file and extract some data. Args: file_path: the path to the GPTL timing file var_list: a list of strings to look for in the file Returns: A dict containing key-value pairs of the requested variables and the times associated with them
juraj-google-style
def register(self, user_dict):
    endpoint = os.path.join(self._config.get('napps', 'api'), 'users', '')
    res = self.make_request(endpoint, method='POST', json=user_dict)
    return res.content.decode('utf-8')
Send a user_dict to the NApps server using a POST request. Args: user_dict(dict): Dictionary with user attributes. Returns: result(string): The response from the NApps server.
juraj-google-style
def add_to_submission(self, submission_id, submission_objects): LOG.info("Adding new variants and case data to clinvar submission '%s'", submission_id) for var_obj in submission_objects[0]: try: result = self.clinvar_collection.insert_one(var_obj) self.clinvar_submission_collection.update_one({'_id': submission_id}, {'$push': {'variant_data': str(result.inserted_id)}}, upsert=True) except pymongo.errors.DuplicateKeyError: LOG.error('Attepted to insert a clinvar variant which is already in DB!') if submission_objects[1]: for case_obj in submission_objects[1]: try: result = self.clinvar_collection.insert_one(case_obj) self.clinvar_submission_collection.update_one({'_id': submission_id}, {'$push': {'case_data': str(result.inserted_id)}}, upsert=True) except pymongo.errors.DuplicateKeyError: LOG.error('One or more casedata object is already present in clinvar collection!') updated_submission = self.clinvar_submission_collection.find_one_and_update({'_id': submission_id}, {'$set': {'updated_at': datetime.now()}}, return_document=pymongo.ReturnDocument.AFTER) return updated_submission
Adds submission_objects to the clinvar collection and updates the corresponding submission object with their ids. Args: submission_id(str): id of the submission to be updated submission_objects(tuple): a tuple of 2 elements corresponding to a list of variants and a list of case data objects to add to the submission Returns: updated_submission(obj): an open clinvar submission object, updated
codesearchnet
def __init__(self, input_energy): self._energy = input_energy self._num_bits = input_energy.num_bits self._parameters = dict(input_energy=input_energy) self._index_proposal_probs = tf.Variable([0.0] * self._num_bits, trainable=False) self._index_proposal_dist = tfp.distributions.Categorical(probs=self._index_proposal_probs) self._eye_bool = tf.eye(self._num_bits, dtype=tf.bool)
Initializes a GibbsWithGradientsKernel. Args: input_energy: The parameterized energy function which helps define the acceptance probabilities of the Markov chain.
github-repos
def get_nets_radb(self, response, is_http=False): nets = [] if is_http: regex = 'route(?:6)?:[^\\S\\n]+(?P<val>.+?)<br>' else: regex = '^route(?:6)?:[^\\S\\n]+(?P<val>.+|.+)$' for match in re.finditer(regex, response, re.MULTILINE): try: net = copy.deepcopy(BASE_NET) net['cidr'] = match.group(1).strip() net['start'] = match.start() net['end'] = match.end() nets.append(net) except ValueError: pass return nets
The function for parsing network blocks from ASN origin data. Args: response (:obj:`str`): The response from the RADB whois/http server. is_http (:obj:`bool`): If the query is RADB HTTP instead of whois, set to True. Defaults to False. Returns: list: A list of network block dictionaries :: [{ 'cidr' (str) - The assigned CIDR 'start' (int) - The index for the start of the parsed network block 'end' (int) - The index for the end of the parsed network block }]
codesearchnet
def dapply(self, fn, pairwise=False, symmetric=True, diagonal=False, block=None, **kwargs): search_keys = [k for (k, v) in kwargs.items() if (isinstance(v, list) and (len(v) > 1))] functions = util.make_list(fn) search = list(product(functions, util.dict_product(kwargs))) results = [] for (fn, kw) in search: if (not pairwise): r = self.index.to_series().apply((lambda step: fn(step, **kw))) else: r = apply_pairwise(self, fn, symmetric=symmetric, diagonal=diagonal, block=block, **kw) name = ([] if (len(functions) == 1) else [fn.__name__]) name += util.dict_subset(kw, search_keys).values() if isinstance(r, pd.DataFrame): columns = pd.MultiIndex.from_tuples([tuple((name + util.make_list(c))) for c in r.columns]) r.columns = columns else: r.name = tuple(name) results.append(r) if (len(results) > 1): result = pd.concat(results, axis=1) column_names = ([] if (len(functions) == 1) else [None]) column_names += search_keys column_names += ([None] * (len(result.columns.names) - len(column_names))) result.columns.names = column_names return StepFrame(result) else: result = results[0] if isinstance(result, pd.DataFrame): return StepFrame(result) else: result.name = functions[0].__name__ return StepSeries(result)
Apply function to each step object in the index Args: fn: function to apply. If a list, then each function is applied pairwise: whether to apply the function to pairs of steps symmetric, diagonal, block: passed to apply_pairwise when pairwise=True kwargs: keyword arguments to pass to each function. Arguments with a list value are grid searched using util.dict_product. Returns: a StepFrame or StepSeries
codesearchnet
def model_custom_sink(simplekv, KVs, final_table_name_no_ptransform, final_table_name_with_ptransform): final_table_name = final_table_name_no_ptransform with beam.Pipeline(options=PipelineOptions()) as pipeline: kvs = pipeline | 'CreateKVs' >> beam.Create(KVs) kvs | 'WriteToSimpleKV' >> beam.io.Write(SimpleKVSink(simplekv, 'http: final_table_name = final_table_name_with_ptransform with beam.Pipeline(options=PipelineOptions()) as pipeline: kvs = pipeline | 'CreateKVs' >> beam.core.Create(KVs) kvs | 'WriteToSimpleKV' >> WriteToKVSink(simplekv, 'http:
Demonstrates creating a new custom sink and using it in a pipeline. Uses the new sink in an example pipeline. Additionally demonstrates how a sink should be implemented using a ``PTransform``. This is the recommended way to develop sinks that are to be distributed to a large number of end users. This method runs two pipelines. (1) A pipeline that uses ``SimpleKVSink`` directly using the ``df.Write`` transform. (2) A pipeline that uses a custom ``PTransform`` that wraps ``SimpleKVSink``. Args: simplekv: an object that mocks the key-value storage. KVs: the set of key-value pairs to be written in the example pipeline. final_table_name_no_ptransform: the prefix of final set of tables to be created by the example pipeline that uses ``SimpleKVSink`` directly. final_table_name_with_ptransform: the prefix of final set of tables to be created by the example pipeline that uses a ``PTransform`` that wraps ``SimpleKVSink``.
github-repos
def hgnc_genes(self, hgnc_symbol, build='37', search=False): LOG.debug("Fetching genes with symbol %s" % hgnc_symbol) if search: full_query = self.hgnc_collection.find({ '$or': [ {'aliases': hgnc_symbol}, {'hgnc_id': int(hgnc_symbol) if hgnc_symbol.isdigit() else None}, ], 'build': build }) if full_query.count() != 0: return full_query return self.hgnc_collection.find({ 'aliases': {'$regex': hgnc_symbol, '$options': 'i'}, 'build': build }) return self.hgnc_collection.find({'build': build, 'aliases': hgnc_symbol})
Fetch all hgnc genes that match a hgnc symbol Check both hgnc_symbol and aliases Args: hgnc_symbol(str) build(str): The build in which to search search(bool): if partial searching should be used Returns: result(pymongo.Cursor): the matching gene documents
juraj-google-style
def filter_pyfqn(cls, value, relative_to=0): def collect_packages(element, packages): parent = element.eContainer() if parent: collect_packages(parent, packages) packages.append(element.name) packages = [] collect_packages(value, packages) if relative_to < 0 or relative_to > len(packages): raise ValueError('relative_to not in range of number of packages') fqn = '.'.join(packages[relative_to:]) if relative_to: fqn = '.' + fqn return cls.module_path_map.get(fqn, fqn)
Returns Python form of fully qualified name. Args: relative_to: If greater than 0, the returned path is relative to the first n directories.
juraj-google-style
def _AddOption(self, name):
    if name in [option.name for option in self.options]:
        raise TextFSMTemplateError('Duplicate option "%s"' % name)
    try:
        option = self._options_cls.GetOption(name)(self)
    except AttributeError:
        raise TextFSMTemplateError('Unknown option "%s"' % name)
    self.options.append(option)
Add an option to this Value. Args: name: (str), the name of the Option to add. Raises: TextFSMTemplateError: If option is already present or the option does not exist.
juraj-google-style
def end_block(self, request_end_block): self.abort_if_abci_chain_is_not_synced() chain_shift = 0 if self.chain is None else self.chain['height'] height = request_end_block.height + chain_shift self.new_height = height logger.debug(f'Updating pre-commit state: {self.new_height}') pre_commit_state = dict(height=self.new_height, transactions=self.block_txn_ids) self.bigchaindb.store_pre_commit_state(pre_commit_state) block_txn_hash = calculate_hash(self.block_txn_ids) block = self.bigchaindb.get_latest_block() if self.block_txn_ids: self.block_txn_hash = calculate_hash([block['app_hash'], block_txn_hash]) else: self.block_txn_hash = block['app_hash'] validator_update = Election.process_block(self.bigchaindb, self.new_height, self.block_transactions) return ResponseEndBlock(validator_updates=validator_update)
Calculate block hash using transaction ids and previous block hash to be stored in the next block. Args: request_end_block: the EndBlock request, which carries the new height of the chain.
juraj-google-style
def list_groups(name):
    if six.PY2:
        name = _to_unicode(name)
    ugrp = set()
    try:
        user = info(name)['groups']
    except KeyError:
        return False
    for group in user:
        ugrp.add(group.strip(' *'))
    return sorted(list(ugrp))
Return a list of groups the named user belongs to Args: name (str): The user name for which to list groups Returns: list: A list of groups to which the user belongs CLI Example: .. code-block:: bash salt '*' user.list_groups foo
juraj-google-style
def convert(self, vroot, entry_variables): self.graph_info = GraphInfo(vroot) self.entry_variables = entry_variables with nn.parameter_scope(self.name): for t, func in enumerate(self.graph_info.funcs): if func.name in self.inner_prod_functions: inner_prod_func = func o = self._fixed_point_weight_conversion(inner_prod_func) continue o = self._identity_conversion(func) self.end_variable = o if self.call_forward: o.forward(clear_buffer=True) return self.end_variable
All functions are replaced with the same `new` function. Args: vroot (:obj:`Variable`): NNabla Variable entry_variables (:obj:`Variable`): Entry variable from which the conversion starts.
juraj-google-style
def get_index_from_alias(alias_name, index_client=None):
    index_client = index_client or indices_client()
    if not index_client.exists_alias(name=alias_name):
        return None
    return list(index_client.get_alias(name=alias_name).keys())[0]
Retrieve the base index name from an alias Args: alias_name (str) Name of the alias index_client (Elasticsearch.IndicesClient) an Elasticsearch index client. Optional, will create one if not given Returns: (str) Name of index
codesearchnet
def format(self, info_dict, delimiter='/'): def dfs(father, path, acc): if isinstance(father, list): for child in father: dfs(child, path, acc) elif isinstance(father, collections.Mapping): for child in (sorted(father.items(), key=itemgetter(0)),): dfs(child, path, acc) elif isinstance(father, tuple): path = copy.copy(path) path.append(father[0]) dfs(father[1], path, acc) else: path[(- 1)] = '{}: {}'.format(path[(- 1)], str(father)) acc.append(delimiter.join(path)) result = [] dfs((info_dict.get('Prefix') or info_dict), [], result) return '\n'.join(result)
This formatter will take a data structure that represents a tree and will print all the paths from the root to the leaves. In our case it will print each value and the keys needed to get to it, for example: vm0: net: lago memory: 1024 will be output as: vm0/net/lago vm0/memory/1024 Args: info_dict (dict): information to reformat delimiter (str): a delimiter for the path components Returns: str: String representing the formatted info
codesearchnet
def retrieve_taf(station_icao) -> typing.Tuple[typing.Union[str, None], typing.Union[str, None]]: url = _BASE_TAF_URL.format(station=station_icao) with requests.get(url) as resp: if not resp.ok: return f'unable to obtain TAF for station {station_icao}\n' \ f'Got to "http: f'for a list of valid stations', None return None, resp.content.decode().split('\n')[1]
Retrieves a TAF string from an online database Args: station_icao: ICAO of the station Returns: tuple of error, metar_str
juraj-google-style
def renderJsonReadsSince(self, timestamp, meter): result = "" try: connection = sqlite3.connect(self.m_connection_string) connection.row_factory = self.dict_factory select_cursor = connection.cursor() select_cursor.execute("select * from Meter_Reads where " + Field.Time_Stamp + " > " + str(timestamp) + " and " + Field.Meter_Address + "= '" + meter + "';") reads = select_cursor.fetchall() result = json.dumps(reads, indent=4) except: ekm_log(traceback.format_exc(sys.exc_info())) return result
Simple since Time_Stamp query returned as JSON records. Args: timestamp (int): Epoch time in seconds. meter (str): 12 character meter address to query Returns: str: JSON rendered read records.
juraj-google-style
def _ConvertBool(value, require_str): if require_str: if (value == 'true'): return True elif (value == 'false'): return False else: raise ParseError('Expected "true" or "false", not {0}.'.format(value)) if (not isinstance(value, bool)): raise ParseError('Expected true or false without quotes.') return value
Convert a boolean value. Args: value: A scalar value to convert. require_str: If True, value must be a str. Returns: The bool parsed. Raises: ParseError: If a boolean value couldn't be consumed.
codesearchnet
def url_assembler(query_string, no_redirect=0, no_html=0, skip_disambig=0):
    params = [('q', query_string.encode("utf-8")), ('format', 'json')]
    if no_redirect:
        params.append(('no_redirect', 1))
    if no_html:
        params.append(('no_html', 1))
    if skip_disambig:
        params.append(('skip_disambig', 1))
    return '/?' + urlencode(params)
Assembler of parameters for building request query. Args: query_string: Query to be passed to DuckDuckGo API. no_redirect: Skip HTTP redirects (for !bang commands). Default - False. no_html: Remove HTML from text, e.g. bold and italics. Default - False. skip_disambig: Skip disambiguation (D) Type. Default - False. Returns: A “percent-encoded” string which is used as a part of the query.
juraj-google-style
def read_header(self, file_handle, nextdata_offset=0): header = {'FCS format': file_handle.read(6)} file_handle.read(4) for field in ('text start', 'text end', 'data start', 'data end', 'analysis start', 'analysis end'): s = file_handle.read(8) try: field_value = int(s) except ValueError: field_value = 0 header[field] = field_value + nextdata_offset for k in ('text start', 'text end'): if header[k] == 0: raise ValueError(u'The FCS file "{}" seems corrupted. (Parser cannot locate ' u'information about the "{}" segment.)'.format(self.path, k)) elif header[k] > self._file_size: raise ValueError(u'The FCS file "{}" is corrupted. "{}" segment ' u'is larger than file size'.format(self.path, k)) else: pass self._data_start = header['data start'] self._data_end = header['data start'] if header['analysis end'] - header['analysis start'] != 0: warnings.warn(u'There appears to be some information in the ANALYSIS segment of file ' u'{0}. However, it might not be read correctly.'.format(self.path)) self.annotation['__header__'] = header
Read the header of the FCS file. The header specifies where the annotation, data and analysis are located inside the binary file. Args: file_handle: buffer containing FCS file. nextdata_offset: byte offset of a set header from file start specified by $NEXTDATA
juraj-google-style
def __init__(self, default: typing.Any=MISSING_VALUE, annotation: typing.Any=MISSING_VALUE, transform: typing.Optional[typing.Callable[[typing.Any], typing.Any]]=None, frozen: bool=False): super().__init__(object, default, transform, is_noneable=True, frozen=frozen) self._annotation = annotation
Constructor. Args: default: (Optional) default value of this spec. annotation: (Optional) externally provided type annotation. transform: (Optional) user-defined function to be called on the input of `apply`. It could be used as a type converter or a custom validator which may raise errors. frozen: If True, values other than the default value are not acceptable.
github-repos
def apply_sql(query: str, output_name: Optional[str], found: Dict[str, beam.PCollection], run: bool=True) -> Tuple[str, Union[PValue, SqlNode], SqlChain]: output_name = _generate_output_name(output_name, query, found) query, sql_source, chain = _build_query_components(query, found, output_name, run) if run: try: output = sql_source | SqlTransform(query) output_name, output = create_var_in_main(output_name, output) _LOGGER.info('The output PCollection variable is %s with element_type %s', output_name, pformat_namedtuple(output.element_type)) return (output_name, output, chain) except (KeyboardInterrupt, SystemExit): raise except: on_error('Error when applying the Beam SQL: %s', traceback.format_exc()) raise else: return (output_name, chain.current, chain)
Applies a SqlTransform with the given sql and queried PCollections. Args: query: The SQL query executed in the magic. output_name: (optional) The output variable name in __main__ module. found: The PCollections with variable names found to be used in the query. run: Whether to prepare the SQL pipeline for a local run or not. Returns: A tuple of values. First str value is the output variable name in __main__ module, auto-generated if not provided. Second value: if run, it's a PValue; otherwise, a SqlNode tracks the SQL without applying it or executing it. Third value: SqlChain is a chain of SqlNodes that have been applied.
github-repos
def add_peer_parser(subparsers, parent_parser):
    parser = subparsers.add_parser(
        'peer',
        help='Displays information about validator peers',
        description="Provides a subcommand to list a validator's peers")
    grand_parsers = parser.add_subparsers(title='subcommands', dest='subcommand')
    grand_parsers.required = True
    add_peer_list_parser(grand_parsers, parent_parser)
Adds argument parser for the peer command Args: subparsers: Add parsers to this subparser object parent_parser: The parent argparse.ArgumentParser object
juraj-google-style
def list_load_balancers(access_token, subscription_id):
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/providers/Microsoft.Network/',
                        '/loadBalancers?api-version=', NETWORK_API])
    return do_get(endpoint, access_token)
List the load balancers in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON body of load balancer list with properties.
codesearchnet
def from_snl(cls, snl):
    hist = []
    for h in snl.history:
        d = h.description
        d['_snl'] = {'url': h.url, 'name': h.name}
        hist.append(d)
    return cls(snl.structure, history=hist)
Create TransformedStructure from SNL. Args: snl (StructureNL): Starting snl Returns: TransformedStructure
juraj-google-style
def sampler(dataframe, modulo, column='client_id', sample_id=42):
    return (
        dataframe
        .withColumn('sampler', udf(lambda key: (crc32(key or '') & 4294967295) % modulo)(column))
        .where('sampler = %s' % sample_id)
        .drop('sampler')
    )
Collect a sample of clients given an input column Filter dataframe based on the modulus of the CRC32 of a given string column matching a given sample_id. if dataframe has already been filtered by sample_id, then modulo should be a multiple of 100, column should be "client_id", and the given sample_id should match the value previously used, optionally plus multiples of 100. Args: dataframe: A Dataframe to be sampled modulo (int): selects a 1/modulo sampling of dataframe column (str): name of a string column to sample on sample_id (int): modulus result to select for sampling Returns: A DataFrame sampled on the given inputs.
codesearchnet
def Append(self, value, timestamp):
    timestamp = self._NormalizeTime(timestamp)
    if self.data and timestamp < self.data[-1][1]:
        raise RuntimeError('Next timestamp must be larger.')
    self.data.append([value, timestamp])
Adds value at timestamp. Values must be added in order of increasing timestamp. Args: value: An observed value. timestamp: The timestamp at which value was observed. Raises: RuntimeError: If timestamp is smaller than the previous timestamp.
codesearchnet
def make_job(name: str = '',
             run_name: str = '',
             num_tasks: int = 0,
             install_script: str = '',
             **kwargs) -> backend.Job:
    return _backend.make_job(name=name, run_name=run_name, num_tasks=num_tasks,
                             install_script=install_script, **kwargs)
Create a job using current backend. Blocks until all tasks are up and initialized. Args: name: name of the job run_name: name of the run (auto-assigned if empty) num_tasks: number of tasks install_script: bash-runnable script **kwargs: Returns: backend.Job
juraj-google-style
def filter_by_conditional_statement(self, statement):
    _filt_values, _filt_datetimes = self._filter_by_statement(statement)
    collection = HourlyDiscontinuousCollection(
        self.header.duplicate(), _filt_values, _filt_datetimes)
    collection._validated_a_period = True
    return collection
Filter the Data Collection based on a conditional statement. Args: statement: A conditional statement as a string (e.g. a > 25 and a%5 == 0). The variable should always be named as 'a' (without quotations). Return: A new Data Collection containing only the filtered data
juraj-google-style
def FindModuleDefiningFlag(self, flagname, default=None): registered_flag = self.FlagDict().get(flagname) if registered_flag is None: return default for module, flags in six.iteritems(self.FlagsByModuleDict()): for flag in flags: if (flag.name == registered_flag.name and flag.short_name == registered_flag.short_name): return module return default
Return the name of the module defining this flag, or default. Args: flagname: Name of the flag to lookup. default: Value to return if flagname is not defined. Defaults to None. Returns: The name of the module which registered the flag with this name. If no such module exists (i.e. no flag with this name exists), we return default.
juraj-google-style
def requestB(self):
    work_context = self.getContext()
    self.setContext('request[v4B]')
    self.m_serial_port.write('2f3f'.decode('hex') + self.m_meter_address + '3031210d0a'.decode('hex'))
    self.m_raw_read_b = self.m_serial_port.getResponse(self.getContext())
    unpacked_read_b = self.unpackStruct(self.m_raw_read_b, self.m_blk_b)
    self.convertData(unpacked_read_b, self.m_blk_b, self.m_kwh_precision)
    self.m_b_crc = self.crcMeterRead(self.m_raw_read_b, self.m_blk_b)
    self.setContext(work_context)
    return self.m_b_crc
Issue a B read on V4 meter. Returns: bool: True if CRC match at end of call.
codesearchnet
def similar_filter_r(self, sentence_list): result_list = [] recursive_list = [] try: self.nlp_base.tokenize(sentence_list[0]) subject_token = self.nlp_base.token result_list.append(sentence_list[0]) if len(sentence_list) > 1: for i in range(len(sentence_list)): if i > 0: self.nlp_base.tokenize(sentence_list[i]) object_token = self.nlp_base.token similarity = self.calculate(subject_token, object_token) if similarity <= self.similarity_limit: recursive_list.append(sentence_list[i]) if len(recursive_list) > 0: result_list.extend(self.similar_filter_r(recursive_list)) except IndexError: result_list = sentence_list return result_list
Filter mutually similar sentences. Args: sentence_list: The list of sentences. Returns: The list of filtered sentences.
juraj-google-style
def redirect_stdout(new_stdout):
    old_stdout, sys.stdout = sys.stdout, new_stdout
    try:
        yield None
    finally:
        sys.stdout = old_stdout
Redirect the stdout Args: new_stdout (io.StringIO): New stdout to use instead
codesearchnet
def _ParseStorageMediaImageOptions(self, options): self._partitions = getattr(options, 'partitions', None) if self._partitions: try: self._ParseVolumeIdentifiersString(self._partitions, prefix='p') except ValueError: raise errors.BadConfigOption('Unsupported partitions') self._volumes = getattr(options, 'volumes', None) if self._volumes: try: self._ParseVolumeIdentifiersString(self._volumes, prefix='apfs') except ValueError: raise errors.BadConfigOption('Unsupported volumes')
Parses the storage media image options. Args: options (argparse.Namespace): command line arguments. Raises: BadConfigOption: if the options are invalid.
codesearchnet
def __init__(self, paths=None, separator='/'):
    if not paths:
        raise errors.FormatError('Missing directory value.')
    super(DirectorySourceType, self).__init__()
    self.paths = paths
    self.separator = separator
Initializes a source type. Args: paths (Optional[str]): paths relative to the root of the file system. separator (Optional[str]): path segment separator. Raises: FormatError: when paths is not set.
juraj-google-style
def start_workflow(name, config, *, queue=DefaultJobQueueName.Workflow, clear_data_store=True, store_args=None): try: wf = Workflow.from_name(name, queue=queue, clear_data_store=clear_data_store, arguments=store_args) except DirectedAcyclicGraphInvalid as e: raise WorkflowDefinitionError(workflow_name=name, graph_name=e.graph_name) celery_app = create_app(config) result = celery_app.send_task(JobExecPath.Workflow, args=(wf,), queue=queue, routing_key=queue) return result.id
Start a single workflow by sending it to the workflow queue. Args: name (str): The name of the workflow that should be started. Refers to the name of the workflow file without the .py extension. config (Config): Reference to the configuration object from which the settings for the workflow are retrieved. queue (str): Name of the queue the workflow should be scheduled to. clear_data_store (bool): Remove any documents created during the workflow run in the data store after the run. store_args (dict): Dictionary of additional arguments that are ingested into the data store prior to the execution of the workflow. Returns: str: The ID of the workflow job. Raises: WorkflowArgumentError: If the workflow requires arguments to be set in store_args that were not supplied to the workflow. WorkflowImportError: If the import of the workflow fails.
codesearchnet
def format_trigger(self, stream):
    src = u'value'
    if self.use_count:
        src = u'count'
    return u"{}({}) {} {}".format(src, stream, self.comp_string, self.reference)
Create a user understandable string like count(stream) >= X. Args: stream (DataStream): The stream to use to format ourselves. Returns: str: The formatted string
juraj-google-style
def write_xml(xml_str, output_loc=None, filename=None): if not xml_str: raise Dump2PolarionException("No data to write.") filename_fin = _get_filename(output_loc=output_loc, filename=filename) with io.open(filename_fin, "w", encoding="utf-8") as xml_file: xml_file.write(get_unicode_str(xml_str)) logger.info("Data written to '%s'", filename_fin)
Outputs the XML content (string) into a file. If `output_loc` is supplied and it's a file (not directory), the output will be saved there and the `filename` is ignored. Args: xml_str: string with XML document output_loc: file or directory for saving the file filename: file name that will be used if `output_loc` is directory If it is needed and is not supplied, it will be generated
juraj-google-style
def from_tuple(cls, queries): domain = cls() join_with = cls.AND for query in queries: if (query in [cls.OR, cls.AND]): join_with = query else: domain.add_query(query, join_with) return domain
Create a ``Domain`` given a set of complex query tuples. Args: queries (iter): An iterator of complex queries. Each iteration should contain either: * A data-set compatible with :func:`~domain.Domain.add_query` * A string to switch the join type Example:: [('subject', 'Test1'), 'OR', ('subject', 'Test2')', ('subject', 'Test3')', ] # The above is equivalent to: # subject:'Test1' OR subject:'Test2' OR subject:'Test3' [('modified_at', datetime(2017, 01, 01)), ('status', 'active'), ] # The above is equivalent to: # modified_at:[2017-01-01T00:00:00Z TO *] # AND status:"active" Returns: Domain: A domain representing the input queries.
codesearchnet
def is40(msg): if allzeros(msg): return False d = hex2bin(data(msg)) if wrongstatus(d, 1, 2, 13): return False if wrongstatus(d, 14, 15, 26): return False if wrongstatus(d, 27, 28, 39): return False if wrongstatus(d, 48, 49, 51): return False if wrongstatus(d, 54, 55, 56): return False if bin2int(d[39:47]) != 0: return False if bin2int(d[51:53]) != 0: return False return True
Check if a message is likely to be BDS code 4,0 Args: msg (String): 28 bytes hexadecimal message string Returns: bool: True or False
juraj-google-style
def _GroupByArguments(self, signatures):
    groups = {}
    for sig in signatures:
        stripped_signature = sig.Replace(return_type=None, exceptions=None)
        ret = groups.get(stripped_signature)
        if not ret:
            ret = _ReturnsAndExceptions()
            groups[stripped_signature] = ret
        ret.Update(sig)
    return groups
Groups signatures by arguments. Arguments: signatures: A list of function signatures (Signature instances). Returns: A dictionary mapping signatures (without return and exceptions) to a tuple of return values and exceptions.
github-repos
def get_length(self, y):
    lens = [self.find_pad_index(row) for row in y]
    return lens
Get true length of y. Args: y (list): padded list. Returns: lens: true length of y. Examples: >>> y = [[1, 0, 0], [1, 1, 0], [1, 1, 1]] >>> self.get_length(y) [1, 2, 3]
codesearchnet
def get_cpu_vendor(cls, family, arch='x86'):
    props = cls.get_cpu_props(family, arch)
    vendor = 'generic'
    try:
        vendor = props.xpath('vendor/@name')[0]
    except IndexError:
        pass
    return vendor
Get CPU vendor, if vendor is not available will return 'generic' Args: family(str): CPU family arch(str): CPU arch Returns: str: CPU vendor if found otherwise 'generic'
codesearchnet
def link_cloud(self, username=None, password=None, device_id=None): reg = ComponentRegistry() domain = self.get('cloud:server') if (username is None): prompt_str = 'Please enter your IOTile.cloud email: ' username = input(prompt_str) if (password is None): prompt_str = 'Please enter your IOTile.cloud password: ' password = getpass.getpass(prompt_str) cloud = Api(domain=domain) ok_resp = cloud.login(email=username, password=password) if (not ok_resp): raise ArgumentError(('Could not login to iotile.cloud as user %s' % username)) reg.set_config('arch:cloud_user', cloud.username) reg.set_config('arch:cloud_token', cloud.token) reg.set_config('arch:cloud_token_type', cloud.token_type) if (device_id is not None): cloud = IOTileCloud() cloud.impersonate_device(device_id)
Create and store a token for interacting with the IOTile Cloud API. You will need to call link_cloud once for each virtualenv that you create and want to use with any api calls that touch iotile cloud. Note that this method is called on a ConfigManager instance If you do not pass your username or password it will be prompted from you securely on stdin. If you are logging in for a user, the token will expire periodically and you will have to relogin. If you pass a device_id, you can obtain a limited token for that device that will never expire, assuming you have access to that device. Args: username (string): Your iotile.cloud username. This is prompted from stdin if not provided. password (string): Your iotile.cloud password. This is prompted from stdin if not provided. device_id (int): Optional device id to obtain permanent credentials for a device.
codesearchnet
def get_path(self, key, rel_to_cwd=False, rel_to_conf=False): if key in self.__cli: path = self.__cli[key] from_conf = False else: path = self.__config.get(key) from_conf = True if not isinstance(path, str): return None res = self.__abspath(path, from_conf) if rel_to_cwd: return os.path.relpath(res, self.__invoke_dir) if rel_to_conf: return os.path.relpath(res, self.__conf_dir) return self.__abspath(path, from_conf)
Retrieve a path from the config, resolving it against the invocation directory or the configuration file directory, depending on whether it was passed through the command line or the configuration file. Args: key: str, the key to lookup the path with Returns: str: The path, or `None`
juraj-google-style
def _checkBool(inputvalue, description='inputvalue'):
    _checkString(description, minlength=1, description='description string')
    if not isinstance(inputvalue, bool):
        raise TypeError('The {0} must be boolean. Given: {1!r}'.format(description, inputvalue))
Check that the given inputvalue is a boolean. Args: * inputvalue (boolean): The value to be checked. * description (string): Used in error messages for the checked inputvalue. Raises: TypeError, ValueError
codesearchnet
def request(http, uri, method='GET', body=None, headers=None, redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None): http_callable = getattr(http, 'request', http) return http_callable(uri, method=method, body=body, headers=headers, redirections=redirections, connection_type=connection_type)
Make an HTTP request with an HTTP object and arguments. Args: http: httplib2.Http, an http object to be used to make requests. uri: string, The URI to be requested. method: string, The HTTP method to use for the request. Defaults to 'GET'. body: string, The payload / body in HTTP request. By default there is no payload. headers: dict, Key-value pairs of request headers. By default there are no headers. redirections: int, The number of allowed 203 redirects for the request. Defaults to 5. connection_type: httplib.HTTPConnection, a subclass to be used for establishing connection. If not set, the type will be determined from the ``uri``. Returns: tuple, a pair of a httplib2.Response with the status code and other headers and the bytes of the content returned.
codesearchnet
def fill_memory_slot(memory, value, index):
    mask = tf.to_float(
        tf.one_hot(index, tf.shape(memory)[0])[:, None, None, None])
    fill_memory = (1 - mask) * memory + mask * value[None, ...]
    return fill_memory
Fills the memory slot at a particular index with the given value. Args: memory: a 4-d tensor [memory_size, batch, length, channel] containing the state of all steps value: a 3-d tensor [batch, length, channel] as the state index: integer in [0, memory_size) Returns: filled memory
juraj-google-style
def upload_blob(self, did, wid, filepath='./blob.json'): chars = string.ascii_letters + string.digits boundary_key = ''.join(random.choice(chars) for i in range(8)) mimetype = mimetypes.guess_type(filepath)[0] encoded_filename = os.path.basename(filepath) file_content_length = str(os.path.getsize(filepath)) blob = open(filepath) req_headers = { 'Content-Type': 'multipart/form-data; boundary="%s"' % boundary_key } payload = '--' + boundary_key + '\r\nContent-Disposition: form-data; name="encodedFilename"\r\n\r\n' + encoded_filename + '\r\n' payload += '--' + boundary_key + '\r\nContent-Disposition: form-data; name="fileContentLength"\r\n\r\n' + file_content_length + '\r\n' payload += '--' + boundary_key + '\r\nContent-Disposition: form-data; name="file"; filename="' + encoded_filename + '"\r\n' payload += 'Content-Type: ' + mimetype + '\r\n\r\n' payload += blob.read() payload += '\r\n--' + boundary_key + '--' return self._api.request('post', '/api/blobelements/d/' + did + '/w/' + wid, headers=req_headers, body=payload)
Uploads a file to a new blob element in the specified doc. Args: - did (str): Document ID - wid (str): Workspace ID - filepath (str, default='./blob.json'): Blob element location Returns: - requests.Response: Onshape response data
juraj-google-style
def show_abierrors(self, nids=None, stream=sys.stdout):
    lines = []
    app = lines.append
    for task in self.iflat_tasks(status=self.S_ABICRITICAL, nids=nids):
        header = "=== " + task.qout_file.path + "==="
        app(header)
        report = task.get_event_report()
        if report is not None:
            app("num_errors: %s, num_warnings: %s, num_comments: %s" % (
                report.num_errors, report.num_warnings, report.num_comments))
            app("*** ERRORS ***")
            app("\n".join(str(e) for e in report.errors))
            app("*** BUGS ***")
            app("\n".join(str(b) for b in report.bugs))
        else:
            app("get_event_report returned None!")
        app("=" * len(header) + 2*"\n")
    return stream.writelines(lines)
Write to the given stream the list of ABINIT errors for all tasks whose status is S_ABICRITICAL. Args: nids: optional list of node identifiers used to filter the tasks. stream: File-like object. Default: sys.stdout
juraj-google-style
def add_showcase(self, showcase, showcases_to_check=None): dataset_showcase = self._get_dataset_showcase_dict(showcase) if showcases_to_check is None: showcases_to_check = self.get_showcases() for showcase in showcases_to_check: if dataset_showcase['showcase_id'] == showcase['id']: return False showcase = hdx.data.showcase.Showcase({'id': dataset_showcase['showcase_id']}, configuration=self.configuration) showcase._write_to_hdx('associate', dataset_showcase, 'package_id') return True
Add dataset to showcase Args: showcase (Union[Showcase,Dict,str]): Either a showcase id or showcase metadata from a Showcase object or dictionary showcases_to_check (List[Showcase]): list of showcases against which to check existence of showcase. Defaults to showcases containing dataset. Returns: bool: True if the showcase was added, False if already present
juraj-google-style
def _create_filters(col_params, extractors): result = [] for (col_param, extractor) in zip(col_params, extractors): a_filter = _create_filter(col_param, extractor) if a_filter: result.append(a_filter) return result
Creates filters for the given col_params. Args: col_params: List of ListSessionGroupsRequest.ColParam protobufs. extractors: list of extractor functions of the same length as col_params. Each element should extract the column described by the corresponding element of col_params. Returns: A list of filter functions. Each corresponding to a single col_params.filter oneof field of _request
codesearchnet
def query(self, query): path = self.path(query.key) if os.path.exists(path): filenames = os.listdir(path) filenames = list(set(filenames) - set(self.ignore_list)) filenames = map(lambda f: os.path.join(path, f), filenames) iterable = self._read_object_gen(filenames) else: iterable = list() return query(iterable)
Returns an iterable of objects matching criteria expressed in `query` FSDatastore.query queries all the `.obj` files within the directory specified by the query.key. Args: query: Query object describing the objects to return. Returns: Cursor with all objects matching criteria
juraj-google-style
def receive(host, timeout): parameters = settings.get_amqp_settings()[host] queues = parameters["queues"] queues = dict(map(lambda (x, y): (y, x), queues.items())) queue = queues[parameters["out_key"]] channel = _get_channel(host, timeout) for method_frame, properties, body in channel.consume(queue): print json.dumps({ "method_frame": str(method_frame), "properties": str(properties), "body": body }) print "-" * 79 print channel.basic_ack(method_frame.delivery_tag)
Print all messages in the queue. Args: host (str): Specified --host. timeout (int): How long the script should wait for a message.
juraj-google-style
def follow(self, chars): chars = chars.lower() node = self.node for char in chars: node = cgaddag.gdg_follow_edge(self.gdg, node, char.encode("ascii")) if not node: raise KeyError(char) return Node(self.gdg, node)
Traverse the GADDAG to the node at the end of the given characters. Args: chars: A string of characters to traverse in the GADDAG. Returns: The Node which is found by traversing the tree.
juraj-google-style
def update_state(self, y_true, y_pred, sample_weight=None): y_true = math_ops.cast(y_true, self._dtype) y_pred = math_ops.cast(y_pred, self._dtype) y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true) error_sq = math_ops.squared_difference(y_pred, y_true) return super(RootMeanSquaredError, self).update_state(error_sq, sample_weight=sample_weight)
Accumulates root mean squared error statistics. Args: y_true: The ground truth values. y_pred: The predicted values. sample_weight: Optional weighting of each example. Defaults to 1. Can be a `Tensor` whose rank is either 0, or the same rank as `y_true`, and must be broadcastable to `y_true`. Returns: Update op.
github-repos
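Usage sketch through the public Keras metric, which is what calls `update_state` above; the inputs are toy values.

import tensorflow as tf

m = tf.keras.metrics.RootMeanSquaredError()
m.update_state(y_true=[[0.0], [1.0]], y_pred=[[1.0], [1.0]])
print(float(m.result()))  # ~0.7071 == sqrt(((0-1)**2 + (1-1)**2) / 2)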
def add_user( self, user, first_name=None, last_name=None, email=None, password=None ): self.project_service.set_auth(self._token_project) self.project_service.add_user( user, first_name, last_name, email, password)
Add a new user. Args: user (string): User name. first_name (optional[string]): User's first name. Defaults to None. last_name (optional[string]): User's last name. Defaults to None. email: (optional[string]): User's email address. Defaults to None. password: (optional[string]): User's password. Defaults to None. Raises: requests.HTTPError on failure.
juraj-google-style
def get_compound_bodies(node):
    if isinstance(node, (ast.Module, ast.FunctionDef, ast.ClassDef, ast.With)):
        return [node.body]
    elif isinstance(node, (ast.If, ast.While, ast.For)):
        return [node.body, node.orelse]
    elif PY2 and isinstance(node, ast.TryFinally):
        return [node.body, node.finalbody]
    elif PY2 and isinstance(node, ast.TryExcept):
        return [node.body, node.orelse] + [h.body for h in node.handlers]
    elif PY3 and isinstance(node, ast.Try):
        return ([node.body, node.orelse, node.finalbody]
                + [h.body for h in node.handlers])
    return []
Returns a list of bodies of a compound statement node. Args: node: AST node. Returns: A list of bodies of the node. If the given node does not represent a compound statement, an empty list is returned.
juraj-google-style
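A quick standard-library illustration (assumes the module's PY3 flag is set when running under Python 3):

import ast

tree = ast.parse("if x:\n    y = 1\nelse:\n    y = 2")
bodies = get_compound_bodies(tree.body[0])   # an ast.If node
print(len(bodies))               # 2: the `if` body and the `orelse` body
print([len(b) for b in bodies])  # [1, 1]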
def disk(radius, alias_blur=0.1, dtype=np.float32): if radius <= 8: length = np.arange(-8, 8 + 1) ksize = (3, 3) else: length = np.arange(-radius, radius + 1) ksize = (5, 5) x_axis, y_axis = np.meshgrid(length, length) aliased_disk = np.array((x_axis**2 + y_axis**2) <= radius**2, dtype=dtype) aliased_disk /= np.sum(aliased_disk) return tfds.core.lazy_imports.cv2.GaussianBlur( aliased_disk, ksize=ksize, sigmaX=alias_blur)
Generating a Gaussian blurring kernel with disk shape. Generating a Gaussian blurring kernel with disk shape using cv2 API. Args: radius: integer, radius of blurring kernel. alias_blur: float, standard deviation of Gaussian blurring. dtype: data type of kernel Returns: cv2 object of the Gaussian blurring kernel.
juraj-google-style
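A hedged sketch of how the kernel might be applied; it assumes cv2 is installed (the function above reaches it through `tfds.core.lazy_imports`), and the image is synthetic.

import numpy as np
import cv2

kernel = disk(radius=5, alias_blur=0.5)          # 11x11 disk-shaped kernel
image = np.random.rand(64, 64, 3).astype(np.float32)
blurred = cv2.filter2D(image, -1, kernel)        # convolve each channel
print(kernel.shape, blurred.shape)               # (11, 11) (64, 64, 3)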
def AllBalancesZeroOrLess(self): for (key, fixed8) in self.Balances.items(): if (fixed8.value > 0): return False return True
Flag indicating if all balances are 0 or less. Returns: bool: True if all balances are <= 0. False, otherwise.
codesearchnet
def merge_vert(script, threshold=0.0): filter_xml = ''.join([' <filter name="Merge Close Vertices">\n', ' <Param name="Threshold" ', 'value="{}" '.format(threshold), 'description="Merging distance" ', 'min="0" ', 'max="1" ', 'type="RichAbsPerc" ', '/>\n', ' </filter>\n']) util.write_filter(script, filter_xml) return None
Merge together all the vertices that are nearer than the specified threshold. Like a unify duplicate vertices but with some tolerance. Args: script: the FilterScript object or script filename to write the filter to. threshold (float): Merging distance. All the vertices that are closer than this threshold are merged together. Use very small values, default is zero. Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA
codesearchnet
def add_forwarding_rules(self, forwarding_rules): rules_dict = [rule.__dict__ for rule in forwarding_rules] return self.get_data( "load_balancers/%s/forwarding_rules/" % self.id, type=POST, params={"forwarding_rules": rules_dict} )
Adds new forwarding rules to a LoadBalancer. Args: forwarding_rules (obj:`list`): A list of `ForwardingRule` objects
juraj-google-style
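A hedged sketch: the token and load-balancer id are placeholders, and since the method only reads each rule's `__dict__`, a `SimpleNamespace` stands in for the library's ForwardingRule objects here.

from types import SimpleNamespace
import digitalocean

lb = digitalocean.LoadBalancer(token='my-token', id='lb-uuid')  # placeholder values
rule = SimpleNamespace(entry_protocol='https', entry_port=443,
                       target_protocol='http', target_port=80)
lb.add_forwarding_rules([rule])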
def send_message_for_lane_change(sender, **kwargs): current = kwargs['current'] owners = kwargs['possible_owners'] if 'lane_change_invite' in current.task_data: msg_context = current.task_data.pop('lane_change_invite') else: msg_context = DEFAULT_LANE_CHANGE_INVITE_MSG wfi = WFCache(current).get_instance() TaskInvitation.objects.filter(instance=wfi, role=current.role, wf_name=wfi.wf.name).delete() today = datetime.today() for recipient in owners: inv = TaskInvitation( instance=wfi, role=recipient, wf_name=wfi.wf.name, progress=30, start_date=today, finish_date=today + timedelta(15) ) inv.title = current.task_data.get('INVITATION_TITLE') or wfi.wf.title inv.save() try: recipient.send_notification(title=msg_context['title'], message="%s %s" % (wfi.wf.title, msg_context['body']), typ=1, url='', sender=sender ) except: pass
Sends a message to possible owners of the current workflow's next lane. Args: **kwargs: ``current`` and ``possible_owners`` are required. sender (User): User object
juraj-google-style
def get_backend_engine(self, name, **kwargs): if (name not in self._engines): msg = 'Given settings backend is unknowed: {}' raise SettingsBackendError(msg.format(name)) return self._engines[name](**kwargs)
Get backend engine from given name. Args: name (string): Name of the backend engine to retrieve. Raises: boussole.exceptions.SettingsBackendError: If given backend name does not match any available engine. Returns: object: Instance of selected backend engine.
codesearchnet
def reset(self): self.number_of_hops = 0 self.dr = np.array([0.0, 0.0, 0.0]) self.summed_dr2 = 0.0 self.sites_visited = [self._site.number]
Reinitialise the stored displacements, number of hops, and list of sites visited for this `Atom`. Args: None Returns: None
codesearchnet
def set_notifier_path(self, notifier, path): assert self.notifier is Model.notifier or notifier is Model.notifier, \ "Already have a notifier %s path %s" % (self.notifier, self.path) self.notifier = notifier self.path = path for name, ct in self.call_types.items(): if ct.is_mapping: child = getattr(self, name) if child and issubclass(ct.typ[1], Model): for k, v in child.items(): v.set_notifier_path(notifier, self.path + [name, k]) elif issubclass(ct.typ, Model): assert not ct.is_array, \ "Can't deal with Arrays of Models %s" % ct child = getattr(self, name) child.set_notifier_path(notifier, self.path + [name])
Sets the notifier, and the path from the block root Args: notifier (Notifier): The Notifier to tell when endpoint data changes path (list): The absolute path to get to this object
juraj-google-style
def _log_to_stderr(self, record): old_stream = self.stream self.stream = sys.stderr try: super(PythonHandler, self).emit(record) finally: self.stream = old_stream
Emits the record to stderr. This temporarily sets the handler stream to stderr, calls StreamHandler.emit, then reverts the stream back. Args: record: logging.LogRecord, the record to log.
codesearchnet
def plot_spectra_pages_pdf(ss, pdf_filename='pages.pdf', setup=_default_setup): logger = a99.get_python_logger() (xmin, xmax, ymin_, ymax, xspan, yspan) = calc_max_min(ss) ymin = (ymin_ if (setup.ymin is None) else setup.ymin) num_pages = len(ss) a99.format_BLB() pdf = matplotlib.backends.backend_pdf.PdfPages(pdf_filename) for (i, s) in enumerate(ss): title = s.title fig = plt.figure() plt.plot(s.x, s.y, c=_FAV_COLOR) if (setup.flag_xlabel and setup.fmt_xlabel): _set_plot(plt.xlabel, setup.fmt_xlabel, s) if (setup.flag_ylabel and setup.fmt_ylabel): _set_plot(plt.ylabel, setup.fmt_ylabel, s) _set_plot(plt.title, setup.fmt_title, s) plt.xlim([(xmin - (xspan * _T)), (xmax + (xspan * _T))]) plt.ylim([(ymin - (yspan * _T)), (ymax + (yspan * _T))]) plt.tight_layout() plt.subplots_adjust(top=0.94) logger.info("Printing page {0:d}/{1:d} ('{2!s}')".format((i + 1), num_pages, title)) pdf.savefig(fig) plt.close() pdf.close() logger.info('File {0!s} successfully created.'.format(pdf_filename))
Plots spectra into a PDF file, one spectrum per page. Args: ss: list of Spectrum objects pdf_filename: name of output file
codesearchnet
def combine_results(results): total, inside = (sum((r[0] for r in results)), sum((r[1] for r in results))) return (total, inside, 4 * float(inside) / total)
Combiner function to sum up trials and compute the estimate. Args: results: An iterable of 3-tuples (total trials, inside trials, ignored). Returns: A 3-tuple containing the sum of total trials, sum of inside trials, and the estimate of pi (4 * inside / total) computed from those sums.
github-repos
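A pure-Python check of the combiner with two made-up worker results:

partials = [(1000, 787, 0.0), (1000, 772, 0.0)]
total, inside, estimate = combine_results(partials)
print(total, inside, estimate)  # 2000 1559 3.118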
def get(cls): results = {} hierarchy = cls.__hierarchy hierarchy.reverse() for storeMethod in hierarchy: cls.merger.merge(results, storeMethod.get()) return results
Get values gathered from the previously set hierarchy. Respects the order in which sources are set; the first source set has the highest priority and overrides values with the same key that exist in sources with lower priority. Returns: dict: The dictionary containing values gathered from all set sources.
codesearchnet
def rep1(parser: Union[(Parser, Sequence[Input])]) -> RepeatedOnceParser: if isinstance(parser, str): parser = lit(parser) return RepeatedOnceParser(parser)
Match a parser one or more times repeatedly. This matches ``parser`` multiple times in a row. If it matches at least once, it returns a list of values from each time ``parser`` matched. If it does not match ``parser`` at all, it fails. Args: parser: Parser or literal
codesearchnet
def coord_list_mapping_pbc(subset, superset, atol=1e-08): atol = (np.array([1.0, 1.0, 1.0]) * atol) return cuc.coord_list_mapping_pbc(subset, superset, atol)
Gives the index mapping from a subset to a superset. Superset cannot contain duplicate matching rows Args: subset, superset: List of frac_coords Returns: list of indices such that superset[indices] = subset
codesearchnet
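A small sketch assuming the pymatgen coord utilities (and their cython helper `cuc`) are available; the coordinates are illustrative.

import numpy as np

superset = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5], [0.25, 0.75, 0.5]])
subset = np.array([[0.5, 0.5, 0.5], [1.0, 1.0, 1.0]])  # [1,1,1] wraps to [0,0,0]
print(coord_list_mapping_pbc(subset, superset))        # expected: [1 0]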
def _send_group_coordinator_request(self): node_id = self._client.least_loaded_node() if (node_id is None): return Future().failure(Errors.NoBrokersAvailable()) elif (not self._client.ready(node_id, metadata_priority=False)): e = Errors.NodeNotReadyError(node_id) return Future().failure(e) log.debug('Sending group coordinator request for group %s to broker %s', self.group_id, node_id) request = GroupCoordinatorRequest[0](self.group_id) future = Future() _f = self._client.send(node_id, request) _f.add_callback(self._handle_group_coordinator_response, future) _f.add_errback(self._failed_request, node_id, request, future) return future
Discover the current coordinator for the group. Returns: Future: resolves to the node id of the coordinator
codesearchnet
def ParseOptions(cls, options, configuration_object): if not isinstance(configuration_object, tools.CLITool): raise errors.BadConfigObject( 'Configuration object is not an instance of CLITool') status_view_mode = cls._ParseStringOption( options, 'status_view_mode', default_value=status_view.StatusView.MODE_WINDOW) setattr(configuration_object, '_status_view_mode', status_view_mode)
Parses and validates options. Args: options (argparse.Namespace): parser options. configuration_object (CLITool): object to be configured by the argument helper. Raises: BadConfigObject: when the configuration object is of the wrong type.
juraj-google-style
def mirror(self): reverse_circ = self.copy(name=(self.name + '_mirror')) reverse_circ.data = [] for (inst, qargs, cargs) in reversed(self.data): reverse_circ.data.append((inst.mirror(), qargs, cargs)) return reverse_circ
Mirror the circuit by reversing the instructions. This is done by recursively mirroring all instructions. It does not invert any gate. Returns: QuantumCircuit: the mirrored circuit
codesearchnet
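Usage sketch with a two-qubit circuit, assuming the Qiskit version this method comes from (later releases renamed it `reverse_ops`):

from qiskit import QuantumCircuit

qc = QuantumCircuit(2, name='bell')
qc.h(0)
qc.cx(0, 1)
mirrored = qc.mirror()       # instructions now run CX first, then H
print(mirrored.name)         # 'bell_mirror'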