Columns: code (string, 20 to 4.93k chars), docstring (string, 33 to 1.27k chars), source (3 classes)
def report_factory(app, report_name, **kwargs):
    created = pendulum.now().to_rfc3339_string()
    user_model = app._swimlane.user.as_usergroup_selection()
    return Report(app, {
        '$type': Report._type,
        'groupBys': [],
        'aggregates': [],
        'applicationIds': [app.id],
        'columns': [],
        'sorts': {'$type': 'System.Collections.Generic.Dictionary`2[[System.String, mscorlib],[Core.Models.Search.SortTypes, Core]], mscorlib'},
        'filters': [],
        'defaultSearchReport': False,
        'allowed': [],
        'permissions': {'$type': 'Core.Models.Security.PermissionMatrix, Core'},
        'createdDate': created,
        'modifiedDate': created,
        'createdByUser': user_model,
        'modifiedByUser': user_model,
        'id': None,
        'name': report_name,
        'disabled': False,
        'keywords': ''
    }, **kwargs)
Report instance factory populating boilerplate raw data Args: app (App): Swimlane App instance report_name (str): Generated Report name Keyword Args: **kwargs: Kwargs to pass to the Report class
codesearchnet
def draw_sample(num_samples, num_classes, logits, num_trials, dtype, seed):
    with tf.name_scope('multinomial.draw_sample'):
        num_trials = tf.ones_like(logits[..., 0], dtype=num_trials.dtype) * num_trials
        logits = tf.ones_like(num_trials[..., tf.newaxis], dtype=logits.dtype) * logits
        flat_logits = tf.reshape(logits, [-1, num_classes])
        flat_num_trials = num_samples * tf.reshape(num_trials, [-1])

        def _sample_one_batch_member(args):
            logits, num_cat_samples = args[0], args[1]
            x = tf.random.categorical(logits[tf.newaxis, ...], num_cat_samples, seed=seed)
            x = tf.reshape(x, shape=[num_samples, -1])
            x = tf.one_hot(x, depth=num_classes)
            x = tf.reduce_sum(input_tensor=x, axis=-2)
            return tf.cast(x, dtype=dtype)

        x = tf.map_fn(_sample_one_batch_member, [flat_logits, flat_num_trials], dtype=dtype)
        x = tf.transpose(a=x, perm=[1, 0, 2])
        final_shape = tf.concat([[num_samples], tf.shape(input=num_trials), [num_classes]], axis=0)
        x = tf.reshape(x, final_shape)
        return x
Sample a multinomial. The batch shape is given by broadcasting num_trials with remove_last_dimension(logits). Args: num_samples: Python int or singleton integer Tensor: number of multinomial samples to draw. num_classes: Python int or singleton integer Tensor: number of classes. logits: Floating Tensor with last dimension k, of (unnormalized) logit probabilities per class. num_trials: Tensor of number of categorical trials each multinomial consists of. num_trials[..., tf.newaxis] must broadcast with logits. dtype: dtype at which to emit samples. seed: Random seed. Returns: samples: Tensor of given dtype and shape [n] + batch_shape + [k].
codesearchnet
def plot_compare(self, other_plotter):
    data_orig = self.bs_plot_data()
    data = other_plotter.bs_plot_data()
    if len(data_orig['distances']) != len(data['distances']):
        raise ValueError('The two objects are not compatible.')
    plt = self.get_plot()
    band_linewidth = 1
    for i in range(other_plotter._nb_bands):
        for d in range(len(data_orig['distances'])):
            plt.plot(data_orig['distances'][d],
                     [e[i] for e in data['frequency']][d],
                     'r-', linewidth=band_linewidth)
    return plt
Plot two band structures for comparison. One is in red, the other in blue. The two band structures need to be defined on the same symmetry lines, and the distance between symmetry lines is that of the band structure used to build the PhononBSPlotter. Args: another PhononBSPlotter object defined along the same symmetry lines Returns: a matplotlib object with both band structures
codesearchnet
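A minimal usage sketch, not from the source: `plotter_a` and `plotter_b` are hypothetical PhononBSPlotter instances built along the same symmetry lines.
>>> plt = plotter_a.plot_compare(plotter_b)  # plotter_b's bands overlaid in red
>>> plt.savefig('bs_compare.png')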
def randomize(vm, length=(10, 10), ints=(0, 999), strs=(1, 10), chars=(32, 126),
              instruction_ratio=0.5, number_string_ratio=0.8,
              exclude=map(crianza.instructions.lookup, ['.', 'exit', 'read', 'write', 'str']),
              restrict_to=None):
    vm.code = []
    instructions = set(vm.instructions.values()) - set(exclude)
    if restrict_to is not None:
        instructions = instructions.intersection(set(restrict_to))
    instructions = list(instructions)
    for _ in xrange(random.randint(*length)):
        r = random.random()
        if r <= instruction_ratio:
            vm.code.append(random.choice(instructions))
        elif r <= number_string_ratio:
            vm.code.append(crianza.compiler.make_embedded_push(random.randint(*ints)))
        else:
            vm.code.append(crianza.compiler.make_embedded_push(
                '%s' % ''.join(chr(random.randint(*chars))
                               for n in xrange(0, random.randint(*strs)))))
    return vm
Replaces existing code with completely random instructions. Does not optimize code after generating it. Args: length: Tuple of minimum and maximum code lengths. Code length will be a random number between these two inclusive values. ints: Integers in the code will be selected at random from this inclusive range. strs: Inclusive range of the length of strings in the code. chars: Inclusive range of characters in random strings. instruction_ratio: Ratio of instructions to numbers/strings, meaning that if this value is 0.5 then there will be just as many instructions in the code as there are numbers and strings. number_string_ratio: Ratio of numbers to strings. exclude: Excluded instructions. For genetic programming, one wants to prevent the program from hanging while waiting for user input. The default value excludes console I/O and debug instructions. restrict_to: Limit instructions to the given list. Returns: The VM.
codesearchnet
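A hedged usage sketch; the `crianza.Machine([])` setup is an assumption about the library's constructor, not taken from the source.
>>> vm = crianza.Machine([])  # assumption: an empty program to randomize
>>> vm = randomize(vm, length=(5, 15), instruction_ratio=0.7)
>>> len(vm.code) in range(5, 16)
True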
def add_all_exchange_reactions(model, compartment, allow_duplicates=False): all_reactions = {} if not allow_duplicates: for rxnid in model.database.reactions: rx = model.database.get_reaction(rxnid) all_reactions[rx] = rxnid added = set() added_compounds = set() initial_compounds = set(model.compounds) reactions = set(model.database.reactions) for model_compound in initial_compounds: compound = model_compound.in_compartment(compartment) if compound in added_compounds: continue rxnid_ex = create_exchange_id(reactions, compound) reaction_ex = Reaction(Direction.Both, {compound: -1}) if reaction_ex not in all_reactions: model.database.set_reaction(rxnid_ex, reaction_ex) reactions.add(rxnid_ex) else: rxnid_ex = all_reactions[reaction_ex] if not model.has_reaction(rxnid_ex): added.add(rxnid_ex) model.add_reaction(rxnid_ex) added_compounds.add(compound) return added
Add all exchange reactions to database and to model. Args: model: :class:`psamm.metabolicmodel.MetabolicModel`. compartment: Compartment in which the exchange reactions are created. allow_duplicates: Add a new exchange reaction even if an equivalent exchange reaction already exists in the database. Returns: Set of IDs of the exchange reactions added to the model.
juraj-google-style
def parse_relations(belstr: str, char_locs: CharLocs, parsed: Parsed,
                    errors: Errors) -> Tuple[Parsed, Errors]:
    quotes = char_locs['quotes']
    quoted_range = set(i for start, end in quotes.items() for i in range(start, end))
    for match in relations_pattern_middle.finditer(belstr):
        start, end = match.span(1)
        end = end - 1
        if start != end:
            test_range = set(range(start, end))
        else:
            test_range = {start}  # set(start) would raise TypeError on an int
        if test_range.intersection(quoted_range):
            continue
        span_key = (start, end)
        parsed[span_key] = {'type': 'Relation', 'name': match.group(1), 'span': (start, end)}
    for match in relations_pattern_end.finditer(belstr):
        start, end = match.span(1)
        log.debug(f'Relation-end {match}')
        end = end - 1
        if start != end:
            test_range = set(range(start, end))
        else:
            test_range = {start}
        if test_range.intersection(quoted_range):
            continue
        span_key = (start, end)
        parsed[span_key] = {'type': 'Relation', 'name': match.group(1), 'span': (start, end)}
    return parsed, errors
Parse relations from BEL string Args: belstr: BEL string as one single string (not list of chars) char_locs: paren, comma and quote char locations parsed: data structure for parsed functions, relations, nested errors: error messages Returns: (parsed, errors):
codesearchnet
def table_delete(self, table_name):
    url = Api._ENDPOINT + (Api._TABLES_PATH % table_name)
    return datalab.utils.Http.request(url, method='DELETE',
                                      credentials=self._credentials,
                                      raw_response=True)
Issues a request to delete a table. Args: table_name: the name of the table as a tuple of components. Returns: A parsed result object. Raises: Exception if there is an error performing the operation.
codesearchnet
def read_at(self, d, **kwargs):
    try:
        return np.array([self._read_at(depth, **kwargs) for depth in d])
    except Exception:  # d is a scalar, not iterable
        return self._read_at(d, **kwargs)
Read the log at a specific depth or an array of depths. Args: d (float or array-like) interpolation (str) index(bool) return_basis (bool) Returns: float or ndarray.
juraj-google-style
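A usage sketch, assuming `log` is an instance of the curve class this method belongs to (the name and depth values are hypothetical):
>>> log.read_at(1000.0)  # single depth returns a float
>>> log.read_at([1000.0, 1250.0, 1500.0])  # a sequence returns an ndarray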
def learn_dfa(self, mma=None): logging.info('Initializing learning procedure.') if mma: self._init_table_from_dfa(mma) else: self._init_table() logging.info('Generating a closed and consistent observation table.') while True: closed = False while not closed: logging.debug('Checking if table is closed.') closed, string = self.observation_table.is_closed() if not closed: logging.debug('Closing table.') self._ot_make_closed(string) else: logging.debug('Table closed.') dfa = self.get_dfa_conjecture() logging.info('Generated conjecture machine with %d states.',len(list(dfa.states))) logging.debug('Running equivalence query.') found, counter_example = self._equivalence_query(dfa) if found: logging.info('No counterexample found. Hypothesis is correct!') break logging.info('Processing counterexample %s with length %d.', counter_example, len(counter_example)) self._process_counter_example(dfa, counter_example) logging.info('Learning complete.') logging.info('Learned em_vector table is the following:') logging.info(self.observation_table.em_vector) return '', dfa
Implements the high-level loop of the algorithm for learning a DFA. Args: mma (DFA): The input automaton used to initialize the observation table (optional). Returns: Tuple[str, DFA]: An empty string and the learned DFA model.
juraj-google-style
def find_last_sublist(list_, sublist):
    for i in reversed(range(len(list_) - len(sublist) + 1)):
        if list_[i] == sublist[0] and list_[i:i + len(sublist)] == sublist:
            return i
    return None
Given a list, find the last occurrence of a sublist within it. Args: list_: The list to search. sublist: The sublist to find. Returns: Index where the sublist starts, or None if there is no match.
codesearchnet
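A worked example of the return value:
>>> find_last_sublist([1, 2, 3, 1, 2], [1, 2])
3
>>> find_last_sublist([1, 2, 3], [4]) is None
True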
def verify_firebase_token(id_token, request, audience=None):
    return verify_token(id_token, request, audience=audience,
                        certs_url=_GOOGLE_APIS_CERTS_URL)
Verifies an ID Token issued by Firebase Authentication. Args: id_token (Union[str, bytes]): The encoded token. request (google.auth.transport.Request): The object used to make HTTP requests. audience (str): The audience that this token is intended for. This is typically your Firebase application ID. If None then the audience is not verified. Returns: Mapping[str, Any]: The decoded token.
juraj-google-style
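A usage sketch following the google-auth transport pattern; the project ID is a placeholder, not taken from the source.
>>> import google.auth.transport.requests
>>> request = google.auth.transport.requests.Request()
>>> claims = verify_firebase_token(id_token, request, audience='my-project-id')
>>> claims['sub']  # the subject (user) the token was issued for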
def get_device_state(self, device, id_override=None, type_override=None):
    _LOGGER.info('Getting state via online API')
    object_id = id_override or device.object_id()
    object_type = type_override or device.object_type()
    url_string = '{}/{}s/{}'.format(self.BASE_URL, object_type, object_id)
    arequest = requests.get(url_string, headers=API_HEADERS)
    response_json = arequest.json()
    _LOGGER.debug('%s', response_json)
    return response_json
Get device state via online API. Args: device (WinkDevice): The device the change is being requested for. id_override (String, optional): A device ID used to override the passed in device's ID. Used to make changes on sub-devices. i.e. Outlet in a Powerstrip. The Parent device's ID. type_override (String, optional): Used to override the device type when a device inherits from a device other than WinkDevice. Returns: response_json (Dict): The API's response in dictionary format
codesearchnet
def fail_run_group(group, session):
    from datetime import datetime
    group.end = datetime.now()
    group.status = 'failed'
    session.commit()
End the run_group unsuccessfully. Args: group: The run_group we want to complete. session: The database transaction we will finish.
juraj-google-style
def normalize_json(template):
    obj = parse_cloudformation_template(template)
    json_str = json.dumps(obj, sort_keys=True, indent=4, default=str,
                          separators=(',', ': '))
    result = []
    lines = json_str.split("\n")
    for line in lines:
        result.append(line + "\n")
    return result
Normalize our template for diffing. Args: template(str): string representing the template Returns: list: lines of the normalized JSON representation of the template, each terminated with a newline
juraj-google-style
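A sketch of the intended use: because every returned line keeps its trailing newline, the result feeds directly into difflib (the two template strings here are hypothetical).
>>> import difflib
>>> diff = difflib.unified_diff(normalize_json(old_template), normalize_json(new_template))
>>> print(''.join(diff))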
def _ReadRecordSchemaIndexes(self, tables, file_object, record_offset): _ = self._ReadRecordHeader(file_object, record_offset) attribute_value_offsets = self._ReadRecordAttributeValueOffset( file_object, record_offset + 24, 5) if attribute_value_offsets != (0x2d, 0x31, 0x35, 0x39, 0x3d): raise errors.ParseError('Unsupported record attribute value offsets') file_offset = file_object.tell() data_type_map = self._GetDataTypeMap('keychain_record_schema_indexes') record_values, _ = self._ReadStructureFromFileObject( file_object, file_offset, data_type_map) if record_values.relation_identifier not in tables: raise errors.ParseError( 'CSSM_DL_DB_SCHEMA_INDEXES defines relation identifier not defined ' 'in CSSM_DL_DB_SCHEMA_INFO.') table = tables.get(self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_INDEXES, None) if not table: raise errors.ParseError('Missing CSSM_DL_DB_SCHEMA_INDEXES table.') record = collections.OrderedDict({ 'RelationID': record_values.relation_identifier, 'IndexID': record_values.index_identifier, 'AttributeID': record_values.attribute_identifier, 'IndexType': record_values.index_type, 'IndexedDataLocation': record_values.index_data_location}) table.records.append(record)
Reads a schema indexes (CSSM_DL_DB_SCHEMA_INDEXES) record. Args: tables (dict[int, KeychainDatabaseTable]): tables per identifier. file_object (file): file-like object. record_offset (int): offset of the record relative to the start of the file. Raises: ParseError: if the record cannot be read.
juraj-google-style
def set_speech_text(self, text):
    self.response.outputSpeech.type = 'PlainText'
    self.response.outputSpeech.text = text
Set response output speech as plain text type. Args: text: str. Response speech used when type is 'PlainText'. Cannot exceed 8,000 characters.
codesearchnet
def get_default_name(self):
    long_names = [name for name in self.name if name.startswith('--')]
    short_names = [name for name in self.name if not name.startswith('--')]
    if long_names:
        return to_snake_case(long_names[0].lstrip('-'))
    return to_snake_case(short_names[0].lstrip('-'))
Return the default generated name to store value on the parser for this option. eg. An option *['-s', '--use-ssl']* will generate the *use_ssl* name Returns: str: the default name of the option
codesearchnet
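A worked example, assuming `opt` is an option object with a `name` list (the variable name is hypothetical):
>>> opt.name = ['-s', '--use-ssl']
>>> opt.get_default_name()
'use_ssl'
>>> opt.name = ['-v']
>>> opt.get_default_name()
'v'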
def print_tools(self, pattern=None, buf=sys.stdout): seen = set() rows = [] context = self.context if context: data = context.get_tools() conflicts = set(context.get_conflicting_tools().keys()) for _, (variant, tools) in sorted(data.items()): pkg_str = variant.qualified_package_name for tool in tools: if pattern and not fnmatch(tool, pattern): continue if tool in conflicts: label = "(in conflict)" color = critical else: label = '' color = None rows.append([tool, '-', pkg_str, "active context", label, color]) seen.add(tool) for suite in self.suites: for tool, d in suite.get_tools().iteritems(): if tool in seen: continue if pattern and not fnmatch(tool, pattern): continue label = [] color = None path = which(tool) if path: path_ = os.path.join(suite.tools_path, tool) if path != path_: label.append("(hidden by unknown tool '%s')" % path) color = warning variant = d["variant"] if isinstance(variant, set): pkg_str = ", ".join(variant) label.append("(in conflict)") color = critical else: pkg_str = variant.qualified_package_name orig_tool = d["tool_name"] if orig_tool == tool: orig_tool = '-' label = ' '.join(label) source = ("context '%s' in suite '%s'" % (d["context_name"], suite.load_path)) rows.append([tool, orig_tool, pkg_str, source, label, color]) seen.add(tool) _pr = Printer(buf) if not rows: _pr("No matching tools.") return False headers = [["TOOL", "ALIASING", "PACKAGE", "SOURCE", "", None], ["----", "--------", "-------", "------", "", None]] rows = headers + sorted(rows, key=lambda x: x[0].lower()) print_colored_columns(_pr, rows) return True
Print a list of visible tools. Args: pattern (str): Only list tools that match this glob pattern.
juraj-google-style
def __call__(self, package_names): result = True registry = get(self.registry_name) for package_name in package_names: metadata = {} for entry_point, export_target in registry.iter_export_targets_for( package_name): builder = next(registry.generate_builder( entry_point, export_target), None) if not builder: result = False continue entries = registry.execute_builder(*builder) result = bool(entries) and result metadata.update(entries) result = bool(metadata) and result registry.update_artifact_metadata(package_name, metadata) return result
Generic artifact builder function. Arguments: package_names List of package names to be built Returns True if the build is successful without errors, False if errors were found or if no artifacts were built.
juraj-google-style
def convert_unsqueeze(params, w_name, scope_name, inputs, layers, weights, names):
    print('Converting unsqueeze ...')
    if names == 'short':
        tf_name = 'UNSQ' + random_string(4)
    elif names == 'keep':
        tf_name = w_name
    else:
        tf_name = w_name + str(random.random())

    def target_layer(x):
        import keras
        return keras.backend.expand_dims(x)

    lambda_layer = keras.layers.Lambda(target_layer, name=tf_name + 'E')
    layers[scope_name] = lambda_layer(layers[inputs[0]])
Convert unsqueeze operation. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
codesearchnet
def seek(self, offset, whence=os.SEEK_SET):
    if not self._database_object:
        raise IOError('Not opened.')
    if whence == os.SEEK_CUR:
        offset += self._current_offset
    elif whence == os.SEEK_END:
        offset += self._size
    elif whence != os.SEEK_SET:
        raise IOError('Unsupported whence.')
    if offset < 0:
        raise IOError('Invalid offset value out of bounds.')
    self._current_offset = offset
Seeks to an offset within the file-like object. Args: offset (int): offset to seek to. whence (Optional(int)): value that indicates whether offset is an absolute or relative position within the file. Raises: IOError: if the seek failed. OSError: if the seek failed.
juraj-google-style
def all_reduce_ring(x, parallelism, maybe_reduce=True, use_bfloat16=True): if parallelism.n == 1: return x if maybe_reduce: original_parallelism = parallelism parallelism, x = reduce_by_device(parallelism, x, tf.add_n) if parallelism.n == 1: y = x else: x_flat = parallelism(tf.reshape, x, [[-1]] * parallelism.n) x_split = parallelism( common_layers.approximate_split, x_flat, parallelism.n, 0) def _step(source_replica, target_replica, x_split, op="plus_eq"): for shard in range(parallelism.n): source_device = (shard + source_replica) % parallelism.n target_device = (shard + target_replica) % parallelism.n source = x_split[source_device][shard] if use_bfloat16: with tf.device(parallelism.devices[source_device]): source = tf.to_bfloat16(source) with tf.device(parallelism.devices[target_device]): source = tf.to_float(source) if op == "plus_eq": x_split[target_device][shard] += source else: assert op == "copy" x_split[target_device][shard] = tf.identity(source) center = parallelism.n for i in reversed(range(center, parallelism.n - 1)): _step(i + 1, i, x_split, op="plus_eq") for i in range(center): _step(i, i + 1, x_split, op="plus_eq") for i in range(center, parallelism.n - 1): _step(i, i + 1, x_split, op="copy") for i in reversed(range(center)): _step(i + 1, i, x_split, op="copy") x_concat = parallelism(tf.concat, x_split, 0) y = parallelism(common_layers.reshape_like_all_dims, x_concat, x) if maybe_reduce: y = expand_by_device(original_parallelism, parallelism, y) return y
Compute the sum of all Tensors and put the result everywhere. Assumes that the devices are connected in a ring. Args: x: a list of Tensors with length parallelism.n parallelism: a expert_utils.Parallelism object. maybe_reduce: a boolean - first reduce per device. use_bfloat16: a boolean - saves bandwidth but loses precision Returns: a list of Tensors with length parallelism.n
juraj-google-style
def extractHolidayDate(self, setting_holiday):
    ret = namedtuple('result', ['Holiday', 'Month', 'Day'])
    setting_holiday += 1
    ret.Holiday = str(setting_holiday)
    if (setting_holiday < 1) or (setting_holiday > Extents.Holidays):
        ekm_log('Out of bounds: holiday ' + str(setting_holiday))
        ret.Holiday = ret.Month = ret.Day = str(0)
        return ret
    idxday = 'Holiday_' + str(setting_holiday) + '_Day'
    idxmon = 'Holiday_' + str(setting_holiday) + '_Mon'
    if idxmon not in self.m_hldy:
        ret.Holiday = ret.Month = ret.Day = str(0)
        return ret
    if idxday not in self.m_hldy:
        ret.Holiday = ret.Month = ret.Day = str(0)
        return ret
    ret.Day = self.m_hldy[idxday][MeterData.StringValue]
    ret.Month = self.m_hldy[idxmon][MeterData.StringValue]
    return ret
Read a single holiday date from meter buffer. Args: setting_holiday (int): Holiday from 0-19 or in range(Extents.Holidays) Returns: tuple: Holiday tuple, elements are strings. =============== ====================== Holiday Holiday 0-19 as string Day Day 1-31 as string Month Month 1-12 as string =============== ======================
codesearchnet
def is_nested(structure): return tree_impl.is_nested(structure)
Checks if a given structure is nested. Examples: >>> keras.tree.is_nested(42) False >>> keras.tree.is_nested({"foo": 42}) True Args: structure: A structure to check. Returns: `True` if a given structure is nested, i.e. is a sequence, a mapping, or a namedtuple, and `False` otherwise.
github-repos
def update_query_parameters(url, query_parameters):
    scheme, netloc, path, query_string, fragment = urlsplit(url)
    url_params = parse_qs(query_string)
    url_params.update(query_parameters)
    return urlunsplit((scheme, netloc, path,
                       urlencode(sorted(url_params.items()), doseq=True),
                       fragment))
Return url with updated query parameters. Arguments: url (str): Original url whose query parameters need to be updated. query_parameters (dict): A dictionary containing query parameters to be added to the url. Returns: (str): url with updated query parameters.
codesearchnet
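A worked example; note that parse_qs stores values as lists, so list values merge cleanly and keys come out sorted.
>>> update_query_parameters('http://example.com/path?a=1', {'b': ['2']})
'http://example.com/path?a=1&b=2'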
def to_json_file(self, json_file_path: Union[str, os.PathLike]):
    with open(json_file_path, 'w', encoding='utf-8') as writer:
        writer.write(self.to_json_string())
Save this instance to a JSON file. Args: json_file_path (`str` or `os.PathLike`): Path to the JSON file in which this image_processor instance's parameters will be saved.
github-repos
def dims(self):
    if self._dims is None:
        return None
    return [as_dimension(d) for d in self._dims]
Deprecated. Returns list of dimensions for this shape. Suggest `TensorShape.as_list` instead. Returns: A list containing `tf.compat.v1.Dimension`s, or None if the shape is unspecified.
github-repos
def send_status_message(self, object_id, status):
    try:
        body = json.dumps({'id': object_id, 'status': status})
        self.status_queue.send_message(
            MessageBody=body,
            MessageGroupId='job_status',
            MessageDeduplicationId=get_hash((object_id, status)))
        return True
    except Exception as ex:
        print(ex)
        return False
Send a message to the `status_queue` to update a job's status. Returns `True` if the message was sent, else `False` Args: object_id (`str`): ID of the job that was executed status (:obj:`SchedulerStatus`): Status of the job Returns: `bool`
codesearchnet
def _CheckPythonModule(self, dependency):
    module_object = self._ImportPythonModule(dependency.name)
    if not module_object:
        status_message = 'missing: {0:s}'.format(dependency.name)
        return False, status_message
    if not dependency.version_property:
        return True, dependency.name
    return self._CheckPythonModuleVersion(
        dependency.name, module_object, dependency.version_property,
        dependency.minimum_version, dependency.maximum_version)
Checks the availability of a Python module. Args: dependency (DependencyDefinition): dependency definition. Returns: tuple: consisting of: bool: True if the Python module is available and conforms to the minimum required version, False otherwise. str: status message.
juraj-google-style
def _ValidateCacheFileMetadataHeader(self, cache_file_metadata_header):
    return (
        cache_file_metadata_header.key_size > 0 and
        cache_file_metadata_header.key_size < self._MAXIMUM_URL_LENGTH and
        cache_file_metadata_header.format_version == 1 and
        cache_file_metadata_header.last_fetched_time > 0 and
        cache_file_metadata_header.fetch_count > 0)
Determines whether the cache file metadata header is valid. Args: cache_file_metadata_header (firefox_cache2_file_metadata_header): cache file metadata header. Returns: bool: True if the cache file metadata header is valid.
juraj-google-style
def replace_model_patterns(text: str, old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns) -> Tuple[str, str]: attributes_to_check = ['config_class'] for attr in ['tokenizer_class', 'image_processor_class', 'image_processor_fast_class', 'feature_extractor_class', 'processor_class']: if getattr(old_model_patterns, attr) is not None and getattr(new_model_patterns, attr) is not None: attributes_to_check.append(attr) if old_model_patterns.checkpoint not in [old_model_patterns.model_type, old_model_patterns.model_lower_cased]: attributes_to_check.append('checkpoint') if old_model_patterns.model_type != old_model_patterns.model_lower_cased: attributes_to_check.append('model_type') else: text = re.sub(f'(\\s*)model_type = "{old_model_patterns.model_type}"', '\\1model_type = "[MODEL_TYPE]"', text) if old_model_patterns.model_upper_cased == old_model_patterns.model_camel_cased: old_model_value = old_model_patterns.model_upper_cased if re.search(f'{old_model_value}_[A-Z_]*[^A-Z_]', text) is not None: text = re.sub(f'{old_model_value}([A-Z_]*)([^a-zA-Z_])', '[MODEL_UPPER_CASED]\\1\\2', text) else: attributes_to_check.append('model_upper_cased') attributes_to_check.extend(['model_camel_cased', 'model_lower_cased', 'model_name']) for attr in attributes_to_check: text = text.replace(getattr(old_model_patterns, attr), ATTRIBUTE_TO_PLACEHOLDER[attr]) replacements = [] for attr, placeholder in ATTRIBUTE_TO_PLACEHOLDER.items(): if placeholder in text: replacements.append((getattr(old_model_patterns, attr), getattr(new_model_patterns, attr))) text = text.replace(placeholder, getattr(new_model_patterns, attr)) old_replacement_values = [old for old, new in replacements] if len(set(old_replacement_values)) != len(old_replacement_values): return (text, '') replacements = simplify_replacements(replacements) replacements = [f'{old}->{new}' for old, new in replacements] return (text, ','.join(replacements))
Replace all patterns present in a given text. Args: text (`str`): The text to treat. old_model_patterns (`ModelPatterns`): The patterns for the old model. new_model_patterns (`ModelPatterns`): The patterns for the new model. Returns: `Tuple(str, str)`: A tuple of with the treated text and the replacement actually done in it.
github-repos
def inflate_plugin_dict(plugin_dict, inflate_plugin):
    plugins = []
    for identifier, definition in plugin_dict.items():
        try:
            plugins.append(inflate_plugin(identifier, definition))
        except PluginNotFoundError as e:
            logger.error('Could not import plugin identified by %s. '
                         'Exception: %s.', identifier, e)
    return plugins
Inflate a list of strings/dictionaries to a list of plugin instances. Args: plugin_dict (dict): a dict of dict. inflate_plugin (method): the method to inflate the plugin. Returns: list: a plugin instances list.
juraj-google-style
def __init__(self, parent=None, **kwargs):
    if not parent:
        raise ValueError('Missing parent value.')
    super(VHDIPathSpec, self).__init__(parent=parent, **kwargs)
Initializes a path specification. Note that the VHDI file path specification must have a parent. Args: parent (Optional[PathSpec]): parent path specification. Raises: ValueError: when parent is not set.
juraj-google-style
def total_surface_energy(self):
    tot_surface_energy = 0
    for hkl in self.miller_energy_dict.keys():
        tot_surface_energy += self.miller_energy_dict[hkl] * self.miller_area_dict[hkl]
    return tot_surface_energy
Total surface energy of the Wulff shape. Returns: (float) sum(surface_energy_hkl * area_hkl)
codesearchnet
def setLCDCmd(self, display_list, password='00000000'):
    result = False
    try:
        self.initLcd()
        item_cnt = len(display_list)
        if (item_cnt > 45) or (item_cnt <= 0):
            ekm_log('LCD item list must have between 1 and 40 items')
            return False
        for display_item in display_list:
            self.addLcdItem(int(display_item))
        result = self.setLCD(password)
    except:
        ekm_log(traceback.format_exc(sys.exc_info()))
    return result
Single call wrapper for LCD set. Wraps :func:`~ekmmeters.V4Meter.setLCD` and associated init and add methods. Args: display_list (list): List composed of :class:`~ekmmeters.LCDItems` password (str): Optional password. Returns: bool: Passthrough from :func:`~ekmmeters.V4Meter.setLCD`
codesearchnet
def as_pil_image(self):
    from PIL import Image
    try:
        bio = BytesIO()
        self._extract_direct(stream=bio)
        bio.seek(0)
        return Image.open(bio)
    except UnsupportedImageTypeError:
        pass
    im = self._extract_transcoded()
    if not im:
        raise UnsupportedImageTypeError(repr(self))
    return im
Extract the image as a Pillow Image, using decompression as necessary Returns: PIL.Image.Image
codesearchnet
def delete_adapter(self, adapter_names: Union[List[str], str]) -> None: check_peft_version(min_version=MIN_PEFT_VERSION) if not self._hf_peft_config_loaded: raise ValueError('No adapter loaded. Please load an adapter first.') from peft.tuners.tuners_utils import BaseTunerLayer if isinstance(adapter_names, str): adapter_names = [adapter_names] missing_adapters = [name for name in adapter_names if name not in self.peft_config] if missing_adapters: raise ValueError(f'The following adapter(s) are not present and cannot be deleted: {', '.join(missing_adapters)}') for adapter_name in adapter_names: for module in self.modules(): if isinstance(module, BaseTunerLayer): if hasattr(module, 'delete_adapter'): module.delete_adapter(adapter_name) else: raise ValueError('The version of PEFT you are using is not compatible, please use a version that is greater than 0.6.1') if getattr(self, '_hf_peft_config_loaded', False) and hasattr(self, 'peft_config'): self.peft_config.pop(adapter_name, None) if len(self.peft_config) == 0: del self.peft_config self._hf_peft_config_loaded = False
Delete an adapter's LoRA layers from the underlying model. Args: adapter_names (`Union[List[str], str]`): The name(s) of the adapter(s) to delete. Example: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights( "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_names="cinematic" ) pipeline.delete_adapters("cinematic") ```
github-repos
def set_default_by_index(self, index):
    if index >= len(self._datasets):
        raise DataInvalidIndex('A dataset with index {} does not exist'.format(index))
    self._default_index = index
Set the default dataset by its index. After changing the default dataset, all calls without explicitly specifying the dataset by index or alias will be redirected to this dataset. Args: index (int): The index of the dataset that should be made the default. Raises: DataInvalidIndex: If the index does not represent a valid dataset.
codesearchnet
def overlapping(self, start, stop):
    for event in self:
        if ((start <= event.begin <= stop or start <= event.end <= stop) or
                (event.begin <= start and event.end >= stop)):
            yield event
Iterates (in chronological order) over every event that has an intersection with the timespan between `start` and `stop` Args: start : (Arrow object) stop : (Arrow object)
juraj-google-style
def pack_rpc_payload(arg_format, args):
    code = _create_respcode(arg_format, args)
    packed_result = struct.pack(code, *args)
    unpacked_validation = struct.unpack(code, packed_result)
    if tuple(args) != unpacked_validation:
        raise RPCInvalidArgumentsError("Passed values would be truncated, "
                                       "please validate the size of your string",
                                       code=code, args=args)
    return packed_result
Pack an RPC payload according to arg_format. Args: arg_format (str): a struct format code (without the <) for the parameter format for this RPC. This format code may include the final character V, which means that it expects a variable length bytearray. args (list): A list of arguments to pack according to arg_format. Returns: bytes: The packed argument buffer.
juraj-google-style
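A usage sketch, assuming `_create_respcode` turns the format into a little-endian struct code (here 'HH', two unsigned 16-bit integers; this is an assumption about the helper, not from the source):
>>> payload = pack_rpc_payload('HH', [1024, 7])
>>> len(payload)
4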
def remove(text, exclude):
    exclude = ''.join(str(symbol) for symbol in exclude)
    return text.translate(str.maketrans('', '', exclude))
Remove ``exclude`` symbols from ``text``. Example: >>> remove("example text", string.whitespace) 'exampletext' Args: text (str): The text to modify exclude (iterable): The symbols to exclude Returns: ``text`` with ``exclude`` symbols removed
juraj-google-style
def decode(self, obj, restype, raw_ptr=False): if raw_ptr: data = obj else: data = cweld.WeldValue(obj).data() result = ctypes.cast(data, ctypes.POINTER(restype.ctype_class)).contents if restype == WeldInt16(): data = cweld.WeldValue(obj).data() result = ctypes.cast(data, ctypes.POINTER(c_int16)).contents.value return result elif restype == WeldInt(): data = cweld.WeldValue(obj).data() result = ctypes.cast(data, ctypes.POINTER(c_int)).contents.value return result elif restype == WeldLong(): data = cweld.WeldValue(obj).data() result = ctypes.cast(data, ctypes.POINTER(c_long)).contents.value return result elif restype == WeldFloat(): data = cweld.WeldValue(obj).data() result = ctypes.cast(data, ctypes.POINTER(c_float)).contents.value return np.float32(result) elif restype == WeldDouble(): data = cweld.WeldValue(obj).data() result = ctypes.cast(data, ctypes.POINTER(c_double)).contents.value return float(result) elif restype == WeldBit(): data = cweld.WeldValue(obj).data() result = ctypes.cast(data, ctypes.POINTER(c_bool)).contents.value return bool(result) if restype == WeldVec(WeldBit()): weld_to_numpy = self.utils.weld_to_numpy_bool_arr elif restype == WeldVec(WeldInt16()): weld_to_numpy = self.utils.weld_to_numpy_int16_arr elif restype == WeldVec(WeldInt()): weld_to_numpy = self.utils.weld_to_numpy_int_arr elif restype == WeldVec(WeldLong()): weld_to_numpy = self.utils.weld_to_numpy_long_arr elif restype == WeldVec(WeldFloat()): weld_to_numpy = self.utils.weld_to_numpy_float_arr elif restype == WeldVec(WeldDouble()): weld_to_numpy = self.utils.weld_to_numpy_double_arr elif restype == WeldVec(WeldVec(WeldChar())): weld_to_numpy = self.utils.weld_to_numpy_char_arr_arr elif restype == WeldVec(WeldVec(WeldInt16())): weld_to_numpy = self.utils.weld_to_numpy_int16_arr_arr elif restype == WeldVec(WeldVec(WeldInt())): weld_to_numpy = self.utils.weld_to_numpy_int_arr_arr elif restype == WeldVec(WeldVec(WeldLong())): weld_to_numpy = self.utils.weld_to_numpy_long_arr_arr elif restype == WeldVec(WeldVec(WeldFloat())): weld_to_numpy = self.utils.weld_to_numpy_float_arr_arr elif restype == WeldVec(WeldVec(WeldDouble())): weld_to_numpy = self.utils.weld_to_numpy_double_arr_arr elif restype == WeldVec(WeldVec(WeldBit())): weld_to_numpy = self.utils.weld_to_numpy_bool_arr_arr elif isinstance(restype, WeldStruct): ret_vecs = [] for field_type in restype.field_types: ret_vec = self.decode(data, field_type, raw_ptr=True) data += sizeof(field_type.ctype_class()) ret_vecs.append(ret_vec) return tuple(ret_vecs) else: raise Exception("Unable to decode; invalid return type") weld_to_numpy.restype = py_object weld_to_numpy.argtypes = [restype.ctype_class] ret_vec = weld_to_numpy(result) return ret_vec
Converts Weld object to Python object. Args: obj: Result of Weld computation that needs to be decoded restype: Type of Weld computation result raw_ptr: Boolean indicating whether obj needs to be extracted from WeldValue or not Returns: Python object representing result of the Weld computation
juraj-google-style
def save_plot(fig, prefile='', postfile='', output_path='./', output_name='Figure',
              output_format='png', dpi=300, transparent=False, **_):
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    output = os.path.join(output_path,
                          prefile + output_name + postfile + '.' + output_format)
    fig.savefig(output, dpi=dpi, transparent=transparent)
Generates a figure file in the selected directory. Args: fig: matplotlib figure prefile(str): Include before the general filename of the figure postfile(str): Included after the general filename of the figure output_path(str): Define the path to the output directory output_name(str): String to define the name of the output figure output_format(str): String to define the format of the output figure dpi(int): Define the DPI (Dots per Inch) of the figure transparent(bool): If True the saved figure will have a transparent background
codesearchnet
def get_autosave_filename(self, filename): try: autosave_filename = self.name_mapping[filename] except KeyError: autosave_dir = get_conf_path('autosave') if not osp.isdir(autosave_dir): try: os.mkdir(autosave_dir) except EnvironmentError as error: action = _('Error while creating autosave directory') msgbox = AutosaveErrorDialog(action, error) msgbox.exec_if_enabled() autosave_filename = self.create_unique_autosave_filename( filename, autosave_dir) self.name_mapping[filename] = autosave_filename self.stack.sig_option_changed.emit( 'autosave_mapping', self.name_mapping) logger.debug('New autosave file name') return autosave_filename
Get name of autosave file for specified file name. This function uses the dict in `self.name_mapping`. If `filename` is in the mapping, then return the corresponding autosave file name. Otherwise, construct a unique file name and update the mapping. Args: filename (str): original file name
juraj-google-style
def __is_noncopyable_single(class_, already_visited_cls_vars=None): logger = utils.loggers.cxx_parser if has_copy_constructor(class_) \ and has_public_constructor(class_) \ and has_public_assign(class_) \ and has_public_destructor(class_): msg = os.linesep.join([ "__is_noncopyable_single - %s - COPYABLE:" % class_.decl_string, " trivial copy constructor: yes", " public constructor: yes", " public assign: yes", " public destructor: yes"]) logger.debug(msg) return False if already_visited_cls_vars is None: already_visited_cls_vars = [] if find_noncopyable_vars(class_, already_visited_cls_vars): logger.debug( ("__is_noncopyable_single(TRUE) - %s - contains noncopyable " + "members"), class_.decl_string) return True logger.debug(( "__is_noncopyable_single(FALSE) - %s - COPYABLE, because is " + "doesn't contains noncopyable members"), class_.decl_string) return False
Implementation detail. Checks if the class is non copyable, without considering the base classes. Args: class_ (declarations.class_t): the class to be checked already_visited_cls_vars (list): optional list of vars that should not be checked a second time, to prevent infinite recursions. Returns: bool: if the class is non copyable
juraj-google-style
def getUserForHost(self, user, host):
    for name in iterFqdnUp(host):
        usercert = '%s@%s' % (user, name)
        if self.isUserCert(usercert):
            return usercert
Gets the name of the first existing user cert for a given user and host. Args: user (str): The name of the user. host (str): The name of the host. Examples: Get the name for the "myuser" user cert at "cool.vertex.link": usercertname = cdir.getUserForHost('myuser', 'cool.vertex.link') Returns: str: The cert name, if exists.
codesearchnet
def trigger_methods(instance, args): for name in sorted(args): value = args[name] target = instance if name.startswith('response_') or name.startswith('reply_'): name = name.replace('response_', '').replace('reply_', '') if hasattr(instance, '_response'): target = instance._response member = getattr(target, name, None) isattr = name in dir(target) iscallable = ismethod(member) and not isfunction(member) if not iscallable and not isattr: raise PookInvalidArgument('Unsupported argument: {}'.format(name)) if iscallable: member(value) else: setattr(target, name, value)
Triggers specific class methods using a simple reflection mechanism based on the given input dictionary params. Arguments: instance (object): target instance to dynamically trigger methods. args (iterable): input arguments to trigger objects to Returns: None
juraj-google-style
def GetCodeObjectAtLine(module, line):
    if not hasattr(module, '__file__'):
        return (False, (None, None))
    prev_line = 0
    next_line = six.MAXSIZE
    for code_object in _GetModuleCodeObjects(module):
        for co_line_number in _GetLineNumbers(code_object):
            if co_line_number == line:
                return (True, code_object)
            elif co_line_number < line:
                prev_line = max(prev_line, co_line_number)
            elif co_line_number > line:
                next_line = min(next_line, co_line_number)
                break
    prev_line = None if prev_line == 0 else prev_line
    next_line = None if next_line == six.MAXSIZE else next_line
    return (False, (prev_line, next_line))
Searches for a code object at the specified line in the specified module. Args: module: module to explore. line: 1-based line number of the statement. Returns: (True, Code object) on success or (False, (prev_line, next_line)) on failure, where prev_line and next_line are the closest lines with code above and below the specified line, or None if they do not exist.
juraj-google-style
def _send_request(self, xml_request):
    if self._scheme == 'http':
        return self._send_http_request(xml_request)
    else:
        return self._send_socket_request(xml_request)
Send the prepared XML request block to the CPS using the correct protocol. Args: xml_request -- A fully formed xml request string for the CPS. Returns: The raw xml response string. Raises: ConnectionError -- Can't establish a connection with the server.
juraj-google-style
def add_spin_by_site(self, spins):
    if len(spins) != len(self.sites):
        raise ValueError("Spin of all sites must be "
                         "specified in the dictionary.")
    for site, spin in zip(self.sites, spins):
        new_sp = {}
        for sp, occu in site.species.items():
            sym = sp.symbol
            oxi_state = getattr(sp, "oxi_state", None)
            new_sp[Specie(sym, oxidation_state=oxi_state,
                          properties={'spin': spin})] = occu
        site.species = new_sp
Add spin states to a structure by site. Args: spins (list): List of spins E.g., [+5, -5, 0, 0]
juraj-google-style
def delete_resource(self, resource, delete=True):
    if isinstance(resource, str):
        if is_valid_uuid(resource) is False:
            raise HDXError('%s is not a valid resource id!' % resource)
    return self._remove_hdxobject(self.resources, resource, delete=delete)
Delete a resource from the dataset and also from HDX by default Args: resource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary delete (bool): Whether to delete the resource from HDX (not just the dataset). Defaults to True. Returns: bool: True if resource removed or False if not
codesearchnet
def human_timestamp(__timestamp: datetime.datetime) -> str:
    numstr = '. a two three four five six seven eight nine ten'.split()
    matches = [
        60 * 60 * 24 * 365,
        60 * 60 * 24 * 28,
        60 * 60 * 24 * 7,
        60 * 60 * 24,
        60 * 60,
        60,
        1,
    ]
    match_names = ['year', 'month', 'week', 'day', 'hour', 'minute', 'second']
    if __timestamp.tzinfo is None:
        __timestamp = __timestamp.replace(tzinfo=datetime.timezone.utc)
    now = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)
    delta = int((now - __timestamp).total_seconds())
    for scale in matches:
        i = delta // scale  # whole units at this scale; the division was lost in extraction
        if i:
            name = match_names[matches.index(scale)]
            break
    else:
        i = 0
    if i == 0:
        result = 'right now'
    elif i == 1 and name in ('year', 'month', 'week'):
        result = 'last {}'.format(name)
    elif i == 1 and name == 'day':
        result = 'yesterday'
    elif i == 1 and name == 'hour':
        result = 'about an hour ago'
    else:
        result = 'about {} {}{} ago'.format(i if i > 10 else numstr[i],
                                            name, 's' if i > 1 else '')
    return result
Format a relative time. Args: __timestamp: Event to generate relative timestamp against Returns: Human readable date and time offset
juraj-google-style
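A quick example, assuming the per-scale integer division shown above:
>>> from datetime import datetime, timedelta, timezone
>>> human_timestamp(datetime.now(timezone.utc) - timedelta(hours=2))
'about two hours ago'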
def _set_typeahead(cls, el, value): PlaceholderHandler.reset_placeholder_dropdown(el) if not value and not el.value: DropdownHandler.set_dropdown_glyph(el.id, "glyphicon-alert") return if len(value) == 1: source = value[0]["source"].strip() dropdown_el = DropdownHandler.set_dropdown_glyph( el.id, "glyphicon-eye-open" ) dropdown_content = "<span class='gray_text'>&nbsp;(%s)</span>" if source: dropdown_el.html = dropdown_content % source[::-1] el.value = value[0]["val"] return parent_id = el.parent.id if "typeahead" not in parent_id.lower(): parent_id = el.parent.parent.id if parent_id in cls._set_by_typeahead: window.destroy_typeahead_tag(" window.make_typeahead_tag(" DropdownHandler.set_dropdown_glyph(el.id, "glyphicon-menu-down") PlaceholderHandler.set_placeholder_dropdown(el) cls._set_by_typeahead.add(parent_id)
Convert given `el` to typeahead input and set it to `value`. This method also sets the dropdown icons and descriptors. Args: el (obj): Element reference to the input you want to convert to typeahead. value (list): List of dicts with two keys: ``source`` and ``val``.
juraj-google-style
def _ParseNamesString(self, names_string):
    if not names_string:
        return
    names_string = names_string.lower()
    names = [name.strip() for name in names_string.split(',')]
    file_entry_filter = file_entry_filters.NamesFileEntryFilter(names)
    self._filter_collection.AddFilter(file_entry_filter)
Parses the name string. Args: names_string (str): comma separated filenames to filter.
juraj-google-style
def single_qubit_matrix_to_gates(
        mat: np.ndarray, tolerance: float = 0) -> List[ops.SingleQubitGate]:
    rotations = single_qubit_matrix_to_pauli_rotations(mat, tolerance)
    return [cast(ops.SingleQubitGate, pauli)**ht for pauli, ht in rotations]
Implements a single-qubit operation with few gates. Args: mat: The 2x2 unitary matrix of the operation to implement. tolerance: A limit on the amount of error introduced by the construction. Returns: A list of gates that, when applied in order, perform the desired operation.
juraj-google-style
def get_upper_triangle(correlation_matrix):
    upper_triangle = correlation_matrix.where(
        np.triu(np.ones(correlation_matrix.shape), k=1).astype(np.bool))
    upper_tri_df = upper_triangle.stack().reset_index(level=1)
    upper_tri_df.columns = ['rid', 'corr']
    upper_tri_df.reset_index(level=0, inplace=True)
    upper_tri_df['corr'] = upper_tri_df['corr'].clip(lower=0)
    return upper_tri_df.round(rounding_precision)
Extract upper triangle from a square matrix. Negative values are set to 0. Args: correlation_matrix (pandas df): Correlations between all replicates Returns: upper_tri_df (pandas df): Upper triangle extracted from correlation_matrix; rid is the row index, cid is the column index, corr is the extracted correlation value
juraj-google-style
def flow2rgb(flow, color_wheel=None, unknown_thr=1e6): assert flow.ndim == 3 and flow.shape[-1] == 2 if color_wheel is None: color_wheel = make_color_wheel() assert color_wheel.ndim == 2 and color_wheel.shape[1] == 3 num_bins = color_wheel.shape[0] dx = flow[:, :, 0].copy() dy = flow[:, :, 1].copy() ignore_inds = (np.isnan(dx) | np.isnan(dy) | (np.abs(dx) > unknown_thr) | (np.abs(dy) > unknown_thr)) dx[ignore_inds] = 0 dy[ignore_inds] = 0 rad = np.sqrt(dx**2 + dy**2) if np.any(rad > np.finfo(float).eps): max_rad = np.max(rad) dx /= max_rad dy /= max_rad [h, w] = dx.shape rad = np.sqrt(dx**2 + dy**2) angle = np.arctan2(-dy, -dx) / np.pi bin_real = (angle + 1) / 2 * (num_bins - 1) bin_left = np.floor(bin_real).astype(int) bin_right = (bin_left + 1) % num_bins w = (bin_real - bin_left.astype(np.float32))[..., None] flow_img = ( 1 - w) * color_wheel[bin_left, :] + w * color_wheel[bin_right, :] small_ind = rad <= 1 flow_img[small_ind] = 1 - rad[small_ind, None] * (1 - flow_img[small_ind]) flow_img[np.logical_not(small_ind)] *= 0.75 flow_img[ignore_inds, :] = 0 return flow_img
Convert flow map to RGB image. Args: flow (ndarray): Array of optical flow. color_wheel (ndarray or None): Color wheel used to map flow field to RGB colorspace. Default color wheel will be used if not specified. unknown_thr (str): Values above this threshold will be marked as unknown and thus ignored. Returns: ndarray: RGB image that can be visualized.
juraj-google-style
def __init__(self, fraction_of_second=None, timestamp=None):
    if pytsk3.TSK_VERSION_NUM >= 0x040200ff:
        precision = dfdatetime_definitions.PRECISION_1_NANOSECOND
    else:
        precision = dfdatetime_definitions.PRECISION_100_NANOSECONDS
    super(TSKTime, self).__init__()
    self._precision = precision
    self._timestamp = timestamp
    self.fraction_of_second = fraction_of_second
Initializes a SleuthKit timestamp. Args: fraction_of_second (Optional[int]): fraction of second, which is an integer that contains the number 100 nano seconds before Sleuthkit 4.2.0 or number of nano seconds in Sleuthkit 4.2.0 and later. timestamp (Optional[int]): POSIX timestamp.
juraj-google-style
def url_is(white_list):
    def func(url):
        prefixes = white_list.get('PREFIXES', ())
        for prefix in prefixes:
            if url.startswith(prefix):
                return True
        constants = white_list.get('CONSTANTS', ())
        for exact_url in constants:
            if url == exact_url:
                return True
        return False
    return func
Function generator. Args: white_list (dict): dict with PREFIXES and CONSTANTS keys (list values). Returns: func: a function that returns True if a URL matches a white-listed prefix or constant.
juraj-google-style
def count(self, event): return (len(self._listeners[event]) + len(self._once[event]))
Get the number of listeners for the event. Args: event (str): The event for which to count all listeners. The resulting count is a combination of listeners added using 'on'/'add_listener' and 'once'.
codesearchnet
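A quick sketch, assuming an emitter exposing the `on` and `once` registration methods the docstring mentions (handler names are hypothetical):
>>> emitter.on('data', handler_a)
>>> emitter.on('data', handler_b)
>>> emitter.once('data', handler_c)
>>> emitter.count('data')
3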
def get(self, tx_id):
    pool = current_app.config['bigchain_pool']
    with pool() as bigchain:
        tx = bigchain.get_transaction(tx_id)
    if not tx:
        return make_error(404)
    return tx.to_dict()
API endpoint to get details about a transaction. Args: tx_id (str): the id of the transaction. Returns: A JSON string containing the data about the transaction.
codesearchnet
def _build(self, input_batch, is_training, test_local_stats=False): input_shape = input_batch.get_shape() if (not self._data_format): if (len(input_shape) == 2): self._data_format = 'NC' elif (len(input_shape) == 3): self._data_format = 'NWC' elif (len(input_shape) == 4): self._data_format = 'NHWC' elif (len(input_shape) == 5): self._data_format = 'NDHWC' else: raise base.IncompatibleShapeError('Input shape {} has too many or too few dimensions.'.format(input_shape)) self._channel_index = self._data_format.index('C') self._axis = list(range(len(self._data_format))) del self._axis[self._channel_index] if (len(self._data_format) != len(input_shape)): raise base.IncompatibleShapeError('Incorrect data format {} for input shape {}.'.format(self._data_format, input_shape)) dtype = input_batch.dtype.base_dtype if (self._fused and (dtype == tf.bfloat16)): raise base.NotSupportedError('Fused batch norm does not support tf.bfloat16.') stat_dtype = (tf.float32 if (dtype in [tf.float16, tf.bfloat16]) else dtype) self._num_channels = int(input_shape[self._channel_index]) if (self._channel_index == 1): self._image_shape = [int(x) for x in input_shape[2:]] else: self._image_shape = [int(x) for x in input_shape[1:(- 1)]] self._expanded_mean_shape = ([1] * len(input_shape)) self._expanded_mean_shape[self._channel_index] = self._num_channels use_batch_stats = (is_training | test_local_stats) (mean, variance) = self._build_statistics(input_batch, use_batch_stats, stat_dtype) self._build_scale_offset(dtype) (out, mean, variance) = self._batch_norm_op(input_batch, mean, variance, use_batch_stats, stat_dtype) update_ops = self._build_update_ops(mean, variance, is_training) if update_ops: if self._update_ops_collection: for update_op in update_ops: tf.add_to_collection(self._update_ops_collection, update_op) else: with tf.control_dependencies(update_ops): out = tf.identity(out) return out
Connects the BatchNormV2 module into the graph. Args: input_batch: A Tensor of the same dimension as `len(data_format)`. is_training: A boolean to indicate if the module should be connected in training mode, meaning the moving averages are updated. Can be a Tensor. test_local_stats: A boolean to indicate if local batch statistics should be used when `is_training=False`. If not, moving averages are used. By default `False`. Can be a Tensor. Returns: A tensor with the same shape as `input_batch`. Raises: base.IncompatibleShapeError: If `data_format` is not valid for the input shape. base.NotSupportedError: If `input_batch` has data type of `tf.bfloat16`.
codesearchnet
def _ValidateFSM(self):
    if 'Start' not in self.states:
        raise TextFSMTemplateError("Missing state 'Start'.")
    if self.states.get('End'):
        raise TextFSMTemplateError("Non-Empty 'End' state.")
    if self.states.get('EOF'):
        raise TextFSMTemplateError("Non-Empty 'EOF' state.")
    if 'End' in self.states:
        del self.states['End']
        self.state_list.remove('End')
    for state in self.states:
        for rule in self.states[state]:
            if rule.line_op == 'Error':
                continue
            if (not rule.new_state) or (rule.new_state in ('End', 'EOF')):
                continue
            if rule.new_state not in self.states:
                raise TextFSMTemplateError(
                    "State '%s' not found, referenced in state '%s'" %
                    (rule.new_state, state))
    return True
Checks state names and destinations for validity. Each destination state must exist, be a valid name and not be a reserved name. There must be a 'Start' state and if 'EOF' or 'End' states are specified, they must be empty. Returns: True if FSM is valid. Raises: TextFSMTemplateError: If any state definitions are invalid.
codesearchnet
def PackTag(field_number, wire_type):
    if not 0 <= wire_type <= _WIRETYPE_MAX:
        raise message.EncodeError('Unknown wire type: %d' % wire_type)
    return (field_number << TAG_TYPE_BITS) | wire_type
Returns an unsigned 32-bit integer that encodes the field number and wire type information in standard protocol message wire format. Args: field_number: Expected to be an integer in the range [1, 1 << 29) wire_type: One of the WIRETYPE_* constants.
juraj-google-style
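A worked example of the bit layout (TAG_TYPE_BITS is 3 in the protobuf wire format):
>>> PackTag(field_number=1, wire_type=0)  # varint field 1: (1 << 3) | 0
8
>>> PackTag(field_number=2, wire_type=2)  # length-delimited field 2: (2 << 3) | 2
18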
def assignees(self, assignee=None, resource_id=None):
    if resource_id is not None:
        self.resource_id(resource_id)
    self._request_uri = '{}/assignees'.format(self._request_uri)
    if assignee is not None:
        self._request_uri = '{}/{}'.format(self._request_uri, assignee)
Add an assignee to a Task GET: /v2/tasks/{uniqueId}/assignees GET: /v2/tasks/{uniqueId}/assignees/{assigneeId} POST: /v2/tasks/{uniqueId}/assignees/{assigneeId} DELETE: /v2/tasks/{uniqueId}/assignees/{assigneeId} Args: assignee (Optional [string]): The assignee name. resource_id (Optional [string]): The task ID.
juraj-google-style
def ContainsAny(self, *values):
    self._awql = self._CreateMultipleValuesCondition(values, 'CONTAINS_ANY')
    return self._query_builder
Sets the type of the WHERE clause as "contains any". Args: *values: The values to be used in the WHERE condition. Returns: The query builder that this WHERE builder links to.
codesearchnet
def _ReadAttributeValueInteger(self, attribute_values_data, record_offset,
                               attribute_values_data_offset,
                               attribute_value_offset):
    if attribute_value_offset == 0:
        return None
    data_type_map = self._GetDataTypeMap('uint32be')
    file_offset = (record_offset + attribute_values_data_offset +
                   attribute_value_offset)
    attribute_value_offset -= attribute_values_data_offset + 1
    attribute_value_data = attribute_values_data[attribute_value_offset:]
    try:
        return self._ReadStructureFromByteStream(
            attribute_value_data, file_offset, data_type_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.ParseError(
            'Unable to map integer attribute value data at offset: 0x{0:08x} '
            'with error: {1!s}'.format(file_offset, exception))
Reads an integer attribute value. Args: attribute_values_data (bytes): attribute values data. record_offset (int): offset of the record relative to the start of the file. attribute_values_data_offset (int): offset of the attribute values data relative to the start of the record. attribute_value_offset (int): offset of the attribute relative to the start of the record. Returns: int: integer value or None if attribute value offset is not set. Raises: ParseError: if the attribute value cannot be read.
codesearchnet
def assertOutputStateMatches(self, **has_output): output_types = {'stdout', 'stderr', 'returncode'} assert len(output_types) == len(has_output) for output_type in output_types: output_value = getattr(self, output_type) if has_output[output_type]: self.assertTrue(output_value, output_type + ' unexpectedly empty') else: value = str(output_value) if len(value) > 50: value = value[:47] + '...' self.assertFalse(output_value, f'Unexpected output to {output_type}: {value!r}')
Check that the output state matches expectations. If, for example, you expect the program to print something to stdout and nothing to stderr before exiting with an error code, you would write assertOutputStateMatches(stdout=True, stderr=False, returncode=True). Args: **has_output: Whether each output type should have output.
github-repos
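For instance, a test for a program expected to fail with a usage error could check the output state as below; the base class and `run_program` helper are assumptions for illustration:

class CliErrorTest(ProgramTestBase):  # hypothetical base class that populates
    # self.stdout, self.stderr and self.returncode after running the program

    def test_bad_flag_reports_error(self):
        self.run_program(['--no-such-flag'])  # assumed helper
        # A usage error should print to stderr, exit nonzero, and stay quiet on stdout.
        self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)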
def assertDictEqual(self, a, b, msg=None): try: super().assertDictEqual(a, b, msg) except Exception: self.assertSameElements(a.keys(), b.keys()) for k, v in a.items(): a_k, b_k = self.evaluate_if_both_tensors(v, b[k]) a_k = self._GetNdArray(a_k) b_k = self._GetNdArray(b_k) if np.issubdtype(a_k.dtype, np.floating): self.assertAllClose(v, b[k], msg=k) else: self.assertAllEqual(v, b[k], msg=k)
Assert that two given dictionaries of tensors are the same. Args: a: Expected dictionary with numpy ndarray or anything else that can be converted to one as values. b: Actual dictionary with numpy ndarray or anything else that can be converted to one as values. msg: Optional message to report on failure.
github-repos
def _DownloadAuthUrl(self, url, dest_dir): dest_file = tempfile.NamedTemporaryFile(dir=dest_dir, delete=False) dest_file.close() dest = dest_file.name self.logger.info('Downloading url from %s to %s using authentication token.', url, dest) if (not self.token): response = self.watcher.GetMetadata(self.token_metadata_key, recursive=False, retry=False) if (not response): self.logger.info('Authentication token not found. Attempting unauthenticated download.') return self._DownloadUrl(url, dest_dir) self.token = ('%s %s' % (response.get('token_type', ''), response.get('access_token', ''))) try: request = urlrequest.Request(url) request.add_unredirected_header('Metadata-Flavor', 'Google') request.add_unredirected_header('Authorization', self.token) content = urlrequest.urlopen(request).read() except (httpclient.HTTPException, socket.error, urlerror.URLError) as e: self.logger.warning('Could not download %s. %s.', url, str(e)) return None with open(dest, 'wb') as f: f.write(content) return dest
Download a Google Storage URL using an authentication token. If the token cannot be fetched, fallback to unauthenticated download. Args: url: string, the URL to download. dest_dir: string, the path to a directory for storing metadata scripts. Returns: string, the path to the file storing the metadata script.
codesearchnet
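The token fetch follows the standard GCE metadata-server flow, so the equivalent manual steps look roughly like this sketch; the hard-coded TOKEN_URL is an assumption (the class reads the key from self.token_metadata_key instead):

import json
import urllib.request

# Standard GCE metadata endpoint for a service-account access token (assumed key).
TOKEN_URL = ('http://metadata.google.internal/computeMetadata/v1/'
             'instance/service-accounts/default/token')

request = urllib.request.Request(TOKEN_URL, headers={'Metadata-Flavor': 'Google'})
response = json.loads(urllib.request.urlopen(request).read())
auth_header = '%s %s' % (response.get('token_type', ''), response.get('access_token', ''))
# auth_header (e.g. "Bearer ya29...") is then attached as the Authorization
# header of the storage download request.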
def store_unspent_outputs(self, *unspent_outputs): if unspent_outputs: return backend.query.store_unspent_outputs( self.connection, *unspent_outputs)
Store the given ``unspent_outputs`` (utxos). Args: *unspent_outputs (:obj:`tuple` of :obj:`dict`): Variable length tuple or list of unspent outputs.
juraj-google-style
def orbit(self, orbit): self._orbit = orbit tle = Tle.from_orbit(orbit) lines = tle.text.splitlines() if len(lines) == 3: _, line1, line2 = lines else: line1, line2 = lines self.tle = twoline2rv(line1, line2, wgs72)
Initialize the propagator Args: orbit (Orbit)
juraj-google-style
def prelu(inp, base_axis=1, shared=True, fix_parameters=False): shape = (tuple() if shared else (inp.shape[base_axis],)) w = get_parameter_or_create('slope', shape, ConstantInitializer((- 1)), True, (not fix_parameters)) return F.prelu(inp, w, base_axis)
Parametrized Rectified Linear Unit function defined as .. math:: y_i = \max(0, x_i) + w_i \min(0, x_i) where the negative slope :math:`w` is learned and can vary across channels (an axis specified with base_axis). Weights are initialized with :math:`-1`. Args: x(~nnabla.Variable): N-D array as input base_axis(int): Dimensions up to base_axis is treated as sample dimension. shared(bool): Use shared weight value or not fix_parameters (bool): When set to `True`, the negative slope values will not be updated. Returns: ~nnabla.Variable: N-D array.
codesearchnet
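Plugging numbers into the formula makes the slope's role concrete; a minimal scalar sketch of the same math:

def prelu_scalar(x, w):
    # y = max(0, x) + w * min(0, x)
    return max(0.0, x) + w * min(0.0, x)

assert prelu_scalar(2.0, 0.25) == 2.0    # positive inputs pass through unchanged
assert prelu_scalar(-2.0, 0.25) == -0.5  # negative inputs are scaled by the slope
assert prelu_scalar(-2.0, -1.0) == 2.0   # the initial slope of -1 yields |x|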
def get(self, profile_id): if (profile_id not in self._profiles): try: self._profiles[profile_id] = self._get_profile(profile_id) except (ValueError, IOError) as e: six.raise_from(RegistryError(e), e) return self._profiles[profile_id]
Returns the profile with the received ID as a dict If a local copy of the profile exists, it'll be returned. If not, it'll be downloaded from the web. The results are cached, so any subsequent calls won't hit the filesystem or the web. Args: profile_id (str): The ID of the profile you want. Raises: RegistryError: If there was some problem opening the profile file or its format was incorrect.
codesearchnet
def add_bonds(self, neighbors, center, color=None, opacity=None, radius=0.1): points = vtk.vtkPoints() points.InsertPoint(0, center.x, center.y, center.z) n = len(neighbors) lines = vtk.vtkCellArray() for i in range(n): points.InsertPoint(i + 1, neighbors[i].coords) lines.InsertNextCell(2) lines.InsertCellPoint(0) lines.InsertCellPoint(i + 1) pd = vtk.vtkPolyData() pd.SetPoints(points) pd.SetLines(lines) tube = vtk.vtkTubeFilter() if vtk.VTK_MAJOR_VERSION <= 5: tube.SetInputConnection(pd.GetProducerPort()) else: tube.SetInputData(pd) tube.SetRadius(radius) mapper = vtk.vtkPolyDataMapper() mapper.SetInputConnection(tube.GetOutputPort()) actor = vtk.vtkActor() actor.SetMapper(mapper) if opacity is not None: actor.GetProperty().SetOpacity(opacity) if color is not None: actor.GetProperty().SetColor(color) self.ren.AddActor(actor)
Adds bonds for a site. Args: neighbors: Neighbors of the site. center: The site in the center for all bonds. color: Color of the tubes representing the bonds opacity: Opacity of the tubes representing the bonds radius: Radius of the tubes representing the bonds
juraj-google-style
def notify(self, method, params=None): log.debug('Sending notification: %s %s', method, params) message = { 'jsonrpc': JSONRPC_VERSION, 'method': method, } if params is not None: message['params'] = params self._consumer(message)
Send a JSON RPC notification to the client. Args: method (str): The method name of the notification to send params (any): The payload of the notification
juraj-google-style
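Because JSON-RPC 2.0 notifications carry no id field, the message handed to the consumer is just the method plus optional params; a sketch with a stand-in consumer (the method name below is an LSP example, not required by this class):

import json

def consumer(message):  # stand-in consumer that just prints the wire message
    print(json.dumps(message))

# With this consumer, server.notify('window/logMessage', {'type': 3, 'message': 'ready'})
# would emit:
#   {"jsonrpc": "2.0", "method": "window/logMessage", "params": {"type": 3, "message": "ready"}}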
def read_single_knmi_file(filename): hourly_data_obs_raw = pd.read_csv(filename, parse_dates=[['YYYYMMDD', 'HH']], date_parser=(lambda yyyymmdd, hh: pd.datetime(int(str(yyyymmdd)[0:4]), int(str(yyyymmdd)[4:6]), int(str(yyyymmdd)[6:8]), (int(hh) - 1))), skiprows=31, skipinitialspace=True, na_values='', keep_date_col=True) hourly_data_obs_raw.index = hourly_data_obs_raw['YYYYMMDD_HH'] hourly_data_obs_raw.index = (hourly_data_obs_raw.index + pd.Timedelta(hours=1)) columns_hourly = ['temp', 'precip', 'glob', 'hum', 'wind', 'ssd'] hourly_data_obs = pd.DataFrame(index=hourly_data_obs_raw.index, columns=columns_hourly, data=dict(temp=((hourly_data_obs_raw['T'] / 10) + 273.15), precip=(hourly_data_obs_raw['RH'] / 10), glob=((hourly_data_obs_raw['Q'] * 10000) / 3600.0), hum=hourly_data_obs_raw['U'], wind=(hourly_data_obs_raw['FH'] / 10), ssd=(hourly_data_obs_raw['SQ'] * 6))) negative_values = (hourly_data_obs['precip'] < 0.0) hourly_data_obs.loc[(negative_values, 'precip')] = 0.0 return hourly_data_obs
Reads a single file of KNMI's meteorological time series. Data availability: www.knmi.nl/nederland-nu/klimatologie/uurgegevens Args: filename: the file to be opened Returns: pandas data frame including time series
codesearchnet
def SetServerInformation(self, server, port): self._host = server self._port = port logger.debug('Elasticsearch server: {0!s} port: {1:d}'.format(server, port))
Set the server information. Args: server (str): IP address or hostname of the server. port (int): Port number of the server.
codesearchnet
def _add_genotypes(self, variant_obj, gemini_variant, case_id, individual_objs): for ind in individual_objs: index = ind.ind_index variant_obj.add_individual(Genotype( sample_id=ind.ind_id, genotype=gemini_variant['gts'][index], case_id=case_id, phenotype=ind.phenotype, ref_depth=gemini_variant['gt_ref_depths'][index], alt_depth=gemini_variant['gt_alt_depths'][index], depth=gemini_variant['gt_depths'][index], genotype_quality=gemini_variant['gt_quals'][index] ))
Add the genotypes for a variant for all individuals Args: variant_obj (puzzle.models.Variant) gemini_variant (GeminiQueryRow): The gemini variant case_id (str): related case id individual_objs (list(dict)): A list of Individuals
juraj-google-style
def move_file(src, dest): try: os.replace(src, dest) except Exception as ex_replace: logger.error(f'error moving file {src} to {dest}. {ex_replace}') raise
Move source file to destination. Overwrites dest. Args: src: str or path-like. source file dest: str or path-like. destination file Returns: None. Raises: FileNotFoundError: out path parent doesn't exist. OSError: if any IO operations go wrong.
codesearchnet
def get_pair(self, term1, term2): key = self.key(term1, term2) return self.pairs.get(key, None)
Get the value for a pair of terms. Args: term1 (str) term2 (str) Returns: The stored value.
juraj-google-style
def create_initial(self, address_values): with self._lock: for (add, val) in address_values: self._state[add] = _ContextFuture(address=add, result=val)
Create futures from inputs with the current value for that address at the start of that context. Args: address_values (list of tuple): The tuple is string, bytes of the address and value.
codesearchnet
def CopyToIsoFormat(cls, timestamp, timezone=pytz.UTC, raise_error=False): datetime_object = cls.CopyToDatetime(timestamp, timezone, raise_error=raise_error) return datetime_object.isoformat()
Copies the timestamp to an ISO 8601 formatted string. Args: timestamp: The timestamp which is an integer containing the number of microseconds since January 1, 1970, 00:00:00 UTC. timezone: Optional timezone (instance of pytz.timezone). raise_error: Boolean that if set to True will not absorb an OverflowError if the timestamp is out of bounds. By default there will be no error raised. Returns: A string containing an ISO 8601 formatted date and time.
codesearchnet
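Since the timestamp counts microseconds since the POSIX epoch, the conversion is equivalent to this standalone sketch:

import datetime

def micro_to_iso(timestamp, tz=datetime.timezone.utc):
    # Split microseconds into whole seconds plus the sub-second remainder.
    seconds, micros = divmod(timestamp, 1000000)
    return datetime.datetime.fromtimestamp(seconds, tz).replace(microsecond=micros).isoformat()

assert micro_to_iso(0) == '1970-01-01T00:00:00+00:00'
assert micro_to_iso(1500000000123456) == '2017-07-14T02:40:00.123456+00:00'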
def download_from_s3(context): target_file = context.solid_config['target_file'] return context.resources.download_manager.download_file_contents(context, target_file)
Download an object from s3. Args: context: Must expose a `download_manager` resource and a `target_file` entry in its solid config. Returns: The downloaded object's contents, as returned by the download manager's `download_file_contents`.
juraj-google-style
def load(cls, pkid_or_path=None): path = pkid_or_path if isinstance(path, (int, np.int32, np.int64)): raise NotImplementedError('Lookup via CMS not implemented.') elif (not os.path.isfile(path)): raise FileNotFoundError('File {} not found.'.format(path)) kwargs = {} fields = defaultdict(dict) with pd.HDFStore(path) as store: for key in store.keys(): if ('kwargs' in key): kwargs.update(store.get_storer(key).attrs.metadata) elif ('FIELD' in key): (name, dname) = '_'.join(key.split('_')[1:]).split('/') dname = dname.replace('values', '') fields[name][dname] = store[key] else: name = str(key[1:]) kwargs[name] = store[key] for (name, field_data) in fields.items(): fps = field_data.pop('data') kwargs[name] = Field(fps, field_values=[field_data[str(arr)] for arr in sorted(map(int, field_data.keys()))]) return cls(**kwargs)
Load a container object from a persistent location or file path. Args: pkid_or_path: Integer pkid corresponding to the container table or file path Returns: container: The saved container object
codesearchnet
def GetMetadata(fn) -> Dict[str, Any]: default = {ACCEPTS_POSITIONAL_ARGS: inspect.isroutine(fn)} try: metadata = getattr(fn, FIRE_METADATA, default) if ACCEPTS_POSITIONAL_ARGS in metadata: return metadata else: return default except: return default
Gets metadata attached to the function `fn` as an attribute. Args: fn: The function from which to retrieve the function metadata. Returns: A dictionary mapping property strings to their value.
github-repos
def process_tree_files(tree): config.LOGGER.info("Processing content...") files_to_diff = tree.process_tree(tree.channel) config.SUSHI_BAR_CLIENT.report_statistics(files_to_diff, topic_count=tree.channel.get_topic_count()) tree.check_for_files_failed() return files_to_diff, config.FAILED_FILES
process_tree_files: Download files from nodes Args: tree (ChannelManager): manager to handle communication to Kolibri Studio Returns: tuple: (files_to_diff, config.FAILED_FILES)
juraj-google-style
def ReadClientPostingLists(self, keywords): start_time, filtered_keywords = self._AnalyzeKeywords(keywords) return data_store.REL_DB.ListClientsForKeywords( filtered_keywords, start_time=start_time)
Looks up all clients associated with any of the given keywords. Args: keywords: A list of keywords we are interested in. Returns: A dict mapping each keyword to a list of matching clients.
juraj-google-style
def _run_test_class(self, config, test_class, tests=None): test_instance = test_class(config) logging.debug('Executing test class "%s" with config: %s', test_class.__name__, config) try: cls_result = test_instance.run(tests) self.results += cls_result except signals.TestAbortAll as e: self.results += e.results raise e
Instantiates and executes a test class. If tests is None, the tests listed in self.tests will be executed instead. If self.tests is empty as well, every test in this test class will be executed. Args: config: A config_parser.TestRunConfig object. test_class: class, test class to execute. tests: Optional list of test names within the class to execute.
github-repos
def update_info(self, custom=None): self.figure.suptitle((self.info_string() if (custom is None) else custom))
Updates the figure's suptitle. Calls self.info_string() unless custom is provided. Args: custom: Overwrite it with this string, unless None.
codesearchnet
def getcallargs(*func_and_positional, **named): func = func_and_positional[0] positional = func_and_positional[1:] argspec = getfullargspec(func) call_args = named.copy() this = getattr(func, 'im_self', None) or getattr(func, '__self__', None) if ismethod(func) and this: positional = (this,) + positional remaining_positionals = [arg for arg in argspec.args if arg not in call_args] call_args.update(dict(zip(remaining_positionals, positional))) default_count = 0 if not argspec.defaults else len(argspec.defaults) if default_count: for arg, value in zip(argspec.args[-default_count:], argspec.defaults): if arg not in call_args: call_args[arg] = value if argspec.kwonlydefaults is not None: for k, v in argspec.kwonlydefaults.items(): if k not in call_args: call_args[k] = v return call_args
TFDecorator-aware replacement for inspect.getcallargs. Args: *func_and_positional: A callable, possibly decorated, followed by any positional arguments that would be passed to `func`. **named: The named argument dictionary that would be passed to `func`. Returns: A dictionary mapping `func`'s named arguments to the values they would receive if `func(*positional, **named)` were called. `getcallargs` will use the argspec from the outermost decorator that provides it. If no attached decorators modify argspec, the final unwrapped target's argspec will be used.
github-repos
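A short example shows how defaults, including keyword-only defaults, are filled in; `scale` is a throwaway function defined here for illustration:

def scale(value, factor=2, *, clamp=None):
    return value * factor if clamp is None else min(value * factor, clamp)

# Omitted arguments are resolved from the signature's defaults.
assert getcallargs(scale, 10) == {'value': 10, 'factor': 2, 'clamp': None}
assert getcallargs(scale, 10, 3, clamp=25) == {'value': 10, 'factor': 3, 'clamp': 25}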
def parse_node_or_tensor_name(name): if ':' in name and (not name.endswith(':')): node_name = name[:name.rfind(':')] output_slot = int(name[name.rfind(':') + 1:]) return (node_name, output_slot) else: return (name, None)
Get the node name from a string that can be node or tensor name. Args: name: An input node name (e.g., "node_a") or tensor name (e.g., "node_a:0"), as a str. Returns: 1) The node name, as a str. If the input name is a tensor name, i.e., consists of a colon, the final colon and the following output slot will be stripped. 2) If the input name is a tensor name, the output slot, as an int. If the input name is not a tensor name, None.
github-repos
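A few concrete inputs make the split rule clear (using the function defined above):

assert parse_node_or_tensor_name('hidden/MatMul') == ('hidden/MatMul', None)
assert parse_node_or_tensor_name('hidden/MatMul:0') == ('hidden/MatMul', 0)
# A trailing colon does not count as a slot separator:
assert parse_node_or_tensor_name('oddly_named:') == ('oddly_named:', None)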
def check_unused(intersection, duplicates, intersections): for other in intersections: if ((other.interior_curve == UNUSED_T) and (intersection.index_first == other.index_first) and (intersection.index_second == other.index_second)): if ((intersection.s == 0.0) and (other.s == 0.0)): duplicates.append(intersection) return True if ((intersection.t == 0.0) and (other.t == 0.0)): duplicates.append(intersection) return True return False
Check if a "valid" ``intersection`` is already in ``intersections``. This assumes that * ``intersection`` will have at least one of ``s == 0.0`` or ``t == 0.0`` * At least one of the intersections in ``intersections`` is classified as ``COINCIDENT_UNUSED``. Args: intersection (.Intersection): An intersection to be added. duplicates (List[.Intersection]): List of duplicate intersections. intersections (List[.Intersection]): List of "accepted" (i.e. non-duplicate) intersections. Returns: bool: Indicates if the ``intersection`` is a duplicate.
codesearchnet
def match_regex(self, regex: Pattern, required: bool=False, meaning: str='') -> str: mo = regex.match(self.input, self.offset) if mo: self.offset = mo.end() return mo.group() if required: raise UnexpectedInput(self, meaning)
Parse input based on a regular expression. Args: regex: Compiled regular expression object. required: Should the exception be raised on unexpected input? meaning: Meaning of `regex` (for use in error messages). Returns: The matched string, or ``None`` if there is no match and ``required`` is false. Raises: UnexpectedInput: If the input does not match and ``required`` is true.
codesearchnet
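A usage sketch, assuming a parser object `p` exposing `input` and `offset` as in the snippet above:

import re

NUMBER = re.compile('[0-9]+')

# Assuming `p` is a parser instance positioned at the start of "42 apples":
#   p.match_regex(NUMBER)                         # returns '42', advances p.offset to 2
#   p.match_regex(NUMBER)                         # no match at the space: returns None
#   p.match_regex(NUMBER, required=True,
#                 meaning='number')               # no match: raises UnexpectedInput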
def load_sample(self, file_path, tags=None): if not tags: print '\n%sRequired: Add a list of tags when you load samples (put \'unknown\' if you must).\n\t%sExamples: [\'bad\'], [\'good\'], [\'bad\',\'aptz13\']%s' % (color.Yellow, color.Green, color.Normal) return if os.path.isdir(file_path): file_list = self._all_files_in_directory(file_path) else: file_list = [file_path] md5_list = [] for path in file_list: with open(path, 'rb') as my_file: raw_bytes = my_file.read() md5 = hashlib.md5(raw_bytes).hexdigest() if not self.workbench.has_sample(md5): print '%sStreaming Sample...%s' % (color.LightPurple, color.Normal) basename = os.path.basename(path) md5 = self.streamer.stream_to_workbench(raw_bytes, basename, 'unknown', tags) print '\n%s %s%s %sLocked and Loaded...%s\n' % (self.beer, color.LightPurple, md5[:6], color.Yellow, color.Normal) self.workbench.add_tags(md5, tags) md5_list.append(md5) set_md5 = self.workbench.store_sample_set(md5_list) self.pivot(set_md5, '_'.join(tags)) self.tags()
Load a sample (or samples) into workbench Args: file_path: path to a file or directory tags (optional): a list of tags for the sample/samples ['bad','aptz13'] Returns: The list of md5s for all samples
juraj-google-style
def get_tensor_filter(self, filter_name): if filter_name not in self._tensor_filters: raise ValueError('There is no tensor filter named "%s"' % filter_name) return self._tensor_filters[filter_name]
Retrieve filter function by name. Args: filter_name: Name of the filter set during add_tensor_filter() call. Returns: The callable associated with the filter name. Raises: ValueError: If there is no tensor filter of the specified filter name.
github-repos
def get_by_provider_display_name(self, provider_display_name): san_managers = self._client.get_all() result = [x for x in san_managers if (x['providerDisplayName'] == provider_display_name)] return (result[0] if result else None)
Gets a SAN Manager by provider display name. Args: provider_display_name: Name of the Provider Display Name Returns: dict: SAN Manager.
codesearchnet
def pull_file(self, remote_source, local_dir): local_dest = local_dir + '/' + os.path.basename(remote_source) try: os.makedirs(local_dir) except OSError as e: if e.errno != errno.EEXIST: logger.exception("Failed to create local_dir: {0}".format(local_dir)) raise BadScriptPath(e, self.hostname) if os.path.exists(local_dest): logger.exception("Remote file copy will overwrite a local file:{0}".format(local_dest)) raise FileExists(None, self.hostname, filename=local_dest) try: self.sftp_client.get(remote_source, local_dest) except Exception as e: logger.exception("File pull failed") raise FileCopyException(e, self.hostname) return local_dest
Transfer a file from the remote side to a local directory Args: - remote_source (string): remote_source - local_dir (string): Local directory to copy to Returns: - str: Local path to file Raises: - FileExists : Name collision at local directory. - FileCopyException : FileCopy failed.
juraj-google-style
def _HandleHashAnalysis(self, hash_analysis): tags = [] labels = self.GenerateLabels(hash_analysis.hash_information) path_specifications = self._hash_pathspecs.pop(hash_analysis.subject_hash) for path_specification in path_specifications: event_identifiers = self._event_identifiers_by_pathspec.pop(path_specification, []) if (not labels): continue for event_identifier in event_identifiers: event_tag = events.EventTag(comment=self._comment) event_tag.SetEventIdentifier(event_identifier) event_tag.AddLabels(labels) tags.append(event_tag) return (path_specifications, labels, tags)
Deals with the results of the analysis of a hash. This method ensures that labels are generated for the hash, then tags all events derived from files with that hash. Args: hash_analysis (HashAnalysis): hash analysis plugin's results for a given hash. Returns: tuple: containing: list[dfvfs.PathSpec]: pathspecs that had the hash value looked up. list[str]: labels that corresponds to the hash value that was looked up. list[EventTag]: event tags for all events that were extracted from the path specifications.
codesearchnet
def call(self, input_ids: Optional[tf.Tensor]=None, position_ids: Optional[tf.Tensor]=None, token_type_ids: Optional[tf.Tensor]=None, inputs_embeds: Optional[tf.Tensor]=None, mask: Optional[tf.Tensor]=None, training: bool=False) -> tf.Tensor: if input_ids is None and inputs_embeds is None: raise ValueError('Need to provide either `input_ids` or `input_embeds`.') if input_ids is not None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) if position_ids is None: position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0) final_embeddings = inputs_embeds if self.position_biased_input: position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) final_embeddings += position_embeds if self.config.type_vocab_size > 0: token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) final_embeddings += token_type_embeds if self.embedding_size != self.hidden_size: final_embeddings = self.embed_proj(final_embeddings) final_embeddings = self.LayerNorm(final_embeddings) if mask is not None: if len(shape_list(mask)) != len(shape_list(final_embeddings)): if len(shape_list(mask)) == 4: mask = tf.squeeze(tf.squeeze(mask, axis=1), axis=1) mask = tf.cast(tf.expand_dims(mask, axis=2), dtype=self.compute_dtype) final_embeddings = final_embeddings * mask final_embeddings = self.dropout(final_embeddings, training=training) return final_embeddings
Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor.
github-repos