Columns: code (string, lengths 20 to 4.93k) · docstring (string, lengths 33 to 1.27k) · source (string, 3 classes)
def load_b26_file(file_name): assert os.path.exists(file_name) with open(file_name, 'r') as infile: data = yaml.safe_load(infile) return data
loads a .b26 file into a dictionary Args: file_name: Returns: dictionary with keys instrument, scripts, probes
juraj-google-style
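A minimal round-trip sketch of the loader above, assuming .b26 files are plain YAML (as the function implies) and that PyYAML is installed; the file name and keys written here are only illustrative.
import os
import tempfile
import yaml

def load_b26_file(file_name):
    # copied from the entry above so the sketch is runnable on its own
    assert os.path.exists(file_name)
    with open(file_name, 'r') as infile:
        data = yaml.safe_load(infile)
    return data

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, 'example.b26')
    with open(path, 'w') as f:
        yaml.safe_dump({'instruments': {}, 'scripts': {}, 'probes': {}}, f)
    print(load_b26_file(path))  # {'instruments': {}, 'scripts': {}, 'probes': {}}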
def append_from_list(self, content, fill_title=False): row_index = 0 for row in content: tr = TableRow() column_index = 0 for item in row: if row_index == 0 and fill_title: ti = TableTitle(item) else: ti = TableItem(item) tr.append(ti, str(column_index)) column_index = column_index + 1 self.append(tr, str(row_index)) row_index = row_index + 1
Appends rows created from the data contained in the provided list of tuples of strings. The first tuple of the list can be set as table title. Args: content (list): list of tuples of strings. Each tuple is a row. fill_title (bool): if true, the first tuple in the list will be set as title.
juraj-google-style
def struct_member_error(err, sid, name, offset, size): (exception, msg) = STRUCT_ERROR_MAP[err] struct_name = idc.GetStrucName(sid) return exception('AddStructMember(struct="{}", member="{}", offset={}, size={}) failed: {}'.format(struct_name, name, offset, size, msg))
Create and format a struct member exception. Args: err: The error value returned from struct member creation sid: The struct id name: The member name offset: Member offset size: Member size Returns: A ``SarkErrorAddStructMemeberFailed`` derivative exception, with an informative message.
codesearchnet
def validate_language_key(obj, key): backend = bigchaindb.config['database']['backend'] if backend == 'localmongodb': data = obj.get(key, {}) if isinstance(data, dict): validate_all_values_for_key_in_obj(data, 'language', validate_language) elif isinstance(data, list): validate_all_values_for_key_in_list(data, 'language', validate_language)
Validate all nested "language" keys in `obj`. Args: obj (dict): dictionary whose "language" key is to be validated. Returns: None: validation successful Raises: ValidationError: raised if a language is not valid.
juraj-google-style
def split_input(cls, mapper_spec, _reader=blobstore.BlobReader): params = _get_params(mapper_spec) blob_key = params[cls.BLOB_KEY_PARAM] zip_input = zipfile.ZipFile(_reader(blob_key)) zfiles = zip_input.infolist() total_size = sum((x.file_size for x in zfiles)) num_shards = min(mapper_spec.shard_count, cls._MAX_SHARD_COUNT) size_per_shard = (total_size // num_shards) shard_start_indexes = [0] current_shard_size = 0 for (i, fileinfo) in enumerate(zfiles): current_shard_size += fileinfo.file_size if (current_shard_size >= size_per_shard): shard_start_indexes.append((i + 1)) current_shard_size = 0 if (shard_start_indexes[(- 1)] != len(zfiles)): shard_start_indexes.append(len(zfiles)) return [cls(blob_key, start_index, end_index, _reader) for (start_index, end_index) in zip(shard_start_indexes, shard_start_indexes[1:])]
Returns a list of input shard states for the input spec. Args: mapper_spec: The MapperSpec for this InputReader. Must contain 'blob_key' parameter with one blob key. _reader: a callable that returns a file-like object for reading blobs. Used for dependency injection. Returns: A list of InputReaders spanning files within the zip.
codesearchnet
def to_cache_timer(datetime_func): if datetime_func is None: datetime_func = datetime.utcnow def _timer(): return (datetime_func() - datetime(1970, 1, 1)).total_seconds() return _timer
Converts a datetime_func to a timestamp_func. Args: datetime_func (callable[[datetime]]): a func that returns the current time Returns: time_func (callable[[timestamp]]): a func that returns the timestamp from the epoch
juraj-google-style
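A minimal sketch of the default behaviour, using only the standard library: the returned timer reports seconds since the Unix epoch, which should track time.time() closely.
import time
from datetime import datetime

def to_cache_timer(datetime_func):
    # copied from the entry above for a runnable sketch
    if datetime_func is None:
        datetime_func = datetime.utcnow
    def _timer():
        return (datetime_func() - datetime(1970, 1, 1)).total_seconds()
    return _timer

timer = to_cache_timer(None)
print(abs(timer() - time.time()) < 1.0)  # True: both are seconds since the epoch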
def c_overturned(step): rbot, rtop = misc.get_rbounds(step) cinit, rad = init_c_overturn(step) radf = (rtop**3 + rbot**3 - rad**3)**(1 / 3) return cinit, radf
Theoretical overturned concentration. This computes the resulting composition profile if fractional crystallization of a SMO is assumed and then a purely radial overturn happens. Args: step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData instance. Returns: tuple of :class:`numpy.array`: the composition and the radial position at which it is evaluated.
juraj-google-style
def _add_result(self, dict_entry, entry, dt, start_time): time_entry = {} time_entry['dt'] = dt time_entry['start_time'] = start_time dict_entry[entry] = time_entry
Adds a result to the dictionary. Args: dict_entry: main dict to add the entry to entry: slot for this entry (likely an integer) dt: the timing for the entry start_time: when the entry started (unix time, float)
codesearchnet
def _VerifyHandValues(self, tensor_in_sizes, filter_in_sizes, stride, padding, expected): total_size_1 = 1 total_size_2 = 1 for s in tensor_in_sizes: total_size_1 *= s for s in filter_in_sizes: total_size_2 *= s x1 = np.array([f * 1.0 for f in range(1, total_size_1 + 1)], dtype=np.float32).reshape(tensor_in_sizes) x2 = np.array([f * 1.0 for f in range(1, total_size_2 + 1)], dtype=np.float32).reshape(filter_in_sizes) with self.session() as sess: t1 = array_ops.placeholder(shape=tensor_in_sizes, dtype=np.float32) t2 = array_ops.placeholder(shape=filter_in_sizes, dtype=np.float32) with self.test_scope(): conv = nn_ops.depthwise_conv2d_native(t1, t2, strides=[1, stride, stride, 1], padding=padding) value = sess.run(conv, {t1: x1, t2: x2}) print('value = ', value) self.assertArrayNear(expected, np.ravel(value), 0.0001) self.assertShapeEqual(value, conv)
Verifies the output values of the depthwise convolution function. Args: tensor_in_sizes: Input tensor dimensions in [batch, input_rows, input_cols, input_depth]. filter_in_sizes: Filter tensor dimensions in [filter_rows, filter_cols, input_depth, depth_multiplier]. stride: Stride. padding: Padding type. expected: An array containing the expected operation outputs.
github-repos
def get_all_instances(include_fastboot=False): if include_fastboot: serial_list = (list_adb_devices() + list_fastboot_devices()) return get_instances(serial_list) return get_instances(list_adb_devices())
Create AndroidDevice instances for all attached android devices. Args: include_fastboot: Whether to include devices in bootloader mode or not. Returns: A list of AndroidDevice objects each representing an android device attached to the computer.
codesearchnet
def replace_in_list(stringlist: Iterable[str], replacedict: Dict[str, str]) -> List[str]: newlist = [] for fromstring in stringlist: newlist.append(multiple_replace(fromstring, replacedict)) return newlist
Returns a list produced by applying :func:`multiple_replace` to every string in ``stringlist``. Args: stringlist: list of source strings replacedict: dictionary mapping "original" to "replacement" strings Returns: list of final strings
juraj-google-style
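A sketch with a simple stand-in for multiple_replace, which this entry does not include; a regex alternation applies all replacements in a single pass, so replacements do not chain into each other.
import re
from typing import Dict, Iterable, List

def multiple_replace(text: str, replacedict: Dict[str, str]) -> str:
    # hypothetical stand-in for the real helper
    if not replacedict:
        return text
    pattern = re.compile('|'.join(re.escape(k) for k in replacedict))
    return pattern.sub(lambda m: replacedict[m.group(0)], text)

def replace_in_list(stringlist: Iterable[str], replacedict: Dict[str, str]) -> List[str]:
    return [multiple_replace(s, replacedict) for s in stringlist]

print(replace_in_list(['the cat', 'the dog'], {'cat': 'dog', 'dog': 'cat'}))
# ['the dog', 'the cat'] -- single-pass substitution avoids cat->dog->cat loops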
def _assert_rank_condition(x, rank, static_condition, dynamic_condition, data, summarize): assert_type(rank, dtypes.int32) rank_static = tensor_util.constant_value(rank) if rank_static is not None: if rank_static.ndim != 0: raise ValueError('Rank must be a scalar.') x_rank_static = x.get_shape().ndims if x_rank_static is not None: if not static_condition(x_rank_static, rank_static): raise ValueError('Static rank condition failed', x_rank_static, rank_static) return control_flow_ops.no_op(name='static_checks_determined_all_ok') condition = dynamic_condition(array_ops.rank(x), rank) if rank_static is None: this_data = ['Rank must be a scalar. Received rank: ', rank] rank_check = assert_rank(rank, 0, data=this_data) condition = control_flow_ops.with_dependencies([rank_check], condition) return control_flow_assert.Assert(condition, data, summarize=summarize)
Assert `x` has a rank that satisfies a given condition. Args: x: Numeric `Tensor`. rank: Scalar `Tensor`. static_condition: A python function that takes `[actual_rank, given_rank]` and returns `True` if the condition is satisfied, `False` otherwise. dynamic_condition: An `op` that takes [actual_rank, given_rank] and return `True` if the condition is satisfied, `False` otherwise. data: The tensors to print out if the condition is false. Defaults to error message and first few entries of `x`. summarize: Print this many entries of each tensor. Returns: Op raising `InvalidArgumentError` if `x` fails dynamic_condition. Raises: ValueError: If static checks determine `x` fails static_condition.
github-repos
def __init__(self, client, conv_states, user_list, sync_timestamp): self._client = client self._conv_dict = {} self._sync_timestamp = sync_timestamp self._user_list = user_list for conv_state in conv_states: self._add_conversation(conv_state.conversation, conv_state.event, conv_state.event_continuation_token) self._client.on_state_update.add_observer(self._on_state_update) self._client.on_connect.add_observer(self._sync) self._client.on_reconnect.add_observer(self._sync) self.on_event = event.Event('ConversationList.on_event') self.on_typing = event.Event('ConversationList.on_typing') self.on_watermark_notification = event.Event( 'ConversationList.on_watermark_notification' )
:class:`.Event` fired when an event occurs in any conversation. Args: conv_event: :class:`ConversationEvent` that occurred.
juraj-google-style
def confirm(question): if FORCE_YES: return True while True: answer = input(question + ' <Yes|No>').lower() if answer == 'yes' or answer == 'y': confirmed = True break if answer == 'no' or answer == 'n': confirmed = False break return confirmed
Ask the user whether they really want something to happen. Args: question(str): What can happen Returns: (boolean): Confirmed or not
juraj-google-style
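A usage sketch of the prompt above; input() is stubbed with unittest.mock so the example runs non-interactively, and the function is copied in lightly simplified form.
from unittest import mock

FORCE_YES = False

def confirm(question):
    if FORCE_YES:
        return True
    while True:
        answer = input(question + ' <Yes|No>').lower()
        if answer in ('yes', 'y'):
            return True
        if answer in ('no', 'n'):
            return False

with mock.patch('builtins.input', return_value='y'):
    print(confirm('Delete everything?'))  # True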
def get_domain_workgroup(): with salt.utils.winapi.Com(): conn = wmi.WMI() for computer in conn.Win32_ComputerSystem(): if computer.PartOfDomain: return {'Domain': computer.Domain} else: return {'Workgroup': computer.Workgroup}
Get the domain or workgroup the computer belongs to. .. versionadded:: 2015.5.7 .. versionadded:: 2015.8.2 Returns: str: The name of the domain or workgroup CLI Example: .. code-block:: bash salt 'minion-id' system.get_domain_workgroup
codesearchnet
def bilinearly_sampled_image(texture, uv): h, w = tf.unstack(tf.shape(texture)[:2]) u, v = tf.split(uv, 2, axis=-1) v = 1.0 - v u, v = u * tf.to_float(w) - 0.5, v * tf.to_float(h) - 0.5 u0, u1 = tf.floor(u), tf.ceil(u) v0, v1 = tf.floor(v), tf.ceil(v) uf, vf = u - u0, v - v0 u0, u1, v0, v1 = map(tf.to_int32, [u0, u1, v0, v1]) def sample(u, v): vu = tf.concat([v % h, u % w], axis=-1) return tf.gather_nd(texture, vu) s00, s01 = sample(u0, v0), sample(u0, v1) s10, s11 = sample(u1, v0), sample(u1, v1) s0 = s00 * (1.0 - vf) + s01 * vf s1 = s10 * (1.0 - vf) + s11 * vf s = s0 * (1.0 - uf) + s1 * uf return s
Build bilinear texture sampling graph. Coordinate transformation rules match OpenGL GL_REPEAT wrapping and GL_LINEAR interpolation modes. Args: texture: [tex_h, tex_w, channel_n] tensor. uv: [frame_h, frame_h, 2] tensor with per-pixel UV coordinates in range [0..1] Returns: [frame_h, frame_h, channel_n] tensor with per-pixel sampled values.
juraj-google-style
def RunScripts(self, script_dict): metadata_types = ['%s-script-url', '%s-script'] metadata_keys = [(key % self.script_type) for key in metadata_types] metadata_keys = [key for key in metadata_keys if script_dict.get(key)] if (not metadata_keys): self.logger.info('No %s scripts found in metadata.', self.script_type) for metadata_key in metadata_keys: metadata_script = script_dict.get(metadata_key) self._MakeExecutable(metadata_script) self._RunScript(metadata_key, metadata_script)
Run the metadata scripts; execute a URL script first if one is provided. Args: script_dict: a dictionary mapping metadata keys to script files.
codesearchnet
def author_id_normalize_and_schema(uid, schema=None): def _get_uid_normalized_in_schema(_uid, _schema): (regex, template) = _RE_AUTHORS_UID[_schema] match = regex.match(_uid) if match: return template.format(match.group('uid')) if (idutils.is_orcid(uid) and (schema in (None, 'ORCID'))): return (idutils.normalize_orcid(uid), 'ORCID') if (schema and (schema not in _RE_AUTHORS_UID)): raise UnknownUIDSchema(uid) if schema: normalized_uid = _get_uid_normalized_in_schema(uid, schema) if normalized_uid: return (normalized_uid, schema) else: raise SchemaUIDConflict(schema, uid) (match_schema, normalized_uid) = (None, None) for candidate_schema in _RE_AUTHORS_UID: candidate_uid = _get_uid_normalized_in_schema(uid, candidate_schema) if candidate_uid: if match_schema: raise UnknownUIDSchema(uid) match_schema = candidate_schema normalized_uid = candidate_uid if match_schema: return (normalized_uid, match_schema) raise UnknownUIDSchema(uid)
Detect and normalize an author UID schema. Args: uid (string): a UID string schema (string): schema to try to resolve the UID to Returns: Tuple[string, string]: a tuple (uid, schema) where: - uid: the UID normalized to comply with the id.json schema - schema: a schema of the UID or *None* if not recognised Raises: UnknownUIDSchema: if the UID is not enough to definitively determine the schema SchemaUIDConflict: if the specified schema does not match the given UID
codesearchnet
def load_configuration(yaml: yaml.ruamel.yaml.YAML, filename: str) -> DictLike: with open(filename, 'r') as f: config = yaml.load(f) return config
Load an analysis configuration from a file. Args: yaml: YAML object to use in loading the configuration. filename: Filename of the YAML configuration file. Returns: dict-like object containing the loaded configuration
codesearchnet
def is_within_strict_int_range(lower_bound: int, upper_bound: int) -> RuleChecker[Numeric]: def _checker(value: Numeric) -> RuleOutput: if lower_bound < value < upper_bound: return None else: return 'Value is not within the strict range.' return _checker
Checks if the provided numeric value IS strictly bounded by integers i.e. (lower_bound, upper_bound) with both bounds exclusive. Args: * lower_bound: lowest integer value (exclusive) * upper_bound: highest integer value (exclusive) Returns: * None: if lower_bound < value < upper_bound * Error message, otherwise
github-repos
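A minimal sketch without the project's type aliases (RuleChecker/RuleOutput are treated here as "callable returning None or an error string"), showing how the returned checker is used.
from typing import Callable, Optional, Union

Numeric = Union[int, float]

def is_within_strict_int_range(lower_bound: int, upper_bound: int) -> Callable[[Numeric], Optional[str]]:
    def _checker(value: Numeric) -> Optional[str]:
        if lower_bound < value < upper_bound:
            return None
        return 'Value is not within the strict range.'
    return _checker

check = is_within_strict_int_range(0, 10)
print(check(5))   # None (passes)
print(check(10))  # 'Value is not within the strict range.' (upper bound is exclusive)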
def get_client_kwargs(self, path): container, obj = self.split_locator(path) kwargs = dict(container=container) if obj: kwargs['obj'] = obj return kwargs
Get base keyword arguments for client for a specific path. Args: path (str): Absolute path or URL. Returns: dict: client args
juraj-google-style
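A standalone sketch of the same container/object split; split_locator is not shown in the entry, so it is assumed here to separate the first path segment from the rest.
def get_client_kwargs(path):
    # hypothetical simplification of split_locator
    container, _, obj = path.strip('/').partition('/')
    kwargs = dict(container=container)
    if obj:
        kwargs['obj'] = obj
    return kwargs

print(get_client_kwargs('mybucket/some/key.txt'))  # {'container': 'mybucket', 'obj': 'some/key.txt'}
print(get_client_kwargs('mybucket'))               # {'container': 'mybucket'}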
def _ip_unnumbered_type(self, **kwargs): method_name = 'interface_%s_ip_ip_config_unnumbered_ip_donor_'\ 'interface_type' % kwargs['int_type'] ip_unnumbered_type = getattr(self._interface, method_name) config = ip_unnumbered_type(**kwargs) if kwargs['delete']: tag = 'ip-donor-interface-type' config.find('.//*%s' % tag).set('operation', 'delete') return config
Return the `ip unnumbered` donor type XML. You should not use this method. You probably want `Interface.ip_unnumbered`. Args: int_type (str): Type of interface. (gigabitethernet, tengigabitethernet etc). delete (bool): Remove the configuration if ``True``. ip_donor_interface_type (str): The donor interface type (loopback) Returns: XML to be passed to the switch. Raises: None
juraj-google-style
def is_native_ion_gate(gate: ops.Gate) -> bool: return isinstance(gate, (ops.XXPowGate, ops.MeasurementGate, ops.XPowGate, ops.YPowGate, ops.ZPowGate))
Check if a gate is a native ion gate. Args: gate: Input gate. Returns: True if the gate is native to the ion, false otherwise.
codesearchnet
def log_estimator_evaluation_result(self, eval_results): if (not isinstance(eval_results, dict)): tf.logging.warning('eval_results should be directory for logging. Got %s', type(eval_results)) return global_step = eval_results[tf.GraphKeys.GLOBAL_STEP] for key in sorted(eval_results): if (key != tf.GraphKeys.GLOBAL_STEP): self.log_metric(key, eval_results[key], global_step=global_step)
Log the evaluation result for an estimator. The evaluation result is a dictionary that contains metrics defined in model_fn. It also contains an entry for global_step which contains the value of the global step when evaluation was performed. Args: eval_results: dict, the result of evaluate() from an estimator.
codesearchnet
def dawsn(x, name=None): with ops.name_scope(name, 'dawsn', [x]): return gen_special_math_ops.dawsn(x)
Computes Dawson's integral of `x` element-wise. Dawson's integral is defined as `exp(-x**2)` times the integral of `exp(t**2)` from `0` to `x`, with the domain of definition all real numbers. Dawson's function is odd. >>> tf.math.special.dawsn([-1., -0.5, 0.5, 1.]).numpy() array([-0.5380795, -0.4244364, 0.4244364, 0.5380795], dtype=float32) This implementation is based off of the Cephes math library. Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. @compatibility(scipy) Equivalent to scipy.special.dawsn @end_compatibility
github-repos
def tag_file(filename, artist, title, year=None, genre=None, artwork_url=None, album=None, track_number=None, url=None): try: audio = EasyMP3(filename) audio.tags = None audio["artist"] = artist audio["title"] = title if year: audio["date"] = str(year) if album: audio["album"] = album if track_number: audio["tracknumber"] = track_number if genre: audio["genre"] = genre if url: audio["website"] = url audio.save() if artwork_url: artwork_url = artwork_url.replace('https', 'http') mime = 'image/jpeg' if '.jpg' in artwork_url: mime = 'image/jpeg' if '.png' in artwork_url: mime = 'image/png' if '-large' in artwork_url: new_artwork_url = artwork_url.replace('-large', '-t500x500') try: image_data = requests.get(new_artwork_url).content except Exception as e: image_data = requests.get(artwork_url).content else: image_data = requests.get(artwork_url).content audio = MP3(filename, ID3=OldID3) audio.tags.add( APIC( encoding=3, mime=mime, type=3, desc='Cover', data=image_data ) ) audio.save() if url: audio = MP3(filename, ID3=OldID3) audio.tags.add( WXXX( encoding=3, url=url ) ) audio.save() return True except Exception as e: puts(colored.red("Problem tagging file: ") + colored.white("Is this file a WAV?")) return False
Attempt to put ID3 tags on a file. Args: artist (str): title (str): year (int): genre (str): artwork_url (str): album (str): track_number (str): filename (str): url (str):
juraj-google-style
def compose(self, *args, **kwargs): linebreak = kwargs.pop("linebreak", "\n") if len(args) > 0: self.args = args self._update(**kwargs) fkwargs = {} modtmpl = [] for line in self: cline = copy(line) for match in self._regex.findall(line): search = "[{}]".format("|".join(match)) name, indent, delim, qual, _ = match if indent != "": indent = " "*int(indent) delim = delim.replace("\\|", "|") data = getattr(self, name, None) if data is None: cline = cline.replace(search, "") continue elif delim.isdigit(): fkwargs[name] = getattr(self, "_fmt_"+name)() else: fkwargs[name] = linebreak.join([indent+k+delim+qual+v+qual for k, v in data.items()]) cline = cline.replace(search, "{"+name+"}") modtmpl.append(cline) modtmpl = "\n".join(modtmpl) print(modtmpl) dct = self.get_kwargs() dct.update(fkwargs) return self._constructor(textobj=modtmpl.format(*self.args, **dct))
Generate a file from the current template and given arguments. Warning: Make certain to check the formatted editor for correctness! Args: args: Positional arguments to update the template kwargs: Keyword arguments to update the template Returns: editor: An editor containing the formatted template.
juraj-google-style
def _AlignUncompressedDataOffset(self, uncompressed_data_offset): if self._zip_ext_file: self._zip_ext_file.close() self._zip_ext_file = None try: self._zip_ext_file = self._zip_file.open(self._zip_info, 'r') except zipfile.BadZipfile as exception: raise IOError( 'Unable to open ZIP file with error: {0!s}'.format(exception)) self._uncompressed_data = b'' self._uncompressed_data_size = 0 self._uncompressed_data_offset = 0 while uncompressed_data_offset > 0: self._ReadCompressedData(self._UNCOMPRESSED_DATA_BUFFER_SIZE) if uncompressed_data_offset < self._uncompressed_data_size: self._uncompressed_data_offset = uncompressed_data_offset break uncompressed_data_offset -= self._uncompressed_data_size
Aligns the compressed file with the uncompressed data offset. Args: uncompressed_data_offset (int): uncompressed data offset. Raises: IOError: if the ZIP file could not be opened. OSError: if the ZIP file could not be opened.
juraj-google-style
def _ParseLogline(self, parser_mediator, structure): month, day_of_month, year, hours, minutes, seconds, milliseconds = ( structure.date_time) time_elements_tuple = ( year, month, day_of_month, hours, minutes, seconds, milliseconds) try: date_time = dfdatetime_time_elements.TimeElementsInMilliseconds( time_elements_tuple=time_elements_tuple) except ValueError: parser_mediator.ProduceExtractionWarning( 'invalid date time value: {0!s}'.format(structure.date_time)) return event_data = SkyDriveOldLogEventData() event_data.log_level = structure.log_level event_data.offset = self.offset event_data.source_code = structure.source_code event_data.text = structure.text event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_ADDED) parser_mediator.ProduceEventWithEventData(event, event_data) self._last_date_time = date_time self._last_event_data = event_data
Parse a logline and store appropriate attributes. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. structure (pyparsing.ParseResults): structure of tokens derived from a line of a text file.
juraj-google-style
def _decode_linear_biases(linear_string, nodelist): linear_bytes = base64.b64decode(linear_string) return dict(zip(nodelist, struct.unpack('<' + 'd' * (len(linear_bytes) // 8), linear_bytes)))
Inverse of _serialize_linear_biases. Args: linear_string (str): base 64 encoded string of little endian 8 byte floats, one for each of the nodes in nodelist. nodelist (list): list of the form [node1, node2, ...]. Returns: dict: linear biases in a dict. Examples: >>> _decode_linear_biases('AAAAAAAA8L8AAAAAAADwPwAAAAAAAAAA', [1, 2, 3]) {1: -1.0, 2: 1.0, 3: 0.0} >>> _decode_linear_biases('AAAAAAAA8L8AAAAAAADwPwAAAAAAAAAA', [3, 2, 1]) {1: 0.0, 2: 1.0, 3: -1.0}
codesearchnet
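A round-trip sketch of the encoding this function inverts: little-endian float64 values, base64-encoded. Pure standard library; the decode call reproduces the first docstring example, and the encoder here is only an illustrative inverse.
import base64
import struct

def _encode_linear_biases(linear, nodelist):
    # illustrative inverse of _decode_linear_biases
    return base64.b64encode(struct.pack('<' + 'd' * len(nodelist),
                                        *[linear[v] for v in nodelist])).decode('ascii')

def _decode_linear_biases(linear_string, nodelist):
    linear_bytes = base64.b64decode(linear_string)
    return dict(zip(nodelist, struct.unpack('<' + 'd' * (len(linear_bytes) // 8), linear_bytes)))

print(_decode_linear_biases('AAAAAAAA8L8AAAAAAADwPwAAAAAAAAAA', [1, 2, 3]))
# {1: -1.0, 2: 1.0, 3: 0.0}
print(_encode_linear_biases({1: -1.0, 2: 1.0, 3: 0.0}, [1, 2, 3]))
# 'AAAAAAAA8L8AAAAAAADwPwAAAAAAAAAA'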
def connect(self, address) -> bytes: stdout = self._exec_adb_cmd('connect', address, shell=False, timeout=None, stderr=None) if PATTERN_ADB_CONNECT_SUCCESS.match(stdout.decode('utf-8')) is None: raise AdbError(cmd=f'connect {address}', stdout=stdout, stderr='', ret_code=0) return stdout
Executes the `adb connect` command with proper status checking. Args: address: string, the address of the Android instance to connect to. Returns: The stdout content. Raises: AdbError: if the connection failed.
github-repos
def getGridByCard(self, gssha_card_name): with tmp_chdir(self.project_directory): if (gssha_card_name not in (self.INPUT_MAPS + self.WMS_DATASETS)): raise ValueError('Card {0} not found in valid grid cards ...'.format(gssha_card_name)) gssha_grid_card = self.getCard(gssha_card_name) if (gssha_grid_card is None): raise ValueError('{0} card not found ...'.format(gssha_card_name)) gssha_pro_card = self.getCard('#PROJECTION_FILE') if (gssha_pro_card is None): raise ValueError('#PROJECTION_FILE card not found ...') return GDALGrid(gssha_grid_card.value.strip('"').strip("'"), gssha_pro_card.value.strip('"').strip("'"))
Returns a GDALGrid object of the GSSHA grid. Parameters: gssha_card_name(str): Name of GSSHA project card for grid. Returns: GDALGrid
codesearchnet
def parse_from_xml(root): if root.tag != 'ubcpi': raise UpdateFromXmlError(_('Every peer instruction tool must contain an "ubcpi" element.')) display_name_el = root.find('display_name') if display_name_el is None: raise UpdateFromXmlError(_('Every peer instruction tool must contain a "display_name" element.')) else: display_name = _safe_get_text(display_name_el) rationale_size_min = int(root.attrib['rationale_size_min']) if 'rationale_size_min' in root.attrib else None rationale_size_max = int(root.attrib['rationale_size_max']) if 'rationale_size_max' in root.attrib else None question_el = root.find('question') if question_el is None: raise UpdateFromXmlError(_('Every peer instruction must tool contain a "question" element.')) else: question = parse_question_xml(question_el) options_el = root.find('options') if options_el is None: raise UpdateFromXmlError(_('Every peer instruction must tool contain a "options" element.')) else: options, correct_answer, correct_rationale = parse_options_xml(options_el) seeds_el = root.find('seeds') if seeds_el is None: raise UpdateFromXmlError(_('Every peer instruction must tool contain a "seeds" element.')) else: seeds = parse_seeds_xml(seeds_el) algo = unicode(root.attrib['algorithm']) if 'algorithm' in root.attrib else None num_responses = unicode(root.attrib['num_responses']) if 'num_responses' in root.attrib else None return { 'display_name': display_name, 'question_text': question, 'options': options, 'rationale_size': {'min': rationale_size_min, 'max': rationale_size_max}, 'correct_answer': correct_answer, 'correct_rationale': correct_rationale, 'seeds': seeds, 'algo': {"name": algo, 'num_responses': num_responses} }
Update the UBCPI XBlock's content from an XML definition. We need to be strict about the XML we accept, to avoid setting the XBlock to an invalid state (which will then be persisted). Args: root (lxml.etree.Element): The XML definition of the XBlock's content. Returns: A dictionary of all of the XBlock's content. Raises: UpdateFromXmlError: The XML definition is invalid
juraj-google-style
def learn(self, features, labels): labels = np.ravel(labels) self.__learn_labels(labels) if len(labels) == 0: return labels = self.labels.transform(labels) if self.feature_length > 0 and hasattr(self.clf, 'partial_fit'): self.clf = self.clf.partial_fit(features, labels) else: self.clf = self.clf.fit(features, labels) self.feature_length = len(features[0])
Fits the classifier. If its state is empty, the classifier is fitted; if not, the classifier is partially fitted. See sklearn's SGDClassifier fit and partial_fit methods. Args: features (:obj:`list` of :obj:`list` of :obj:`float`) labels (:obj:`list` of :obj:`str`): Labels for each set of features. New features are learnt.
juraj-google-style
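A hedged sketch of the underlying scikit-learn pattern this method relies on: partial_fit needs the full set of classes on the first call, and later calls update the same model incrementally (data here is tiny and illustrative).
import numpy as np
from sklearn.linear_model import SGDClassifier

clf = SGDClassifier(random_state=0)
X1, y1 = np.array([[0.0], [1.0]]), np.array(['a', 'b'])
X2, y2 = np.array([[0.2], [0.9]]), np.array(['a', 'b'])

clf.partial_fit(X1, y1, classes=np.array(['a', 'b']))  # first call: declare all classes
clf.partial_fit(X2, y2)                                 # incremental update
print(clf.predict(np.array([[0.05]])))                  # most likely ['a'], not guaranteed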
def get_type_key(self, seen: set['BaseValue'] | None=None): return self.get_default_type_key()
Build a key from the information used to perform type matching. Get a hashable object containing this value's type information. Type keys are only compared amongst themselves, so we don't care what the internals look like, only that values with different types *always* have different type keys and values with the same type preferably have the same type key. Args: seen: The set of values seen before while computing the type key. Returns: A hashable object built from this value's type information.
github-repos
def get_stored_metadata(self, temp_ver): with open(self._prefixed('%s.metadata' % temp_ver.name)) as f: return json.load(f)
Retrieves the metadata for the given template version from the store Args: temp_ver (TemplateVersion): template version to retrieve the metadata for Returns: dict: the metadata of the given template version
juraj-google-style
def loss(logits, labels): labels = tf.cast(labels, tf.int64) cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='cross_entropy_per_example') cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy') tf.add_to_collection('losses', cross_entropy_mean) return tf.add_n(tf.get_collection('losses'), name='total_loss')
Add L2Loss to all the trainable variables. Add summary for "Loss" and "Loss/avg". Args: logits: Logits from inference(). labels: Labels from distorted_inputs or inputs(). 1-D tensor of shape [batch_size] Returns: Loss tensor of type float.
codesearchnet
def DeregisterDefinition(self, artifact_definition): artifact_definition_name = artifact_definition.name.lower() if artifact_definition_name not in self._artifact_definitions: raise KeyError( 'Artifact definition not set for name: {0:s}.'.format( artifact_definition.name)) del self._artifact_definitions[artifact_definition_name]
Deregisters an artifact definition. Artifact definitions are identified based on their lower case name. Args: artifact_definition (ArtifactDefinition): an artifact definition. Raises: KeyError: if an artifact definition is not set for the corresponding name.
juraj-google-style
def cost(self, logits, target): logits = tf.reshape(logits, [(self._num_steps * self._batch_size), (- 1)]) target = tf.reshape(target, [(self._num_steps * self._batch_size), (- 1)]) xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=target) loss = tf.reduce_sum(xent) return (loss / self._batch_size)
Returns cost. Args: logits: model output. target: target. Returns: Cross-entropy loss for a sequence of logits. The loss will be averaged across time steps if time_average_cost was enabled at construction time.
codesearchnet
def trace(self, predicate): self._handler = predicate if self.threading_support is None or self.threading_support: self._threading_previous = getattr(threading, '_trace_hook', None) threading.settrace(self) self._previous = sys.gettrace() sys.settrace(self) return self
Starts tracing with the given callable. Args: predicate (callable that accepts a single :obj:`hunter.Event` argument): Return: self
juraj-google-style
def get_dataclass(self, json_dataclass: type[T]) -> T: if not mime_types.is_dataclass(self.mimetype): raise ValueError('Part is not a dataclass.') try: return json_dataclass.from_json(self.text) except AttributeError as e: raise ValueError(f'{json_dataclass.__name__} is not a valid json dataclass') from e
Returns representation of the Part as a given dataclass. Args: json_dataclass: A dataclass that can be converted to/from JSON. Returns: The dataclass representation of the Part.
github-repos
def ProcessFile(filename, vlevel, extra_check_functions=None): _SetVerboseLevel(vlevel) _BackupFilters() if (not ProcessConfigOverrides(filename)): _RestoreFilters() return lf_lines = [] crlf_lines = [] try: if (filename == '-'): lines = codecs.StreamReaderWriter(sys.stdin, codecs.getreader('utf8'), codecs.getwriter('utf8'), 'replace').read().split('\n') else: lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n') for linenum in range((len(lines) - 1)): if lines[linenum].endswith('\r'): lines[linenum] = lines[linenum].rstrip('\r') crlf_lines.append((linenum + 1)) else: lf_lines.append((linenum + 1)) except IOError: _cpplint_state.PrintError(("Skipping input '%s': Can't open for reading\n" % filename)) _RestoreFilters() return file_extension = filename[(filename.rfind('.') + 1):] if ((filename != '-') and (file_extension not in GetAllExtensions())): bazel_gen_files = set(['external/local_config_cc/libtool', 'external/local_config_cc/make_hashed_objlist.py', 'external/local_config_cc/wrapped_ar', 'external/local_config_cc/wrapped_clang', 'external/local_config_cc/xcrunwrapper.sh']) if (not (filename in bazel_gen_files)): _cpplint_state.PrintError(('Ignoring %s; not a valid file name (%s)\n' % (filename, ', '.join(GetAllExtensions())))) else: ProcessFileData(filename, file_extension, lines, Error, extra_check_functions) if (lf_lines and crlf_lines): for linenum in crlf_lines: Error(filename, linenum, 'whitespace/newline', 1, 'Unexpected \\r (^M) found; better to use only \\n') _RestoreFilters()
Does google-lint on a single file. Args: filename: The name of the file to parse. vlevel: The level of errors to report. Every error of confidence >= verbose_level will be reported. 0 is a good default. extra_check_functions: An array of additional check functions that will be run on each source line. Each function takes 4 arguments: filename, clean_lines, line, error
codesearchnet
def visit_indexer(self, indexer: _evaluation.IndexerNode) -> _sql_data_types.Select: collection_result = self.visit(indexer.collection) index_result = self.visit(indexer.index) indexed_collection = f'SELECT ROW_NUMBER() OVER() AS row_,\n{collection_result.sql_alias}\nFROM {collection_result.to_subquery()}' sql_alias = f'indexed_{collection_result.sql_alias}' return _sql_data_types.Select(select_part=_sql_data_types.Identifier(collection_result.sql_alias, collection_result.sql_data_type, _sql_alias=sql_alias), from_part=f'({indexed_collection}) AS inner_tbl', where_part=f'(inner_tbl.row_ - 1) = {index_result.as_operand()}')
Translates a FHIRPath indexer expression to Standard SQL. Args: indexer: The `_Indexer` Expression node. Returns: A compiled Standard SQL expression.
github-repos
def solve(ast, builtins_pytd, protocols_pytd): builtins_pytd = transforms.RemoveMutableParameters(builtins_pytd) builtins_pytd = visitors.LookupClasses(builtins_pytd) protocols_pytd = visitors.LookupClasses(protocols_pytd) ast = visitors.LookupClasses(ast, builtins_pytd) return (TypeSolver(ast, builtins_pytd, protocols_pytd).solve(), extract_local(ast))
Solve the unknowns in a pytd AST using the standard Python builtins. Args: ast: A pytd.TypeDeclUnit, containing classes named ~unknownXX. builtins_pytd: A pytd for builtins. protocols_pytd: A pytd for protocols. Returns: A tuple of (1) a dictionary (str->str) mapping unknown class names to known class names and (2) a pytd.TypeDeclUnit of the complete classes in ast.
github-repos
def generate_sentence(self, chain): def weighted_choice(choices): total_weight = sum((weight for (val, weight) in choices)) rand = random.uniform(0, total_weight) upto = 0 for (val, weight) in choices: if ((upto + weight) >= rand): return val upto += weight sentence = list(random.choice(chain.startwords)) while (not (sentence[(- 1)][(- 1)] in ['.', '?', '!'])): sentence.append(weighted_choice(chain.content[tuple(sentence[(- 2):])].items())) return ' '.join(sentence)
!DEMO! Demo function that shows how to generate a simple sentence starting with an uppercase letter, without a length limit. Args: chain: MarkovChain that will be used to generate the sentence
codesearchnet
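A standalone sketch of the weighted_choice helper defined inside the function above; with the standard library this is essentially random.choices with weights.
import random

def weighted_choice(choices):
    total_weight = sum(weight for _, weight in choices)
    rand = random.uniform(0, total_weight)
    upto = 0
    for val, weight in choices:
        if upto + weight >= rand:
            return val
        upto += weight

choices = [('cat', 1), ('dog', 3)]
picks = [weighted_choice(choices) for _ in range(10000)]
print(picks.count('dog') / len(picks))  # roughly 0.75
# stdlib equivalent: random.choices([v for v, _ in choices], weights=[w for _, w in choices])[0]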
def process_event(self, event_name: str, data: dict): if (isinstance(self.opt.get("learning_rate", None), float) and isinstance(self.opt.get("learning_rate_decay", None), float)): pass else: if event_name == 'after_train_log': if (self.get_learning_rate_variable() is not None) and ('learning_rate' not in data): data['learning_rate'] = float(K.get_value(self.get_learning_rate_variable())) if (self.get_momentum_variable() is not None) and ('momentum' not in data): data['momentum'] = float(K.get_value(self.get_momentum_variable())) else: super().process_event(event_name, data)
Process event after epoch Args: event_name: whether the event is sent after an epoch or a batch. Set of values: ``"after_epoch", "after_batch"`` data: event data (dictionary) Returns: None
juraj-google-style
def from_filenames(poscar_filenames, transformations=None, extend_collection=False): tstructs = [] for filename in poscar_filenames: with open(filename, 'r') as f: tstructs.append(TransformedStructure.from_poscar_string(f.read(), [])) return StandardTransmuter(tstructs, transformations, extend_collection=extend_collection)
Convenient constructor to generate a POSCAR transmuter from a list of POSCAR filenames. Args: poscar_filenames: List of POSCAR filenames transformations: New transformations to be applied to all structures. extend_collection: Same meaning as in __init__.
codesearchnet
def export(self, top=True): out = [] if top: out.append(self._internal_name) out.append(self._to_str(self.number_of_records_per_hour)) out.append(self._to_str(self.data_period_name_or_description)) out.append(self._to_str(self.data_period_start_day_of_week)) out.append(self._to_str(self.data_period_start_day)) out.append(self._to_str(self.data_period_end_day)) return ",".join(out)
Exports object to its string representation. Args: top (bool): if True appends `internal_name` before values. All non-list objects should be exported with top=True; all list objects that are embedded as fields in list objects should be exported with `top`=False Returns: str: The object's string representation
juraj-google-style
def _consume_line(line_info, state): _update_section_state(line_info, state) if state.section.title is None: if state.summary.permitted: if line_info.remaining: state.summary.lines.append(line_info.remaining) elif state.summary.lines: state.summary.permitted = False else: state.description.lines.append(line_info.remaining_raw) else: state.summary.permitted = False if state.section.new and state.section.format == Formats.RST: directive = _get_directive(line_info) directive_tokens = directive.split() if state.section.title == Sections.ARGS: name = directive_tokens[-1] arg = _get_or_create_arg_by_name(state, name, is_kwarg=directive_tokens[0] == 'key') if len(directive_tokens) == 3: arg.type.lines.append(directive_tokens[1]) state.current_arg = arg elif state.section.title == Sections.TYPE: name = directive_tokens[-1] arg = _get_or_create_arg_by_name(state, name) state.current_arg = arg if state.section.format == Formats.NUMPY and _line_is_hyphens(line_info.remaining): return if state.section.title == Sections.ARGS: if state.section.format == Formats.GOOGLE: _consume_google_args_line(line_info, state) elif state.section.format == Formats.RST: state.current_arg.description.lines.append(line_info.remaining.strip()) elif state.section.format == Formats.NUMPY: line_stripped = line_info.remaining.strip() if _is_arg_name(line_stripped): arg = _get_or_create_arg_by_name(state, line_stripped) state.current_arg = arg elif _line_is_numpy_parameter_type(line_info): possible_args, type_data = line_stripped.split(':', 1) arg_names = _as_arg_names(possible_args) if arg_names: for arg_name in arg_names: arg = _get_or_create_arg_by_name(state, arg_name) arg.type.lines.append(type_data) state.current_arg = arg elif state.current_arg: state.current_arg.description.lines.append(line_info.remaining.strip()) else: pass elif state.current_arg: state.current_arg.description.lines.append(line_info.remaining.strip()) else: pass elif state.section.title == Sections.RETURNS: state.returns.lines.append(line_info.remaining.strip()) elif state.section.title == Sections.YIELDS: state.yields.lines.append(line_info.remaining.strip()) elif state.section.title == Sections.RAISES: state.raises.lines.append(line_info.remaining.strip()) elif state.section.title == Sections.TYPE: if state.section.format == Formats.RST: assert state.current_arg is not None state.current_arg.type.lines.append(line_info.remaining.strip()) else: pass
Consumes one line of text, updating the state accordingly. When _consume_line is called, part of the line may already have been processed for header information. Args: line_info: Information about the current and next line of the docstring. state: The state of the docstring parser.
github-repos
def output_waiting(self): buf = array.array('I', [0]) try: fcntl.ioctl(self._fd, termios.TIOCOUTQ, buf, True) except OSError as e: raise SerialError(e.errno, ('Querying output waiting: ' + e.strerror)) return buf[0]
Query the number of bytes waiting to be written to the serial port. Returns: int: number of bytes waiting to be written. Raises: SerialError: if an I/O or OS error occurs.
codesearchnet
async def get_headline(self, name): resp = await self.send_command(OPERATIONS.CMD_QUERY_HEADLINE, {'name': name}, MESSAGES.QueryHeadlineResponse, timeout=5.0) if resp is not None: resp = states.ServiceMessage.FromDictionary(resp) return resp
Get stored messages for a service. Args: name (string): The name of the service to get messages from. Returns: ServiceMessage: the headline or None if no headline has been set
juraj-google-style
def update_metadata(self, resource, keys_vals): self.metadata_service.set_auth(self._token_metadata) self.metadata_service.update(resource, keys_vals)
Updates key-value pairs with the given resource. Will attempt to update all key-value pairs even if some fail. Keys must already exist. Args: resource (intern.resource.boss.BossResource) keys_vals (dictionary): Collection of key-value pairs to update on the given resource. Raises: HTTPErrorList on failure.
juraj-google-style
def __init__(self, storage_writer, knowledge_base, data_location=None): super(AnalysisMediator, self).__init__() self._abort = False self._data_location = data_location self._event_filter_expression = None self._knowledge_base = knowledge_base self._mount_path = None self._storage_writer = storage_writer self._text_prepend = None self.last_activity_timestamp = 0.0 self.number_of_produced_analysis_reports = 0 self.number_of_produced_event_tags = 0
Initializes an analysis plugin mediator. Args: storage_writer (StorageWriter): storage writer. knowledge_base (KnowledgeBase): contains information from the source data needed for analysis. data_location (Optional[str]): location of data files used during analysis.
juraj-google-style
def make_tensor_model_fn(model_fn: str) -> TensorInferenceFn: def attr_fn(batch: Sequence[torch.Tensor], model: torch.nn.Module, device: str, inference_args: Optional[dict[str, Any]]=None, model_id: Optional[str]=None) -> Iterable[PredictionResult]: with torch.no_grad(): batched_tensors = torch.stack(batch) batched_tensors = _convert_to_device(batched_tensors, device) pred_fn = getattr(model, model_fn) predictions = pred_fn(batched_tensors, **inference_args) return utils._convert_to_result(batch, predictions, model_id) return attr_fn
Produces a TensorInferenceFn that uses a method of the model other than the forward() method. Args: model_fn: A string name of the method to be used. This is accessed through getattr(model, model_fn)
github-repos
def install_exception_handler(handler): if not isinstance(handler, ExceptionHandler): raise TypeError('handler of type %s does not inherit from ExceptionHandler' % type(handler)) EXCEPTION_HANDLERS.append(handler)
Installs an exception handler. Args: handler: ExceptionHandler, the exception handler to install. Raises: TypeError: Raised when the handler was not of the correct type. All installed exception handlers will be called if main() exits via an abnormal exception, i.e. not one of SystemExit, KeyboardInterrupt, FlagsError or UsageError.
juraj-google-style
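A self-contained sketch of the registration pattern; the real ExceptionHandler base class and handler list appear to come from absl-style application code, so small stand-ins are defined here purely for illustration.
EXCEPTION_HANDLERS = []

class ExceptionHandler:
    # stand-in base class mirroring the type check above
    def handle(self, exc):
        raise NotImplementedError

class LoggingHandler(ExceptionHandler):
    def handle(self, exc):
        print('unhandled exception:', exc)

def install_exception_handler(handler):
    if not isinstance(handler, ExceptionHandler):
        raise TypeError('handler of type %s does not inherit from ExceptionHandler' % type(handler))
    EXCEPTION_HANDLERS.append(handler)

install_exception_handler(LoggingHandler())
for h in EXCEPTION_HANDLERS:
    h.handle(RuntimeError('boom'))  # unhandled exception: boom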
def _get_context_id(self, context): if context in self._context_to_id: return self._context_to_id[context] graph_is_new = False with self._context_lock: if context not in self._context_to_id: graph_is_new = True context_id = _get_id() self._context_to_id[context] = context_id if graph_is_new: self.get_writer().WriteDebuggedGraph(debug_event_pb2.DebuggedGraph(graph_id=context_id, graph_name=getattr(context, 'name', None), outer_context_id=self._get_outer_context_id(context))) return self._context_to_id[context]
Get a unique ID for an op-construction context (e.g., a graph). If the graph has been encountered before, reuse the same unique ID. When encountering a new context (graph), this method writes a DebugEvent proto with the debugged_graph field to the proper DebugEvent file. Args: context: A context to get the unique ID for. Must be hashable. E.g., a Graph object. Returns: A unique ID for the context.
github-repos
def _process_using_meta_feature_generator(self, X, meta_feature_generator): all_learner_meta_features = [] for idx, base_learner in enumerate(self.base_learners): single_learner_meta_features = getattr(base_learner, self.meta_feature_generators[idx])(X) if len(single_learner_meta_features.shape) == 1: single_learner_meta_features = single_learner_meta_features.reshape(-1, 1) all_learner_meta_features.append(single_learner_meta_features) all_learner_meta_features = np.concatenate(all_learner_meta_features, axis=1) out = getattr(self.secondary_learner, meta_feature_generator)(all_learner_meta_features) return out
Process using secondary learner meta-feature generator Since secondary learner meta-feature generator can be anything e.g. predict, predict_proba, this internal method gives the ability to use any string. Just make sure secondary learner has the method. Args: X (array-like): Features array meta_feature_generator (str, unicode): Method for use by secondary learner
juraj-google-style
def retry_target(target, predicate, sleep_generator, deadline, on_error=None): if (deadline is not None): deadline_datetime = (datetime_helpers.utcnow() + datetime.timedelta(seconds=deadline)) else: deadline_datetime = None last_exc = None for sleep in sleep_generator: try: return target() except Exception as exc: if (not predicate(exc)): raise last_exc = exc if (on_error is not None): on_error(exc) now = datetime_helpers.utcnow() if ((deadline_datetime is not None) and (deadline_datetime < now)): six.raise_from(exceptions.RetryError('Deadline of {:.1f}s exceeded while calling {}'.format(deadline, target), last_exc), last_exc) _LOGGER.debug('Retrying due to {}, sleeping {:.1f}s ...'.format(last_exc, sleep)) time.sleep(sleep) raise ValueError('Sleep generator stopped yielding sleep values.')
Call a function and retry if it fails. This is the lowest-level retry helper. Generally, you'll use the higher-level retry helper :class:`Retry`. Args: target(Callable): The function to call and retry. This must be a nullary function - apply arguments with `functools.partial`. predicate (Callable[Exception]): A callable used to determine if an exception raised by the target should be considered retryable. It should return True to retry or False otherwise. sleep_generator (Iterable[float]): An infinite iterator that determines how long to sleep between retries. deadline (float): How long to keep retrying the target. on_error (Callable): A function to call while processing a retryable exception. Any error raised by this function will *not* be caught. Returns: Any: the return value of the target function. Raises: google.api_core.RetryError: If the deadline is exceeded while retrying. ValueError: If the sleep generator stops yielding values. Exception: If the target raises a method that isn't retryable.
codesearchnet
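A hedged usage sketch matching the signature shown in this entry; it assumes google-api-core is installed and that retry_target and exponential_sleep_generator are importable from google.api_core.retry (newer releases rename the deadline parameter to timeout, so treat the keyword as an assumption).
from google.api_core.retry import exponential_sleep_generator, retry_target

calls = {'n': 0}

def flaky():
    calls['n'] += 1
    if calls['n'] < 3:
        raise ConnectionError('transient failure')
    return 'ok'

result = retry_target(
    flaky,
    predicate=lambda exc: isinstance(exc, ConnectionError),
    sleep_generator=exponential_sleep_generator(initial=0.1, maximum=1.0, multiplier=2.0),
    deadline=10.0,
)
print(result, 'after', calls['n'], 'calls')  # ok after 3 calls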
def __init__(self, cluster_resolver=None, communication_options=None, *, mesh=None): self._validate_init_args(mesh, cluster_resolver) if not mesh: if not cluster_resolver: cluster_resolver = tfconfig_cluster_resolver.TFConfigClusterResolver() dtensor_env_var = _parse_dtensor_env_var_from_cluster_resolver(cluster_resolver) _config_dtensor_env_var(dtensor_env_var) mesh = _build_distributed_mesh(dtensor_util.DEFAULT_BATCH_MESH_DIM_NAME) extended = dtensor_strategy_extended.DTensorStrategyExtended(container_strategy=self, mesh=mesh) super().__init__(extended) self._mesh = mesh self._cluster_resolver = cluster_resolver
Creates the strategy. Args: cluster_resolver: optional `tf.distribute.cluster_resolver.ClusterResolver`. In case neither `mesh` nor `cluster_resolver` are provided, `tf.distribute.cluster_resolver.TFConfigClusterResolver` is used. communication_options: currently ignored. mesh: optional DTensor global mesh for the computation. Note that either `mesh` or the `cluster_resolver` should be provided, and not both.
github-repos
def _ragged_getitem(rt_input, key_list): if not key_list: return rt_input row_key = key_list[0] inner_keys = key_list[1:] if row_key is Ellipsis: expanded_key_list = _expand_ellipsis(key_list, rt_input.shape.ndims) return _ragged_getitem(rt_input, expanded_key_list) if row_key is array_ops.newaxis: inner_rt = _ragged_getitem(rt_input, inner_keys) nsplits = tensor_shape.dimension_at_index(inner_rt.row_splits.shape, 0) if nsplits.value is not None: nsplits = nsplits.value else: nsplits = array_ops.shape(inner_rt.row_splits, out_type=inner_rt.row_splits.dtype)[0] return ragged_tensor.RaggedTensor.from_uniform_row_length(inner_rt, nsplits - 1, nrows=1, validate=False) if isinstance(row_key, slice): sliced_rt_input = _slice_ragged_row_dimension(rt_input, row_key) if rt_input.uniform_row_length is not None: sliced_rt_input = ragged_tensor.RaggedTensor.from_uniform_row_length(sliced_rt_input.values, rt_input.uniform_row_length, nrows=sliced_rt_input.nrows()) return _ragged_getitem_inner_dimensions(sliced_rt_input, inner_keys) else: starts = rt_input.row_splits[:-1] limits = rt_input.row_splits[1:] if context.executing_eagerly(): try: if int(row_key) >= len(starts): raise IndexError('Row key {} out of bounds'.format(row_key)) except (TypeError, ValueError): pass row = rt_input.values[starts[row_key]:limits[row_key]] return row.__getitem__(inner_keys)
Helper for indexing and slicing ragged tensors with __getitem__(). Extracts the specified piece of the `rt_input`. See `RaggedTensor.__getitem__` for examples and restrictions. Args: rt_input: The `RaggedTensor` from which a piece should be returned. key_list: The list of keys specifying which piece to return. Each key corresponds with a separate dimension. Returns: The indicated piece of rt_input. Raises: ValueError: If `key_list` is not supported. TypeError: If any keys in `key_list` have an unsupported type.
github-repos
def parsed_forensic_reports_to_csv(reports): fields = ["feedback_type", "user_agent", "version", "original_envelope_id", "original_mail_from", "original_rcpt_to", "arrival_date", "arrival_date_utc", "subject", "message_id", "authentication_results", "dkim_domain", "source_ip_address", "source_country", "source_reverse_dns", "source_base_domain", "delivery_result", "auth_failure", "reported_domain", "authentication_mechanisms", "sample_headers_only"] if type(reports) == OrderedDict: reports = [reports] csv_file = StringIO() csv_writer = DictWriter(csv_file, fieldnames=fields) csv_writer.writeheader() for report in reports: row = report.copy() row["source_ip_address"] = report["source"]["ip_address"] row["source_reverse_dns"] = report["source"]["reverse_dns"] row["source_base_domain"] = report["source"]["base_domain"] row["source_country"] = report["source"]["country"] del row["source"] row["subject"] = report["parsed_sample"]["subject"] row["auth_failure"] = ",".join(report["auth_failure"]) authentication_mechanisms = report["authentication_mechanisms"] row["authentication_mechanisms"] = ",".join( authentication_mechanisms) del row["sample"] del row["parsed_sample"] csv_writer.writerow(row) return csv_file.getvalue()
Converts one or more parsed forensic reports to flat CSV format, including headers Args: reports: A parsed forensic report or list of parsed forensic reports Returns: str: Parsed forensic report data in flat CSV format, including headers
juraj-google-style
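A minimal standalone sketch of the same flattening pattern (the nested "source" dict pulled up into flat CSV columns); the field names here are a small illustrative subset of the real ones.
from csv import DictWriter
from io import StringIO

def reports_to_csv(reports):
    fields = ['feedback_type', 'subject', 'source_ip_address', 'source_country']
    out = StringIO()
    writer = DictWriter(out, fieldnames=fields)
    writer.writeheader()
    for report in reports:
        row = report.copy()
        row['source_ip_address'] = report['source']['ip_address']
        row['source_country'] = report['source']['country']
        del row['source']
        writer.writerow(row)
    return out.getvalue()

sample = [{'feedback_type': 'auth-failure', 'subject': 'hello',
           'source': {'ip_address': '192.0.2.1', 'country': 'US'}}]
print(reports_to_csv(sample))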
def GetRegistryFileMapping(self, registry_file): if not registry_file: return '' candidate_mappings = [] for mapping in self._REGISTRY_FILE_MAPPINGS_NT: if not mapping.unique_key_paths: continue match = True for key_path in mapping.unique_key_paths: registry_key = registry_file.GetKeyByPath(key_path) if not registry_key: match = False if match: candidate_mappings.append(mapping) if not candidate_mappings: return '' if len(candidate_mappings) == 1: return candidate_mappings[0].key_path_prefix key_path_prefixes = frozenset([ mapping.key_path_prefix for mapping in candidate_mappings]) expected_key_path_prefixes = frozenset([ 'HKEY_CURRENT_USER', 'HKEY_CURRENT_USER\\Software\\Classes']) if key_path_prefixes == expected_key_path_prefixes: return 'HKEY_CURRENT_USER' raise RuntimeError('Unable to resolve Windows Registry file mapping.')
Determines the Registry file mapping based on the content of the file. Args: registry_file (WinRegistyFile): Windows Registry file. Returns: str: key path prefix or an empty string. Raises: RuntimeError: if there are multiple matching mappings and the correct mapping cannot be resolved.
juraj-google-style
def move(self, to_project_id, **kwargs): path = '%s/%s/move' % (self.manager.path, self.get_id()) data = {'to_project_id': to_project_id} server_data = self.manager.gitlab.http_post(path, post_data=data, **kwargs) self._update_attrs(server_data)
Move the issue to another project. Args: to_project_id(int): ID of the target project **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabUpdateError: If the issue could not be moved
juraj-google-style
def hr_dp004(self, value=None): if (value is not None): try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float for field `hr_dp004`'.format(value)) self._hr_dp004 = value
Corresponds to IDD Field `hr_dp004` humidity ratio corresponding to Dew-point temperature corresponding to 0.4% annual cumulative frequency of occurrence Args: value (float): value for IDD Field `hr_dp004` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def _compute_theoretical_jacobian(x, x_shape, x_data, dy, dy_shape, dx, extra_feed_dict): if x.dtype.is_complex: x_shape = tuple(x_shape) + (2,) dy_factor = 2 if dy.dtype.is_complex else 1 x_size = _product(x_shape) x_val_size = _product(x_shape[1:]) dy_size = _product(dy_shape) * dy_factor jacobian = np.zeros((x_size, dy_size), dtype=x.dtype.real_dtype.as_numpy_dtype) dy_data = np.zeros(dy_shape, dtype=dy.dtype.as_numpy_dtype) dy_data_flat = dy_data.ravel().view(dy.dtype.real_dtype.as_numpy_dtype) sess = ops.get_default_session() for col in range(dy_size): dy_data_flat[col] = 1 if isinstance(dx, indexed_slices.IndexedSlices): backprop_indices, backprop_values = sess.run([dx.indices, dx.values], feed_dict=_extra_feeds(extra_feed_dict, {x: x_data, dy: dy_data})) for i, v in zip(backprop_indices, backprop_values): r_begin = i * x_val_size r_end = r_begin + x_val_size jacobian[r_begin:r_end, col] += v.flat else: assert isinstance(dx, tensor.Tensor), 'dx = ' + str(dx) backprop = sess.run(dx, feed_dict=_extra_feeds(extra_feed_dict, {x: x_data, dy: dy_data})) jacobian[:, col] = backprop.ravel().view(jacobian.dtype) dy_data_flat[col] = 0 if not dy_size: backprop = sess.run(dx, feed_dict=_extra_feeds(extra_feed_dict, {x: x_data, dy: dy_data})) if backprop.shape != x_data.shape: raise ValueError('Empty gradient has wrong shape: expected %s, got %s' % (x_data.shape, backprop.shape)) if np.any(backprop): raise ValueError('Empty tensor with nonzero gradients') logging.vlog(1, 'Theoretical Jacobian =\n%s', jacobian) return jacobian
Computes the theoretical Jacobian for dy/dx. Computes the theoretical Jacobian using the ops generated by compute_gradient(). Args: x: the tensor "x". x_shape: the dimensions of x as a tuple or an array of ints. x_data: a numpy array as the input data for x dy: the tensor "dy". dy_shape: the dimensions of dy as a tuple or an array of ints. dx: Tensor or IndexedSlices representing dx extra_feed_dict: dict that allows fixing specified tensor values during the jacobian calculation. Returns: A 2-d numpy array representing the Jacobian for dy/dx. It has "x_size" rows and "dy_size" columns where "x_size" is the number of elements in x and "dy_size" is the number of elements in dy. Raises: ValueError: If `dy` is empty but the gradient is nonzero.
github-repos
def lowpass_filter(data: FLOATS_TYPE, sampling_freq_hz: float, cutoff_freq_hz: float, numtaps: int) -> FLOATS_TYPE: coeffs = firwin(numtaps=numtaps, cutoff=normalized_frequency(cutoff_freq_hz, sampling_freq_hz), pass_zero=True) filtered_data = lfilter(b=coeffs, a=1.0, x=data) return filtered_data
Apply a low-pass filter to the data. Args: data: time series of the data sampling_freq_hz: sampling frequency :math:`f_s`, in Hz (or other consistent units) cutoff_freq_hz: filter cutoff frequency in Hz (or other consistent units) numtaps: number of filter taps Returns: filtered data Note: number of filter taps = filter order + 1
codesearchnet
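A hedged usage sketch assuming numpy and scipy are available; the source's normalized_frequency helper is replaced by the explicit cutoff / Nyquist ratio, which is what firwin expects when no sampling rate is passed.
import numpy as np
from scipy.signal import firwin, lfilter

fs = 1000.0      # sampling frequency, Hz
cutoff = 40.0    # low-pass cutoff, Hz
numtaps = 101    # filter order + 1

t = np.arange(0, 1.0, 1.0 / fs)
data = np.sin(2 * np.pi * 5 * t) + 0.5 * np.sin(2 * np.pi * 200 * t)  # 5 Hz + 200 Hz mix

coeffs = firwin(numtaps=numtaps, cutoff=cutoff / (fs / 2.0), pass_zero=True)
filtered = lfilter(b=coeffs, a=1.0, x=data)  # the 200 Hz component is strongly attenuated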
def __add__(self, other): if not all(np.equal(self.x, other.x)): raise ValueError("X axis values are not compatible!") return self.__class__(self.x, self.y + other.y, *self._args, **self._kwargs)
Add two Spectrum object together. Checks that x scales are the same. Otherwise, a ValueError is thrown. Args: other: Another Spectrum object Returns: Sum of the two Spectrum objects
juraj-google-style
def set_lock_state(self, code, device_label, state): response = None try: response = requests.put(urls.set_lockstate(self._giid, device_label, state), headers={'Accept': 'application/json, text/javascript, */*; q=0.01', 'Content-Type': 'application/json', 'Cookie': 'vid={}'.format(self._vid)}, data=json.dumps({'code': str(code)})) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) return json.loads(response.text)
Lock or unlock Args: code (str): Lock code device_label (str): device label of lock state (str): 'lock' or 'unlock'
codesearchnet
def _ConcatGradHelper(op: ops.Operation, grad, start_value_index, end_value_index, dim_index): def _CreateDenseMaskAndBegin(sizes, concat_dim): shape_of_shape = array_ops.shape(sizes[0]) mask = array_ops.concat([array_ops.zeros(array_ops.expand_dims(concat_dim, 0), dtype=dtypes.int32), [1], array_ops.zeros(shape_of_shape - concat_dim - 1, dtype=dtypes.int32)], 0) begin = array_ops.zeros(shape_of_shape, dtype=dtypes.int32) return (mask, begin) def _ExtractInputShapes(inputs): if context.executing_eagerly(): return array_ops.shape_n(inputs) sizes = [] fully_known = True for x in inputs: input_shape = array_ops.shape(x) if not isinstance(input_shape, tensor.Tensor) or input_shape.op.type != 'Const': fully_known = False break sizes.append(input_shape) if fully_known: return sizes else: return array_ops.shape_n(inputs) if len(op.inputs) == 2: return grad + [None] if end_value_index <= dim_index else [None] + grad concat_dim = op.inputs[dim_index] input_values = op.inputs[start_value_index:end_value_index] out_grads = [] if isinstance(grad, tensor.Tensor): if context.executing_eagerly() or isinstance(concat_dim, ops.EagerTensor): non_neg_concat_dim = concat_dim._numpy().item(0) % input_values[0]._rank() sizes = pywrap_tfe.TFE_Py_TensorShapeSlice(input_values, non_neg_concat_dim) out_grads = array_ops.split(grad, sizes, non_neg_concat_dim) else: if constant_op.is_constant(concat_dim): grad_context = control_flow_util.GetOutputContext(grad.op) dim_context = control_flow_util.GetOutputContext(concat_dim.op) if dim_context != grad_context: value = tensor_util.constant_value(concat_dim) concat_dim = constant_op.constant(value=value, dtype=concat_dim.dtype) non_neg_concat_dim = concat_dim % array_ops.rank(input_values[0]) sizes = _ExtractInputShapes(input_values) if len(sizes) > 16: sizes = array_ops.squeeze(array_ops.slice(array_ops_stack.stack(sizes, axis=1), [non_neg_concat_dim, 0], [1, -1])) out_grads = array_ops.split(grad, sizes, non_neg_concat_dim) else: offset = gen_array_ops.concat_offset(non_neg_concat_dim, sizes) for begin, size in zip(offset, sizes): out_grads.append(array_ops.slice(grad, begin, size)) elif isinstance(grad, indexed_slices_lib.IndexedSlices): non_neg_concat_dim = concat_dim % array_ops.rank(input_values[0]) concat_dim_static = tensor_util.constant_value(concat_dim) if concat_dim_static is None: raise ValueError('Can only compute IndexedSlices gradient with statically-known concat_dim') if concat_dim_static < 0: rank = tensor_util.constant_value(array_ops.rank(input_values[0])) if rank is None: raise ValueError('Can only compute IndexedSlices gradient with negative concat_dim when first value rank is statically-known.') concat_dim_static %= rank sizes = [array_ops.shape(x) for x in input_values] if concat_dim_static > 0: mask, begin = _CreateDenseMaskAndBegin(sizes, non_neg_concat_dim) for size in sizes: new_values = array_ops.slice(grad.values, begin, array_ops.concat([[-1], array_ops.slice(size, [1], [-1])], 0)) out_grads.append(indexed_slices_lib.IndexedSlices(new_values, grad.indices, size)) begin = math_ops.add(begin, size * mask) else: start = constant_op.constant(0, dtype=grad.indices.dtype) for size in sizes: size_concat_dim = array_ops.gather(size, non_neg_concat_dim) if size_concat_dim.dtype != grad.indices.dtype: size_concat_dim = math_ops.cast(size_concat_dim, dtype=grad.indices.dtype) end = start + size_concat_dim indices_to_select = array_ops.squeeze(array_ops.where(math_ops.logical_and(grad.indices >= start, grad.indices < end)), axis=[1]) new_indices = 
array_ops.gather(grad.indices, indices_to_select) - start new_values = array_ops.gather(grad.values, indices_to_select) out_grads.append(indexed_slices_lib.IndexedSlices(new_values, new_indices, size)) start = end else: raise TypeError('Expected Tensor or IndexedSlices, got %s' % type(grad)) return out_grads + [None] if end_value_index <= dim_index else [None] + out_grads
Gradient for concat op. Args: op: An operation. grad: `Tensor` or `IndexedSlices` representing the gradients with respect to each output of the op. start_value_index: An integer index of the first value in the op.inputs. end_value_index: An integer index of the last value in the op.inputs. dim_index: An integer index of concat_dim or axis parameter in op.inputs. Returns: Tensors representing the partial gradients with respect to each input of the op. Raises: ValueError: if concat_dim/axis is not statically known.
github-repos
def delete_keyvault(access_token, subscription_id, rgname, vault_name):
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/resourcegroups/', rgname,
                        '/providers/Microsoft.KeyVault/vaults/', vault_name,
                        '?api-version=', KEYVAULT_API])
    return do_delete(endpoint, access_token)
Deletes a key vault in the named resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. vault_name (str): Name of the key vault to delete. Returns: HTTP response. 200 OK.
codesearchnet
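A minimal usage sketch for delete_keyvault above. The subscription id, resource group, and vault name are hypothetical placeholders, and the authentication helper is only assumed to exist in the same library.

# Hypothetical values for illustration only.
access_token = get_access_token()  # assumed auth helper from the same library
response = delete_keyvault(access_token,
                           subscription_id='00000000-0000-0000-0000-000000000000',
                           rgname='my-resource-group',
                           vault_name='my-keyvault')
print(response.status_code)  # expect 200 on success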
def get_user_stats(self, users, lang=None, concepts=None, since=None, recalculate=True):
    only_one_user = False
    if not isinstance(users, list):
        users = [users]
        only_one_user = True
    if recalculate:
        if lang is None:
            raise ValueError('Recalculation without lang is not supported.')
        time_start = time_lib()
        concepts_to_recalculate = Concept.objects.get_concepts_to_recalculate(users, lang, concepts)
        LOGGER.debug('user_stats - getting identifying concepts to recalculate: %ss', time_lib() - time_start)
        time_start = time_lib()
        self.recalculate_concepts(concepts_to_recalculate, lang)
        LOGGER.debug('user_stats - recalculating concepts: %ss', time_lib() - time_start)
    qs = self.prepare_related().filter(user__in=users, concept__active=True)
    if concepts is not None:
        qs = qs.filter(concept__in=concepts)
    if lang is not None:
        qs = qs.filter(concept__lang=lang)
    if since is not None:
        qs = qs.filter(time__gte=since)
    data = defaultdict(lambda: defaultdict(lambda: {}))
    for user_stat in qs:
        data[user_stat.user_id][user_stat.concept.identifier][user_stat.stat] = user_stat.value
    if only_one_user:
        return data[users[0].pk if type(users[0]) == User else users[0]]
    return data
Finds all UserStats of the given concepts and users, recomputing UserStats if necessary. Args: users (Optional[list of users] or [user]): list of users or their primary keys. Defaults to None, meaning all users. lang (string): use only concepts with the given lang. Defaults to None, meaning all languages. concepts (Optional[list of concepts]): list of concepts or their primary keys. Defaults to None, meaning all concepts. since (Optional[datetime]): only include stats recorded at or after this time. recalculate (bool): recalculate stale concept stats before querying. Defaults to True. Returns: dict: user_id -> dict (concept_identifier -> (stat_name -> value)) -- for multiple users dict: concept_identifier -> (stat_name -> value) -- for one user
codesearchnet
def info(self, channel_id):
    resource = 'v1/channel.info?channel_id={}'.format(channel_id)
    resp = self._rtm_client.get(resource)
    if resp.is_fail():
        raise RTMServiceError('Failed to get channel information', resp)
    return resp.data['result']
Gets channel information by channel id. Args: channel_id (int): the id of the channel. Returns: Channel. Throws: RTMServiceError when the request fails.
codesearchnet
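A usage sketch for the channel lookup above. Here rtm_service stands in for an already-constructed, authenticated client instance of the enclosing class, and the channel id is a made-up example.

# 'rtm_service' is a hypothetical, already-authenticated client instance.
try:
    channel = rtm_service.info(123456)
    print(channel)
except RTMServiceError as err:
    print('lookup failed:', err)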
def get_sample_window(self, type_tag, size):
    md5_list = self.data_store.get_sample_window(type_tag, size)
    return self.store_sample_set(md5_list)
Get a window of the newest samples from the DataStore. Args: type_tag: the type of samples ('pcap','exe','pdf') size: the size of the window in MegaBytes (10 = 10MB) Returns: A sample_set handle which represents the newest samples within the size window
juraj-google-style
def diff_parameters(old_params, new_params):
    [changes, diff] = diff_dictionaries(old_params, new_params)
    if changes == 0:
        return []
    return diff
Compares the old vs. new parameters and returns a "diff". If there are no changes, we return an empty list. Args: old_params (dict): old parameters new_params (dict): new parameters Returns: list: A list of differences
codesearchnet
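An illustrative call to diff_parameters above with two small parameter dicts. The exact shape of each diff entry depends on diff_dictionaries, which is not shown in this snippet.

old_params = {'Environment': 'dev', 'InstanceType': 't2.micro'}
new_params = {'Environment': 'prod', 'InstanceType': 't2.micro'}
differences = diff_parameters(old_params, new_params)
if not differences:
    print('no parameter changes')
else:
    print(differences)  # entry format is defined by diff_dictionaries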
async def update_example_status(example: Example, client: GRPCClient): datasets: List[api_pb2.Dataset] = [] for emulator in example.tag.emulators: dataset: Dataset = example.tag.datasets[emulator.topic.source_dataset] datasets.append(api_pb2.Dataset(type=api_pb2.EmulatorType.Value(f'EMULATOR_TYPE_{emulator.type.upper()}'), options={'topic': emulator.topic.id}, dataset_path=dataset.file_name)) files: List[api_pb2.SnippetFile] = [api_pb2.SnippetFile(name=example.filepath, content=example.code, is_main=True)] for file in example.tag.files: files.append(api_pb2.SnippetFile(name=file.name, content=file.content, is_main=False)) pipeline_id = await client.run_code(example.code, example.sdk, example.tag.pipeline_options, datasets, files=files) example.pipeline_id = pipeline_id status = await client.check_status(pipeline_id) while status in [STATUS_VALIDATING, STATUS_PREPARING, STATUS_COMPILING, STATUS_EXECUTING]: await asyncio.sleep(Config.PAUSE_DELAY) status = await client.check_status(pipeline_id) example.status = status
Receive status for examples and update example.status and pipeline_id Use client to send requests to the backend: 1. Start code processing. 2. Ping the backend while status is STATUS_VALIDATING/ STATUS_PREPARING/STATUS_COMPILING/STATUS_EXECUTING Update example.status with resulting status. Args: example: beam example for processing and updating status and pipeline_id. client: client to send requests to the server.
github-repos
def _CallAndUpdateTrace(component, args, component_trace, treatment='class', target=None): if not target: target = component filename, lineno = inspectutils.GetFileAndLine(component) metadata = decorators.GetMetadata(component) fn = component.__call__ if treatment == 'callable' else component parse = _MakeParseFn(fn, metadata) (varargs, kwargs), consumed_args, remaining_args, capacity = parse(args) if inspectutils.IsCoroutineFunction(fn): loop = asyncio.get_event_loop() component = loop.run_until_complete(fn(*varargs, **kwargs)) else: component = fn(*varargs, **kwargs) if treatment == 'class': action = trace.INSTANTIATED_CLASS elif treatment == 'routine': action = trace.CALLED_ROUTINE else: action = trace.CALLED_CALLABLE component_trace.AddCalledComponent(component, target, consumed_args, filename, lineno, capacity, action=action) return (component, remaining_args)
Call the component by consuming args from args, and update the FireTrace. The component could be a class, a routine, or a callable object. This function calls the component and adds the appropriate action to component_trace. Args: component: The component to call args: Args for calling the component component_trace: FireTrace object that contains action trace treatment: Type of treatment used. Indicating whether we treat the component as a class, a routine, or a callable. target: Target in FireTrace element, default is None. If the value is None, the component itself will be used as target. Returns: component: The object that is the result of the callable call. remaining_args: The remaining args that haven't been consumed yet.
github-repos
def ParseConversationRow(self, parser_mediator, query, row, **unused_kwargs):
    query_hash = hash(query)
    event_data = TangoAndroidConversationEventData()
    event_data.conversation_identifier = self._GetRowValue(query_hash, row, 'conv_id')
    date_time = dfdatetime_semantic_time.NotSet()
    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)
    parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a conversation row from the database. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row resulting from query.
codesearchnet
def _match_protocol_attribute(self, left, other_type, attribute, subst, view): left_attribute, left_is_bound = self._get_attribute_for_protocol_matching(left.cls, attribute, instance=left, unbind=True) if left_attribute is None: if attribute == '__iter__': left_attribute = self.ctx.convert.constant_to_var(pytd_utils.DummyMethod('__iter__', 'self')) else: _, left_attribute = self.ctx.attribute_handler.get_attribute(self._node, left, attribute) assert left_attribute, f'Attr {attribute!r} not found on {left.full_name}' protocol_attribute_var, _ = self._get_attribute_for_protocol_matching(other_type, attribute, instance=None, unbind=left_is_bound) assert protocol_attribute_var if any((abstract_utils.is_callable(v) for v in left_attribute.data)) and all((abstract_utils.is_callable(protocol_attribute) for protocol_attribute in protocol_attribute_var.data)) and (not isinstance(other_type, abstract.ParameterizedClass)): return subst subst = subst.copy() left_type_params = {t.full_name: t for cls in left.cls.mro for t in cls.template} for k, t in left_type_params.items(): if k not in subst: subst[k] = left.get_instance_type_parameter(k) self._type_params.seen.add(t) new_substs = [] for new_view in abstract_utils.get_views([left_attribute], self._node): new_view.update(view) bad_matches = [] for protocol_attribute in protocol_attribute_var.data: protocol_attribute_types = list(self._get_attribute_types(other_type, protocol_attribute)) for protocol_attribute_type in protocol_attribute_types: match_result = self.match_var_against_type(left_attribute, protocol_attribute_type, subst, new_view) if match_result is None: bad_matches.append((new_view[left_attribute].data, protocol_attribute)) break else: new_substs.append(match_result) else: break else: bad_left, bad_right = zip(*bad_matches) self._protocol_error = error_types.ProtocolTypeError(left.cls, other_type, attribute, self.ctx.convert.merge_values(bad_left), self.ctx.convert.merge_values(bad_right)) return None return self._merge_substs(subst, new_substs)
Checks whether left and other_type are compatible in the given attribute. Args: left: An instance of a type. other_type: A protocol. attribute: An attribute name. subst: The current type parameter assignment. view: The current mapping of Variable to Value. Returns: A new type parameter assignment if the matching succeeded, None otherwise.
github-repos
def _prevent_2nd_derivative(x):
    def grad(dy):
        return array_ops.prevent_gradient(
            dy, message="Second derivative is not implemented.")
    return tf.identity(x), grad
Disables computation of the second derivatives for a tensor. NB: you need to apply a non-identity function to the output tensor for the exception to be raised. Arguments: x: A tensor. Returns: A tensor with the same value and the same derivative as x, but that raises LookupError when trying to compute the second derivatives.
juraj-google-style
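A hedged sketch of how the helper above might be exercised: its (value, grad_fn) return shape suggests it is meant to be wrapped in tf.custom_gradient, and taking a second derivative through a non-identity function should then raise, as the docstring notes.

import tensorflow as tf

# Sketch only: assumes the helper is wrapped with tf.custom_gradient,
# which its (x, grad) return signature suggests.
prevent_2nd = tf.custom_gradient(_prevent_2nd_derivative)

x = tf.Variable(2.0)
with tf.GradientTape() as outer:
    with tf.GradientTape() as inner:
        y = tf.square(prevent_2nd(x))  # non-identity function after the wrapper
    dy_dx = inner.gradient(y, x)
d2y_dx2 = outer.gradient(dy_dx, x)     # expected to raise LookupError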
def update_in_hdx(self, update_resources=True, update_resources_by_name=True,
                  remove_additional_resources=False, create_default_views=True,
                  hxl_update=True):
    loaded = False
    if 'id' in self.data:
        self._check_existing_object('dataset', 'id')
        if self._dataset_load_from_hdx(self.data['id']):
            loaded = True
        else:
            logger.warning('Failed to load dataset with id %s' % self.data['id'])
    if not loaded:
        self._check_existing_object('dataset', 'name')
        if not self._dataset_load_from_hdx(self.data['name']):
            raise HDXError('No existing dataset to update!')
    self._dataset_merge_hdx_update(update_resources=update_resources,
                                   update_resources_by_name=update_resources_by_name,
                                   remove_additional_resources=remove_additional_resources,
                                   create_default_views=create_default_views,
                                   hxl_update=hxl_update)
Check if dataset exists in HDX and if so, update it Args: update_resources (bool): Whether to update resources. Defaults to True. update_resources_by_name (bool): Compare resource names rather than position in list. Defaults to True. remove_additional_resources (bool): Remove additional resources found in dataset. Defaults to False. create_default_views (bool): Whether to call package_create_default_resource_views. Defaults to True. hxl_update (bool): Whether to call package_hxl_update. Defaults to True. Returns: None
codesearchnet
def extract(self, text: str) -> List[Extraction]:
    doc = self._parser(text)
    extractions = list()
    for sent in doc.sents:
        this_extraction = Extraction(value=sent.text,
                                     extractor_name=self.name,
                                     start_token=sent[0],
                                     end_token=sent[-1],
                                     start_char=sent.text[0],
                                     end_char=sent.text[-1])
        extractions.append(this_extraction)
    return extractions
Splits text into sentences. Args: text (str): Input text to be extracted. Returns: List[Extraction]: the list of extractions, or an empty list if there are no matches.
codesearchnet
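A usage sketch for the sentence-splitting extractor above. Here sentence_extractor stands in for an instance of the enclosing extractor class, whose construction is not shown in this snippet.

# 'sentence_extractor' is a hypothetical instance of the enclosing extractor class.
text = "ETK splits text into sentences. Each sentence becomes one Extraction."
for extraction in sentence_extractor.extract(text):
    print(extraction.value)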
def number_to_day(self, day_number):
    return [calendar.day_name[6],
            calendar.day_name[0],
            calendar.day_name[1],
            calendar.day_name[2],
            calendar.day_name[3],
            calendar.day_name[4],
            calendar.day_name[5]][day_number]
Returns localized day name by its CRON number Args: day_number: Number of a day Returns: Day corresponding to day_number Raises: IndexError: When day_number is not found
codesearchnet
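The mapping above starts from Sunday, so a quick sanity check looks like this; cron_helper stands in for an instance of the enclosing class, and the day names assume an English locale.

# 'cron_helper' is a hypothetical instance of the enclosing class.
print(cron_helper.number_to_day(0))  # 'Sunday' under an English locale
print(cron_helper.number_to_day(1))  # 'Monday'
print(cron_helper.number_to_day(7))  # raises IndexError, as documented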
def length(text, maxval=None, encoding=None):
    maxval = maxval or 4351
    try:
        assert not isinstance(text, six.binary_type)
    except AssertionError:
        raise TypeError('helpers.length requires a unicode argument')
    return sum(2 if ord(x) > maxval else 1
               for x in unicodedata.normalize('NFC', text))
Count the length of a str the way Twitter does, double-counting "wide" characters (e.g. ideographs, emoji) Args: text (str): Text to count. Must be a unicode string in Python 2 maxval (int): The maximum encoding that will be counted as 1 character. Defaults to 4351 (ჿ GEORGIAN LETTER LABIAL SIGN, U+10FF) Returns: int
juraj-google-style
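A small illustration of the double-counting rule above: characters whose code point exceeds maxval count as two.

print(length(u'cafe'))    # 4: every code point is below the default maxval
print(length(u'日本語'))  # 6: each ideograph is above U+10FF, so it counts twice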
def titles(self, unique=False):
    if unique:
        return tools.uniqued(title for _, title in self.iterfiles())
    return [title for _, title in self.iterfiles()]
Return a list of all available spreadsheet titles. Args: unique (bool): drop duplicates Returns: list: list of title/name strings
juraj-google-style
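A usage sketch for titles above; sheets stands in for an instance of the enclosing spreadsheet-collection class.

# 'sheets' is a hypothetical instance of the enclosing collection class.
print(sheets.titles())             # every title, duplicates included
print(sheets.titles(unique=True))  # duplicates dropped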
def _closeElements(childs, HTMLElement):
    out = []
    for e in childs:
        if not e.isTag():
            out.append(e)
            continue
        if not e.isNonPairTag() and not e.isEndTag() and not e.isComment() and e.endtag is None:
            e.childs = _closeElements(e.childs, HTMLElement)
            out.append(e)
            out.append(HTMLElement("</" + e.getTagName() + ">"))
            e.endtag = out[-1]
            out[-1].openertag = e
        else:
            out.append(e)
    return out
Create `endtags` for elements which look like openers, but don't have a proper :attr:`HTMLElement.endtag`. Args: childs (list): List of childs (:class:`HTMLElement` obj) - typically from :attr:`HTMLElement.childs` property. HTMLElement (class): Element class used to construct the closing tags. Returns: list: List of closed elements.
juraj-google-style
def transform_regex_replace(source, pattern, rewrite, name=None):
    with ops.name_scope(name, "TransformRegexReplace", [source]):
        source = convert_to_tensor_or_sparse_tensor(source, dtype=tf.string)
        if isinstance(source, tf.SparseTensor):
            result = tf.SparseTensor(
                indices=source.indices,
                values=ops_module.transform_regex_replace(source.values, pattern, rewrite),
                dense_shape=source.dense_shape
            )
        else:
            result = ops_module.transform_regex_replace(source, pattern, rewrite)
        return result
Replace all substrings of `source` that match the RE2 patterns in `pattern` with the corresponding strings in `rewrite`. Args: source: `Tensor` or `SparseTensor` of any shape, source strings for replacing. pattern: List of RE2 patterns to search for in source. rewrite: List of replacement strings. Should have the same length as `pattern`. name: A name for the operation (optional). Returns: `Tensor` or `SparseTensor` of same shape and size as input.
juraj-google-style
def channels_unarchive(self, *, channel: str, **kwargs) -> SlackResponse:
    self._validate_xoxp_token()
    kwargs.update({"channel": channel})
    return self.api_call("channels.unarchive", json=kwargs)
Unarchives a channel. Args: channel (str): The channel id. e.g. 'C1234567890'
juraj-google-style
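A usage sketch for channels_unarchive above, reusing the channel id format from the docstring; client is assumed to be an authenticated Slack client object that exposes this method with a user (xoxp) token.

# 'client' is assumed to be an authenticated client exposing this method.
response = client.channels_unarchive(channel='C1234567890')
print(response['ok'])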
def flush(self, hard=False):
    if not self.servers:
        return
    if hard:
        self.client.flush_all()
        self.reset_stats()
    else:
        from uuid import uuid4
        tag = uuid4().hex
        if self.debug:
            tag = "flushed" + tag
        self.current = tag
Drop existing entries from the cache. Args: hard (bool): If True, all current entries are flushed from the server(s), which affects all users. If False, only the local process is affected.
juraj-google-style
def _CanPlaceOnSingleLine(line): token_types = [x.type for x in line.tokens] if style.Get('SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED') and any((token_types[token_index - 1] == token.COMMA for token_index, token_type in enumerate(token_types[1:], start=1) if token_type == token.RPAR)): return False if style.Get('FORCE_MULTILINE_DICT') and token.LBRACE in token_types: return False indent_amt = style.Get('INDENT_WIDTH') * line.depth last = line.last last_index = -1 if last.is_pylint_comment or last.is_pytype_comment or last.is_copybara_comment: last = last.previous_token last_index = -2 if last is None: return True return last.total_length + indent_amt <= style.Get('COLUMN_LIMIT') and (not any((tok.is_comment for tok in line.tokens[:last_index])))
Determine if the logical line can go on a single line. Arguments: line: (logical_line.LogicalLine) The line currently being formatted. Returns: True if the line can or should be added to a single line. False otherwise.
github-repos
def nearest_neighbors(self, word, top_k=10):
    point = self[word]
    diff = self.vectors - point
    distances = np.linalg.norm(diff, axis=1)
    top_ids = distances.argsort()[1:top_k + 1]
    return [self.vocabulary.id_word[i] for i in top_ids]
Return the nearest k words to the given `word`. Args: word (string): single word. top_k (integer): decides how many neighbors to report. Returns: A list of words sorted by the distances. The closest is the first. Note: L2 metric is used to calculate distances.
juraj-google-style
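A usage sketch for nearest_neighbors above; embeddings stands in for an already-loaded word-embedding object of the enclosing class, and 'king' is just an example query word.

# 'embeddings' is a hypothetical, already-loaded embedding instance.
for neighbor in embeddings.nearest_neighbors('king', top_k=5):
    print(neighbor)  # closest words first, by L2 distance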
def _fulfillment_from_details(data, _depth=0):
    if _depth == 100:
        raise ThresholdTooDeep()
    if data['type'] == 'ed25519-sha-256':
        public_key = base58.b58decode(data['public_key'])
        return Ed25519Sha256(public_key=public_key)
    if data['type'] == 'threshold-sha-256':
        threshold = ThresholdSha256(data['threshold'])
        for cond in data['subconditions']:
            cond = _fulfillment_from_details(cond, _depth + 1)
            threshold.add_subfulfillment(cond)
        return threshold
    raise UnsupportedTypeError(data.get('type'))
Load a fulfillment for a signing spec dictionary Args: data: tx.output[].condition.details dictionary
codesearchnet
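An illustrative details dictionary for the loader above. PUBKEY_B58 is a stand-in for a base58-encoded 32-byte Ed25519 public key, not a real value.

# 'PUBKEY_B58' stands for a base58-encoded 32-byte Ed25519 public key.
details = {
    'type': 'threshold-sha-256',
    'threshold': 1,
    'subconditions': [
        {'type': 'ed25519-sha-256', 'public_key': PUBKEY_B58},
    ],
}
fulfillment = _fulfillment_from_details(details)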
def splay_health(health_target):
    HealthCheck = collections.namedtuple('HealthCheck', ['path', 'port', 'proto', 'target'])
    proto, health_port_path = health_target.split(':')
    port, *health_path = health_port_path.split('/')
    if proto == 'TCP':
        path = ''
    elif not health_path:
        path = '/healthcheck'
    else:
        path = '/{0}'.format('/'.join(health_path))
    target = '{0}:{1}{2}'.format(proto, port, path)
    health = HealthCheck(path, port, proto, target)
    LOG.info(health)
    return health
Set Health Check path, port, and protocol. Args: health_target (str): The health target. ie ``HTTP:80`` Returns: HealthCheck: A **collections.namedtuple** class with *path*, *port*, *proto*, and *target* attributes.
juraj-google-style
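Concrete examples of the target strings produced by splay_health above, following its path rules directly:

print(splay_health('HTTP:80').target)           # 'HTTP:80/healthcheck' (default path)
print(splay_health('TCP:8080').target)          # 'TCP:8080' (no path for TCP)
print(splay_health('HTTPS:443/status').target)  # 'HTTPS:443/status'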
def ParseSMS(self, parser_mediator, query, row, **unused_kwargs):
    query_hash = hash(query)
    phone_number = self._GetRowValue(query_hash, row, 'dstnum_sms')
    if phone_number:
        phone_number = phone_number.replace(' ', '')
    event_data = SkypeSMSEventData()
    event_data.number = phone_number
    event_data.query = query
    event_data.text = self._GetRowValue(query_hash, row, 'msg_sms')
    timestamp = self._GetRowValue(query_hash, row, 'time_sms')
    if timestamp:
        date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(date_time, 'SMS from Skype')
        parser_mediator.ProduceEventWithEventData(event, event_data)
Parses an SMS. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row resulting from query.
codesearchnet
def add_child_url(self, url: str, inline: bool=False,
                  link_type: Optional[LinkType]=None,
                  post_data: Optional[str]=None,
                  level: Optional[int]=None, replace: bool=False):
    url_properties = URLProperties()
    url_properties.level = (self.url_record.level + 1) if level is None else level
    url_properties.inline_level = ((self.url_record.inline_level or 0) + 1) if inline else None
    url_properties.parent_url = self.url_record.url
    url_properties.root_url = self.url_record.root_url or self.url_record.url
    url_properties.link_type = link_type
    url_data = URLData()
    url_data.post_data = post_data
    if replace:
        self.app_session.factory['URLTable'].remove_many([url])
    self.add_url(url, url_properties, url_data)
Add links scraped from the document with automatic values. Args: url: A full URL. (It can't be a relative path.) inline: Whether the URL is an embedded object. link_type: Expected link type. post_data: URL-encoded form data. The request will be made using POST. (Don't use this to upload files.) level: The child depth of this URL. replace: Whether to replace the existing entry in the database table so it will be redownloaded again. This function provides values automatically for: * ``inline`` * ``level`` * ``parent``: The referring page. * ``root`` See also :meth:`add_url`.
codesearchnet
def from_files(cls, secrets=None, storage=None, scopes=None, no_webserver=False):
    creds = oauth2.get_credentials(scopes, secrets, storage, no_webserver)
    return cls(creds)
Return a spreadsheet collection, creating OAuth 2.0 credentials. Args: secrets (str): location of secrets file (default: ``%r``) storage (str): location of storage file (default: ``%r``) scopes: scope URL(s) or ``'read'`` or ``'write'`` (default: ``%r``) no_webserver (bool): URL/code prompt instead of webbrowser auth Returns: Sheets: new Sheets instance with OAuth 2.0 credentials
juraj-google-style
def first_return_times(dts, c=None, d=0.0):
    if c is None:
        c = dts.mean()
    vmrt = distob.vectorize(analyses1.first_return_times)
    all_intervals = vmrt(dts, c, d)
    if hasattr(type(all_intervals), '__array_interface__'):
        return np.ravel(all_intervals)
    else:
        return np.hstack([distob.gather(ilist) for ilist in all_intervals])
For an ensemble of time series, return the set of all time intervals between successive returns to value c for all instances in the ensemble. If c is not given, the default is the mean across all times and across all time series in the ensemble. Args: dts (DistTimeseries) c (float): Optional target value (default is the ensemble mean value) d (float): Optional min distance from c to be attained between returns Returns: array of time intervals (Can take the mean of these to estimate the expected first return time for the whole ensemble)
codesearchnet
def load_terms(fo: IO, metadata: dict, forceupdate: bool): version = metadata["metadata"]["version"] with timy.Timer("Load Terms") as timer: es = bel.db.elasticsearch.get_client() es_version = version.replace("T", "").replace("-", "").replace(":", "") index_prefix = f"terms_{metadata['metadata']['namespace'].lower()}" index_name = f"{index_prefix}_{es_version}" if not elasticsearch.index_exists(es, index_name): elasticsearch.create_terms_index(es, index_name) elif forceupdate: index_name += "_alt" elasticsearch.create_terms_index(es, index_name) else: return terms_iterator = terms_iterator_for_elasticsearch(fo, index_name) elasticsearch.bulk_load_docs(es, terms_iterator) index_names = elasticsearch.get_all_index_names(es) for name in index_names: if name != index_name and index_prefix in name: elasticsearch.delete_index(es, name) elasticsearch.add_index_alias(es, index_name, terms_alias) log.info( "Load namespace terms", elapsed=timer.elapsed, namespace=metadata["metadata"]["namespace"], ) with timy.Timer("Load Term Equivalences") as timer: arango_client = arangodb.get_client() belns_db = arangodb.get_belns_handle(arango_client) arangodb.batch_load_docs( belns_db, terms_iterator_for_arangodb(fo, version), on_duplicate="update" ) log.info( "Loaded namespace equivalences", elapsed=timer.elapsed, namespace=metadata["metadata"]["namespace"], ) remove_old_equivalence_edges = f remove_old_equivalence_nodes = f arangodb.aql_query(belns_db, remove_old_equivalence_edges) arangodb.aql_query(belns_db, remove_old_equivalence_nodes) metadata["_key"] = f"Namespace_{metadata['metadata']['namespace']}" try: belns_db.collection(arangodb.belns_metadata_name).insert(metadata) except ArangoError as ae: belns_db.collection(arangodb.belns_metadata_name).replace(metadata)
Load terms into Elasticsearch and ArangoDB Forceupdate will create a new index in Elasticsearch regardless of whether an index with the resource version already exists. Args: fo: file obj - terminology file metadata: dict containing the metadata for terminology forceupdate: force full update - e.g. don't leave Elasticsearch indexes alone if their version ID matches
juraj-google-style
def plot(data, output_dir_path='.', width=10, height=8):
    if not isinstance(data, pd.DataFrame):
        data = pd.DataFrame(data)
    plot_accuracy(data, output_dir_path=output_dir_path, width=width, height=height)
    plot_loss(data, output_dir_path, width=width, height=height)
Create two plots: 1) loss 2) accuracy. Args: data: Pandas dataframe (or dict convertible to one) with the training history in the expected format.
juraj-google-style
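A usage sketch for plot above, assuming the training-history dict carries whatever columns plot_accuracy and plot_loss expect; the column names here are made up for illustration.

history = {'acc': [0.61, 0.74, 0.82], 'loss': [1.20, 0.80, 0.55]}  # hypothetical column names
plot(history, output_dir_path='plots', width=12, height=6)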
def from_str(format: str, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False) -> 'PipelineDataFormat':
    if format == 'json':
        return JsonPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
    elif format == 'csv':
        return CsvPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
    elif format == 'pipe':
        return PipedPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
    else:
        raise KeyError(f'Unknown reader {format} (Available reader are json/csv/pipe)')
Creates an instance of the right subclass of [`~pipelines.PipelineDataFormat`] depending on `format`. Args: format (`str`): The format of the desired pipeline. Acceptable values are `"json"`, `"csv"` or `"pipe"`. output_path (`str`, *optional*): Where to save the outgoing data. input_path (`str`, *optional*): Where to look for the input data. column (`str`, *optional*): The column to read. overwrite (`bool`, *optional*, defaults to `False`): Whether or not to overwrite the `output_path`. Returns: [`~pipelines.PipelineDataFormat`]: The proper data format.
github-repos
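A usage sketch for the factory above; the file paths are placeholders.

fmt = PipelineDataFormat.from_str('csv',
                                  output_path='predictions.csv',
                                  input_path='inputs.csv',
                                  column='text',
                                  overwrite=False)
print(type(fmt).__name__)  # CsvPipelineDataFormat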
def _ExtractInterfaceMetadata(self, metadata):
    interfaces = []
    for network_interface in metadata:
        mac_address = network_interface.get('mac')
        interface = self.network_utils.GetNetworkInterface(mac_address)
        ip_addresses = []
        if interface:
            ip_addresses.extend(network_interface.get('forwardedIps', []))
            if self.ip_aliases:
                ip_addresses.extend(network_interface.get('ipAliases', []))
            if self.target_instance_ips:
                ip_addresses.extend(network_interface.get('targetInstanceIps', []))
            interfaces.append(NetworkDaemon.NetworkInterface(
                interface, ip_addresses, network_interface.get('ip', [])))
        else:
            message = 'Network interface not found for MAC address: %s.'
            self.logger.warning(message, mac_address)
    return interfaces
Extracts network interface metadata. Args: metadata: dict, the metadata response with the new network interfaces. Returns: list, a list of NetworkInterface objects.
juraj-google-style