Dataset columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes).
def traverse_preorder(self, leaves=True, internal=True):
    for node in self.root.traverse_preorder(leaves=leaves, internal=internal):
        yield node
Perform a preorder traversal of the ``Node`` objects in this ``Tree`` Args: ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False`` ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
codesearchnet
def leap_days_between(start_date, end_date):
    def leap_days_since_year_0(date_tensor):
        year = date_tensor.year()
        month = date_tensor.month()
        # Number of leap years up to and including `year`.
        leap_years_since_0 = year // 4 - year // 100 + year // 400
        # In January and February of a leap year, that year's leap day has
        # not occurred yet, so it must not be counted.
        needs_adjustment = is_leap_year(year) & (month <= 2)
        return leap_years_since_0 - tf.where(needs_adjustment, 1, 0)
    return leap_days_since_year_0(end_date) - leap_days_since_year_0(start_date)
Calculates number of leap days (29 Feb) between two dates. 'start_date' is included and 'end_date' is excluded from the period. For example, for dates `2019-12-24` and `2024-3-10` the result is 2: there is 29 Feb 2020 and 29 Feb 2024 between 24 Dec 2019 (inclusive) and 10 Mar 2024 (exclusive). If `end_date` is earlier than `start_date`, the result will be negative or zero. Args: start_date: DateTensor. end_date: DateTensor compatible with `start_date`. Returns: Tensor of type 'int32'.
github-repos
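The same inclusive/exclusive convention can be illustrated with a plain-Python sketch that needs no TensorFlow; `leap_days_before` is a hypothetical helper written for this example, not part of the library above.

from datetime import date

def count_leap_days(start: date, end: date) -> int:
    """Count 29 Febs in [start, end); negative if end is before start."""
    def leap_days_before(d: date) -> int:
        # Leap years up to and including d.year (proleptic Gregorian)...
        count = d.year // 4 - d.year // 100 + d.year // 400
        is_leap = d.year % 4 == 0 and (d.year % 100 != 0 or d.year % 400 == 0)
        # ...minus one if that year's leap day has not been passed yet.
        if is_leap and (d.month, d.day) <= (2, 29):
            count -= 1
        return count
    return leap_days_before(end) - leap_days_before(start)

assert count_leap_days(date(2019, 12, 24), date(2024, 3, 10)) == 2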
def _QueryHash(self, nsrl_socket, digest):
    try:
        query = 'QUERY {0:s}\n'.format(digest).encode('ascii')
    except UnicodeDecodeError:
        logger.error('Unable to encode digest: {0!s} to ASCII.'.format(digest))
        return False

    response = None
    try:
        nsrl_socket.sendall(query)
        response = nsrl_socket.recv(self._RECEIVE_BUFFER_SIZE)
    except socket.error as exception:
        logger.error('Unable to query nsrlsvr with error: {0!s}.'.format(exception))

    if not response:
        return False

    response = response.strip()
    return response == b'OK 1'
Queries nsrlsvr for a specific hash. Args: nsrl_socket (socket._socketobject): socket of connection to nsrlsvr. digest (str): hash to look up. Returns: bool: True if the hash was found, False if not or None on error.
codesearchnet
def _create_or_restore_slot_variable(self, slot_variable_position, slot_name, variable): variable_key = _var_key(variable) slot_dict = self._slots.get(variable_key, {}) slot_variable = slot_dict.get(slot_name, None) if slot_variable is None and context.executing_eagerly() and slot_variable_position.is_simple_variable() and (not ops.get_default_graph()._variable_creator_stack or self._distribution_strategy): initializer = trackable.CheckpointInitialValueCallable(checkpoint_position=slot_variable_position) slot_variable = self.add_slot(var=variable, initializer=initializer, slot_name=slot_name, shape=slot_variable_position.value_shape()) if slot_variable is not None: slot_variable_position.restore(slot_variable) else: self._deferred_slot_restorations.setdefault(slot_name, {}).setdefault(variable_key, []).append(slot_variable_position)
Restore a slot variable's value, possibly creating it. Called when a variable which has an associated slot variable is created or restored. When executing eagerly, we create the slot variable with a restoring initializer. No new variables are created when graph building. Instead, _restore_slot_variable catches these after normal creation and adds restore ops to the graph. This method is nonetheless important when graph building for the case when a slot variable has already been created but `variable` has just been added to a dependency graph (causing us to realize that the slot variable needs to be restored). Args: slot_variable_position: A `trackable._CheckpointPosition` object indicating the slot variable `Trackable` object to be restored. slot_name: The name of this `Optimizer`'s slot to restore into. variable: The variable object this slot is being created for.
github-repos
def MapByteStream(self, byte_stream, byte_offset=0, **unused_kwargs): return byte_stream[byte_offset:byte_offset + self.byte_size]
Maps the data type on a byte stream. Args: byte_stream (bytes): byte stream. byte_offset (Optional[int]): offset into the byte stream where to start. Returns: object: mapped value. Raises: MappingError: if the data type definition cannot be mapped on the byte stream.
juraj-google-style
def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor] = None):
    pixel_values = pixel_values.type(self.visual.dtype)
    image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw)
    return image_embeds
Encodes images into continuous embeddings that can be forwarded to the language model. Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): The tensors corresponding to the input images. image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): The temporal, height and width of feature shape of each image in LLM.
github-repos
def nic_s(msg):
    tc = typecode(msg)
    if tc != 31:
        raise RuntimeError('%s: Not a status operation message, expecting TC = 31' % msg)

    msgbin = common.hex2bin(msg)
    nic_s = int(msgbin[75])
    return nic_s
Obtain NIC supplement bit, TC=31 message Args: msg (string): 28 bytes hexadecimal message string Returns: int: NICs number (0 or 1)
codesearchnet
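A self-contained sketch of the same bit extraction, with a hand-rolled helper standing in for `common.hex2bin`; the hex string is an arbitrary 28-character example, not a real TC=31 status message.

def hex2bin(hexstr: str) -> str:
    # 4 bits per hex digit, zero-padded so leading zeros are kept.
    return bin(int(hexstr, 16))[2:].zfill(len(hexstr) * 4)

def bit_at(msg_hex: str, index: int) -> int:
    # Return a single bit of the 112-bit ADS-B message.
    return int(hex2bin(msg_hex)[index])

msg = "8D406B902015A678D4D220AA4BDA"
assert len(hex2bin(msg)) == 112
print(bit_at(msg, 75))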
def get_identity_broadcaster(cls, nvals, dtype=None): return _GatherLayerBroadcaster(math_ops.range(nvals, dtype=dtype))
Create an identity broadcaster. TODO(martinz): an identity broadcaster can be far more efficient than a generic broadcaster. Add an optimized implementation. Args: nvals: the number of values for the broadcaster. dtype: the dtype of the broadcaster, or None to use the dtype of nvals. Returns: an identity broadcaster from [0....nvals-1] to [0...nvals-1]
github-repos
def get_length(alt_len, ref_len, category, pos, end, svtype=None, svlen=None):
    length = -1
    if category in ('snv', 'indel', 'cancer'):
        if ref_len == alt_len:
            length = alt_len
        else:
            length = abs(ref_len - alt_len)
    elif category == 'sv':
        if svtype == 'bnd':
            length = int(100000000000.0)
        elif svlen:
            length = abs(int(svlen))
        elif end:
            if end != pos:
                length = end - pos
    return length
Return the length of a variant Args: alt_len(int) ref_len(int) category(str) pos(int) end(int) svtype(str) svlen(int) Returns: length(int): the length of the variant, or -1 if it cannot be determined
codesearchnet
def load(self, path):
    path = os.path.expandvars(os.path.expanduser(path))
    gdg = cgaddag.gdg_load(path.encode('ascii'))
    if not gdg:
        errno = ctypes.c_int.in_dll(ctypes.pythonapi, 'errno').value
        raise OSError(errno, os.strerror(errno), path)

    self.__del__()
    self.gdg = gdg.contents
Load a GADDAG from file, replacing the words currently in this GADDAG. Args: path: path to saved GADDAG to be loaded.
codesearchnet
def _ValidateCacheEntryHeader(self, cache_entry_header): return ( cache_entry_header.request_size > 0 and cache_entry_header.request_size < self._MAXIMUM_URL_LENGTH and cache_entry_header.major_format_version == 1 and cache_entry_header.last_fetched_time > 0 and cache_entry_header.fetch_count > 0)
Determines whether the values in the cache entry header are valid. Args: cache_entry_header (firefox_cache1_entry_header): cache entry header. Returns: bool: True if the cache entry header is valid.
juraj-google-style
def get_canonical_path(resource_key, pk=None):
    if resource_key not in resource_map:
        return None
    base_path = get_script_prefix() + resource_map[resource_key]['path']
    if pk:
        return '%s/%s/' % (base_path, pk)
    else:
        return base_path
Return canonical resource path. Arguments: resource_key - Canonical resource key i.e. Serializer.get_resource_key(). pk - (Optional) Object's primary key for a single-resource URL. Returns: Absolute URL as string.
juraj-google-style
def RemoveObject(self, identifier):
    if identifier not in self._values:
        raise KeyError('Missing cached object for identifier: {0:s}'.format(identifier))

    del self._values[identifier]
Removes a cached object based on the identifier. This method ignores the cache value reference count. Args: identifier (str): VFS object identifier. Raises: KeyError: if the VFS object is not found in the cache.
codesearchnet
def register_actor(name, actor_handle):
    if not isinstance(name, str):
        raise TypeError('The name argument must be a string.')
    if not isinstance(actor_handle, ray.actor.ActorHandle):
        raise TypeError('The actor_handle argument must be an ActorHandle object.')

    actor_name = _calculate_key(name)
    pickled_state = pickle.dumps(actor_handle)
    already_exists = _internal_kv_put(actor_name, pickled_state)
    if already_exists:
        # Undo the speculative handle fork before reporting the conflict.
        actor_handle._ray_new_actor_handles.pop()
        raise ValueError('Error: the actor with name={} already exists'.format(name))
Register a named actor under a string key. Args: name: The name of the named actor. actor_handle: The actor object to be associated with this name
codesearchnet
def label(self, input_grid):
    marked = self.find_local_maxima(input_grid)
    marked = np.where(marked >= 0, 1, 0)
    markers = splabel(marked)[0]
    return markers
Labels input grid using enhanced watershed algorithm. Args: input_grid (numpy.ndarray): Grid to be labeled. Returns: Array of labeled pixels
codesearchnet
def read_geojson(filename): json_file = open(filename) data = json.load(json_file) json_file.close() times = data["properties"]["times"] main_data = dict(timesteps=[], masks=[], x=[], y=[], i=[], j=[]) attribute_data = dict() for feature in data["features"]: for main_name in main_data.keys(): main_data[main_name].append(np.array(feature["properties"][main_name])) for k, v in feature["properties"]["attributes"].items(): if k not in attribute_data.keys(): attribute_data[k] = [np.array(v)] else: attribute_data[k].append(np.array(v)) kwargs = {} for kw in ["dx", "step", "u", "v"]: if kw in data["properties"].keys(): kwargs[kw] = data["properties"][kw] sto = STObject(main_data["timesteps"], main_data["masks"], main_data["x"], main_data["y"], main_data["i"], main_data["j"], times[0], times[-1], **kwargs) for k, v in attribute_data.items(): sto.attributes[k] = v return sto
Reads a geojson file containing an STObject and initializes a new STObject from the information in the file. Args: filename: Name of the geojson file Returns: an STObject
juraj-google-style
def minute(self, value=None):
    if value is not None:
        try:
            value = int(value)
        except ValueError:
            raise ValueError('value {} need to be of type int for field `minute`'.format(value))
        if value < 0:
            raise ValueError('value need to be greater or equal 0 for field `minute`')
        if value > 60:
            raise ValueError('value need to be smaller 60 for field `minute`')

    self._minute = value
Corresponds to IDD Field `minute` Args: value (int): value for IDD Field `minute` value >= 0 value <= 60 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def AddProcessingOptions(self, argument_group):
    argument_helper_names = ['temporary_directory', 'zeromq']
    if self._CanEnforceProcessMemoryLimit():
        argument_helper_names.append('process_resources')
    helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
        argument_group, names=argument_helper_names)

    argument_group.add_argument(
        '--worker-memory-limit', '--worker_memory_limit',
        dest='worker_memory_limit', action='store', type=int,
        metavar='SIZE', help=(
            'Maximum amount of memory (data segment and shared memory) '
            'a worker process is allowed to consume in bytes, where 0 '
            'represents no limit. The default limit is 2147483648 (2 GiB). '
            'If a worker process exceeds this limit it is killed by the main '
            '(foreman) process.'))
Adds processing options to the argument group Args: argument_group (argparse._ArgumentGroup): argparse argument group.
juraj-google-style
def load_config(paths=DEFAULT_CONFIG_PATHS):
    config = Config()
    for path in paths:
        if os.path.isfile(path):
            config.load_pyfile(path)
    return config
Attempt to load config from paths, in order. Args: paths (List[string]): list of paths to python files Return: Config: loaded config
codesearchnet
def dummy_inputs(self):
    batch_size = 2
    num_visual_features = 10
    input_ids = tf.constant([[3, 5, 6], [2, 3, 4]], dtype=tf.int32)
    visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim))
    visual_pos = tf.random.uniform((batch_size, num_visual_features, 4))
    return {'input_ids': input_ids, 'visual_feats': visual_feats, 'visual_pos': visual_pos}
Dummy inputs to build the network. Returns: tf.Tensor with dummy inputs
github-repos
def configure_profile(msg_type, profile_name, data, auth):
    with jsonconfig.Config("messages", indent=4) as cfg:
        write_data(msg_type, profile_name, data, cfg)
        write_auth(msg_type, profile_name, auth, cfg)
        print("[+] Configuration entry for <" + profile_name + "> created.")
        print("[+] Configuration file location: " + cfg.filename)
Create the profile entry. Args: :msg_type: (str) message type to create config entry. :profile_name: (str) name of the profile entry :data: (dict) dict values for the 'settings' :auth: (dict) auth parameters
juraj-google-style
def set_api_url(self, api_url="https: old_api_url = self._api_url old_lang = self._lang self._lang = lang.lower() self._api_url = api_url.format(lang=self._lang) try: self._get_site_info() self.__supported_languages = None except MediaWikiException: self._api_url = old_api_url self._lang = old_lang raise MediaWikiAPIURLError(api_url) self.clear_memoized()
Set the API URL and language Args: api_url (str): API URL to use lang (str): Language of the API URL Raises: :py:func:`mediawiki.exceptions.MediaWikiAPIURLError`: if the \ url is not a valid MediaWiki site
juraj-google-style
def __init__(self, output_mediator):
    super(JSONOutputModule, self).__init__(output_mediator)
    self._event_counter = 0
Initializes the output module object. Args: output_mediator (OutputMediator): mediates interactions between output modules and other components, such as storage and dfvfs.
juraj-google-style
def fit(self, x_train, y_train, x_valid=None, y_valid=None, epochs=1, batch_size=32, verbose=1, callbacks=None, shuffle=True): p = IndexTransformer(initial_vocab=self.initial_vocab, use_char=self.use_char) p.fit(x_train, y_train) embeddings = filter_embeddings(self.embeddings, p._word_vocab.vocab, self.word_embedding_dim) model = BiLSTMCRF(char_vocab_size=p.char_vocab_size, word_vocab_size=p.word_vocab_size, num_labels=p.label_size, word_embedding_dim=self.word_embedding_dim, char_embedding_dim=self.char_embedding_dim, word_lstm_size=self.word_lstm_size, char_lstm_size=self.char_lstm_size, fc_dim=self.fc_dim, dropout=self.dropout, embeddings=embeddings, use_char=self.use_char, use_crf=self.use_crf) (model, loss) = model.build() model.compile(loss=loss, optimizer=self.optimizer) trainer = Trainer(model, preprocessor=p) trainer.train(x_train, y_train, x_valid, y_valid, epochs=epochs, batch_size=batch_size, verbose=verbose, callbacks=callbacks, shuffle=shuffle) self.p = p self.model = model
Fit the model for a fixed number of epochs. Args: x_train: list of training data. y_train: list of training target (label) data. x_valid: list of validation data. y_valid: list of validation target (label) data. batch_size: Integer. Number of samples per gradient update. If unspecified, `batch_size` will default to 32. epochs: Integer. Number of epochs to train the model. verbose: Integer. 0, 1, or 2. Verbosity mode. 0 = silent, 1 = progress bar, 2 = one line per epoch. callbacks: List of `keras.callbacks.Callback` instances. List of callbacks to apply during training. shuffle: Boolean (whether to shuffle the training data before each epoch). `shuffle` will default to True.
codesearchnet
def add(self, other):
    if not isinstance(other, Chi):
        other = Chi(other)
    if self.dim != other.dim:
        raise QiskitError("other QuantumChannel dimensions are not equal")
    return Chi(self._data + other.data, self._input_dims, self._output_dims)
Return the QuantumChannel self + other. Args: other (QuantumChannel): a quantum channel. Returns: Chi: the linear addition self + other as a Chi object. Raises: QiskitError: if other is not a QuantumChannel subclass, or has incompatible dimensions.
juraj-google-style
def upload(self, params={}):
    if self.upload_token is not None:
        # Resume a previously created upload.
        status = self.check()
        if status['status'] != 4:
            return self.commit()
        else:
            self.new_slice()
            while self.slice_task_id != 0:
                self.upload_slice()
            return self.commit()
    else:
        # Start a new upload from scratch.
        self.create(self.prepare_video_params(**params))
        self.create_file()
        self.new_slice()
        while self.slice_task_id != 0:
            self.upload_slice()
        return self.commit()
Start uploading the file until the upload is complete or an error occurs. This is the main method to use if you do not care about the state of the process. Args: params: a dict object describing video info, e.g. title, tags, description, category. For all video params see the doc of prepare_video_params. Returns: the video_id if the upload succeeds
codesearchnet
def import_object_from_path(path, object):
    with open(path) as f:
        return import_object_from_string_code(f.read(), object)
Used to import an object from an absolute path. This function takes an absolute path and imports it as a Python module. It then returns the object with name `object` from the imported module. Args: path (string): Absolute file path of .py file to import object (string): Name of object to extract from imported module
codesearchnet
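The helper `import_object_from_string_code` is not shown in this entry; a minimal stand-in can be sketched with the standard library, assuming it simply executes the source in a throwaway module namespace and returns one attribute.

import types

def import_object_from_string_code(code: str, name: str):
    # Execute the source in a fresh module and pull out the named attribute.
    module = types.ModuleType("dynamic_module")
    exec(code, module.__dict__)
    try:
        return getattr(module, name)
    except AttributeError:
        raise ImportError("Object {!r} not found in the supplied source".format(name))

source = "def greet(who):\n    return 'hello ' + who\n"
greet = import_object_from_string_code(source, "greet")
assert greet("world") == "hello world"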
def _protobuf_value_type(value):
    if value.HasField('number_value'):
        return api_pb2.DATA_TYPE_FLOAT64
    if value.HasField('string_value'):
        return api_pb2.DATA_TYPE_STRING
    if value.HasField('bool_value'):
        return api_pb2.DATA_TYPE_BOOL
    return None
Returns the type of the google.protobuf.Value message as an api.DataType. Returns None if the type of 'value' is not one of the types supported in api_pb2.DataType. Args: value: google.protobuf.Value message.
codesearchnet
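With `google.protobuf.struct_pb2.Value` the same dispatch can also be written against the `kind` oneof via `WhichOneof`; the name mapping below is illustrative and stands in for the api_pb2 constants used above.

from google.protobuf import struct_pb2

_KIND_TO_NAME = {
    "number_value": "DATA_TYPE_FLOAT64",
    "string_value": "DATA_TYPE_STRING",
    "bool_value": "DATA_TYPE_BOOL",
}

def value_type_name(value: struct_pb2.Value):
    # Returns None when the Value is unset or holds an unsupported kind.
    return _KIND_TO_NAME.get(value.WhichOneof("kind"))

assert value_type_name(struct_pb2.Value(number_value=3.5)) == "DATA_TYPE_FLOAT64"
assert value_type_name(struct_pb2.Value()) is None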
def log_every_n_seconds(level, msg, n_seconds, *args):
    should_log = _seconds_have_elapsed(get_absl_logger().findCaller(), n_seconds)
    log_if(level, msg, should_log, *args)
Logs 'msg % args' at level 'level' iff 'n_seconds' elapsed since last call. Logs the first call, logs subsequent calls if 'n' seconds have elapsed since the last logging call from the same call site (file + line). Not thread-safe. Args: level: int, the absl logging level at which to log. msg: str, the message to be logged. n_seconds: float or int, seconds which should elapse before logging again. *args: The args to be substitued into the msg.
codesearchnet
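A dependency-free sketch of the same per-call-site rate limiting, assuming timestamps are kept in a module-level dict keyed by (file, line); absl's real implementation differs in detail and, like this sketch, is not thread-safe.

import inspect
import logging
import time

_last_log_time = {}

def log_every_n_seconds(level, msg, n_seconds, *args):
    # Identify the calling line so each call site is throttled independently.
    frame = inspect.stack()[1]
    call_site = (frame.filename, frame.lineno)
    now = time.monotonic()
    last = _last_log_time.get(call_site)
    if last is None or now - last >= n_seconds:
        _last_log_time[call_site] = now
        logging.log(level, msg, *args)

for _ in range(5):
    log_every_n_seconds(logging.WARNING, "tick %d", 10, 1)  # emitted only once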
def Name(self):
    name = ''
    if self.Version:
        name = self.Version.UserAgent
    return name
Get the peer name. Returns: str: the peer's user agent, or an empty string if no version information is available.
codesearchnet
def infer_namespace(ac):
    namespaces = infer_namespaces(ac)
    if not namespaces:
        return None
    if len(namespaces) > 1:
        raise BioutilsError('Multiple namespaces possible for {}'.format(ac))
    return namespaces[0]
Infer the single namespace of the given accession This function is convenience wrapper around infer_namespaces(). Returns: * None if no namespaces are inferred * The (single) namespace if only one namespace is inferred * Raises an exception if more than one namespace is inferred >>> infer_namespace("ENST00000530893.6") 'ensembl' >>> infer_namespace("NM_01234.5") 'refseq' >>> infer_namespace("A2BC19") 'uniprot' N.B. The following test is disabled because Python 2 and Python 3 handle doctest exceptions differently. :-( X>>> infer_namespace("P12345") Traceback (most recent call last): ... bioutils.exceptions.BioutilsError: Multiple namespaces possible for P12345 >>> infer_namespace("BOGUS99") is None True
codesearchnet
def forward(self, hidden_states: torch.Tensor, original_hidden_states: Optional[torch.Tensor]=None, layer_idx: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, causal_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Zamba2HybridDynamicCache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, transformer_hidden_states: Optional[torch.Tensor]=None, **kwargs) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = hidden_states + transformer_hidden_states if transformer_hidden_states is not None else hidden_states hidden_states = self.input_layernorm(hidden_states) hidden_states = self.mamba(hidden_states=hidden_states, cache_params=past_key_value, attention_mask=attention_mask) self_attn_weights = None hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (past_key_value,) return outputs
Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. past_key_value (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence.
github-repos
def hardware_version(self):
    version = self._dll.JLINKARM_GetHardwareVersion()
    major = version / 10000 % 100
    minor = version / 100 % 100
    return '%d.%02d' % (major, minor)
Returns the hardware version of the connected J-Link as a major.minor string. Args: self (JLink): the ``JLink`` instance Returns: Hardware version string.
juraj-google-style
def __init__(self, directory, loader_factory, path_filter=lambda x: True):
    if directory is None:
        raise ValueError('A directory is required')
    if loader_factory is None:
        raise ValueError('A loader factory is required')
    self._directory = directory
    self._path = None
    self._loader_factory = loader_factory
    self._loader = None
    self._path_filter = path_filter
    self._ooo_writes_detected = False
    self._finalized_sizes = {}
Constructs a new DirectoryWatcher. Args: directory: The directory to load files from. loader_factory: A factory for creating loaders. The factory should take a path and return an object that has a Load method returning an iterator that will yield all events that have not been yielded yet. path_filter: If specified, only paths matching this filter are loaded. Raises: ValueError: If directory or loader_factory are None.
juraj-google-style
def info(self, **kwargs):
    path = self._get_series_id_season_number_episode_number_path('info')
    response = self._GET(path, kwargs)
    self._set_attrs_to_values(response)
    return response
Get the primary information about a TV episode by combination of a season and episode number. Args: language: (optional) ISO 639 code. append_to_response: (optional) Comma separated, any TV series method. Returns: A dict respresentation of the JSON returned from the API.
codesearchnet
def forward(self, evoformer_output_dict, aatype, mask=None, _offload_inference=False): s = evoformer_output_dict['single'] if mask is None: mask = s.new_ones(s.shape[:-1]) s = self.layer_norm_s(s) z = self.layer_norm_z(evoformer_output_dict['pair']) z_reference_list = None if _offload_inference: assert sys.getrefcount(evoformer_output_dict['pair']) == 2 evoformer_output_dict['pair'] = evoformer_output_dict['pair'].cpu() z_reference_list = [z] z = None s_initial = s s = self.linear_in(s) rigids = Rigid.identity(s.shape[:-1], s.dtype, s.device, self.training, fmt='quat') outputs = [] for i in range(self.config.num_blocks): s = s + self.ipa(s, z, rigids, mask, _offload_inference=_offload_inference, _z_reference_list=z_reference_list) s = self.ipa_dropout(s) s = self.layer_norm_ipa(s) s = self.transition(s) rigids = rigids.compose_q_update_vec(self.bb_update(s)) backb_to_global = Rigid(Rotation(rot_mats=rigids.get_rots().get_rot_mats(), quats=None), rigids.get_trans()) backb_to_global = backb_to_global.scale_translation(self.config.trans_scale_factor) unnormalized_angles, angles = self.angle_resnet(s, s_initial) all_frames_to_global = self.torsion_angles_to_frames(backb_to_global, angles, aatype) pred_xyz = self.frames_and_literature_positions_to_atom14_pos(all_frames_to_global, aatype) scaled_rigids = rigids.scale_translation(self.config.trans_scale_factor) preds = {'frames': scaled_rigids.to_tensor_7(), 'sidechain_frames': all_frames_to_global.to_tensor_4x4(), 'unnormalized_angles': unnormalized_angles, 'angles': angles, 'positions': pred_xyz, 'states': s} outputs.append(preds) rigids = rigids.stop_rot_gradient() del z, z_reference_list if _offload_inference: evoformer_output_dict['pair'] = evoformer_output_dict['pair'].to(s.device) outputs = dict_multimap(torch.stack, outputs) outputs['single'] = s return outputs
Args: evoformer_output_dict: Dictionary containing: "single": [*, N_res, C_s] single representation "pair": [*, N_res, N_res, C_z] pair representation aatype: [*, N_res] amino acid indices mask: Optional [*, N_res] sequence mask Returns: A dictionary of outputs
github-repos
def atmospheric_station_pressure(self, value=999999): if value is not None: try: value = int(value) except ValueError: raise ValueError( 'value {} need to be of type int ' 'for field `atmospheric_station_pressure`'.format(value)) if value <= 31000: raise ValueError('value need to be greater 31000 ' 'for field `atmospheric_station_pressure`') if value >= 120000: raise ValueError('value need to be smaller 120000 ' 'for field `atmospheric_station_pressure`') self._atmospheric_station_pressure = value
Corresponds to IDD Field `atmospheric_station_pressure` Args: value (int): value for IDD Field `atmospheric_station_pressure` Unit: Pa value > 31000 value < 120000 Missing value: 999999 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def update_ip_info(self, since_days=10, save=False, force=False): try: last_check = IPInfoCheck.objects.get( ip_address=self.client_ip_address) since_last = datetime.date.today() - last_check.date if since_last <= datetime.timedelta(days=since_days): if not self.ip_info or ( self.ip_info != last_check.ip_info and force): self.ip_info = last_check.ip_info self.save() return True elif save: self.save() return False ip_info, created = IPInfo.get_or_create_from_ip( self.client_ip_address) last_check.date = datetime.date.today() last_check.save() if created: last_check.ip_info = ip_info self.ip_info = ip_info self.save() return True elif save: self.save() return False except IPInfoCheck.DoesNotExist: self.ip_info = IPInfoCheck.check_ip(self.client_ip_address) self.save() return True
Update the IP info. Args: since_days (int): if checked less than this number of days ago, don't check again (default to 10 days). save (bool): whether to save anyway or not. force (bool): whether to update ip_info to last checked one. Returns: bool: check was run. IPInfo might not have been updated.
juraj-google-style
def _fix_fdef_in_place(fdef, functions, shared_name_suffix, new_gradient_op_types): orig_name = fdef.signature.name contains_unsaved_custom_gradients = False for node_def in fdef.node_def: fix_node_def(node_def, functions, shared_name_suffix) op_type = _get_gradient_op_type(node_def) if op_type is not None: if op_type in new_gradient_op_types: node_def.attr['_gradient_op_type'].s = compat.as_bytes(new_gradient_op_types[op_type]) else: contains_unsaved_custom_gradients = True if contains_unsaved_custom_gradients: logging.warning('Importing a function (%s) with ops with unsaved custom gradients. Will likely fail if a gradient is requested.', fdef.signature.name) fdef.signature.name = _clean_function_name(fdef.signature.name) return orig_name
Fixes a FunctionDef proto to be loaded in current context. In particular, when loading a function library into an eager context, one must rename the functions to avoid conflicts with existent functions. Args: fdef: FunctionDef proto to fix. It is mutated in-place. functions: map from function name to a ConcreteFunction instance. shared_name_suffix: A unique string for this load which helps to avoid `shared_name` collisions across loads. Two functions from the same load using the same `shared_name` still need to share, but functions from different loads with the same `shared_name` should not. new_gradient_op_types: map from old gradient op type to newly generated op type. Returns: orig_name: original value of fdef.signature.name
github-repos
def UsesArtifact(self, artifacts):
    if isinstance(artifacts, string_types):
        return artifacts in self.artifacts
    else:
        return any(True for artifact in artifacts if artifact in self.artifacts)
Determines if the check uses the specified artifact. Args: artifacts: Either a single artifact name, or a list of artifact names Returns: True if the check uses a specific artifact.
juraj-google-style
def _get_account_xml(soco):
    device = soco or discovery.any_soco()
    log.debug('Fetching account data from %s', device)
    settings_url = 'http:
    result = requests.get(settings_url).content
    log.debug('Account data: %s', result)
    return result
Fetch the account data from a Sonos device. Args: soco (SoCo): a SoCo instance to query. If soco is `None`, a random device will be used. Returns: str: a byte string containing the account data xml
codesearchnet
def GetTimeOfDay(self):
    normalized_timestamp = self._GetNormalizedTimestamp()
    if normalized_timestamp is None:
        return None, None, None

    _, hours, minutes, seconds = self._GetTimeValues(normalized_timestamp)
    return hours, minutes, seconds
Retrieves the time of day represented by the date and time values. Returns: tuple[int, int, int]: hours, minutes, seconds or (None, None, None) if the date and time values do not represent a time of day.
codesearchnet
def make_sgf(move_history, result_string, ruleset='Chinese', komi=7.5, white_name=PROGRAM_IDENTIFIER, black_name=PROGRAM_IDENTIFIER, comments=[]): boardsize = go.N game_moves = ''.join((translate_sgf_move(*z) for z in itertools.zip_longest(move_history, comments))) result = result_string return SGF_TEMPLATE.format(**locals())
Turn a game into SGF. Doesn't handle handicap games or positions with incomplete history. Args: move_history: iterable of PlayerMoves result_string: "B+R", "W+0.5", etc. comments: iterable of string/None. Will be zipped with move_history.
codesearchnet
def _error_and_gradient(self, x):
    coords = x.reshape((self.m, self.n))
    d = squareform(pdist(coords))
    diff = self.D - d
    error = self._error(diff)
    gradient = self._gradient(diff, d, coords)
    return error, gradient.ravel()
Compute the error and the gradient. This is the function optimized by :obj:`scipy.optimize.minimize`. Args: x (`array-like`): [`m` * `n`, ] matrix. Returns: `tuple`: containing: - Error (`float`) - Gradient (`np.array`) [`m`, `n`]
juraj-google-style
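When one callable returns both the error and its gradient, `scipy.optimize.minimize` can consume it directly with `jac=True`; a minimal, self-contained illustration using a simple quadratic rather than the MDS stress above.

import numpy as np
from scipy.optimize import minimize

def error_and_gradient(x):
    # Objective and its analytic gradient, returned together.
    error = float(np.sum((x - 3.0) ** 2))
    gradient = 2.0 * (x - 3.0)
    return error, gradient

# jac=True tells minimize that the callable also supplies the gradient.
result = minimize(error_and_gradient, x0=np.zeros(4), jac=True, method="L-BFGS-B")
print(result.x)  # converges to [3. 3. 3. 3.]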
def copy_pkg(self, filename, _):
    basename = os.path.basename(filename)
    self._copy(filename, os.path.join(self.connection["mount_point"], "Packages", basename))
Copy a package to the repo's Package subdirectory. Args: filename: Path for file to copy. _: Ignored. Used for compatibility with JDS repos.
juraj-google-style
def recipe_twitter(config, auth_read, auth_write, recipe_name, twitter_secret, recipe_slug, twitter_key): dataset(config, {'description': 'Create a dataset where data will be combined and transfored for upload.', 'auth': auth_write, 'dataset': recipe_slug}) sheets(config, {'description': 'Read mapping of hash tags to line item toggles from sheets.', 'auth': auth_read, 'template': {'sheet': 'https: twitter(config, {'description': 'Read trends from Twitter and place into BigQuery.', 'auth': auth_write, 'secret': twitter_secret, 'key': twitter_key, 'trends': {'places': {'single_cell': True, 'bigquery': {'dataset': recipe_slug, 'query': 'SELECT DISTINCT WOEID FROM {dataset}.Twitter_Triggers', 'legacy': False, 'parameters': {'dataset': recipe_slug}}}}, 'out': {'bigquery': {'dataset': recipe_slug, 'table': 'Twitter_Trends_Place'}}}) google_api(config, {'description': 'Combine sheet and twitter data into API operations for each line item. Match all possibilities and PAUSE if no trigger match.', 'auth': auth_write, 'api': 'displayvideo', 'version': 'v1', 'function': 'advertisers.lineItems.patch', 'kwargs_remote': {'bigquery': {'dataset': recipe_slug, 'query': "\n SELECT\n CAST(S.Advertiser_Id AS STRING) advertiserId,\n CAST(S.Line_Item_Id AS STRING) AS lineItemId,\n STRUCT(\n IF(LOGICAL_OR(T.Name is NULL), 'ENTITY_STATUS_ACTIVE', 'ENTITY_STATUS_PAUSED') AS entityStatus\n ) AS body,\n 'entityStatus' AS updateMask,\n FROM `{dataset}.Twitter_Triggers` AS S\n LEFT JOIN `{dataset}.Twitter_Trends_Place` As T\n ON S.WOEID=T.WOEID AND REPLACE(LOWER(S.Hashtag), '
Adjusts line item settings based on Twitter hashtags and locations specified in a sheet. Args: auth_read (authentication) - Credentials used for reading data. auth_write (authentication) - Credentials used for writing data. recipe_name (string) - Name of sheet where Line Item settings will be read from. twitter_secret (string) - Twitter API secret token. recipe_slug (string) - Name of Google BigQuery dataset to create. twitter_key (string) - Twitter API key token.
github-repos
def get(self, name, default=None):
    option = self._options.get(name, None)
    if option is None:
        return default
    return option.__get__(self)
Fetch an option from the dictionary. Args: name (str): The name of the option. default: The value to return if the name is missing. Returns: any: The value stored by the option. This method resolves the option to its value rather than returning the option object itself. Use the 'options()' method or this object's iter to get the raw options.
juraj-google-style
def parse_psqs(psqs_results_file): psqs_results = pd.read_csv(psqs_results_file, sep='\t', header=None) psqs_results['pdb_file'] = psqs_results[0].apply(lambda x: str(x).strip('./').strip('.pdb')) psqs_results = psqs_results.rename(columns = {1:'psqs_local', 2:'psqs_burial', 3:'psqs_contact', 4:'psqs_total'}).drop(0, axis=1) psqs_results['u_pdb'] = psqs_results['pdb_file'].apply(lambda x: x.upper() if len(x)==4 else np.nan) psqs_results['i_entry_name'] = psqs_results['pdb_file'].apply(lambda x: x.split('_model1')[0] if len(x)>4 else np.nan) psqs_results = psqs_results[pd.notnull(psqs_results.psqs_total)] return psqs_results
Parse a PSQS result file and returns a Pandas DataFrame of the results Args: psqs_results_file: Path to psqs results file Returns: Pandas DataFrame: Summary of PSQS results
juraj-google-style
def clear(self, rows=None):
    rows = tf.range(self._capacity) if rows is None else rows
    assert rows.shape.ndims == 1
    return tf.scatter_update(self._length, rows, tf.zeros_like(rows))
Reset episodes in the memory. Internally, this only sets their lengths to zero. The memory entries will be overridden by future calls to append() or replace(). Args: rows: Episodes to clear, defaults to all. Returns: Operation.
juraj-google-style
def _convert_reward(self, reward):
    if not np.isfinite(reward).all():
        raise ValueError('Infinite reward encountered.')
    return np.array(reward, dtype=np.float32)
Convert the reward to 32 bits. Args: reward: Numpy reward. Raises: ValueError: Rewards contain infinite values. Returns: Numpy reward with 32-bit data type.
codesearchnet
def find_surface_sites_by_height(self, slab, height=0.9, xy_tol=0.05): m_projs = np.array([np.dot(site.coords, self.mvec) for site in slab.sites]) mask = ((m_projs - np.amax(m_projs)) >= (- height)) surf_sites = [slab.sites[n] for n in np.where(mask)[0]] if xy_tol: surf_sites = [s for (h, s) in zip(m_projs[mask], surf_sites)] surf_sites.reverse() (unique_sites, unique_perp_fracs) = ([], []) for site in surf_sites: this_perp = (site.coords - np.dot(site.coords, self.mvec)) this_perp_frac = slab.lattice.get_fractional_coords(this_perp) if (not in_coord_list_pbc(unique_perp_fracs, this_perp_frac)): unique_sites.append(site) unique_perp_fracs.append(this_perp_frac) surf_sites = unique_sites return surf_sites
This method finds surface sites by determining which sites are within a threshold value in height from the topmost site in a list of sites Args: slab (Slab): slab from whose sites surface sites are selected height (float): threshold in angstroms of distance from topmost site in slab along the slab c-vector to include in surface site determination xy_tol (float): if supplied, will remove any sites which are within a certain distance in the miller plane. Returns: list of sites selected to be within a threshold of the highest site
codesearchnet
def map_batch_parallel(input_list, batch_size, item_mapper=None, batch_mapper=None, flatten=True, n_jobs=(- 1), **kwargs): if ((item_mapper is None) and (batch_mapper is None)): raise ValueError('You should specify either batch_mapper or item_mapper.') if (batch_mapper is None): batch_mapper = _default_batch_mapper batches = split_into_batches(input_list, batch_size, batch_storage_dir='') all_batch_results = Parallel(n_jobs=n_jobs, **kwargs)((delayed(batch_mapper)(batch['data'], item_mapper) for batch in progressbar(batches, desc='Batches', total=len(batches), file=sys.stdout))) if flatten: final_result = [] for batch_result in all_batch_results: final_result.extend(batch_result) else: final_result = all_batch_results return final_result
Split the data into batches and process each batch in its own thread. Args: input_list: An input object that has a list-like interface (indexing and slicing). item_mapper: (optional) A function to apply to each item in the batch. batch_mapper: (optional) A function to apply to each batch. Either item_mapper or batch_mapper must be set. flatten: Whether to unwrap individual batch results or keep them grouped by batch. n_jobs: The number of parallel processing jobs. -1 will use the number of CPUs on the system. batch_size: The maximum number of input items in each batch. -1 will store all data as a single batch. **kwargs: Additional keyword arguments to joblib.Parallel. Returns: A list representing the combined output from the mapper function called on all input items of each batch.
codesearchnet
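A pared-down sketch of the same batch-then-parallelize pattern with joblib, assuming batches are plain list slices; the `split_into_batches` and progress-bar machinery above is not reproduced.

from joblib import Parallel, delayed

def _map_batch(batch, item_mapper):
    return [item_mapper(x) for x in batch]

def map_batch_parallel(items, batch_size, item_mapper, n_jobs=-1):
    # One joblib job per batch, then flatten the per-batch results.
    batches = [items[i:i + batch_size] for i in range(0, len(items), batch_size)]
    batch_results = Parallel(n_jobs=n_jobs)(
        delayed(_map_batch)(batch, item_mapper) for batch in batches
    )
    return [result for batch in batch_results for result in batch]

if __name__ == "__main__":
    print(map_batch_parallel(list(range(10)), batch_size=3, item_mapper=lambda x: x * x))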
def difference(self, second_iterable, selector=identity): if self.closed(): raise ValueError('Attempt to call difference() on a closed Queryable.') if (not is_iterable(second_iterable)): raise TypeError('Cannot compute difference() with second_iterableof non-iterable {0}'.format(str(type(second_iterable))[7:(- 2)])) if (not is_callable(selector)): raise TypeError('difference() parameter selector={0} is not callable'.format(repr(selector))) return self._create(self._generate_difference_result(second_iterable, selector))
Returns those elements of the source sequence which are not in the second_iterable. This method is equivalent to the Except() LINQ operator, renamed to a valid Python identifier. Note: This method uses deferred execution, but as soon as execution commences the entirety of the second_iterable is consumed; therefore, although the source sequence may be infinite the second_iterable must be finite. Args: second_iterable: Elements from this sequence are excluded from the returned sequence. This sequence will be consumed in its entirety, so must be finite. selector: An optional single-argument function which selects from the elements of both sequences the values which will be compared for equality. If omitted the identity function will be used. Returns: A sequence containing all elements in the source sequence except those which are also members of the second sequence. Raises: ValueError: If the Queryable has been closed. TypeError: If the second_iterable is not in fact iterable. TypeError: If the selector is not callable.
codesearchnet
def transformer_revnet_encoder(encoder_input, encoder_self_attention_bias, hparams, name="encoder"): def f(x, side_input): encoder_self_attention_bias = side_input[0] old_hid_size = hparams.hidden_size hparams.hidden_size = old_hid_size with tf.variable_scope("self_attention"): y = common_attention.multihead_attention( common_layers.layer_preprocess( x, hparams), None, encoder_self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout) y = common_layers.layer_postprocess(x, y, hparams) hparams.hidden_size = old_hid_size return y def g(x): old_hid_size = hparams.hidden_size hparams.hidden_size = old_hid_size with tf.variable_scope("ffn"): y = transformer.transformer_ffn_layer( common_layers.layer_preprocess(x, hparams), hparams) y = common_layers.layer_postprocess(x, y, hparams) hparams.hidden_size = old_hid_size return y x1, x2 = tf.split(encoder_input, 2, axis=-1) with tf.variable_scope(name): y1, y2 = tf.contrib.layers.rev_block( x1, x2, f, g, num_layers=hparams.num_hidden_layers, f_side_input=[encoder_self_attention_bias], is_training=hparams.mode == tf.estimator.ModeKeys.TRAIN) y = tf.concat([y1, y2], axis=-1) return common_layers.layer_preprocess(y, hparams)
A stack of transformer layers. Args: encoder_input: a Tensor encoder_self_attention_bias: bias Tensor for self-attention (see common_attention.attention_bias()) hparams: hyperparameters for model name: a string Returns: y: a Tensor
juraj-google-style
def right_shift_blockwise(x, query_shape, name=None): with tf.variable_scope( name, default_name="right_shift_blockwise", values=[x]): x_list_shape = x.get_shape().as_list() x_shape = common_layers.shape_list(x) x = tf.expand_dims(x, axis=1) x = pad_to_multiple_2d(x, query_shape) padded_x_shape = common_layers.shape_list(x) x_indices = gather_indices_2d(x, query_shape, query_shape) x_new = get_shifted_center_blocks(x, x_indices) output = scatter_blocks_2d(x_new, x_indices, padded_x_shape) output = tf.squeeze(output, axis=1) output = tf.slice(output, [0, 0, 0, 0], [-1, x_shape[1], x_shape[2], -1]) output.set_shape(x_list_shape) return output
Right shifts once in every block. Args: x: a tensor of shape [batch, height, width, depth] query_shape: A 2d tuple of ints name: a string Returns: output: a tensor of the same shape as x
juraj-google-style
def __init__(self, variant, building):
    self.variant = variant
    self.building = building
Create a package variant. Args: variant (`Variant`): Package variant. building (bool): True if a build is occurring.
juraj-google-style
def key_vals_dict_to_tuple_list(key_vals_dict, fill=float('nan')):
    tuple_list = []
    if not key_vals_dict:
        return tuple_list

    vlen = max([len(vs) for vs in itertools.chain(*key_vals_dict.values())])
    for k, vs in key_vals_dict.items():
        try:
            tuple_list.extend([k + tuple(v) + (fill, ) * (vlen - len(v)) for v in vs])
        except TypeError:
            # Non-tuple keys are wrapped so they can be concatenated.
            tuple_list.extend([(k, ) + tuple(v) + (fill, ) * (vlen - len(v)) for v in vs])
    return tuple_list
Convert ``key_vals_dict`` to a ``tuple_list``. Args: key_vals_dict (dict): a dict mapping each key to a list of value tuples. fill: a value to fill missing data Returns: A list of tuples
juraj-google-style
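A short usage example of the function defined above, illustrating the padding behaviour (Python 3 dict ordering assumed).

data = {"x": [(1, 2), (3,)], "y": [(4, 5)]}
print(key_vals_dict_to_tuple_list(data, fill=0))
# [('x', 1, 2), ('x', 3, 0), ('y', 4, 5)]

# Tuple keys are concatenated directly instead of being wrapped:
print(key_vals_dict_to_tuple_list({("a", "b"): [(1,)]}, fill=None))
# [('a', 'b', 1)]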
def to_string(cls, error_code):
    if error_code == cls.ZONE_NOT_FOUND_ERROR:
        return 'Zone not found'
    return super(JLinkReadErrors, cls).to_string(error_code)
Returns the string message for the given ``error_code``. Args: cls (JLinkReadErrors): the ``JLinkReadErrors`` class error_code (int): error code to convert Returns: An error string corresponding to the error code. Raises: ValueError: if the error code is invalid.
juraj-google-style
def DeleteCampaignFeed(client, campaign_feed):
    campaign_feed_service = client.GetService('CampaignFeedService', 'v201809')
    operation = {'operand': campaign_feed, 'operator': 'REMOVE'}
    campaign_feed_service.mutate([operation])
Deletes a campaign feed. Args: client: an AdWordsClient instance. campaign_feed: the campaign feed to delete.
codesearchnet
def open(self, host, port=23):
    self._telnet_client.open(host, port)
    config_str = self._telnet_client.cmd('MN?')
    if config_str.startswith('MN='):
        config_str = config_str[len('MN='):]
    self.properties = dict(zip(['model', 'max_freq', 'max_atten'], config_str.split('-', 2)))
    self.max_atten = float(self.properties['max_atten'])
Opens a telnet connection to the desired AttenuatorDevice and queries basic information. Args: host: A valid hostname (IP address or DNS-resolvable name) to an MC-DAT attenuator instrument. port: An optional port number (defaults to telnet default 23)
codesearchnet
def sample(self, count=5, fields=None, sampling=None, use_cache=True, dialect=None, billing_tier=None): return Query.sampling_query(self._sql, self._context, count=count, fields=fields, sampling=sampling, udfs=self._udfs, data_sources=self._data_sources).results(use_cache=use_cache, dialect=dialect, billing_tier=billing_tier)
Retrieves a sampling of rows for the query. Args: count: an optional count of rows to retrieve which is used if a specific sampling is not specified (default 5). fields: the list of fields to sample (default None implies all). sampling: an optional sampling strategy to apply to the table. use_cache: whether to use cached results or not (default True). dialect : {'legacy', 'standard'}, default 'legacy' 'legacy' : Use BigQuery's legacy SQL dialect. 'standard' : Use BigQuery's standard SQL (beta), which is compliant with the SQL 2011 standard. billing_tier: Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default. This can also be used to override your project-wide default billing tier on a per-query basis. Returns: A QueryResultsTable containing a sampling of the result set. Raises: Exception if the query could not be executed or query response was malformed.
codesearchnet
def review_score(self, reviewer, product): return self._g.retrieve_review(reviewer, product).score
Find a review score from a given reviewer to a product. Args: reviewer: Reviewer i.e. an instance of :class:`ria.bipartite.Reviewer`. product: Product i.e. an instance of :class:`ria.bipartite.Product`. Returns: The score of the review from the reviewer to the product.
codesearchnet
def get_request_header(self):
    if self._client_id is not None:
        self._request_header.client_identifier.resource = self._client_id
    return self._request_header
Return ``request_header`` for use when constructing requests. Returns: Populated request header.
codesearchnet
def fit_texture(layer):
    x, y = layer
    x = (x - np.nanmin(x)) / (np.nanmax(x) - np.nanmin(x))
    y = (y - np.nanmin(y)) / (np.nanmax(y) - np.nanmin(y))
    return x, y
Fits a layer into a texture by scaling each axis to (0, 1). Does not preserve aspect ratio (TODO: make this an option). Args: layer (layer): the layer to scale Returns: texture: A texture.
juraj-google-style
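The nan-aware min-max scaling used above, as one self-contained numpy snippet; each axis is scaled independently, which is why aspect ratio is not preserved.

import numpy as np

def minmax_scale(a):
    # Scale to [0, 1], ignoring NaNs when finding the extremes.
    return (a - np.nanmin(a)) / (np.nanmax(a) - np.nanmin(a))

x = np.array([2.0, np.nan, 4.0, 6.0])
print(minmax_scale(x))  # [0.  nan 0.5 1. ]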
def pnl_search(self, asset_manager_id, pnl_type, business_date, **kwargs): self.logger.info(('Retrieving Pnls - Asset Manager: %s - Business Date: %s' % (asset_manager_id, business_date))) url = ('%s/pnls/%s' % (self.endpoint, asset_manager_id)) search_params = {'pnl_type': pnl_type, 'business_date': business_date.isoformat()} for (param_key, param_val) in kwargs.items(): if (not param_val): continue search_params[param_key] = (','.join(param_val) if isinstance(param_val, list) else param_val) response = self.session.get(url, params=search_params) if response.ok: json_body = response.json() results = json_body.get('items') next_hash_key = json_body.get('next_hash_key') next_range_key = json_body.get('next_range_key') pnls = [json_to_pnl(pnl_json) for pnl_json in results] self.logger.info('Retrieved %s Pnl records.', len(pnls)) return (next_hash_key, next_range_key, pnls) else: self.logger.error(response.text) response.raise_for_status()
Search pnl records. Args: asset_manager_id (int): id of asset manager owning the pnl records pnl_type (str): either "Position" or "Transaction" business_date (date): date of the pnl records to return book_ids (list): book id filter on pnl records asset_ids (list): asset id filter on pnl records transaction_ids (list): transaction id filter on pnl records next_hash_key (str): continuation hash key for paging the results next_range_key (str): continuation range key for paging the results page_size (int): the number of results to return
codesearchnet
def ParseMany(text):
    precondition.AssertType(text, Text)
    if compatibility.PY2:
        text = text.encode('utf-8')
    return list(yaml.safe_load_all(text))
Parses many YAML documents into a list of Python objects. Args: text: A YAML source with multiple documents embedded. Returns: A list of Python data structures corresponding to the YAML documents.
codesearchnet
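Usage with a multi-document YAML source: `yaml.safe_load_all` yields one Python object per `---`-separated document, which the wrapper above materializes into a list.

import yaml

text = """\
name: first
value: 1
---
name: second
value: 2
"""
documents = list(yaml.safe_load_all(text))
assert documents == [{"name": "first", "value": 1}, {"name": "second", "value": 2}]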
def from_proto(saver_def, import_scope=None): return Saver(saver_def=saver_def, name=import_scope)
Returns a `Saver` object created from `saver_def`. Args: saver_def: a `SaverDef` protocol buffer. import_scope: Optional `string`. Name scope to use. Returns: A `Saver` built from saver_def.
github-repos
def plot_tree3d(ax, tree, diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH, color=None, alpha=_ALPHA): segs = [(s[0][COLS.XYZ], s[1][COLS.XYZ]) for s in iter_segments(tree)] linewidth = _get_linewidth(tree, diameter_scale=diameter_scale, linewidth=linewidth) color = _get_color(color, tree.type) collection = Line3DCollection(segs, color=color, linewidth=linewidth, alpha=alpha) ax.add_collection3d(collection) _update_3d_datalim(ax, tree)
Generates a figure of the tree in 3d. If the tree contains one single point the plot will be empty \ since no segments can be constructed. Args: ax(matplotlib axes): on what to plot tree(neurom.core.Tree or neurom.core.Neurite): plotted tree diameter_scale(float): Scale factor multiplied with segment diameters before plotting linewidth(float): all segments are plotted with this width, but only if diameter_scale=None color(str or None): Color of plotted values, None corresponds to default choice alpha(float): Transparency of plotted values
codesearchnet
def get_dependencies(self): all_deps = OrderedSet() for (key, _) in list(self.__config.items()): if (key in self.__cli): continue if key.endswith('sources'): all_deps |= self.get_sources(key[:((len('sources') * (- 1)) - 1)]) for (key, _) in list(self.__cli.items()): if key.endswith('sources'): all_deps |= self.get_sources(key[:((len('sources') * (- 1)) - 1)]) if (self.conf_file is not None): all_deps.add(self.conf_file) all_deps.add(self.get_path('sitemap', rel_to_cwd=True)) cwd = os.getcwd() return [os.path.relpath(fname, cwd) for fname in all_deps if fname]
Retrieve the set of all dependencies for a given configuration. Returns: utils.utils.OrderedSet: The set of all dependencies for the tracked configuration.
codesearchnet
def patch_deepCopy(self, patches):
    patchesCopy = []
    for patch in patches:
        patchCopy = patch_obj()
        patchCopy.diffs = patch.diffs[:]
        patchCopy.start1 = patch.start1
        patchCopy.start2 = patch.start2
        patchCopy.length1 = patch.length1
        patchCopy.length2 = patch.length2
        patchesCopy.append(patchCopy)
    return patchesCopy
Given an array of patches, return another array that is identical. Args: patches: Array of Patch objects. Returns: Array of Patch objects.
juraj-google-style
def assistant_from_yaml(cls, source, y, superassistant, fully_loaded=True, role=settings.DEFAULT_ASSISTANT_ROLE): name = os.path.splitext(os.path.basename(source))[0] yaml_checker.check(source, y) assistant = yaml_assistant.YamlAssistant(name, y, source, superassistant, fully_loaded=fully_loaded, role=role) return assistant
Constructs instance of YamlAssistant loaded from given structure y, loaded from source file source. Args: source: path to assistant source file y: loaded yaml structure superassistant: superassistant of this assistant Returns: YamlAssistant instance constructed from y with source file source Raises: YamlError: if the assistant is malformed
codesearchnet
def notify(self, new_issues, existing_issues, fixed_issues): if len(new_issues + existing_issues + fixed_issues) > 0: maxlen = max(len(x['properties']['source']) for x in (new_issues + existing_issues + fixed_issues)) + 2 text_tmpl = get_template('domain_hijacking.txt') html_tmpl = get_template('domain_hijacking.html') issues_text = text_tmpl.render( new_issues=new_issues, existing_issues=existing_issues, fixed_issues=fixed_issues, maxlen=maxlen ) issues_html = html_tmpl.render( new_issues=new_issues, existing_issues=existing_issues, fixed_issues=fixed_issues, maxlen=maxlen ) try: send_notification( subsystem=self.name, recipients=[NotificationContact('email', addr) for addr in self.recipients], subject=self.subject, body_html=issues_html, body_text=issues_text ) except Exception as ex: self.log.exception('Failed sending notification email: {}'.format(ex))
Send notifications (email, slack, etc.) for any issues that are currently open or has just been closed Args: new_issues (`list` of :obj:`DomainHijackIssue`): List of newly discovered issues existing_issues (`list` of :obj:`DomainHijackIssue`): List of existing open issues fixed_issues (`list` of `dict`): List of fixed issues Returns: None
juraj-google-style
def _GetNumberOfSeconds(self, fat_date_time): day_of_month = (fat_date_time & 0x1f) month = ((fat_date_time >> 5) & 0x0f) year = (fat_date_time >> 9) & 0x7f days_per_month = self._GetDaysPerMonth(year, month) if day_of_month < 1 or day_of_month > days_per_month: raise ValueError('Day of month value out of bounds.') number_of_days = self._GetDayOfYear(1980 + year, month, day_of_month) number_of_days -= 1 for past_year in range(0, year): number_of_days += self._GetNumberOfDaysInYear(past_year) fat_date_time >>= 16 seconds = (fat_date_time & 0x1f) * 2 minutes = (fat_date_time >> 5) & 0x3f hours = (fat_date_time >> 11) & 0x1f if hours not in range(0, 24): raise ValueError('Hours value out of bounds.') if minutes not in range(0, 60): raise ValueError('Minutes value out of bounds.') if seconds not in range(0, 60): raise ValueError('Seconds value out of bounds.') number_of_seconds = (((hours * 60) + minutes) * 60) + seconds number_of_seconds += number_of_days * definitions.SECONDS_PER_DAY return number_of_seconds
Retrieves the number of seconds from a FAT date time. Args: fat_date_time (int): FAT date time. Returns: int: number of seconds since January 1, 1980 00:00:00. Raises: ValueError: if the month, day of month, hours, minutes or seconds value is out of bounds.
juraj-google-style
def __init__(self, map_name, timestamp_dir, cache_options, automount_mountpoint=None): super(AutomountUpdater, self).__init__(map_name, timestamp_dir, cache_options, automount_mountpoint) self.local_master = False if self.OPT_LOCAL_MASTER in cache_options: if cache_options[self.OPT_LOCAL_MASTER] == 'yes': self.local_master = True
Initialize automount-specific updater options. Args: map_name: A string representing the type of the map we are an Updater for. timestamp_dir: A string with the directory containing our timestamp files. cache_options: A dict containing the options for any caches we create. automount_mountpoint: An optional string containing automount path info.
github-repos
def get_versions(self): versions_response = self.repo.api.http_request('GET', '%s/fcr:versions' % self.uri) versions_graph = self.repo.api.parse_rdf_payload(versions_response.content, versions_response.headers) for version_uri in versions_graph.objects(self.uri, self.rdf.prefixes.fedora.hasVersion): version_label = versions_graph.value(version_uri, self.rdf.prefixes.fedora.hasVersionLabel, None).toPython() self._affix_version(version_uri, version_label)
retrieves all versions of an object, and stores them at self.versions Args: None Returns: None: appends instances
juraj-google-style
def parse_ids(chrom, pos, ref, alt, case_id, variant_type):
    ids = {}
    pos = str(pos)
    ids['simple_id'] = parse_simple_id(chrom, pos, ref, alt)
    ids['variant_id'] = parse_variant_id(chrom, pos, ref, alt, variant_type)
    ids['display_name'] = parse_display_name(chrom, pos, ref, alt, variant_type)
    ids['document_id'] = parse_document_id(chrom, pos, ref, alt, variant_type, case_id)
    return ids
Construct the necessary ids for a variant Args: chrom(str): Variant chromosome pos(int): Variant position ref(str): Variant reference alt(str): Variant alternative case_id(str): Unique case id variant_type(str): 'clinical' or 'research' Returns: ids(dict): Dictionary with the relevant ids
juraj-google-style
def all(self, data={}, **kwargs): return super(VirtualAccount, self).all(data, **kwargs)
Fetch all Virtual Account entities Returns: Dictionary of Virtual Account data
codesearchnet
def _parse_banners(self):
    motd_value = login_value = None
    matches = re.findall('^banner\\s+(login|motd)\\s?$\n(.*?)$\nEOF$\n', self.config,
                         re.DOTALL | re.M)
    for match in matches:
        if match[0].strip() == 'motd':
            motd_value = match[1]
        elif match[0].strip() == 'login':
            login_value = match[1]
    return dict(banner_motd=motd_value, banner_login=login_value)
Parses the global config and returns the value for both motd and login banners. Returns: dict: The configured value for the motd and login banners. If a banner is not set, it will return a value of None for that key. The returned dict object is intended to be merged into the resource dict
codesearchnet
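The regular expression is the interesting part, so here is a self-contained illustration of it applied to a sample running-config snippet, outside the resource class:

```python
import re

# Stand-alone illustration of the banner-parsing regular expression above.
config = (
    "banner motd\n"
    "Welcome to switch01\n"
    "Authorized access only\n"
    "EOF\n"
    "banner login\n"
    "Login banner text\n"
    "EOF\n"
)

matches = re.findall(
    r'^banner\s+(login|motd)\s?$\n(.*?)$\nEOF$\n', config, re.DOTALL | re.M
)
banners = {'banner_motd': None, 'banner_login': None}
for kind, value in matches:
    banners['banner_%s' % kind.strip()] = value

print(banners['banner_motd'])   # 'Welcome to switch01\nAuthorized access only'
print(banners['banner_login'])  # 'Login banner text'
```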
def _list_like_func(self, func, axis, *args, **kwargs): func_prepared = self._prepare_method( lambda df: pandas.DataFrame(df.apply(func, axis, *args, **kwargs)) ) new_data = self._map_across_full_axis(axis, func_prepared) new_index = ( [f if isinstance(f, string_types) else f.__name__ for f in func] if axis == 0 else self.index ) new_columns = ( [f if isinstance(f, string_types) else f.__name__ for f in func] if axis == 1 else self.columns ) return self.__constructor__(new_data, new_index, new_columns)
Apply list-like function across given axis. Args: func: The function to apply. axis: Target axis to apply the function along. Returns: A new PandasQueryCompiler.
juraj-google-style
def load_settings(path, setttings_only = True): if not os.path.exists(path): print(path) raise AttributeError('Path given does not exist!') tag = '_'.join(os.path.basename(os.path.dirname(os.path.abspath(path) + '/')).split('_')[3:]) search_str = os.path.abspath(path)+'/*'+tag +'.b26' fname = glob.glob(search_str) if len(fname)>1: print(('warning more than one .b26 file found, loading ', fname[0])) elif len(fname) == 0: print(('no .b26 file found in folder {:s}, check path !'.format(search_str))) return fname = fname[0] fname = Script.check_filename(fname) settings = load_b26_file(fname)['scripts'] if len(list(settings.keys())) == 1 and setttings_only: settings = settings[list(settings.keys())[0]]['settings'] return settings
Loads the settings that have been saved with Script.save_b26. Args: path: path to a folder saved by Script.save_b26 setttings_only: if True, returns only the settings when the .b26 file contains only a single script Returns: a dictionary with the settings
juraj-google-style
def _ParseKey(self, knowledge_base, registry_key, value_name): try: registry_value = registry_key.GetValueByName(value_name) except IOError as exception: raise errors.PreProcessFail(( 'Unable to retrieve Windows Registry key: {0:s} value: {1:s} ' 'with error: {2!s}').format( registry_key.path, value_name, exception)) if registry_value: value_object = registry_value.GetDataAsObject() if value_object: self._ParseValueData(knowledge_base, value_object)
Parses a Windows Registry key for a preprocessing attribute. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. registry_key (dfwinreg.WinRegistryKey): Windows Registry key. value_name (str): name of the Windows Registry value. Raises: PreProcessFail: if the preprocessing fails.
juraj-google-style
def approx_eq(val: Any, other: Any, *, atol: Union[(int, float)]=1e-08) -> bool: approx_eq_getter = getattr(val, '_approx_eq_', None) if (approx_eq_getter is not None): result = approx_eq_getter(other, atol) if (result is not NotImplemented): return result other_approx_eq_getter = getattr(other, '_approx_eq_', None) if (other_approx_eq_getter is not None): result = other_approx_eq_getter(val, atol) if (result is not NotImplemented): return result if isinstance(val, (int, float)): if (not isinstance(other, (int, float))): return False return _isclose(val, other, atol=atol) if isinstance(val, complex): if (not isinstance(other, complex)): return False return _isclose(val, other, atol=atol) result = _approx_eq_iterables(val, other, atol=atol) if (result is NotImplemented): return (val == other) return result
Approximately compares two objects. If `val` implements SupportsApproxEquality protocol then it is invoked and takes precedence over all other checks: - For primitive numeric types `int` and `float` approximate equality is delegated to math.isclose(). - For complex primitive type the real and imaginary parts are treated independently and compared using math.isclose(). - For `val` and `other` both iterable of the same length, consecutive elements are compared recursively. The types of `val` and `other` do not necessarily need to match each other. They just need to be iterable and have the same structure. Args: val: Source object for approximate comparison. other: Target object for approximate comparison. atol: The minimum absolute tolerance. See np.isclose() documentation for details. Defaults to 1e-8 which matches np.isclose() default absolute tolerance. Returns: True if objects are approximately equal, False otherwise.
codesearchnet
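A hedged usage sketch, assuming the function is exposed as `cirq.approx_eq` (it lives in Cirq's protocols); the expected results are shown as comments:

```python
import cirq  # assumes the function is available as cirq.approx_eq

print(cirq.approx_eq(1.0, 1.0 + 1e-10))                      # True (within default atol)
print(cirq.approx_eq(1.0, 1.1, atol=0.2))                    # True (within explicit atol)
print(cirq.approx_eq(complex(1, 1), complex(1, 1 + 1e-10)))  # True
print(cirq.approx_eq([1.0, 2.0], (1.0, 2.0 + 1e-10)))        # True (element-wise, types may differ)
print(cirq.approx_eq(1.0, "1.0"))                            # False (not a numeric type)
```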
def is_http_running_on(port): try: conn = httplib.HTTPConnection('127.0.0.1:' + str(port)) conn.connect() conn.close() return True except Exception: return False
Check if an HTTP server runs on a given port. Args: port: The port to check. Returns: True if it is used by an HTTP server. False otherwise.
juraj-google-style
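The snippet above targets Python 2's `httplib`. A hedged Python 3 sketch of the same check using `http.client`, plus a usage example:

```python
import http.client

def is_http_running_on(port):
    """Return True if something accepts HTTP connections on 127.0.0.1:port."""
    try:
        conn = http.client.HTTPConnection('127.0.0.1', port, timeout=2)
        conn.connect()
        conn.close()
        return True
    except Exception:
        return False

# Example: probe a port that is unlikely to have a listener.
print(is_http_running_on(50001))  # most likely False
```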
def stacked_highway_cnn(units: tf.Tensor, n_hidden_list: List, filter_width=3, use_batch_norm=False, use_dilation=False, training_ph=None): for (n_layer, n_hidden) in enumerate(n_hidden_list): input_units = units if (input_units.get_shape().as_list()[(- 1)] != n_hidden): input_units = tf.layers.dense(input_units, n_hidden) if use_dilation: dilation_rate = (2 ** n_layer) else: dilation_rate = 1 units = tf.layers.conv1d(units, n_hidden, filter_width, padding='same', dilation_rate=dilation_rate, kernel_initializer=INITIALIZER()) if use_batch_norm: units = tf.layers.batch_normalization(units, training=training_ph) sigmoid_gate = tf.layers.dense(input_units, 1, activation=tf.sigmoid, kernel_initializer=INITIALIZER()) input_units = ((sigmoid_gate * input_units) + ((1 - sigmoid_gate) * units)) input_units = tf.nn.relu(input_units) units = input_units return units
Highway convolutional network. Skip connection with gating mechanism. Args: units: a tensorflow tensor with dimensionality [None, n_tokens, n_features] n_hidden_list: list with number of hidden units at the output of each layer filter_width: width of the kernel in tokens use_batch_norm: whether to use batch normalization between layers use_dilation: use power of 2 dilation scheme [1, 2, 4, 8 .. ] for layers 1, 2, 3, 4 ... training_ph: boolean placeholder determining whether it is the training phase or not. It is used only for batch normalization to determine whether to use the current batch average (std) or the memory-stored average (std) Returns: units: tensor at the output of the last convolutional layer with dimensionality [None, n_tokens, n_hidden_list[-1]]
codesearchnet
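A hedged usage sketch under TensorFlow 1.x graph mode, assuming `stacked_highway_cnn` (and the `INITIALIZER` it references) are importable; the shapes are illustrative:

```python
import tensorflow as tf  # assumes TensorFlow 1.x graph mode

# Illustrative call: a batch of token sequences with 100 features per token.
units_ph = tf.placeholder(tf.float32, shape=[None, None, 100])
training_ph = tf.placeholder(tf.bool, shape=[])

output = stacked_highway_cnn(
    units_ph,
    n_hidden_list=[128, 128, 128],
    filter_width=3,
    use_batch_norm=True,
    use_dilation=True,
    training_ph=training_ph,
)
print(output.get_shape().as_list())  # [None, None, 128]
```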
def _parse_normalization(normalization): parsed_normalization = None if isinstance(normalization, dict): if (len(normalization.keys()) == 1): items = list(normalization.items())[0] if (len(items) == 2): if (items[1] and isinstance(items[1], dict)): parsed_normalization = items else: parsed_normalization = items[0] elif isinstance(normalization, STR_TYPE): parsed_normalization = normalization return parsed_normalization
Parse a normalization item. Transform dicts into a tuple containing the normalization options. If a string is found, the actual value is used. Args: normalization: Normalization to parse. Returns: Tuple or string containing the parsed normalization.
codesearchnet
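A self-contained illustration of the three accepted input shapes, assuming the (private) function is importable; expected results are shown as comments:

```python
# A plain string is returned as-is.
print(_parse_normalization('lowercase'))
# -> 'lowercase'

# A one-key dict with non-empty options becomes a (name, options) tuple.
print(_parse_normalization({'stemming': {'language': 'english'}}))
# -> ('stemming', {'language': 'english'})

# A one-key dict with empty options collapses to just the name.
print(_parse_normalization({'lowercase': {}}))
# -> 'lowercase'
```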
def module_entry(yfile): ytxt = yfile.read() mp = ModuleParser(ytxt) mst = mp.statement() submod = (mst.keyword == 'submodule') import_only = True rev = '' features = [] includes = [] rec = {} for sst in mst.substatements: if ((not rev) and (sst.keyword == 'revision')): rev = sst.argument elif (import_only and (sst.keyword in data_kws)): import_only = False elif (sst.keyword == 'feature'): features.append(sst.argument) elif submod: continue elif (sst.keyword == 'namespace'): rec['namespace'] = sst.argument elif (sst.keyword == 'include'): rd = sst.find1('revision-date') includes.append((sst.argument, (rd.argument if rd else None))) rec['import-only'] = import_only rec['features'] = features if submod: rec['revision'] = rev submodmap[mst.argument] = rec else: rec['includes'] = includes modmap[(mst.argument, rev)] = rec
Add entry for one file containing YANG module text. Args: yfile (file): File containing a YANG module or submodule.
codesearchnet
def __init__(self, name, display_name='', description='', default=False): self._name = name self._display_name = display_name self._description = description self._default = default
Attribute constructor. Args: name (str): Attribute name. display_name (str): Attribute display name. description (str): Attribute description. default (bool): Whether the attribute is a default attribute of the corresponding datasets.
juraj-google-style
def set_window_size(self, width, height, window_handle='current'): self._execute(Command.SET_WINDOW_SIZE, {'width': int(width), 'height': int(height), 'window_handle': window_handle})
Sets the width and height of the current window. Support: Web(WebView) Args: width(int): the width in pixels. height(int): the height in pixels. window_handle(str): Identifier of window_handle, defaults to 'current'. Returns: WebDriver Object.
codesearchnet
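A hedged usage sketch; `driver` stands in for an already-created WebDriver session object:

```python
# Hypothetical usage; `driver` is an existing WebDriver session.
driver.set_window_size(1280, 800)                            # resize the current window
driver.set_window_size(1024, 768, window_handle='current')   # explicit window handle
```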
def id_in_cluster(cluster_spec, task_type, task_id): _validate_cluster_spec(cluster_spec, task_type, task_id) cluster_spec = normalize_cluster_spec(cluster_spec).as_dict() if task_type == 'chief': return 0 if task_type == 'worker': return task_id + len(cluster_spec.get('chief', [])) if task_type == 'evaluator': return task_id raise ValueError('There is no id for task_type %r' % task_type)
Returns a unique id for the task in the `task_type`'s cluster. It returns an id ranging from [0, `worker_count(task_type, task_id)`). Note: this function assumes that "evaluate" job is in its own cluster or its own partition of a cluster. Args: cluster_spec: a dict, `ClusterDef` or `ClusterSpec` object to be validated. task_type: string indicating the type of the task. task_id: the id of the `task_type` in this cluster. Returns: an int indicating the unique id. Throws: ValueError: if `task_type` is not "chief", "worker" or "evaluator".
github-repos
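A worked example of the id offsets, assuming the helper is importable from TensorFlow's distribute utilities; the cluster spec dict is illustrative:

```python
# Worked example of the id assignment described above.
cluster_spec = {
    'chief': ['host0:2222'],
    'worker': ['host1:2222', 'host2:2222'],
    'evaluator': ['host3:2222'],
}

print(id_in_cluster(cluster_spec, 'chief', 0))      # 0
print(id_in_cluster(cluster_spec, 'worker', 0))     # 1  (offset by the chief task)
print(id_in_cluster(cluster_spec, 'worker', 1))     # 2
print(id_in_cluster(cluster_spec, 'evaluator', 0))  # 0  (evaluator is its own partition)
```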
def _make_actor_method_executor(self, method_name, method, actor_imported): def actor_method_executor(dummy_return_id, actor, *args): self._worker.actor_task_counter += 1 try: if is_class_method(method): method_returns = method(*args) else: method_returns = method(actor, *args) except Exception as e: if (isinstance(actor, ray.actor.Checkpointable) and (self._worker.actor_task_counter != 1)): self._save_and_log_checkpoint(actor) raise e else: if isinstance(actor, ray.actor.Checkpointable): if (self._worker.actor_task_counter == 1): if actor_imported: self._restore_and_log_checkpoint(actor) else: self._save_and_log_checkpoint(actor) return method_returns return actor_method_executor
Make an executor that wraps a user-defined actor method. The wrapped method updates the worker's internal state and performs any necessary checkpointing operations. Args: method_name (str): The name of the actor method. method (instancemethod): The actor method to wrap. This should be a method defined on the actor class and should therefore take an instance of the actor as the first argument. actor_imported (bool): Whether the actor has been imported. Checkpointing operations will not be run if this is set to False. Returns: A function that executes the given actor method on the worker's stored instance of the actor. The function also updates the worker's internal state to record the executed method.
codesearchnet
def read_serializable_array(self, class_name, max_size=sys.maxsize): module = '.'.join(class_name.split('.')[:-1]) class_name = class_name.split('.')[-1] class_attr = getattr(importlib.import_module(module), class_name) length = self.read_var_int(max_size=max_size) items = [] try: for _ in range(0, length): item = class_attr() item.Deserialize(self) items.append(item) except Exception as e: raise SDKException(ErrorCode.param_err("Couldn't deserialize %s" % e)) return items
Deserialize a stream into the object specified by `class_name`. Args: class_name (str): a full path to the class to be deserialized into. e.g. 'neo.Core.Block.Block' max_size (int): (Optional) maximum number of bytes to read. Returns: list: list of `class_name` objects deserialized from the stream.
juraj-google-style
def unserialize_data(data, compression=False, encryption=False): try: if encryption: data = encryption.decrypt(data) except Exception as err: logger.error(('Decryption Error: ' + str(err))) message = False try: if compression: data = binascii.a2b_base64(data) data = zlib.decompress(data) message = json.loads(data) except Exception as err: logger.error(('Decompression Error: ' + str(err))) message = False decoded_message = data.decode() if ((not encryption) and (not compression)): message = json.loads(decoded_message) return message
Unserializes the packet data and converts it from json format to normal Python datatypes. If you choose to enable encryption and/or compression when serializing data, you MUST enable the same options when unserializing data. Args: data (str): The raw, serialized packet data delivered from the transport protocol. compression (boolean): True or False value on whether or not to uncompress the serialized data. encryption (rsa.encryption): An encryption instance used to decrypt the message if encryption is desired. Returns: The message unserialized in normal Python datatypes.
codesearchnet
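For round-trip context, a hedged sketch of the matching serialize step for the compression-only path (the real serializer in the originating library may differ in details):

```python
import binascii
import json
import zlib

# Sketch of the matching serialize step for the compression-only path;
# mirrors the a2b_base64 + zlib.decompress + json.loads sequence above.
def serialize_data(message, compression=True):
    data = json.dumps(message).encode()
    if compression:
        data = zlib.compress(data)
        data = binascii.b2a_base64(data)
    return data

packet = serialize_data({"action": "register", "cuuid": "1234"})
roundtrip = unserialize_data(packet, compression=True, encryption=False)
print(roundtrip)  # {'action': 'register', 'cuuid': '1234'}
```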
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[Tuple]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, XLMProphetNetSeq2SeqModelOutput]: use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache, return_dict=return_dict) if not return_dict: return decoder_outputs + encoder_outputs return XLMProphetNetSeq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, last_hidden_state_ngram=decoder_outputs.last_hidden_state_ngram, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_ngram_hidden_states=decoder_outputs.hidden_states_ngram, decoder_attentions=decoder_outputs.attentions, decoder_ngram_attentions=decoder_outputs.ngram_attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
Returns: Example: ```python >>> from transformers import AutoTokenizer, XLMProphetNetModel >>> tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone") >>> model = XLMProphetNetModel.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state # main stream hidden states >>> last_hidden_states_ngram = outputs.last_hidden_state_ngram # predict hidden states ```
github-repos
def bootstrap(score_objs, n_boot=1000): all_samples = np.random.choice(score_objs, size=(n_boot, len(score_objs)), replace=True) return all_samples.sum(axis=1)
Given a set of DistributedROC or DistributedReliability objects, this function performs a bootstrap resampling of the objects and returns n_boot aggregations of them. Args: score_objs: A list of DistributedROC or DistributedReliability objects. Objects must have an __add__ method n_boot (int): Number of bootstrap samples Returns: An array of DistributedROC or DistributedReliability
codesearchnet
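A worked example with plain integers, which satisfy the `__add__` requirement and make the output shape easy to see (assuming `bootstrap` is importable):

```python
import numpy as np

# Worked example with plain integers in place of DistributedROC objects.
np.random.seed(0)
scores = [1, 2, 3, 4, 5]
boot_sums = bootstrap(scores, n_boot=1000)
print(boot_sums.shape)   # (1000,)
print(boot_sums.mean())  # close to sum(scores) = 15 on average
```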
def __init__(self, client_id, client_secret): self.box_request = BoxRestRequest(client_id, client_secret) self.client_id = client_id self.client_secret = client_secret
Constructor Args: client_id (str): Client ID provided by Box. client_secret (str): Client Secret provided by Box.
juraj-google-style
def delete_resource_view(self, resource_view): if isinstance(resource_view, str): if is_valid_uuid(resource_view) is False: raise HDXError('%s is not a valid resource view id!' % resource_view) resource_view = ResourceView({'id': resource_view}, configuration=self.configuration) else: resource_view = self._get_resource_view(resource_view) if 'id' not in resource_view: found = False title = resource_view.get('title') for rv in self.get_resource_views(): if resource_view['title'] == rv['title']: resource_view = rv found = True break if not found: raise HDXError('No resource views have title %s in this resource!' % title) resource_view.delete_from_hdx()
Delete a resource view from the resource and HDX Args: resource_view (Union[ResourceView,Dict,str]): Either a resource view id, or resource view metadata from a ResourceView object or a dictionary Returns: None
juraj-google-style
def remove_vtep(self, name, vtep, vlan=None): if not vlan: cmd = 'vxlan flood vtep remove {}'.format(vtep) else: cmd = 'vxlan vlan {} flood vtep remove {}'.format(vlan, vtep) return self.configure_interface(name, cmd)
Removes a VTEP endpoint from the global or local flood list EosVersion: 4.13.7M Args: name (str): The name of the interface to configure vtep (str): The IP address of the remote VTEP endpoint to remove vlan (str): The VLAN ID associated with this VTEP. If the VLAN keyword is used, then the VTEP is configured as a local flood endpoint Returns: True if the command completes successfully
juraj-google-style
def load_index(self, filename, reindex=False): self._reset_index() with open(filename, 'r') as fobj: data = json.load(fobj) for (path, file) in data.items(): (ents, domains) = (file['entities'], file['domains']) (root, f) = (dirname(path), basename(path)) if reindex: self._index_file(root, f, domains) else: f = self._make_file_object(root, f) tags = {k: Tag(self.entities[k], v) for (k, v) in ents.items()} f.tags = tags self.files[f.path] = f for (ent, val) in f.entities.items(): self.entities[ent].add_file(f.path, val)
Load the Layout's index from a plaintext file. Args: filename (str): Path to the plaintext index file. reindex (bool): If True, discards entity values provided in the loaded index and instead re-indexes every file in the loaded index against the entities defined in the config. Default is False, in which case it is assumed that all entity definitions in the loaded index are correct and do not need any further validation. Note: At the moment, directory-specific config files aren't serialized. This means reconstructed indexes will only work properly in cases where there aren't multiple layout specs within a project.
codesearchnet
def delete_snl(self, snl_ids): try: payload = {"ids": json.dumps(snl_ids)} response = self.session.post( "{}/snl/delete".format(self.preamble), data=payload) if response.status_code in [200, 400]: resp = json.loads(response.text, cls=MontyDecoder) if resp["valid_response"]: if resp.get("warning"): warnings.warn(resp["warning"]) return resp else: raise MPRestError(resp["error"]) raise MPRestError("REST error with status code {} and error {}" .format(response.status_code, response.text)) except Exception as ex: raise MPRestError(str(ex))
Delete earlier submitted SNLs. .. note:: As of now, this MP REST feature is open only to a select group of users. Opening up submissions to all users is being planned for the future. Args: snl_ids: List of SNL ids. Raises: MPRestError
juraj-google-style
def save(self, branch, commit_message, **kwargs): self.branch = branch self.commit_message = commit_message self.file_path = self.file_path.replace('/', '%2F') super(ProjectFile, self).save(**kwargs)
Save the changes made to the file to the server. The object is updated to match what the server returns. Args: branch (str): Branch in which the file will be updated commit_message (str): Message to send with the commit **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabUpdateError: If the server cannot perform the request
codesearchnet
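A hedged usage sketch with python-gitlab, where this method comes from; the host, token, project path, and branch are placeholders:

```python
import gitlab

# Hypothetical usage with python-gitlab; host, token and project path are placeholders.
gl = gitlab.Gitlab('https://gitlab.example.com', private_token='TOKEN')
project = gl.projects.get('group/project')

f = project.files.get(file_path='README.md', ref='master')
f.content = 'updated contents'
f.save(branch='master', commit_message='Update README via API')
```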