Dataset columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes).
def setData(self, column, role, value):
    assert isinstance(column, int)
    assert isinstance(role, int)
    if isinstance(value, (QtWidgets.QComboBox, QtWidgets.QCheckBox)):
        self.treeWidget().setCurrentItem(self)
    if role == 2 and column == 1:
        if isinstance(value, str):
            value = self.cast_type(value)
        if isinstance(value, QtCore.QVariant):
            value = self.cast_type(value.toString())
        if isinstance(value, QtWidgets.QComboBox):
            value = self.cast_type(value.currentText())
        if isinstance(value, QtWidgets.QCheckBox):
            value = bool(int(value.checkState()))
        self.value = value
    elif column == 0:
        value = self.name
    if value is None:
        value = self.value
    if not isinstance(value, bool):
        super(B26QTreeItem, self).setData(column, role, value)
    else:
        self.emitDataChanged()
If value is valid, sets the data to value. Args: column: column of item role: role of item (see Qt doc) value: value to be set
juraj-google-style
def isValidUnit(self, w):
    bad = set(['point', 'a'])
    if w in bad:
        return False
    try:
        pq.Quantity(0.0, w)
        return True
    except:
        return w == '/'
Checks if a string represents a valid quantities unit. Args: w (str): A string to be tested against the set of valid quantities units. Returns: True if the string can be used as a unit in the quantities module.
codesearchnet
def plot_stacked_hist(self, key='wall_time', nmax=5, ax=None, **kwargs): (ax, fig, plt) = get_ax_fig_plt(ax=ax) mpi_rank = '0' timers = self.timers(mpi_rank=mpi_rank) n = len(timers) (names, values) = ([], []) rest = np.zeros(n) for (idx, sname) in enumerate(self.section_names(ordkey=key)): sections = self.get_sections(sname) svals = np.asarray([s.__dict__[key] for s in sections]) if (idx < nmax): names.append(sname) values.append(svals) else: rest += svals names.append(('others (nmax=%d)' % nmax)) values.append(rest) ind = np.arange(n) width = 0.35 colors = (nmax * ['r', 'g', 'b', 'c', 'k', 'y', 'm']) bars = [] bottom = np.zeros(n) for (idx, vals) in enumerate(values): color = colors[idx] bar = ax.bar(ind, vals, width, color=color, bottom=bottom) bars.append(bar) bottom += vals ax.set_ylabel(key) ax.set_title(('Stacked histogram with the %d most important sections' % nmax)) ticks = (ind + (width / 2.0)) labels = [('MPI=%d, OMP=%d' % (t.mpi_nprocs, t.omp_nthreads)) for t in timers] ax.set_xticks(ticks) ax.set_xticklabels(labels, rotation=15) ax.legend([bar[0] for bar in bars], names, loc='best') return fig
Plot stacked histogram of the different timers. Args: key: Keyword used to extract data from the timers. Only the first `nmax` sections with largest value are shown. nmax: Maximum number of sections to show. Other entries are grouped together in the `others` section. ax: matplotlib :class:`Axes` or None if a new figure should be created. Returns: `matplotlib` figure
codesearchnet
def _set_input(el, value):
    if isinstance(value, dict):
        el.value = value["val"]
    elif type(value) in [list, tuple]:
        el.value = ", ".join(item["val"] for item in value)
    else:
        el.value = value
Set content of given `el` to `value`. Args: el (obj): El reference to input you wish to set. value (obj/list): Value to which the `el` will be set.
juraj-google-style
def cylindrical_vert(script, radius=1.0, inside=True):
    if inside:
        function = 'sqrt(x^2+y^2)<={}'.format(radius)
    else:
        function = 'sqrt(x^2+y^2)>={}'.format(radius)
    vert_function(script, function=function)
    return None
Select all vertices within a cylindrical radius Args: radius (float): radius of the cylinder center_pt (3 coordinate tuple or list): center point of the cylinder Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA
juraj-google-style
def update(self, forecasts, observations): for (t, threshold) in enumerate(self.thresholds): tp = np.count_nonzero(((forecasts >= threshold) & (observations >= self.obs_threshold))) fp = np.count_nonzero(((forecasts >= threshold) & (observations < self.obs_threshold))) fn = np.count_nonzero(((forecasts < threshold) & (observations >= self.obs_threshold))) tn = np.count_nonzero(((forecasts < threshold) & (observations < self.obs_threshold))) self.contingency_tables.iloc[t] += [tp, fp, fn, tn]
Update the ROC curve with a set of forecasts and observations Args: forecasts: 1D array of forecast values observations: 1D array of observation values.
codesearchnet
def recv(self, request_id):
    log.debug('Reading response %d from Kafka' % request_id)
    if not self._sock:
        self.reinit()
    resp = self._read_bytes(4)
    (size,) = struct.unpack('>i', resp)
    resp = self._read_bytes(size)
    return resp
Get a response packet from Kafka Arguments: request_id: can be any int (only used for debug logging...) Returns: str: Encoded kafka packet response from server
codesearchnet
def is_function_or_method(obj): return inspect.isfunction(obj) or inspect.ismethod(obj) or is_cython(obj)
Check if an object is a function or method. Args: obj: The Python object in question. Returns: True if the object is a function or method.
juraj-google-style
def validate(self):
    if self.value:
        if not isinstance(self.value, bool):
            raise TypeError('expected: {0}, observed: {1}'.format(bool, type(self.value)))
Verify that the value of the Boolean object is valid. Raises: TypeError: if the value is not of type bool.
codesearchnet
def paint(self): snippet = {'heatmap-radius': VectorStyle.get_style_value(self.radius), 'heatmap-opacity': VectorStyle.get_style_value(self.opacity), 'heatmap-color': VectorStyle.get_style_value(self.color), 'heatmap-intensity': VectorStyle.get_style_value(self.intensity), 'heatmap-weight': VectorStyle.get_style_value(self.weight)} return snippet
Renders a javascript snippet suitable for use as a mapbox-gl heatmap paint entry Returns: A dict that can be converted to a mapbox-gl javascript paint snippet
codesearchnet
def image_size_to_num_patches(image_size, grid_pinpoints, patch_size: int): if not isinstance(grid_pinpoints, list): raise TypeError('grid_pinpoints should be a list of tuples or lists') if not isinstance(image_size, (list, tuple)): if not isinstance(image_size, (torch.Tensor, np.ndarray)): raise TypeError(f'image_size invalid type {type(image_size)} with value {image_size}') image_size = image_size.tolist() best_resolution = select_best_resolution(image_size, grid_pinpoints) height, width = best_resolution num_patches = 0 for i in range(0, height, patch_size): for j in range(0, width, patch_size): num_patches += 1 num_patches += 1 return num_patches
Calculate the number of patches after the preprocessing for images of any resolution. Args: image_size (`torch.LongTensor` or `np.ndarray` or `Tuple[int, int]`): The size of the input image in the format (height, width). grid_pinpoints (`List`): A list containing possible resolutions. Each item in the list should be a tuple or list of the form `(height, width)`. patch_size (`int`): The size of each image patch. Returns: int: the number of patches
github-repos
def assert_count_equal(first, second, msg=None, extras=None): _call_unittest_assertion(_pyunit_proxy.assertCountEqual, first, second, msg=msg, extras=extras)
Asserts that two iterables have the same elements, the same number of times, without regard to order. Similar to assert_equal(Counter(list(first)), Counter(list(second))). Args: first: The first iterable to compare. second: The second iterable to compare. msg: A string that adds additional info about the failure. extras: An optional field for extra information to be included in test result. Example: assert_count_equal([0, 1, 1], [1, 0, 1]) passes the assertion. assert_count_equal([0, 0, 1], [0, 1]) raises an assertion error.
github-repos
def decode_schedule(string):
    splits = string.split()
    steps = [int(x[1:]) for x in splits[1:] if x[0] == '@']
    pmfs = np.reshape([float(x) for x in splits[1:] if x[0] != '@'], [len(steps), -1])
    return (splits[0], tuplize(steps), tuplize(pmfs))
Decodes a string into a schedule tuple. Args: string: The string encoding of a schedule tuple. Returns: A schedule tuple, see encode_schedule for details.
codesearchnet
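A quick usage sketch of decode_schedule with an assumed input string (the exact encoding comes from encode_schedule, and tuplize is assumed to convert nested arrays to tuples):

# name, '@'-prefixed step boundaries, then PMF values reshaped to [num_steps, -1]
decode_schedule('constant @0 @100 1.0 0.0 0.5 0.5')
# -> ('constant', (0, 100), ((1.0, 0.0), (0.5, 0.5)))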
def OverwriteAndClose(self, compressed_data, size):
    self.Set(self.Schema.CONTENT(compressed_data))
    self.Set(self.Schema.SIZE(size))
    super(AFF4MemoryStreamBase, self).Close()
Directly overwrite the current contents. Replaces the data currently in the stream with compressed_data, and closes the object. Makes it possible to avoid recompressing the data. Args: compressed_data: The data to write, must be zlib compressed. size: The uncompressed size of the data.
codesearchnet
def _wait_for_response(self, requests): failed_requests = [] responses_for_requests = OrderedDict.fromkeys(requests) for retry in range(self._max_retry): try: logging.debug('Try self._availability_limiter.map_with_retries(requests, responses_for_requests) failed_requests = [] for (request, response) in responses_for_requests.items(): if (self._drop_404s and (response is not None) and (response.status_code == 404)): logging.warning('Request to {0} failed with status code 404, dropping.'.format(request.url)) elif (not response): failed_requests.append((request, response)) if (not failed_requests): break logging.warning('Try requests = [fr[0] for fr in failed_requests] except InvalidRequestError: raise except Exception as e: logging.exception('Try pass if failed_requests: logging.warning('Still {0} failed request(s) after {1} retries:'.format(len(failed_requests), self._max_retry)) for (failed_request, failed_response) in failed_requests: if (failed_response is not None): failed_response_text = failed_response.text.encode('ascii', 'xmlcharrefreplace') logging.warning('Request to {0} failed with status code {1}. Response text: {2}'.format(failed_request.url, failed_response.status_code, failed_response_text)) else: logging.warning('Request to {0} failed with None response.'.format(failed_request.url)) return list(responses_for_requests.values())
Issues a batch of requests and waits for the responses. If some of the requests fail it will retry the failed ones up to `_max_retry` times. Args: requests - A list of requests Returns: A list of `requests.models.Response` objects Raises: InvalidRequestError - if any of the requests returns "403 Forbidden" response
codesearchnet
def trace_function(args=None, kwargs=None, tracing_options=None): if not tracing_options: tracing_options = TracingOptions() args = args if args else () kwargs = kwargs if kwargs else {} if tracing_options.input_signature and (args or kwargs): bound_args = function_type_utils.bind_function_inputs(args, kwargs, tracing_options.polymorphic_type, tracing_options.default_values) args, kwargs = (bound_args.args, bound_args.kwargs) with tracing_options.lock or contextlib.nullcontext(): if tracing_options.input_signature and (not args) and (not kwargs): args = tracing_options.input_signature kwargs = {} concrete_function = _maybe_define_function(args, kwargs, tracing_options) if not tracing_options.bind_graph_to_function: concrete_function._garbage_collector.release() return concrete_function
Returns a `ConcreteFunction` specialized to inputs and execution context. Compiles a Graph corresponding to the Python function logic and uses that to generate a differentiable ConcreteFunction. Args: args: inputs to specialize on. Can be concrete values (e.g. 1) or `tf.Tensor` or `tf.TensorSpec`. kwargs: keyword inputs to specialize on. Concrete values (e.g. 1) or `tf.Tensor` or `tf.TensorSpec`. tracing_options: TracingOptions for the tracing process.
github-repos
def format_datetime(self, time_input, tz=None, date_format=None):
    dt_value = self.any_to_datetime(time_input, tz)
    if date_format == '%s':
        dt_value = calendar.timegm(dt_value.timetuple())
    elif date_format:
        dt_value = dt_value.strftime(date_format)
    else:
        dt_value = dt_value.isoformat()
    return dt_value
Return timestamp from multiple input formats. Formats: #. Human Input (e.g 30 days ago, last friday) #. ISO 8601 (e.g. 2017-11-08T16:52:42Z) #. Loose Date format (e.g. 2017 12 25) #. Unix Time/Posix Time/Epoch Time (e.g. 1510686617 or 1510686617.298753) .. note:: To get a unix timestamp format use the strftime format **%s**. Python does not natively support **%s**, however this method has support. Args: time_input (string): The time input string (see formats above). tz (string): The time zone for the returned data. date_format (string): The strftime format to use, ISO by default. Returns: (string): Formatted datetime string.
codesearchnet
def GetUpdates(self, gcs_client, bucket_name, obj, since): bucket = gcs_client.bucket(bucket_name) blob = bucket.get_blob(obj) if blob is None: self.log.error('GCS object gs: raise error.SourceUnavailable('unable to download object from GCS.') if since and timestamps.FromDateTimeToTimestamp(blob.updated) < since: return [] data_map = self.GetMap(cache_info=blob.open()) data_map.SetModifyTimestamp(timestamps.FromDateTimeToTimestamp(blob.updated)) return data_map
Gets updates from a source. Args: gcs_client: initialized gcs client bucket_name: gcs bucket name obj: object with the data since: a timestamp representing the last change (None to force-get) Returns: A tuple containing the map of updates and a maximum timestamp
github-repos
def device_function(self, var): if var.type not in ('Variable', 'VariableV2', 'VarHandleOp'): tf.logging.debug('Place {} on last device: {}.'.format( var.name, self._last_device)) return self._last_device shape = tf.TensorShape(var.get_attr('shape')) assert shape.num_elements() is not None size = var.get_attr('dtype').size mem, device = heapq.heappop(self._mem_device_heap) mem += shape.num_elements() * size heapq.heappush(self._mem_device_heap, (mem, device)) tf.logging.debug('Place variable {} on {} and consumes {} Bytes.'.format( var.name, device, mem)) self._last_device = device return device
Choose a device for the input variable. Args: var: a Variable. Returns: The device for placing the var.
juraj-google-style
def _filter_headers(self): headers = {} for user in self.usernames: headers['fedora_messaging_user_{}'.format(user)] = True for package in self.packages: headers['fedora_messaging_rpm_{}'.format(package)] = True for container in self.containers: headers['fedora_messaging_container_{}'.format(container)] = True for module in self.modules: headers['fedora_messaging_module_{}'.format(module)] = True for flatpak in self.flatpaks: headers['fedora_messaging_flatpak_{}'.format(flatpak)] = True return headers
Add headers designed for filtering messages based on objects. Returns: dict: Filter-related headers to be combined with the existing headers
codesearchnet
def parse_readable_size_str(size_str):
    size_str = size_str.strip()
    if size_str.endswith('B'):
        size_str = size_str[:-1]
    if size_str.isdigit():
        return int(size_str)
    elif size_str.endswith('k'):
        return int(float(size_str[:-1]) * 1024)
    elif size_str.endswith('M'):
        return int(float(size_str[:-1]) * 1048576)
    elif size_str.endswith('G'):
        return int(float(size_str[:-1]) * 1073741824)
    else:
        raise ValueError('Failed to parsed human-readable byte size str: "%s"' % size_str)
Convert a human-readable str representation to number of bytes. Only the units "kB", "MB", "GB" are supported. The "B" character at the end of the input `str` may be omitted. Args: size_str: (`str`) A human-readable str representing a number of bytes (e.g., "0", "1023", "1.1kB", "24 MB", "23GB", "100 G"). Returns: (`int`) The parsed number of bytes. Raises: ValueError: on failure to parse the input `size_str`.
github-repos
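A few conversions that follow directly from the code above:

parse_readable_size_str('1023')    # -> 1023
parse_readable_size_str('1.1kB')   # -> 1126, i.e. int(1.1 * 1024)
parse_readable_size_str('24 MB')   # -> 25165824
parse_readable_size_str('2GB')     # -> 2147483648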
def _add_countriesdata(cls, iso3, country): countryname = country.get(' cls._countriesdata['countrynames2iso3'][countryname.upper()] = iso3 iso2 = country.get(' if iso2: cls._countriesdata['iso2iso3'][iso2] = iso3 cls._countriesdata['iso2iso3'][iso3] = iso2 m49 = country.get(' if m49: m49 = int(m49) cls._countriesdata['m49iso3'][m49] = iso3 cls._countriesdata['m49iso3'][iso3] = m49 cls._countriesdata['aliases'][iso3] = re.compile(country.get(' regionname = country.get(' sub_regionname = country.get(' intermediate_regionname = country.get(' regionid = country.get(' if regionid: regionid = int(regionid) sub_regionid = country.get(' if sub_regionid: sub_regionid = int(sub_regionid) intermediate_regionid = country.get(' if intermediate_regionid: intermediate_regionid = int(intermediate_regionid) def add_country_to_set(colname, idval, iso3): value = cls._countriesdata[colname].get(idval) if value is None: value = set() cls._countriesdata['regioncodes2countries'][idval] = value value.add(iso3) if regionname: add_country_to_set('regioncodes2countries', regionid, iso3) cls._countriesdata['regioncodes2names'][regionid] = regionname cls._countriesdata['regionnames2codes'][regionname.upper()] = regionid if sub_regionname: add_country_to_set('regioncodes2countries', sub_regionid, iso3) cls._countriesdata['regioncodes2names'][sub_regionid] = sub_regionname cls._countriesdata['regionnames2codes'][sub_regionname.upper()] = sub_regionid if intermediate_regionname: add_country_to_set('regioncodes2countries', intermediate_regionid, iso3) cls._countriesdata['regioncodes2names'][intermediate_regionid] = intermediate_regionname cls._countriesdata['regionnames2codes'][intermediate_regionname.upper()] = \ intermediate_regionid
Set up countries data from data in form provided by UNStats and World Bank Args: iso3 (str): ISO3 code for country country (hxl.Row): Country information Returns: None
juraj-google-style
def create_model(self, vpc_config_override=VPC_CONFIG_DEFAULT): return KNNModel(self.model_data, self.role, sagemaker_session=self.sagemaker_session, vpc_config=self.get_vpc_config(vpc_config_override))
Return a :class:`~sagemaker.amazon.KNNModel` referencing the latest s3 model data produced by this Estimator. Args: vpc_config_override (dict[str, list[str]]): Optional override for VpcConfig set on the model. Default: use subnets and security groups from this Estimator. * 'Subnets' (list[str]): List of subnet ids. * 'SecurityGroupIds' (list[str]): List of security group ids.
juraj-google-style
def next_counter(start=0, step=1):
    count_gen = it.count(start, step)
    next_ = functools.partial(six.next, count_gen)
    return next_
r""" Args: start (int): (default = 0) step (int): (default = 1) Returns: func: next_ CommandLine: python -m utool.util_iter --test-next_counter Example: >>> # ENABLE_DOCTEST >>> from utool.util_iter import * # NOQA >>> start = 1 >>> step = 1 >>> next_ = next_counter(start, step) >>> result = str([next_(), next_(), next_()]) >>> print(result) [1, 2, 3]
juraj-google-style
def apply_and_name(self, aggregator): reduced_df = self._apply(aggregator) if len(self.names) != len(reduced_df.columns): raise IndexError("ColumnFunction creates more columns than it has names for.") reduced_df.columns = self.names return reduced_df
Fetches the row-aggregated input columns for this ColumnFunction. Args: aggregator (Aggregator) Returns: pd.DataFrame: The dataframe has columns with names self.names that were created by this ColumnFunction, and is indexed by the index that was passed to aggregator.aggregate(index).
juraj-google-style
def __init__(self, structuring_element=None): if structuring_element is None: self.strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)) else: self.strel = structuring_element self.fgbg = cv2.bgsegm.createBackgroundSubtractorGMG()
Initializes the `BackgroundSubtractorGMG`. *Note:* Requires OpenCV to be built with `--contrib` as it uses the `bgsegm` package. Unless a custom `structuring_element` is specified, it uses: `cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))` Args: structuring_element: The structuring element.
juraj-google-style
def _get_tensor_by_tf_output(self, tf_output) -> tensor_lib.Tensor: op = self._get_operation_by_tf_operation(tf_output.oper) return op.outputs[tf_output.index]
Returns the `Tensor` representing `tf_output`. Note that there is only one such `Tensor`, i.e. multiple calls to this function with the same TF_Output value will always return the same `Tensor` object. Args: tf_output: A wrapped `TF_Output` (the C API equivalent of `Tensor`). Returns: The `Tensor` that represents `tf_output`.
github-repos
def get_dG_at_T(seq, temp): r_cal = (scipy.constants.R / scipy.constants.calorie) seq = ssbio.protein.sequence.utils.cast_to_str(seq) oobatake = {} for t in range(20, 51): oobatake[t] = calculate_oobatake_dG(seq, t) stable = [i for i in oobatake.values() if (i > 0)] if (len(stable) == 0): dG = (0.238846 * calculate_dill_dG(len(seq), temp)) method = 'Dill' else: dG = oobatake[temp] method = 'Oobatake' keq = math.exp((((- 1) * dG) / (r_cal * (temp + 273.15)))) return (dG, keq, method)
Predict dG at temperature T, using best predictions from Dill or Oobatake methods. Args: seq (str, Seq, SeqRecord): Amino acid sequence temp (float): Temperature in degrees C Returns: (tuple): tuple containing: dG (float): Free energy of unfolding (cal/mol) keq (float): Equilibrium constant Keq method (str): Method used to calculate
codesearchnet
def package_info(package, image=None): cmd = ['DISM', '/English', ('/Image:{0}'.format(image) if image else '/Online'), '/Get-PackageInfo'] if ('~' in package): cmd.append('/PackageName:{0}'.format(package)) else: cmd.append('/PackagePath:{0}'.format(package)) out = __salt__['cmd.run_all'](cmd) if (out['retcode'] == 0): ret = dict() for line in six.text_type(out['stdout']).splitlines(): if (' : ' in line): info = line.split(' : ') if (len(info) < 2): continue ret[info[0]] = info[1] else: ret = out return ret
Display information about a package Args: package (str): The full path to the package. Can be either a .cab file or a folder. Should point to the original source of the package, not to where the file is installed. You cannot use this command to get package information for .msu files image (Optional[str]): The path to the root directory of an offline Windows image. If `None` is passed, the running operating system is targeted. Default is None. Returns: dict: A dictionary containing the results of the command CLI Example: .. code-block:: bash salt '*' dism.package_info C:\\packages\\package.cab
codesearchnet
def get_num_bytes(self, batch: Sequence[Union[tf.Tensor, torch.Tensor]]) -> int: if self._framework == 'tf': return sum((sys.getsizeof(element) for element in batch)) else: return sum((el.element_size() for tensor in batch for el in tensor.values()))
Returns: The number of bytes of data for the Tensors batch.
github-repos
class DepthAnythingNeck(nn.Module): def __init__(self, config): super().__init__() self.config = config self.reassemble_stage = DepthAnythingReassembleStage(config) self.convs = nn.ModuleList() for channel in config.neck_hidden_sizes: self.convs.append(nn.Conv2d(channel, config.fusion_hidden_size, kernel_size=3, padding=1, bias=False)) self.fusion_stage = DepthAnythingFeatureFusionStage(config) def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]: if not isinstance(hidden_states, (tuple, list)): raise TypeError('hidden_states should be a tuple or list of tensors') if len(hidden_states) != len(self.config.neck_hidden_sizes): raise ValueError('The number of hidden states should be equal to the number of neck hidden sizes.') hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width) features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)] output = self.fusion_stage(features) return output
DepthAnythingNeck. A neck is a module that is normally used between the backbone and the head. It takes a list of tensors as input and produces another list of tensors as output. For DepthAnything, it includes 2 stages: * DepthAnythingReassembleStage * DepthAnythingFeatureFusionStage. Args: config (dict): config dict.
github-repos
def generate_tree_path(fileDigest, depth):
    if depth < 0:
        raise Exception("depth level can not be negative")
    if os.path.split(fileDigest)[1] != fileDigest:
        raise Exception("fileDigest cannot contain path separator")
    min = (2**(depth + 1)) - 1
    if len(fileDigest) < min:
        raise Exception("fileDigest too short for the given depth")
    path = ""
    index = 0
    for p in range(1, depth + 1):
        jump = 2**p
        path = os.path.join(path, fileDigest[index:index + jump])
        index += jump
    path = os.path.join(path, fileDigest[index:])
    return path
Generate a relative path from the given fileDigest. The relative path has a number of directory levels according to depth. Args: fileDigest -- digest for which the relative path will be generated depth -- number of levels to use in relative path generation Returns: relative path for the given digest
juraj-google-style
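A small illustration with an assumed digest value; at depth 2 the first two characters form the first directory, the next four the second, and the remainder the file name:

generate_tree_path('abcdef0123', depth=2)   # -> 'ab/cdef/0123' on a POSIX filesystem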
def launch_job(job_name, cmd=None, code_dir=None, excludes='*.ipynb .git .ipynb_checkpoints', dependencies=tuple(), queue='john', image='codalab/python', memory='18g', debug=False, tail=False): print 'Remember to set up SSH tunnel and LOG IN through the command line before calling this.' def execute(cmd): return shell(cmd, verbose=True, debug=debug) if code_dir: execute('cl up -n code -w {} {} -x {}'.format(worksheet, code_dir, excludes)) options = '-v -n {} -w {} --request-queue {} --request-docker-image {} --request-memory {}'.format( job_name, worksheet, queue, image, memory) dep_str = ' '.join(['{0}:{0}'.format(dep) for dep in dependencies]) cmd = "cl run {} {} '{}'".format(options, dep_str, cmd) if tail: cmd += ' -t' execute(cmd)
Launch a job on CodaLab (optionally upload code that the job depends on). Args: job_name: name of the job cmd: command to execute code_dir: path to code folder. If None, no code is uploaded. excludes: file types to exclude from the upload dependencies: list of other bundles that we depend on debug: if True, prints SSH commands, but does not execute them tail: show the streaming output returned by CodaLab once it launches the job
juraj-google-style
def __init__(self, channel): self.ListTraces = channel.unary_unary( "/google.devtools.cloudtrace.v1.TraceService/ListTraces", request_serializer=google_dot_devtools_dot_cloudtrace__v1_dot_proto_dot_trace__pb2.ListTracesRequest.SerializeToString, response_deserializer=google_dot_devtools_dot_cloudtrace__v1_dot_proto_dot_trace__pb2.ListTracesResponse.FromString, ) self.GetTrace = channel.unary_unary( "/google.devtools.cloudtrace.v1.TraceService/GetTrace", request_serializer=google_dot_devtools_dot_cloudtrace__v1_dot_proto_dot_trace__pb2.GetTraceRequest.SerializeToString, response_deserializer=google_dot_devtools_dot_cloudtrace__v1_dot_proto_dot_trace__pb2.Trace.FromString, ) self.PatchTraces = channel.unary_unary( "/google.devtools.cloudtrace.v1.TraceService/PatchTraces", request_serializer=google_dot_devtools_dot_cloudtrace__v1_dot_proto_dot_trace__pb2.PatchTracesRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def __init__(self, asynchronous_correlation_value=None, cancellation_result=None): super(CancelResponsePayload, self).__init__( enums.Tags.RESPONSE_PAYLOAD ) self._asynchronous_correlation_value = None self._cancellation_result = None self.asynchronous_correlation_value = asynchronous_correlation_value self.cancellation_result = cancellation_result
Construct a Cancel response payload struct. Args: asynchronous_correlation_value (bytes): The ID of a pending operation that was cancelled, in bytes. Optional, defaults to None. cancellation_result (enum): A CancellationResult enumeration specifying the result of canceling the operation. Optional, defaults to None.
juraj-google-style
def should_close(http_version, connection_field):
    connection_field = (connection_field or '').lower()
    if http_version == 'HTTP/1.0':
        return connection_field.replace('-', '') != 'keepalive'
    else:
        return connection_field == 'close'
Return whether the connection should be closed. Args: http_version (str): The HTTP version string like ``HTTP/1.0``. connection_field (str): The value for the ``Connection`` header.
codesearchnet
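Example behaviour implied by the code above (HTTP/1.0 defaults to closing, HTTP/1.1 to keep-alive):

should_close('HTTP/1.0', None)          # True  (no keep-alive requested)
should_close('HTTP/1.0', 'Keep-Alive')  # False
should_close('HTTP/1.1', None)          # False (persistent by default)
should_close('HTTP/1.1', 'close')       # True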
def DummyMethod(name, *params): def make_param(param): return pytd.Parameter(param, type=pytd.AnythingType(), kind=pytd.ParameterKind.REGULAR, optional=False, mutated_type=None) sig = pytd.Signature(tuple((make_param(param) for param in params)), starargs=None, starstarargs=None, return_type=pytd.AnythingType(), exceptions=(), template=()) return pytd.Function(name=name, signatures=(sig,), kind=pytd.MethodKind.METHOD, flags=pytd.MethodFlag.NONE)
Create a simple method using only "Any"s as types. Arguments: name: The name of the method *params: The parameter names. Returns: A pytd.Function.
github-repos
def save_pcoder(self, pcoder, *labels): raise NotImplementedError
Saves pcoder for given PCollection. Correct reading of PCollection from Cache requires PCoder to be known. This method saves desired PCoder for PCollection that will subsequently be used by sink(...), source(...), and, most importantly, read(...) method. The latter must be able to read a PCollection written by Beam using non-Beam IO. Args: pcoder: A PCoder to be used for reading and writing a PCollection. *labels: List of labels for PCollection instance.
github-repos
def convert_avgpool(params, w_name, scope_name, inputs, layers, weights, names): print('Converting pooling ...') if (names == 'short'): tf_name = ('P' + random_string(7)) elif (names == 'keep'): tf_name = w_name else: tf_name = (w_name + str(random.random())) if ('kernel_shape' in params): (height, width) = params['kernel_shape'] else: (height, width) = params['kernel_size'] if ('strides' in params): (stride_height, stride_width) = params['strides'] else: (stride_height, stride_width) = params['stride'] if ('pads' in params): (padding_h, padding_w, _, _) = params['pads'] else: (padding_h, padding_w) = params['padding'] input_name = inputs[0] pad = 'valid' if (((height % 2) == 1) and ((width % 2) == 1) and ((height pad = 'same' else: padding_name = (tf_name + '_pad') padding_layer = keras.layers.ZeroPadding2D(padding=(padding_h, padding_w), name=padding_name) layers[padding_name] = padding_layer(layers[inputs[0]]) input_name = padding_name pooling = keras.layers.AveragePooling2D(pool_size=(height, width), strides=(stride_height, stride_width), padding=pad, name=tf_name, data_format='channels_first') layers[scope_name] = pooling(layers[input_name])
Convert Average pooling. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
codesearchnet
def log(self, message): self._buffer.append([datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.000%z'), message]) if not self.buffered or (self._flush_threshold and len(self._buffer) >= self._flush_threshold): self.flush()
Logs a message to the Bulkdozer feed's Log tab. Args: message: The message to log to the feed, it will be appended at the bottom of the log, after the last message that was written.
github-repos
def GetAnalyzersInformation(cls):
    analyzer_information = []
    for _, analyzer_class in cls.GetAnalyzers():
        description = getattr(analyzer_class, 'DESCRIPTION', '')
        analyzer_information.append((analyzer_class.NAME, description))
    return analyzer_information
Retrieves the analyzers information. Returns: list[tuple]: containing: str: analyzer name. str: analyzer description.
codesearchnet
def update_offset(self, new_offset):
    self.offset = new_offset
    self.data_points = self._data_points[self.offset:]
    self.timestamps = self._timestamps[self.offset:]
Updates how many data points to skip in calculations. Always use this function to update offset instead of directly setting self.offset. Args: new_offset: The new offset.
juraj-google-style
def make_path_writable(path): from rez.config import config try: orig_mode = os.stat(path).st_mode new_mode = orig_mode if (config.make_package_temporarily_writable and (not os.access(path, os.W_OK))): new_mode = (orig_mode | stat.S_IWUSR) if (new_mode != orig_mode): os.chmod(path, new_mode) except OSError: orig_mode = None new_mode = None try: (yield) finally: if (new_mode != orig_mode): os.chmod(path, orig_mode)
Temporarily make `path` writable, if possible. Does nothing if: - config setting 'make_package_temporarily_writable' is False; - this can't be done (eg we don't own `path`). Args: path (str): Path to make temporarily writable
codesearchnet
def write_int64(self, value, little_endian=True):
    if little_endian:
        endian = "<"
    else:
        endian = ">"
    return self.pack('%sq' % endian, value)
Pack the value as a signed integer and write 8 bytes to the stream. Args: value: little_endian (bool): specify the endianness. (Default) Little endian. Returns: int: the number of bytes written.
juraj-google-style
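self.pack presumably wraps struct.pack and writes the packed bytes to the stream; the little-endian format used for a signed 64-bit value is equivalent to:

import struct
struct.pack('<q', 1024)   # -> b'\x00\x04\x00\x00\x00\x00\x00\x00', 8 bytes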
def create_composite_loss(self, losses, regularize=True, include_marked=True, name='cost'): all_losses = [] if losses: all_losses.extend(losses) if include_marked: all_losses.extend(self.marked_losses) if not all_losses: raise ValueError('No losses specified!') if regularize: all_losses.extend(self.regularization_losses) with self._g.as_default(): result = tf.add_n(all_losses, name=name) self.add_scalar_summary(result) return result
Creates a loss that is the sum of all specified losses. Args: losses: A sequence of losses to include. regularize: Whether or not to include regularization losses. include_marked: Whether or not to use the marked losses. name: The name for this variable. Returns: A single tensor that is the sum of all losses. Raises: ValueError: if there are no losses.
juraj-google-style
def _maybe_truncate_traceback(traceback):
    if len(traceback) > MAX_TRACEBACK_LENGTH:
        return traceback[:MAX_TRACEBACK_LENGTH - 2] + [_ELLIPSIS, traceback[-1]]
    else:
        return traceback
Truncate the traceback if it is too long. Args: traceback: A list representing an error's traceback. There should be one list item per entry in the traceback (in the right order); beyond that, this function does not care about the item types. Returns: The traceback, possibly with some items removed and an _ELLIPSIS inserted. Guaranteed to be no longer than MAX_TRACEBACK_LENGTH.
github-repos
def json(self, data):
    self._headers['Content-Type'] = 'application/json'
    if not isinstance(data, str):
        data = json.dumps(data, indent=4)
    self._body = data
Defines the mock response JSON body. Arguments: data (dict|list|str): JSON body data. Returns: self: ``pook.Response`` current instance.
juraj-google-style
def sort_edge(edges): return sorted(edges, key=lambda x: (x.L, x.R))
Sort iterable of edges first by left node indices then right. Args: edges(list[Edge]): List of edges to be sorted. Returns: list[Edge]: Sorted list by left and right node indices.
juraj-google-style
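A usage sketch with a hypothetical Edge namedtuple standing in for the real edge class (only the .L and .R attributes are needed):

from collections import namedtuple
Edge = namedtuple('Edge', ['L', 'R'])   # hypothetical stand-in for the edge type
sort_edge([Edge(2, 1), Edge(1, 3), Edge(1, 2)])
# -> [Edge(L=1, R=2), Edge(L=1, R=3), Edge(L=2, R=1)]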
def preprocess_JPEG(self, image, **kwargs):
    save_kwargs = {'progressive': VERSATILEIMAGEFIELD_PROGRESSIVE_JPEG, 'quality': QUAL}
    if image.mode != 'RGB':
        image = image.convert('RGB')
    return (image, save_kwargs)
Receive a PIL Image instance of a JPEG and returns 2-tuple. Args: * [0]: Image instance, converted to RGB * [1]: Dict with a quality key (mapped to the value of `QUAL` as defined by the `VERSATILEIMAGEFIELD_JPEG_RESIZE_QUALITY` setting)
codesearchnet
def _GetEarliestYearFromFileEntry(self): file_entry = self.GetFileEntry() if (not file_entry): return None stat_object = file_entry.GetStat() posix_time = getattr(stat_object, 'crtime', None) if (posix_time is None): posix_time = getattr(stat_object, 'ctime', None) if (file_entry.TYPE_INDICATOR == dfvfs_definitions.TYPE_INDICATOR_GZIP): posix_time = getattr(stat_object, 'mtime', None) if (posix_time is None): logger.warning('Unable to determine earliest year from file stat information.') return None try: year = timelib.GetYearFromPosixTime(posix_time, timezone=self._knowledge_base.timezone) return year except ValueError as exception: logger.error('Unable to determine earliest year from file stat information with error: {0!s}'.format(exception)) return None
Retrieves the year from the file entry date and time values. This function uses the creation time if available otherwise the change time (metadata last modification time) is used. Returns: int: year of the file entry or None.
codesearchnet
def summary(self, title, sentences=0, chars=0, auto_suggest=True, redirect=True): page_info = self.page(title, auto_suggest=auto_suggest, redirect=redirect) return page_info.summarize(sentences, chars)
Get the summary for the title in question Args: title (str): Page title to summarize sentences (int): Number of sentences to return in summary chars (int): Number of characters to return in summary auto_suggest (bool): Run auto-suggest on title before summarizing redirect (bool): Use page redirect on title before summarizing Returns: str: The summarized results of the page Note: Precedence for parameters: sentences then chars; if both are 0 then the entire first section is returned
codesearchnet
def observations_np(self, boundary=20): list_observations_np_ts = [t.observations_np for t in self.trajectories] OBS = list_observations_np_ts[0].shape[1:] num_time_steps = [t.num_time_steps for t in self.trajectories] t_max = max(num_time_steps) boundary = int(boundary) bucket_length = (boundary * int(np.ceil((float(t_max) / boundary)))) def padding_config(obs): num_to_pad = ((bucket_length + 1) - obs.shape[0]) return ([(0, num_to_pad)] + ([(0, 0)] * len(OBS))) return (np.stack([np.pad(obs, padding_config(obs), 'constant') for obs in list_observations_np_ts]), num_time_steps)
Pads the observations in all the trajectories and returns them. Args: boundary: integer, Observations will be padded to (n * boundary) + 1 where n is an integer. Returns: a tuple(padded_observations, time_steps), with shapes: padded_observations: (self.batch_size, n * boundary + 1) + OBS time_steps: integer list of length = self.batch_size
codesearchnet
def __str__(self): info = {'section': self._section, 'config': self.config, 'req_type': self._req_type, 'req': str(self.req), 'range': str(self.range), 'exclude': str(self.exclude), 'include': str(self.include), 'init': str(self._initialized)} req_str = '\n >>> _Reqs Instance <<<\n' req_str += 'Section: {section}\n' req_str += 'Configuration name: {config}\n' req_str += 'Requirement type: {req_type}\n' req_str += 'Requirement: {req}\n' req_str += 'Range: {range}\n' req_str += 'Exclude: {exclude}\n' req_str += 'Include: {include}\n' req_str += 'Initialized: {init}\n\n' return req_str.format(**info)
Prints a requirement and its components. Returns: String that has concatenated information about a requirement.
github-repos
def random_shuffle(value, seed=None, name=None): with ops.name_scope(name, 'shuffle', [value, seed]): if value.rank == 0: raise ValueError('Cannot shuffle a scalar StructuredTensor') first_dimension = value.nrows() index = random_ops.random_shuffle(math_ops.range(first_dimension), seed=seed) return gather(value, index, axis=0)
Shuffle a structured tensor on the zeroth axis. Args: value: a structured tensor of rank at least one. seed: the seed for shuffling. name: the name for shuffle. Returns: The shuffled structured tensor.
github-repos
def invitation_backend(backend=None, namespace=None):
    backend = backend or ORGS_INVITATION_BACKEND
    (class_module, class_name) = backend.rsplit('.', 1)
    mod = import_module(class_module)
    return getattr(mod, class_name)(namespace=namespace)
Returns a specified invitation backend Args: backend: dotted path to the invitation backend class namespace: URL namespace to use Returns: an instance of an InvitationBackend
codesearchnet
def getFingerprintsForExpressions(self, body, sparsity=1.0): return self._expressions.resolveBulkExpression(self._retina, body, sparsity)
Bulk resolution of expressions Args: body, ExpressionOperation: The JSON encoded expression to be evaluated (required) sparsity, float: Sparsify the resulting expression to this percentage (optional) Returns: list of Fingerprint Raises: CorticalioException: if the request was not successful
juraj-google-style
def get_tokens(max_value): vocab = [str(i) for i in range(max_value)] vocab = set(vocab) vocab.update(CodeOp.LITERALS) vocab.update(CodeOp.KEYWORDS) vocab |= set("".join(vocab)) return sorted(vocab)
Defines tokens. Args: max_value: the maximum numeric range for the token. Returns: list of string tokens in vocabulary.
juraj-google-style
def dismiss_confirm(self, text=None, wait=None): with self.driver.dismiss_modal("confirm", text=text, wait=wait): yield
Execute the wrapped code, dismissing a confirm. Args: text (str | RegexObject, optional): Text to match against the text in the modal. wait (int | float, optional): Maximum time to wait for the modal to appear after executing the wrapped code. Raises: ModalNotFound: If a modal dialog hasn't been found.
juraj-google-style
def fit_size_distribution_models(self, model_names, model_objs, input_columns, output_columns=None, calibrate=False): if output_columns is None: output_columns = ["Shape", "Location", "Scale"] groups = np.unique(self.data["train"]["member"][self.group_col]) weights=None for group in groups: group_data = self.data["train"]["combo"].loc[self.data["train"]["combo"][self.group_col] == group] group_data = group_data.dropna() group_data = group_data[group_data[output_columns[-1]] > 0] if self.sector: lon_obj = group_data.loc[:,'Centroid_Lon'] lat_obj = group_data.loc[:,'Centroid_Lat'] conus_lat_lon_points = zip(lon_obj.values.ravel(),lat_obj.values.ravel()) center_lon, center_lat = self.proj_dict["lon_0"],self.proj_dict["lat_0"] distances = np.array([np.sqrt((x-center_lon)**2+\ (y-center_lat)**2) for (x, y) in conus_lat_lon_points]) min_dist, max_minus_min = min(distances),max(distances)-min(distances) distance_0_1 = [1.0-((d - min_dist)/(max_minus_min)) for d in distances] weights = np.array(distance_0_1) self.size_distribution_models[group] = {"multi": {}, "lognorm": {}} if calibrate: self.size_distribution_models[group]["calshape"] = {} self.size_distribution_models[group]["calscale"] = {} log_labels = np.log(group_data[output_columns].values) log_means = log_labels.mean(axis=0) log_sds = log_labels.std(axis=0) self.size_distribution_models[group]['lognorm']['mean'] = log_means self.size_distribution_models[group]['lognorm']['sd'] = log_sds for m, model_name in enumerate(model_names): print(group, model_name) self.size_distribution_models[group]["multi"][model_name] = deepcopy(model_objs[m]) try: self.size_distribution_models[group]["multi"][model_name].fit(group_data[input_columns], (log_labels - log_means) / log_sds, sample_weight=weights) except: self.size_distribution_models[group]["multi"][model_name].fit(group_data[input_columns], (log_labels - log_means) / log_sds) if calibrate: training_predictions = self.size_distribution_models[ group]["multi"][model_name].predict(group_data[input_columns]) self.size_distribution_models[group]["calshape"][model_name] = LinearRegression() self.size_distribution_models[group]["calshape"][model_name].fit(training_predictions[:, 0:1], (log_labels[:, 0] - log_means[0]) / log_sds[ 0], sample_weight=weights) self.size_distribution_models[group]["calscale"][model_name] = LinearRegression() self.size_distribution_models[group]["calscale"][model_name].fit(training_predictions[:, 1:], (log_labels[:, 1] - log_means[1]) / log_sds[ 1], sample_weight=weights)
Fits multitask machine learning models to predict the parameters of a size distribution Args: model_names: List of machine learning model names model_objs: scikit-learn style machine learning model objects input_columns: Training data columns used as input for ML model output_columns: Training data columns used for prediction calibrate: Whether or not to fit a log-linear regression to predictions from ML model
juraj-google-style
def encode(self, s): try: import matplotlib.image as im except ImportError as e: tf.logging.warning('Reading an image requires matplotlib to be installed: %s', e) raise NotImplementedError('Image reading not implemented.') return im.imread(s)
Transform a string with a filename into a list of RGB integers. Args: s: path to the file with an image. Returns: ids: list of integers
codesearchnet
def SetBalanceFor(self, assetId, fixed8_val):
    found = False
    for key, val in self.Balances.items():
        if key == assetId:
            self.Balances[key] = fixed8_val
            found = True
    if not found:
        self.Balances[assetId] = fixed8_val
Set the balance for an asset id. Args: assetId (UInt256): fixed8_val (Fixed8): balance value.
juraj-google-style
def _select_position(self, width, height): positions = self._generate_placements(width, height) if (self.rot and (width != height)): positions += self._generate_placements(height, width) if (not positions): return (None, None) return min(((p[0], self._rect_fitness(*p)) for p in positions), key=operator.itemgetter(1))
Search for the placement with the best fitness for the rectangle. Returns: tuple (Rectangle, fitness) - Rectangle placed in the fittest position None - Rectangle couldn't be placed
codesearchnet
def _absolute_position_to_relative_position_unmasked(x): (batch, heads, length, _) = common_layers.shape_list(x) x = tf.pad(x, [[0, 0], [0, 0], [0, 0], [0, (length - 1)]]) x_flat = tf.reshape(x, [batch, heads, ((length ** 2) + (length * (length - 1)))]) x_flat = tf.pad(x_flat, [[0, 0], [0, 0], [length, 0]]) x = tf.reshape(x_flat, [batch, heads, length, (2 * length)]) x = tf.slice(x, [0, 0, 0, 1], [batch, heads, length, ((2 * length) - 1)]) return x
Helper function for dot_product_unmasked_self_attention_relative_v2. Rearrange an attention logits or weights Tensor. The dimensions of the input represent: [batch, heads, query_position, memory_position] The dimensions of the output represent: [batch, heads, query_position, memory_position - query_position + length - 1] Only works with unmasked_attention. Args: x: a Tensor with shape [batch, heads, length, length] Returns: a Tensor with shape [batch, heads, length, 2*length-1]
codesearchnet
def AddKeyByPath(self, key_path, registry_key): if (not key_path.startswith(definitions.KEY_PATH_SEPARATOR)): raise ValueError('Key path does not start with: {0:s}'.format(definitions.KEY_PATH_SEPARATOR)) if (not self._root_key): self._root_key = FakeWinRegistryKey(self._key_path_prefix) path_segments = key_paths.SplitKeyPath(key_path) parent_key = self._root_key for path_segment in path_segments: try: subkey = FakeWinRegistryKey(path_segment) parent_key.AddSubkey(subkey) except KeyError: subkey = parent_key.GetSubkeyByName(path_segment) parent_key = subkey parent_key.AddSubkey(registry_key)
Adds a Windows Registry key for a specific key path. Args: key_path (str): Windows Registry key path to add the key. registry_key (WinRegistryKey): Windows Registry key. Raises: KeyError: if the subkey already exists. ValueError: if the Windows Registry key cannot be added.
codesearchnet
def __eq__(self, other): if (type(self) is type(other) and self._index == other._index and self._drives == other._drives and self._controls == other._controls and self._measures == other._measures and self._acquires == other._acquires): return True return False
Two physical qubits are the same if they have the same index and channels. Args: other (Qubit): other Qubit Returns: bool: are self and other equal.
juraj-google-style
def export(self, remote_function): if self._worker.mode is None: self._functions_to_export.append(remote_function) return if self._worker.mode != ray.worker.SCRIPT_MODE: return self._do_export(remote_function)
Export a remote function. Args: remote_function: the RemoteFunction object.
juraj-google-style
def tox(args=''): basedir = dirname(__file__) latest_pythons = _determine_latest_pythons() highest_minor_python = _highest_minor(latest_pythons) _local_needs_pythons(flo('cd {basedir} && ' 'python{highest_minor_python} -m tox {args}'))
Run tox. Build package and run unit tests against several pythons. Args: args: Optional arguments passed to tox. Example: fab tox:'-e py36 -r'
juraj-google-style
def search(self, q): results = self._api.search(q=q) return results
Search tweets by keyword. Args: q: keyword Returns: list: tweet list
juraj-google-style
def get_version(): if not 'win' in sys.platform: return NO_WIN win_ver = sys.getwindowsversion() try: major, minor, build = win_ver.platform_version except AttributeError: if sys.version_info < (3, 0): from platform import _get_real_winver major, minor, build = _get_real_winver(win_ver.major, win_ver.minor, win_ver.build) major, minor, build = int(major), int(minor), int(build) else: major, minor, build = win_ver.major, win_ver.minor, win_ver.build try: is_server = 1 if win_ver.product_type == 3 else 0 except AttributeError: is_server = 0 try: if major == 10: sp_ver = build else: sp_ver = win_ver.service_pack_major or 0 except AttributeError: try: sp_ver = int(win_ver.service_pack.rsplit(' ', 1)) except (IndexError, ValueError): sp_ver = 0 return (major, minor, sp_ver, is_server)
Get the Windows OS version running on the machine. Params: None Returns: The Windows OS version running on the machine (comparable with the values listed in the class).
juraj-google-style
def categorical_partition_data(data):
    series = pd.Series(data)
    value_counts = series.value_counts(dropna=True)
    null_indexes = series.isnull()
    nonnull_count = (null_indexes == False).sum()
    weights = value_counts.values / nonnull_count
    return {'values': value_counts.index.tolist(), 'weights': weights}
Convenience method for creating weights from categorical data. Args: data (list-like): The data from which to construct the estimate. Returns: A new partition object:: { "partition": (list) The categorical values present in the data "weights": (list) The weights of the values in the partition. }
codesearchnet
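A worked example, relying on pandas' default count-descending ordering of value_counts; nulls are excluded from both the values and the weight denominator:

categorical_partition_data(['a', 'a', 'a', 'b', None])
# -> {'values': ['a', 'b'], 'weights': array([0.75, 0.25])}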
def __init__(self, engine_id, client): self._client = client self._dv = client.direct_view(targets='all') self._dv.use_dill() nengines = len(client) super(ObjectHub, self).__init__(engine_id, nengines)
Make an ObjectHub. Args: engine_id: ipyparallel engine id number where this Hub is located, or a negative number if it is on an ipyparallel client. client: ipyparallel.Client
juraj-google-style
def create(cls, session, record, imported=False, auto_reply=False): return super(Conversations, cls).create(session, record, imported=imported, auto_reply=auto_reply)
Create a conversation. Please note that conversation cannot be created with more than 100 threads, if attempted the API will respond with HTTP 412. Args: session (requests.sessions.Session): Authenticated session. record (helpscout.models.Conversation): The conversation to be created. imported (bool, optional): The ``imported`` request parameter enables conversations to be created for historical purposes (i.e. if moving from a different platform, you can import your history). When ``imported`` is set to ``True``, no outgoing emails or notifications will be generated. auto_reply (bool): The ``auto_reply`` request parameter enables auto replies to be sent when a conversation is created via the API. When ``auto_reply`` is set to ``True``, an auto reply will be sent as long as there is at least one ``customer`` thread in the conversation. Returns: helpscout.models.Conversation: Newly created conversation.
codesearchnet
def input_selector_schema(config_cls): config_type = resolve_config_cls_arg(config_cls) check.param_invariant(config_type.is_selector, 'config_cls') def _wrap(func): def _selector(context, config_value): selector_key, selector_value = single_item(config_value) return func(context, selector_key, selector_value) return _create_input_schema(config_type, _selector) return _wrap
A decorator for annotating a function that can take the selected properties from a ``config_value`` in to an instance of a custom type. Args: config_cls (Selector)
juraj-google-style
def resize(x, mode, factor=4): assert (mode in ['bilinear', 'nearest']), mode shp = (tf.shape(x)[2:] * factor) x = tf.transpose(x, [0, 2, 3, 1]) if (mode == 'bilinear'): x = tf.image.resize_bilinear(x, shp, align_corners=True) else: x = tf.image.resize_nearest_neighbor(x, shp, align_corners=False) return tf.transpose(x, [0, 3, 1, 2])
Resize input tensor with unknown input shape by a factor Args: x (tf.Tensor): tensor NCHW mode (str): either 'bilinear' or 'nearest' factor (int, optional): resize factor for H, W Note: Differences here against Caffe have huge impacts on the quality of the predictions. Returns: tf.Tensor: resized tensor NCHW
codesearchnet
def _reset(self, indices): self.assert_common_preconditions() return np.stack([self._envs[index].reset() for index in indices])
Resets environments at the given indices; shouldn't pre-process or record. Subclasses should override this to do the actual reset if something other than the default implementation is desired. Args: indices: list of indices of underlying envs to call reset on. Returns: np.ndarray of stacked observations from the reset-ed envs.
juraj-google-style
def should_use(intersection):
    if intersection.interior_curve in ACCEPTABLE_CLASSIFICATIONS:
        return True
    if intersection.interior_curve in TANGENT_CLASSIFICATIONS:
        return intersection.s == 0.0 or intersection.t == 0.0
    return False
Check if an intersection can be used as part of a curved polygon. Will return :data:`True` if the intersection is classified as :attr:`~.IntersectionClassification.FIRST`, :attr:`~.IntersectionClassification.SECOND` or :attr:`~.IntersectionClassification.COINCIDENT` or if the intersection is classified is a corner / edge end which is classified as :attr:`~.IntersectionClassification.TANGENT_FIRST` or :attr:`~.IntersectionClassification.TANGENT_SECOND`. Args: intersection (.Intersection): An intersection to be added. Returns: bool: Indicating if the intersection will be used.
codesearchnet
def lists_to_tuples(structure): return tree_impl.lists_to_tuples(structure)
Returns the structure with list instances changed to tuples. Args: structure: Arbitrarily nested structure. Returns: The same structure but with tuples instead of lists.
github-repos
def get_training_or_validation_split(samples, labels, validation_split, subset): if not validation_split: return (samples, labels) num_val_samples = int(validation_split * len(samples)) if subset == 'training': io_utils.print_msg(f'Using {len(samples) - num_val_samples} files for training.') samples = samples[:-num_val_samples] if labels is not None: labels = labels[:-num_val_samples] elif subset == 'validation': io_utils.print_msg(f'Using {num_val_samples} files for validation.') samples = samples[-num_val_samples:] if labels is not None: labels = labels[-num_val_samples:] else: raise ValueError(f'`subset` must be either "training" or "validation", received: {subset}') return (samples, labels)
Potentially restrict samples & labels to a training or validation split. Args: samples: List of elements. labels: List of corresponding labels. validation_split: Float, fraction of data to reserve for validation. subset: Subset of the data to return. Either `"training"`, `"validation"`, or `None`. If `None`, we return all of the data. Returns: tuple (samples, labels), potentially restricted to the specified subset.
github-repos
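A small illustration (ignoring the io_utils log messages): with 10 samples and validation_split=0.2, the last two samples are held out for validation:

samples, labels = list('abcdefghij'), list(range(10))
get_training_or_validation_split(samples, labels, 0.2, 'training')
# -> (['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], [0, 1, 2, 3, 4, 5, 6, 7])
get_training_or_validation_split(samples, labels, 0.2, 'validation')
# -> (['i', 'j'], [8, 9])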
def __init__(self, dtype, shape=None, shared_name=None, name='conditional_accumulator', reduction_type='MEAN'): accumulator_ref = gen_data_flow_ops.resource_conditional_accumulator(dtype=dtype, shape=shape, shared_name=shared_name, name=name, reduction_type=reduction_type) if context.executing_eagerly(): self._resource_deleter = resource_variable_ops.EagerResourceDeleter(handle=accumulator_ref, handle_device=context.context().device_name) super(ConditionalAccumulator, self).__init__(dtype, shape, accumulator_ref)
Creates a new ConditionalAccumulator. Args: dtype: Datatype of the accumulated gradients. shape: Shape of the accumulated gradients. shared_name: Optional. If non-empty, this accumulator will be shared under the given name across multiple sessions. name: Optional name for the accumulator. reduction_type: Reduction type to use when taking the gradient.
github-repos
def csv(self, ondemand=False):
    self._request_uri = '{}/{}'.format(self._api_uri, 'csv')
    self._stream = True
    if ondemand:
        self._request.add_payload('runNow', True)
Update request URI to return CSV data. For onDemand bulk generation to work it must first be enabled in the ThreatConnect platform under System settings. Args: ondemand (boolean): Enable on demand bulk generation.
codesearchnet
def supported_language(lang): try: self.get_collection(lang=lang) return True except LanguageNotSupported as e: return False
Return True if polyglot supports the language. Args: lang (string): Language code.
codesearchnet
def take_screenshot(webdriver, file_name):
    folder_location = os.path.join(ProjectUtils.get_project_root(),
                                   WebScreenShotUtil.SCREEN_SHOT_LOCATION)
    WebScreenShotUtil.__capture_screenshot(webdriver, folder_location, file_name + ".png")
Captures a screenshot. Args: webdriver (WebDriver) - Selenium webdriver. file_name (str) - File name to save screenshot as.
juraj-google-style
def full_like(array, fill_value, reverse=False, dtype=None, keepmeta=True):
    if keepmeta:
        return (dc.zeros_like(array) + fill_value).astype(dtype)
    else:
        return dc.full(array.shape, fill_value, dtype)
Create an array of `fill_value` with the same shape and type as the input array. Args: array (xarray.DataArray): The shape and data-type of it define these same attributes of the output array. fill_value (scalar or numpy.ndarray): Fill value or array. dtype (data-type, optional): If specified, this function overrides the data-type of the output array. keepmeta (bool, optional): Whether *coords, attrs, and name of the input array are kept in the output one. Default is True. Returns: array (decode.array): Decode array filled with `fill_value`.
codesearchnet
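A hedged sketch of the equivalent plain-xarray operation; the decode-specific metadata handling (keepmeta) is omitted:

import numpy as np
import xarray as xr

template = xr.DataArray(np.zeros((2, 3)), dims=("t", "ch"), name="signal")
filled = xr.full_like(template, 273.15)   # same shape and dims, constant fill value
assert filled.shape == (2, 3) and float(filled[0, 0]) == 273.15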
def InitFromDataPoints(self, start_stats, complete_stats): self.start_points = self._ConvertToResultList(start_stats) self.complete_points = self._ConvertToResultList(complete_stats) return self
Initializes the data points from start and complete statistics. Args: start_stats: A list of lists, each containing two values (a timestamp and the number of clients started at this time). complete_stats: A list of lists, each containing two values (a timestamp and the number of clients completed at this time). Returns: A reference to the current instance to allow method chaining.
juraj-google-style
def DEFINE_multi_enum(name, default, enum_values, help, flag_values=_flagvalues.FLAGS, case_sensitive=True, **args): parser = _argument_parser.EnumParser(enum_values, case_sensitive) serializer = _argument_parser.ArgumentSerializer() DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)
Registers a flag whose value can be a list strings from enum_values. Use the flag on the command line multiple times to place multiple enum values into the list. The 'default' may be a single string (which will be converted into a single-element list) or a list of strings. Args: name: str, the flag name. default: Union[Iterable[Text], Text, None], the default value of the flag; see `DEFINE_multi`. enum_values: [str], a non-empty list of strings with the possible values for the flag. help: str, the help message. flag_values: FlagValues, the FlagValues instance with which the flag will be registered. This should almost never need to be overridden. case_sensitive: Whether or not the enum is to be case-sensitive. **args: Dictionary with extra keyword args that are passed to the Flag __init__.
codesearchnet
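A hedged usage sketch through absl's public flags module, parsing an explicit argv list so it can run outside app.run():

from absl import flags

FLAGS = flags.FLAGS
flags.DEFINE_multi_enum("color", ["red"], ["red", "green", "blue"],
                        "Colors to draw; repeat the flag to pass several values.")

FLAGS(["prog", "--color=green", "--color=blue"])   # parse an explicit argv
assert FLAGS.color == ["green", "blue"]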
def parse_google_format_docstring(docstring: str) -> tuple[Optional[str], Optional[dict], Optional[str]]: description_match = description_re.search(docstring) args_match = args_re.search(docstring) returns_match = returns_re.search(docstring) description = description_match.group(1).strip() if description_match else None docstring_args = args_match.group(1).strip() if args_match else None returns = returns_match.group(1).strip() if returns_match else None if docstring_args is not None: docstring_args = '\n'.join([line for line in docstring_args.split('\n') if line.strip()]) matches = args_split_re.findall(docstring_args) args_dict = {match[0]: re.sub('\\s*\\n+\\s*', ' ', match[1].strip()) for match in matches} else: args_dict = {} return (description, args_dict, returns)
Parses a Google-style docstring to extract the function description, argument descriptions, and return description. Args: docstring (str): The docstring to parse. Returns: The function description, arguments, and return description.
github-repos
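A hedged sketch of how the parser above is typically driven; the exact argument formatting in args_dict depends on the module-level regexes, which are not shown here:

docstring = """Adds two numbers.

    Args:
        a: The first operand.
        b: The second operand.

    Returns:
        The sum of a and b.
    """

description, args_dict, returns = parse_google_format_docstring(docstring)
# Expected roughly: description == "Adds two numbers.",
# args_dict maps "a" and "b" to their descriptions, returns == "The sum of a and b."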
def add_stream_logger(level=logging.DEBUG, name=None): logger = logging.getLogger(name) logger.setLevel(level) handler = logging.StreamHandler() handler.setFormatter(get_default_log_formatter()) handler.setLevel(level) logger.addHandler(handler)
Add a stream logger. This can be used for printing all SDK calls to stdout while working in an interactive session. Note this is a logger for the entire module, which will apply to all environments started in the same session. If you need a specific logger pass a ``logfile`` to :func:`~sdk.init` Args: level(int): :mod:`logging` log level name(str): logger name, will default to the root logger. Returns: None
juraj-google-style
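A hedged usage sketch; it assumes the helper above is importable and simply attaches a console handler to the named logger ("sdk" is a placeholder name):

import logging

add_stream_logger(level=logging.INFO, name="sdk")
logging.getLogger("sdk").info("SDK call: start_env()")  # now echoed to the console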
def AddComment(self, comment): if not comment: return if not self.comment: self.comment = comment else: self.comment = ''.join([self.comment, comment])
Adds a comment to the event tag. Args: comment (str): comment.
juraj-google-style
def _CompareFields(field, other_field): field_attrs = _GetFieldAttributes(field) other_field_attrs = _GetFieldAttributes(other_field) if field_attrs != other_field_attrs: return False return field.__class__ == other_field.__class__
Checks if two ProtoRPC fields are "equal". Compares the arguments, rather than the id of the elements (which is the default __eq__ behavior) as well as the class of the fields. Args: field: A ProtoRPC message field to be compared. other_field: A ProtoRPC message field to be compared. Returns: Boolean indicating whether the fields are equal.
juraj-google-style
def setNetworkDataRequirement(self, eDataRequirement): print('%s call setNetworkDataRequirement' % self.port) print(eDataRequirement) if eDataRequirement == Device_Data_Requirement.ALL_DATA: self.networkDataRequirement = 'n' return True
Set whether the Thread device requires the full network data or only the stable network data. Args: eDataRequirement: True if the full network data is required. Returns: True: successfully set the network data requirement
juraj-google-style
def broadcast(cls, shape1: 'TensorFluentShape', shape2: 'TensorFluentShape') -> Tuple[Reshaping, Reshaping]: reshape_1, reshape_2 = None, None if not (shape1._batch or shape2._batch): return reshape_1, reshape_2 size_1, size_2 = shape1.fluent_size, shape2.fluent_size size_diff = abs(size_1 - size_2) if size_diff == 0: return reshape_1, reshape_2 if size_2 > size_1 and not (size_1 == 0 and not shape1._batch): reshape_1 = [1] * size_diff + list(shape1.fluent_shape) if shape1._batch: reshape_1 = [shape1.batch_size] + reshape_1 elif size_1 > size_2 and not (size_2 == 0 and not shape2._batch): reshape_2 = [1] * size_diff + list(shape2.fluent_shape) if shape2._batch: reshape_2 = [shape2.batch_size] + reshape_2 return reshape_1, reshape_2
Broadcasts the fluent shapes if any input is in batch mode, expanding dimensions as necessary, and returns a tuple with the new shapes. If no input shape is in batch mode, (None, None) is returned. If an input shape does not need to be changed, None is returned for it. Args: shape1: A fluent's shape. shape2: A fluent's shape. Returns: A pair of new shapes.
juraj-google-style
def from_string(contents): if contents[-1] != '\n': contents += '\n' white_space = '[ \\t\\r\\f\\v]' natoms_line = white_space + '*\\d+' + white_space + '*\\n' comment_line = '[^\\n]*\\n' coord_lines = '(\\s*\\w+\\s+[0-9\\-\\+\\.eEdD]+\\s+[0-9\\-\\+\\.eEdD]+\\s+[0-9\\-\\+\\.eEdD]+\\s*\\n)+' frame_pattern_text = natoms_line + comment_line + coord_lines pat = re.compile(frame_pattern_text, re.MULTILINE) mols = [] for xyz_match in pat.finditer(contents): xyz_text = xyz_match.group(0) mols.append(XYZ._from_frame_string(xyz_text)) return XYZ(mols)
Creates XYZ object from a string. Args: contents: String representing an XYZ file. Returns: XYZ object
codesearchnet
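A hedged usage sketch with a single water-molecule frame; pymatgen's XYZ class is assumed to be importable as in the snippet (newer releases spell the constructor from_str):

from pymatgen.io.xyz import XYZ

contents = """3
water
O 0.000  0.000  0.117
H 0.000  0.757 -0.470
H 0.000 -0.757 -0.470
"""
xyz = XYZ.from_string(contents)
print(len(xyz.molecule))   # -> 3 sites in the first (and only) frame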
def get_climate(self, device_label): response = None try: response = requests.get( urls.climate(self._giid), headers={ 'Accept': 'application/json, text/javascript, */*; q=0.01', 'Cookie': 'vid={}'.format(self._vid)}, params={ "deviceLabel": device_label}) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) return json.loads(response.text)
Get climate history. Args: device_label: device label of the climate device. Returns: Parsed JSON response containing the climate history.
juraj-google-style
def singleprint_from_saved_model_proto(export_dir: str) -> str: try: return fingerprinting_pywrap.SingleprintFromSM(export_dir) except FingerprintException as e: raise ValueError(e) from None
Returns the singleprint of `saved_model.pb` in `export_dir`. Args: export_dir: The directory that contains `saved_model.pb`. Returns: A string containing the singleprint of `saved_model.pb` in `export_dir`. Raises: ValueError: If a valid singleprint cannot be constructed from `saved_model.pb`.
github-repos
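A hedged usage sketch; the export directory is a placeholder and must already contain a saved_model.pb:

export_dir = "/tmp/my_saved_model"    # hypothetical path to an exported SavedModel
try:
    print(singleprint_from_saved_model_proto(export_dir))
except ValueError as err:
    print(f"could not fingerprint {export_dir}: {err}")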
def call(self, func, *args, **kwargs): for timer in self: with timer: func(*args, **kwargs) return self
Alternative way to time a simple function call using condensed syntax. Returns: self (timerit.Timerit): Use `min` or `mean` to get a scalar. Use `print` to output a report to stdout. Example: >>> import math >>> time = Timerit(num=10).call(math.factorial, 50).min() >>> assert time > 0
codesearchnet
def _check_properties(cls, property_names, require_indexed=True): assert isinstance(property_names, (list, tuple)), repr(property_names) for name in property_names: assert isinstance(name, basestring), repr(name) if '.' in name: name, rest = name.split('.', 1) else: rest = None prop = cls._properties.get(name) if prop is None: cls._unknown_property(name) else: prop._check_property(rest, require_indexed=require_indexed)
Internal helper to check the given properties exist and meet specified requirements. Called from query.py. Args: property_names: List or tuple of property names -- each being a string, possibly containing dots (to address subproperties of structured properties). Raises: InvalidPropertyError if one of the properties is invalid. AssertionError if the argument is not a list or tuple of strings.
codesearchnet
def trace_start(self): cmd = enums.JLinkTraceCommand.START res = self._dll.JLINKARM_TRACE_Control(cmd, 0) if (res == 1): raise errors.JLinkException('Failed to start trace.') return None
Starts collecting trace data. Args: self (JLink): the ``JLink`` instance. Returns: ``None``
juraj-google-style
def initialize_typeshed_or_die(): try: return typeshed.Typeshed() except OSError as e: logging.critical(str(e)) sys.exit(1)
Initialize a Typeshed object or die. Returns: An instance of Typeshed()
github-repos
def __init__(self, name=None, url=None, timezone=None, id=None, email=None, field_dict=None, lang=None, **kwargs): self._schedule = None if not field_dict: if name: kwargs['agency_name'] = name if url: kwargs['agency_url'] = url if timezone: kwargs['agency_timezone'] = timezone if id: kwargs['agency_id'] = id if lang: kwargs['agency_lang'] = lang if email: kwargs['agency_email'] = email field_dict = kwargs self.__dict__.update(field_dict)
Initialize a new Agency object. Args: field_dict: A dictionary mapping attribute name to unicode string name: a string, ignored when field_dict is present url: a string, ignored when field_dict is present timezone: a string, ignored when field_dict is present id: a string, ignored when field_dict is present email: a string, ignored when field_dict is present lang: a string, ignored when field_dict is present kwargs: arbitrary keyword arguments may be used to add attributes to the new object, ignored when field_dict is present
juraj-google-style
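A hedged construction sketch showing that the keyword form and the field_dict form of the initializer above populate the same attributes:

agency = Agency(name="City Transit", url="https://transit.example.org",
                timezone="America/Los_Angeles", id="CT")
same = Agency(field_dict={"agency_name": "City Transit",
                          "agency_url": "https://transit.example.org",
                          "agency_timezone": "America/Los_Angeles",
                          "agency_id": "CT"})
assert agency.agency_name == same.agency_name == "City Transit"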
def substitute(dict_, source): d_esc = (re.escape(k) for k in dict_.keys()) pattern = re.compile('|'.join(d_esc)) return pattern.sub(lambda x: dict_[x.group()], source)
Perform re.sub with the patterns in the given dict. Args: dict_: {pattern: repl} source: str Returns: str: source with every occurrence of the dict's keys replaced by the corresponding values.
juraj-google-style
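A hedged usage sketch of the single-pass replacement, assuming the helper above (and its re import) is in scope; because the keys are re.escape'd, they are matched literally:

mapping = {"${USER}": "alice", "${HOME}": "/home/alice"}
result = substitute(mapping, "cd ${HOME} && su ${USER}")
assert result == "cd /home/alice && su alice"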