content — string, lengths 22 to 815k
id — int64, 0 to 4.91M
def expected_shd(posterior, ground_truth):
    """Compute the Expected Structural Hamming Distance.

    This function computes the Expected SHD between a posterior
    approximation given as a collection of samples from the posterior,
    and the ground-truth graph used in the original data generation
    process.

    Parameters
    ----------
    posterior : np.ndarray instance
        Posterior approximation. The array must have size `(B, N, N)`, where `B`
        is the number of sample graphs from the posterior approximation, and `N`
        is the number of variables in the graphs.

    ground_truth : np.ndarray instance
        Adjacency matrix of the ground-truth graph. The array must have size
        `(N, N)`, where `N` is the number of variables in the graph.

    Returns
    -------
    e_shd : float
        The Expected SHD.
    """
    # Compute the pairwise differences
    diff = np.abs(posterior - np.expand_dims(ground_truth, axis=0))
    diff = diff + diff.transpose((0, 2, 1))

    # Ignore double edges
    diff = np.minimum(diff, 1)

    shds = np.sum(diff, axis=(1, 2)) / 2
    return np.mean(shds)
5,500
def package_list_read(pkgpath):
    """Read package list"""
    try:
        with open(PACKAGE_LIST_FILE, 'r') as pkglistfile:
            return json.loads(pkglistfile.read())
    except Exception:
        return []
5,501
def hpat_pandas_series_le(self, other, level=None, fill_value=None, axis=0): """ Pandas Series method :meth:`pandas.Series.le` implementation. .. only:: developer Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op8 Parameters ---------- self: :class:`pandas.Series` input arg other: :obj:`pandas.Series`, :obj:`int` or :obj:`float` input arg level: :obj:`int` or name *unsupported* fill_value: :obj:`float` or None, default None *unsupported* axis: default 0 *unsupported* Returns ------- :obj:`pandas.Series` returns :obj:`pandas.Series` object """ _func_name = 'Method le().' if not isinstance(self, SeriesType): raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self)) if level is not None or fill_value is not None or axis != 0: raise TypingError('{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value, axis)) if isinstance(other, SeriesType): def hpat_pandas_series_le_impl(self, other): """ Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op8 """ return pandas.Series(self._data <= other._data) return hpat_pandas_series_le_impl if isinstance(other, types.Integer) or isinstance(other, types.Float): def hpat_pandas_series_le_impl(self, other): """ Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op8_integer_scalar Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op8_float_scalar """ return pandas.Series(self._data <= other) return hpat_pandas_series_le_impl raise TypingError('{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(_func_name, self, other))
5,502
def add_obs_info(telem, obs_stats): """ Add observation-specific information to a telemetry table (ok flag, and outlier flag). This is done as part of get_agasc_id_stats. It is a convenience for writing reports. :param telem: list of tables One or more telemetry tables (potentially many observations) :param obs_stats: table The result of calc_obs_stats. :return: """ logger.debug(' Adding observation info to telemetry...') obs_stats['obs_ok'] = ( (obs_stats['n'] > 10) & (obs_stats['f_track'] > 0.3) & (obs_stats['lf_variability_100s'] < 1) ) obs_stats['comments'] = np.zeros(len(obs_stats), dtype='<U80') telem = vstack(telem) telem['obs_ok'] = True telem['obs_outlier'] = False for s in obs_stats: obsid = s['obsid'] o = (telem['obsid'] == obsid) telem['obs_ok'][o] = np.ones(np.sum(o), dtype=bool) * s['obs_ok'] if (np.any(telem['ok'][o]) and s['f_track'] > 0 and np.isfinite(s['q75']) and np.isfinite(s['q25'])): iqr = s['q75'] - s['q25'] telem['obs_outlier'][o] = ( telem[o]['ok'] & (iqr > 0) & ((telem[o]['mags'] < s['q25'] - 1.5 * iqr) | (telem[o]['mags'] > s['q75'] + 1.5 * iqr)) ) logger.debug(f' Adding observation info to telemetry {obsid=}') return telem
5,503
def plot_greedy_kde_interval_2d(pts, levels, xmin=None, xmax=None, ymin=None, ymax=None, Nx=100, Ny=100, cmap=None, colors=None, *args, **kwargs):
    """Plots the given probability interval contours, using a greedy
    selection algorithm. Additional arguments passed to :func:`pp.contour`.

    The algorithm uses a two-step process (see `this document
    <https://dcc.ligo.org/LIGO-P1400054/public>`_) so that the resulting
    credible areas will be unbiased.

    :param pts: Array of shape ``(Npts, 2)`` that contains the points in question.
    :param levels: Sequence of levels (between 0 and 1) of probability intervals to plot.
    :param xmin: Minimum value in x. If ``None``, use minimum data value.
    :param xmax: Maximum value in x. If ``None``, use maximum data value.
    :param ymin: Minimum value in y. If ``None``, use minimum data value.
    :param ymax: Maximum value in y. If ``None``, use maximum data value.
    :param Nx: Number of subdivisions in x for contour plot. (Default 100.)
    :param Ny: Number of subdivisions in y for contour plot. (Default 100.)
    :param cmap: See :func:`pp.contour`.
    :param colors: See :func:`pp.contour`.
    """
    Npts = pts.shape[0]
    kde_pts = pts[:Npts//2, :]
    den_pts = pts[Npts//2:, :]

    Nkde = kde_pts.shape[0]
    Nden = den_pts.shape[0]

    kde = ss.gaussian_kde(kde_pts.T)
    den = kde(den_pts.T)
    densort = np.sort(den)[::-1]

    if xmin is None:
        xmin = np.min(pts[:, 0])
    if xmax is None:
        xmax = np.max(pts[:, 0])
    if ymin is None:
        ymin = np.min(pts[:, 1])
    if ymax is None:
        ymax = np.max(pts[:, 1])

    xs = np.linspace(xmin, xmax, Nx)
    ys = np.linspace(ymin, ymax, Ny)

    XS, YS = np.meshgrid(xs, ys)
    ZS = np.reshape(kde(np.row_stack((XS.flatten(), YS.flatten()))), (Nx, Ny))

    zvalues = []
    for level in levels:
        ilevel = int(Nden*level + 0.5)
        if ilevel >= Nden:
            ilevel = Nden - 1
        zvalues.append(densort[ilevel])

    pp.contour(XS, YS, ZS, zvalues, colors=colors, cmap=cmap, *args, **kwargs)
5,504
def map_view(request): """ Place to show off the new map view """ # Define view options view_options = MVView( projection='EPSG:4326', center=[-100, 40], zoom=3.5, maxZoom=18, minZoom=2 ) # Define drawing options drawing_options = MVDraw( controls=['Modify', 'Delete', 'Move', 'Point', 'LineString', 'Polygon', 'Box'], initial='Point', output_format='GeoJSON' ) # Define GeoJSON layer geojson_object = { 'type': 'FeatureCollection', 'crs': { 'type': 'name', 'properties': { 'name': 'EPSG:3857' } }, 'features': [ { 'type': 'Feature', 'geometry': { 'type': 'Point', 'coordinates': [0, 0] } }, { 'type': 'Feature', 'geometry': { 'type': 'LineString', 'coordinates': [[4e6, -2e6], [8e6, 2e6]] } }, { 'type': 'Feature', 'geometry': { 'type': 'Polygon', 'coordinates': [[[-5e6, -1e6], [-4e6, 1e6], [-3e6, -1e6]]] } } ] } # Define layers map_layers = [] geojson_layer = MVLayer(source='GeoJSON', options=geojson_object, editable=False, legend_title='Test GeoJSON', legend_extent=[-46.7, -48.5, 74, 59], legend_classes=[ MVLegendClass('polygon', 'Polygons', fill='rgba(255,255,255,0.8)', stroke='#3d9dcd'), MVLegendClass('line', 'Lines', stroke='#3d9dcd') ]) map_layers.append(geojson_layer) if get_geoserver_wms(): # Define GeoServer Layer geoserver_layer = MVLayer(source='ImageWMS', options={'url': get_geoserver_wms(), 'params': {'LAYERS': 'topp:states'}, 'serverType': 'geoserver'}, legend_title='USA Population', legend_extent=[-126, 24.5, -66.2, 49], legend_classes=[ MVLegendClass('polygon', 'Low Density', fill='#00ff00', stroke='#000000'), MVLegendClass('polygon', 'Medium Density', fill='#ff0000', stroke='#000000'), MVLegendClass('polygon', 'High Density', fill='#0000ff', stroke='#000000') ]) map_layers.append(geoserver_layer) # Define KML Layer kml_layer = MVLayer(source='KML', options={'url': '/static/tethys_gizmos/data/model.kml'}, legend_title='Park City Watershed', legend_extent=[-111.60, 40.57, -111.43, 40.70], legend_classes=[ MVLegendClass('polygon', 'Watershed Boundary', fill='#ff8000'), MVLegendClass('line', 'Stream Network', stroke='#0000ff'), ]) map_layers.append(kml_layer) # Tiled ArcGIS REST Layer arc_gis_layer = MVLayer(source='TileArcGISRest', options={'url': 'http://sampleserver1.arcgisonline.com/ArcGIS/rest/services/' + 'Specialty/ESRI_StateCityHighway_USA/MapServer'}, legend_title='ESRI USA Highway', legend_extent=[-173, 17, -65, 72]) map_layers.append(arc_gis_layer) # Define map view options map_view_options = MapView( height='600px', width='100%', controls=['ZoomSlider', 'Rotate', 'FullScreen', {'MousePosition': {'projection': 'EPSG:4326'}}, {'ZoomToExtent': {'projection': 'EPSG:4326', 'extent': [-130, 22, -65, 54]}}], layers=map_layers, view=view_options, basemap='OpenStreetMap', draw=drawing_options, legend=True ) submitted_geometry = request.POST.get('geometry', None) if submitted_geometry is not None: messages.info(request, submitted_geometry) context = {'map_view': map_view_options} return render(request, 'tethys_gizmos/gizmo_showcase/map_view.html', context)
5,505
def test_auto_linebreaks_no_ignore_lf(get_lcd): """ Do not ignore manual \n after auto linebreak. """ lcd = get_lcd(16, 2, True) lcd.write_string('a' * 16) # Fill up line lcd.write_string('\nb') assert lcd._content[0] == [98, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97] assert lcd._content[1] == [SP, SP, SP, SP, SP, SP, SP, SP, SP, SP, SP, SP, SP, SP, SP, SP]
5,506
def decompose_jamo(compound):
    """Return a tuple of jamo character constituents of a compound.
    Note: Non-compound characters are echoed back.

    WARNING: Archaic jamo compounds will raise NotImplementedError.
    """
    if len(compound) != 1:
        raise TypeError("decompose_jamo() expects a single character, but received {} of length {}".format(type(compound), len(compound)))
    if compound not in JAMO_COMPOUNDS:
        # Strict version:
        # raise TypeError("decompose_jamo() expects a compound jamo,",
        #                 "but received", compound)
        return compound
    return _JAMO_TO_COMPONENTS.get(compound, compound)
5,507
def test_wrong_input():
    """Test all kinds of wrong inputs."""
    with pytest.raises(ValueError):
        HiddenLayerHandle(method="potato")(n_in=10, n_out=10, n_sample=100)
5,508
def cp_in_drive( source_id: str, dest_title: Optional[str] = None, parent_dir_id: Optional[str] = None, ) -> DiyGDriveFile: """Copy a specified file in Google Drive and return the created file.""" drive = create_diy_gdrive() if dest_title is None: dest_title = build_dest_title(drive, source_id) return drive.copy_file(source_id, dest_title, parent_dir_id)
5,509
def label_tuning( text_embeddings, text_labels, label_embeddings, n_steps: int, reg_coefficient: float, learning_rate: float, dropout: float, ) -> np.ndarray: """ With N as number of examples, K as number of classes, k as embedding dimension. Args: 'text_embeddings': float[N,k] of embedded texts 'text_labels': float[N,K] class score for each example. 'label_embeddings': float[K,k] class embeddings Returns: float[K,k] updated class embeddings """ if text_embeddings.shape[0] == 0: raise ValueError(text_embeddings.shape) if label_embeddings.shape[0] == 0: raise ValueError(label_embeddings.shape) text_embeddings = tf.constant(text_embeddings) text_labels = tf.constant(text_labels) label_embeddings = tf.constant(label_embeddings) init_label_embeddings = label_embeddings for i in range(n_steps): with tf.GradientTape() as tape: tape.watch(label_embeddings) dot_loss = _get_loss( text_embeddings, text_labels, label_embeddings, dropout=dropout, ) drift_loss = tf.reduce_mean( (label_embeddings - init_label_embeddings) ** 2 ) total_loss = dot_loss + reg_coefficient * drift_loss gradient = tape.gradient(total_loss + drift_loss, label_embeddings) label_embeddings = label_embeddings - (learning_rate * gradient) label_embeddings = label_embeddings.numpy() return label_embeddings
5,510
def create_nan_filter(tensor):
    """Creates a layer which replaces NaNs with zeros."""
    return tf.where(tf.is_nan(tensor), tf.zeros_like(tensor), tensor)
5,511
def requestor_is_superuser(requestor):
    """Return True if requestor is superuser."""
    return getattr(requestor, "is_superuser", False)
5,512
def run_inital_basgra(basali, weed_dm_frac, harv_targ, harv_trig, freq):
    """
    run an initial test
    :param basali:
    :param weed_dm_frac:
    :return:
    """
    params, matrix_weather, days_harvest, doy_irr = get_input_data(basali, weed_dm_frac, harv_targ=harv_targ, harv_trig=harv_trig, freq=freq)
    temp = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=False)
    out = {'temp': temp}
    temp.to_csv(r"C:\Users\Matt Hanson\Downloads\test_get_time.csv")
    plot_multiple_results(out, out_vars=['DM', 'YIELD', 'DMH_RYE', 'DM_RYE_RM', 'IRRIG', 'PAW', 'DMH', 'BASAL'])
5,513
def process(business: Business,  # pylint: disable=too-many-branches
            filing: Dict,
            filing_rec: Filing,
            filing_meta: FilingMeta):  # pylint: disable=too-many-branches
    """Process the incoming historic conversion filing."""
    # Extract the filing information for incorporation
    if not (conversion_filing := filing.get('filing', {}).get('conversion')):
        raise QueueException(f'CONVL legal_filing:conversion missing from {filing_rec.id}')
    if business:
        raise QueueException(f'Business Already Exists: CONVL legal_filing:conversion {filing_rec.id}')
    if not (corp_num := filing.get('filing', {}).get('business', {}).get('identifier')):
        raise QueueException(f'conversion {filing_rec.id} missing the business identifier.')
    # Initial insert of the business record
    business_info_obj = conversion_filing.get('nameRequest')
    if not (business := business_info.update_business_info(corp_num, Business(), business_info_obj, filing_rec)):
        raise QueueException(f'CONVL conversion {filing_rec.id}, Unable to create business.')
    if offices := conversion_filing.get('offices'):
        update_offices(business, offices)
    if parties := conversion_filing.get('parties'):
        update_parties(business, parties)
    if share_structure := conversion_filing.get('shareStructure'):
        shares.update_share_structure(business, share_structure)
    if name_translations := conversion_filing.get('nameTranslations'):
        aliases.update_aliases(business, name_translations)
    return business, filing_rec
5,514
def pid(text):
    """Print text if global debug flag set to true"""
    if global_debug:
        print(text)
5,515
def est_const_bsl(bsl,starttime=None,endtime=None,intercept=False,val_tw=None): """Performs a linear regression (assuming the intercept at the origin). The corresponding formula is tt-S*1/v-c = 0 in which tt is the travel time of the acoustic signal in seconds and 1/v is the reciprocal of the harmonic mean of the sound speed. The slope S is equal to the constant baseline length and by default c is assumed to be 0, but can optionally also be determined (intercept=True). It needs: bsl ... pandas.Dataframe with ID of beacon 1 ('ID'), ID of beacon 2 ('range_ID'), calculated baseline lengths in metres ('bsl'), one way traveltime in seconds ('tt'), sound speed at beacon 1 ('ssp1') in metres per second, sound speed at beacon 2 ('ssp2') in metres per second, measured traveltime in milliseconds ('range'), turn around time in milliseconds ('TAT')(eventually harmonic mean of 'ssp1' and 'ssp2' ('hmssp') and reciprocal of harmonic mean of 'ssp1' and 'ssp2' ('1/v'); if they do not exist, they will be calculated) with corresponding times of measurement for beacon pair. starttime (optional) ... string with starttime of time window for estimation of constant baseline length (format: 'YYYY-mm-dd HH:MM:SS', default: first entry in bsl) endtime (optional) ... string with endtime of time window for estimation of constant baseline length (format: 'YYYY-mm-dd HH:MM:SS', default: last entry in bsl) intercept (optional) ... specify whether intercept should be set to 0 [False] or should be calculated [True] (default is False) val_tw (optional) ... specify time window for which estimated constant baseline length and standard deviation (as well as intercept) will be stored in returned pandas.Dataframe (format: ['YYYY-mm-dd HH:MM:SS', 'YYYY-mm-dd HH:MM:SS'], default is starttime and endtime) It returns: bsl ... pandas.Dataframe with ID of beacon 1 ('ID'), ID of beacon 2 ('range_ID'), calculated baseline lengths in metres ('bsl'), one way traveltime in seconds ('tt'), sound speed at beacon 1 ('ssp1') in metres per second, sound speed at beacon 2 ('ssp2') in metres per second, measured traveltime in milliseconds ('range'), turn around time in milliseconds ('TAT'), harmonic mean of 'ssp1' and 'ssp2' ('hmssp'), reciprocal of harmonic mean of 'ssp1' and 'ssp2' ('1/v'), constant baseline length ('bsl_const') in given time window and standard deviation of the measurements compared to the fitted line in seconds (sigma = sqrt(sum((tt-S*1/v)^2)/(len(1/v)-1)), 'std_dev_tt') in given time window (and intercept ('intercept') ) with corresponding times of measurement for beacon pair. 
""" # check if columns 'hmssp' and '1/v' (harmonic mean of sound speeds and its # reciprocal already exist in bsl and if not then calculate them if not set(['hmssp','1/v']).issubset(bsl.columns): bsl = calc_hmssp_recp_v(bsl) # end if not set(['hmssp','1/v']).issubset(bsl.columns): # copy bsl to new pandas.Dataframe to cut it in time bsl_new = bsl.copy() # check if time window for estimation of constant baseline length is given if starttime is not None: bsl_new = bsl_new.loc[starttime:] else: # set startime to first index in bsl starttime = bsl_new.index[0] # end if starttime is not None: if endtime is not None: bsl_new = bsl_new.loc[:endtime] else: # set endtime to last index in bsl endtime = bsl_new.index[-1] # end if endtime is not None: # the numpy function numpy.linalg.lstsq() needs x as (M,N) matrix if not intercept: x = bsl_new['1/v'][:,np.newaxis] else: x = np.array(([[bsl_new['1/v'][j], 1] for j in range(len(bsl_new))])) # end if not intercept: S,residuals,_,_ = np.linalg.lstsq(x,bsl_new['tt']) sigma = np.sqrt(residuals/(len(x)-1)) # set column 'bsl_const' for values between starttime and endtime to S and # column 'std_dev_tt' to estimated sigma in bsl if val_tw is not None: starttime = val_tw[0] endtime = val_tw[1] # end if val_tw is not None: if not intercept: bsl.loc[starttime:endtime,'bsl_const'] = S else: bsl.loc[starttime:endtime,'bsl_const'] = S[0] bsl.loc[starttime:endtime,'intercept'] = S[1] # end if not intercept: bsl.loc[starttime:endtime,'std_dev_tt'] = sigma return(bsl)
5,516
def GetExtractedFiles(Directory):
    """A generator that outputs all files in a directory"""
    for Thing in os.listdir(Directory):
        PathThing = os.path.join(Directory, Thing)
        if os.path.isdir(PathThing):
            for File in GetExtractedFiles(PathThing):
                yield File
        else:
            yield PathThing
5,517
def print_all(key = None): """ Prints out the complete list of physical_constants to the screen or one single value Parameters ---------- key : Python string or unicode Key in dictionary `physical_constants` Returns ------- None See Also -------- _constants : Contains the description of `physical_constants`, which, as a dictionary literal object, does not itself possess a docstring. """ column_width = [25, 20, 20, 20] table_width = (column_width[0] + column_width[1] + column_width[2] + column_width[3]) format_string = ('{0:<' + str(column_width[0]) + '}' + '{1:>' + str(column_width[1]) + '}' + '{2:>' + str(column_width[2]) + '}' + '{3:>' + str(column_width[3]) + '}') print(format_string.format('Name', 'Value', 'Units', 'Error')) print(('{:-^' + str(table_width) + '}').format('')) if key is None: for key in physical_constants: print(format_string.format(key, str(value(key)), unit(key), str(uncertainty(key)))) else: print(format_string.format(key, str(value(key)), unit(key), str(uncertainty(key))))
5,518
def SeasonUPdate(temp):
    """
    Update appliance characteristics given the change in season

    Parameters
    ----------
    temp (obj): appliance set object for an individual season

    Returns
    ----------
    app_expected_load (float): expected load power in Watts
    app_expected_dur (float): expected duration in hours
    appliance_set (list of appliance objects): appliance list for a given season
    t_delta_exp_dur (pandas datetime): expected appliance duration
    app_index (array): index for each appliance
    """
    app_expected_load = temp.app_expected_load
    app_expected_dur = temp.app_expected_dur
    appliance_set = temp.appliance_set
    t_delta_exp_dur = temp.t_delta_exp_dur
    app_index = np.arange(0, len(temp.appliance_set))

    return app_expected_load, app_expected_dur, appliance_set, t_delta_exp_dur, app_index
5,519
def pad_and_reshape(instr_spec, frame_length, F): """ :param instr_spec: :param frame_length: :param F: :returns: """ spec_shape = tf.shape(instr_spec) extension_row = tf.zeros((spec_shape[0], spec_shape[1], 1, spec_shape[-1])) n_extra_row = (frame_length) // 2 + 1 - F extension = tf.tile(extension_row, [1, 1, n_extra_row, 1]) extended_spec = tf.concat([instr_spec, extension], axis=2) old_shape = tf.shape(extended_spec) new_shape = tf.concat([ [old_shape[0] * old_shape[1]], old_shape[2:]], axis=0) processed_instr_spec = tf.reshape(extended_spec, new_shape) return processed_instr_spec
5,520
def GetExclusiveStorageForNodes(cfg, node_uuids): """Return the exclusive storage flag for all the given nodes. @type cfg: L{config.ConfigWriter} @param cfg: cluster configuration @type node_uuids: list or tuple @param node_uuids: node UUIDs for which to read the flag @rtype: dict @return: mapping from node uuids to exclusive storage flags @raise errors.OpPrereqError: if any given node name has no corresponding node """ getflag = lambda n: _GetExclusiveStorageFlag(cfg, n) flags = map(getflag, node_uuids) return dict(zip(node_uuids, flags))
5,521
def get_read_data(file, dic, keys): """ Assigns reads to labels""" r = csv.reader(open(file)) lines = list(r) vecs_forwards = [] labels_forwards = [] vecs_reverse = [] labels_reverse = [] for key in keys: for i in dic[key]: for j in lines: if i in j[0]: if '_2.fq' in j[0] or '_R2_' in j[0]: vecs_reverse.append(j[2:]) labels_reverse.append(key) else: vecs_forwards.append(j[2:]) labels_forwards.append(key) return np.array(vecs_forwards), np.array(labels_forwards), np.array(vecs_reverse), np.array(labels_reverse)
5,522
def removeDuplicates(listToRemoveFrom: list[str]):
    """Given list, returns list without duplicates"""
    listToReturn: list[str] = []
    for item in listToRemoveFrom:
        if item not in listToReturn:
            listToReturn.append(item)
    return listToReturn
5,523
def check_hms_angle(value): """ Validating function for angle sexagesimal representation in hours. Used in the rich_validator """ if isinstance(value, list): raise validate.ValidateError("expected value angle, found list") match = hms_angle_re.match(value) if not match: raise VdtAngleError("not a valid hour angle: %s" % value) return hms_to_angle(match.groups())
5,524
def adjust_learning_rate(optimizer, epoch, default_lr=0.1):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    lr = default_lr * (0.1 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
5,525
def test_matcher_without_allure( request: SubRequest, pytester: Pytester, ): """Test matcher without allure""" *_, not_implemented = test_cases() percent = (len(scenarios) - len(not_implemented)) * 100 // len(scenarios) with allure_unloaded(request): pytester_result = run_tests( pytester=pytester, testfile_path="matcher_pytester_test.py", additional_opts=["--sc-type", "test", "--sc-only", "--sc-target", percent], outcomes={"passed": 0}, ) with allure.step("Check summary for coverage percent"): # Last one line will be greetings while previous one with stats assert any( f"{percent}%" in outline for outline in pytester_result.outlines ), f'Should be "{percent}%" in outlines'
5,526
def test_merge_configs_extend_two(): """Ensure extension on first level is fine""" a = {'a': 1, 'b': 2} b = {'c': 3} m = resolve_config.merge_configs(a, b) assert m == {'a': 1, 'b': 2, 'c': 3}
5,527
def get_logger(module_name): """Generates a logger for each module of the project. By default, the logger logs debug-level information into a newscrapy.log file and info-level information in console. Parameters ---------- module_name: str The name of the module for which the logger should be generated, in snakecase. Returns ------- Logger A logger for a specific module. """ logger = logging.getLogger('%s_logger' % (module_name)) file_handler = logging.FileHandler('newscrapy.log') console_handler = logging.StreamHandler() file_formatter = logging.Formatter('%(asctime)s - %(name)s - ' '%(levelname)s - %(message)s') console_formatter = logging.Formatter('%(message)s') logger.setLevel(logging.DEBUG) file_handler.setLevel(logging.DEBUG) console_handler.setLevel(logging.INFO) file_handler.setFormatter(file_formatter) console_handler.setFormatter(console_formatter) logger.addHandler(file_handler) logger.addHandler(console_handler) return logger
5,528
def stats():
    """Retrieves the count of each object type.

    Returns:
        JSON object with the number of objects by type."""
    return jsonify({
        "amenities": storage.count("Amenity"),
        "cities": storage.count("City"),
        "places": storage.count("Place"),
        "reviews": storage.count("Review"),
        "states": storage.count("State"),
        "users": storage.count("User")
    })
5,529
def test_task_persist(_task): """ show that the task object is the same throughout """ _task.on_success = False # start False def on_success(task, result): task.on_success = True # change to True in success function task.callback(0, result) _task.call( task_callback, on_success=on_success, ) assert _task.on_success
5,530
def addflux2pix(px,py,pixels,fmod): """Usage: pixels=addflux2pix(px,py,pixels,fmod) Drizel Flux onto Pixels using a square PSF of pixel size unity px,py are the pixel position (integers) fmod is the flux calculated for (px,py) pixel and it has the same length as px and py pixels is the image. """ xmax = pixels.shape[0] #Size of pixel array ymax = pixels.shape[1] pxmh = px-0.5 #location of reference corner of PSF square pymh = py-0.5 dx = np.floor(px+0.5)-pxmh dy = np.floor(py+0.5)-pymh # Supposing right-left as x axis and up-down as y axis: # Lower left pixel npx = int(pxmh) #Numpy arrays start at zero npy = int(pymh) #print('n',npx,npy) if (npx >= 0) & (npx < xmax) & (npy >= 0) & (npy < ymax) : pixels[npx,npy]=pixels[npx,npy]+fmod*dx*dy #Same operations are done for the 3 pixels other neighbouring pixels # Lower right pixel npx = int(pxmh)+1 #Numpy arrays start at zero npy = int(pymh) if (npx >= 0) & (npx < xmax) & (npy >= 0) & (npy < ymax) : pixels[npx,npy]=pixels[npx,npy]+fmod*(1.0-dx)*dy # Upper left pixel npx = int(pxmh) #Numpy arrays start at zero npy = int(pymh)+1 if (npx >= 0) & (npx < xmax) & (npy >= 0) & (npy < ymax) : pixels[npx,npy]=pixels[npx,npy]+fmod*dx*(1.0-dy) # Upper right pixel npx = int(pxmh)+1 #Numpy arrays start at zero npy = int(pymh)+1 if (npx >= 0) & (npx < xmax) & (npy >= 0) & (npy < ymax) : pixels[npx,npy]=pixels[npx,npy]+fmod*(1.0-dx)*(1.0-dy) return pixels;
5,531
def get_dea_landsat_vrt_dict(feat_list):
    """
    this func is designed to take all relevant landsat bands
    on the dea public database for each scene in stac query.
    it results in a list of vrts for each band separately and
    maps them to a dict where band name is the key, list is
    the value pair.
    """
    # notify
    print('Getting landsat vrts for each relevant band.')

    # check features type, length
    if not isinstance(feat_list, list):
        raise TypeError('Features must be a list of xml objects.')
    elif not len(feat_list) > 0:
        raise ValueError('No features provided.')

    # required dea landsat ard band names
    bands = [
        'nbart_blue',
        'nbart_green',
        'nbart_red',
        'nbart_nir',
        'nbart_swir_1',
        'nbart_swir_2',
        'oa_fmask'
    ]

    # iter each band name and build associated vrt list
    band_vrts_dict = {}
    for band in bands:
        print('Building landsat vrt list for band: {}.'.format(band))
        band_vrts_dict[band] = make_vrt_list(feat_list, band=band)

    # notify and return
    print('Got {} landsat vrt band lists successfully.'.format(len(band_vrts_dict)))
    return band_vrts_dict
5,532
def test_validate_skip_inputs_fq_files_not_found(tmp_file): """ Test that non-existent :py:const:`riboviz.params.FQ_FILES` files in the presence of both :py:const:`riboviz.params.VALIDATE_ONLY` and :py:const:`riboviz.params.SKIP_INPUTS` returns a zero exit code. :param tmp_file: Path to temporary file, to write configuration to :type tmp_file: str or unicode """ with open(riboviz.test.VIGNETTE_CONFIG, 'r') as f: config = yaml.load(f, yaml.SafeLoader) config[params.FQ_FILES] = { "foo1": "foo1.fq", "foo2": "foo2.fq" } config[params.VALIDATE_ONLY] = True config[params.SKIP_INPUTS] = True with open(tmp_file, 'w') as f: yaml.dump(config, f) exit_code = run_nextflow(tmp_file) assert exit_code == 0, \ "Unexpected exit code %d" % exit_code
5,533
def test_spammers_error(mailchimp, mailchimp_member, err_response): """Integration test to validate an error is thrown when mailchimp returns error during subscription""" mailchimp.http_mock.post('https://us05.api.mailchimp.com/3.0/lists/test-list-id', json=err_response) with pytest.raises(MailchimpSubscriptionFailed) as exception: mailchimp.mass_subscribe( list_id='test-list-id', members=[mailchimp_member], ) assert 'f+localmachinetest@f213.in has' in str(exception) assert '(ERROR_GENERIC)' in str(exception)
5,534
def load_json() -> tuple[list["Team"], list["User"]]: """Load the Json file.""" logging.debug("Starting to load data file.") with open(".example.json") as file: data = json.load(file) if any(field not in data for field in REQUIRED_DATA_FIELDS): raise ValueError("Required field is missing.") team_mapping = {} users = [] for uid, user_data in data["users"].items(): if any(field not in user_data for field in REQUIRED_USER_FIELDS): raise ValueError("Required field is missing.") user = User(uid, **user_data) users.append(user) if user_data["team"] not in team_mapping: team_mapping[user_data["team"]] = [] team_mapping[user_data["team"]].append(user) teams = [] for tid, team_data in data["teams"].items(): if any(field not in team_data for field in REQUIRED_TEAM_FIELDS): raise ValueError("Required field is missing.") team = Team(tid, team_mapping.get(tid, []), None, **team_data) teams.append(team) for user in users: if user.team == tid: user.team = team if user.leader: if team.leader is not None: raise ValueError(f"Team {tid!r} has more than one leader.") team.leader = user for user in users: if isinstance(user.team, str): raise ValueError(f"Unknown team {user.team!r}") logging.debug("Data loaded.") return teams, users
5,535
def is_zh_l_bracket(uni_ch):
    """Check whether a unicode character is a Chinese (fullwidth) left parenthesis."""
    if uni_ch == u'\uff08':
        return True
    else:
        return False
5,536
def test_archive__ArchiveForm__3(search_data, browser, role): """It cannot be accessed by non-admin users.""" browser.login(role) browser.keyword_search('church') # There is no archive option which can be applied: assert (search_result_handlers_without_archive_for_editor == browser.getControl('Apply on selected persons').displayOptions) browser.assert_forbidden(browser.SEARCH_ARCHIVE_URL)
5,537
def petlink32_to_dynamic_projection_mMR(filename,n_packets,n_radial_bins,n_angles,n_sinograms,time_bins,n_axial,n_azimuthal,angles_axial,angles_azimuthal,size_u,size_v,n_u,n_v,span,n_segments,segments_sizes,michelogram_segments,michelogram_planes, status_callback): """Make dynamic compressed projection from list-mode data. """ descriptor = [ {'name':'filename', 'type':'string', 'value':filename ,'size':len(filename)}, {'name':'n_packets', 'type':'long', 'value':n_packets }, {'name':'n_radial_bins', 'type':'uint', 'value':n_radial_bins }, {'name':'n_angles', 'type':'uint', 'value':n_angles }, {'name':'n_sinograms', 'type':'uint', 'value':n_sinograms }, {'name':'n_time_bins', 'type':'uint', 'value':len(time_bins)-1 }, {'name':'time_bins', 'type':'array', 'value':np.int32(time_bins) }, {'name':'n_axial', 'type':'uint', 'value':n_axial }, {'name':'n_azimuthal', 'type':'uint', 'value':n_azimuthal }, {'name':'angles_axial', 'type':'array', 'value':angles_axial }, {'name':'angles_azimuthal', 'type':'array', 'value':angles_azimuthal }, {'name':'size_u', 'type':'float', 'value':size_u }, {'name':'size_v', 'type':'float', 'value':size_v }, {'name':'n_u', 'type':'uint', 'value':n_u }, {'name':'n_v', 'type':'uint', 'value':n_v }, {'name':'span', 'type':'uint', 'value':span }, {'name':'n_segments', 'type':'uint', 'value':n_segments }, {'name':'segments_sizes', 'type':'array', 'value':np.int32(segments_sizes) }, {'name':'michelogram_segments', 'type':'array', 'value':np.int32(michelogram_segments) }, {'name':'michelogram_planes', 'type':'array', 'value':np.int32(michelogram_planes) }, {'name':'status_callback', 'type':'function','value':status_callback, 'arg_types':['uint'] }, ] r = call_c_function( mMR_c.petlink32_to_dynamic_projection_mMR_michelogram, descriptor ) if not r.status == petlink.status_success(): raise ErrorInCFunction("The execution of 'petlink32_to_dynamic_projection_mMR_michelogram' was unsuccessful.",r.status,'mMR_c.petlink32_to_dynamic_projection_mMR') return r.dictionary
5,538
def is_core_recipe(component: Dict) -> bool:
    """
    Returns True if a recipe component contains a "Core Recipe" preparation.
    """
    preparations = component.get('recipeItem', {}).get('preparations') or []
    return any(prep.get('id') == PreparationEnum.CORE_RECIPE.value for prep in preparations)
5,539
def login_submit_step(context):
    """
    The cognito signin form is rendered in HTML twice for different screen
    sizes. The small screen version appears first in the HTML but is hidden
    by CSS. Without the .visible-md class this resolves the hidden form
    element and is unable to interact with the form.
    """
    elem = context.browser.find_element_by_css_selector(
        ".visible-md .modal-body #signInFormPassword"
    )
    elem.submit()
5,540
def build_estimator(output_dir, first_layer_size, num_layers, dropout, learning_rate, save_checkpoints_steps): """Builds and returns a DNN Estimator, defined by input parameters. Args: output_dir: string, directory to save Estimator. first_layer_size: int, size of first hidden layer of DNN. num_layers: int, number of hidden layers. dropout: float, dropout rate used in training. learning_rate: float, learning_rate used in training. save_checkpoints_steps: int, training steps to save Estimator. Returns: `Estimator` instance. """ # Sets head to default head for DNNClassifier with two classes. model_params = { 'head': head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(), 'feature_columns': [ tf.feature_column.numeric_column(c, shape=[]) for c in constants.FEATURE_COLUMNS ], 'hidden_units': [ max(int(first_layer_size / (pow(2, i))), 2) for i in range(int(num_layers)) ], 'dropout': dropout, 'optimizer': tf.train.AdagradOptimizer(learning_rate) } def _model_fn(features, labels, mode, params): """Build TF graph based on canned DNN classifier.""" key_column = features.pop(constants.KEY_COLUMN, None) if key_column is None: raise ValueError('Key is missing from features.') spec = _dnn_model_fn(features=features, labels=labels, mode=mode, **params) predictions = spec.predictions if predictions: predictions[constants.KEY_COLUMN] = tf.convert_to_tensor_or_sparse_tensor( key_column) spec = spec._replace(predictions=predictions) spec = spec._replace(export_outputs={ 'classes': tf.estimator.export.PredictOutput(predictions) }) return spec config = tf.estimator.RunConfig(save_checkpoints_steps=save_checkpoints_steps) return tf.estimator.Estimator( model_fn=_model_fn, model_dir=output_dir, config=config, params=model_params)
5,541
def importConfig():
    """Read the configuration file.

    Returns:
        tuple:
            str: interface,
            str: alexa_remote_control.sh path,
            list: device list
    """
    with open("config.json", "r", encoding="utf-8") as f:
        config = json.load(f)
    interface = config["interface"]
    if not interface:
        return False
    arc_path = config["arc_path"]
    devices = config["device_list"]
    return (interface, arc_path, devices)
5,542
def create_dataset(project_id): """Creates a dataset for the given Google Cloud project.""" from google.cloud import datalabeling_v1beta1 as datalabeling client = datalabeling.DataLabelingServiceClient() # [END datalabeling_create_dataset_beta] # If provided, use a provided test endpoint - this will prevent tests on # this snippet from triggering any action by a real human if 'DATALABELING_ENDPOINT' in os.environ: opts = ClientOptions(api_endpoint=os.getenv('DATALABELING_ENDPOINT')) client = datalabeling.DataLabelingServiceClient(client_options=opts) # [START datalabeling_create_dataset_beta] formatted_project_name = client.project_path(project_id) dataset = datalabeling.types.Dataset( display_name='YOUR_DATASET_SET_DISPLAY_NAME', description='YOUR_DESCRIPTION' ) response = client.create_dataset(formatted_project_name, dataset) # The format of resource name: # project_id/{project_id}/datasets/{dataset_id} print('The dataset resource name: {}'.format(response.name)) print('Display name: {}'.format(response.display_name)) print('Description: {}'.format(response.description)) print('Create time:') print('\tseconds: {}'.format(response.create_time.seconds)) print('\tnanos: {}\n'.format(response.create_time.nanos)) return response
5,543
def create_local_command(opts: Options, jobs: List[Dict[str, Any]], jobs_metadata: List[Options]) -> str: """Create a terminal command to run the jobs locally.""" cmd = "" for meta, job in zip(jobs_metadata, jobs): input_file = meta.input.absolute().as_posix() workdir = meta.workdir.absolute().as_posix() # Run locally cmd += f'cd {workdir} && {opts.command} {input_file} & ' return cmd
5,544
def test_write_tags(tmpdir): """Test writing tags from a FLAC to mp3 file.""" # Prepare. flac = tmpdir.mkdir('flac').join('song.flac').ensure(file=True) mp3 = tmpdir.mkdir('mp3').join('song.mp3').ensure(file=True) with open(os.path.join(os.path.dirname(__file__), '1khz_sine.flac'), 'rb') as f: flac.write(f.read(), 'wb') with open(os.path.join(os.path.dirname(__file__), '1khz_sine.mp3'), 'rb') as f: mp3.write(f.read(), 'wb') flac, mp3 = str(flac.realpath()), str(mp3.realpath()) tags = FLAC(flac) tags.update(dict(artist='Artist2', date='2012', album='Album', tracknumber='01', title='Title', unsyncedlyrics='L')) image = Picture() image.type, image.mime = 3, 'image/jpeg' with open(os.path.join(os.path.dirname(__file__), '1_album_art.jpg'), 'rb') as f: image.data = f.read() tags.add_picture(image) tags.save() # Test. ConvertFiles.write_tags(flac, mp3) # Check. id3 = ID3(mp3) assert 'Artist2' == id3['TPE1'] assert '2012' == id3['TDRC'] assert 'Album' == id3['TALB'] assert '01' == id3['TRCK'] assert 'Title' == id3['TIT2'] assert 'L' == id3["USLT:Lyrics:'eng'"].text with open(os.path.join(os.path.dirname(__file__), '1_album_art.jpg'), 'rb') as f: assert f.read() == id3['APIC:'].data assert ({}, [], [], []) == find_files(str(tmpdir.join('flac')), str(tmpdir.join('mp3')))
5,545
def time_for_log() -> str:
    """Return the current time, formatted for the bot's log prints"""
    return time.strftime("%d/%m %H:%M:%S - ")
5,546
def reset_session(self, request):
    """
    Resets the session present in the current request; to reset the
    session is to unset it from the request.

    This method is useful for situations where a new session context is
    required or one is meant to be created always.

    :type request: Request
    :param request: The request to be used.
    """
    # resets the session removing it from the request
    # this allows subsequent calls to create a new session
    request.reset_session()
5,547
def _is_int(n) -> bool:
    """
    Check whether the given number n is an integer; any fractional part of n
    smaller than epsilon is ignored. Returns True if it is, otherwise False.

    :param n: the number to check
    :return: True if n is an integer, False otherwise
    """
    return (n - math.floor(n) < _epsilon) or (math.ceil(n) - n < _epsilon)
5,548
def _cpp_het_stat(A, t_stop, rates, t_start=0. * pq.ms): """ Generate a Compound Poisson Process (CPP) with amplitude distribution A and heterogeneous firing rates r=r[0], r[1], ..., r[-1]. Parameters ---------- A : np.ndarray CPP's amplitude distribution. A[j] represents the probability of a synchronous event of size j among the generated spike trains. The sum over all entries of A must be equal to one. t_stop : pq.Quantity The end time of the output spike trains rates : pq.Quantity Array of firing rates of each spike train generated with t_start : pq.Quantity, optional The start time of the output spike trains Default: 0 pq.ms Returns ------- list of neo.SpikeTrain List of neo.SpikeTrains with different firing rates, forming a CPP with amplitude distribution `A`. """ # Computation of Parameters of the two CPPs that will be merged # (uncorrelated with heterog. rates + correlated with homog. rates) n_spiketrains = len(rates) # number of output spike trains # amplitude expectation expected_amplitude = np.dot(A, np.arange(n_spiketrains + 1)) r_sum = np.sum(rates) # sum of all output firing rates r_min = np.min(rates) # minimum of the firing rates # rate of the uncorrelated CPP r_uncorrelated = r_sum - n_spiketrains * r_min # rate of the correlated CPP r_correlated = r_sum / expected_amplitude - r_uncorrelated # rate of the hidden mother process r_mother = r_uncorrelated + r_correlated # Check the analytical constraint for the amplitude distribution if A[1] < (r_uncorrelated / r_mother).rescale( pq.dimensionless).magnitude: raise ValueError('A[1] too small / A[i], i>1 too high') # Compute the amplitude distribution of the correlated CPP, and generate it A = A * (r_mother / r_correlated).magnitude A[1] = A[1] - r_uncorrelated / r_correlated compound_poisson_spiketrains = _cpp_hom_stat( A, t_stop, r_min, t_start) # Generate the independent heterogeneous Poisson processes poisson_spiketrains = \ [homogeneous_poisson_process(rate - r_min, t_start, t_stop) for rate in rates] # Pool the correlated CPP and the corresponding Poisson processes return [_pool_two_spiketrains(compound_poisson_spiketrain, poisson_spiketrain) for compound_poisson_spiketrain, poisson_spiketrain in zip(compound_poisson_spiketrains, poisson_spiketrains)]
5,549
def return_bad_parameter_config() -> CloudSettings: """Return a wrongly configured cloud config class.""" CloudSettingsTest = CloudSettings( # noqa: N806 settings_order=[ "init_settings", "aws_parameter_setting", "file_secret_settings", "env_settings", ] ) # noqa: N806 class AWSSettings(CloudSettingsTest): # type: ignore test: str = "Cool" prefix_test_store: str = "" return AWSSettings()
5,550
def update(isamAppliance, instance_id, id, filename=None, contents=None, check_mode=False, force=False): """ Update a file in the administration pages root :param isamAppliance: :param instance_id: :param id: :param name: :param contents: :param check_mode: :param force: :return: """ if force is True or _check_file(isamAppliance, instance_id, id) is True: if check_mode is True: return isamAppliance.create_return_object(changed=True) else: if filename is not None: return isamAppliance.invoke_put_files( "Update a file in the administration page root", "/wga/reverseproxy/{0}/management_root/{1}".format(instance_id, id), [ { 'file_formfield': 'file', 'filename': filename, 'mimetype': 'application/octet-stream' } ], { 'file': filename, 'type': 'file' }) elif contents is not None: return isamAppliance.invoke_put_files( "Update a file in the administration page root", "/wga/reverseproxy/{0}/management_root/{1}".format(instance_id, id), { 'contents': contents, 'type': 'file' }) else: return isamAppliance.create_return_object( warnings=["Either contents or filename parameter need to be provided. Skipping update request."])
5,551
def view_deflate_encoded_content():
    """Returns Deflate-encoded data.
    ---
    tags:
      - Response formats
    produces:
      - application/json
    responses:
      200:
        description: Deflate-encoded data.
    """
    return jsonify(get_dict("origin", "headers", method=request.method, deflated=True))
5,552
def predict_from_word_vectors_matrix(tokens, matrix, nlp, POS="NOUN", top_number=constants.DEFAULT_TOP_ASSOCIATIONS): """ Make a prediction based on the word vectors :param tokens: :param matrix: :param nlp: :param POS: :param top_number: :return: """ vector_results = collect_word_vector_associations(tokens, matrix) top_results = get_top_results(vector_results, nlp, top_number, POS) return top_results
5,553
def get_luis_keys(): """Retrieve Keys for LUIS app""" load_dotenv() key = os.getenv("LUIS_KEY") region = os.getenv("LUIS_REGION") app_id = os.getenv("LUIS_APP_ID") return key, region, app_id
5,554
def xls_to_dict(path_or_file): """ Return a Python dictionary with a key for each worksheet name. For each sheet there is a list of dictionaries, each dictionary corresponds to a single row in the worksheet. A dictionary has keys taken from the column headers and values equal to the cell value for that row and column. All the keys and leaf elements are unicode text. """ try: if isinstance(path_or_file, basestring): workbook = xlrd.open_workbook(filename=path_or_file) else: workbook = xlrd.open_workbook(file_contents=path_or_file.read()) except XLRDError as error: raise PyXFormError("Error reading .xls file: %s" % error) def xls_to_dict_normal_sheet(sheet): def iswhitespace(string): return isinstance(string, basestring) and len(string.strip()) == 0 # Check for duplicate column headers column_header_list = list() for column in range(0, sheet.ncols): column_header = sheet.cell_value(0, column) if column_header in column_header_list: raise PyXFormError("Duplicate column header: %s" % column_header) # xls file with 3 columns mostly have a 3 more columns that are # blank by default or something, skip during check if column_header is not None: if not iswhitespace(column_header): # strip whitespaces from the header clean_header = re.sub(r"( )+", " ", column_header.strip()) column_header_list.append(clean_header) result = [] for row in range(1, sheet.nrows): row_dict = OrderedDict() for column in range(0, sheet.ncols): # Changing to cell_value function # convert to string, in case it is not string key = "%s" % sheet.cell_value(0, column) key = key.strip() value = sheet.cell_value(row, column) # remove whitespace at the beginning and end of value if isinstance(value, basestring): value = value.strip() value_type = sheet.cell_type(row, column) if value is not None: if not iswhitespace(value): try: row_dict[key] = xls_value_to_unicode( value, value_type, workbook.datemode ) except XLDateAmbiguous: raise PyXFormError( XL_DATE_AMBIGOUS_MSG % (sheet.name, column_header, row) ) # Taking this condition out so I can get accurate row numbers. # TODO: Do the same for csvs # if row_dict != {}: result.append(row_dict) return result, _list_to_dict_list(column_header_list) def xls_value_from_sheet(sheet, row, column): value = sheet.cell_value(row, column) value_type = sheet.cell_type(row, column) if value is not None and value != "": try: return xls_value_to_unicode(value, value_type, workbook.datemode) except XLDateAmbiguous: raise PyXFormError(XL_DATE_AMBIGOUS_MSG % (sheet.name, column, row)) else: raise PyXFormError("Empty Value") result = OrderedDict() for sheet in workbook.sheets(): # Note that the sheet exists but do no further processing here. result[sheet.name] = [] # Do not process sheets that have nothing to do with XLSForm. if sheet.name not in constants.SUPPORTED_SHEET_NAMES: if len(workbook.sheets()) == 1: ( result[constants.SURVEY], result["%s_header" % constants.SURVEY], ) = xls_to_dict_normal_sheet(sheet) else: continue else: ( result[sheet.name], result["%s_header" % sheet.name], ) = xls_to_dict_normal_sheet(sheet) return result
5,555
def blendImg(img_a, img_b, α=0.8, β=1., γ=0.): """ The result image is computed as follows: img_a * α + img_b * β + γ """ return cv2.addWeighted(img_a, α, img_b, β, γ)
5,556
def setup(app): """Sets up the extension""" app.add_autodocumenter(documenters.FunctionDocumenter) app.add_config_value( "autoclass_content", "class", True, ENUM("both", "class", "init") ) app.add_config_value( "autodoc_member_order", "alphabetical", True, ENUM("alphabetic", "alphabetical", "bysource", "groupwise"), ) app.add_config_value("autodoc_default_options", {}, True) app.add_config_value("autodoc_docstring_signature", True, True) app.add_config_value("autodoc_mock_imports", [], True) app.add_config_value( "autodoc_typehints", "signature", True, ENUM("signature", "description", "none") ) app.add_config_value("autodoc_type_aliases", {}, True) app.add_config_value("autodoc_warningiserror", True, True) app.add_config_value("autodoc_inherit_docstrings", True, True) app.add_event("autodoc-before-process-signature") app.add_event("autodoc-process-docstring") app.add_event("autodoc-process-signature") app.add_event("autodoc-skip-member") app.connect("config-inited", migrate_autodoc_member_order, priority=800) app.setup_extension("sphinx.ext.autodoc.type_comment") app.setup_extension("sphinx.ext.autodoc.typehints") return {"version": sphinx.__display_version__, "parallel_read_safe": True}
5,557
def test_register_invalid_transfer(raiden_network, settle_timeout): """ Regression test for registration of invalid transfer. The bug occurred if a transfer with an invalid allowance but a valid secret was registered, when the local end registered the transfer it would "unlock" the partners' token, but the transfer wouldn't be sent because the allowance check failed, leaving the channel in an inconsistent state. """ app0, app1 = raiden_network # pylint: disable=unbalanced-tuple-unpacking graph0 = app0.raiden.channelgraphs.values()[0] graph1 = app1.raiden.channelgraphs.values()[0] channel0 = graph0.partneraddress_channel.values()[0] channel1 = graph1.partneraddress_channel.values()[0] balance0 = channel0.balance balance1 = channel1.balance amount = 10 block_number = app0.raiden.chain.block_number() expiration = block_number + settle_timeout - 1 secret = 'secret' hashlock = sha3(secret) transfer1 = channel0.create_mediatedtransfer( block_number, transfer_initiator=app0.raiden.address, transfer_target=app1.raiden.address, fee=0, amount=amount, identifier=1, expiration=expiration, hashlock=hashlock, ) # register a locked transfer app0.raiden.sign(transfer1) channel0.register_transfer( app0.raiden.chain.block_number(), transfer1, ) channel1.register_transfer( app1.raiden.chain.block_number(), transfer1, ) # assert the locked transfer is registered assert_synched_channels( channel0, balance0, [], channel1, balance1, [transfer1.lock], ) # handcrafted transfer because channel.create_transfer won't create it transfer2 = DirectTransfer( 1, # TODO: fill in identifier nonce=channel0.our_state.nonce, token=channel0.token_address, transferred_amount=channel1.balance + balance0 + amount, recipient=channel0.partner_state.address, locksroot=channel0.partner_state.balance_proof.merkleroot_for_unclaimed(), ) app0.raiden.sign(transfer2) # this need to fail because the allowance is incorrect with pytest.raises(Exception): channel0.register_transfer( app0.raiden.chain.block_number(), transfer2, ) with pytest.raises(Exception): channel1.register_transfer( app1.raiden.chain.block_number(), transfer2, ) # the registration of a bad transfer need fail equaly on both channels assert_synched_channels( channel0, balance0, [], channel1, balance1, [transfer1.lock], )
5,558
def genoimc_dup4_loc():
    """Create genomic dup4 sequence location"""
    return {
        "_id": "ga4gh:VSL.us51izImAQQWr-Hu6Q7HQm-vYvmb-jJo",
        "sequence_id": "ga4gh:SQ.-A1QmD_MatoqxvgVxBLZTONHz9-c7nQo",
        "interval": {
            "type": "SequenceInterval",
            "start": {
                "value": 30417575,
                "comparator": "<=",
                "type": "IndefiniteRange"
            },
            "end": {
                "value": 31394018,
                "comparator": ">=",
                "type": "IndefiniteRange"
            }
        },
        "type": "SequenceLocation"
    }
5,559
def compare_versions(a, b):
    """Return 0 if a == b, 1 if a > b, else -1."""
    a, b = version_to_ints(a), version_to_ints(b)
    for i in range(min(len(a), len(b))):
        if a[i] > b[i]:
            return 1
        elif a[i] < b[i]:
            return -1
    return 0
5,560
def check_ast_schema_is_valid(ast: DocumentNode) -> None: """Check the schema satisfies structural requirements for rename and merge. In particular, check that the schema contains no mutations, no subscriptions, no InputObjectTypeDefinitions, no TypeExtensionDefinitions, all type names are valid and not reserved (not starting with double underscores), and all query type field names match the types they query. Args: ast: represents schema Raises: - SchemaStructureError if the AST cannot be built into a valid schema, if the schema contains mutations, subscriptions, InputObjectTypeDefinitions, TypeExtensionsDefinitions, or if any query type field does not match the queried type. - InvalidNameError if a type has a type name that is invalid or reserved """ schema = build_ast_schema(ast) if schema.mutation_type is not None: raise SchemaStructureError( "Renaming schemas that contain mutations is currently not supported." ) if schema.subscription_type is not None: raise SchemaStructureError( "Renaming schemas that contain subscriptions is currently not supported." ) visit(ast, CheckValidTypesAndNamesVisitor()) query_type = get_query_type_name(schema) visit(ast, CheckQueryTypeFieldsNameMatchVisitor(query_type))
5,561
def get_machine_action_data(machine_action_response): """Get machine raw response and returns the machine action info in context and human readable format. Notes: Machine action is a collection of actions you can apply on the machine, for more info https://docs.microsoft.com/en-us/windows/security/threat-protection/microsoft-defender-atp/machineaction Returns: dict. Machine action's info """ action_data = \ { "ID": machine_action_response.get('id'), "Type": machine_action_response.get('type'), "Scope": machine_action_response.get('scope'), "Requestor": machine_action_response.get('requestor'), "RequestorComment": machine_action_response.get('requestorComment'), "Status": machine_action_response.get('status'), "MachineID": machine_action_response.get('machineId'), "ComputerDNSName": machine_action_response.get('computerDnsName'), "CreationDateTimeUtc": machine_action_response.get('creationDateTimeUtc'), "LastUpdateTimeUtc": machine_action_response.get('lastUpdateTimeUtc'), "RelatedFileInfo": { "FileIdentifier": machine_action_response.get('fileIdentifier'), "FileIdentifierType": machine_action_response.get('fileIdentifierType') }, "Commands": machine_action_response.get('commands') } return action_data
5,562
def test_validate_cabling_invalid_ip_file(): """Test that the `canu validate network cabling` command errors on invalid IPs from a file.""" invalid_ip = "999.999.999.999" with runner.isolated_filesystem(): with open("test.txt", "w") as f: f.write(invalid_ip) result = runner.invoke( cli, [ "--cache", cache_minutes, "validate", "network", "cabling", "--architecture", architecture, "--ips-file", "test.txt", "--username", username, "--password", password, ], ) assert result.exit_code == 2 assert "Error: Invalid value:" in str(result.output)
5,563
def do_zone_validation(domain):
    """Perform validation on domain.

    This function calls the following functions::

        check_for_soa_partition
        check_for_master_delegation
        validate_zone_soa

    .. note::
        The type of the domain that is passed is determined dynamically

    :param domain: The domain/reverse_domain being validated.
    :type domain: :class:`Domain` or :class:`ReverseDomain`

    The following code is an example of how to call this function during
    *domain* introspection.

    >>> do_zone_validation(self, self.master_domain)

    The following code is an example of how to call this function during
    *reverse_domain* introspection.

    >>> do_zone_validation(self, self.master_reverse_domain)
    """
    check_for_master_delegation(domain, domain.master_domain)
    validate_zone_soa(domain, domain.master_domain)
    check_for_soa_partition(domain, domain.domain_set.all())
5,564
def BulkRemove(fname,masterfile=None,edlfile=None): """ Given a file with one IP per line, remove the given IPs from the EDL if they are in there """ global AutoSave success = True removes = list() if os.path.exists(fname): with open(fname,"rt") as ip_list: for ip in ip_list: removes.append(ip.strip()) Remove(removes,masterfile,edlfile) else: success = False return success
5,565
def log(message, level_in=0, tag=PLUGIN_NAME): """ Writes to QGIS inbuilt logger accessible through panel. :param message: logging message to write, error or URL. :type message: str :param level_in: integer representation of logging level. :type level_in: int @param tag: if relevant give tag name. """ if level_in == 0: level = Qgis.Info elif level_in == 1: level = Qgis.Warning elif level_in == 2: level = Qgis.Critical else: level = Qgis.Info QgsMessageLog.logMessage(message, tag.strip(), level)
5,566
def convolutionalize(modules, input_size): """ Recast `modules` into fully convolutional form. The conversion transfers weights and infers kernel sizes from the `input_size` and modules' action on it. n.b. This only handles the conversion of linear/fully-connected modules, although other module types could require conversion for correctness. """ fully_conv_modules = [] x = torch.zeros((1, ) + input_size) for m in modules: if isinstance(m, nn.Linear): n = nn.Conv2d(x.size(1), m.weight.size(0), kernel_size=(x.size(2), x.size(3))) n.weight.data.view(-1).copy_(m.weight.data.view(-1)) n.bias.data.view(-1).copy_(m.bias.data.view(-1)) m = n fully_conv_modules.append(m) x = m(x) return fully_conv_modules
5,567
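# A small sketch (shapes assumed, not taken from the original project) of how
# convolutionalize above recasts a fully-connected classifier head into a
# convolution that can then slide over feature maps larger than it was trained on.
import torch
import torch.nn as nn

head = [nn.Linear(256 * 7 * 7, 10)]                 # hypothetical FC head
conv_head = convolutionalize(head, input_size=(256, 7, 7))
print(conv_head[0])  # a Conv2d with in_channels=256, out_channels=10, kernel_size=(7, 7)

features = torch.randn(1, 256, 14, 14)              # a larger input than the head was built for
dense_scores = conv_head[0](features)               # shape (1, 10, 8, 8): dense per-location predictions
print(dense_scores.shape)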
def is_unique2(s):
    """
    Use a boolean list indexed by the character's ord value to tell if that
    character has already appeared once
    """
    d = [False] * 128  # assumes ASCII input
    for t in s:
        if d[ord(t)]:
            return False
        d[ord(t)] = True
    return True
5,568
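# Quick sanity checks for is_unique2 above (assumes ASCII strings).
assert is_unique2("abcdef")
assert not is_unique2("abcdea")
assert is_unique2("")  # an empty string has no repeated characters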
def generate_wav(audio, file_name, sample_rate=41000): """ Generate .wav file from recorded audio :param audio: Numpy array of audio samples :param file_name: File name :param sample_rate: Audio sample rate. (Default = 41000) :return: None """ wavio.write(file_name, audio, sample_rate, sampwidth=3)
5,569
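# A minimal usage sketch for generate_wav above: write one second of a 440 Hz sine
# tone. The integer scaling assumes wavio's 3-byte (24-bit) sample width.
import numpy as np

sample_rate = 41000
t = np.linspace(0, 1, sample_rate, endpoint=False)
tone = (0.5 * np.sin(2 * np.pi * 440 * t) * (2 ** 23 - 1)).astype(np.int32)

generate_wav(tone, "tone_440hz.wav", sample_rate=sample_rate)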
def uncomplete_tree_parallel(x:ATree, mode="full"):
    """ Input is the formal-language tree (fl) as an ATree.
        Output is a randomly uncompleted tree, with every node annotated
        with whether it is terminated and which actions are good at that node. """
    fl = x

    fl.parent = None
    add_descendants_ancestors(fl)

    y = ATree("@START@", [])
    y.align = fl
    y.is_open = True

    i = 0
    y = assign_gold_actions(y, mode=mode)
    choices = [deepcopy(y)]     # !! can't cache because different choices !
    while not all_terminated(y):
        y = mark_for_execution(y, mode=mode)
        y = execute_chosen_actions(y, mode=mode)
        y = assign_gold_actions(y, mode=mode)
        y = adjust_gold(y, mode=mode)
        choices.append(deepcopy(y))
        i += 1

    ret = random.choice(choices[:-1])
    return ret
5,570
def stations_by_river(stations):
    """Return a dictionary mapping each river name (key) to a sorted list of the
    names of the stations on that river (value)"""

    rivers = {}
    for station in stations:
        rivers.setdefault(station.river, []).append(station.name)

    # sort the station names for each river and order the rivers alphabetically
    dicti = {river: sorted(names) for river, names in sorted(rivers.items())}

    assert dicti != {}
    return dicti
5,571
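# Illustrative only: a tiny stand-in for the station objects expected by
# stations_by_river above (the real project presumably has its own station class
# exposing at least `.name` and `.river` attributes).
from collections import namedtuple

FakeStation = namedtuple("FakeStation", ["name", "river"])

demo_stations = [
    FakeStation("Cam Bridge", "River Cam"),
    FakeStation("Jesus Lock", "River Cam"),
    FakeStation("Osney Lock", "River Thames"),
]

print(stations_by_river(demo_stations))
# {'River Cam': ['Cam Bridge', 'Jesus Lock'], 'River Thames': ['Osney Lock']}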
def clean(): """Delete Generated Documentation""" with lcd("docs"): pip(requirements="requirements.txt") local("make clean")
5,572
def QA_SU_save_huobi(frequency): """ Save huobi kline "smart" """ if (frequency not in ["1d", "1day", "day"]): return QA_SU_save_huobi_min(frequency) else: return QA_SU_save_huobi_day(frequency)
5,573
def new_users(): """ I have the highland! Create some users. """
5,574
def get_cachefile(filename=None): """Resolve cachefile path """ if filename is None: for f in FILENAMES: if os.path.exists(f): return f return IDFILE else: return filename
5,575
def inverse(a):
    """
    Compute the modular multiplicative inverse of ``a`` modulo 97 (the number of
    characters used), so that when decrypting we can find our way back to where we
    started. This part is used to decrypt the received message.

    :param a: it is an Int
    :return: x -> it is an Int
    """
    x = 0
    while a * x % 97 != 1:  # brute-force search; assumes a is invertible modulo 97
        x = x + 1
    return x
5,576
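# Worked example for inverse above: 3 * 65 = 195 and 195 % 97 == 1, so the modular
# inverse of 3 (mod 97) is 65.
x = inverse(3)
assert x == 65
assert (3 * x) % 97 == 1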
def currency_column_to_numeric( df: pd.DataFrame, column_name: str, cleaning_style: Optional[str] = None, cast_non_numeric: Optional[dict] = None, fill_all_non_numeric: Optional[Union[float, int]] = None, remove_non_numeric: bool = False, ) -> pd.DataFrame: """Convert currency column to numeric. This method does not mutate the original DataFrame. This method allows one to take a column containing currency values, inadvertently imported as a string, and cast it as a float. This is usually the case when reading CSV files that were modified in Excel. Empty strings (i.e. `''`) are retained as `NaN` values. Example: >>> import pandas as pd >>> import janitor >>> df = pd.DataFrame({ ... "a_col": [" 24.56", "-", "(12.12)", "1,000,000"], ... "d_col": ["", "foo", "1.23 dollars", "-1,000 yen"], ... }) >>> df # doctest: +NORMALIZE_WHITESPACE a_col d_col 0 24.56 1 - foo 2 (12.12) 1.23 dollars 3 1,000,000 -1,000 yen The default cleaning style. >>> df.currency_column_to_numeric("d_col") a_col d_col 0 24.56 NaN 1 - NaN 2 (12.12) 1.23 3 1,000,000 -1000.00 The accounting cleaning style. >>> df.currency_column_to_numeric("a_col", cleaning_style="accounting") # doctest: +NORMALIZE_WHITESPACE a_col d_col 0 24.56 1 0.00 foo 2 -12.12 1.23 dollars 3 1000000.00 -1,000 yen Valid cleaning styles are: - `None`: Default cleaning is applied. Empty strings are always retained as `NaN`. Numbers, `-`, `.` are extracted and the resulting string is cast to a float. - `'accounting'`: Replaces numbers in parentheses with negatives, removes commas. :param df: The pandas DataFrame. :param column_name: The column containing currency values to modify. :param cleaning_style: What style of cleaning to perform. :param cast_non_numeric: A dict of how to coerce certain strings to numeric type. For example, if there are values of 'REORDER' in the DataFrame, `{'REORDER': 0}` will cast all instances of 'REORDER' to 0. Only takes effect in the default cleaning style. :param fill_all_non_numeric: Similar to `cast_non_numeric`, but fills all strings to the same value. For example, `fill_all_non_numeric=1`, will make everything that doesn't coerce to a currency `1`. Only takes effect in the default cleaning style. :param remove_non_numeric: If set to True, rows of `df` that contain non-numeric values in the `column_name` column will be removed. Only takes effect in the default cleaning style. :raises ValueError: If `cleaning_style` is not one of the accepted styles. :returns: A pandas DataFrame. """ # noqa: E501 check("column_name", column_name, [str]) check_column(df, column_name) column_series = df[column_name] if cleaning_style == "accounting": df.loc[:, column_name] = df[column_name].apply( _clean_accounting_column ) return df if cleaning_style is not None: raise ValueError( "`cleaning_style` is expected to be one of ('accounting', None). " f"Got {cleaning_style!r} instead." 
        )

    if cast_non_numeric:
        check("cast_non_numeric", cast_non_numeric, [dict])
        _make_cc_partial = partial(
            _currency_column_to_numeric,
            cast_non_numeric=cast_non_numeric,
        )
        column_series = column_series.apply(_make_cc_partial)

    if remove_non_numeric:
        df = df.loc[column_series != "", :]

    # _replace_empty_string_with_none is applied here after the check on
    # remove_non_numeric since "" is our indicator that a string was coerced
    # in the original column
    column_series = _replace_empty_string_with_none(column_series)

    if fill_all_non_numeric is not None:
        check("fill_all_non_numeric", fill_all_non_numeric, [int, float])
        column_series = column_series.fillna(fill_all_non_numeric)

    column_series = _replace_original_empty_string_with_none(column_series)

    df = df.assign(**{column_name: pd.to_numeric(column_series)})

    return df
5,577
async def subreddit_type_submissions(sub="wallstreetbets", kind="new"):
    """
    Fetch submissions of the given kind ("hot", "top", "new", "random_rising" or
    random) from a subreddit and return the cleaned articles together with their
    top-level comments.
    """
    comments = []
    articles = []
    red = await reddit_instance()
    subreddit = await red.subreddit(sub)
    if kind == "hot":
        submissions = subreddit.hot()
    elif kind == "top":
        submissions = subreddit.top()
    elif kind == "new":
        submissions = subreddit.new()
    elif kind == "random_rising":
        submissions = subreddit.random_rising()
    else:
        submissions = subreddit.random()
    async for submission in submissions:
        article = clean_submission(submission)
        article['subreddit'] = sub
        articles.append(article)
        top_level_comments = await submission.comments()
        print(f"📗 Looking at submission: {article['title'][:40]}...")
        for top_level_comment in top_level_comments:
            if isinstance(top_level_comment, MoreComments):
                continue
            comment = clean_comment(top_level_comment)
            print(f"🗯️ ... {comment['author']} said {comment['body'][:40]}")
            comment['article_id'] = article['id']
            comments.append(comment)
    return (articles, comments)
5,578
def get_args():
    """Parse the command-line arguments with getopt"""
    try:
        opts, args = getopt.getopt(
            sys.argv[1:], "i:s:t:o:rvh",
            ["ibam=", "snp=", "tag=", "output=", "rstat", "verbose", "help"])
    except getopt.GetoptError as err:
        print(str(err))
        usage()
        sys.exit(-1)
    return opts
5,579
def _get_bundle_manifest( uuid: str, replica: Replica, version: typing.Optional[str], *, bucket: typing.Optional[str] = None) -> typing.Optional[dict]: """ Return the contents of the bundle manifest file from cloud storage, subject to the rules of tombstoning. If version is None, return the latest version, once again, subject to the rules of tombstoning. If the bundle cannot be found, return None """ uuid = uuid.lower() handle = Config.get_blobstore_handle(replica) default_bucket = replica.bucket # need the ability to use fixture bucket for testing bucket = default_bucket if bucket is None else bucket def tombstone_exists(uuid: str, version: typing.Optional[str]): return test_object_exists(handle, bucket, BundleTombstoneID(uuid=uuid, version=version).to_key()) # handle the following deletion cases # 1. the whole bundle is deleted # 2. the specific version of the bundle is deleted if tombstone_exists(uuid, None) or (version and tombstone_exists(uuid, version)): return None # handle the following deletion case # 3. no version is specified, we want the latest _non-deleted_ version if version is None: # list the files and find the one that is the most recent. prefix = f"bundles/{uuid}." object_names = handle.list(bucket, prefix) version = _latest_version_from_object_names(object_names) if version is None: # no matches! return None bundle_fqid = BundleFQID(uuid=uuid, version=version) # retrieve the bundle metadata. try: bundle_manifest_blob = handle.get(bucket, bundle_fqid.to_key()).decode("utf-8") return json.loads(bundle_manifest_blob) except BlobNotFoundError: return None
5,580
def handler400(request, exception): """ This is a Django handler function for 400 Bad Request error :param request: The Django Request object :param exception: The exception caught :return: The 400 error page """ context = get_base_context(request) context.update({ 'message': { 'title': '400 Bad Request', 'description': 'Your client has issued a malformed or illegal request.' } }) return render(request, 'velarium/base.html', context=context, status=400)
5,581
def object_trajectory_proposal(vid, fstart, fend, gt=False, verbose=False): """ Set gt=True for providing groundtruth bounding box trajectories and predicting classme feature only """ vsig = get_segment_signature(vid, fstart, fend) name = 'traj_cls_gt' if gt else 'traj_cls' path = get_feature_path(name, vid) path = os.path.join(path, '{}-{}.json'.format(vsig, name)) if os.path.exists(path): if verbose: print('loading object {} proposal for video segment {}'.format(name, vsig)) with open(path, 'r') as fin: trajs = json.load(fin) trajs = [Trajectory(**traj) for traj in trajs] else: if verbose: print('no object {} proposal for video segment {}'.format(name, vsig)) trajs = [] return trajs
5,582
def _gather_topk_beams(nested, score_or_log_prob, batch_size, beam_size): """Gather top beams from nested structure.""" _, topk_indexes = tf.nn.top_k(score_or_log_prob, k=beam_size) return _gather_beams(nested, topk_indexes, batch_size, beam_size)
5,583
def _traceinv_exact(K, B, C, matrix, gram, exponent): """ Finds traceinv directly for the purpose of comparison. """ # Exact solution of traceinv for band matrix if B is not None: if scipy.sparse.isspmatrix(K): K_ = K.toarray() B_ = B.toarray() if C is not None: C_ = C.toarray() else: K_ = K B_ = B if C is not None: C_ = C if exponent == 0: if C is not None: traceinv_exact = numpy.trace(C_ @ B_) else: traceinv_exact = numpy.trace(B_) else: if gram: K_ = numpy.matmul(K_.T, K_) if exponent > 1: K1 = K_.copy() for i in range(1, exponent): K_ = numpy.matmul(K_, K1) Kinv = numpy.linalg.inv(K_) Op = numpy.matmul(Kinv, B_) if C is not None: Op = Kinv @ C_ @ Op traceinv_exact = numpy.trace(Op) elif exponent == 1 and not gram: # B is identity. Using analytic formula. traceinv_exact = band_matrix_traceinv(matrix['a'], matrix['b'], matrix['size'], True) else: # B and C are identity. Compute traceinv directly. if scipy.sparse.isspmatrix(K): K_ = K.toarray() else: K_ = K if exponent == 0: traceinv_exact = K_.shape[0] else: if gram: K_ = numpy.matmul(K_.T, K_) K_temp = K_.copy() for i in range(1, exponent): K_ = numpy.matmul(K_, K_temp) Kinv = numpy.linalg.inv(K_) traceinv_exact = numpy.trace(Kinv) return traceinv_exact
5,584
def create_feature_vector_of_mean_mfcc_for_song(song_file_path: str) -> ndarray: """ Takes in a file path to a song segment and returns a numpy array containing the mean mfcc values :param song_file_path: str :return: ndarray """ song_segment, sample_rate = librosa.load(song_file_path) mfccs = librosa.feature.mfcc(y=song_segment, sr=sample_rate, n_mfcc=NUMBER_OF_MFCC) mfccs_processed = np.mean(mfccs.T, axis=0) df = pd.DataFrame(mfccs_processed) z_score_normalized_mfccs = (df.values - df.values.mean()) / df.values.std() z_score_normalized_mfccs = np.array([i[0] for i in z_score_normalized_mfccs]) return z_score_normalized_mfccs
5,585
def stations_highest_rel_level(stations, N):
    """Returns a list containing the names of the N stations with the highest
    water level relative to the typical range"""

    names = []  # create list for names
    levels = []  # create list for levels

    for i in range(len(stations)):  # iterate through stations
        if stations[i].relative_water_level() is not None:
            # ^checks for valid relative water level
            names.append(stations[i].name)
            levels.append(stations[i].relative_water_level())
            # ^adds names and levels to respective lists

    combined = list(zip(names, levels))  # combines names and levels
    combined.sort(key=lambda x: x[1], reverse=True)  # sorts in reverse

    output = []  # create output list
    for i in range(N):  # iterate up to N
        output.append(combined[i][0])  # add station name to output

    return output
5,586
def read_image(file_name, format=None): """ Read an image into the given format. Will apply rotation and flipping if the image has such exif information. Args: file_name (str): image file path format (str): one of the supported image modes in PIL, or "BGR" Returns: image (np.ndarray): an HWC image in the given format. """ with PathManager.open(file_name, "rb") as f: image = Image.open(f) # capture and ignore this bug: https://github.com/python-pillow/Pillow/issues/3973 try: image = ImageOps.exif_transpose(image) except Exception: pass if format is not None: # PIL only supports RGB, so convert to RGB and flip channels over below conversion_format = format if format == "BGR": conversion_format = "RGB" image = image.convert(conversion_format) image = np.asarray(image) if format == "BGR": # flip channels if needed image = image[:, :, ::-1] # PIL squeezes out the channel dimension for "L", so make it HWC if format == "L": image = np.expand_dims(image, -1) return image
5,587
def reduce_opacity_filter(image, opacity_level):
    """Divide each pixel's red, green and blue values by opacity_level and
    save the resulting image
    """
    data = []
    image_data = get_image_data(image)

    # creating new pixels divided by the opacity level
    for i in range(len(image_data)):
        current_tuple = list(image_data[i])
        current_tuple[0] = round(current_tuple[0] / opacity_level)
        current_tuple[1] = round(current_tuple[1] / opacity_level)
        current_tuple[2] = round(current_tuple[2] / opacity_level)
        data.append(tuple(current_tuple))

    # saving the image
    footer(image, data, "reduce_opacity_filter")
5,588
def add_global_nodes_edges(g_nx : nx.Graph, feat_data: np.ndarray, adj_list: np.ndarray,
                           g_feat_data: np.ndarray, g_adj_list: np.ndarray):
    """
    Merge a set of global nodes into the local graph: concatenate their features,
    union their adjacency sets into the local adjacency mapping and add their edges
    to the networkx graph.

    :param g_nx: networkx graph to which the global edges are added
    :param feat_data: node feature array of the local graph
    :param adj_list: adjacency mapping (node id -> set of neighbour ids) of the local graph
    :param g_feat_data: node feature array of the global nodes
    :param g_adj_list: adjacency mapping of the global nodes
    :return: the updated graph, the concatenated feature array and the merged adjacency mapping
    """
    feat_data = np.concatenate([feat_data, g_feat_data], 0)
    # adj_list.update((k, adj_list[k].union(g_adj_list[k])) for k in range(len(g_adj_list)))
    adj_list.update((k, adj_list[k].union(g_adj_list[k])) for k in range(len(feat_data)))
    g_edge_list = [[[k, v] for v in vs] for k, vs in g_adj_list.items()]
    g_edge_list = [x for sublist in g_edge_list for x in sublist]
    g_nx.add_edges_from(g_edge_list)
    return g_nx, feat_data, adj_list
5,589
def _load_readme(file_name: str = "README.md") -> str: """ Load readme from a text file. Args: file_name (str, optional): File name that contains the readme. Defaults to "README.md". Returns: str: Readme text. """ with open(os.path.join(_PATH_ROOT, file_name), "r", encoding="utf-8") as file: readme = file.read() return readme
5,590
def get_data_collector_instance(args, config): """Get the instance of the data :param args: arguments of the script :type args: Namespace :raises NotImplementedError: no data collector implemented for given data source :return: instance of the specific data collector :rtype: subclass of BaseDataCollector """ if args.data_source == DATA_SOURCE_RSS: return RssDataCollector(args.base_url, config[CONFIG_RSS_HEADER]) elif args.data_source == DATA_SOURCE_REDDIT: return RedditDataCollector(REDDIT_CLIENT_ID, REDDIT_CLIENT_SECRET) elif args.data_source == DATA_SOURCE_TWITTER: return TwitterDataCollector(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET, TWITTER_BEARER_TOKEN) else: raise NotImplementedError
5,591
def timeIntegration(params):
    """Sets up the parameters for time integration

    :param params: Parameter dictionary of the model
    :type params: dict
    :return: Integrated activity variables of the model
    :rtype: (numpy.ndarray,)
    """
    dt = params["dt"]  # Time step for the Euler integration (ms)
    duration = params["duration"]  # Simulation duration (ms)
    RNGseed = params["seed"]  # seed for RNG

    # ------------------------------------------------------------------------
    # local parameters
    # See Papadopoulos et al., Relations between large-scale brain connectivity and effects of regional stimulation
    # depend on collective dynamical state, arXiv, 2020
    tau_exc = params["tau_exc"]  #
    tau_inh = params["tau_inh"]  #
    c_excexc = params["c_excexc"]  #
    c_excinh = params["c_excinh"]  #
    c_inhexc = params["c_inhexc"]  #
    c_inhinh = params["c_inhinh"]  #
    a_exc = params["a_exc"]  #
    a_inh = params["a_inh"]  #
    mu_exc = params["mu_exc"]  #
    mu_inh = params["mu_inh"]  #

    # external input parameters:
    # Parameter of the Ornstein-Uhlenbeck process for the external input (ms)
    tau_ou = params["tau_ou"]
    # Parameter of the Ornstein-Uhlenbeck (OU) process for the external input ( mV/ms/sqrt(ms) )
    sigma_ou = params["sigma_ou"]
    # Mean external excitatory input (OU process) (mV/ms)
    exc_ou_mean = params["exc_ou_mean"]
    # Mean external inhibitory input (OU process) (mV/ms)
    inh_ou_mean = params["inh_ou_mean"]

    # ------------------------------------------------------------------------
    # global coupling parameters

    # Connectivity matrix
    # Interareal relative coupling strengths (values between 0 and 1), Cmat(i,j) connection from jth to ith
    Cmat = params["Cmat"]
    N = len(Cmat)  # Number of nodes
    K_gl = params["K_gl"]  # global coupling strength
    # Interareal connection delay
    lengthMat = params["lengthMat"]
    signalV = params["signalV"]

    if N == 1:
        Dmat = np.zeros((N, N))
    else:
        # Interareal connection delays, Dmat(i,j) Connection from jth node to ith (ms)
        Dmat = dp.computeDelayMatrix(lengthMat, signalV)
        Dmat[np.eye(len(Dmat)) == 1] = np.zeros(len(Dmat))
    Dmat_ndt = np.around(Dmat / dt).astype(int)  # delay matrix in multiples of dt
    params["Dmat_ndt"] = Dmat_ndt

    # ------------------------------------------------------------------------

    # Initialization
    # Floating point issue in np.arange() workaround: use integers in np.arange()
    t = np.arange(1, round(duration, 6) / dt + 1) * dt  # Time variable (ms)

    sqrt_dt = np.sqrt(dt)

    max_global_delay = np.max(Dmat_ndt)
    startind = int(max_global_delay + 1)  # timestep to start integration at

    exc_ou = params["exc_ou"]
    inh_ou = params["inh_ou"]

    exc_ext = params["exc_ext"]
    inh_ext = params["inh_ext"]

    # state variable arrays, have length of t + startind
    # they store initial conditions AND simulated data
    excs = np.zeros((N, startind + len(t)))
    inhs = np.zeros((N, startind + len(t)))

    # ------------------------------------------------------------------------
    # Set initial values
    # if initial values are just a Nx1 array
    if np.shape(params["exc_init"])[1] == 1:
        exc_init = np.dot(params["exc_init"], np.ones((1, startind)))
        inh_init = np.dot(params["inh_init"], np.ones((1, startind)))
    # if initial values are a Nxt array
    else:
        exc_init = params["exc_init"][:, -startind:]
        inh_init = params["inh_init"][:, -startind:]

    # xsd = np.zeros((N,N))  # delayed activity
    exc_input_d = np.zeros(N)  # delayed input to x
    inh_input_d = np.zeros(N)  # delayed input to y

    np.random.seed(RNGseed)

    # Save the noise in the activity array to save memory
    excs[:, startind:] = np.random.standard_normal((N, len(t)))
    inhs[:, startind:] = np.random.standard_normal((N, len(t)))
excs[:, :startind] = exc_init inhs[:, :startind] = inh_init noise_exc = np.zeros((N,)) noise_inh = np.zeros((N,)) # ------------------------------------------------------------------------ return timeIntegration_njit_elementwise( startind, t, dt, sqrt_dt, N, Cmat, K_gl, Dmat_ndt, excs, inhs, exc_input_d, inh_input_d, exc_ext, inh_ext, tau_exc, tau_inh, a_exc, a_inh, mu_exc, mu_inh, c_excexc, c_excinh, c_inhexc, c_inhinh, noise_exc, noise_inh, exc_ou, inh_ou, exc_ou_mean, inh_ou_mean, tau_ou, sigma_ou, )
5,592
def test_set_stage_invalid_buttons(memory):
    """Test setting the stage with invalid buttons."""
    stage = 0
    display = 1

    # pytest removed the `message` keyword from pytest.raises(); `match` asserts on
    # the text of the raised exception instead
    expected_exception = 'buttons must be of type list'
    with pytest.raises(Exception, match=expected_exception):
        memory.set_stage(stage, display, '1, 2, 3, 4')

    expected_exception = 'buttons list must contain exactly 4 items'
    with pytest.raises(Exception, match=expected_exception):
        memory.set_stage(stage, display, [1, 2, 3])

    expected_exception = 'buttons list must contain one each of 1, 2, 3, 4'
    with pytest.raises(Exception, match=expected_exception):
        memory.set_stage(stage, display, [1, 2, 3, 1])

    expected_exception = 'buttons items must be of type int'
    with pytest.raises(Exception, match=expected_exception):
        memory.set_stage(stage, display, [1, 2, 3, '4'])

    expected_exception = 'buttons items must be between 1 and 4'
    with pytest.raises(Exception, match=expected_exception):
        memory.set_stage(stage, display, [1, 2, 3, 5])
5,593
def is_role_user(session, user=None, group=None): # type: (Session, User, Group) -> bool """ Takes in a User or a Group and returns a boolean indicating whether that User/Group is a component of a service account. Args: session: the database session user: a User object to check group: a Group object to check Throws: AssertionError if neither a user nor a group is provided Returns: whether the User/Group is a component of a service account """ if user is not None: return user.role_user assert group is not None user = User.get(session, name=group.groupname) if not user: return False return user.role_user
5,594
def argCOM(y):
    """argCOM(y) returns the index of the centre of mass (COM) of the 1-D array y."""
    idx = np.round(np.sum(y/np.sum(y)*np.arange(len(y))))
    return int(idx)
5,595
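# Worked example for argCOM above: for y = [0, 1, 2, 1, 0] the weighted mean index
# is (1*1 + 2*2 + 1*3) / 4 = 2, so the centre of mass sits at index 2.
import numpy as np

y = np.array([0, 1, 2, 1, 0], dtype=float)
assert argCOM(y) == 2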
def fringe(z, z1, z2, rad, a1): """ Approximation to the longitudinal profile of a multipole from a permanent magnet assembly. see Wan et al. 2018 for definition and Enge functions paper (Enge 1964) """ zz1 = (z - z1) / (2 * rad / pc.pi) zz2 = (z - z2) / (2 * rad / pc.pi) fout = ( (1 / ( 2 * np.tanh((z2 - z1) / (4 * rad / pc.pi)) ) ) * (np.tanh(zz1 + a1 * zz1**2 ) - np.tanh(zz2 - a1 * zz2**2) ) ) return fout
5,596
def random_param_shift(vals, sigmas): """Add a random (normal) shift to a parameter set, for testing""" assert len(vals) == len(sigmas) shifts = [random.gauss(0, sd) for sd in sigmas] newvals = [(x + y) for x, y in zip(vals, shifts)] return newvals
5,597
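# Usage sketch for random_param_shift above: seed the RNG so the perturbation is
# reproducible in a test.
import random

random.seed(42)
vals = [1.0, 2.0, 3.0]
sigmas = [0.1, 0.2, 0.3]
shifted = random_param_shift(vals, sigmas)
print(shifted)  # three perturbed values, one per input parameter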
def compute_encrypted_request_hash(caller): """ This function will compute encrypted request Hash :return: encrypted request hash """ first_string = get_parameter(caller.params_obj, "requesterNonce") or "" worker_order_id = get_parameter(caller.params_obj, "workOrderId") or "" worker_id = get_parameter(caller.params_obj, "workerId") or "" workload_id = get_parameter(caller.params_obj, "workloadId") or "" requester_id = get_parameter(caller.params_obj, "requesterId") or "" requester_id = str(requester_id) first_string += \ worker_order_id + worker_id + workload_id + requester_id concat_hash = first_string.encode("UTF-8") hash_1 = crypto_utils.compute_message_hash(concat_hash) in_data = get_parameter(caller.params_obj, "inData") out_data = get_parameter(caller.params_obj, "outData") hash_2 = bytearray() if in_data is not None: hash_2 = compute_hash_string(in_data) hash_3 = bytearray() if out_data is not None: hash_3 = compute_hash_string(out_data) final_string = hash_1 + hash_2 + hash_3 caller.final_hash = crypto_utils.compute_message_hash(final_string) encrypted_request_hash = crypto_utils.byte_array_to_hex( crypto_utils.encrypt_data( caller.final_hash, caller.session_key, caller.session_iv)) return encrypted_request_hash
5,598
def test_num_samples(res_surf): """ Verify dimension value.""" assert res_surf.num_samples == 47
5,599