def read_students(path):
    """
    Read a tab-separated file of students. The only required field is
    'github_repo', which is this student's github repository.
    """
    students = [line for line in csv.DictReader(open(path), delimiter='\t')]
    check_students(students)
    return students
7,300
def get_extended_attention_mask(
    attention_mask: Tensor,
    input_shape: Tuple[int],
    device: torch.device,
    is_decoder=False,
) -> Tensor:
    """
    Makes broadcastable attention and causal masks so that future and
    masked tokens are ignored.

    Arguments:
        attention_mask (:obj:`torch.Tensor`):
            Mask with ones indicating tokens to attend to, zeros for
            tokens to ignore.
        input_shape (:obj:`Tuple[int]`):
            The shape of the input to the model.
        device: (:obj:`torch.device`):
            The device of the input to the model.

    Returns:
        :obj:`torch.Tensor` The extended attention mask, with the same
        dtype as :obj:`attention_mask.dtype`.
    """
    # We can provide a self-attention mask of dimensions
    # [batch_size, from_seq_length, to_seq_length] ourselves, in which case
    # we just need to make it broadcastable to all heads.
    if attention_mask.dim() == 3:
        extended_attention_mask = attention_mask[:, None, :, :]
    elif attention_mask.dim() == 2:
        # Provided a padding mask of dimensions [batch_size, seq_length]
        # - if the model is a decoder, apply a causal mask in addition to the padding mask
        # - if the model is an encoder, make the mask broadcastable to
        #   [batch_size, num_heads, seq_length, seq_length]
        if is_decoder:
            batch_size, seq_length = input_shape
            seq_ids = torch.arange(seq_length, device=device)
            causal_mask = (
                seq_ids[None, None, :].repeat(batch_size, seq_length, 1)
                <= seq_ids[None, :, None]
            )
            # In case past_key_values are used, we need to add a prefix ones
            # mask to the causal mask. Causal and attention masks must have
            # the same type with pytorch version < 1.3.
            causal_mask = causal_mask.to(attention_mask.dtype)

            if causal_mask.shape[1] < attention_mask.shape[1]:
                prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
                causal_mask = torch.cat(
                    [
                        torch.ones(
                            (batch_size, seq_length, prefix_seq_len),
                            device=device,
                            dtype=causal_mask.dtype,
                        ),
                        causal_mask,
                    ],
                    dim=-1,
                )

            extended_attention_mask = (
                causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
            )
        else:
            extended_attention_mask = attention_mask[:, None, None, :]
    else:
        raise ValueError(
            f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})"
        )

    # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
    # masked positions, this operation will create a tensor which is 0.0 for
    # positions we want to attend and -10000.0 for masked positions.
    # Since we are adding it to the raw scores before the softmax, this is
    # effectively the same as removing these entirely.
    # extended_attention_mask = extended_attention_mask.to(
    #     dtype=self.dtype
    # )  # fp16 compatibility
    extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
    return extended_attention_mask
7,301
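A minimal, self-contained sketch of what the decoder branch above computes, using plain PyTorch on a toy padding mask (the tensors here are illustrative, not from any model):

import torch

batch_size, seq_length = 2, 4
attention_mask = torch.tensor([[1, 1, 1, 0],
                               [1, 1, 0, 0]], dtype=torch.float32)

seq_ids = torch.arange(seq_length)
# causal_mask[b, i, j] == 1 iff position j may be attended to from position i
causal_mask = (seq_ids[None, None, :].repeat(batch_size, seq_length, 1)
               <= seq_ids[None, :, None]).float()

extended = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
extended = (1.0 - extended) * -10000.0
print(extended.shape)  # torch.Size([2, 1, 4, 4]); broadcastable over all heads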
def ci_generate(args):
    """Generate jobs file from a spack environment file containing CI info.
    Before invoking this command, you can set the environment variable
    SPACK_CDASH_AUTH_TOKEN to contain the CDash authorization token for
    creating a build group for the generated workload and registering all
    generated jobs under that build group.  If this environment variable is
    not set, no build group will be created on CDash."""
    env = ev.get_env(args, 'ci generate', required=True)

    output_file = args.output_file
    copy_yaml_to = args.copy_to
    spack_repo = args.spack_repo
    spack_ref = args.spack_ref
    run_optimizer = args.optimize
    use_dependencies = args.dependencies

    if not output_file:
        output_file = os.path.abspath(".gitlab-ci.yml")
    else:
        output_file_path = os.path.abspath(output_file)
        gen_ci_dir = os.path.dirname(output_file_path)
        if not os.path.exists(gen_ci_dir):
            os.makedirs(gen_ci_dir)

    # Generate the jobs
    spack_ci.generate_gitlab_ci_yaml(
        env, True, output_file, spack_repo, spack_ref,
        run_optimizer=run_optimizer, use_dependencies=use_dependencies)

    if copy_yaml_to:
        copy_to_dir = os.path.dirname(copy_yaml_to)
        if not os.path.exists(copy_to_dir):
            os.makedirs(copy_to_dir)
        shutil.copyfile(output_file, copy_yaml_to)
7,302
def setup(c):  # type: ignore
    """Set this project up for development."""
    c.run("ls -lah --color=yes .venv")
    c.run("poetry env info --path")
    c.run("poetry debug info")
    c.run("poetry run pre-commit install")
7,303
def getdiskuuidvm(diskuuid):
    """
    Get the VM uuid from a disk uuid and return it.
    """
    if debug:
        print('vm from disk uuid:', diskuuid)
    cmd = 'xe vbd-list vdi-uuid=' + diskuuid
    response = docmd(cmd).split('vm-uuid ( RO): ')
    vmuuid = response[1].split('\n')[0]
    return vmuuid
7,304
def storedata():
    """ Upload a new file """
    # path = os.path.join(app.config['UPLOAD_DIR'], current_user.name)
    path = os.path.join(app.config['UPLOAD_DIR'])
    dirs = os.listdir(path)
    if dirs != []:  # If the user's directory is not empty
        dirs.sort(key=str.lower)
    if request.method == 'POST':
        if 'file' not in request.files:
            flash('Choose a .csv file', "alert alert-danger")
            return render_template(
                'uploadData.html',
                infoUpload='Choose a .csv file',
                files=dirs)
        file = request.files['file']  # get the file
        if file.filename == '':
            flash('File not selected', "alert alert-danger")
            return render_template(
                'uploadData.html',
                infoUpload='File not selected',
                files=dirs)
        file_name = ''
        data_name = ''
        if file and allowed_file(file.filename):
            file_name = secure_filename(file.filename)
            file_path = os.path.join(path, file_name)
            file.save(file_path)
            dirs = os.listdir(path)
            if dirs != []:  # If the user's directory is not empty
                dirs.sort(key=str.lower)
            flash('Uploaded!! ' + file_name, "alert alert-success")
            return render_template(
                'uploadData.html',
                infoUpload='Uploaded!! ' + file_name,
                files=dirs)
        flash('Error', "alert alert-danger")
        return render_template(
            'uploadData.html',
            infoUpload='Error',
            files=dirs)
    else:
        return redirect(url_for('defineData'))
7,305
def run_fpp_locs(
    build: Build,
    parsed: argparse.Namespace,
    _: Dict[str, str],
    make_args: Dict[str, str],
):
    """Runs the fpp_locs command

    Args:
        build: build cache
        parsed: parsed arguments object
        _: cmake args, not used
        make_args: make arguments passed to the fpp-locs target
    """
    print(
        f"fpp Locations File: {fpp_get_locations_file(Path(parsed.path), build, make_args=make_args)}"
    )
7,306
def adler32(string, start=ADLER32_DEFAULT_START):
    """
    Compute the Adler-32 checksum of the string, possibly with the given
    start value, and return it as an unsigned 32-bit integer.
    """
    return _crc_or_adler(string, start, _adler32)
7,307
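For comparison, the Python standard library exposes the same checksum as zlib.adler32; a short sketch of the equivalent call (assuming the wrapper above ultimately computes standard Adler-32):

import zlib

# Adler-32 of a byte string; the optional second argument is the start value
# (1 by default), matching the role of ADLER32_DEFAULT_START above.
checksum = zlib.adler32(b"hello world")
print(checksum & 0xFFFFFFFF)  # mask keeps it an unsigned 32-bit integer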
def list_projects(sortBy=None, sortOrder=None, nextToken=None):
    """
    Gets a list of build project names, with each build project name
    representing a single build project.

    See also: AWS API Documentation

    :example: response = client.list_projects(
        sortBy='NAME'|'CREATED_TIME'|'LAST_MODIFIED_TIME',
        sortOrder='ASCENDING'|'DESCENDING',
        nextToken='string'
    )

    :type sortBy: string
    :param sortBy: The criterion to be used to list build project names.
        Valid values include:
            CREATED_TIME: List the build project names based on when each
                build project was created.
            LAST_MODIFIED_TIME: List the build project names based on when
                information about each build project was last changed.
            NAME: List the build project names based on each build project's
                name.
        Use sortOrder to specify in what order to list the build project
        names based on the preceding criteria.

    :type sortOrder: string
    :param sortOrder: The order in which to list build projects.
        Valid values include:
            ASCENDING: List the build project names in ascending order.
            DESCENDING: List the build project names in descending order.
        Use sortBy to specify the criterion to be used to list build project
        names.

    :type nextToken: string
    :param nextToken: During a previous call, if there are more than 100
        items in the list, only the first 100 items are returned, along with
        a unique string called a next token. To get the next batch of items
        in the list, call this operation again, adding the next token to the
        call. To get all of the items in the list, keep calling this
        operation with each subsequent next token that is returned, until no
        more next tokens are returned.

    :rtype: dict
    :return: {
        'nextToken': 'string',
        'projects': [
            'string',
        ]
    }

    :returns: (string) --
    """
    pass
7,308
def find_earliest_brs_idx(g: Grid, V: np.ndarray, state: np.ndarray, low: int, high: int) -> int:
    """
    Determines the earliest time the current state is in the reachable set

    Args:
        g: Grid
        V: Value function
        state: state of dynamical system
        low: lower bound of search range (inclusive)
        high: upper bound of search range (inclusive)

    Returns:
        t: Earliest time where the state is in the reachable set
    """
    epsilon = 1e-4
    while low < high:
        mid = int(np.ceil((high + low) / 2))  # cast: np.ceil returns a float
        value = g.get_value(V[..., mid], state)
        if value < epsilon:
            low = mid
        else:
            high = mid - 1
    return low
7,309
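A hedged sanity check of the bisection above; StubGrid is a hypothetical stand-in for the real Grid, and V is indexed so that set membership (value below epsilon) holds on a prefix of time indices:

import numpy as np

class StubGrid:
    def get_value(self, V_t, state):
        return float(V_t)  # each V[..., t] is already a scalar here

V = np.array([-0.8, -0.3, 0.2, 0.5, 1.0])  # inside the set for t = 0, 1
t = find_earliest_brs_idx(StubGrid(), V, state=None, low=0, high=len(V) - 1)
print(t)  # 1: the boundary index located by the bisection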
def italic(s):
    """Returns the string italicized.

    Source: http://stackoverflow.com/a/16264094/2570866
    """
    return r'\textit{' + s + '}'
7,310
def skip_if_disabled(func):
    """Decorator that skips a test if test case is disabled."""
    @functools.wraps(func)
    def wrapped(*a, **kwargs):
        func.__test__ = False
        test_obj = a[0]
        message = getattr(test_obj, 'disabled_message', 'Test disabled')
        if getattr(test_obj, 'disabled', False):
            test_obj.skipTest(message)
        func(*a, **kwargs)
    return wrapped
7,311
def _compare_keys(target: Any, key: Any) -> bool:
    """
    Compare `key` to `target`.

    Return True if each value in `key` == corresponding value in `target`.
    If any value in `key` is slice(None), it is considered equal to the
    corresponding value in `target`.
    """
    if not isinstance(target, tuple):
        return target == key
    for k1, k2 in itertools.zip_longest(target, key, fillvalue=None):
        if k2 == slice(None):
            continue
        if k1 != k2:
            return False
    return True
7,312
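Example calls showing the slice(None) wildcard behavior (assuming the function above and its itertools import):

print(_compare_keys(("a", 1), ("a", slice(None))))  # True: wildcard matches 1
print(_compare_keys(("a", 1), ("b", slice(None))))  # False: "a" != "b"
print(_compare_keys("a", "a"))                      # True (non-tuple fast path)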
def parser(_, objconf, skip=False, **kwargs):
    """ Parses the pipe content

    Args:
        _ (None): Ignored
        objconf (obj): The pipe configuration (an Objectify instance)
        skip (bool): Don't parse the content
        kwargs (dict): Keyword arguments

    Kwargs:
        assign (str): Attribute to assign parsed content (default: content)
        stream (dict): The original item

    Returns:
        Iter[dict]: The stream of items

    Examples:
        >>> from riko import get_path
        >>> from riko.utils import get_abspath
        >>> from meza.fntools import Objectify
        >>>
        >>> feed = 'http://feeds.feedburner.com/TechCrunch/'
        >>> url = 'http://query.yahooapis.com/v1/public/yql'
        >>> query = "select * from feed where url='%s'" % feed
        >>> conf = {'query': query, 'url': url, 'debug': False}
        >>> objconf = Objectify(conf)
        >>> url = get_abspath(get_path('yql.xml'))
        >>>
        >>> with fetch(url) as f:
        ...     kwargs = {'stream': {}, 'response': f}
        ...     result = parser(None, objconf, **kwargs)
        >>>
        >>> next(result)['title']
        'Bring pizza home'
    """
    if skip:
        stream = kwargs['stream']
    else:
        f = kwargs.get('response')

        if not f:
            params = {'q': objconf.query, 'diagnostics': objconf.debug}

            if objconf.memoize and not objconf.cache_type:
                objconf.cache_type = 'auto'

            f = fetch(params=params, **objconf)

        # TODO: consider paging for large result sets
        root = xml2etree(f).getroot()
        results = root.find('results')
        stream = map(etree2dict, results)

    return stream
7,313
def predict_xr(result_ds, regressors):
    """input: results_ds as came out of MLR and saved to file, regressors dataset"""
    # If produce_RI isn't called on the data, you should explicitly put time info here.
    import xarray as xr
    import aux_functions_strat as aux
    rds = result_ds
    regressors = regressors.sel(time=rds.time)  # slice
    regressors = regressors.apply(aux.normalize_xr, norm=1, verbose=False)  # normalize
    reg_dict = dict(zip(rds.regressors.values, regressors.data_vars.values()))
    # Make sure that all the regressor names are linked to their respective DataArrays.
    for key, value in reg_dict.items():
        # print(key, value)
        assert value.name == key
    reg_da = xr.concat(reg_dict.values(), dim='regressors')
    reg_da['regressors'] = list(reg_dict.keys())
    reg_da.name = 'regressors_time_series'
    rds['predicted'] = xr.dot(rds.params, reg_da) + rds.intercept
    rds = aux.xr_order(rds)
    # Returns the same dataset but with the total predicted reconstructed
    # geo-time-series field.
    result_ds = rds
    return result_ds
7,314
def _set_chap_security(module, array):
    """Set CHAP usernames and passwords"""
    pattern = re.compile("[^ ]{12,255}")
    if module.params["host_user"]:
        if not pattern.match(module.params["host_password"]):
            module.fail_json(
                msg="host_password must contain a minimum of 12 and a maximum of 255 characters"
            )
        try:
            array.set_host(
                module.params["name"],
                host_user=module.params["host_user"],
                host_password=module.params["host_password"],
            )
        except Exception:
            # was module.params(msg=...), which is not callable; fail_json is the intent
            module.fail_json(msg="Failed to set CHAP host username and password")
    if module.params["target_user"]:
        if not pattern.match(module.params["target_password"]):
            module.fail_json(
                msg="target_password must contain a minimum of 12 and a maximum of 255 characters"
            )
        try:
            array.set_host(
                module.params["name"],
                target_user=module.params["target_user"],
                target_password=module.params["target_password"],
            )
        except Exception:
            module.fail_json(msg="Failed to set CHAP target username and password")
7,315
async def list_features(location_id):
    """
    List features
    ---
    get:
        summary: List features
        tags:
         - features
        parameters:
          - name: envelope
            in: query
            required: false
            description: If set, the returned list will be wrapped in an envelope with this name.
        responses:
            200:
                description: A list of objects.
                content:
                    application/json:
                        schema:
                            type: array
                            items: Feature
    """
    location = g.active_incident.Location.find_by_id(location_id)
    if location is None:
        raise exceptions.NotFound(description="Location {} was not found".format(location_id))

    features = location.Feature.find()

    # Wrap the list if the caller requested an envelope.
    query = request.args
    if "envelope" in query:
        result = {query.get("envelope"): features}
    else:
        result = features

    return jsonify(result), HTTPStatus.OK
7,316
def delete_hosts(ec2_conn, ipa_client, hostnames):
    """ Unenrolls hosts from IPA and AWS """
    for hostname in hostnames:
        try:
            ipa_client.unenroll_host(hostname=hostname)
        except (KeyError, ipaclient.NotFoundError):
            pass

    ec2client.delete_instances(ec2_conn, hostnames=hostnames)
7,317
def shot_start_frame(shot_node):
    """
    Returns the start frame of the given shot
    :param shot_node: str
    :return: int
    """
    return sequencer.get_shot_start_frame(shot_node)
7,318
def word1(x: IntVar) -> UInt8:
    """Implementation for `WORD1`."""
    return word_n(x, 1)
7,319
def _getCols1():
    """
    Robs Version 1 CSV files
    """
    cols = 'Date,DOY,Time,Location,Satellite,Collection,Longitude,Latitude,SolarZenith,SolarAzimuth,SensorZenith,SensorAzimuth,ScatteringAngle,nval_AOT_1020_l20,mean_AOT_1020_l20,mean_AOT_870_l20,mean_AOT_675_l20,sdev_AOT_675_l20,mean_AOT_500_l20,mean_AOT_440_l20,mean_AOT_380_l20,mean_AOT_340_l20,mean_Water_cm_l20,nval_AOT_1020_l15,mean_AOT_1020_l15,mean_AOT_870_l15,mean_AOT_675_l15,sdev_AOT_675_l15,mean_AOT_500_l15,mean_AOT_440_l15,mean_AOT_380_l15,mean_AOT_340_l15,mean_Water_cm_l15,npix_AOT0550,mean_AOT0550,sdev_AOT0550,mean_rAOTse0550,sdev_rAOTse0550,mean_AOT0470corr_l,npix_AOT0550corr_l,pval_AOT0550corr_l,mean_AOT0550corr_l,sdev_AOT0550corr_l,mean_AOT0660corr_l,mean_AOT2100corr_l,mean_rAOTse0550_l,pval_rAOTse0550_l,mean_AOT0550sm_l,pval_AOT0550sm_l,mean_Aexp0470_0670_l,mean_surfre0470_l,mean_surfre0660_l,mean_surfre2100_l,mean_fiterr_l,mean_atype_l,mean_cfrac_l,mean_mconc_l,QA0470_l,mean_mref0470_l,mean_mref0550_l,mean_mref0660_l,mean_mref0870_l,mean_mref1200_l,mean_mref1600_l,mean_mref2100_l,pval_mref0470_l,pval_mref0550_l,pval_mref0660_l,pval_mref0870_l,pval_mref1200_l,pval_mref1600_l,pval_mref2100_l,mean_AOT0470ea_o,npix_AOT0550ea_o,pval_AOT0550ea_o,mean_AOT0550ea_o,sdev_AOT0550ea_o,mean_AOT0660ea_o,mean_AOT0870ea_o,mean_AOT1200ea_o,mean_AOT1600ea_o,mean_AOT2100ea_o,mean_AOT0470sa_o,npix_AOT0550sa_o,pval_AOT0550sa_o,mean_AOT0550sa_o,sdev_AOT0550sa_o,mean_AOT0660sa_o,mean_AOT0870sa_o,mean_AOT1200sa_o,mean_AOT1600sa_o,mean_AOT2100sa_o,mean_rAOTse0550a_o,mean_effr0550a_o,sdev_effr0550a_o,mean_solindx_sa_o,mean_solindx_la_o,mean_lsqerr_a_o,mean_cfrac_o,sdev_cfrac_o,QAavg_o,mean_mref0470_o,mean_mref0550_o,mean_mref0660_o,mean_mref0870_o,mean_mref1200_o,mean_mref1600_o,mean_mref2100_o,sdev_mref0470_o,sdev_mref0550_o,sdev_mref0660_o,sdev_mref0870_o,sdev_mref1200_o,sdev_mref1600_o,sdev_mref2100_o,mean_wni,mean_wir,pval_wni,pval_wir,mean_pathrad0470_l,mean_pathrad0660_l,mean_critref0470_l,mean_critref0660_l,mean_errprad0470_l,mean_errprad0660_l,mean_errcref0470_l,mean_errcref0660_l,mean_qwtprad0470_l,mean_qwtprad0660_l,mean_qwtcref0470_l,mean_qwtcref0660_l,npix_AOT0550dpbl_l,pval_AOT0550dpbl_l,mean_AOT0550dpbl_l,sdev_AOT0550dpbl_l,mean_AOT0412dpbl_l,mean_AOT0470dpbl_l,mean_AOT0660dpbl_l,mean_Aext0412_0470dpbl_l,mean_SSA0412dpbl_l,mean_SSA0470dpbl_l,mean_SSA0660dpbl_l,mean_surfre0412dpbl_l,mean_surfre0470dpbl_l,mean_surfre0660dpbl_l,tau_550_norm,eta_norm,tau_f,tau_c,alpha_norm,alpha_f,Deta,tau_466,tau_553,tau_644,tau_866,tau_2119,Angs_466_644,exp_errorO_pct,exp_errorL_pct,ncep_pwat,ncep_O3,ncep_pres,ncep_windspd,ncep_winddir'
    return cols
7,320
def extract_tmt_reporters(mzml_files: List[Path], output_path: Path, correction_factor_path: Path,
                          num_threads: int = 1, extraction_level: int = 3):
    """
    Takes about 1.5 minutes for a 700MB file with 40k MS2 scans
    """
    if not output_path.is_dir():
        output_path.mkdir(parents=True)

    if num_threads > 1:
        from .utils.multiprocessing_pool import JobPool
        processing_pool = JobPool(processes=num_threads)

    for mzml_file in mzml_files:
        if num_threads > 1:
            processing_pool.applyAsync(extract_and_correct_reporters,
                                       (mzml_file, output_path, correction_factor_path, extraction_level))
        else:
            extract_and_correct_reporters(mzml_file, output_path, correction_factor_path,
                                          extraction_level)

    if num_threads > 1:
        processing_pool.checkPool(printProgressEvery=1)
7,321
def sendMail(sendTo, msg):
    """
    To send a mail

    Args:
        sendTo (list): List of mail targets
        msg (str): Message
    """
    mail = smtplib.SMTP('smtp.gmail.com', 587)  # host and port
    # Hostname to send for this command defaults to the FQDN of the local host.
    mail.ehlo()
    mail.starttls()  # security connection
    mail.login(EMAIL_ID, PASSWORD)  # login part
    for person in sendTo:
        mail.sendmail(EMAIL_ID, person, msg)  # send part
        print("Mail sent successfully to " + person)
    mail.close()
7,322
def main():
    """ main """
    Session = sessionmaker(bind=engine)
    session = Session()
    NRFBase.metadata.create_all(engine)
    sdk_dir = "developer.nordicsemi.com/nRF5_SDK/"
    download_sdk(sdk_dir, "http://" + sdk_dir)
    sdks = SDKs(sdk_dir)
    for sdk_v, zip_path in sdks.dict.items():
        sdk = SDK(sdk_v, zip_path)
        print("\n ====================")
        print(" ", sdk_v, "=>", zip_path)
        print(" ====================")
        sdk.extract_softdevices()
        soft_devices = list(sdk.list_softdevices())
        for soft_dvc in soft_devices:
            if "nrf" in soft_dvc:
                nrf = soft_dvc.split(",")[0]
                sdvc = soft_dvc.split(",")[1]
                if sdk_v == "4.4.2":
                    header_dir = nrf + "/Include/"
                else:
                    header_dir = nrf + "/Include/" + sdvc + "/"
                linker_dir = nrf + "/Source/templates/gcc/"
                hex_dir = None
                print("\n=== {0} {1} ===".format(sdvc, nrf))
                print("Hex file for {0} not found in archive. Firmware's signature will "
                      "depend on the strings contained in the given binary".format(sdvc))
            else:
                nrf = "hex"
                sdvc = soft_dvc
                header_dir = "components/softdevice/" + sdvc + "/headers/"
                linker_dir = "components/softdevice/" + sdvc + "/toolchain/armgcc/"
                linkers_path = "./SDKs/" + sdk_v + "/" + linker_dir
                if os.path.exists(linkers_path) is False:
                    linker_dir = "components/toolchain/gcc/"
                hex_dir = "components/softdevice/" + sdvc + "/hex/"
                print("\n=== {0} {1} ===".format(sdvc, nrf))
                sdk.extract_hex(hex_dir)
            soft_device = SoftDevice(sdk_v, sdvc, nrf, header_dir, linker_dir,
                                     sdk.hex_path, session)
            soft_device.signature()
            print("SoftDevice Signature: {0}".format(soft_device.sign))
            soft_device.set_headers()  # Setting a list of header files for parsing
            try:
                soft_device.set_linkers()
                if soft_device.linkers != []:
                    soft_device.mem_parser()
            except IOError as err:
                print("I/O error: {0}".format(err))
            soft_device.svc_parser()
            print("SVCALLs, functions, structures' parsing completed")
            session.add(soft_device)
            session.commit()
            print("SoftDevice successfully added to database")
7,323
def run_primer3(sequence, region, primer3_exe: str, settings_dict: dict,
                padding=True, thermodynamic_params: Optional[Path] = None):
    """Run primer3. All other kwargs will be passed on to primer3."""
    if padding:
        target_start = region.padding_left
        target_len = len(sequence) - region.padding_left - region.padding_right
    else:
        target_start = 1
        target_len = len(sequence)

    target = ",".join(map(str, [target_start, target_len]))
    p3 = Primer3(primer3_exe, sequence, target, target, settings_dict,
                 thermodynamic_params=thermodynamic_params)
    p3_out = p3.run()
    primers = parse_primer3_output(p3_out)
    return primers
7,324
def plot_analytic(axespdf, axescdf,
                  z_cut, beta=2., f=1.,
                  jet_type='quark',
                  bin_space='lin',
                  plotnum=0,
                  label='Analytic, LL'):
    """Plots the analytic groomed GECF cdf on axescdf,
    and the groomed GECF pdf on axespdf.
    """
    verify_bin_space(bin_space)

    if bin_space == 'lin':
        angs = np.linspace(0, .5, 1000)
        angs_central = (angs[:-1] + angs[1:]) / 2.
    if bin_space == 'log':
        angs = np.logspace(-8, np.log10(.5), 1000)
        angs_central = np.sqrt(angs[:-1] * angs[1:])

    cdf = critSudakov_fc_LL(angs_central, z_cut, beta, f=f,
                            jet_type=jet_type)

    col = compcolors[(plotnum, 'light')]

    # Analytic plot:
    axescdf[0].plot(angs_central, cdf, **style_dashed,
                    color=col, zorder=.5, label=label)
    if len(axescdf) > 1:
        axescdf[1].plot(angs, np.ones(len(angs)), **style_dashed,
                        color=col, zorder=.5)

    if axespdf is not None:
        _, pdf = histDerivative(cdf, angs, giveHist=True,
                                binInput=bin_space)
        if bin_space == 'log':
            pdf = angs_central * pdf

        # Analytic plot:
        axespdf[0].plot(angs_central, pdf, **style_dashed,
                        color=col, zorder=.5, label=label)
        if len(axespdf) > 1:
            axespdf[1].plot(angs, np.ones(len(angs)), **style_dashed,
                            color=col, zorder=.5)
7,325
def filter_safe_dict(data, attrs=None, exclude=None):
    """
    Returns current names and values for valid writeable attributes.

    If ``attrs`` is given, the returned dict will contain only items named in
    that iterable.
    """

    def is_member(cls, k):
        v = getattr(cls, k)
        checks = [
            not k.startswith("_"),
            not inspect.ismethod(v) or getattr(v, "im_self", True),
            not inspect.isfunction(v),
            not isinstance(v, (classmethod, staticmethod, property)),
        ]
        return all(checks)

    cls = None
    if inspect.isclass(data):
        cls = data
        data = {k: getattr(cls, k) for k in dir(cls) if is_member(cls, k)}

    ret = {}
    for k, v in data.items():
        checks = [
            not k.startswith("_"),
            not inspect.ismethod(v) or getattr(v, "im_self", True),
            not isinstance(v, (classmethod, staticmethod, property)),
            not attrs or (k in attrs),
            not exclude or (k not in exclude),
        ]
        if all(checks):
            ret[k] = v
    return ret
7,326
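A small usage sketch (assuming the function above and its inspect import); Config is a made-up class:

class Config:
    debug = True
    name = "demo"
    _secret = "hidden"          # filtered: leading underscore

    @property
    def computed(self):         # filtered: property
        return 42

print(filter_safe_dict(Config))                  # {'debug': True, 'name': 'demo'}
print(filter_safe_dict(Config, attrs=["name"]))  # {'name': 'demo'}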
def get_member_from_list():
    """
    GET /lists/<address>/members/<member_address>
    :return:
    """
    req = client.lists_members.get(domain=domain,
                                   address="everyone@mailgun.zeefarmer.com",
                                   member_address="zerreissen@hotmail.com")
    print(req.json())
7,327
def checkUdimValue(udim):
    """None"""
    pass
7,328
def fixture_loqus_exe():
    """Return the path to a loqus executable"""
    return "a/path/to/loqusdb"
7,329
def setup_nat():
    """ Make sure IP forwarding is enabled """
    import fabtools
    fabtools.require.system.sysctl('net.ipv4.ip_forward', 1)
7,330
def test_cases():
    """Some sample test cases"""
    assert count('aba') == {'a': 2, 'b': 1}
    assert count('abcddbacdb') == {'a': 2, 'b': 3, 'c': 2, 'd': 3}
    assert count('') == {}
    print("Test Success!")
7,331
def inference(tasks, name, convnet_model, convnet_weight_path, input_patch_size,
              output_patch_size, output_patch_overlap, output_crop_margin, patch_num,
              num_output_channels, dtype, framework, batch_size, bump,
              mask_output_chunk, mask_myelin_threshold,
              input_chunk_name, output_chunk_name):
    """Perform convolutional network inference for chunks."""
    with Inferencer(
            convnet_model,
            convnet_weight_path,
            input_patch_size=input_patch_size,
            output_patch_size=output_patch_size,
            num_output_channels=num_output_channels,
            output_patch_overlap=output_patch_overlap,
            output_crop_margin=output_crop_margin,
            patch_num=patch_num,
            framework=framework,
            dtype=dtype,
            batch_size=batch_size,
            bump=bump,
            mask_output_chunk=mask_output_chunk,
            mask_myelin_threshold=mask_myelin_threshold,
            dry_run=state['dry_run']) as inferencer:
        for task in tasks:
            if task is not None:
                if 'log' not in task:
                    task['log'] = {'timer': {}}
                start = time()
                task[output_chunk_name] = inferencer(task[input_chunk_name])
                task['log']['timer'][name] = time() - start
                task['log']['compute_device'] = inferencer.compute_device
            yield task
7,332
def set_command_line_flags(flags: List[str]):
    """Set the command line flags."""
    sys.argv = flags
    FLAGS.unparse_flags()
    FLAGS(flags)
7,333
def get_reads_section(read_length_r1, read_length_r2):
    """
    Yield a Reads sample sheet section with the specified R1/R2 length.

    :rtype: SampleSheetSection
    """
    rows = [[str(read_length_r1)], [str(read_length_r2)]]
    return SampleSheetSection(SECTION_NAME_READS, rows)
7,334
def dummy_user_as_group_manager(logged_in_dummy_user, dummy_group):
    """Make the dummy user a manager of the dummy-group group."""
    ipa_admin.group_add_member(a_cn="dummy-group", o_user="dummy")
    ipa_admin.group_add_member_manager(a_cn="dummy-group", o_user="dummy")
    yield
7,335
def RegisterCallback(callback, msgtype):
    """Register a callback method for the given message type

    @param callback: callable
    @param msgtype: message type
    """
    if isinstance(msgtype, tuple):
        mtype = '.'.join(msgtype)
    else:
        mtype = msgtype

    if mtype not in _CALLBACK_REGISTRY:
        _CALLBACK_REGISTRY[mtype] = list()

    if callback not in _CALLBACK_REGISTRY[mtype]:
        _CALLBACK_REGISTRY[mtype].append(callback)
7,336
def validate(number):
    """Check if the number is valid. This checks the length, format and
    check digit."""
    number = compact(number)
    if not all(x in _alphabet for x in number):
        raise InvalidFormat()
    if len(number) != 16:
        raise InvalidLength()
    if number[-1] == '-':
        raise InvalidFormat()
    if number[-1] != calc_check_digit(number):
        raise InvalidChecksum()
    return number
7,337
def find_atom(i):
    """
    Set the anchor to the atom with index `i`.
    """
    gcv().find_atom(i)
7,338
def parse_directory(filename):
    """
    read html file (nook directory listing), return users as
    [{'name':..., 'username':...}, ...]
    """
    try:
        file = open(filename)
        html = file.read()
        file.close()
    except:
        return []

    users = []
    for match in re.finditer(r'<b>([^<]+)</b>.*?mailto:([^@]+)@', html):
        groups = match.groups()
        users.append({'name': groups[0], 'username': groups[1]})
    users.sort(key=lambda x: x['username'])
    return users
7,339
def ShortAge(dt):
    """Returns a short string describing a relative time in the past.

    Args:
        dt: A datetime.

    Returns:
        A short string like "5d" (5 days) or "32m" (32 minutes).
    """
    # TODO(kpy): This is English-specific and needs localization.
    seconds = time.time() - UtcToTimestamp(dt)
    minutes = int(seconds / 60 + 0.5)
    hours = int(seconds / 3600 + 0.5)
    days = int(seconds / 86400 + 0.5)
    if seconds < 60:
        return 'just now'
    if minutes < 100:
        return '%dm ago' % minutes
    if hours < 48:
        return '%dh ago' % hours
    return '%dd ago' % days
7,340
def test_finder_installs_pre_releases_with_version_spec():
    """
    Test PackageFinder only accepts stable versioned releases by default.
    """
    req = InstallRequirement.from_line("bar>=0.0.dev0", None)
    links = ["https://foo/bar-1.0.tar.gz", "https://foo/bar-2.0b1.tar.gz"]

    finder = PackageFinder(links, [], session=PipSession())

    with patch.object(finder, "_get_pages", lambda x, y: []):
        link = finder.find_requirement(req, False)
        assert link.url == "https://foo/bar-2.0b1.tar.gz"

    links.reverse()
    finder = PackageFinder(links, [], session=PipSession())

    with patch.object(finder, "_get_pages", lambda x, y: []):
        link = finder.find_requirement(req, False)
        assert link.url == "https://foo/bar-2.0b1.tar.gz"
7,341
def _raxml(exe, msa, tree, model, gamma, alpha, freq, outfile):
    """
    Reconstruct ancestral sequences using RAxML_.

    :param exe: str, path to the executable of an ASR program.
    :param msa: str, path to the MSA file (must be in FASTA format).
    :param tree: str, path to the tree file (must be in NEWICK format) or a
        NEWICK format tree string (must start with "(" and end with ";").
    :param model: namedtuple, substitution model for ASR.
    :param gamma: int, the number of categories for the discrete gamma rate
        heterogeneity model.
    :param freq: str, the equilibrium frequencies of the twenty amino acids.
    :param alpha: float, the shape (alpha) for the gamma rate heterogeneity.
    :param outfile: str, path to the output file.
    :return: tuple, a tree object and a dict for sequences.

    .. note::
        See doc string of function ``asr()`` for details of all arguments.

    .. _RAxML: https://sco.h-its.org/exelixis/web/software/raxml/
    """
    if model.type == 'custom':
        mf = model.name
        name = 'WAG'
        info('Use model file {} for ancestral states reconstruction.'.format(mf))
    else:
        name = model.name
        if name.upper() in RAXML_MODELS:
            mf = ''
            info('Use {} model for ancestral states '
                 'reconstruction.'.format(name))
        else:
            error('RAxML does not accept {} model, aborted.'.format(name))
            sys.exit(1)

    wd, tf = tempfile.mkdtemp(dir=os.path.dirname(msa)), 'raxml.tree.newick'
    tf = tree.file(os.path.join(wd, tf), brlen=False)
    cmd = [exe, '-f', 'A', '-s', msa, '-t', tf, '-n', 'iMC']
    m = 'PROTGAMMA' if (gamma or model.gamma) else 'PROTCAT'
    m += name.upper()
    freq = 'F' if freq == 'estimate' or model.frequency == 'estimate' else 'X'
    if 'AUTO' not in m:
        m += freq
    cmd.extend(['-m', m])
    if mf:
        cmd.extend(['-P', mf])
    cmd.append('--silent')

    try:
        info('Reconstructing ancestral states for {} using RAxML.'.format(msa))
        process = Popen(cmd, cwd=wd, stdout=PIPE, stderr=PIPE,
                        universal_newlines=True)
        code = process.wait()
        msg = process.stdout.read() or process.stderr.read()
        # Sometimes RAxML does not return a non-zero code when it runs into an error
        if code:
            error('Ancestral reconstruction via RAxML failed for {} due to:'
                  '\n{}'.format(msa, indent(msg, prefix='\t')))
            sys.exit(1)
        else:
            ancestor = os.path.join(wd, 'RAxML_marginalAncestralStates.iMC')
            # Check again to see if the reconstruction succeeded
            if not os.path.isfile(ancestor):
                msg = '\n'.join([line for line in msg.splitlines()
                                 if line.strip().startswith('ERROR')])
                error('Ancestral reconstruction via RAxML failed for {} due to:'
                      '\n{}'.format(msa, indent(msg, prefix='\t')))
                sys.exit(1)

            info('Parsing ancestral sequence reconstruction results.')
            with open(ancestor) as handle:
                ancestor = dict(line.strip().split() for line in handle)
            tree = os.path.join(wd, 'RAxML_nodeLabelledRootedTree.iMC')
            tree = Phylo.read(tree, 'newick')
            for clade in tree.find_clades():
                if clade.confidence and not clade.name:
                    clade.name = str(clade.confidence)
            tree, ancestor = _label(tree, ancestor)
            for record in AlignIO.read(msa, 'fasta'):
                ancestor[record.id] = record.seq
            _write(tree, ancestor, [], {}, outfile)
            info('Successfully saved ancestral states reconstruction '
                 'results to {}.'.format(outfile))
            return outfile
    except OSError as err:
        print(err)
        error('Invalid RAxML executable {}, running RAxML failed for '
              '{}.'.format(exe, msa))
        sys.exit(1)
    finally:
        shutil.rmtree(wd)
7,342
def dec_multiply(*args) -> Optional[Decimal]:
    """
    Multiplication of numbers passed as *args.

    Args:
        *args: numbers we want to multiply

    Returns:
        The result of the multiplication as a decimal number

    Examples:
        >>> dec_multiply(3, 3.5, 4, 2.34)
        Decimal('98.280')
        >>> dec_multiply() is None
        True
    """
    if not args:
        return
    total = Decimal(str(args[0]))
    for element in args[1:]:
        total *= Decimal(str(element))
    return total
7,343
def get_networks():
    """
    Returns a list of all available network names

    :return: JSON string, ex. "['bitcoin','bitcoin-cash','dash','litecoin']"
    """
    return json.dumps([x[0] for x in db.session.query(Node.network).distinct().all()])
7,344
def consume(queue):
    """ """
    while True:
        data = queue.get()
        print(f"consume: {id(data)}")
        queue.task_done()
7,345
def test_no_update_existing(api: KeepApi, config: Config, fsync: FileSync) -> None:
    """Existing note files are not updated if id not in the updated list"""
    fsync.start(None)
    config.state = State.InitialSync

    note = Note()
    note.title = "My Note"
    note.text = "Initial text"
    fname = write_note(api, config, note)

    note.text = "Some text"
    api.add(note)
    nf = NoteFile.from_note(api, config, note)

    renames = fsync.finish_startup([])
    assert renames == {}
    assert os.path.exists(fname), "The note file should still exist"

    new_note = Note()
    parser.parse(api, config, fname, new_note)
    assert new_note.title == "My Note", "Note file title should still match"
    assert new_note.text == "Initial text", "Note file text should NOT be updated"
7,346
def get_cache_key_generator(request=None, generator_cls=None, get_redis=None):
    """Return an instance of ``CacheKeyGenerator`` configured with a redis
    client and the right cache duration.
    """
    # Compose.
    if generator_cls is None:
        generator_cls = CacheKeyGenerator
    if get_redis is None:
        get_redis = get_redis_client

    # Instantiate and return the cache key generator.
    return generator_cls(get_redis(request))
7,347
def is_from_derms(output):
    """Given an output, check if it's from DERMS simulation.

    Parameters
    ----------
    output: str or pathlib.Path
    """
    if not isinstance(output, pathlib.Path):
        output = pathlib.Path(output)
    derms_info_file = output / DERMS_INFO_FILENAME
    if derms_info_file.exists():
        return True
    return False
7,348
def create_property():
    """
    Check setting some RDF node as an RDF:Property dependent on rdflib.
    """
    g = rdflib.Graph()
    node = rdflib.BNode()
    rdflib_model.type_property(g, node)
    # The original asserted a bare 3-tuple (always truthy); the intent is to
    # compare the stored triple against the expected one.
    assert list(g.triples((None, None, None)))[0] == (
        node,
        rdflib.RDF.type,
        rdflib.RDF.Property,
    )
7,349
def js_squeeze(parser, token):
    """
    {% js_squeeze "js/dynamic_minifyed.js" "js/script1.js,js/script2.js" %}
    will produce STATIC_ROOT/js/dynamic_minifyed.js
    """
    bits = token.split_contents()
    if len(bits) != 3:
        raise template.TemplateSyntaxError("%r tag requires exactly two arguments" % bits[0])
    return SqueezeNode('js', *bits[1:])
7,350
def view_extracted_data() -> str:
    """
    Display raw extracted data from Documents
    """
    extracted_data = read_collection(FIRESTORE_PROJECT_ID, FIRESTORE_COLLECTION)

    if not extracted_data:
        return render_template("index.html", message_error="No data to display")

    return render_template("index.html", extracted_data=extracted_data)
7,351
def get_lstm_trump_text():
    """Use the LSTM trump tweets model to generate text."""
    data = json.loads(request.data)
    sl = data["string_length"]
    st = data["seed_text"]
    gen_text = lstm_trump.generate_text(seed_text=st, pred_len=int(sl))
    return json.dumps(gen_text)
7,352
def calc_and_save_YLMs(measure_obj):
    """
    COMPUTE YLMS SEQUENTIALLY AND SAVE.

    xmiydivr = e^(-iφ)sin(θ) = (x - iy)/r
    zdivr = cos(θ) = z/r
    xmiydivrsq = e^(-2iφ)sin^2(θ) = [(x - iy)/r]^2
    zdivrsq = cos^2(θ) = [z/r]^2
    ..cu means cubed
    ..ft means to the fourth power
    ..fi means to the fifth power

    Args:
        measure_obj (class: measure): an object that carries the necessary
            values and data structures to compute 3/4 PCFs
    """
    if hasattr(measure_obj, 'X'):
        X = measure_obj.X
        Y = measure_obj.Y
        Z = measure_obj.Z
        R = measure_obj.R
    else:
        raise AssertionError("You need to run create_XYZR() first")

    # ell, m = 0, 0
    y00 = .5 * (1. / np.pi) ** .5 * np.ones((measure_obj.ld_one_d,
                                             measure_obj.ld_one_d,
                                             measure_obj.ld_one_d))
    ylm_save(measure_obj, y00, 0, 0)
    del y00

    # ell, m = 1, -1
    xdivr = X / R
    del X
    ydivr = Y / R  # we'll need these as individuals later anyway.
    del Y
    xmiydivr = xdivr - 1j * ydivr
    y1m1 = .5 * np.sqrt(3. / (2. * np.pi)) * xmiydivr
    ylm_save(measure_obj, y1m1, 1, 1)
    del y1m1

    # ell, m = 1, 0
    zdivr = Z / R
    del Z
    y10 = .5 * np.sqrt(3. / np.pi) * zdivr
    ylm_save(measure_obj, y10, 1, 0)
    del y10

    # ell, m = 2, -2
    xmiydivrsq = xmiydivr * xmiydivr
    y2m2 = .25 * np.sqrt(15. / (2. * np.pi)) * xmiydivrsq
    ylm_save(measure_obj, y2m2, 2, 2)
    del y2m2

    # ell, m = 2, -1
    y2m1 = .5 * np.sqrt(15. / (2. * np.pi)) * xmiydivr * zdivr
    ylm_save(measure_obj, y2m1, 2, 1)
    del y2m1

    # ell, m = 2, 0
    xdivrsq = xdivr * xdivr
    ydivrsq = ydivr * ydivr
    zdivrsq = zdivr * zdivr
    y20 = .25 * np.sqrt(5. / np.pi) * (2. * zdivrsq - xdivrsq - ydivrsq)
    ylm_save(measure_obj, y20, 2, 0)
    del y20

    # ell, m = 3, -3
    xmiydivrcu = xmiydivr * xmiydivrsq
    y3m3 = .125 * np.sqrt(35. / np.pi) * xmiydivrcu
    ylm_save(measure_obj, y3m3, 3, 3)
    del y3m3

    # ell, m = 3, -2
    y3m2 = .25 * np.sqrt(105. / (2. * np.pi)) * xmiydivrsq * zdivr
    ylm_save(measure_obj, y3m2, 3, 2)
    del y3m2

    # ell, m = 3, -1
    y3m1 = .125 * np.sqrt(21. / np.pi) * (xmiydivr * (4. * zdivrsq - xdivrsq - ydivrsq))
    ylm_save(measure_obj, y3m1, 3, 1)
    del y3m1

    # ell, m = 3, 0
    y30 = .25 * np.sqrt(7. / np.pi) * (zdivr * (2. * zdivrsq - 3. * xdivrsq - 3. * ydivrsq))
    ylm_save(measure_obj, y30, 3, 0)
    del y30

    # ell, m = 4, -4
    xmiydivrft = xmiydivr * xmiydivrcu
    y4m4 = .1875 * np.sqrt(35. / (2. * np.pi)) * xmiydivrft
    ylm_save(measure_obj, y4m4, 4, 4)
    del y4m4

    # ell, m = 4, -3
    y4m3 = .375 * np.sqrt(35. / np.pi) * xmiydivrcu * zdivr
    ylm_save(measure_obj, y4m3, 4, 3)
    del y4m3

    # ell, m = 4, -2
    y4m2 = .375 * np.sqrt(5. / (2. * np.pi)) * xmiydivrsq * (7. * zdivrsq - 1)
    ylm_save(measure_obj, y4m2, 4, 2)
    del y4m2

    # ell, m = 4, -1
    y4m1 = .375 * np.sqrt(5. / np.pi) * xmiydivr * zdivr * (7. * zdivrsq - 3.)
    ylm_save(measure_obj, y4m1, 4, 1)
    del y4m1

    # ell, m = 4, 0
    zdivrft = zdivrsq * zdivrsq
    y40 = .1875 * np.sqrt(1. / np.pi) * (35. * zdivrft - 30. * zdivrsq + 3.)
    ylm_save(measure_obj, y40, 4, 0)
    del y40

    # ell, m = 5, -5
    xmiydivrfi = xmiydivr * xmiydivrft
    y5m5 = (3. / 32.) * np.sqrt(77. / np.pi) * xmiydivrfi
    ylm_save(measure_obj, y5m5, 5, 5)
    del y5m5

    # ell, m = 5, -4
    y5m4 = (3. / 16.) * np.sqrt(385. / (2. * np.pi)) * xmiydivrft * zdivr
    ylm_save(measure_obj, y5m4, 5, 4)
    del y5m4

    # ell, m = 5, -3
    y5m3 = (1. / 32.) * np.sqrt(385. / np.pi) * xmiydivrcu * (9. * zdivrsq - 1.)
    ylm_save(measure_obj, y5m3, 5, 3)
    del y5m3

    # ell, m = 5, -2
    zdivrcu = zdivr * zdivrsq
    y5m2 = (1. / 8.) * np.sqrt(1155. / (2. * np.pi)) * xmiydivrsq * (3. * zdivrcu - zdivr)
    ylm_save(measure_obj, y5m2, 5, 2)
    del y5m2

    # ell, m = 5, -1
    y5m1 = (1. / 16.) * np.sqrt(165. / (2. * np.pi)) * xmiydivr * (21. * zdivrft - 14. * zdivrsq + 1.)
    ylm_save(measure_obj, y5m1, 5, 1)
    del y5m1

    # ell, m = 5, 0
    zdivrfi = zdivr * zdivrft
    y50 = (1. / 16.) * np.sqrt(11. / np.pi) * (63. * zdivrfi - 70. * zdivrcu + 15. * zdivr)
    ylm_save(measure_obj, y50, 5, 0)
    del y50
7,353
def collect_bazel_rules(root_path):
    """Collects and returns all bazel rules from root path recursively."""
    rules = []
    for cur, _, _ in os.walk(root_path):
        build_path = os.path.join(cur, "BUILD.bazel")
        if os.path.exists(build_path):
            rules.extend(read_bazel_build("//" + cur))
    return rules
7,354
def countSort(alist):
    """Counting sort"""
    if alist == []:
        return []
    cntLstLen = max(alist) + 1
    cntLst = [0] * cntLstLen
    for i in range(len(alist)):
        cntLst[alist[i]] += 1  # a value alist[i] = k is tallied in slot k
    alist.clear()
    for i in range(cntLstLen):
        while cntLst[i] > 0:  # emit each value i as many times as it was counted
            alist.append(i)
            cntLst[i] -= 1
    return alist
7,355
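Example usage of the counting sort above; it runs in O(n + k) time with O(k) extra space, where k is the largest value in the list:

nums = [4, 2, 2, 8, 3, 3, 1]
print(countSort(nums))  # [1, 2, 2, 3, 3, 4, 8]; nums is also sorted in place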
def _FloatsTraitsBase_read_values_dataset(arg2, arg3, arg4, arg5):
    """_FloatsTraitsBase_read_values_dataset(hid_t arg2, hid_t arg3, hid_t arg4, unsigned int arg5) -> FloatsList"""
    return _RMF_HDF5._FloatsTraitsBase_read_values_dataset(arg2, arg3, arg4, arg5)
7,356
def TRI_Query(state=None, county=None, area_code=None, year=None, chunk_size=100000):
    """Query the EPA Toxic Release Inventory Database

    This function constructs a query for the EPA Toxic Release Inventory API,
    with optional arguments for details such as the two-letter state, county
    name, area code, and year. More info here:
    https://www.epa.gov/enviro/envirofacts-data-service-api
    """
    base_url = 'https://data.epa.gov/efservice/'
    # Declare the names of the tables that we want to pull
    table_name1 = 'TRI_FACILITY'
    table_name2 = 'TRI_REPORTING_FORM'
    table_name3 = 'TRI_TRANSFER_QTY'
    output_format = 'CSV'
    query = base_url
    query += table_name1 + '/'
    # Add in the state qualifier, if the desired_state variable is named
    if state:
        query += 'state_abbr/=/' + state + '/'
    # Add in the county qualifier, if the desired_county variable is named
    if county:
        query += 'county_name/' + county + '/'
    # Add in the area code qualifier, if the desired_area_code variable is named
    if area_code:
        query += 'zip_code/' + str(area_code) + '/'
    # Add in the next table name and year qualifier, if the desired_year variable is named
    query += table_name2 + '/'
    if year:
        if type(year) is list:
            query += 'reporting_year/' + str(year[0]) + '/' + str(year[1]) + '/'
        else:
            query += 'reporting_year/' + str(year) + '/'
    # Add the third table
    query += table_name3 + '/'
    count_query = query + 'count/'
    count_xml = requests.get(count_query).content
    nrows = int(BeautifulSoup(count_xml, features="lxml").find('requestrecordcount').contents[0])
    # Add in the desired output format to the query
    csv_query = query + output_format
    # Return the completed query
    bar = Bar('Downloading Records:', max=nrows,
              suffix='%(index)d/%(max)d %(percent).1f%% - %(eta)ds')
    bar.check_tty = False
    s = requests.get(csv_query).content
    dataframe = pd.read_csv(io.StringIO(s.decode('utf-8')), engine='python',
                            encoding='utf-8', error_bad_lines=False)
    bar.next(n=dataframe.shape[0])
    nrows_prev = dataframe.shape[0]
    while dataframe.shape[0] < nrows:
        new_query = query + 'rows/' + str(dataframe.shape[0]) + ':' \
            + str(dataframe.shape[0] + chunk_size) + '/'
        csv_query = new_query + output_format
        s = requests.get(csv_query).content
        dataframe = dataframe.append(pd.read_csv(io.StringIO(s.decode('utf-8')),
                                                 engine='python', encoding='utf-8',
                                                 error_bad_lines=False))
        bar.next(n=dataframe.shape[0] - nrows_prev)
        nrows_prev = dataframe.shape[0]
    bar.finish()
    # Do the replacement:
    if 'TRI_TRANSFER_QTY.TYPE_OF_WASTE_MANAGEMENT' in dataframe.columns:
        dataframe.replace({'TRI_TRANSFER_QTY.TYPE_OF_WASTE_MANAGEMENT': wm_dict},
                          inplace=True)
    return dataframe
7,357
def get_monthly_schedule(year, month):
    """
    :param year: a string, e.g. 2018
    :param month: a string, e.g. january
    :return schedule: a pd.DataFrame containing game info for the month
    """
    url = f'https://www.basketball-reference.com/leagues/NBA_{year}_games-{month}.html'
    page = requests.get(url)
    tree = html.fromstring(page.content)

    game_date = tree.xpath('//*[@data-stat="date_game"]/a/text()')
    road_team = tree.xpath('//*[@data-stat="visitor_team_name"]/a/text()')
    road_pts = tree.xpath('//*[@data-stat="visitor_pts"]/text()')
    road_pts.pop(0)  # Remove column name
    home_team = tree.xpath('//*[@data-stat="home_team_name"]/a/text()')
    home_pts = tree.xpath('//*[@data-stat="home_pts"]/text()')
    home_pts.pop(0)  # Remove column name
    box_score_url = tree.xpath('//*[@data-stat="box_score_text"]/a/@href')

    schedule = {
        'DATE': game_date,
        'ROAD_TEAM': road_team,
        'ROAD_PTS': road_pts,
        'HOME_TEAM': home_team,
        'HOME_PTS': home_pts,
        'BOX_SCORE_URL': box_score_url,
    }
    # Create a dictionary with different length columns (Series) that is
    # suitable for a DataFrame
    schedule = dict([(k, pd.Series(v)) for k, v in schedule.items()])
    schedule = pd.DataFrame(schedule)
    schedule.dropna(how='any', inplace=True)
    schedule['ROAD_TM'] = schedule['ROAD_TEAM'].map(team_name_abbrev)
    schedule['HOME_TM'] = schedule['HOME_TEAM'].map(team_name_abbrev)
    schedule = schedule[['DATE', 'ROAD_TEAM', 'ROAD_TM', 'ROAD_PTS',
                         'HOME_TEAM', 'HOME_TM', 'HOME_PTS', 'BOX_SCORE_URL']]
    BBALLREF = 'https://www.basketball-reference.com'
    schedule['BOX_SCORE_URL'] = \
        schedule['BOX_SCORE_URL'].apply(lambda x: BBALLREF + x)

    def format_date(date):
        return arrow.get(date, 'ddd, MMM D, YYYY').datetime.strftime('%Y-%m-%d')

    schedule['DATE'] = schedule['DATE'].apply(format_date)

    return schedule
7,358
def test_erroring(mock_send_report, handler_that_errors, mock_context):
    """Assert that the agent catches and traces uncaught exceptions"""
    iopipe = IOpipeCore()
    assert iopipe.report is None

    try:
        iopipe.error(Exception("Before report is created"))
    except Exception:
        pass

    assert iopipe.report is None

    iopipe, handler = handler_that_errors

    with pytest.raises(ValueError):
        handler(None, mock_context)

    assert iopipe.report.report["errors"]["name"] == "ValueError"
    assert iopipe.report.report["errors"]["message"] == "Behold, a value error"
    assert isinstance(iopipe.report.report["errors"]["stack"], str)
    assert "@iopipe/error" in iopipe.report.labels
7,359
async def test_discovery_update_attr(opp, mqtt_mock, caplog):
    """Test update of discovered MQTTAttributes."""
    entry = MockConfigEntry(domain=mqtt.DOMAIN)
    await async_start(opp, "openpeerpower", {}, entry)
    data1 = (
        '{ "name": "Beer",'
        ' "command_topic": "test_topic",'
        ' "json_attributes_topic": "attr-topic1" }'
    )
    data2 = (
        '{ "name": "Beer",'
        ' "command_topic": "test_topic",'
        ' "json_attributes_topic": "attr-topic2" }'
    )
    async_fire_mqtt_message(opp, "openpeerpower/lock/bla/config", data1)
    await opp.async_block_till_done()
    async_fire_mqtt_message(opp, "attr-topic1", '{ "val": "100" }')
    state = opp.states.get("lock.beer")
    assert state.attributes.get("val") == "100"

    # Change json_attributes_topic
    async_fire_mqtt_message(opp, "openpeerpower/lock/bla/config", data2)
    await opp.async_block_till_done()

    # Verify we are no longer subscribing to the old topic
    async_fire_mqtt_message(opp, "attr-topic1", '{ "val": "50" }')
    state = opp.states.get("lock.beer")
    assert state.attributes.get("val") == "100"

    # Verify we are subscribing to the new topic
    async_fire_mqtt_message(opp, "attr-topic2", '{ "val": "75" }')
    state = opp.states.get("lock.beer")
    assert state.attributes.get("val") == "75"
7,360
def get_gzip_guesses(preview, stream, chunk_size, max_lines):
    """
    :type preview: str
    :param preview: The initial chunk of content read from the s3 file stream.

    :type stream: botocore.response.StreamingBody
    :param stream: StreamingBody object of the s3 dataset file.

    :type chunk_size: int
    :param chunk_size: Maximum size of the chunk in bytes peeking.

    :type max_lines: int
    :param max_lines: Maximum number of lines to peek into.
    """
    COMPRESSION_TYPE = 'GZIP'
    guesses = dict()

    dialect = csv.Sniffer().sniff(
        zlib.decompressobj(zlib.MAX_WBITS | 16).decompress(preview))
    has_header = csv.Sniffer().has_header(
        zlib.decompressobj(zlib.MAX_WBITS | 16).decompress(preview))

    d = zlib.decompressobj(zlib.MAX_WBITS | 16)
    lines_read = 0
    first_row = True
    data = ''

    while True:
        if first_row:
            chunk = preview
        else:
            chunk = stream.read(chunk_size)
        if not chunk:
            break
        data += d.decompress(chunk)
        if '\n' in data:
            guesses, data, lines_read = analyze_data(
                data, lines_read, max_lines, first_row, guesses, dialect, has_header)
            first_row = False
            if lines_read >= max_lines:
                return guesses, has_header, COMPRESSION_TYPE, dialect

    return guesses, has_header, COMPRESSION_TYPE, dialect
7,361
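A self-contained illustration of the core sniffing step above, compressing a tiny made-up CSV in memory and letting csv.Sniffer recover the dialect and header flag:

import csv
import gzip
import zlib

raw = b"name,age\nalice,30\nbob,25\n"
compressed = gzip.compress(raw)

# zlib.MAX_WBITS | 16 tells zlib to expect a gzip wrapper around the stream.
preview = zlib.decompressobj(zlib.MAX_WBITS | 16).decompress(compressed).decode()
dialect = csv.Sniffer().sniff(preview)
print(dialect.delimiter)                  # ','
print(csv.Sniffer().has_header(preview))  # True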
def remove_user(ctx, username, endpoint):
    """Remove a user's access from a server

    This command is used to remove a user from the system after they have been
    removed from the external auth provider. This step is required because even
    if a user can no longer log in, they could in theory still exchange a PAT
    for valid credentials.

    Running this command will delete a user's PATs and group memberships.

    To run this command, you must set the environment variable `HOSS_PAT` to a
    valid PAT from a user with the `admin` role.
    """
    user.remove_user(username, endpoint)
7,362
def commit(image):
    """
    Commit changes from a poured container to an image
    """
    if image != "None":
        os.environ["maple_image"] = str(image)
    Backend().container.commit()
7,363
def make_preds_epoch(classifier: nn.Module,
                     data: List[SentenceEvidence],
                     batch_size: int,
                     device: str = None,
                     criterion: nn.Module = None,
                     tensorize_model_inputs: bool = True):
    """Predictions for more than one batch.

    Args:
        classifier: a module that looks like an AttentiveClassifier
        data: a list of elements to make predictions over. These must be
            SentenceEvidence objects.
        batch_size: the biggest chunk we can fit in one batch.
        device: Optional; what compute device this should run on
        criterion: Optional; a loss function
        tensorize_model_inputs: should we convert our data to tensors before
            passing it to the model? Useful if we have a model that performs
            its own tokenization
    """
    epoch_loss = 0
    epoch_soft_pred = []
    epoch_hard_pred = []
    epoch_truth = []
    batches = _grouper(data, batch_size)
    classifier.eval()
    for batch in batches:
        loss, soft_preds, hard_preds, targets = make_preds_batch(
            classifier, batch, device, criterion=criterion,
            tensorize_model_inputs=tensorize_model_inputs)
        if loss is not None:
            epoch_loss += loss.sum().item()
        epoch_hard_pred.extend(hard_preds)
        epoch_soft_pred.extend(soft_preds.cpu())
        epoch_truth.extend(targets)
    epoch_loss /= len(data)
    epoch_hard_pred = [x.item() for x in epoch_hard_pred]
    epoch_truth = [x.item() for x in epoch_truth]
    return epoch_loss, epoch_soft_pred, epoch_hard_pred, epoch_truth
7,364
def array_3_1(data):
    """
    Purpose: convert a 3-D array into a 1-D array.

    Args:
        data: image data, a 3-D array.

    Returns: image data as a 1-D array.
    """
    # (original note: skipping the many validity checks)
    shape = data.shape
    width = shape[0]
    height = shape[1]
    # z = list()
    z = np.zeros([width * height, 1])
    for i in range(0, width):
        for j in range(0, height):
            # Row-major flat index; the original used i * width + j, which
            # collides for non-square images.
            index = i * height + j
            z[index][0] = data[i, j, 0]
            # z.append(data[i, j, 0])
    return z
7,365
def UpdateString(update_intervals):
    """Calculates a short and long message to represent frequency of updates.

    Args:
        update_intervals: A list of interval numbers (between 0 and 55) that
            represent the times an update will occur

    Returns:
        A two-tuple of the long and short message (respectively) corresponding
        to the frequency. This is intended to be sent via AJAX and hence the
        tuple is turned into json before being returned.

    Raises:
        BadInterval in the case that the length of update_intervals is not
        a key in the constant RESPONSES
    """
    length = len(update_intervals)
    if length not in RESPONSES:
        raise BadInterval(length)
    else:
        return json.dumps(RESPONSES[length])
7,366
def coalesce(*xs: Optional[T]) -> T:
    """Return the first non-None value from the list; there must be at least one"""
    for x in xs:
        if x is not None:
            return x
    assert False, "Expected at least one element to be non-None"
7,367
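Example usage of coalesce, falling through None values to the first real one:

timeout = coalesce(None, None, 30)
print(timeout)  # 30: the first non-None argument wins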
def deleteqospolicy(ctx,
                    # Mandatory main parameter
                    qospolicyid):
    """You can use the DeleteQoSPolicy method to delete a QoS policy from the
    system. The QoS settings for all volumes created or modified with this
    policy are unaffected."""

    cli_utils.establish_connection(ctx)

    ctx.logger.info(": qospolicyid = " + str(qospolicyid) + ";")

    try:
        _DeleteQoSPolicyResult = ctx.element.delete_qos_policy(qos_policy_id=qospolicyid)
    except common.ApiServerError as e:
        ctx.logger.error(e.message)
        exit()
    except BaseException as e:
        ctx.logger.error(e.__str__())
        exit()
    if ctx.json:
        print(simplejson.dumps(simplejson.loads(_DeleteQoSPolicyResult), indent=4))
        return
    else:
        cli_utils.print_result(_DeleteQoSPolicyResult, ctx.logger, as_json=ctx.json,
                               as_pickle=ctx.pickle, depth=ctx.depth,
                               filter_tree=ctx.filter_tree)
7,368
def test_upgrade_db_2_to_3(data_dir, username):
    """Test upgrading the DB from version 2 to version 3, rename BCHSV to BSV"""
    msg_aggregator = MessagesAggregator()
    data = DataHandler(data_dir, msg_aggregator)
    with creation_patch:
        data.unlock(username, '123', create_new=True)
    # Manually set version (both here and in 4 -> 5 it needs to be done like
    # this, and the target patch can't be used for some reason. Still have not
    # debugged what fails.)
    cursor = data.db.conn.cursor()
    cursor.execute(
        'INSERT OR REPLACE INTO settings(name, value) VALUES(?, ?)',
        ('version', str(2)),
    )
    data.db.conn.commit()

    populate_db_and_check_for_asset_renaming(
        cursor=cursor,
        data=data,
        data_dir=data_dir,
        msg_aggregator=msg_aggregator,
        username=username,
        to_rename_asset='BCHSV',
        renamed_asset=A_BSV,
        target_version=3,
    )
    version = data.db.get_version()
    # Also make sure that we have updated to the target version
    assert version == 3
7,369
def check_phil(phil, scope=True, definition=True, raise_error=True):
    """
    Convenience function for checking if the input is a libtbx.phil.scope
    only or a libtbx.phil.definition only or either.

    Parameters
    ----------
    phil: object
        The object to be tested
    scope: bool
        Flag to check if phil is a libtbx.phil.scope
    definition: bool
        Flag to check if phil is a libtbx.phil.definition
    raise_error: bool
        If true, a RuntimeError is raised if the check(s) fail

    Returns
    -------
    value: bool
    """
    value = False
    if scope:  # check for only libtbx.phil.scope
        value = isinstance(phil, libtbx.phil.scope)
    if definition:  # check for only libtbx.phil.definition
        value = isinstance(phil, libtbx.phil.definition)
    if scope and definition:  # check for either
        value = isinstance(phil, libtbx.phil.scope) or isinstance(phil, libtbx.phil.definition)
    if (scope and definition) and not value and raise_error:
        raise RuntimeError('A libtbx.phil.scope or libtbx.phil.definition is expected.')
    elif scope and not value and raise_error:
        raise RuntimeError('A libtbx.phil.scope is expected.')
    elif definition and not value and raise_error:
        raise RuntimeError('A libtbx.phil.definition is expected.')
    return value
7,370
def get_ssm_environment() -> dict:
    """Get the value of environment variables stored in the SSM param store
    under $DSS_DEPLOYMENT_STAGE/environment"""
    p = ssm_client.get_parameter(Name=fix_ssm_variable_prefix("environment"))
    parms = p["Parameter"]["Value"]  # this is a string, so convert to dict
    return json.loads(parms)
7,371
def remove_tags(modeladmin, request, queryset):
    """Remove tags."""
    logger.debug('MA: %s, request: %s', modeladmin, request)
    for obj in queryset:
        obj.tags.clear()
7,372
def test_failure(database):
    """ Test that it fails when cfda_number does not exist. """

    # test for cfda_number that doesn't exist in the table
    cfda = CFDAProgram(program_number=12.340)
    det_award_1 = DetachedAwardFinancialAssistanceFactory(cfda_number="54.321")
    det_award_2 = DetachedAwardFinancialAssistanceFactory(cfda_number="AB.CDE")
    det_award_3 = DetachedAwardFinancialAssistanceFactory(cfda_number="11.111")

    errors = number_of_errors(_FILE, database,
                              models=[det_award_1, det_award_2, det_award_3, cfda])
    assert errors == 3
7,373
def get_default_date_stamp():
    """
    Returns the default date stamp as 'now', as an ISO format string 'YYYY-MM-DD'
    :return:
    """
    return datetime.now().strftime('%Y-%m-%d')
7,374
def prepifg_handler(config_file):
    """
    Perform multilooking and cropping on geotiffs.
    """
    config_file = os.path.abspath(config_file)
    params = cf.get_config_params(config_file, step=PREPIFG)
    prepifg.main(params)
7,375
def check_for_launchpad(old_vendor, name, urls):
    """Check if the project is hosted on launchpad.

    :param name: str, name of the project
    :param urls: set, urls to check.
    :return: the name of the project on launchpad, or an empty string.
    """
    if old_vendor != "pypi":
        # XXX This might work for other starting vendors
        # XXX but I didn't check. For now only allow
        # XXX pypi -> launchpad.
        return ''

    for url in urls:
        try:
            return re.match(r"https?://launchpad.net/([\w.\-]+)", url).groups()[0]
        except AttributeError:
            continue
    return ''
7,376
def hook_mem_write(uc, access, address, size, value, data_entries):
    """Callback for memory write."""
    is_64bit = (uc._mode == UC_MODE_64)
    eip = uc.reg_read(UC_X86_REG_RIP if is_64bit else UC_X86_REG_EIP)
    do_skip = False

    # Decode instruction
    insn = ida_ua.insn_t()
    inslen = ida_ua.decode_insn(insn, eip)
    if (inslen > 0):
        # Skip instruction?
        if (insn.itype in SKIP_WRITE_MEM_INSTRUCTIONS):
            do_skip = True

        if DEBUG:
            # Debug: trace log
            disasm_text = ida_lines.generate_disasm_line(eip, 0)
            if disasm_text:
                disasm_text = ida_lines.tag_remove(disasm_text)
            addr_fmt_str = '%016X' if is_64bit else '%08X'
            val_fmt_str = '%0' + str(2 * size) + 'X'
            log_fmt_str = addr_fmt_str + '\t%s\t' + \
                val_fmt_str + ' (%d) -> ' + addr_fmt_str
            log_str = log_fmt_str % (eip, disasm_text, value, size, address)
            log_str += '\t' + ('Skipped' if do_skip else 'Processed')
            trace_log(log_str)

    if do_skip:
        return

    start = address
    end = start + size
    for i, data_entry in reversed(list(enumerate(data_entries))):
        data_start = data_entry[0]
        data_end = data_start + data_entry[1]
        if ((data_start <= start <= data_end) or
                (data_start <= end <= data_end)):
            start = min(data_start, start)
            end = max(data_end, end)
            new_entry = (start, end - start)
            if (i < len(data_entries) - 1):
                del data_entries[i]
                data_entries.append(new_entry)
            else:
                data_entries[i] = new_entry
            break
    else:
        data_entries.append((address, size))
7,377
def apply_file_collation_and_strip(args, fname):
    """Apply collation path or component strip to a remote filename

    Parameters:
        args - arguments
        fname - file name

    Returns:
        remote filename

    Raises:
        No special exception handling
    """
    # replace os.path.sep with python's universal sep
    remotefname = fname.replace(os.path.sep, "/").strip("/")
    if args.collate is not None:
        remotefname = remotefname.split("/")[-1]
        if args.collate != '.':
            remotefname = "/".join((args.collate, remotefname))
    elif args.stripcomponents > 0:
        rtmp = remotefname.split("/")
        nsc = min((len(rtmp) - 1, args.stripcomponents))
        if nsc > 0:
            remotefname = "/".join(rtmp[nsc:])
    return remotefname
7,378
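Example calls (assuming the function above), using argparse.Namespace to mimic the parsed arguments:

from argparse import Namespace

# collate flattens the remote path under a target directory
args = Namespace(collate="dest", stripcomponents=0)
print(apply_file_collation_and_strip(args, "a/b/c.txt"))  # dest/c.txt

# stripcomponents drops leading directories instead
args = Namespace(collate=None, stripcomponents=1)
print(apply_file_collation_and_strip(args, "a/b/c.txt"))  # b/c.txt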
def constructRBFStates(L1, L2, W1, W2, sigma):
    """
    Constructs a dictionary dict[tuple] -> torch.tensor that converts tuples
    (x, y) representing positions to torch tensors used as input to the neural
    network. The tensors have an entry for each valid position on the race
    track. For each position (x, y), the tensor is constructed using the
    gaussian radial basis function with standard deviation sigma. In other
    words, if entry i corresponds to the position p2 = (x2, y2), then the
    tensor for a point p1 = (x1, y1) will have
    tensor[i] = Gaussian_RBF(p1, p2).

    @type L1: int
        See description in the @RaceCar class.
    @type L2: int
        See description in the @RaceCar class.
    @type W1: int
        See description in the @RaceCar class.
    @type W2: int
        See description in the @RaceCar class.
    @type sigma: float
        The standard deviation of the gaussian radial basis function.
    """
    N_states = (L1 + 1) * (W1 + W2 + 1) + L2 * (W2 + 1)
    x_coords = torch.zeros(N_states, dtype=torch.float32)
    y_coords = torch.zeros(N_states, dtype=torch.float32)
    state_to_basis = {}
    ind = 0
    for x in range(L1 + L2 + 1):
        for y in range(W1 + W2 + 1):
            if (0 <= x <= L1 and 0 <= y <= W1 + W2) or \
                    (0 <= x <= L1 + L2 and W1 <= y <= W1 + W2):
                x_coords[ind] = x
                y_coords[ind] = y
                ind += 1

    for x in range(L1 + L2 + 1):
        for y in range(W1 + W2 + 1):
            if (0 <= x <= L1 and 0 <= y <= W1 + W2) or \
                    (0 <= x <= L1 + L2 and W1 <= y <= W1 + W2):
                basis = torch.exp(-((x_coords - x) ** 2 + (y_coords - y) ** 2) / (2 * sigma ** 2))
                state_to_basis[(x, y)] = basis.view(1, -1).to(device)

    return state_to_basis
7,379
def gather_data(path, save_file=None, path_json='src/python_code/settings.json'):
    """
    Gather data from different experiments
    :param path: path of the experiments
    :param save_file: path if you want to save data as csv (Default None)
    :param path_json: setting file
    :return: dataframe
    """
    experiments = next(os.walk(path))[1]
    with open(path_json) as json_file:
        settings = json.load(json_file)["OOD"]["Gather_Data"]
    methods = settings["Feature_methods"]

    for j, experiment in enumerate(tqdm(experiments)):
        df = get_data(path + experiment + '/logs')
        df = process_RandNet(df)
        df = separate_ood(df, path_json=path_json)
        df2 = remove_latent(df)
        df2['auc'] = df2.apply(lambda x: auc(x, df2) if x.ood == 1 else None, axis=1)
        print("methods ", methods)
        for method in methods:
            methods2 = methods.copy()
            methods2.remove(method)
            print("methods ", method, methods2)
            df3 = keep_feature(df, methods2, path_json=path_json)
            df3['auc'] = df3.apply(lambda x: auc(x, df3) if x.ood == 1 else None, axis=1)
            df2 = pd.concat([df2, df3])
        df = df2
        """
        df3 = keep_latent(df)
        df3['auc'] = df3.apply(lambda x: auc(x, df3) if x.ood == 1 else None, axis=1)
        df4 = keep_like(df)
        df4['auc'] = df4.apply(lambda x: auc(x, df4) if x.ood == 1 else None, axis=1)
        df = pd.concat([df2, df3, df4])
        """
        # df['Metric use'] = df['Values'].apply(lambda x: np.mean(np.array(x).reshape(-1)))
        df['Epoch'] = df['Epoch'].apply(lambda x: int(x))
        df = df[['Model', 'Latent Space', 'Hidden Space', 'DataSet', 'Epoch', 'ood', 'auc']]
        if j == 0:
            final_df = df.copy()
        else:
            final_df = pd.concat([final_df, df])
    if save_file is not None:
        final_df.to_csv(save_file)
    return final_df
7,380
def tile_grid_intersection(
    src0: DatasetReader,
    src1: DatasetReader,
    blockxsize: Union[None, int] = None,
    blockysize: Union[None, int] = None
) -> tuple[Iterator[Window], Iterator[Window], Iterator[Window], Affine, int, int]:
    """Generate tiled windows for the intersection between two grids.

    Given two rasters having different dimensions, calculate read-window
    generators for each and a write-window generator for the intersection.

    Parameters:
        src0: rasterio read source
        src1: rasterio read source
        blockxsize: write-window width
        blockysize: write-window height

    Returns:
        read windows for src0,
        read windows for src1,
        write windows for the intersection,
        write raster Affine,
        write raster height in rows,
        write raster width in columns
    """
    bbox0 = window_bounds(((0, 0), src0.shape), src0.transform, offset='ul')
    bbox1 = window_bounds(((0, 0), src1.shape), src1.transform, offset='ul')
    bounds = intersect_bounds(bbox0, bbox1)

    (row_start0, row_stop0), (col_start0, col_stop0) = bounds_window(
        bounds, src0.transform
    )
    (row_start1, row_stop1), (col_start1, col_stop1) = bounds_window(
        bounds, src1.transform
    )

    ncols = col_stop0 - col_start0
    nrows = row_stop0 - row_start0
    affine = from_bounds(bounds[0], bounds[1], bounds[2], bounds[3], ncols, nrows)

    if blockxsize is None:
        blockxsize = ncols
    if blockysize is None:
        blockysize = nrows

    windows0 = tile_grid(
        ncols,
        nrows,
        blockxsize,
        blockysize,
        col_offset=col_start0,
        row_offset=row_start0,
    )
    windows1 = tile_grid(
        ncols,
        nrows,
        blockxsize,
        blockysize,
        col_offset=col_start1,
        row_offset=row_start1,
    )
    write_windows = tile_grid(ncols, nrows, blockxsize, blockysize)

    return (windows0, windows1, write_windows, affine, nrows, ncols)
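# Hedged usage sketch: the file names are hypothetical and the helpers
# (window_bounds, intersect_bounds, bounds_window, tile_grid) are assumed to
# come from the surrounding module.
import rasterio

with rasterio.open('dem_a.tif') as src0, rasterio.open('dem_b.tif') as src1:
    win0, win1, win_out, affine, nrows, ncols = tile_grid_intersection(
        src0, src1, blockxsize=512, blockysize=512
    )
    for w0, w1, wout in zip(win0, win1, win_out):
        a = src0.read(1, window=w0)  # tile from the first raster
        b = src1.read(1, window=w1)  # co-registered tile from the second
        # ... combine a and b, then write the result at window `wout`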
7,381
def etapes_index_view(request):
    """
    GET etapes index
    """
    # Check connected
    if not check_connected(request):
        raise exc.HTTPForbidden()

    records = request.dbsession.query(AffaireEtapeIndex).filter(
        AffaireEtapeIndex.ordre.isnot(None)
    ).order_by(AffaireEtapeIndex.ordre.asc()).all()

    return Utils.serialize_many(records)
7,382
def brute_force(ciphered_text: str, charset: str = DEFAULT_CHARSET, _database_path: Optional[str] = None) -> int: """ Get Caesar ciphered text key. Uses a brute force technique trying the entire key space until finding a text that can be identified with any of our languages. **You should not use this function. Use *brute_force_mp* instead.** This function is slower than *mp* one because is sequential while the other uses a multiprocessing approach. This function only stay here to allow comparisons between sequential and multiprocessing approaches. :param ciphered_text: Text to be deciphered. :param charset: Charset used for Caesar method substitution. Both ends, ciphering and deciphering, should use the same charset or original text won't be properly recovered. :param _database_path: Absolute pathname to database file. Usually you don't set this parameter, but it is useful for tests. :return: Caesar key found. """ key_space_length = len(charset) return simple_brute_force(key_generator=integer_key_generator(key_space_length), assess_function=_assess_caesar_key, # key_space_length=key_space_length, ciphered_text=ciphered_text, charset=charset, _database_path=_database_path)
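# Minimal sequential usage sketch; `cipher` and `decipher` are assumed
# companion helpers from the same package and are named here only to make
# the round trip concrete.
plain_text = "this is a secret message"
ciphered = cipher(plain_text, 7, DEFAULT_CHARSET)           # assumed helper
found_key = brute_force(ciphered, charset=DEFAULT_CHARSET)  # sequential search
assert decipher(ciphered, found_key, DEFAULT_CHARSET) == plain_text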
7,383
def mk_data(yml, sep_ext_kwargs=';', sep_key_val='='): """Generate data files from a YAML file. Arguments --------- yml : dict YAML-generated dict containing data specifications. sep_ext_kwargs : str Separator for a file extension and pandas keyword args. (default ';') sep_key_val : str Separator for pandas keyword args. (default '=') """ for active in yml['run']['active_ids']: if active not in yml: continue # Copy the contents of "run" keys to each activate ID. for run_keys in yml['run']: if run_keys == 'active_ids': continue # e.g. run_keys == out_path, out_fmts, header, ... if run_keys not in yml[active]: # Enables ID-wise overriding yml[active][run_keys] = yml['run'][run_keys] # Initialization list_of_lists = [[] for _ in yml[active]['header']] dict_of_lists = {} # DataFrame input num_header = len(yml[active]['header']) # Process the data. for line in yml[active]['data']: spl = re.split(r'\s*[{}]\s*'.format(yml[active]['sep']), line) if len(spl) < num_header: num_insufficient = num_header - len(spl) spl += [None] * num_insufficient for i in range(num_header): list_of_lists[i].append(spl[i]) for i, s in enumerate(yml[active]['header']): # e.g. (0, 'Name'), (1, 'Nawabari'), (2, 'Age (year)'), ... dict_of_lists[s] = list_of_lists[i] # DF construction df = pd.DataFrame(dict_of_lists) # File generation if not os.path.exists(yml[active]['out_path']): os.makedirs(yml[active]['out_path']) rpt_gen(yml[active]['out_path']) out_bname_w_path = '{}/{}'.format(yml[active]['out_path'], yml[active]['out_bname']) for fmt, ext_kwargs in yml[active]['out_fmts'].items(): # spl[0]: File extension # spl[1:]: pandas keyword arguments (optional) spl = re.split(r'\s*[{}]\s*'.format(sep_ext_kwargs), ext_kwargs) out_fname = '{}.{}'.format(out_bname_w_path, spl[0]) args_dct = {} if len(spl) >= 2: args = [] for arg in spl[1:]: args += re.split(r'\s*[{}]\s*'.format(sep_key_val), arg) args_dct = {args[i]: args[i+1] for i in range(0, len(args), 2)} meth_to_call = getattr(df, 'to_{}'.format(fmt)) meth_to_call(out_fname, **args_dct) rpt_gen(out_fname)
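# Sketch of the YAML content this function appears to expect, reconstructed
# from the keys it reads (run.active_ids, header, sep, data, out_path,
# out_bname, out_fmts); all values are illustrative, not a documented schema.
# Note the kwarg parser passes every pandas keyword value as a string.
EXAMPLE_YML = {
    'run': {
        'active_ids': ['squids'],
        'out_path': './out',
        'out_bname': 'squids',
        # '<fmt>: <ext>;<pandas kwarg>=<value>;...' drives df.to_<fmt>(...)
        'out_fmts': {'csv': 'csv;encoding=utf-8'},
    },
    'squids': {
        'header': ['Name', 'Nawabari', 'Age (year)'],
        'sep': ',',
        'data': ['Ika, Shiokara-so, 17', 'Tako, Octo Valley, 18'],
    },
}
# mk_data(EXAMPLE_YML) would then write ./out/squids.csv via
# df.to_csv('./out/squids.csv', encoding='utf-8').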
7,384
def SqZerniketoOPD(x,y,coef,N,xwidth=1.,ywidth=1.): """ Return an OPD vector based on a set of square Zernike coefficients """ stcoef = np.dot(zern.sqtost[:N,:N],coef) x = x/xwidth y = y/ywidth zm = zern.zmatrix(np.sqrt(x**2+y**2),np.arctan2(y,x),N) opd = np.dot(zm,stcoef) return opd
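# Usage sketch, assuming the `zern` module referenced above is importable and
# numpy is bound to `np` as elsewhere in this file; grid size and coefficient
# values are arbitrary.
import numpy as np

N = 10                              # number of Zernike terms
x, y = np.meshgrid(np.linspace(-1, 1, 64), np.linspace(-1, 1, 64))
coef = np.zeros(N)
coef[3] = 0.5                       # put power into a single square term
opd = SqZerniketoOPD(x.ravel(), y.ravel(), coef, N)
print(opd.shape)                    # one OPD value per evaluation point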
7,385
def build_voterinfo(campaign, state):
    """Render a tweet of voting info for a state"""
    state_info = campaign.info_by_state[state]
    num_cities = len(state_info[CITIES])
    assert num_cities == len(set(state_info[CITIES])), f"Duplicate entries in CITIES for {state}."

    city_ct = num_cities
    effective_length = 0
    tweet_text = ""
    while city_ct > 0:
        # Iterate on building a tweet until it fits within the limit.
        # Returns an empty tweet if unsuccessful.
        city_set = set(state_info[CITIES])
        try:
            # Select up to city_ct cities
            cities = []
            cities_found = 0
            while cities_found < city_ct:
                city_idx = random.randint(0, num_cities - 1)
                city = state_info[CITIES][city_idx]
                if city in city_set:
                    cities.append(hashtag(city))
                    city_set.remove(city)
                    cities_found += 1

            effective_length, tweet_text = render_voterinfo(campaign, state, cities)
            break
        except AssertionError:
            tweet_text = ""
            city_ct -= 1

    return effective_length, tweet_text
7,386
def test_check_badusernames(state: State):
    """Verify a logged-in base_admin can check usernames and gets the correct
    result for a nonexistent user.
    """
    # Build auth string value including token from state
    b_string = 'Bearer ' + state.state['base_admin']
    assert len(b_string) > 24
    auth_hdr = {'Authorization': b_string}
    data = {"username": "got_no_username_like_this"}
    response = requests.post(SERVER_URL + "/api/admin/user/check_name",
                             headers=auth_hdr, json=data)
    assert response.status_code == 200
    is_user = response.json()
    assert is_user == 0
7,387
def blync_callback(
    ctx: typer.Context,
    light_id: int = typer.Option(
        0,
        "--light-id",
        "-l",
        show_default=True,
        help="Light identifier",
    ),
    red: int = typer.Option(
        0,
        "--red",
        "-r",
        is_flag=True,
        show_default=True,
        help="Red color value range: 0 - 255",
    ),
    blue: int = typer.Option(
        0,
        "--blue",
        "-b",
        is_flag=True,
        show_default=True,
        help="Blue color value range: 0 - 255",
    ),
    green: int = typer.Option(
        0,
        "--green",
        "-g",
        is_flag=True,
        help="Green color value range: 0 - 255",
        show_default=True,
    ),
    red_b: bool = typer.Option(
        False, "--RED", "-R", is_flag=True, help="Full value red [255]"
    ),
    blue_b: bool = typer.Option(
        False, "--BLUE", "-B", is_flag=True, help="Full value blue [255]"
    ),
    green_b: bool = typer.Option(
        False, "--GREEN", "-G", is_flag=True, help="Full value green [255]"
    ),
    off: bool = typer.Option(
        False, "--off/--on", "-o/-n", show_default=True, help="Turn the light off/on."
    ),
    dim: bool = typer.Option(
        False,
        "--dim",
        "-d",
        is_flag=True,
        help="Toggle bright/dim mode.",
        show_default=True,
    ),
    flash: int = typer.Option(
        0,
        "--flash",
        "-f",
        count=True,
        is_flag=True,
        help="Enable flash mode.",
    ),
    play: int = typer.Option(0, "--play", "-p", help="Select song: 1-15"),
    repeat: bool = typer.Option(
        False,
        "--repeat",
        is_flag=True,
        show_default=True,
        help="Repeat the selected song.",
    ),
    volume: int = typer.Option(
        5, "--volume", show_default=True, help="Set the volume: 1-10"
    ),
    available: bool = typer.Option(
        False,
        "--list-available",
        "-a",
        is_flag=True,
        is_eager=True,
        callback=list_lights,
    ),
    verbose: int = typer.Option(0, "--verbose", "-v", count=True, callback=verbosity),
    version: bool = typer.Option(
        False, "--version", "-V", is_flag=True, is_eager=True, callback=report_version
    ),
):
    """Control your Embrava BlyncLight from the command-line!

    ## Usage

    Use the `blync` utility to directly control your Embrava BlyncLight:

    \b
    ```console
    $ blync -R        # turn the light on with red color and leave it on
    $ blync --off     # turn the light off
    $ blync -RG --dim # turn the light on with yellow color and dim
    $ blync -RBG      # turn the light on with white color
    ```

    Colors can be specified by values between 0 and 255 using the lower-case
    color options or using the upper-case full value options.

    \b
    ```console
    $ blync -r 127                # half intensity red
    $ blync -r 255                # full intensity red
    $ blync -R                    # also full intensity red
    $ blync -r 255 -b 255 -g 255  # full intensity white
    $ blync -RBG                  # full intensity white
    ```

    If that's not enough fun, there are three builtin color modes:
    `fli`, `throbber`, and `rainbow`. All modes continue until the user
    terminates with a Control-C or platform equivalent.

    \b
    ```console
    $ blync fli
    $ blync throbber
    $ blync rainbow
    ```

    ## Installation

    \b
    ```console
    $ python3 -m pip install blynclight
    $ python3 -m pip install git+https://github.com/JnyJny/blynclight.git # latest
    ```

    This module depends on [hidapi](https://github.com/trezor/cython-hidapi),
    which supports Windows, Linux, FreeBSD and MacOS via a Cython module.
""" if ctx.invoked_subcommand == "udev-rules": return try: light = BlyncLight.get_light(light_id, immediate=False) except BlyncLightNotFound as error: typer.secho(str(error), fg="red") raise typer.Exit(-1) from None assert not light.immediate light.red = red if not red_b else 255 light.blue = blue if not blue_b else 255 light.green = green if not green_b else 255 light.off = 1 if off else 0 light.dim = 1 if dim else 0 light.flash = 1 if flash > 0 else 0 light.speed = flash light.mute = 0 if play else 1 light.music = play light.play = 1 if play else 0 light.volume = volume light.repeat = 1 if repeat else 0 if not ctx.invoked_subcommand: if light.on and light.color == (0, 0, 0): light.color = DEFAULT_COLOR try: light.immediate = True for line in str(light).splitlines(): logger.info(line) except Exception as error: typer.secho(str(error), fg="red") raise typer.Exit(-1) from None raise typer.Exit() # Disable flashing for subcommands. light.flash = 0 ctx.obj = light
7,388
def train_model(training_df, stock): """ Summary: Trains XGBoost model on stock prices Inputs: stock_df - Pandas DataFrame containing data about stock price, date, and daily tweet sentiment regarding that stock stock - String representing stock symbol to be used in training Return value: Trained XGBoost model """ print("Beginning training model for ", stock) X_train, X_test, y_train, y_test = create_train_data(training_df) print("Created data") xgb = XGBRegressor(objective="reg:squarederror", random_state=42) parameters = { 'n_estimators': [100, 200, 300, 400], 'learning_rate': [0.001, 0.005, 0.01, 0.05], 'max_depth': [8, 10, 12, 15], 'gamma': [0.001, 0.005, 0.01, 0.02], } print("Performing Grid Search") gs = GridSearchCV(xgb, parameters) gs.fit(X_train, y_train, verbose=2) print("Grid Search Done") model = XGBRegressor(**gs.best_params_, objective="reg:squarederror") model.fit(X_train, y_train) print("Model fit") y_pred = model.predict(X_test) print(stock) print(f'y_true = {np.array(y_test)[:5]}') print(f'y_pred = {y_pred[:5]}') print(f'mean_squared_error = {mean_squared_error(y_test, y_pred)}') print("----------------") return model
7,389
def find_ext(files, ext): """ Finds all files with extension `ext` in `files`. Parameters ---------- files : list List of files to search in ext : str File extension Returns ------- dict A dictionary of pairs (filename, full_filename) """ dic = defaultdict(lambda: None) for full_filename in files: filename, fext = os.path.splitext(full_filename) if fext.lower() == ext: dic[filename] = full_filename return dic
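# Tiny usage sketch with made-up filenames; note `ext` must include the dot
# and be lowercase, since the comparison lowercases only the file's extension.
files = ['scan01.fits', 'scan01.txt', 'scan02.FITS', 'notes.md']
fits_files = find_ext(files, '.fits')
print(fits_files['scan01'])   # -> 'scan01.fits'
print(fits_files['scan02'])   # -> 'scan02.FITS' (match is case-insensitive)
print(fits_files['missing'])  # -> None (defaultdict default)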
7,390
def create_test_node(context, **kw): """Create and return a test Node object. Create a node in the DB and return a Node object with appropriate attributes. """ node = get_test_node(context, **kw) node.create() return node
7,391
def get_physical_id(r_properties):
    """Generate the physical resource id for the S3 object."""
    bucket = r_properties['Bucket']
    key = r_properties['Key']
    return f's3://{bucket}/{key}'
7,392
def entity_test_models(translation0, locale1): """This fixture provides: - 2 translations of a plural entity - 1 translation of a non-plural entity - A subpage that contains the plural entity """ entity0 = translation0.entity locale0 = translation0.locale project0 = entity0.resource.project locale0.cldr_plurals = "0,1" locale0.save() translation0.plural_form = 0 translation0.save() resourceX = Resource.objects.create( project=project0, path="resourceX.po") entity0.string = "Entity zero" entity0.key = entity0.string entity0.string_plural = "Plural %s" % entity0.string entity0.save() entityX = Entity.objects.create( resource=resourceX, string="entityX", key='Key%sentityX' % KEY_SEPARATOR) translation0pl = Translation.objects.create( entity=entity0, locale=locale0, plural_form=1, string="Plural %s" % translation0.string) translationX = Translation.objects.create( entity=entityX, locale=locale0, string="Translation %s" % entityX.string) subpageX = Subpage.objects.create( project=project0, name="Subpage") subpageX.resources.add(entity0.resource) return translation0, translation0pl, translationX, subpageX
7,393
def process_block(cond, loglines, logname):
    """Apply a block condition to lines of text

    Parameters:
    cond (a YAML thing): a condition
    loglines (list of strings): the lines of the log file
    logname (string): name of the log file, used in error messages

    Raises:
    LogCheckFail: if the block does not satisfy the condition
    """
    err_return = ''
    diffcount = 0
    pluscount = 0
    firstlinedetected = False
    maxplusinst = 0
    instcount = 0
    errorcount = 0
    firstline = ''
    lastline = ''
    contigcount = 0
    msg = ""
    condname = cond["name"]
    blockinstcount = cond["inst"]
    blocklinecount = cond["linecount"]
    iscontig = False
    if cond.get('contiglines'):
        # get block lines
        lines = split_multiline(cond["contiglines"])
        firstline = lines[0].strip()
        lastline = lines[len(lines) - 1].strip()
        iscontig = True
    else:
        # get first/last lines
        firstline = cond["firstline"]
        lastline = cond["lastline"]
    threshold = int(cond["threshold"])
    if cond["pluslines"] != "none":
        pluslines = cond["pluslines"]
    else:
        pluslines = ""
    # get 'white list'
    # if a block contains one of these lines, then it is ignored
    ignorelines = None
    if cond.get('ignorelines'):
        ignorelines = split_multiline(cond["ignorelines"])
    for line in loglines:
        # this strips leading/trailing space and newlines
        logline = line.strip()
        # look for pattern
        ignored = False
        if ignorelines:
            for s in ignorelines:
                if contains(s, logline):
                    ignored = True
        if not ignored:
            if contains(firstline, logline) and not firstlinedetected:
                blockline = ""
                fline = diffcount
                firstlinedetected = True
                pluscount = 0
                contigcount = 0
        # single plusline at present
        if not ignored and firstlinedetected:
            blockline = "{}\n{}".format(blockline, logline)
            if iscontig:
                # so far no pluslines for blocklines
                # check rest of lines[]
                if diffcount > fline:
                    if logline.startswith(lines[contigcount + 1]):
                        contigcount += 1
            else:
                for plusline in pluslines:
                    maxplusinst = int(plusline["maxinst"])
                    if logline == plusline["line"]:
                        pluscount += 1
        if not ignored and contains(lastline, logline) and firstlinedetected:
            firstlinedetected = False
            lline = diffcount
            diff = (lline - fline) + 1
            diffcount = 0
            # check if this text block should be ignored
            # ignored = False
            # if ignorelines:
            #     for s in ignorelines:
            #         if contains(s, blockline):
            #             ignored = True
            # if not ignored:
            if iscontig:
                contigcount += 1
                if contigcount == blocklinecount:
                    instcount += 1
                if diff > blocklinecount:
                    msg = "condition failed: exceeded max block contiguous line count; "
                    errorcount += 1
                elif diff < blocklinecount:
                    msg = "condition failed: did not acquire complete block; "
                    errorcount += 1
            else:
                if pluscount > maxplusinst:
                    # independent of block length
                    errorcount += 1
                    msg = "condition failed: too many pluslines; "
                else:
                    # may encounter block with same first/last lines
                    # so discriminate using threshold
                    if (threshold == 0 or
                            (threshold < 0 and diff < int(math.fabs(threshold))) or
                            (threshold > 0 and diff > int(math.fabs(threshold)))):
                        if diff > (blocklinecount + pluscount):
                            errorcount += 1
                            msg = "condition failed: block too large; "
                        else:
                            # perfect or almost perfect
                            instcount += 1
            # end if logline == lastline
        diffcount += 1
    # end for line
    if instcount != blockinstcount:
        errorcount += 1
        msg = msg + "condition failed: wrong number of blocks: expected " + str(blockinstcount) + " found " + str(instcount)
    if errorcount > 0:
        err_return = 'Block condition "' + condname + '" error in ' + \
                     logname + ', causes: ' + msg + '.'
    if err_return != '':
        # logger.info('condition failed: %s', cond['name'])
        raise LogCheckFail(err_return)
# end process_block
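# Sketch of a condition dict this checker appears to accept, reconstructed
# from the keys read above; the field values are illustrative, not a
# documented schema (a YAML loader would normally supply the dict).
EXAMPLE_COND = {
    "name": "boot banner",
    "inst": 1,            # expected number of matching blocks
    "linecount": 3,       # expected lines per block
    "firstline": "System start",
    "lastline": "System ready",
    "threshold": 0,       # 0 disables the block-length discrimination
    "pluslines": [{"line": "extra diagnostic", "maxinst": 1}],
}
# process_block(EXAMPLE_COND, log_text.splitlines(), "boot.log") returns
# silently on a match and raises LogCheckFail otherwise.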
7,394
def paris_topology(self, input_path):
    """Generation of the Paris metro network topology

    Parameters:
        input_path: string, input folder path

    Returns:
        self.g: nx.Graph(), Waxman graph topology
        self.length: np.array, lengths of edges
        self.forcing: right-hand-side forcing built from rhs.dat
    """
    # graph adjacency list
    with open(input_path + "adj.dat", "r") as adj_file:
        lines = adj_file.readlines()
    topol = np.zeros([len(lines), 2], dtype=int)
    for iedge, line in enumerate(lines):
        topol[iedge][:] = [int(w) for w in line.split()[0:2]]
    self.g.add_edges_from(topol)

    # coordinates of nodes
    with open(input_path + "coord.dat", "r") as coord_file:
        lines = coord_file.readlines()
    for inode, line in enumerate(lines):
        self.g.nodes[inode]["pos"] = tuple([float(w) for w in line.split()[0:2]])

    # length of edges
    self.length = np.zeros(self.g.number_of_edges())
    for i, edge in enumerate(self.g.edges()):
        self.length[i] = distance.euclidean(self.g.nodes[edge[0]]["pos"],
                                            self.g.nodes[edge[1]]["pos"])

    # right-hand side construction
    forcing_path = input_path + "rhs.dat"
    self.forcing = forcing_generation(self, forcing_path)
    return self.g, self.length, self.forcing
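# Input layout inferred from the reads above (values illustrative):
#   adj.dat   - one edge per line: "<node_i> <node_j>"
#   coord.dat - one node per line: "<x> <y>"
#   rhs.dat   - forcing data consumed by forcing_generation()
# A hedged call, assuming the owning object exposes an empty nx.Graph:
#   g, lengths, forcing = network.paris_topology("data/paris/")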
7,395
def show_layouts(kb_info_json, title_caps=True): """Render the layouts with info.json labels. """ for layout_name, layout_art in render_layouts(kb_info_json).items(): title = layout_name.title() if title_caps else layout_name cli.echo('{fg_cyan}%s{fg_reset}:', title) print(layout_art)
7,396
def _rescale(vector): """Scale values in vector to the range [0, 1]. Args: vector: A list of real values. """ # Subtract min, making smallest value 0 min_val = min(vector) vector = [v - min_val for v in vector] # Divide by max, making largest value 1 max_val = float(max(vector)) try: return [v / max_val for v in vector] except ZeroDivisionError: # All values are the same return [1.0] * len(vector)
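# Quick behavior check with made-up values.
print(_rescale([2.0, 4.0, 6.0]))  # -> [0.0, 0.5, 1.0]
print(_rescale([3.0, 3.0, 3.0]))  # -> [1.0, 1.0, 1.0] (degenerate case)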
7,397
def censor_contig(contig_end, u_contigs, o_dict): """ removes the entries for both ends of a contig in a contig list and overlap dict """ for c_e in [contig_end, other_end(contig_end)]: if c_e in u_contigs: u_contigs.remove(c_e) if c_e in o_dict: o_dic = o_dict[c_e] if o_dic != {}: overlapped_contig = list(o_dic.keys())[0] if overlapped_contig in o_dict: del o_dict[overlapped_contig][c_e] del o_dict[c_e] return
7,398
def validate_travel_dates(departure, arrival):
    """Validate arrival and departure dates

    :param departure: departure date string in "%Y-%m-%dT%H:%M:%SZ" format
    :param arrival: arrival date string in "%Y-%m-%dT%H:%M:%SZ" format
    :returns: tuple (status, error message); status is True when the dates are
        valid, otherwise False together with a Response describing the error
    """
    date_format = "%Y-%m-%dT%H:%M:%SZ"
    status = True
    error_message = ""
    if datetime.strptime(departure, date_format) < datetime.now():
        status = False
        error_message = Response(
            {"message": "Departure time cannot be in the past"},
            status=HTTP_400_BAD_REQUEST,
        )
    elif datetime.strptime(arrival, date_format) < datetime.now():
        status = False
        error_message = Response(
            {"message": "Arrival time cannot be in the past"},
            status=HTTP_400_BAD_REQUEST,
        )
    elif datetime.strptime(departure, date_format) > datetime.strptime(
        arrival, date_format
    ):
        status = False
        error_message = Response(
            {"message": "Departure time cannot be greater than arrival time"},
            status=HTTP_400_BAD_REQUEST,
        )
    return status, error_message
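# Usage sketch; the timestamps are illustrative and must follow the
# "%Y-%m-%dT%H:%M:%SZ" layout the validator parses.
ok, err = validate_travel_dates("2030-01-01T08:00:00Z", "2030-01-01T10:30:00Z")
assert ok and err == ""
ok, err = validate_travel_dates("2030-01-01T12:00:00Z", "2030-01-01T10:30:00Z")
assert not ok  # err is a Response: departure after arrival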
7,399