Dataset columns: content (string, lengths 22 to 815k), id (int64, 0 to 4.91M)
def TraceMainDescendant(istart, ihalo, numsnaps, numhalos, halodata, tree, TEMPORALHALOIDVAL, ireverseorder=False):
    """
    Follows a halo along its descendant tree to the root tails.
    If reverse order, then late times start at 0 and, as one moves up in index, one moves backwards in time.
    """
    # start at this snapshot
    halosnap = istart
    # see if halo does not have a Head set
    if (halodata[halosnap]['Head'][ihalo] == 0):
        # if halo has not had a Head set the branch needs to be walked along the main branch
        haloid = halodata[halosnap]['ID'][ihalo]
        # only set the Root Tail if it has not been set. Here if halo has not had
        # tail set, then it must be the first progenitor
        # otherwise it should have already been set and we just need to store the root tail
        if (halodata[halosnap]['Tail'][ihalo] == 0):
            halodata[halosnap]['Tail'][ihalo] = haloid
            halodata[halosnap]['TailSnap'][ihalo] = halosnap
            halodata[halosnap]['RootTail'][ihalo] = haloid
            halodata[halosnap]['RootTailSnap'][ihalo] = halosnap
            roottail, rootsnap, rootindex = haloid, halosnap, ihalo
        else:
            roottail = halodata[halosnap]['RootTail'][ihalo]
            rootsnap = halodata[halosnap]['RootTailSnap'][ihalo]
            rootindex = int(roottail % TEMPORALHALOIDVAL) - 1
        # now move along tree first pass to store head and tails and root tails of main branch
        while (True):
            # ids contain index information
            haloindex = int(haloid % TEMPORALHALOIDVAL) - 1
            halodata[halosnap]['Num_descen'][haloindex] = tree[halosnap]['Num_descen'][haloindex]
            # if no more descendants, break from search
            if (halodata[halosnap]['Num_descen'][haloindex] == 0):
                # store for current halo its tail and root tail info (also store root tail for root head)
                halodata[halosnap]['Head'][haloindex] = haloid
                halodata[halosnap]['HeadSnap'][haloindex] = halosnap
                halodata[halosnap]['RootHead'][haloindex] = haloid
                halodata[halosnap]['RootHeadSnap'][haloindex] = halosnap
                rootheadid, rootheadsnap, rootheadindex = haloid, halosnap, haloindex
                # only set the root head of the root tail
                # if it has not been set before (ie: along the main branch of root halo)
                if (halodata[rootsnap]['RootHead'][rootindex] == 0):
                    halosnap, haloindex, haloid = rootsnap, rootindex, roottail
                    # set the root head of the main branch
                    while (True):
                        halodata[halosnap]['RootHead'][haloindex] = rootheadid
                        halodata[halosnap]['RootHeadSnap'][haloindex] = rootheadsnap
                        descen = halodata[halosnap]['Head'][haloindex]
                        descenindex = int(descen % TEMPORALHALOIDVAL) - 1
                        descensnap = int(((descen - descen % TEMPORALHALOIDVAL)) / TEMPORALHALOIDVAL)
                        if (ireverseorder):
                            descensnap = numsnaps - 1 - descensnap
                        if (haloid == descen):
                            break
                        halosnap, haloindex, haloid = descensnap, descenindex, descen
                break
            # now store the rank of the descendant
            descenrank = tree[halosnap]['Rank'][haloindex][0]
            halodata[halosnap]['HeadRank'][haloindex] = descenrank
            # as we are only moving along main branches, stop if the object's rank is not 0
            if (descenrank > 0):
                break
            # otherwise, get the descendant
            # store main progenitor
            maindescen = tree[halosnap]['Descen'][haloindex][0]
            maindescenindex = int(maindescen % TEMPORALHALOIDVAL) - 1
            maindescensnap = int(((maindescen - maindescen % TEMPORALHALOIDVAL)) / TEMPORALHALOIDVAL)
            # if reverse order, then higher snap values correspond to lower index
            if (ireverseorder):
                maindescensnap = numsnaps - 1 - maindescensnap
            # calculate stepsize in time based on the halo ids
            stepsize = maindescensnap - halosnap
            # store descendant
            halodata[halosnap]['Head'][haloindex] = maindescen
            halodata[halosnap]['HeadSnap'][haloindex] = maindescensnap
            # and update the root tails of the object
            halodata[maindescensnap]['Tail'][maindescenindex] = haloid
            halodata[maindescensnap]['TailSnap'][maindescenindex] = halosnap
            halodata[maindescensnap]['RootTail'][maindescenindex] = roottail
            halodata[maindescensnap]['RootTailSnap'][maindescenindex] = rootsnap
            halodata[maindescensnap]['Num_progen'][maindescenindex] += 1
            # then move to the next descendant
            haloid = maindescen
            halosnap = maindescensnap
5,200
def get_service_mapping(): """ Get mapping dict of service types Returns: A mapping dict which maps service types names to their ids """ # Get all Service types: all_service_type = requests.get(base_url + 'services/v2/service_types', headers=headers3).json() # Make Dict of service names and ids service_name_to_id = {service_type['attributes']['name']:service_type['id'] for service_type in all_service_type['data']} return service_name_to_id
5,201
def get_recently_viewed(request): """ get settings.PRODUCTS_PER_ROW most recently viewed products for current customer """ t_id = tracking_id(request) views = ProductView.objects.filter(tracking_id=t_id).values( 'product_id').order_by('-date')[0:PRODUCTS_PER_ROW] product_ids = [v['product_id'] for v in views] return Product.active.filter(id__in=product_ids)
5,202
def chapter_by_url(url): """Helper function that iterates through the chapter scrapers defined in cu2.scrapers.__init__ and returns an initialized chapter object when it matches the URL regex. """ for Chapter in chapter_scrapers: if re.match(Chapter.url_re, url): return Chapter.from_url(url)
5,203
def get_model_name(part_num):
    """
    Look up the device (model) name for the given part number.
    :param part_num: part number to look up
    :return: the matching model name, or None if no model matches
    """
    models = current_config.MODELS
    for model_name, model_part_num in models.items():
        if model_part_num == part_num:
            return model_name
5,204
def run_get_pk(process, *args, **inputs): """Run the process with the supplied inputs in a local runner that will block until the process is completed. :param process: the process class or process function to run :param inputs: the inputs to be passed to the process :return: tuple of the outputs of the process and process node pk """ if isinstance(process, Process): runner = process.runner else: runner = manager.get_manager().get_runner() return runner.run_get_pk(process, *args, **inputs)
5,205
def test_remove_specified_kwargs_empty(empty_kwargs, warn): """Test the internal remove_specified_kwargs function on empty kwargs. Parameters ---------- empty_kwargs : tuple pytest fixture. See local conftest.py. warn : bool ``True`` to warn if a specified string key not in kwargs, else silence. """ # get kwargs dict and list of keys to drop kwargs, droplist = empty_kwargs # callable with kwargs, droplist, warn already filled in test_callable = partial( _mnewton_internal.remove_specified_kwargs, kwargs, droplist, warn=warn ) # if warn, expect warnings to be raised. save number of dropped keys. if warn: with pytest.warns(UserWarning, match=_specified_match): drops = test_callable() # else expect no warnings to be raised. raised warnings fail the test else: drops = test_callable() # no keys should be dropped from empty_kwargs since it is empty assert drops == 0
5,206
def _random_mask(target_tokens, noise_probability=None, target_length=None):
    """target_length is actually the mask length."""
    unk = 3
    target_masks = get_base_mask(target_tokens)
    if target_length is None:
        target_length = target_masks.sum(1).float()
    if noise_probability is None:
        # sample from [0, 1]
        target_length = target_length * target_length.clone().uniform_()  # length to be masked
    else:
        target_length = target_length * noise_probability
    target_length = target_length + 1  # make sure to mask at least one token.
    target_score = target_tokens.clone().float().uniform_()
    target_score.masked_fill_(~target_masks, 2.0)
    _, target_rank = target_score.sort(1)
    target_cutoff = new_arange(target_rank) < target_length[:, None].long()
    prev_target_tokens = target_tokens.masked_fill(
        target_cutoff.scatter(1, target_rank, target_cutoff), unk)
    return prev_target_tokens
5,207
def test_random_multi_image(): """ Just make sure the image_plot function doesn't crash. """ shap.image_plot([np.random.randn(3, 20,20) for i in range(3)], np.random.randn(3, 20,20), show=False)
5,208
def download_and_extract(data_dir, force=False): """Download fname from the datasets_url, and save it to target_dir, unless the file already exists, and force is False. Parameters ---------- data_dir : str Directory of where to download cifar10 data force : bool Force downloading the file, if it already exists Returns ------- fname : str Full path of the downloaded file """ target_fname = os.path.join(data_dir, 'cifar-10-batches-py') if force or not os.path.isdir(target_fname): try: os.makedirs(data_dir) except IOError: pass download_fname = os.path.join(data_dir, 'cifar-10-python.tar.gz') logger.info("Downloading CIFAR10 dataset from:" + str(DATASET_URL)) with urllib.request.urlopen(DATASET_URL) as response, open(download_fname, 'wb') as out_file: logger.info(str(DATASET_URL) + ' --> ' + download_fname) shutil.copyfileobj(response, out_file) tf = tarfile.open(download_fname) tf.extractall(data_dir) # verify files are there, otherwise throw error for f in TRAIN_FLIST: if not os.path.isfile(os.path.join(target_fname, f)): msg = "Training file " + str(f) + " missing! Please try manually downloading the data from: "\ + str(DATASET_URL) logger.error(msg) raise IOError(msg) for f in TEST_FLIST: if not os.path.isfile(os.path.join(target_fname, f)): msg = "Test file " + str(f) + " missing! Please try manually downloading the data from: " \ + str(DATASET_URL) logger.error(msg) raise IOError(msg) return target_fname
5,209
def EnableDeploymentManager(config):
    """Enables Deployment Manager, with role/owners for its service account."""
    logging.info('Setting up Deployment Manager...')
    project_id = config.project['project_id']
    # Enable Deployment Manager and Cloud Resource Manager for this project.
    utils.RunGcloudCommand(['services', 'enable', 'deploymentmanager',
                            'cloudresourcemanager.googleapis.com'], project_id)
    # Grant the Deployment Manager service account (temporary) owners access.
    dm_service_account = utils.GetDeploymentManagerServiceAccount(project_id)
    utils.RunGcloudCommand(['projects', 'add-iam-policy-binding', project_id,
                            '--member', dm_service_account,
                            '--role', 'roles/owner'], project_id=None)
5,210
def raise_if_parameters_are_invalid(parameters): """Raises an exception if the specified parameters are not valid. Args: parameters (dict): the parameters dict. """ if not isinstance(parameters, dict): raise BadRequest(PARAMETERS_EXCEPTION_MSG) for key, value in parameters.items(): if not isinstance(value, (str, int, float, bool, list, dict)): raise BadRequest(PARAMETERS_EXCEPTION_MSG)
5,211
def test_secretsmanager_provider_deploy( mock_decrypt, boto_fs, capsys ): """ Should deploy the AWS Secrets Manager changes """ mock_decrypt.return_value = b'PlainTextData' client = boto3.client('secretsmanager') with patch.object(boto3.Session, 'client') as mock_client: with Stubber(client) as stubber: mock_client.return_value = client stubber.add_response('list_secrets', { 'SecretList': [] }, { 'Filters': [ { 'Key': 'name', 'Values': ['secret1'] } ] }) stubber.add_response('create_secret', {}, { 'Name': 'secret1', 'Description': '', 'SecretString': 'PlainTextData', 'Tags': [] }) stubber.add_response('list_secrets', { 'SecretList': [{ 'ARN': SECRET_ARN, 'Name': 'secret2', 'Description': '', 'Tags': [] }] }, { 'Filters': [ { 'Key': 'name', 'Values': ['secret2'] } ] }) stubber.add_response('get_secret_value', { 'SecretString': 'PlainTextData' }, { 'SecretId': 'secret2' }) stubber.add_response('list_secrets', { 'SecretList': [{ 'ARN': SECRET_ARN, 'Name': 'secret3', 'Description': '', 'Tags': [] }] }, { 'Filters': [ { 'Key': 'name', 'Values': ['secret3'] } ] }) stubber.add_response('get_secret_value', { 'SecretString': 'PlainTextData2' }, { 'SecretId': 'secret3' }) stubber.add_response('update_secret', {}, { 'SecretId': 'secret3', 'Description': '', 'SecretString': 'PlainTextData' }) stubber.add_response('untag_resource', {}, { 'SecretId': 'secret3', 'TagKeys': [] }) config_file = 'config.yaml' secrets_file = 'config.secrets.yaml' session.aws_profile = None session.aws_region = 'us-east-1' boto_fs.create_file(config_file, contents=f""" kms: arn: {KEY_ARN} secrets: - name: 'secret1' - name: 'secret2' - name: 'secret3' """) boto_fs.create_file(secrets_file, contents=""" secrets: - name: 'secret1' value: 'SecretData' - name: 'secret2' value: 'SecretData' - name: 'secret3' value: 'SecretData' """) config = ConfigReader(config_file) provider = SecretsManagerProvider(config) provider.deploy(None, False, False) captured = capsys.readouterr() assert "Secret: [secret1]\n--> New Secret\n" in captured.out assert "Secret: [secret3]\n--> Changes:\n -->" + \ " Value:\n Old Value: PlainTextData2\n New Value: PlainTextData\n" in captured.out
5,212
def calculate(cart): """Return the total shipping cost for the cart. """ total = 0 for line in cart.get_lines(): total += line.item.shipping_cost * line.quantity return total
5,213
def arr_to_rgb(arr, rgb=(0, 0, 0), alpha=1, invert=False, ax=None): """ arr to be made a mask rgb:assumed using floats (0..1,0..1,0..1) or string """ # arr should be scaled to 1 img = np.asarray(arr, dtype=np.float64) img = img - np.nanmin(img) img = img / np.nanmax(img) im2 = np.zeros(img.shape + (4,)) if isinstance(rgb, str): rgb = mpl.colors.to_rgb(rgb) if invert: img = 1 - img im2[:, :, 3] = img * alpha r, g, b = rgb im2[:, :, 0] = r im2[:, :, 1] = g im2[:, :, 2] = b # if ax is None: # ax = plt.gca() # plt.sca(ax) # plt.imshow(im2) return im2
5,214
def parameters(number, size, v=3): """ sets item parameters of items and puts in list :param number: number of items :param size: characteristic size of the items :param v: velocity :return: list with items """ param = [] for i in range(number): angle = randint(0, int(2 * pi * 100)) param.append({ 'x': randint(100, screen_width - 100), 'y': randint(100, screen_height - 100), 'vx': v * cos(angle / 100), 'vy': v * sin(angle / 100), 'r': randint(40, 255), 'g': randint(40, 255), 'b': randint(40, 255), 's': size }) return param
5,215
def _validate_args_for_toeplitz_ops(c_or_cr, b, check_finite, keep_b_shape, enforce_square=True): """Validate arguments and format inputs for toeplitz functions Parameters ---------- c_or_cr : array_like or tuple of (array_like, array_like) The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the actual shape of ``c``, it will be converted to a 1-D array. If not supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape of ``r``, it will be converted to a 1-D array. b : (M,) or (M, K) array_like Right-hand side in ``T x = b``. check_finite : bool Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (result entirely NaNs) if the inputs do contain infinities or NaNs. keep_b_shape : bool Whether to convert a (M,) dimensional b into a (M, 1) dimensional matrix. enforce_square : bool, optional If True (default), this verifies that the Toeplitz matrix is square. Returns ------- r : array 1d array corresponding to the first row of the Toeplitz matrix. c: array 1d array corresponding to the first column of the Toeplitz matrix. b: array (M,), (M, 1) or (M, K) dimensional array, post validation, corresponding to ``b``. dtype: numpy datatype ``dtype`` stores the datatype of ``r``, ``c`` and ``b``. If any of ``r``, ``c`` or ``b`` are complex, ``dtype`` is ``np.complex128``, otherwise, it is ``np.float``. b_shape: tuple Shape of ``b`` after passing it through ``_asarray_validated``. """ if isinstance(c_or_cr, tuple): c, r = c_or_cr c = _asarray_validated(c, check_finite=check_finite).ravel() r = _asarray_validated(r, check_finite=check_finite).ravel() else: c = _asarray_validated(c_or_cr, check_finite=check_finite).ravel() r = c.conjugate() if b is None: raise ValueError('`b` must be an array, not None.') b = _asarray_validated(b, check_finite=check_finite) b_shape = b.shape is_not_square = r.shape[0] != c.shape[0] if (enforce_square and is_not_square) or b.shape[0] != r.shape[0]: raise ValueError('Incompatible dimensions.') is_cmplx = np.iscomplexobj(r) or np.iscomplexobj(c) or np.iscomplexobj(b) dtype = np.complex128 if is_cmplx else np.double r, c, b = (np.asarray(i, dtype=dtype) for i in (r, c, b)) if b.ndim == 1 and not keep_b_shape: b = b.reshape(-1, 1) elif b.ndim != 1: b = b.reshape(b.shape[0], -1) return r, c, b, dtype, b_shape
5,216
def test_runner_should_call_hooks_when_running_a_feature( hook_registry, default_config, mocker ): """The Runner should call the ``each_feature`` hooks when running a Feature""" # given runner = Runner(default_config, None, hook_registry) feature_mock = mocker.MagicMock(name="Feature") # when runner.run_feature(feature_mock) # then hook_registry.call.assert_has_calls( [ call("each_feature", "before", False, feature_mock), call("each_feature", "after", False, feature_mock), ] )
5,217
def run_dft_en_par(dft_input:str, structure, dft_loc:str, ncpus:int, dft_out:str ="dft.out", npool:int =None, mpi:str ="mpi", **dft_kwargs): """run DFT calculation with given input template and atomic configurations. This function is not used atm. :param dft_input: input template file name :param structure: atomic configuration :param dft_loc: relative/absolute executable of the DFT code :param ncpus: # of CPU for mpi :param dft_out: output file name :param npool: not used :param mpi: not used :param **dft_wargs: not used :return: forces, energy """ newfilename = edit_dft_input_positions(dft_input, structure) dft_command = \ f'{dft_loc} -i {newfilename} > {dft_out}' if (ncpus > 1): dft_command = f'mpirun -np {ncpus} {dft_command}' # output.write_to_output(dft_command+'\n') call(dft_command, shell=True) os.remove(newfilename) forces, energy = parse_dft_forces_and_energy(dft_out) return forces, energy
5,218
def get_application_id(): """Returns the app id from the app_identity service.""" return app_identity.get_application_id()
5,219
def prior(X, ls, kernel_func=rbf, ridge_factor=1e-3, name=None): """Defines Gaussian Process prior with kernel_func. Args: X: (np.ndarray of float32) input training features. with dimension (N, D). kernel_func: (function) kernel function for the gaussian process. Default to rbf. ls: (float32) length scale parameter. ridge_factor: (float32) ridge factor to stabilize Cholesky decomposition. name: (str) name of the random variable Returns: (ed.RandomVariable) A random variable representing the Gaussian Process, dimension (N,) """ X = tf.convert_to_tensor(X, dtype=tf.float32) N, _ = X.shape.as_list() K_mat = kernel_func(X, ls=ls, ridge_factor=ridge_factor) return ed.MultivariateNormalTriL(loc=tf.zeros(N, dtype=tf.float32), scale_tril=tf.cholesky(K_mat), name=name)
5,220
def some_handler(element, current_thread: threading.Thread = None, *args, **kwargs):
    """Handles the element received from the stream."""
    current_value = counter.count
    if SLOW_DOWN:
        time.sleep(random.randint(1, 5))
    counter.update(current_value + 1)
    print(f"\033[92mHandling iteration number {element:^3} \033[38;5;226m[{name:^20}]\033[92m-[\033[38;5;117m{counter.count:^4}\033[92m] [{id(counter)}]\033[0m")
5,221
def ast_for_statement(statement: Ast, ctx: ReferenceDict): """ statement ::= (label | let | expr | into | importExpr) [';']; """ # assert statement.name == 'statement' sexpr = statement[0] s_name: str = sexpr.name try: if s_name is UNameEnum.expr: # expr # RuikoEBNF: # expr ::= testExpr (thenTrailer | applicationTrailer)* [where]; if len(statement) is 2: # end with ';' then return None ast_for_expr(sexpr, ctx) else: return ast_for_expr(sexpr, ctx) elif s_name is UNameEnum.label: [symbol] = sexpr assert symbol.name is UNameEnum.symbol ctx.set_local('@label', symbol.string) elif s_name is UNameEnum.let: # RuikoEBNF: # let Throw ['=' '!'] # ::= ['`let`'] symbol ['!' trailer+] '=' expr; to_new_ctx = False if sexpr[0].string is UNameEnum.keyword_let: # bind a new var in current environment(closure). to_new_ctx = True _, symbol, *trailers, expr = sexpr else: # For the readability of source codes, # pattern matching using list destruction is better. symbol, *trailers, expr = sexpr res = ast_for_expr(expr, ctx) if not trailers: # let symbol = ... ctx.set_local(symbol.string, res) if to_new_ctx else ctx.set_nonlocal(symbol.string, res) return # let symbol 'attr = ... | let symbol ![item] = ... ref = ctx.get_nonlocal(symbol.string) *fst_n, [last] = trailers # `trailers` is a list of trailer. # RuikoEBNF: # trailer Throw ['[' ']' '.'] # ::= '[' exprCons ']' | '\'' symbol; for each, in fst_n: if each.name is UNameEnum.symbol: # symbol ref = getattr(ref, each.string) else: # [exprCons] item = tuple(ast_for_expr_cons(each, ctx)) if len(item) is 1: item = item[0] ref = ref[item] if last.name == UNameEnum.symbol: # symbol # trailer = . symbol setattr(ref, last.string, res) else: # trailer = [exprCons] item = tuple(ast_for_expr_cons(last, ctx)) if len(item) is 1: item = item[0] ref[item] = res # let expr return Nothing elif s_name is UNameEnum.into: # RuikoEBNF: # into Throw ['`into`'] # ::= '`into`' symbol; [symbol] = sexpr # TODO with result raise BreakUntil(symbol.string) elif s_name is UNameEnum.importStmt: # RuikoEBNF: # importExpr # ::= singleImportExpr | fromImportExpr | remImport; [branch] = sexpr if branch.name is not UNameEnum.remImport: exec(' ' .join (map(lambda _: _.string, flatten( branch))) .strip(), ctx.local) return import os if len(branch) is 2: string, symbol = branch path = eval(string.string) name = symbol.string else: [string] = branch path = eval(string.string) name = os.path.split( os.path.splitext(path)[0])[1] src_code, md5_v = md5(path) manager = ctx.module_manager managed_modules = manager['@modules'] if md5_v == managed_modules.get(path): # imported and file not changed. # so do not import again return managed_modules[path] = md5_v env = make_new_module(name, manager, ctx['__compiler__']) add_exec_func(to=env) ast_for_file(env['__compiler__'].from_source_code(path, src_code, MetaInfo(fileName=path)), env) ctx.set_local(name, ModuleAgent(env.local)) else: raise TypeError('unknown statement.') except BreakUntil as e: raise e except Exception as e: raise Trace(e, statement)
5,222
def test_actual_results_accumulated_cost_matrix(params, arr_desired): """Test that the actual results are the expected ones.""" arr_actual = accumulated_cost_matrix(**params) np.testing.assert_allclose(arr_actual, arr_desired, atol=1e-5, rtol=0.)
5,223
def multiclass_eval(y_hat: FloatTensor, y: IntTensor) -> int:
    """
    Returns number correct: how often the predicted class (argmax) matches gold.

    Arguments:
        y_hat: 2d (N x C): guesses for each class
        y: 2d (N x C): onehot representation of class labels

    Returns:
        number correct
    """
    # max(dim) returns both values and indices. compare best indices from
    # predictions and gold (which are just onehot)
    _, pred_idxes = y_hat.max(1)
    _, gold_idxes = y.max(1)
    return int((pred_idxes == gold_idxes).sum())
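A quick illustrative check of multiclass_eval (not from the original source; assumes torch is imported):
y_hat = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
y = torch.tensor([[0, 1], [1, 0]])
multiclass_eval(y_hat, y)  # -> 2, since both argmax predictions match the one-hot gold labels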
5,224
def _VMTestChrome(board, sdk_cmd): """Run cros_run_test.""" image_dir_symlink = image_lib.GetLatestImageLink(board) image_path = os.path.join(image_dir_symlink, constants.VM_IMAGE_BIN) # Run VM test for boards where we've built a VM. if image_path and os.path.exists(image_path): sdk_cmd.VMTest(image_path)
5,225
def get_image(bot, update): """Gets a random image from unsplash.com with /random command. Uses the unsplash api and gets the random image from the api. It also includes the reference of author's name (with unsplash profile link) and unsplash website as the caption of the image. """ # Insert your app name, registered with unsplash API, e.g. wallpaperz. unsplash_app_name = "" """Insert your unsplash api token (client_id) here. Get yours at https://unsplash.com/developers. It should look something like this: parameters = {"client_id": "98lsjdfjjslfjl28329...43ulfjs9d8f7s89df7s9d9"} """ parameters = {"client_id": ""} # if no client_id specified, then /random command won't work. if parameters['client_id'] == "": error_message = "You forgot to mention the Unsplash API token." print(error_message) bot.send_message(chat_id=update.message.chat_id, text=error_message) # API call for the random image. response = requests.get("https://api.unsplash.com/photos/random", params=parameters).json() # Getting image link from the response. image_link = response['urls']['regular'] # Link to Author's profile on Unsplash. author_profile = response['user']['links']['html'] # Photographer/Author's name. author_name = response['user']['name'] # Needed to refer back to unsplash. unsplash_referral = "?utm_source={0}&utm_medium=referral"\ .format(unsplash_app_name) author = "<a href='{0}{2}'>{1}</a>"\ .format(author_profile, author_name, unsplash_referral) # Unsplash.com link. unsplash = "<a href='https://unsplash.com{0}'>Unsplash</a>"\ .format(unsplash_referral) # Caption under the photo giving attribution to the photographer. caption = "Photo by {0} on {1}".format(author, unsplash) # Send the photo with HTML caption. bot.send_photo(update.message.chat_id, image_link, caption, parse_mode='HTML') # update.message.reply_photo(image_link)
5,226
def punctuation_for_spaces_dict() -> Dict[int, str]: """Provide a dictionary for removing punctuation, keeping spaces. Essential for scansion to keep stress patterns in alignment with original vowel positions in the verse. :return dict with punctuation from the unicode table >>> print("I'm ok! Oh #%&*()[]{}!? Fine!".translate( ... punctuation_for_spaces_dict()).strip()) I m ok Oh Fine """ return dict( (i, " ") for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith("P") )
5,227
def extract止めないでお姉さま(item): """ Parser for '止めないで、お姉さま…' """ badwords = [ 'subs', ] if any([bad in item['tags'] for bad in badwords]): return None vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title']) if not (chp or vol) or 'preview' in item['title'].lower(): return None if 'WATTT' in item['tags']: return buildReleaseMessageWithType(item, 'WATTT', vol, chp, frag=frag, postfix=postfix) return False
5,228
def GetExpID(startRow=0,numRows=2000,totalRows = -1): """ Queries the Allen Mouse Brain Institute website for all gene expression data available for download. Returns: -------- GeneNames: list[dict()] list of all genes where expression data is available for download. Dict contains experiment/gene metadata. SectionDataSetID : list(int) corresponding SectionDataSetID (SectionDataSet: see "http://help.brain-map.org/display/api/Data+Model") ID needed to specify download target. """ startRow = startRow numRows = numRows totalRows = totalRows rows = [] GeneNames = [] SectionDataSetID = [] info = list() done = False while not done: r = "&start_row={0}&num_rows={1}".format(startRow,numRows) pagedUrl = API_DATA_PATH + "query.json?criteria=model::SectionDataSet,rma::criteria,products%5Bid$eq5%5D,rma::include,specimen(stereotaxic_injections(primary_injection_structure,structures))" + r source = urllib.request.urlopen(pagedUrl).read() response = json.loads(source) rows += response['msg'] for x in response['msg']: if x['failed'] == False : print(x['id']) info.append(x['id']) if totalRows < 0: totalRows = int(response['total_rows']) startRow += len(response['msg']) if startRow >= totalRows: done = True return info
5,229
def fsinit(S:Optional[StorageSettings]=None, check_not_exist:bool=False, remove_existing:bool=False)->None:
    """ Create the storage and/or temp directory if they don't exist. Default
    locations are determined by `PYLIGHTNIX_STORAGE` and `PYLIGHTNIX_TMP` env
    variables. """
    if remove_existing:
        dirrm(fsstorage(S))
        dirrm(fstmpdir(S))
        makedirs(fsstorage(S), exist_ok=False)
        makedirs(fstmpdir(S), exist_ok=False)
    else:
        makedirs(fsstorage(S), exist_ok=False if check_not_exist else True)
        makedirs(fstmpdir(S), exist_ok=False if check_not_exist else True)
    assert_valid_storage(S)
5,230
def _add_biotype_attribute(gene_content): """ Add `biotype` attribute to all intervals in gene_content. Parameters ---------- gene_content_ : dict Intervals in gene separated by transcript id. Returns ------- dict Same gene_content_ object with added `biotype` attributes. """ gene_content = gene_content.copy() # Determine gene biotype: gbiotype = _get_biotype(gene_content['gene']) # List to keep track of all possible biotypes in gene: gene_biotypes = [gbiotype] if gbiotype else [] for transcript_id, transcript_intervals in gene_content.items(): if transcript_id == 'gene': continue first_exon = [i for i in transcript_intervals if i[2] in ['CDS', 'ncRNA']][0] biotype = _get_biotype(first_exon) gene_biotypes.append(biotype) new_intervals = [] for interval in transcript_intervals: new_intervals.append(_add_biotype_value(interval, biotype)) gene_content[transcript_id] = new_intervals # Finally, make also gene biotype: a list of all biotypes in gene, # sorted by frequency. Additionally, another sorting is added to sort # by alphabet if counts are equal. biotype = ', '.join([i[0] for i in sorted( sorted(Counter(gene_biotypes).items()), key=lambda x: x[1], reverse=True)]) gene_content['gene'] = _add_biotype_value(gene_content['gene'], biotype) return gene_content
5,231
def test_list_task():
    """Test the task list; this endpoint is mainly used by admins."""
    res = get("/v3/databus/tasks/")
    assert res["result"]
    assert res["data"]["page_total"] == 12
    assert res["data"]["page"] == 1
    assert len(res["data"]["tasks"]) == 12
    res = get("/v3/databus/tasks/?cluster_name=hdfs-kafka_cluster_name-M")
    assert res["result"]
    assert res["data"]["page_total"] == 1
    assert res["data"]["page"] == 1
    assert len(res["data"]["tasks"]) == 1
    res = get("/v3/databus/tasks/?page=1&page_size=2")
    assert res["result"]
    assert res["data"]["page"] == 1
    assert res["data"]["page_total"] == 12
    assert len(res["data"]["tasks"]) == 2
    res = get("/v3/databus/tasks/?status=running&page=1&page_size=2")
    assert res["result"]
    assert res["data"]["page"] == 1
    assert res["data"]["page_total"] == 12
    assert len(res["data"]["tasks"]) == 2
5,232
def verify_macrotask_request(current_time_ms, scheduler, event_list): """Verifies that the provided list of Events contains a single MacrotaskRequest Event. Also checks that the MacrotaskRequest Event is scheduled for the correct time. """ assert len(event_list) == 1 arrival_time_ms, macrotask_request_event = event_list[0] assert arrival_time_ms == current_time_ms + scheduler.worker.conf.network_latency_ms assert isinstance(macrotask_request_event, events.MacrotaskRequest) assert macrotask_request_event.worker is scheduler.worker
5,233
def random_solution(W, D, n): """ We generate a solution of size n """ sol = np.random.permutation(n) + 1 fitness_sol = fitness(W=W, D=D, sol=sol) return [fitness_sol, sol]
5,234
def singlePlotMain(): """Main, but in a single plot and only interesting data """ data = [] line = f.readline() count = 0 while line != "": data.append(readline(line)) line = f.readline() count += 1 data.sort() data = removeStartTime(data) data = afterAndBeforeArray(data, 385, 425) data = offsetArray(data) indexToPlot = [2, 5, 11] singlePlotWithOffset(data, indexToPlot) plt.legend() # plotVerticalLine(data, 11, "max") # plotVerticalLine(data, 2, "min") plt.show()
5,235
def connect_host_loop(_ctx: Context, timeout_secs: int = 3) -> None: """Tries to connect host in permanent loop.""" i = 0 while i < CONNECT_TO_HOST_MAX_RETRY: print(f'{format_dt(datetime.datetime.now())} Trying connect to {_ctx.host}:{_ctx.port}...') if connect_host(_ctx): break time.sleep(timeout_secs) i += 1
5,236
def format_perm(model, action): """ Format a permission string "app.verb_model" for the model and the requested action (add, change, delete). """ return '{meta.app_label}.{action}_{meta.model_name}'.format( meta=model._meta, action=action)
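A hypothetical usage sketch of format_perm; the `Article` model and the "news" app label are assumptions, not from the source:
perm = format_perm(Article, 'change')  # -> 'news.change_article' for an Article model in a Django app labelled "news"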
5,237
def update_variables(params): """Updates ILAMB metadata for benchmark data uploaded through the PBS. Parameters ---------- params : list The WMT parameters for the ILAMB component. """ data_files = get_pbs_listing(data_dir) # Extract variable names from file list, removing duplicates. variable_dict = {} for pbs_file in data_files: variable_name = variables.get_name(pbs_file) if variable_name not in variable_dict.keys(): variable_dict[variable_name] = pbs_file variables.update_parameters(params, variable_dict.keys()) # Create or update the .cfg.tmpl file for each variable. for var_name, file_name in variable_dict.items(): variables.update_template(var_name, file_name)
5,238
def verify_password(plain_password: str, hashed_password: str) -> bool: """Verify plain password and hashed password. Args: plain_password (str): Plain text password. hashed_password (str): Hashed password. Returns: bool: Returns true if secret is verified against given hash. """ return pwd_context.verify(plain_password, hashed_password)
5,239
def render_siecle2(tpl: str, parts: List[str], data: Dict[str, str]) -> str: """ >>> render_siecle2("siècle2", ["1"], defaultdict(str)) 'I<sup>er</sup>' >>> render_siecle2("siècle2", ["I"], defaultdict(str)) 'I<sup>er</sup>' >>> render_siecle2("siècle2", ["i"], defaultdict(str)) 'I<sup>er</sup>' >>> render_siecle2("siècle2", ["18"], defaultdict(str)) 'XVIII<sup>e</sup>' >>> render_siecle2("siècle2", ["XVIII"], defaultdict(str)) 'XVIII<sup>e</sup>' >>> render_siecle2("siècle2", ["xviii"], defaultdict(str)) 'XVIII<sup>e</sup>' """ number = parts[0] number = int_to_roman(int(number)) if number.isnumeric() else number.upper() suffix = "er" if number == "I" else "e" return f"{number}{superscript(suffix)}"
5,240
def get_interactions(request): """Function to get the interactions for a molecule""" dist_dict = {"SingleAtomAcceptor_SingleAtomDonor": {"dist": 4.0}, # H-bonding "SingleAtomAcceptor_WeakDonor": {"dist": 3.0}, # Weak H-bond "Halogen_SingleAtomAcceptor": {"dist": 4.0}, # Halogen bonding "AcidicGroup_BasicGroup": {"dist": 4.0}, # Acid-base "Arom5_Arom6": {"dist": 5.5},"Arom6_Arom6": {"dist": 5.5},"Arom5_Arom5": {"dist": 5.5},# Aromatic-aromatic interactions "Arom6_Carbonyl": {"dist": 4.5}, "Arom5_Carbonyl": {"dist": 4.5},# Carbonyl-aromatic interactions - CARBONLY from PROTEIN ONLY!!!! "Hydrophobe_Hydrophobe": {"dist": 4.5}}#Hydrophobic interactions mol_pk = request.GET['obs_id'] my_dist = request.GET['dist'] host = request.get_host() mol = Molecule.objects.get(pk=mol_pk) out_l = [] prot = mol.prot_id # Get the interactions interactions = ProbeBit.objects.filter(prot_id=prot, mol_id=mol, dist__lte=my_dist) i = -1 for my_int in interactions: if my_int.type not in dist_dict: continue if my_int.dist > dist_dict[my_int.type]["dist"]: continue print "HERE" i += 1 out_l.append({}) f = my_int.probe_source_id out_l[i]["url_1"] = "http://"+host+'/WONKA/show_point/?x_com='+str(f.x_com)+'&y_com='+str(f.y_com)+'&z_com='+str(f.z_com) f = my_int.probe_dest_id out_l[i]["url_2"] = "http://"+host+'/WONKA/show_point/?x_com='+str(f.x_com)+'&y_com='+str(f.y_com)+'&z_com='+str(f.z_com) out_l[i]["dist"] = my_int.dist out_l[i]["type"] = my_int.type out_l[i]["angle_1"] = my_int.angle_1 out_l[i]["angle_2"] = my_int.angle_2 return HttpResponse(json.dumps(out_l))
5,241
def sendmail_action(): """Send an email to the address entered in the sendmail form.""" if not MSGRAPHAPI.loggedin: redirect("/sendmail") email_body = json.dumps( { "Message": { "Subject": request.query.subject, "Body": {"ContentType": "HTML", "Content": request.query.body}, "ToRecipients": [{"EmailAddress": {"Address": request.query.to}}], }, "SaveToSentItems": "true", } ) # send the email response = MSGRAPHAPI.post(endpoint="me/microsoft.graph.sendMail", data=email_body) # refresh the sendmail page, showing result (status_code) for this action return template( "sendmail.tpl", dict( fullname=MSGRAPHAPI.loggedin_name, email=MSGRAPHAPI.loggedin_email, status_code=response.status_code, ), )
5,242
def parseArg(): """ CMD argument parsing :return: the parser """ parser = argparse.ArgumentParser(description='SAT solver') parser.add_argument('infile', nargs=1, type=argparse.FileType('r')) parser.add_argument('level', nargs='?', default=0, type=int) return parser
5,243
def test_attachment(testdir, tmpdir): """ Test a basic test with an additional property """ testdir.makepyfile(""" def test_basic(add_nunit_attachment): add_nunit_attachment("file.pth", "desc") assert 1 == 1 """) outfile = tmpdir.join('out.xml') outfile_pth = str(outfile) result = testdir.runpytest( '-v', '--nunit-xml='+outfile_pth ) result.stdout.fnmatch_lines([ '*test_basic PASSED*', ]) assert result.ret == 0 os.path.exists(outfile_pth) xs = xmlschema.XMLSchema(os.path.join(os.path.abspath(os.path.dirname(__file__)), '../../ext/nunit-src/TestResult.xsd'), validation='lax') out = xs.to_dict(outfile_pth) assert out['@total'] == 1, out assert out['@passed'] == 1, out assert out['@failed'] == 0, out assert out['@skipped'] == 0, out assert out['test-suite']['@total'] == 1 assert out['test-suite']['@passed'] == 1 assert out['test-suite']['@failed'] == 0 assert out['test-suite']['@skipped'] == 0 assert out['test-suite']['test-case']['attachments']['attachment'][0]['description'] == "desc" assert out['test-suite']['test-case']['attachments']['attachment'][0]['filePath'] == "file.pth"
5,244
def normal_to_angle(x, y): """ Take two normal vectors and return the angle that they give. :type x: float :param x: x normal :type y: float :param y: y normal :rtype: float :return: angle created by the two normals """ return math.atan2(y, x) * 180 / math.pi
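Two worked examples of normal_to_angle (illustrative only):
normal_to_angle(1.0, 1.0)   # -> 45.0 degrees
normal_to_angle(0.0, -1.0)  # -> -90.0 degrees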
5,245
def glDeleteFramebuffersEXT( baseOperation, n, framebuffers=None ): """glDeleteFramebuffersEXT( framebuffers ) -> None """ if framebuffers is None: framebuffers = arrays.GLuintArray.asArray( n ) n = arrays.GLuintArray.arraySize( framebuffers ) return baseOperation( n, framebuffers )
5,246
def enforce(data, service=False, tenant=None): """Enforce zone app or service.""" tstr = " -tenant=%s " % (tenant) if tenant else "" ostr = " -enforce-service " if service else " -enforce-zone-app " ret_val = {} if not data: ret_val['empty'] = {"success": "Empty enforcement request"} else: if g_debug: print(yaml.dump(data)) rc = run_command("%s api %s %s" % ( g_araalictl_path, ostr, tstr), in_text=yaml.dump(data), debug=False, result=True, strip=False) assert rc[0] == 0, rc[1] ret_val = json.loads(rc[1]) return ret_val
5,247
def outline(image, mask, color): """ Give a color to the outline of the mask Args: image: an image mask: a label color: a RGB color for outline Return: image: the image which is drawn outline """ mask = np.round(mask) yy, xx = np.nonzero(mask) for y, x in zip(yy, xx): if 0.0 < np.mean(mask[max(0, y - 1) : y + 2, max(0, x - 1) : x + 2]) < 1.0: image[max(0, y) : y + 1, max(0, x) : x + 1] = color return image
5,248
def test_interpolation(): """Test string interpolation during execution""" execute_workflow(r""" [0: shared='res'] res = '' b = 200 res += f"{b}" """) assert env.sos_dict["res"] == "200"
5,249
def main(ctx, config_file, config_pairs, debug): """Buy and sell anything on the internet for bitcoin. \b For detailed help, run 21 help --detail. For full documentation, visit 21.co/learn. """ need_wallet_and_account = ctx.invoked_subcommand not in ( 'help', 'update', 'login', 'doctor') # Set UUID if available uuid = bitcoin_computer.get_device_uuid() if uuid: two1.TWO1_DEVICE_ID = uuid ctx.obj = parse_config( config_file=config_file, config_dict=dict(config_pairs), need_wallet_and_account=need_wallet_and_account, debug=debug, )
5,250
def bogen_ab(dl, dr):
    """Downward arc, for a wide D ("durch" etc.). The end is only pointed when the
    stroke stands alone; otherwise place the last support point a bit earlier and
    set the last control point so the curve continues in the same direction, giving
    a smooth connection to the following consonant.
    """
    y0 = 0.5  # height of start and end point
    h = 0.65
    l = 0.3
    b = [(0, y0), (0, y0)]  # start is always pointed [P2], (P3/Q0)  # TODO: change for "jedoch"
    m = [(l, y0 - h), (1 - l, y0 - h)]  # [Q1], [Q2]
    e = [(1, y0), (1, y0)] if not dr else [(1 - l/2, y0 - h/2), (1 - l/3, y0 - h/3)]  # end (Q3/R0)
    return b + m + e
5,251
def IterateXmlElements(node): """minidom helper function that iterates all the element nodes. Iteration order is pre-order depth-first.""" if node.nodeType == node.ELEMENT_NODE: yield node for child_node in node.childNodes: for child_node_element in IterateXmlElements(child_node): yield child_node_element
5,252
def is_open_port(port): """ Check if port is open :param port: port number to be checked :type port: int :return: is port open :rtype: bool """ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.bind(("127.0.0.1", port)) except socket.error: return False return True
5,253
def stopping_fn_from_metric(metric_name: str): """ Returns a stopping function for ignite.handlers.EarlyStopping using the given metric name. """ def stopping_fn(engine: Engine): return engine.state.metrics[metric_name] return stopping_fn
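A minimal usage sketch with ignite's EarlyStopping handler; the `trainer`/`evaluator` engines and the "val_mean_dice" metric name are assumptions:
from ignite.engine import Events
from ignite.handlers import EarlyStopping

handler = EarlyStopping(patience=10, score_function=stopping_fn_from_metric("val_mean_dice"), trainer=trainer)
evaluator.add_event_handler(Events.COMPLETED, handler)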
5,254
def output_formats(cmd, data, output) -> None: """Test output formats.""" with requests_mock.Mocker() as mock: mock.get( "http://localhost:8123/api/states", text=data, status_code=200 ) runner = CliRunner() result = runner.invoke(cli.cli, cmd, catch_exceptions=False) print('--seen--') print(result.output) print('----') print('---expected---') print(output) print('----') assert result.exit_code == 0 assert result.output == output
5,255
def write_to_db(db_name, table_name):
    """ Doc string for function write_to_db. """
    return
5,256
def deconstruct_proto(model_proto, compression_pipeline): """Deconstruct the protobuf. Args: model_proto: The protobuf of the model compression_pipeline: The compression pipeline object Returns: protobuf: A protobuf of the model """ # extract the tensor_dict and metadata bytes_dict, metadata_dict = model_proto_to_bytes_and_metadata(model_proto) # decompress the tensors # TODO: Handle tensors meant to be held-out from the compression pipeline # (currently none are held out). tensor_dict = {} for key in bytes_dict: tensor_dict[key] = compression_pipeline.backward(data=bytes_dict[key], transformer_metadata=metadata_dict[key]) return tensor_dict
5,257
def cmd_mdremove(bot, trigger, caseid): """ Remove a case from the Marked for Deletion List™ (Does NOT reopen the case!) required parameter: database id aliases: mdremove, mdr, mdd, mddeny """ try: result = callapi(bot, method='GET', uri='/rescues/' + str(caseid), triggernick=str(trigger.nick)) try: addNamesFromV2Response(result['included']) except: pass result['data'] = convertV2DataToV1(result['data']) rescue = Rescue.load(result['data'][0]) setRescueMarkedForDeletion(bot, rescue, marked=False) bot.say('Successfully removed ' + str(rescue.data["IRCNick"]) + '\'s case from the Marked for Deletion List™.') except: bot.reply('Couldn\'t find a case with id ' + str(caseid) + ' or other APIError')
5,258
def Parallel(*plist): """ Parallel(P1, [P2, .. ,PN]) """ _parallel(plist, True)
5,259
def configure_container(container): """Configures the container Params: container (ServiceContainer) """ container.register(ServiceDefinition('command_processor', _make_command_processor)) container.register(ServiceDefinition('logger', _make_logger)) container.register(ServiceDefinition('streamalert_forwarder', _make_kinesis)) container.register(ServiceDefinition('state_manager', _make_cache)) container.register(ServiceDefinition('athena', _make_athena)) container.register(ServiceDefinition('query_parameter_generator', _make_param_generator)) container.register(ServiceDefinition('query_pack_repository', _make_query_pack_repo)) container.register(ServiceDefinition('query_pack_manager_factory', _make_query_pack_factory)) container.register(ServiceDefinition('query_pack_execution_context', _make_execution_context)) container.register(ServiceDefinition('clock', _make_clock)) container.register(ServiceDefinition('boto3_athena_client', _make_boto3_athena_client)) container.register(ServiceDefinition('boto3_kinesis_client', _make_boto3_kinesis_client))
5,260
def build_attribute_set(items, attr_name): """Build a set off of a particular attribute of a list of objects. Adds 'None' to the set if one or more of the objects in items is missing the attribute specified by attr_name. """ attribute_set = set() for item in items: attribute_set.add(getattr(item, attr_name, None)) return attribute_set
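An illustrative call to build_attribute_set (not from the original source):
from collections import namedtuple
Item = namedtuple('Item', 'name')
build_attribute_set([Item('a'), Item('b'), object()], 'name')  # -> {'a', 'b', None}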
5,261
def potential_fn(q): """ - log density for the normal distribution """ return 0.5 * np.sum(((q['z'] - true_mean) / true_std) ** 2)
5,262
def estimate_pointcloud_local_coord_frames(
    pointclouds: Union[torch.Tensor, "Pointclouds"],
    neighborhood_size: int = 50,
    disambiguate_directions: bool = True,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Estimates the principal directions of curvature (which includes normals)
    of a batch of `pointclouds`.

    The algorithm first finds `neighborhood_size` nearest neighbors for each
    point of the point clouds, followed by obtaining principal vectors of
    covariance matrices of each of the point neighborhoods. The main principal
    vector corresponds to the normals, while the other 2 are the direction of
    the highest curvature and the 2nd highest curvature.

    Note that each principal direction is given up to a sign. Hence, the
    function implements the `disambiguate_directions` switch that allows one
    to ensure consistency of the sign of neighboring normals. The
    implementation follows the sign disambiguation from SHOT descriptors [1].

    The algorithm also returns the curvature values themselves. These are the
    eigenvalues of the estimated covariance matrices of each point
    neighborhood.

    Args:
        **pointclouds**: Batch of 3-dimensional points of shape
            `(minibatch, num_point, 3)` or a `Pointclouds` object.
        **neighborhood_size**: The size of the neighborhood used to estimate
            the geometry around each point.
        **disambiguate_directions**: If `True`, uses the algorithm from [1] to
            ensure sign consistency of the normals of neighboring points.

    Returns:
        **curvatures**: The three principal curvatures of each point of shape
            `(minibatch, num_point, 3)`. If `pointclouds` are of `Pointclouds`
            class, returns a padded tensor.
        **local_coord_frames**: The three principal directions of the
            curvature around each point of shape `(minibatch, num_point, 3, 3)`.
            The principal directions are stored in columns of the output.
            E.g. `local_coord_frames[i, j, :, 0]` is the normal of `j`-th point
            in the `i`-th pointcloud. If `pointclouds` are of `Pointclouds`
            class, returns a padded tensor.

    References:
        [1] Tombari, Salti, Di Stefano: Unique Signatures of Histograms for
        Local Surface Description, ECCV 2010.
    """
    points_padded, num_points = convert_pointclouds_to_tensor(pointclouds)
    ba, N, dim = points_padded.shape
    if dim != 3:
        raise ValueError(
            "The pointclouds argument has to be of shape (minibatch, N, 3)"
        )
    if (num_points <= neighborhood_size).any():
        raise ValueError(
            "The neighborhood_size argument has to be"
            + " >= size of each of the point clouds."
        )
    # undo global mean for stability
    # TODO: replace with tutil.wmean once landed
    pcl_mean = points_padded.sum(1) / num_points[:, None]
    points_centered = points_padded - pcl_mean[:, None, :]
    # get the per-point covariance and nearest neighbors used to compute it
    cov, knns = get_point_covariances(points_centered, num_points, neighborhood_size)
    # get the local coord frames as principal directions of
    # the per-point covariance
    # this is done with torch.symeig, which returns the
    # eigenvectors (=principal directions) in an ascending order of their
    # corresponding eigenvalues, while the smallest eigenvalue's eigenvector
    # corresponds to the normal direction
    curvatures, local_coord_frames = torch.symeig(cov, eigenvectors=True)
    # disambiguate the directions of individual principal vectors
    if disambiguate_directions:
        # disambiguate normal
        n = _disambiguate_vector_directions(
            points_centered, knns, local_coord_frames[:, :, :, 0]
        )
        # disambiguate the main curvature
        z = _disambiguate_vector_directions(
            points_centered, knns, local_coord_frames[:, :, :, 2]
        )
        # the secondary curvature is just a cross between n and z
        y = torch.cross(n, z, dim=2)
        # cat to form the set of principal directions
        local_coord_frames = torch.stack((n, y, z), dim=3)
    return curvatures, local_coord_frames
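A minimal usage sketch (random data; shapes follow the docstring, and torch is assumed to be imported):
points = torch.randn(2, 256, 3)
curvatures, frames = estimate_pointcloud_local_coord_frames(points, neighborhood_size=30)
normals = frames[:, :, :, 0]  # per-point normal directions, shape (2, 256, 3)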
5,263
def patch_rbac(rbac_v1: RbacAuthorizationV1Api, yaml_manifest) -> RBACAuthorization: """ Patch a clusterrole and a binding. :param rbac_v1: RbacAuthorizationV1Api :param yaml_manifest: an absolute path to yaml manifest :return: RBACAuthorization """ with open(yaml_manifest) as f: docs = yaml.safe_load_all(f) role_name = "" binding_name = "" for dep in docs: if dep["kind"] == "ClusterRole": print("Patch the cluster role") role_name = dep["metadata"]["name"] rbac_v1.patch_cluster_role(role_name, dep) print(f"Patched the role '{role_name}'") elif dep["kind"] == "ClusterRoleBinding": print("Patch the binding") binding_name = dep["metadata"]["name"] rbac_v1.patch_cluster_role_binding(binding_name, dep) print(f"Patched the binding '{binding_name}'") return RBACAuthorization(role_name, binding_name)
5,264
def _shp_to_boundary_gdf(shp_file_path):
    """Create a GeoDataFrame from a .shp file, or from a .zip that contains a shp.

    Args:
        shp_file_path (Path): path to the directory holding the shp file to convert

    Returns:
        gpd.GeoDataFrame: GeoDataFrame converted from the shp
    """
    s2g = ShapeToGeoPandas(str(shp_file_path.resolve()))
    gdf = s2g.gdf
    necessary_columns = [
        "KEY_CODE", "PREF", "CITY", "PREF_NAME", "CITY_NAME", "geometry"]
    geo_d = GdfDissolve(gdf, necessary_columns)
    geo_d.join_columns("AREA_CODE", "PREF", "CITY")
    geo_d.dissolve_poly("AREA_CODE")
    boundary_gdf = geo_d.new_gdf
    geojson_obj = geojson_str_to_obj(df_to_geojson(boundary_gdf))
    write_geojson(geojson_obj, "./created/", "boundary.geojson")
    output_csv_from_df(boundary_gdf, "./created/", "boundary.csv")
    return boundary_gdf
5,265
def test_from_subpackage(): """Build the package tree from a package inside another.""" container = PackageTree( module='packageB', root='TopLevelPackage', directory="tests", ) assert 2 == len(container.classes) assert 1 == len(container.subpackages)
5,266
def zip2dir(zip_fname, out_dir):
    """ Extract `zip_fname` into output directory `out_dir`

    Parameters
    ----------
    zip_fname : str
        Filename of zip archive to extract
    out_dir : str
        Directory path into which the archive contents are extracted
    """
    # Use unzip command rather than zipfile module to preserve permissions
    # http://bugs.python.org/issue15795
    back_tick(['unzip', '-o', '-d', out_dir, zip_fname])
5,267
def git_repo_parse_stats(): """ Create a fake git repo with the following properties: - upstream set to another local git repo - 3 staged files (1 changed, 2 additions) - 1 changed file unstaged - 2 untracked files - 1 stashed change set """ cwd = os.getcwd() folder = tempfile.mkdtemp() folder_up = folder + "_upstream" cmds = [ "git init", "git config user.email 'you@example.com'", "git config user.name 'Your Name'", "first:A single line", "second:A single line", "third:A single line", "touch untracked1 untracked2", "git add first", "git commit -m 'first commit'", "first:Changes to stash", "git stash", "first:Changes to stage", "git add first second third", "first:Changes but unstaged", "cp -R %s %s" % (folder, folder_up), "git remote add -f up %s" % folder_up, "git branch --set-upstream-to=up/master", ] try: os.chdir(folder) for cmd in cmds: if re.match(r'\S+:', cmd): assert len(cmd.split(":")) == 2 fname, text = cmd.split(":") with open(os.path.join(folder, fname), 'a') as fout: fout.write(text + '\n') else: with open(os.devnull, 'w') as devnull: sub.check_call(shlex.split(cmd), stdout=devnull, stderr=sub.STDOUT) yield finally: try: shutil.rmtree(folder) except (OSError, IOError): pass try: shutil.rmtree(folder_up) except (OSError, IOError): pass os.chdir(cwd)
5,268
def insert_site(site, seq, offset=None): """Inserts a sequence (represeting a site) into a larger sequence (which is a sequence object rather than a series of letters.""" # inputs: # site The site to be inserted # offsets the offset where the site is to be inserted # seq The sequence into which the specified site is to # be implanted # get sequence info name = seq.getName() seq_data = seq.getSeq() assert ((offset == None) or ((offset >= 0) and \ (offset <= (len(seq_data) - len(site))))) # select a random offset if none given: if (offset == None): # insert signal in a random position, from 0 up to m (= l - w) offset = random.randint(0,(len(seq_data) - len(site))) # insert the signal signal_seq = seq_data[:offset]+str(site)+seq_data[(offset + len(site)):] # create a modified sequence object to return new_seq = sequence.Seq(name, signal_seq) return new_seq
5,269
def sex2bpzmags(f, ef, zp=0., sn_min=1., m_lim=None): """ This function converts a pair of flux, error flux measurements from SExtractor into a pair of magnitude, magnitude error which conform to BPZ input standards: - Nondetections are characterized as mag=99, errormag=m_1sigma - Objects with absurd flux/flux error combinations or very large errors are characterized as mag=-99 errormag=0. """ nondetected = less_equal(f, 0.) * greater( ef, 0) #Flux <=0, meaningful phot. error nonobserved = less_equal(ef, 0.) #Negative errors #Clip the flux values to avoid overflows f = clip(f, 1e-100, 1e10) ef = clip(ef, 1e-100, 1e10) nonobserved += equal(ef, 1e10) nondetected += less_equal( old_div(f, ef), sn_min) #Less than sn_min sigma detections: consider non-detections detected = logical_not(nondetected + nonobserved) m = zeros(len(f)) * 1. em = zeros(len(ef)) * 1. m = where(detected, -2.5 * log10(f) + zp, m) m = where(nondetected, 99., m) m = where(nonobserved, -99., m) em = where(detected, 2.5 * log10(1. + old_div(ef, f)), em) if not m_lim: em = where(nondetected, -2.5 * log10(ef) + zp, em) else: em = where(nondetected, m_lim, em) em = where(nonobserved, 0., em) return m, em
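A quick numeric sanity check of sex2bpzmags (hypothetical values, assuming numpy array inputs and the default sn_min=1):
f, ef = np.array([100.0]), np.array([1.0])
m, em = sex2bpzmags(f, ef, zp=25.0)  # m ~ [20.0] = -2.5*log10(100) + 25, em ~ [0.011] = 2.5*log10(1.01)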
5,270
def get_elk_command(line): """Return the 2 character command in the message.""" if len(line) < 4: return "" return line[2:4]
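For illustration only (the message string below is made up, not a real ELK frame):
get_elk_command("16XK0000...")  # -> "XK": characters 2-3 carry the two-character command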
5,271
def safe_div(a, b): """ Safe division operation. When b is equal to zero, this function returns 0. Otherwise it returns result of a divided by non-zero b. :param a: number a :param b: number b :return: a divided by b or zero """ if b == 0: return 0 return a / b
5,272
def decrypt_story(): """ Using the methods you created in this problem set, decrypt the story given by the function getStoryString(). Use the functions getStoryString and loadWords to get the raw data you need. returns: string - story in plain text """ story = CiphertextMessage(get_story_string()) return story.decrypt_message()
5,273
def shouldAvoidDirectory(root, dirsToAvoid): """ Given a directory (root, of type string) and a set of directory paths to avoid (dirsToAvoid, of type set of strings), return a boolean value describing whether the file is in that directory to avoid. """ subPaths = root.split('/') for i, subPath in enumerate(subPaths): dir = '/'.join(subPaths[:i+1]) if dir in dirsToAvoid: return True return False
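Two illustrative calls to shouldAvoidDirectory:
shouldAvoidDirectory('build/tmp/cache', {'build/tmp'})  # -> True ('build/tmp' is one of the prefix paths)
shouldAvoidDirectory('build/output', {'build/tmp'})     # no match: falls through and returns None (falsy)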
5,274
def atom_explicit_hydrogen_valences(xgr): """ explicit hydrogen valences, by atom """ return dict_.transform_values(atom_explicit_hydrogen_keys(xgr), len)
5,275
def change_account_type(user_id): """Change a user's account type.""" if current_user.id == user_id: flash('You cannot change the type of your own account. Please ask ' 'another administrator to do this.', 'error') return redirect(url_for('admin.user_info', user_id=user_id)) user = User.query.get(user_id) if user is None: abort(404) form = ChangeAccountTypeForm() if form.validate_on_submit(): user.role = form.role.data db.session.add(user) db.session.commit() flash('Role for user {} successfully changed to {}.'.format( user.full_name(), user.role.name), 'form-success') return render_template('admin/manage_user.html', user=user, form=form)
5,276
def test_runner(filtered_tests, args): """ Driver function for the unit tests. Prints information about the tests being run, executes the setup and teardown commands and the command under test itself. Also determines success/failure based on the information in the test case and generates TAP output accordingly. """ testlist = filtered_tests tcount = len(testlist) index = 1 tap = str(index) + ".." + str(tcount) + "\n" for tidx in testlist: result = True tresult = "" if "flower" in tidx["category"] and args.device == None: continue print("Test " + tidx["id"] + ": " + tidx["name"]) prepare_env(tidx["setup"]) (p, procout) = exec_cmd(tidx["cmdUnderTest"]) exit_code = p.returncode if (exit_code != int(tidx["expExitCode"])): result = False print("exit:", exit_code, int(tidx["expExitCode"])) print(procout) else: match_pattern = re.compile(str(tidx["matchPattern"]), re.DOTALL) (p, procout) = exec_cmd(tidx["verifyCmd"]) match_index = re.findall(match_pattern, procout) if len(match_index) != int(tidx["matchCount"]): result = False if result == True: tresult += "ok " else: tresult += "not ok " tap += tresult + str(index) + " " + tidx["id"] + " " + tidx["name"] + "\n" if result == False: tap += procout prepare_env(tidx["teardown"]) index += 1 return tap
5,277
def test_get_scores_from_ratings_dataframe_by_inferring_rating_provider_longterm(): """Tests if function can correctly handle pd.DataFrame objects.""" act = rtg.get_scores_from_ratings( ratings=conftest.rtg_df_wide_with_err_row, tenor="long-term" ) # noinspection PyTypeChecker assert_frame_equal(act, exp_lt)
5,278
def check_downloaded(link, filename, downloader_name, do_not_resume=False, tmp_file=None):
    """Download `link` to `filename`, skipping the download if the file already exists."""
    if os.path.exists(filename):
        logger.info('%s already exists', filename)
    else:
        if not tmp_file:
            tmp_file = filename + '.tmp'
        if os.path.exists(tmp_file) and do_not_resume:
            os.remove(tmp_file)
        if 'youtube.com/' in link:
            downloader_name = 'yt_dl'
        download_status = download(link, tmp_file, update_progress, downloader_name)
        if download_status != 'failed':
            os.rename(tmp_file, filename)
def omp_set_num_threads(threads: int):
    """omp_set_num_threads

    Calls omp_set_num_threads.
    When training in the background, using every CPU core on the host can
    actually hurt performance or be operationally inconvenient, so this
    function lets you cap the number of threads.

    Args:
        threads (int): number of OpenMP threads
    """
    core.omp_set_num_threads(threads)
def get_all_infos_about_argument(db_argument: Argument, main_page, db_user, lang) -> dict:
    """
    Returns a bundle of information about the given argument.

    :param db_argument: The argument
    :param main_page: url of the application
    :param db_user: User
    :param lang: Language
    :rtype: dict
    :return: dictionary with many pieces of information or an error
    """
    _t = Translator(lang.ui_locales)
    return_dict = dict()
    db_votes = DBDiscussionSession.query(ClickedArgument).filter(
        ClickedArgument.argument_uid == db_argument.uid,
        ClickedArgument.is_valid == True,
        ClickedArgument.is_up_vote == True).all()

    db_author = DBDiscussionSession.query(User).get(db_argument.author_uid)
    return_dict['vote_count'] = str(len(db_votes))
    return_dict['author'] = db_author.global_nickname
    return_dict['author_url'] = main_page + '/user/' + str(db_author.uid)
    return_dict['gravatar'] = get_profile_picture(db_author)
    return_dict['timestamp'] = sql_timestamp_pretty_print(db_argument.timestamp, db_argument.lang)
    text = get_text_for_argument_uid(db_argument.uid)
    return_dict['text'] = start_with_capital(text)

    supporters = []
    gravatars = dict()
    public_page = dict()
    for vote in db_votes:
        db_author = DBDiscussionSession.query(User).get(vote.author_uid)
        name = db_author.global_nickname
        if db_user.nickname == db_author.nickname:
            name += ' (' + _t.get(_.itsYou) + ')'
        supporters.append(name)
        gravatars[name] = get_profile_picture(db_author)
        public_page[name] = main_page + '/user/' + str(db_author.uid)

    return_dict['supporter'] = supporters
    return_dict['gravatars'] = gravatars
    return_dict['public_page'] = public_page

    return return_dict
def play(i, arr, num=1):
    """Append the wav sequence for part i to arr, repeated num times."""
    for _ in range(num):
        for x in SND_PARTS[i]:
            arr.append(x)
def average_proxy(ray, method, proxy_type):
    r"""
    Average the proxy over the raypath.

    The simple method is a direct average of the proxy:
    $\sum proxy(r) / \sum dr$.
    Other methods could be: $1/(\sum 1/proxy)$ (better for computing $\delta t$).
    """
    total_proxy = 0.
    try:
        method.evaluation
    except (NameError, AttributeError):
        method.evaluation = None  # in case the attribute was not defined.
    if method.evaluation == "inverse":
        for _, point in enumerate(ray):
            _proxy = method.proxy_singlepoint(point, proxy_type)[proxy_type]
            total_proxy += 1. / _proxy
        number = len(ray)
        proxy = 1. / total_proxy / float(number)
    else:
        for j, point in enumerate(ray):
            _proxy = method.proxy_singlepoint(point, proxy_type)[proxy_type]
            total_proxy += _proxy
        number = len(ray)
        proxy = total_proxy / float(number)
    return proxy
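# Hedged sketch of how average_proxy() behaves, using a stand-in "method"
# object and a fake ray. The real proxy_singlepoint() and evaluation flag
# come from the surrounding package, so everything below is illustrative.
class _FakeMethod:
    evaluation = None  # switch to "inverse" to exercise the 1/(sum 1/proxy) branch

    def proxy_singlepoint(self, point, proxy_type):
        # pretend the proxy value simply equals the point coordinate
        return {proxy_type: float(point)}

ray = [1.0, 2.0, 4.0]
print(average_proxy(ray, _FakeMethod(), "vs"))  # arithmetic mean: ~2.333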
def test_get_ips_not_empty(
        mocker, ip_block_array, ip_v4, ip_v6, service_expired):
    """ Test ip.get_ips task with some IPs returned by API """
    mocker.patch(
        'ovh_api_tasks.api_wrappers.ip.get_ips',
        return_value=ip_block_array)
    get_ip_responses = [ip_v4, ip_v6, service_expired]
    mocker.patch(
        'ovh_api_tasks.api_wrappers.ip.get_ip',
        side_effect=get_ip_responses)

    ip_tasks.get_ips(MockContext())
    output = sys.stdout.getvalue()

    assert re.search(r'Type\s*Service name\s*IP blocks', output)
    assert re.search(r'dedicated\s*foo\s*10.0.0.1/32', output)
    assert re.search(r'vps\s*bar\s*fd2e:c4d1:8a4b:d6f7::/64', output)
    assert re.search(r'Expired IP blocks\s*-*\s*10.0.0.3$', output)
def export_graph(filename, graph, num_obs, num_int, fixed_partial_interventions=False):
    """
    Takes a graph and samples 'num_obs' observational data points and
    'num_int' interventional data points per variable. All of these are
    saved in the file 'filename'.

    Parameters
    ----------
    filename : str
        Filename to save the exported graph to.
    graph : CausalDAG
        Causal graph to sample from and export.
    num_obs : int
        Number of observational data points to sample.
    num_int : int
        Number of data points to sample per intervention.
    fixed_partial_interventions : bool
        If True, export a fixed, randomly shuffled ordering of variables
        as 'exclude_inters'.
    """
    # Sample observational dataset
    data_obs = graph.sample(batch_size=num_obs, as_array=True)
    # Sample interventional dataset
    data_int = []
    for var_idx in range(graph.num_latents, graph.num_vars):
        var = graph.variables[var_idx]
        values = np.random.randint(var.prob_dist.num_categs, size=(num_int,))
        int_sample = graph.sample(interventions={var.name: values},
                                  batch_size=num_int,
                                  as_array=True)
        data_int.append(int_sample)
    # Stack all data
    data_int = np.stack(data_int, axis=0)
    data_obs = data_obs.astype(np.uint8)
    data_int = data_int.astype(np.uint8)
    adj_matrix = graph.adj_matrix
    # If the graph has latent variables, remove them from the dataset
    latents = graph.latents
    if graph.num_latents > 0:
        data_obs = data_obs[:, graph.num_latents:]
        data_int = data_int[:, :, graph.num_latents:]
        adj_matrix = adj_matrix[graph.num_latents:, graph.num_latents:]
        latents = latents - graph.num_latents  # Correcting indices
    if fixed_partial_interventions:
        exclude_inters = list(range(graph.num_vars))
        random.shuffle(exclude_inters)
        exclude_inters = np.array(exclude_inters)
    else:
        exclude_inters = np.array([], dtype=np.uint8)
    # Export and visualize
    np.savez_compressed(filename,
                        data_obs=data_obs,
                        data_int=data_int,
                        adj_matrix=adj_matrix,
                        latents=latents,
                        exclude_inters=exclude_inters)
    if graph.num_vars <= 100:
        for i, v in enumerate(graph.variables):
            v.name = r"$X_{%i}$" % (i + 1)
        visualize_graph(graph, filename=filename + ".pdf", figsize=(8, 8), layout="graphviz")
def after(base=_datetime, diff=None):
    """
    Compute the datetime that lies `diff` after `base`.

    :param base: str/datetime/date
    :param diff: str
    :return: datetime
    """
    _base = parse(base)
    if isinstance(_base, datetime.date):
        _base = midnight(_base)

    result_dict = dp(diff)
    for unit in result_dict:
        _val = result_dict[unit]
        if not _val:
            continue
        if unit == 'years':
            _base = _base.replace(year=(_base.year + _val))
        elif unit == 'months':
            if _base.month + _val <= 12:
                _base = _base.replace(month=_base.month + _val)
            else:
                _month_diff = (_base.month + _val) - 12
                _base = _base.replace(year=_base.year + 1).replace(month=_month_diff)
        elif unit in ['days', 'hours', 'minutes', 'seconds']:
            _base = _base + datetime.timedelta(**{unit: _val})
    return _base
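# Hedged usage sketch for after(): it relies on the external helpers parse(),
# midnight() and dp() from this module, and it is an assumption here that
# dp() turns a string like '2 months' into {'months': 2}. Under that
# assumption, the month-rollover branch above works out as:
#   after('2020-11-15', '2 months')  ->  datetime(2021, 1, 15, 0, 0)
# because 11 + 2 > 12, so the year advances by one and the month wraps to 1.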
def tmux_call(command_list):
    """Executes a tmux command"""
    tmux_cmd = ['tmux'] + command_list
    # print(' '.join(tmux_cmd))
    _safe_call(tmux_cmd)
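# Hedged usage sketch: _safe_call() is this module's subprocess wrapper, so
# the line below only illustrates how the argument list is assembled; the
# session name is made up.
tmux_call(['new-session', '-d', '-s', 'scratch'])  # runs: tmux new-session -d -s scratch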
def open_lib(ifile):
    """Opens the lib file named ifile and returns a dict holding the station
    meta data ("meta"), array dimensions ("dim"), roughness lengths ("R"),
    heights ("H"), number of sectors ("sect"), sector frequencies ("f") and
    the A and k tables ("A", "k")."""
    with open(ifile, 'r') as f:
        lines = f.readlines()
    lines = [x.strip() for x in lines]
    data = {}
    lines = [x for x in lines if x.strip()]
    data["meta"] = lines[0]
    printable = set(string.printable)
    data["meta"] = ''.join(c for c in data["meta"] if c in printable)
    data["dim"] = np.array(lines[1].split()).astype(int)
    data["R"] = np.array(lines[2].split()).astype(float)  # [m]
    data["H"] = np.array(lines[3].split()).astype(float)  # [m]
    data["sect"] = int(data["dim"][2])
    data_block = lines[4:]
    # frequencies
    data["f"] = convert_to_np(data_block[::len(data["H"]) * 2 + 1], data["sect"])
    # create masks for A, k values
    mask = np.ones(len(data_block), dtype=bool)
    mask[::len(data["H"]) * 2 + 1] = False
    AK = convert_to_np(list(compress(data_block, mask)), data["sect"])
    data["A"] = AK[::2]
    data["k"] = AK[1::2]
    return data
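# Hedged usage sketch: convert_to_np() is an external helper of this module
# and 'station.lib' is a made-up filename; this only shows which keys the
# returned dict is expected to carry.
lib = open_lib('station.lib')
print(lib["meta"], lib["sect"])
print(lib["f"].shape, lib["A"].shape, lib["k"].shape)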
def convert_examples_to_features(examples, intent_label_list, slot_label_list,
                                 max_seq_length, tokenizer):
    """Convert a set of `InputExample`s to a list of `InputFeatures`."""
    features = []
    for (ex_index, example) in enumerate(examples):
        if ex_index % 10000 == 0:
            tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))

        feature = convert_single_example(ex_index, example, intent_label_list,
                                         slot_label_list, max_seq_length, tokenizer)
        features.append(feature)
    return features
def get_blb_links(driver):
    """takes (driver) and returns list of links to scrape"""
    homepage = "https://www.bloomberg.com/europe"
    rootpage = "https://www.bloomberg.com"
    driver.get(homepage)
    ssm = driver.find_elements_by_class_name("single-story-module")[0].get_attribute(
        "outerHTML"
    )
    spm_1 = driver.find_elements_by_class_name("story-package-module")[0].get_attribute(
        "outerHTML"
    )
    spm_2 = driver.find_elements_by_class_name("story-package-module")[1].get_attribute(
        "outerHTML"
    )
    oped = driver.find_elements_by_class_name("story-package-module")[2].get_attribute(
        "outerHTML"
    )
    soup = BeautifulSoup(ssm + spm_1 + spm_2 + oped, "lxml")
    links = [
        rootpage + link.get("href")
        for link in soup.findAll("a")
        if "/news/" in link.get("href")
    ]
    links = list(dict.fromkeys(links))
    return links
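# Hedged usage sketch: assumes a Selenium WebDriver is available (Chrome is
# only one option) and that the Bloomberg page still exposes the CSS classes
# queried above.
from selenium import webdriver

driver = webdriver.Chrome()
links = get_blb_links(driver)
print(len(links), "links found")
driver.quit()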
def randomProfile(freq, psd):
    """
    Generate a random profile from an input PSD.
    freq should be in standard fft.fftfreq format
    psd should be symmetric as with a real signal
    sqrt(sum(psd)) will equal RMS of profile
    """
    amp = np.sqrt(psd) * len(freq)
    ph = randomizePh(amp)
    f = amp * ph
    sig = np.fft.ifft(f)
    return np.real(sig)
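# Why sqrt(sum(psd)) equals the profile RMS (sketch): with amp = sqrt(psd)*N
# and unit-magnitude phases, Parseval's theorem for np.fft.ifft gives
#     sum(|sig|**2) = (1/N) * sum(|amp|**2) = (1/N) * N**2 * sum(psd) = N * sum(psd)
# so RMS = sqrt(sum(|sig|**2) / N) = sqrt(sum(psd)).
# A quick numerical check (assumes numpy is imported as np, as elsewhere in
# this file) using flat random phases as a stand-in for the external
# randomizePh(), which is an assumption about its behaviour:
freq = np.fft.fftfreq(256)
psd = np.exp(-np.abs(freq) * 50)
amp = np.sqrt(psd) * len(freq)
ph = np.exp(2j * np.pi * np.random.rand(len(freq)))
sig = np.fft.ifft(amp * ph)
print(np.sqrt(np.sum(np.abs(sig) ** 2) / len(sig)), np.sqrt(np.sum(psd)))  # approximately equal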
def test_accepts_filenames_with_umlauts(tmp_path):
    """Accepts filenames with umlauts."""
    os.chdir(tmp_path)
    Path("föö").write_text("foo stuff")
    Path("bär").write_text("bar stuff")
    assert _ls_visiblefile_paths() == [
        str(Path("bär").resolve()),
        str(Path("föö").resolve()),
    ]
    assert Path.cwd() == Path(tmp_path)
def getP(W, diagRegularize=False):
    """
    Turn a similarity matrix into a probability matrix, with each row
    sum normalized to 1

    :param W: (MxM) Similarity matrix
    :param diagRegularize: Whether or not to regularize the diagonal of this matrix
    :returns P: (MxM) Probability matrix
    """
    if diagRegularize:
        P = 0.5 * np.eye(W.shape[0])
        WNoDiag = np.array(W)
        np.fill_diagonal(WNoDiag, 0)
        RowSum = np.sum(WNoDiag, 1)
        RowSum[RowSum == 0] = 1
        P = P + 0.5 * WNoDiag / RowSum[:, None]
        return P
    else:
        RowSum = np.sum(W, 1)
        RowSum[RowSum == 0] = 1
        P = W / RowSum[:, None]
        return P
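# Small check of getP(): each row of the result should sum to 1. The matrix
# below is an arbitrary example (assumes numpy is imported as np, as above),
# not data from the surrounding project.
W = np.array([[0.0, 2.0, 2.0],
              [1.0, 0.0, 3.0],
              [5.0, 5.0, 0.0]])
P = getP(W)
print(P.sum(axis=1))  # -> [1. 1. 1.]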
def error(message, **formatting):
    """
    Print an error message to the command line in red.
    """
    click.secho(message, fg='red', **formatting)
def get_structure_index(structure_pattern, stream_index):
    """
    Translates the stream index into a sequence of structure indices
    identifying an item in a hierarchy whose structure is specified by the
    provided structure pattern.

    >>> get_structure_index('...',1)
    [1]
    >>> get_structure_index('.[.].',1)
    [1, 0]
    >>> get_structure_index('.[[...],..].',1)
    [1, 0, 0]
    >>> get_structure_index('.[[...]...].',2)
    [1, 0, 1]
    >>> get_structure_index('.[[...]...].',3)
    [1, 0, 2]
    >>> get_structure_index('.[[...]...].',4)
    [1, 1]
    >>> get_structure_index('.[[...]...].',5)
    [1, 2]
    >>> get_structure_index('.[[...]...].',6)
    [1, 3]
    >>> get_structure_index('.[[...]...].',7)
    [2]
    """
    structure_index = [0]
    current_stream_index = 0
    for p in structure_pattern:
        if p == '[':
            structure_index.append(0)
        elif p == '.':
            if current_stream_index == stream_index:
                return structure_index
            structure_index[-1] += 1
            current_stream_index += 1
        elif p == ']':
            structure_index.pop(-1)
            structure_index[-1] += 1
        else:
            raise Exception('Invalid character in structure pattern: %s' % repr(p))
    raise Exception('Provided stream index does not exist in the provided structure pattern')
def n_knapsack(n_knapsacks=5,
               n_items=100,  # Should be divisible by n_knapsacks
               n_weights_per_items=500,
               use_constraints=False,
               method='trust-constr',
               backend='tf'):
    """
    Here we solve a continuous relaxation of the multiknapsack problem.
    """
    # Let's emulate the multiknapsack problem with random weights
    weights_ = random((n_weights_per_items, n_items))
    # We create knapsacks with attribution of the items to knapsacks [0,1,2,3,4] as:
    # [0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4]
    capacity_knapsacks = weights_.reshape(
        (n_weights_per_items, -1, n_knapsacks)).sum(-2)
    if backend == 'tf':
        weights_ = tf.constant(weights_, tf.float32)
        capacity_knapsacks_ = tf.constant(capacity_knapsacks, tf.float32)

        def func(W):
            # We use softmax to impose the constraint that the attribution
            # of items to knapsacks should sum to one
            if use_constraints:
                W = tf.nn.softmax(W, 1)
            # We add a penalty only when the weights attribution sums higher
            # than the knapsacks capacity.
            res = tf.nn.relu(weights_ @ W - capacity_knapsacks_)
            res = tf.reduce_mean(res**2)
            return res
        dev = None
    else:
        dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        weights_ = torch.tensor(weights_, dtype=torch.float32, device=dev)
        capacity_knapsacks_ = torch.tensor(
            capacity_knapsacks, dtype=torch.float32, device=dev)

        def func(W):
            # We use softmax to impose the constraint that the attribution
            # of items to knapsacks should sum to one
            if use_constraints:
                W = torch.nn.functional.softmax(W, 1)
            # We add a penalty only when the weights attribution sums higher
            # than the knapsacks capacity.
            res = torch.nn.functional.relu(weights_ @ W - capacity_knapsacks_)
            res = (res**2).mean()
            return res

    if use_constraints:
        if backend == 'tf':
            def eq_fun(W):
                return tf.reduce_sum(W, 1) - 1
        else:
            def eq_fun(W):
                return W.sum(1) - 1
        constraints = {
            'type': 'eq',
            'fun': eq_fun,
            'lb': 0,
            'ub': 0,
            'use_autograd': False
        }
    else:
        constraints = None
    Winit = np.zeros((n_items, n_knapsacks))
    res = minimize(func, Winit, tol=1e-8,
                   constraints=constraints,
                   bounds=(0, None),
                   method=method,
                   torch_device=dev,
                   backend=backend)
    return res
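# Hedged usage sketch: minimize() above is the autograd-aware wrapper used by
# this module (not scipy.optimize.minimize directly), and it is an assumption
# here that the result object exposes the solution as res.x in scipy style.
res = n_knapsack(n_knapsacks=5, n_items=100, backend='tf')
assignment = res.x.argmax(axis=1)  # knapsack index chosen for each item
print(assignment[:10])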
def has_url(account: Accounts) -> bool:
    """Return True if the account's note or fields seem to contain a URL."""
    if account.note and "http" in account.note.lower():
        return True
    if "http" in str(account.fields).lower():
        return True
    return False
def conv2x2(in_planes, out_planes, stride=1, groups=1, dilation=1, padding=0):
    """2x2 convolution"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=2, stride=stride,
                     padding=padding, groups=groups, bias=False, dilation=dilation)
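# Quick shape check for conv2x2 (assumes torch is installed and nn is
# torch.nn, as in the definition above): a 2x2 kernel with stride 1 and no
# padding shrinks each spatial dimension by exactly one pixel.
import torch

x = torch.randn(1, 3, 8, 8)
layer = conv2x2(3, 16)
print(layer(x).shape)  # torch.Size([1, 16, 7, 7])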
def face_normals(P, T, normalize=True):
    """Computes normal vectors to triangles (faces).

    Args:
        P: n*3 float array
        T: m*3 int array
        normalize: Whether or not to normalize to unit vectors. If False,
            then the magnitude of each vector is twice the area of the
            corresponding triangle. Default is True

    Returns:
        A Numpy array of size (num_tri,3) containing the face normal vectors.
    """
    P1 = P[T[:, 0], :]
    P2 = P[T[:, 1], :]
    P3 = P[T[:, 2], :]

    N = np.cross(P2 - P1, P3 - P1)
    if normalize:
        N = (N.T / np.linalg.norm(N, axis=1)).T
    return N
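# Sanity check for face_normals() on a single right triangle in the xy-plane
# (an arbitrary example; assumes numpy is imported as np): the normal points
# along +z, and with normalize=False its length is twice the triangle's area
# (area = 0.5, so length = 1.0).
P = np.array([[0.0, 0.0, 0.0],
              [1.0, 0.0, 0.0],
              [0.0, 1.0, 0.0]])
T = np.array([[0, 1, 2]])
print(face_normals(P, T))                   # [[0. 0. 1.]]
print(face_normals(P, T, normalize=False))  # [[0. 0. 1.]]  (length 1 = 2 * 0.5)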