content
stringlengths
22
815k
id
int64
0
4.91M
def get_merged_contextvars(bound_logger: BindableLogger) -> Dict[str, Any]:
    """
    Return a copy of the current context-local context merged with the
    context carried by *bound_logger*.

    Keys bound on *bound_logger* take precedence over the context-local ones.

    .. versionadded:: 21.2.0
    """
    # Build a fresh dict so neither source mapping is mutated.
    return {**get_contextvars(), **structlog.get_context(bound_logger)}
7,600
def policy_iteration(policy, env, value_function=None, threshold=0.00001, max_steps=1000, **kwargs):
    """
    Policy iteration algorithm, which consists on iterative policy evaluation until convergence for the current
    policy (estimate over many sweeps until you can't estimate no more). And then finally updates policy to be greedy.

    :param policy: initial policy to evaluate and improve.
    :param env: environment; only ``env.world.size`` is read here, the rest is used by the ``utils`` helpers.
    :param value_function: optional warm-start value function; a zero vector of ``env.world.size`` is used when None.
    :param threshold: convergence tolerance for both the evaluation sweeps and the outer policy loop.
    :param max_steps: maximum number of evaluation sweeps before giving up with a warning.
    :param kwargs: forwarded to ``utils.single_step_policy_evaluation`` and ``utils.greedy_policy_from_value_function``.
    :return: tuple ``(last_converged_v_fun, greedy_policy)``.
    """
    # last_converged_v_fun tracks the value function at the last point policy evaluation converged.
    value_function = last_converged_v_fun = np.zeros(env.world.size) if value_function is None else value_function
    greedy_policy = policy
    for step_number in range(max_steps):
        # One sweep of policy evaluation for the current greedy policy.
        new_value_function = utils.single_step_policy_evaluation(greedy_policy, env, value_function=value_function,
                                                                 **kwargs)
        # NOTE(review): delta is computed without abs(); this assumes the sweep differences keep a consistent
        # sign as evaluation converges — confirm, otherwise np.max(np.abs(...)) would be the safer test.
        delta_eval = np.max(value_function - new_value_function)
        value_function = new_value_function
        if delta_eval < threshold:  # policy evaluation converged
            new_policy = utils.greedy_policy_from_value_function(greedy_policy, env, value_function=value_function,
                                                                 **kwargs)
            delta = np.max(last_converged_v_fun - new_value_function)
            last_converged_v_fun = new_value_function
            if delta < threshold:  # last converged value functions difference converged
                break
            else:
                greedy_policy = new_policy
        elif step_number == max_steps - 1:
            # Ran out of sweeps: fall back to a greedy policy from the last converged value function and warn.
            greedy_policy = utils.greedy_policy_from_value_function(greedy_policy, env,
                                                                    value_function=last_converged_v_fun, **kwargs)
            warning_message = 'Policy iteration did not reach the selected threshold. Finished after reaching ' \
                              'the maximum {} steps with delta_eval {}'.format(step_number + 1, delta_eval)
            warnings.warn(warning_message, UserWarning)
    return last_converged_v_fun, greedy_policy
7,601
def main(pdb_complex_core, pdb_fragment, pdb_atom_core_name, pdb_atom_fragment_name, steps, core_chain="L",
         fragment_chain="L", output_file_to_tmpl="growing_result.pdb", output_file_to_grow="initialization_grow.pdb",
         h_core=None, h_frag=None, rename=False, threshold_clash=1.70):
    """
    From a core (protein + ligand core = core_chain) and fragment (fragment_chain) pdb files, given the heavy atom
    names that we want to connect, this function adds the fragment to the core structure.

    We will get three PDB files: (1) the ligand core of the complex isolated, that will be used in further steps to
    generate the template of the initial structure; (2) the ligand completed with the core and the fragment added,
    also prepared to generate the template of the final structure; (3) the pdb file that will be used to initialise
    PELE simulations. Here we have the core structure with the fragment added, but this fragment has been
    size-reduced in order to get small bond lengths between its atoms. (During PELE simulations this distance will
    increase linearly until it reaches the bond length given by the template of the final structure.)

    :param pdb_complex_core: pdb file with a complex (protein + ligand) that will be used as core to perform the
        addition of the fragment. The chain of the ligand needs to be named as "L". We will also use the
        information of the protein to perform calculations of contacts with the ligand.
    :param pdb_fragment: pdb file, normally with only the ligand (please, put "L" as name of the chain that
        contains the ligand), that will be added to the core.
    :param pdb_atom_core_name: heavy atom name (string) of the ligand core where we want to add the fragment and
        form a new bond.
    :param pdb_atom_fragment_name: heavy atom name (string) of the ligand fragment where we want to perform the
        connection to form a new bond with the core.
    :param steps: number of growing steps used to reduce the fragment size for the initial structure.
    :param core_chain: name of the chain which contains the ligand in the pdb of the core complex. string
    :param fragment_chain: name of the chain which contains the ligand in the pdb of the fragment. string
    :param output_file_to_tmpl: name of the pdb file with the result of the connection between the core and the
        fragment (single ligand). string. The resname of the molecule will be "GRW" and the resnum "1".
        "growing_result.pdb" by default.
    :param output_file_to_grow: name of the pdb file that will be used to initialise PELE simulations. string.
        "initialization_grow.pdb" by default.
    :param h_core: if the user wants to select an specific hydrogen atom of the core to create the new bond, its
        name must be specified here.
    :param h_frag: if the user wants to select an specific hydrogen atom of the fragment to create the new bond,
        its name must be specified here.
    :param rename: if set, the names of the pdb atom names will be replaced with "G+atom_number_fragment".
    :param threshold_clash: distance that will be used to identify which atoms are doing clashes between atoms of
        the fragment and the core.
    :returns: [changing_names_dictionary, hydrogen_atoms, "{}.pdb".format(core_residue_name), output_file_to_tmpl,
        output_file_to_grow, core_original_atom, fragment_original_atom]
    """
    if not os.path.exists(c.PRE_WORKING_DIR):
        os.mkdir(c.PRE_WORKING_DIR)
    # Check that ligand names are not repeated
    check_and_fix_repeated_lignames(pdb_complex_core, pdb_fragment, core_chain, fragment_chain)
    for pdb_file in (pdb_complex_core, pdb_fragment):
        logging.info("Checking {} ...".format(pdb_file))
        checker.check_and_fix_pdbatomnames(pdb_file)
    # Get the selected chain from the core and the fragment and convert them into ProDy molecules.
    ligand_core = complex_to_prody.pdb_parser_ligand(pdb_complex_core, core_chain)
    fragment = complex_to_prody.pdb_parser_ligand(pdb_fragment, fragment_chain)
    # We will check that the structures are protonated. We will also create a new PDB file for each one and we will
    # get the residue name of each ligand.
    core_residue_name = extract_heteroatoms_pdbs(pdb_complex_core, True, core_chain, output_folder=c.PRE_WORKING_DIR)
    frag_residue_name = extract_heteroatoms_pdbs(pdb_fragment, True, fragment_chain, output_folder=c.PRE_WORKING_DIR)
    # We will use the PDBs previously generated to get a list of Bio.PDB.Atoms for each structure
    bioatoms_core_and_frag = from_pdb_to_bioatomlist([os.path.join(c.PRE_WORKING_DIR, core_residue_name),
                                                      os.path.join(c.PRE_WORKING_DIR, frag_residue_name)])
    # Then, we will have to transform the atom names of the core and the fragment to a list object
    # (format required by functions)
    pdb_atom_names = [pdb_atom_core_name, pdb_atom_fragment_name]
    # Using the Bio.PDB.Atoms lists and these names we will get the heavy atoms that we will use later to do the bonding
    heavy_atoms = extract_heavy_atoms(pdb_atom_names, bioatoms_core_and_frag)
    # Once we have the heavy atoms, for each structure we will obtain the hydrogens bonded to each heavy atom.
    # We will need pdbs because we will use the information of the protein to select the hydrogens properly.
    hydrogen_atoms = extract_hydrogens(pdb_atom_names, bioatoms_core_and_frag, [pdb_complex_core, pdb_fragment],
                                       h_core, h_frag, core_chain, fragment_chain)
    # Create a list with the atoms that form a bond in core and fragment.
    core_bond = [heavy_atoms[0], hydrogen_atoms[0]]
    fragment_bond = [hydrogen_atoms[1], heavy_atoms[1]]  # This has to be in inverted order to do correctly the superimposition
    logger.info("Performing a superimposition of bond {} of the fragment on bond {} of the core..."
                .format(fragment_bond, core_bond))
    # Using the previous information we will superimpose the whole fragment on the bond of the core in order to
    # place the fragment in the correct position, deleting the H.
    merged_structure, core_original_atom, fragment_original_atom = join_structures(core_bond, fragment_bond,
                                                                                   ligand_core, fragment,
                                                                                   pdb_complex_core, pdb_fragment,
                                                                                   core_chain, fragment_chain)
    # It is possible to create intramolecular clashes after placing the fragment on the bond of the core, so we
    # will check if this is happening, and if it is, we will perform rotations of 10 degrees until the clash is avoided.
    check_results = check_collision(merged_structure=merged_structure[0], bond=heavy_atoms, theta=0,
                                    theta_interval=math.pi/18, core_bond=core_bond,
                                    list_of_atoms=bioatoms_core_and_frag[1], fragment_bond=fragment_bond,
                                    core_structure=ligand_core, fragment_structure=fragment,
                                    pdb_complex=pdb_complex_core, pdb_fragment=pdb_fragment,
                                    chain_complex=core_chain, chain_fragment=fragment_chain,
                                    threshold_clash=threshold_clash)
    # If we do not find a solution in the previous step, we will repeat the rotations applying only increments of 1 degree
    if not check_results:
        check_results = check_collision(merged_structure=merged_structure[0], bond=heavy_atoms, theta=0,
                                        theta_interval=math.pi/180, core_bond=core_bond,
                                        list_of_atoms=bioatoms_core_and_frag[1], fragment_bond=fragment_bond,
                                        core_structure=ligand_core, fragment_structure=fragment,
                                        pdb_complex=pdb_complex_core, pdb_fragment=pdb_fragment,
                                        chain_complex=core_chain, chain_fragment=fragment_chain,
                                        threshold_clash=threshold_clash)
    # Now, we want to extract this structure in a PDB to create the template file after the growing. We will do a
    # copy of the structure because then we will need to resize the fragment part, so we need to keep it as two
    # different residues.
    try:
        structure_to_template = check_results.copy()
    except AttributeError:
        # check_results is falsy (no clash-free rotation found); surface an actionable message.
        raise AttributeError("Frag cannot superimpose the fragment onto the core's hydrogen. \
                             In order to create space for the fragment \
                             manually rotate the hydrogen bond of the core where the fragment will be attached to. \
                             We are currently working to fix this automatically")
    # Once we have all the atom names unique, we will rename the resname and the resnum of both, core and fragment,
    # to GRW and 1. Doing this, the molecule composed by two parts will be transformed into a single one.
    changing_names = pdb_joiner.extract_and_change_atomnames(structure_to_template, fragment.getResnames()[0],
                                                             core_residue_name, rename=rename)
    molecule_names_changed, changing_names_dictionary = changing_names
    # Check if there is still overlapping names
    if pdb_joiner.check_overlapping_names(molecule_names_changed):
        logger.critical("{} is repeated in the fragment and the core. Please, change this atom name of the core by"
                        " another one.".format(pdb_joiner.check_overlapping_names(molecule_names_changed)))
    logger.info("The following names of the fragment have been changed:")
    for transformation in changing_names_dictionary:
        logger.info("{} --> {}".format(transformation, changing_names_dictionary[transformation]))
    finishing_joining(molecule_names_changed, core_chain)
    # Extract a PDB file to do the templates
    prody.writePDB(os.path.join(c.PRE_WORKING_DIR, output_file_to_tmpl), molecule_names_changed)
    logger.info("The result of core + fragment has been saved in '{}'. This will be used to create the template file."
                .format(output_file_to_tmpl))
    # Now, we will use the original molecule to do the resizing of the fragment.
    reduce_molecule_size(check_results, frag_residue_name, steps)
    point_reference = check_results.select("name {} and resname {}".format(pdb_atom_fragment_name, frag_residue_name))
    fragment_segment = check_results.select("resname {}".format(frag_residue_name))
    translate_to_position(hydrogen_atoms[0].get_coord(), point_reference.getCoords(), fragment_segment)
    # Repeat all the preparation process to finish the writing of the molecule.
    changing_names = pdb_joiner.extract_and_change_atomnames(check_results, fragment.getResnames()[0],
                                                             core_residue_name, rename=rename)
    molecule_names_changed, changing_names_dictionary = changing_names
    finishing_joining(molecule_names_changed, core_chain)
    logger.info("The result of core + fragment(small) has been saved in '{}'. This will be used to initialise the growing."
                .format(output_file_to_grow))
    # Add the protein to the ligand
    output_ligand_grown_path = os.path.join(c.PRE_WORKING_DIR, "ligand_grown.pdb")
    prody.writePDB(output_ligand_grown_path, molecule_names_changed)
    with open(output_ligand_grown_path) as lig:
        content_lig = lig.readlines()
        content_lig = content_lig[1:]
        content_lig = "".join(content_lig)
    # Join all parts of the PDB
    output_file = []
    chain_not_lig = get_everything_except_ligand(pdb_complex_core, core_chain)
    output_file.append(chain_not_lig)
    output_file.append("{}TER".format(content_lig))
    out_joined = "".join(output_file)
    with open(os.path.join(c.PRE_WORKING_DIR, output_file_to_grow), "w") as output:
        # Save the file in the pregrow folder
        output.write(out_joined)
    # Make a copy of output files in the main directory
    # We assume that the user will be running FrAG in PELE's main folder...
    shutil.copy(os.path.join(c.PRE_WORKING_DIR, output_file_to_grow), ".")
    # In further steps we will probably need to recover the names of the atoms for the fragment, so for this reason
    # we are returning this dictionary in the function.
    return changing_names_dictionary, hydrogen_atoms, "{}.pdb".format(core_residue_name), output_file_to_tmpl, \
           output_file_to_grow, core_original_atom, fragment_original_atom
7,602
def iterator_filtered(gff_iterator, feature=None, source=None, contig=None, interval=None, strand=None):
    """Iterate over the contents of a gff file, yielding only entries that
    pass every supplied filter (feature, source, contig, strand, interval).
    """
    start = end = None
    if interval:
        start, end = interval
    # "." means "no strand preference" in GFF; treat it like no filter.
    if strand == ".":
        strand = None
    for entry in gff_iterator:
        matches = (
            not (feature and entry.feature != feature)
            and not (source and entry.source != source)
            and not (contig and entry.contig != contig)
            and not (strand and entry.strand != strand)
            # overlap test: negative span means the entry lies outside the interval
            and not (interval and min(end, entry.end) - max(start, entry.start) < 0)
        )
        if matches:
            yield entry
7,603
def create_blueprint(app):
    """Register blueprint routes on app."""
    marc21_blueprint = Blueprint(
        "invenio_records_marc21",
        __name__,
        template_folder="../templates",
        url_prefix="/marc21",
    )
    # Attach the theme views first, then the record views, each helper
    # returning the blueprint to register.
    marc21_blueprint = init_theme_views(marc21_blueprint, app)
    return init_records_views(marc21_blueprint, app)
7,604
def mock_machine():
    """Fixture localapi Machine init with the data/response.json file.

    Mocks the machine's HVAC POST endpoint with the canned JSON response and
    returns a Machine pointed at the mocked address.
    """
    with requests_mock.Mocker() as mock_resp:
        # BUGFIX: the original opened the file without ever closing it;
        # use a context manager so the handle is released deterministically.
        with open(response_test_path) as response_file:
            data = json.load(response_file)
        machine_ipaddr = "0.0.0.0"
        mock_addr = f"http://{machine_ipaddr}:3000/api/v1/hvac"
        mock_resp.post(mock_addr, json=data)
        return Machine(machine_ipaddr)
7,605
def test_update_height():
    """
    Test to make sure the height is being updated correctly:

    Height = 3:        4
                      / \
    Height = 2:      2   5
                    / \ / \
    Height = 1:    1  3 6  7
    """
    tree = AVLTree()
    tree.insert_array([1, 2, 3, 4, 5, 6, 7])
    root = tree.root
    # Expected height for every node of the perfectly balanced tree above.
    expectations = [
        (root, 3),
        (root.left, 2),
        (root.right, 2),
        (root.left.left, 1),
        (root.left.right, 1),
        (root.right.left, 1),
        (root.right.right, 1),
    ]
    for node, expected_height in expectations:
        assert node.height == expected_height
7,606
def get_files_links(service, v):
    """Print links of uploaded files.

    :param: service (object): Google Drive service object.
    :param: v (string): Version of Tor Browser to look for.

    :returns: dict mapping matching file titles to their Drive file IDs.
    :raises ValueError: if the Drive listing contains no files at all.
    """
    # BUGFIX: use raw strings so regex escapes like \w and \d are not
    # interpreted as (invalid) string escape sequences.
    windows_re = r'torbrowser-install-%s_\w\w(-\w\w)?\.exe(\.asc)?' % v
    linux_re = r'tor-browser-linux\d\d-%s_(\w\w)(-\w\w)?\.tar\.xz(\.asc)?' % v
    osx_re = r'TorBrowser-%s-osx\d\d_(\w\w)(-\w\w)?\.dmg(\.asc)?' % v

    # dictionary to store file names and IDs
    files_dict = dict()

    # Single-argument call form works identically on Python 2 and 3
    # (the original used the py2-only print statement).
    print("Trying to fetch links of uploaded files...")
    links = service.files().list().execute()
    items = links.get('items', [])

    if not items:
        raise ValueError('No files found.')

    for item in items:
        title = item['title']
        if (re.search(windows_re, title) or re.search(linux_re, title)
                or re.search(osx_re, title)):
            files_dict[title] = item['id']
    return files_dict
7,607
def list_(context, field, mpd_query=None):
    """
    *musicpd.org, music database section:*

        ``list {TYPE} [ARTIST]``

        Lists all tags of the specified type. ``TYPE`` should be ``album``,
        ``artist``, ``date``, or ``genre``.

        ``ARTIST`` is an optional parameter when type is ``album``, ``date``,
        or ``genre``. This filters the result list by an artist.

    *Clarifications:*

        The musicpd.org documentation for ``list`` is far from complete. The
        command also supports the following variant:

        ``list {TYPE} {QUERY}``

        Where ``QUERY`` applies to all ``TYPE``. ``QUERY`` is one or more
        pairs of a field name and a value. If the ``QUERY`` consists of more
        than one pair, the pairs are AND-ed together to find the result.
        Examples of valid queries and what they should return:

        ``list "artist" "artist" "ABBA"``
            List artists where the artist name is "ABBA". Response::

                Artist: ABBA
                OK

        ``list "album" "artist" "ABBA"``
            Lists albums where the artist name is "ABBA". Response::

                Album: More ABBA Gold: More ABBA Hits
                Album: Absolute More Christmas
                Album: Gold: Greatest Hits
                OK

        ``list "artist" "album" "Gold: Greatest Hits"``
            Lists artists where the album name is "Gold: Greatest Hits".
            Response::

                Artist: ABBA
                OK

        ``list "artist" "artist" "ABBA" "artist" "TLC"``
            Lists artists where the artist name is "ABBA" *and* "TLC". Should
            never match anything. Response::

                OK

        ``list "date" "artist" "ABBA"``
            Lists dates where artist name is "ABBA". Response::

                Date:
                Date: 1992
                Date: 1993
                OK

        ``list "date" "artist" "ABBA" "album" "Gold: Greatest Hits"``
            Lists dates where artist name is "ABBA" and album name is
            "Gold: Greatest Hits". Response::

                Date: 1992
                OK

        ``list "genre" "artist" "The Rolling Stones"``
            Lists genres where artist name is "The Rolling Stones".
            Response::

                Genre:
                Genre: Rock
                OK

    *GMPC:*

    - does not add quotes around the field argument.

    *ncmpc:*

    - does not add quotes around the field argument.
    - capitalizes the field argument.
    """
    field = field.lower()
    query = _list_build_query(field, mpd_query)
    # Dispatch on the normalized field name.
    if field == u'artist':
        return _list_artist(context, query)
    elif field == u'album':
        return _list_album(context, query)
    elif field == u'date':
        return _list_date(context, query)
    elif field == u'genre':
        # NOTE(review): the genre branch is not implemented and falls through,
        # so the function returns None here (as it does for any other field).
        pass
7,608
def test_sort_values_within_attribute_invalid_product_type(
    staff_api_client, permission_manage_product_types_and_attributes
):
    """Try to reorder an invalid attribute (invalid ID)."""
    attribute_id = graphene.Node.to_global_id("Attribute", -1)
    value_id = graphene.Node.to_global_id("AttributeValue", -1)

    variables = {
        "attributeId": attribute_id,
        "moves": [{"id": value_id, "sortOrder": 1}],
    }

    response = staff_api_client.post_graphql(
        ATTRIBUTE_VALUES_REORDER_MUTATION,
        variables,
        permissions=[permission_manage_product_types_and_attributes],
    )
    content = get_graphql_content(response)["data"]["attributeReorderValues"]

    expected_error = {
        "field": "attributeId",
        "message": f"Couldn't resolve to an attribute: {attribute_id}",
    }
    assert content["errors"] == [expected_error]
7,609
def palindrome(d: int) -> str:
    """Return a sentence stating whether *d* reads the same backwards.

    The reversal works digit by digit: the accumulator is left-shifted
    (multiplied by 10) and the number's last digit is appended each
    iteration.

    :param d: integer to test.
    :returns: sentence telling if the number is a palindrome or not.
    """
    if d < 0:
        # BUGFIX: the original digit loop never terminated for negative
        # numbers (floor division stalls at -1 in Python); a negative
        # number can never be a palindrome anyway.
        return "Given Number {} is not palindrome".format(d)
    revnum = 0
    copynum2 = d
    while copynum2 != 0:
        # divmod yields the remaining digits and the last digit in one step.
        copynum2, remainder = divmod(copynum2, 10)
        revnum = revnum * 10 + remainder
    if d == revnum:
        return "Given Number {} is palindrome".format(d)
    return "Given Number {} is not palindrome".format(d)
7,610
def main():
    """
    Converting the JSON Data to Parquet
    """
    inputs = "./raw_data/"
    for filename in os.listdir(inputs):
        # Only the .json files in the folder are converted.
        if not filename.endswith(".json"):
            continue
        frame = spark.read.json(inputs + filename)
        # Strip the ".json" suffix for the parquet directory name.
        frame.write.parquet("./raw_data/parquet/{}".format(filename[:-5]))
7,611
def import_from_file(request):
    """
    Import a part of a source site's page tree via an import of a JSON file
    exported to a user's filesystem from the source site's Wagtail Admin.

    The source site's base url and the source page id of the point in the
    tree to import define what to import and the destination parent page
    defines where to import it to.
    """
    if request.method != 'POST':
        # Plain GET: show an empty form.
        return render(request, 'wagtailimportexport/import_from_file.html', {
            'form': ImportFromFileForm(),
        })

    form = ImportFromFileForm(request.POST, request.FILES)
    if form.is_valid():
        raw = form.cleaned_data['file'].read().decode('utf-8-sig')
        import_data = json.loads(raw)
        parent_page = form.cleaned_data['parent_page']
        try:
            page_count = import_pages(import_data, parent_page)
        except LookupError as e:
            messages.error(request, _(
                "Import failed: %(reason)s") % {'reason': e}
            )
        else:
            messages.success(request, ungettext(
                "%(count)s page imported.",
                "%(count)s pages imported.",
                page_count) % {'count': page_count}
            )
        # Redirect to the explorer for both the error and the success case.
        return redirect('wagtailadmin_explore', parent_page.pk)

    # Invalid POST: re-render with the bound form so errors are shown.
    return render(request, 'wagtailimportexport/import_from_file.html', {
        'form': form,
    })
7,612
def teqc_version():
    """
    return string with location of teqc executable
    author: kristine larson
    """
    candidate = os.environ['EXE'] + '/teqc'
    # heroku version should be in the main area
    return candidate if os.path.exists(candidate) else './teqc'
7,613
def graph_search(problem, verbose=False, debug=False):
    """graph_search(problem, verbose, debug) - Given a problem representation
    attempt to solve the problem.

    Returns a tuple (path, nodes_explored) where:
    path - list of actions to solve the problem or None if no solution was found
    nodes_explored - Number of nodes explored (dequeued from frontier)
    """
    frontier = PriorityQueue()
    root = Node(problem, problem.initial)
    frontier.append(root)
    node = frontier.pop()
    pop = True  # right pop for DFS/A*, left pop for BFS
    # Sniff which search strategy the problem's node costs encode by
    # inspecting the first expanded child.
    if node.expand(node.problem)[0].g < 0:
        # DFS which has the negative depth since it starts from the deepest node
        frontier = deque()
        frontier.append(root)
    elif node.expand(node.problem)[0].h == 2:
        # BFS: FIFO queue, popped from the left
        pop = False
        frontier = deque()
        frontier.append(root)
    else:
        # Manhattan (A*): keep the priority queue, put the node back
        frontier.append(node)
    DONE = False
    nodes_explored = 0
    explored_set = Explored()
    while not DONE:
        if pop:
            node = frontier.pop()      # DFS / A*
        else:
            node = frontier.popleft()  # BFS
        if debug:
            print("Next decision is:", str(node))
        explored_set.add(node.state.state_tuple())
        nodes_explored += 1
        if problem.goal_test(node.state):
            solved_path = node.path()
            if debug:
                print("Puzzle solved")
            DONE = True
            # if verbose True display the info stats in requirement
            if verbose:
                print("Solution in %d moves" % (len(solved_path) - 1))
                print("Initial State")
                print(solved_path[0])
                for i in range(1, len(solved_path)):
                    print("Move %d - %s" % (i, solved_path[i].action))
                    print(solved_path[i].state)
            return solved_path, nodes_explored
        # Not solved yet
        else:
            for child in node.expand(node.problem):
                # add new child to frontier set
                if not explored_set.exists(child.state.state_tuple()):
                    frontier.append(child)
                    # BUGFIX: record the child's state tuple; the original
                    # added the Node object itself, which exists() (checked
                    # against state tuples) could never match, allowing
                    # duplicate states onto the frontier.
                    explored_set.add(child.state.state_tuple())
            # finish when there is no node in the queue
            DONE = len(frontier) == 0
    if verbose:
        print("No solution found")
    return None, nodes_explored
7,614
def _build_groupby_indices(df, table_name, join_columns):
    """
    Pre-computes indexes based on the group-by columns.

    Returns a dictionary of tuples to the list of indices.
    """
    log.info("Grouping table '{}' by: {}.".format(table_name, ", ".join(join_columns)))
    grouped = df.groupby(join_columns).indices
    if len(join_columns) == 1:
        # A single group-by column yields scalar keys; normalize them to
        # 1-tuples so callers always see tuple keys.
        grouped = {(key,): rows for key, rows in grouped.items()}
    return grouped
7,615
def test_print_warning_message_for_non_declared_skill_components():
    """Test the helper function '_print_warning_message_for_non_declared_skill_components'."""
    unknown_classes = {"unknown_class_1", "unknown_class_2"}
    with unittest.mock.patch.object(
        aea.skills.base._default_logger, "warning"
    ) as mock_logger_warning:
        _print_warning_message_for_non_declared_skill_components(
            SkillContext(),
            unknown_classes,
            set(),
            "type",
            "path",
        )
        # One warning per undeclared class is expected.
        for class_name in unknown_classes:
            mock_logger_warning.assert_any_call(
                "Class {} of type type found but not declared in the configuration file path.".format(class_name)
            )
7,616
def add_multiple_package(package_list: List[str]) -> str:
    """
    Generate latex code to add multiple package to preamble

    :param package_list: List of package to add in preamble
    """
    # One \usepackage line per package, newline-separated.
    return "\n".join(rf"\usepackage{{{package}}}" for package in package_list)
7,617
def emailAdmins(msgData):
    """
    Emails all admins with given message.
    States which admin created/is sending the message to everyone.

    Return: (str, bool) tuple of status message and send success on the
    normal path; bare False when no message text was provided.
    """
    from metrics.models import Group
    try:
        if not msgData['msg']:
            print('No message was provided to send.')
            return False
        admins = list(Group.objects.get(name='admins').user_set.all().values_list('username', flat=True))
        returnMessage, emailSent = sendEmail({
            'subject': '[Omnia] Admins communication',
            'recipients': admins,
            'fromEmail': msgData['fromEmail'],
            'message': f'<div style="font-family:sans-serif;font-size:14px;line-height:20px;"><p>Message from {msgData["sender"]} to all {len(admins)} Omnia admins:</p><p>{msgData["msg"]}</p></div>'
        })
        return (returnMessage, emailSent)
    except Exception as ex:
        # BUGFIX: the original interpolated returnMessage here, which is
        # unbound whenever the failure happens before sendEmail() returns,
        # raising a NameError that masks the real error. Report the caught
        # exception instead.
        return (f"Error: Admin email failed to send. Error message: {ex}", False)
7,618
def add(isamAppliance, name, chainItems=None, description=None, check_mode=False, force=False):
    """
    Create an STS chain template

    :param isamAppliance: appliance connection object used for the REST calls.
    :param name: name of the chain template to create.
    :param chainItems: list of chain item definitions; treated as empty when None.
    :param description: optional description stored on the template.
    :param check_mode: if True, only report that a change would be made.
    :param force: if True, create without first searching for an existing template.
    :returns: return object from the appliance invocation.
    """
    # BUGFIX: the original used a mutable default argument (chainItems=[]),
    # which is shared across calls; normalize inside the body instead.
    if chainItems is None:
        chainItems = []
    if force is False:
        ret_obj = search(isamAppliance, name)
    # Short-circuit: when force is True, ret_obj is intentionally never read.
    if force is True or ret_obj['data'] == {}:
        if check_mode is True:
            return isamAppliance.create_return_object(changed=True)
        else:
            json_data = {
                "name": name,
                "chainItems": chainItems
            }
            if description is not None:
                json_data['description'] = description
            return isamAppliance.invoke_post(
                "Create an STS chain template", uri, json_data,
                requires_modules=requires_modules, requires_version=requires_version)
    return isamAppliance.create_return_object()
7,619
def MinHamDistance(pattern, dna_list):
    """Calculate the minimum Hamming distance from a DNA list.

    NOTE(review): despite the name, this returns the SUM of the per-sequence
    distances, not the minimum — confirm against callers before renaming.
    """
    total = 0
    for sequence in dna_list:
        total += HammingDistanceDiffLen(pattern, sequence)
    return total
7,620
def _add_note(text: str, user: KarmaUser) -> str:
    """Adds a new note to the database for the given user."""
    _, note_msg = _parse_note_cmd(text)

    # Guard clauses: nothing to store, or an identical note already exists.
    if not note_msg:
        return f"Sorry {user.username}, could not find a note in your message."
    if _note_exists(note_msg, user):
        return f"Sorry {user.username}, you already have an identical note."

    new_note = KarmaNote(
        user_id=user.user_id, timestamp=datetime.datetime.now(), note=note_msg
    )
    session = db_session.create_session()
    session.add(new_note)
    session.commit()
    return f"Hey {user.username}, you've just stored a note."
7,621
def get_next_method(generator_instance):
    """
    Cross-platform function that retrieves the 'next' method from a
    generator instance.

    :type generator_instance: Any
    :rtype: () -> Any
    """
    # Python 3 spells it __next__; Python 2 used a plain next attribute.
    running_py3 = sys.version_info > (3, 0)
    return generator_instance.__next__ if running_py3 else generator_instance.next
7,622
def process_line(this_line, do_stemming=False, remove_stopwords=False):
    """
    Given a line from the CSV file, gets the stemmed tokens.
    """
    parsed_speech = process_csv_line(this_line)
    # Tokenize the speech body, optionally stemming and dropping stopwords.
    return process_raw_speech_text(
        parsed_speech.contents,
        perform_stemming=do_stemming,
        delete_stopwords=remove_stopwords,
    )
7,623
def kill_action(args):
    """Entry point for the 'kill' cli command.

    :param args: parsed CLI arguments object; ``app_name`` and
        ``sigkill_timeout`` attributes are forwarded to ``kill_app``.
    """
    kill_app(args.app_name, args.sigkill_timeout)
7,624
def metadef_tag_count(context, namespace_name):
    """Get metadef tag count in a namespace"""
    namespace = metadef_namespace_get(context, namespace_name)
    # Visibility check raises for namespaces the caller may not see.
    _check_namespace_visibility(context, namespace, namespace_name)
    return sum(
        1 for tag in DATA['metadef_tags']
        if tag['namespace_id'] == namespace['id']
    )
7,625
def act2graph(graph: Graph, xml_root: Xml, registry: dict, namespaces: dict,
              tag: str) -> Graph:
    """ Transform activityName tag into RDF graph.

    The function transforms the Activity MasterData into identifier. The
    output is a RDF graph that represents a part of the Ecoinvent
    nomenclature structured with The IEO ontology. The output represents the
    centrally registrered identifier (CRID) by the database version and the
    activity name identifier, e.g. ecoinvent3.0:88d6c0aa-0053-4367-b0be-05e4b49ff3c5
    for the copper production, primary.

    Variables:
    - graph: the graph to update
    - xml_root: the root of the xml file
    - registry: dictionary containing the reference/info of the data registry
    - tag: string containing the namespace tag
    - namespaces: dictionary containing the namespaces with tags
    """
    # crid_reg: CRID registry, e.g Ecoinvent
    crid_reg = registry['reg_id']
    crid_reg_label = registry['label']
    # Database identifier, e.g. EcoInvent3.1
    major_release = xml_root.attrib['majorRelease']
    minor_release = xml_root.attrib['minorRelease']
    database_version = f'v{major_release}_{minor_release}'
    database_label = f'{crid_reg_label}{major_release}.{minor_release}'
    database_id = crid_reg + database_version
    # Declare the registry, the CRID class and the activity-name class once.
    graph.add((ECO[crid_reg], RDFS.label, Literal(crid_reg_label, lang='en')))
    graph.add((ECO.activityId, RDFS.subClassOf, ACT_CRID))
    activity_id_label = 'EcoInvent activity identifier'
    graph.add((ECO.activityId, RDFS.label, Literal(activity_id_label, lang='en')))
    graph.add((ECO.activity_name, RDFS.subClassOf, REF_ACTIVITY))
    activity_label = 'EcoInvent activity label'
    graph.add((ECO.activity_name, RDFS.label, Literal(activity_label, lang='en')))
    # One CRID per activityName element: database version + activity id.
    for activity_name in xml_root.findall(tag, namespaces):
        activity_name_id = activity_name.attrib['id']
        crid = activity_name_id + database_version
        graph.add((ECO[crid], RDF.type, ECO.activityId))
        graph.add((ECO[activity_name_id], RDF.type, ECO.activity_name))
        # Define the property relation between the symbols of the CRID
        graph.add((ECO[crid], BFO.has_part, ECO[database_id]))
        graph.add((ECO[database_id], BFO.part_of, ECO[crid]))
        graph.add((ECO[crid], BFO.has_part, ECO[activity_name_id]))
        graph.add((ECO[activity_name_id], BFO.part_of, ECO[crid]))
        # Define the labels with the different languages
        xml_ns = namespaces['xml']
        for name in activity_name.findall('eco:name', namespaces):
            # xml:lang attribute arrives Clark-notated: {namespace}lang
            lang = name.attrib['{' + xml_ns + '}lang']
            activity_label = name.text
            crid_label = f'{database_label}:{activity_label}'
            graph.add((ECO[crid], RDFS.label, Literal(crid_label, lang=lang)))
            graph.add((ECO[activity_name_id], RDFS.label,
                       Literal(activity_label, lang=lang)))
    return graph
7,626
def test_complex004_complex004_v2_xml(mode, save_output, output_format):
    """
    xsi:type default

    Default value for xsi:type is allowed but ignored.
    Binds the n2 instance document against the complex004 schema.
    """
    assert_bindings(
        schema="saxonData/Complex/complex004.xsd",
        instance="saxonData/Complex/complex004.n2.xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
7,627
def to_checksum_address(value: AnyStr) -> ChecksumAddress:
    """
    Makes a checksum address given a supported format.
    """
    norm_address = to_normalized_address(value)
    address_hash = encode_hex(keccak(text=remove_0x_prefix(norm_address)))

    # Positions 2..41 cover the 40 hex digits after the "0x" prefix; a digit
    # is upper-cased when the matching hash nibble is > 7.
    chars = []
    for i in range(2, 42):
        if int(address_hash[i], 16) > 7:
            chars.append(norm_address[i].upper())
        else:
            chars.append(norm_address[i])
    checksum_address = add_0x_prefix("".join(chars))
    return ChecksumAddress(HexAddress(checksum_address))
7,628
def is_volatile(type):
    """returns True, if type represents C++ volatile type, False otherwise"""
    # Strip typedef aliases before testing the underlying type.
    return isinstance(remove_alias(type), cpptypes.volatile_t)
7,629
def load_adult(as_frame: bool = False):
    """Load and return the higly imbalanced binary classification
    [adult income datatest](http://www.cs.toronto.edu/~delve/data/adult/desc.html).

    you may find detailed description
    [here](http://www.cs.toronto.edu/~delve/data/adult/adultDetail.html)
    """
    with resources.path(
        "pytorch_widedeep.datasets.data", "adult.parquet.brotli"
    ) as fpath:
        frame = pd.read_parquet(fpath)
    # Return a DataFrame when requested, otherwise a plain numpy array.
    return frame if as_frame else frame.to_numpy()
7,630
def sam(
    body: Optional[Union[bool,Callable]] = json.loads,
    pathParams: Optional[Union[bool,Callable]] = False,
    queryString: Optional[Union[bool,Callable]] = False,
    headers: Optional[Union[bool,Callable]] = False,
    authenticate: Optional[Callable[[dict], types.AuthUser]] = None,
    authorize: Optional[Callable[[types.AuthUser], bool]] = None,
    jsonize_response: bool = True,
    keep_event: bool = False,
    keep_context: bool = False,
    pass_auth_user: bool = True,
):
    """Wraps an AWS lambda handler function to handle auth, to
    catch and handle errors, and to convert lambda handler default
    parameters to a functions declared parameters.

    :param body: Should the wrapper function pass `event`'s "body"
        attribute as an arg to inner function (called "body")?
        If `body` is callable, it will be used to parse the values.
        For example, if the body is string-ified JSON, you can use
        `json.loads` to load the request (or `parsers.json`, a wrapper
        around `json.loads`). Or, you could use a `pydantic` model to
        parse and validate the input.
        If this param parsing raises an error, it will be caught and
        returned as an `errors.RequestParseError`.
        See also other params: `pathParams`, `queryString`, and `headers`.
    :param authenticate: Function to authenticate the requesting user.
        Takes the full `event` as an input and returns a User.
    :param authorize: Function to authorize the requesting user. Note:
        `authenticate` must also be present.
    :param jsonize_response: Should the response body be wrapped in JSON?
        If so, the response's body will be a string-ified json dict
        of the following form: `{"success": true, "result": ...}`
        If `jsonize_response` is `True` but the function's signature
        shows a return value of `None` or `NoReturn`, and the function
        does in fact return `None`, the body will not have a "result"
        attribute, only "success".
        If `jsonize_response` is `True` and the returned value is a dict,
        that value will be merged with a dict: `{"success": True}`
    :param keep_event: Should the `event` dict be passed to the wrapped
        function from AWS Lambda?
    :param keep_context: Should the `context` object be passed to the
        wrapped function from AWS Lambda?
    :param pass_auth_user: If authentication function supplied,
        should `authUser` be passed as a kwarg to the wrapped function?
    :returns: Decorated lambda handler function
    """
    # Check authorize/authenticate
    if authorize is not None:
        assert authenticate is not None, "If `authorize` is not `None`, "+\
            "`authenticate` can't be `None`."

    def wrapper(fn: Callable):
        # Get the function's return type, to use later when
        # deciding how to format response
        return_type = args.get_return_type(fn)

        @ft.wraps(fn)
        def inner(event: dict, context) -> dict:
            # Store function arguments
            kwargs = {}

            if authenticate is not None:
                # Authenticate the user
                try:
                    user = authenticate(event)
                except errors.HttpError as e:
                    return e.json()

                if authorize is not None:
                    # Authorize the user
                    try:
                        if not authorize(user):
                            raise errors.AuthorizationError()
                    except errors.HttpError as e:
                        return e.json()

                # Does the user want the authorized
                # user as an argument?
                if pass_auth_user:
                    kwargs["authUser"] = user

            # Get the query/path/body/header params.
            # NOTE(review): parse failures deliberately return a generic
            # message to the client rather than the underlying exception.
            if body:
                try:
                    kwargs["body"] = body(event["body"]) if callable(body) else event["body"]
                except Exception as e:
                    return errors.RequestParseError().json(
                        f"Unable to read request body."
                    )

            if pathParams:
                try:
                    kwargs["path"] = pathParams(event["pathParameters"]) if callable(pathParams) \
                        else event["pathParameters"]
                except Exception as e:
                    return errors.RequestParseError().json(
                        f"Unable to read request path parameters."
                    )

            if queryString:
                try:
                    kwargs["query"] = queryString(event["queryStringParameters"]) if callable(queryString) \
                        else event["queryStringParameters"]
                except Exception as e:
                    return errors.RequestParseError().json(
                        f"Unable to read request query string parameters."
                    )

            if headers:
                try:
                    kwargs["headers"] = headers(event["headers"]) if callable(headers) else event["headers"]
                except Exception as e:
                    return errors.RequestParseError().json(
                        f"Unable to read request headers."
                    )

            # Add event/context if requested
            if keep_event:
                kwargs["event"] = event
            if keep_context:
                kwargs["context"] = context

            # Call the function; known HTTP errors map to their own JSON,
            # anything unexpected becomes a 500 without leaking details.
            try:
                res = fn(**kwargs)
            except errors.HttpError as e:
                return e.json()
            except Exception as e:
                print(f"UNCAUGHT ERROR: \"{e}\"")
                return errors.InternalServerError().json()

            # Return a response
            if jsonize_response:
                # If there isn't a return (as expected)
                # just return the success-ness
                if res is None and return_type in (None, NoReturn):
                    return {
                        "statusCode": 200,
                        "body": json.dumps({
                            "success": True,
                        })
                    }
                # If the response is a dict, merge
                # it with the `success`-ness flag
                if isinstance(res, dict):
                    return {
                        "statusCode": 200,
                        "body": json.dumps({
                            "success": True,
                            **res
                        })
                    }
                # Otherwise (if result isn't a dict)
                # return it as the value to key "result"
                return {
                    "statusCode": 200,
                    "body": json.dumps({
                        "success": True,
                        "result": res,
                    })
                }
            else:
                # If not json-izing the response, pass
                # it as the value to the key "body"
                # (still with a status-code of 200)
                return {
                    "statusCode": 200,
                    "body": res
                }
        return inner
    return wrapper
7,631
def get_logger(name=None):
    """return a logger
    """
    global logger
    # Reuse the module-level singleton when it has already been built.
    if logger is not None:
        return logger
    print('Creating logger========================================>')
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.INFO)
    log_format = '[%(asctime)s][%(levelname)s]{%(pathname)s:%(lineno)d} %(message)s'
    stream_handler.setFormatter(logging.Formatter(log_format))
    logger.addHandler(stream_handler)
    return logger
7,632
def LF_degen_spine(report):
    """ Checking for degenerative spine """
    degen_pat = re.compile('degen', re.IGNORECASE)
    spine_pat = re.compile('spine', re.IGNORECASE)
    # Label abnormal when any single sentence mentions both terms.
    for sentence in report.report_text.text.split("."):
        if degen_pat.search(sentence) and spine_pat.search(sentence):
            return ABNORMAL_VAL
    return ABSTAIN_VAL
7,633
def make_log_format(fields, sep=" - "):
    """
    Build a custom log format, as accepted by the logging module, from a list
    of field names.

    :param fields: list or tuple of str - names of fields to use in log messages
    :param sep: str - separator to put between fields. Default is ' - '
    :return: a log format string usable to configure log formatters
    """
    # Reject unknown field names up front.
    valid = all(name in log_fields for name in fields)
    assert valid, "Only fields from {} are valid".format(tuple(log_fields))
    parts = ["%({}){}".format(name, log_fields[name]) for name in fields]
    return sep.join(parts)
7,634
def tweets_for(type, args, per_user=None):
    """
    Retrieve tweets for a user, list or search term.
    The optional ``per_user`` arg limits the number of tweets per user, for
    example to allow a fair spread of tweets per user for a list.
    """
    query = {type: args[0].strip("\"'")}
    tweets = Tweet.objects.get_for(**query)
    if per_user is not None:
        # Keep at most per_user tweets for each author, then re-sort newest first.
        by_user = defaultdict(list)
        for tweet in tweets:
            user_tweets = by_user[tweet.user_name]
            if len(user_tweets) < per_user:
                user_tweets.append(tweet)
        tweets = sum(by_user.values(), [])
        tweets.sort(key=lambda t: t.created_at, reverse=True)
    # A trailing numeric arg caps the total number of tweets returned.
    if len(args) > 1 and args[-1].isdigit():
        tweets = tweets[:int(args[-1])]
    return tweets
7,635
def sum_var(A):
    """summation over axis 1 (var) equivalent to np.sum(A, 1)"""
    if issparse(A):
        # .A1 flattens the sparse row-sum matrix into a 1-D ndarray.
        return A.sum(1).A1
    # Dense: row sums for 2-D input, grand total for 1-D input.
    return np.sum(A, axis=1) if A.ndim > 1 else np.sum(A)
7,636
def success_schema():
    """Pytest fixture for successful SchemaModel object"""
    # Build a v1.0 schema and flag it as a successful result.
    schema = SchemaVersion("1.0")
    schema.success = True
    return schema
7,637
def _interfaces(config):
    """
    list system interfaces based on shape

    When ``auto`` is enabled in the DEFAULT section the interface list is
    derived from the instance shape; otherwise it is read from the
    comma-separated ``interfaces`` config entry.
    """
    # Shape (instance type) comes from the cloud instance metadata service.
    shape = lib.metadata.get_instance()['shape']
    # NOTE(review): a bare ``print`` is a no-op in Python 3 (under Python 2
    # it printed a blank line) -- confirm whether this line can be removed.
    print
    if config.getboolean('DEFAULT', 'auto') is True:
        interfaces = lib.interfaces.get_interfaces_by_shape(shape)
    else:
        interfaces = config['DEFAULT']['interfaces'].split(',')
    return interfaces
7,638
def getColumninfo(columns):
    """
    See ElementFaceToThickness.
    """
    # Only the thickness mapping is needed here; the list of problematic
    # columns returned alongside it is discarded.
    thickness_map, _problematic = ElementFaceToThickness(columns)
    return thickness_map
7,639
def main(tokens, directory, output_file, dry_run, offsets, is_regex, case_sensitive,
         export_clips, export_clips_dir, export_clips_template):
    """A tool for finding quotes in series/movies/animes
    and automatically creating compilations.

    \b
    Examples:
      $ quoteclipper -match Hello .
      $ quoteclipper -m "Morning" -m "Good Night" ./videos
      $ quoteclipper -o ~/Desktop/greetings.mp4 -m 'Hello' -m 'Hi' -m 'Whassup' .
      $ quoteclipper -re -m "/Call 555.\d+/i"
      $ quoteclipper -re -m "/Car?s|sandwich(es)?/i" .
      $ quoteclipper -re -m "/(Ya?|You)'? ?Know\!/i"
    """
    print('QuoteClipper')

    tokens = list(tokens)
    terms = sanitize_filename(', '.join(tokens))
    output_file = output_file.format(terms)
    print('Directory:', directory, 'Output:', output_file, 'Match:', tokens)

    # Build one compiled pattern per search token; plain tokens are matched
    # as whole words, regex tokens are parsed by the project's regexp().
    if is_regex:
        search_regxps = [regexp(m) for m in tokens]
    else:
        f = (re.I if case_sensitive == False else 0)
        search_regxps = [re.compile(r"\b"+m+r"\b", flags=f) for m in tokens]

    # find subtitles
    print('\n=> Scanning folder {} for videos with srt subtitles...'.format(directory))
    episodes_list = []
    for (dirpath, _dirnames, filenames) in walk(directory):
        for filename in filenames:
            if filename.endswith(('.mp4', '.mkv')) and not filename.startswith('._'):
                video_path = path.join(dirpath, filename)
                subtitles_path = None
                basename = path.splitext(path.basename(video_path))[0]
                # find corresponding subtitle file
                for ext in ['srt', 'ssa', 'ass', 'sub', 'txt']:
                    test = path.join(dirpath, basename + '.' + ext)
                    if path.isfile(test):
                        subtitles_path = test
                        break
                if subtitles_path:
                    episode = SimpleNamespace(
                        basename=basename,
                        video_path=video_path,
                        subtitles_path=subtitles_path,
                    )
                    episodes_list.append(episode)
                    print("\t* {}".format(subtitles_path))
                else:
                    print("\tNo subtitles found! {}".format(video_path))

    episodes_list = sorted(episodes_list, key=lambda k: k.basename)
    print(" Files found: {} videos with subtitles".format(len(episodes_list)))

    # read each subtitle
    print('\n=> Searching captions matching "{}" ...'.format('" or "'.join(tokens)))
    quotes = []
    for episode in episodes_list:
        print('\t* ', episode.basename)
        for caption in parser.parse(episode.subtitles_path):
            # Drop non-ASCII characters before matching.
            sanitized_captions = caption.text.strip().encode("ascii", "ignore").decode()
            if test_text(sanitized_captions, search_regxps):
                quote = SimpleNamespace(
                    episode=episode,
                    caption=caption,
                    clip=None,
                    clip_exported_file=None,
                    cut= SimpleNamespace(t_start=0, t_end=0, t_total=0),
                )
                quotes.append(quote)
                print('\t\t{} - [{} {} ~ {}] {}'.format(len(quotes), caption.index, caption.start, caption.end, caption.text))
        print('\t')
    print(' Done scanning subtitles! Quotes found: {}'.format(len(quotes)))

    if len(quotes) > 0 and dry_run == False:
        # trim clips
        print('\n=> Creating subclips...')
        for i, quote in enumerate(quotes):
            # Pad the caption's time range by the user-supplied offsets.
            quote.cut.t_start = time_to_seconds(quote.caption.start) + offsets[0]
            quote.cut.t_end = time_to_seconds(quote.caption.end) + offsets[1]
            quote.cut.t_total = quote.cut.t_end - quote.cut.t_start
            print("\t[{}/{}] Clipping... ({:.2f}s) {}".format(i, len(quotes), quote.cut.t_end-quote.cut.t_start, quote.caption.text))
            clip = VideoFileClip(quote.episode.video_path).subclip(quote.cut.t_start, quote.cut.t_end)

            # export individual clips
            if export_clips:
                clip_filename = export_clips_template.format(
                    n=i+1,
                    index=quote.caption.index,
                    basename=quote.episode.basename,
                    quote=quote.caption.text,
                    start=quote.caption.start,
                    end=quote.caption.end,
                    duration=clip.duration,
                )
                quote.clip_exported_file = path.join(export_clips_dir, sanitize_filename(clip_filename))
                if not path.isfile(quote.clip_exported_file):
                    clip.to_videofile(quote.clip_exported_file, codec="libx264", temp_audiofile=quote.clip_exported_file + '~audio.m4a', remove_temp=True, audio_codec='aac')
                else:
                    print("\tAlready Exist, skipping...")
            else:
                # save for joining later
                quote.clip = clip
        print(' Done creating subclips!')

        # join clips into a single video
        print('\n=> Rendering {} clips together...'.format(len(quotes)))
        clips = []
        if export_clips:
            # Read from exported clips files
            for quote in quotes:
                clip = VideoFileClip(quote.clip_exported_file)
                clips.append(clip)
        else:
            # Export directly from source files
            clips = list(map(lambda q: q.clip, quotes))

        final_clip = concatenate_videoclips(clips)
        final_clip.write_videofile(
            output_file,
            codec="libx264",
            temp_audiofile=output_file + '~audio.m4a',
            remove_temp=True,
            audio_codec='aac'
        )

        # Generate new subtitles whose timestamps follow the concatenated
        # timeline (each quote starts where the previous one ended).
        print('\n=> Creating new subtitles...')
        start = 0
        new_subtitles = []
        for i, quote in enumerate(quotes):
            end = start + quote.cut.t_total
            line = SimpleNamespace(index=i+1, start=seconds_to_hhmmssms(start-offsets[0]), end=seconds_to_hhmmssms(end-offsets[1]), text=quote.caption.text)
            start = end
            new_subtitles.append(line)

        template = "{index}{eol}{start} --> {end}{prop}{eol}{text}{eol}"
        new_subtitles = [template.format(
            index=c.index,
            start=c.start,
            end=c.end,
            prop='',
            text=c.text,
            eol='\n',
        ) for c in new_subtitles]
        new_srt = '\n'.join(new_subtitles)
        with open(path.splitext(output_file)[0] + '.srt', 'wb') as file:
            file.write(new_srt.encode('utf8'))
        print(' Done creating new subtitles!')

    print('\nFinished!')
7,640
def script(
    command: str, inputs: Any = [], outputs: Any = NULL, tempdir=False, **task_options
) -> Any:
    """
    Execute a shell script as a redun task with file staging.

    :param command: shell command to run (wrapped/prepared before execution).
    :param inputs: nested value containing Staging objects to stage in before
        the command runs.
        NOTE(review): ``[]`` is a mutable default argument; it is never
        mutated here, but consider ``()`` to be safe.
    :param outputs: nested value containing Staging objects to unstage after
        the command runs; defaults to a single stdout File ("-").
    :param tempdir: when True, run the command inside a fresh temporary
        directory.
    :param task_options: forwarded to the underlying ``_script`` task.
    :return: result of the ``_script`` task for this command.
    """
    if outputs == NULL:
        outputs = File("-")

    command_parts = []

    # Prepare tempdir if requested.
    temp_path: Optional[str]
    if tempdir:
        temp_path = mkdtemp(suffix=".tempdir")
        command_parts.append('cd "{}"'.format(temp_path))
    else:
        temp_path = None

    # Stage inputs.
    command_parts.extend(input.render_stage() for input in iter_nested_value(inputs))

    # User command.
    command_parts.append(get_wrapped_command(prepare_command(command)))

    # Unstage outputs.
    file_stages = [value for value in iter_nested_value(outputs) if isinstance(value, Staging)]
    command_parts.extend(file_stage.render_unstage() for file_stage in file_stages)

    full_command = "\n".join(command_parts)

    # Get input files for reactivity.
    def get_file(value: Any) -> Any:
        if isinstance(value, Staging):
            # Staging files and dir turn into their remote versions.
            cls = type(value.remote)
            return cls(value.remote.path)
        else:
            return value

    input_args = map_nested_value(get_file, inputs)

    return _script(
        full_command, input_args, outputs, task_options=task_options, temp_path=temp_path
    )
7,641
def get_forms(console: Console, sess: requests.Session, form_id: str = "General_Record_2020v2.0"):
    """
    Method to get every form for a given FormID

    :param console: rich console used for status/diagnostic logging
    :param sess: authenticated requests session used for the API call
    :param form_id: AgTerra form identifier whose records are fetched
    :return: decoded JSON payload from the GetAll endpoint
    """
    raw_resp = get_url(url=f"https://forms.agterra.com/api/{form_id}/GetAll/0", sess=sess)
    if raw_resp.status_code != 200:
        console.log(f"[red] Something went wrong, we got status [white]{raw_resp.status_code}")
    # Parse the body exactly once (the original called raw_resp.json() twice,
    # decoding the same payload redundantly).
    json_data = raw_resp.json()
    console.log(f"Message Data: {json_data}")
    return json_data
7,642
def record_available_username(username):
    """
    Notify the user of a valid username and write it to
    ./available-usernames.txt

    :param username: the username that was found to be available
    """
    log.info("Name %s is available!", username)
    available_usernames.append(username)
    # Fix: removed leftover debug statement ``print(f"{available_usernames=}")``.
    # Append immediately so progress survives an interrupted run.
    with open(
        f"{script_dir}/available-usernames.txt", "a", encoding="UTF-8"
    ) as available_usernames_file:
        available_usernames_file.write(f"{username}\n")
7,643
def load_dataset(input_files, input_vocab, mode, batch_size=32, min_seq_len=5, num_buckets=4):
    """Returns an iterator over the training data.

    :param input_files: text file(s) with one whitespace-tokenized sentence
        per line.
    :param input_vocab: TensorFlow lookup table mapping tokens to ids.
    :param mode: one of the ``constants`` run modes; shuffling, length
        filtering and bucketing only happen for ``constants.TRAIN``.
    :param batch_size: number of padded examples per batch.
    :param min_seq_len: minimum sentence length kept during training.
    :param num_buckets: number of length buckets used to group similarly
        sized sentences (bucket width is 6 tokens).
    :return: a ``tf.data`` initializable iterator over dicts with keys
        ``ids``, ``ids_in`` (BOS-prefixed), ``ids_out`` (EOS-suffixed),
        ``ids_in_out`` (BOS+EOS) and ``length``.
    """

    def _make_dataset(text_files, vocab):
        # One line per example: split on whitespace, then map tokens to ids.
        dataset = tf.data.TextLineDataset(text_files)
        dataset = dataset.map(lambda x: tf.string_split([x]).values)
        dataset = dataset.map(vocab.lookup)
        return dataset

    def _key_func(x):
        # Bucket by length during training so padded batches waste less space.
        if mode == constants.TRAIN:
            bucket_width = 6
            bucket_id = x["length"] // bucket_width
            bucket_id = tf.minimum(bucket_id, num_buckets)
            return tf.to_int64(bucket_id)
        else:
            return 0

    def _reduce_func(unused_key, dataset):
        return dataset.padded_batch(batch_size,
            padded_shapes={
                "ids": [None],
                "ids_in": [None],
                "ids_out": [None],
                "ids_in_out": [None],
                "length": [],
            },
        )

    bos = tf.constant([constants.START_OF_SENTENCE_ID], dtype=tf.int64)
    eos = tf.constant([constants.END_OF_SENTENCE_ID], dtype=tf.int64)

    # Make a dataset from the input and translated file.
    input_dataset = _make_dataset(input_files, input_vocab)
    dataset = tf.data.Dataset.zip(input_dataset)
    if mode == constants.TRAIN:
        dataset = dataset.shuffle(200000)

    # Define the input format.
    dataset = dataset.map(lambda x: {
        "ids": x,
        "ids_in": tf.concat([bos, x], axis=0),
        "ids_out": tf.concat([x, eos], axis=0),
        "ids_in_out": tf.concat([bos, x, eos], axis=0),
        "length": tf.shape(x)[0]})

    # Filter out invalid examples.
    if mode == constants.TRAIN:
        dataset = dataset.filter(lambda x: tf.greater(x["length"], min_seq_len - 1))

    # Batch the dataset using a bucketing strategy.
    dataset = dataset.apply(tf.contrib.data.group_by_window(
        _key_func, _reduce_func, window_size=batch_size))
    return dataset.make_initializable_iterator()
7,644
def _get_results(**kwargs):
    """
    Generate a command with the parameters, run it, and return the normalized
    results
    """
    cmd = _generate_command(**kwargs)
    output, error, rc = testoob.run_cmd.run_command(cmd)
    # Normalize platform-specific line endings before returning.
    return tt._normalize_newlines(output), tt._normalize_newlines(error), rc
7,645
def main():
    """
    This program simulates a bouncing ball at (START_X, START_Y)
    that has VX as x velocity and 0 as y velocity. Each bounce reduces
    y velocity to REDUCE of itself.
    """
    ball.filled = True
    window.add(ball)
    # NOTE(review): the handler name ``star_ball`` reads like a typo for
    # ``start_ball`` -- confirm against the handler defined elsewhere in
    # this file before renaming.
    onmouseclicked(star_ball)
7,646
def infer_from_discretized_mix_logistic(params):
    """
    Sample from discretized mixture of logistic distributions

    Args:
        params (Tensor): B x C x T, [C/3,C/3,C/3] = [logit probs, means, log scales]
            NOTE(review): the indexing below (``y[:, :nr_mix]``) treats the
            input as 2-D with mixtures on axis 1 -- confirm the actual
            layout against the caller (the docstring says B x C x T).

    Returns:
        Tensor: sample in range of [-1, 1].
    """
    # Floor for log-scales to avoid numerical underflow.
    log_scale_min = float(np.log(1e-14))
    assert params.shape[1] % 3 == 0
    nr_mix = params.shape[1] // 3

    # B x T x C
    y = params #np.transpose(params, (1, 0))
    logit_probs = y[:, :nr_mix]

    # Gumbel-max trick: pick one mixture component per example.
    temp = np.random.uniform(low=1e-5, high=1.0 - 1e-5, size=logit_probs.shape)
    temp = logit_probs - np.log(- np.log(temp))
    argmax = np.argmax(temp, axis=-1)

    # one-hot selection mask over mixture components
    one_hot = get_one_hot(argmax, nr_mix).astype(dtype=float)

    # Select the chosen component's mean and (clipped) log-scale.
    means = np.sum(y[:, nr_mix:2 * nr_mix] * one_hot, axis=-1)
    log_scales = np.clip(np.sum(
        y[:, 2 * nr_mix:3 * nr_mix] * one_hot, axis=-1), a_min=log_scale_min, a_max=None)

    # Inverse-CDF sampling from the logistic distribution:
    # x = mean + scale * logit(u) for u ~ Uniform(0, 1).
    u = np.random.uniform(low=1e-5, high=1.0 - 1e-5, size=means.shape)
    x = means + np.exp(log_scales) * (np.log(u) - np.log(1. - u))

    x = np.clip(x, a_min=-1., a_max=1.)

    return x
7,647
def init_api_owners(init_owners):
    """Converts the ``DbOwns`` lists created by ``init_owners`` into lists of
    dicts that can be used to test the responses of the API endpoints."""
    # Mirror the fixture's structure, replacing each DbOwns with the
    # attribute dict of its API-response counterpart.
    api_owners = {
        key: [OwnsResp.from_dbowns(owns).__dict__ for owns in owns_list]
        for key, owns_list in init_owners.items()
    }
    yield api_owners
7,648
def get_sorted_file_paths(file_path, file_extension=None, encoding=None):
    """
    Sorts file paths with numbers "naturally" (i.e. 1, 2, 10, a, b), not
    lexiographically (i.e. 1, 10, 2, a, b).

    :param str file_path: File containing file_paths in a text file,
        or as a list.
    :param str file_extension: Optional file extension (if a directory
        is passed)
    :param encoding: If opening a text file, what encoding it has.
        Default: None (platform dependent)
    :return: Sorted list of file paths
    """
    if isinstance(file_path, list):
        return natsorted(file_path)

    # assume if not a list, is a file path
    file_path = Path(file_path)
    if file_path.suffix == ".txt":
        # A .txt file lists one path per line; delegate reading + sorting.
        return get_text_lines(file_path, sort=True, encoding=encoding)
    if file_path.is_dir():
        pattern = "*" if file_extension is None else "*" + file_extension
        return natsorted(glob.glob(os.path.join(file_path, pattern)))

    message = (
        "Input file path is not a recognised format. Please check it "
        "is a list of file paths, a text file of these paths, or a "
        "directory containing image files."
    )
    raise NotImplementedError(message)
7,649
def _process_window(output_arrays: dict, entries_for_inter, trial_key: str, crash_cutoff: Union[float, np.float],
                    sampling_rate: int, episode_id: int) -> None:
    """
    helper function to interpolate entries in a window and record output

    :param output_arrays: dict of lists, mutated in place; one entry is
        appended per call to every key touched below
    :param entries_for_inter: window of raw entries to interpolate
        (presumably a DataFrame slice -- it is indexed with ``.iloc``)
    :param trial_key: identifier of the trial this window belongs to
    :param crash_cutoff: cutoff time in seconds, time steps beyond which has
        the a corresponding seq label of 1
    :param sampling_rate: number of interpolated time steps per window
    :param episode_id: identifier of the episode this window belongs to
    :return: None (results are appended to ``output_arrays``)
    """
    int_results = interpolate_entries(entries_for_inter, sampling_rate=sampling_rate)
    # record features
    output_arrays["vel_ori"].append(int_results["currentVelRoll"])
    output_arrays["vel_cal"].append(int_results["calculated_vel"])
    output_arrays["position"].append(int_results["currentPosRoll"])
    output_arrays["joystick"].append(int_results["joystickX"])
    output_arrays["trial_key"].append(trial_key)
    output_arrays["person"].append(entries_for_inter["peopleName"].iloc[0])
    output_arrays["start_sec"].append(entries_for_inter['seconds'].iloc[0])
    window_end = entries_for_inter['seconds'].iloc[-1]
    output_arrays["end_sec"].append(window_end)
    output_arrays["episode_id"].append(episode_id)
    # record labels
    seq_labels = np.zeros(sampling_rate)
    single_label = 0
    if window_end >= crash_cutoff:
        # If window touches or crosses cutoff, there is at least one time step that has label 1.
        # First, retrieve interpolated seconds for locating cutoff.
        seconds = int_results["seconds"]
        # Then, any time step that touches or crosses cutoff receives a label of 1
        assert seq_labels.shape[0] == seconds.shape[0], "Length mismatch between interpolated sequence length and seconds"
        seq_labels[seconds >= crash_cutoff] = 1
        assert np.sum(seq_labels) > 0, "no 1 labels assigned!"
        # lastly, set single label to 1 to signal that there is a crash within time ahead
        single_label = 1
    # now, record both single and seq label
    output_arrays["label"].append(single_label)
    output_arrays["seq_label"].append(seq_labels)
7,650
def _load_reft_data(reft_file, index_name="btl_fire_num"): """ Loads reft_file to dataframe and reindexes to match bottle data dataframe """ reft_data = pd.read_csv(reft_file, usecols=["btl_fire_num", "T90", "REFTMP_FLAG_W"]) reft_data.set_index(index_name) reft_data["SSSCC_TEMP"] = Path(reft_file).stem.split("_")[0] reft_data["REFTMP"] = reft_data["T90"] return reft_data
7,651
def commit_veto(environ, status, headers):
    """Veto a commit.

    This hook is called by repoze.tm in case we want to veto a commit
    for some reason. Return True to force a rollback.

    By default we veto if the response's status code is an error code.
    Override this method, or monkey patch the instancemethod, to fine
    tune this behaviour.
    """
    # The status line looks like "200 OK"; the leading token is the code.
    code = int(status.split(None, 1)[0])
    return code < 200 or code >= 400
7,652
def rate_limited_api(view_func):
    """
    Checks users last post to rate limited endpoints (adding comments or
    recipes) and rejects if within timeout period for api requests
    (returns JSON response)
    """
    @wraps(view_func)
    def _wrapped_view(request, *args, **kwargs):
        exceeded, msg = request.user.profile.rate_limit_exceeded()
        if not exceeded:
            return view_func(request, *args, **kwargs)
        # Within the timeout window: reject with the profile's message.
        return JsonResponse({"error": msg})
    return _wrapped_view
7,653
def SecondOrderTVD(Uo, Courant, diffX, LimiterFunc, Limiter, Eps=0.01):
    """Return the numerical solution of dependent variable in the model eq.

    This function uses the explicit second-order TVD method and their
    various Limiter functions and Limiters to obtain the solution of the
    1D non-linear viscous Burgers equation.

    Call signature:

        SecondOrderTVD(Uo, Courant, diffX, LimiterFunc, Limiter, Eps)

    Parameters
    ----------
    Uo: ndarray[float], =1d
        The dependent variable at time level, n within the entire domain.
        (Non-dimensionalized quantity)
    Courant: float
        Courant number that appears in the convection component of the PDE.
    diffX: float
        Diffusion number for x-component that appears in the diffusion
        component of the PDE.
    LimiterFunc: str
        Flux limiter function.
    Limiter:
        Limiter type.
    Eps: float, optional
        A positive constant in the entropy correction term, si in Eq. 6-127
        in CFD Vol. 1 by Hoffmann. Its value must be between 0 and 0.125.
        Default is 0.01.

    Returns
    -------
    U: ndarray[float], =1d
        The dependent variable at time level, n+1 within the entire domain.
        (Non-dimensionalized quantity)
    """
    from .tvdfunctions import CalculateTVD
    from .backend import fetchoptions as fo
    from .backend.exceptions import TVDLimiterFunctionInputError

    shapeU = Uo.shape  # Obtain Dimension
    if len(shapeU) == 2:
        raise DimensionError("2D", "viscous Bergers", "second-order TVD")

    iMax, = shapeU
    U = Uo.copy()  # Initialize U
    # Convective flux of the Burgers equation: E = U^2 / 2.
    E = Uo*Uo/2

    fetch = fo.FetchOptions()
    limfunc_options = fetch.TVDLimiterFunctionOptions()
    if LimiterFunc not in limfunc_options:
        raise TVDLimiterFunctionInputError(LimiterFunc)

    # Interior sweep leaves two cells untouched at each boundary because the
    # TVD stencil needs two neighbours on each side.
    for i in range(2, iMax-2):
        phiPlus, phiMinus = CalculateTVD(i, Uo, E, Eps, Courant, Limiter,
                                         LimiterFunc)

        # Equation 6-124 and 6-125 in Hoffmann Vol. 1
        hPlus = 0.5 * (E[i+1]+E[i]+phiPlus)
        hMinus = 0.5 * (E[i]+E[i-1]+phiMinus)

        # Calculate diffusion terms in the viscous Bergers equation.
        # Equation 7-58
        diffusion = diffX*(Uo[i+1] - 2.0*Uo[i] + Uo[i-1])

        # Equation 6-123
        U[i] = Uo[i] - Courant*(hPlus-hMinus) + diffusion

    return U
7,654
def login_required(func):
    """
    Allow only auth users

    Decorator for async view handlers: anonymous requests get a "LogIn to
    continue." flash message and are redirected to the sign-in page.
    """
    async def wrapped(self, *args, **kwargs):
        if self.request.user is None:
            add_message(self.request, "LogIn to continue.")
            # NOTE(review): execution falls through to the handler call
            # below after ``redirect`` -- this is only correct if
            # ``redirect`` raises (e.g. aiohttp's HTTPFound). If it merely
            # returns a response, a ``return`` is missing here; confirm
            # against the ``redirect`` helper's implementation.
            redirect(self.request, "sign_in")
        return await func(self, *args, **kwargs)
    return wrapped
7,655
def splitter(iterable, sizes):
    """Split an iterable into successive slice by sizes.

    >>> list(splitter(range(6), [1, 2, 3]))
    [[0], [1, 2], [3, 4, 5]]
    """
    source = iter(iterable)
    for chunk_size in sizes:
        # islice consumes exactly chunk_size items (fewer if exhausted).
        yield list(it.islice(source, chunk_size))
7,656
def _hunnyb_search_func(name):
    """search function required by ``codecs.register``"""
    # Answer only for our encoding name or one of its aliases; the implicit
    # None return lets codecs fall through to other search functions.
    if name == HUNNYB_ENC_NAME or name in HB_ALIASES:
        return (_encode, _decode, None, None)
7,657
def fingerprint_file(file):
    """Open, read file and calculate MD5 on its contents"""
    # Read in binary mode so the digest matches the raw bytes on disk.
    with open(file, 'rb') as fd:
        contents = fd.read()
    return md5(contents).hexdigest()
7,658
def reset_color_info(*event):
    """Remove the values from the color info frame.

    This function will be called when no items on the Treeview are selected.
    """
    global previous_sd, current_sd
    cell_spectrum_title["text"] = "Please select or add a spectrum to display its color coordinate"
    # Blank out the XYZ coordinate read-outs.
    cell_x_value_text.set("")
    cell_y_value_text.set("")
    cell_z_value_text.set("")
    # Reset the swatch to the default Tk widget background colour.
    cell_color_display["bg"] = "#f0f0f0"
    if current_sd:
        # Hide the highlighted spectrum curve, redraw the canvas and
        # forget the selection state.
        current_sd.set_visible(False)
        canvas_sd.draw()
        current_sd = None
        previous_sd = None
7,659
def toOTLookup(self, font, ff):
    """Converts a fontFeatures.Routine object to binary.

    Args:
        font: A ``TTFont`` object.
        ff: The parent ``FontFeatures`` object containing this routine.

    Returns a list of ``fontTools.otlLib.builder`` Builder objects
    allowing this routine to be converted to binary layout format.
    """
    # A single OpenType lookup cannot mix lookup types or flags, so reject
    # routines whose rules disagree on either.
    lookuptypes = [rule.lookup_type() for rule in self.rules]
    first_type = lookuptypes[0]
    if any(lu != first_type for lu in lookuptypes):
        raise ValueError("For now, a routine can only contain rules of the same type")
    first_flags = self.rules[0].flags
    if any(rule.flags != first_flags for rule in self.rules):
        raise ValueError("For now, a routine can only contain rules of the same flags")
    self.flags = first_flags

    if self.stage == "pos":
        return buildPos(self, font, first_type, ff)
    if self.stage == "sub":
        return buildSub(self, font, first_type, ff)
7,660
def test_paragraph_series_m_hb_ol_t_nl_i3_ol_nl_i2_hb(): """ Test case: Ordered list text newline indent of 3 ordered list newline indent of 2 html block """ # Arrange source_markdown = """1. abc 1. <script> foo </script> """ expected_tokens = [ "[olist(1,1):.:1:3:: ]", "[para(1,4):\n]", "[text(1,4):abc\n1.::\n]", "[end-para:::True]", "[end-olist:::True]", "[html-block(3,1)]", "[text(3,3):<script>\nfoo\n</script>: ]", "[end-html-block:::False]", "[BLANK(6,1):]", ] expected_gfm = """<ol> <li>abc 1.</li> </ol> <script> foo </script>""" # Act & Assert act_and_assert(source_markdown, expected_gfm, expected_tokens)
7,661
def test___init__username():
    """
    Will test __init__ function if username is provided.

    :return:
    """
    # Constructing without a username must raise ValueError.
    with pytest.raises(ValueError) as excinfo:
        syncope.Syncope(syncope_url="http://192.168.1.145:9080", password="admin")
    assert excinfo.value.message == 'This interface needs an username to work!'
7,662
def apply_to_all(func, results, datasets):
    """Apply the given function to all results

    Args:
        func: the function to apply
        results: nested dictionary where the nested levels are: algorithm
            name, sensitive attribute and split ID
        datasets: nested dictionary where the nested levels are: sensitive
            attribute and split ID

    Returns:
        a nested dictionary with the same structure as `results` that
        contains the output of the given function
    """
    # Rebuild the three-level structure with a nested comprehension,
    # pairing each result with the dataset for its sensitive/split keys.
    return {
        algo: {
            sensitive: {
                split_id: func(result, datasets[sensitive][split_id])
                for split_id, result in per_sensitive.items()
            }
            for sensitive, per_sensitive in per_algo.items()
        }
        for algo, per_algo in results.items()
    }
7,663
def any_value_except(mapping, excluded_keys):
    """Return a random value from a dict that is not associated with
    excluded_key. Raises StopIteration if there are no other keys than
    excluded_key"""
    # Lazily scan entries and surface the first value whose key survives
    # the exclusion filter; next() raises StopIteration when none do.
    candidates = (value for key, value in mapping.items() if key not in excluded_keys)
    return next(candidates)
7,664
def one_hot(y, num_dim=10):
    """
    One Hot Encoding, similar to `torch.eye(num_dim).index_select(dim=0, index=y)`
    :param y: N-dim tenser
    :param num_dim: do one-hot labeling from `0` to `num_dim-1`
    :return: shape = (batch_size, num_dim)
    """
    encoded = torch.zeros(y.size(0), num_dim)
    # Keep the encoding on the same device family as the labels.
    if y.is_cuda:
        encoded = encoded.cuda()
    # Write 1.0 at each row's class index (in place).
    return encoded.scatter_(1, y.view(-1, 1), 1.)
7,665
def relate(target_file, start_file):
    """
    Returns relative path of target-file from start-file.
    """
    # os.path.relpath expects directories, so separate the file names out
    # and reattach the target's base name afterwards.
    target_dir, target_base = os.path.split(target_file)
    start_dir = os.path.dirname(start_file)
    rel_dir = os.path.relpath(target_dir, start_dir)
    return os.path.join(rel_dir, target_base)
7,666
def IdentityMatrix():
    """Creates an identity rotation matrix.

    Returns a rotation matrix that has no effect on orientation.
    This matrix can be the starting point for other operations,
    such as using a series of calls to #Pivot to
    create a custom rotation matrix.

    Returns
    -------
    RotationMatrix
        The identity rotation matrix.
    """
    # 1 on the diagonal, 0 elsewhere.
    rows = [[1 if r == c else 0 for c in range(3)] for r in range(3)]
    return RotationMatrix(rows)
7,667
def compute_K_from_vanishing_points(vanishing_points): """Compute intrinsic matrix given vanishing points. Args: vanishing_points: A list of vanishing points. Returns: K: The intrinsic camera matrix (3x3 matrix). """ # vanishing points used v1 = vanishing_points[0] v2 = vanishing_points[1] v3 = vanishing_points[2] # construct constraint matrix A from each pair of vanishing points A = np.zeros((3, 3)) # 1 + 2 vi = v1 vj = v2 A[0] = np.array([(vi[0]*vj[0]+vi[1]*vj[1]), (vi[0]+vj[0]), (vi[1]+vj[1])]) # 1 + 3 vi = v1 vj = v3 A[1] = np.array([(vi[0]*vj[0]+vi[1]*vj[1]), (vi[0]+vj[0]), (vi[1]+vj[1])]) # 2 + 3 vi = v2 vj = v3 A[2] = np.array([(vi[0]*vj[0]+vi[1]*vj[1]), (vi[0]+vj[0]), (vi[1]+vj[1])]) # add one column of ones A_ones = np.ones((A.shape[0], 1)) A = np.hstack((A, A_ones)) # SVD U, s, VT = np.linalg.svd(A) w = VT[-1, :] omega = np.array([[w[0], 0, w[1]], [0, w[0], w[2]], [w[1], w[2], w[3]]]) # find K matrix from omega KT_inv = np.linalg.cholesky(omega) K = np.linalg.inv(KT_inv.T) # normalize K /= K[2, 2] return K
7,668
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the Insteon dimmer platform.

    Delegates to the shared Insteon helper, which registers an
    InsteonDimmerEntity for each discovered device under this DOMAIN.
    """
    async_add_insteon_entities(
        hass, DOMAIN, InsteonDimmerEntity, async_add_entities, discovery_info
    )
7,669
def getProgFromFile(f):
    """Derive a program name from a module's ``__file__`` path.

    Strips a trailing ``.py`` extension (if present) and returns the
    base file name.
    """
    stem = f[:-3] if f.endswith(".py") else f
    return os.path.basename(stem)
7,670
def _tpd2vec(seq, dtype=float): """ Convert a tpd file string to a vector, return a NumPy array. EXAMPLES: >>> _tpd2vec('1|13|4; 20; 25|28') array([ 1., 5., 9., 13., 20., 25., 26., 27., 28.]) >>> _tpd2vec('5.5; 1.2@3; 3|7|2') array([ 5.5, 1.2, 1.2, 1.2, 3. , 5. , 7. ]) >>> _tpd2vec(' ') array([], dtype=float64) """ finalvec = np.array([], dtype) for s in seq.split(';'): if s.count('|'): values = [dtype(v) for v in s.split('|')] values[1] += 1 vec = np.arange(*values) elif s.count('@'): value, num = s.split('@') try: vec = np.ones(int(num)) * dtype(value) except ValueError: raise ValueError('%s is incorrectly specified' % seq) else: try: vec = [dtype(s)] except ValueError: vec = np.array([], dtype) finalvec = np.append(finalvec, vec) return finalvec
7,671
def get_notification_html(*, notification_type: str, options: Dict, sender: str) -> str:
    """
    Returns the formatted html for the notification based on the notification_type

    :param notification_type: one of the keys of NOTIFICATION_STRINGS
    :param options: per-type payload; always expected to carry
        'resource_path' and 'resource_name' (validated by validate_options)
    :param sender: display name/address of the user who triggered the notification
    :return: A string representing the html markup to send in the notification
    :raises Exception: if the resource url cannot be formed or the
        notification_type is unsupported
    """
    validate_options(options=options)

    url_base = app.config['FRONTEND_BASE']
    resource_url = '{url_base}{resource_path}?source=notification'.format(resource_path=options.get('resource_path'),
                                                                          url_base=url_base)
    # Sanity check: exactly one '/' must sit at the seam between the base
    # url and the resource path (no doubled or missing slash).
    joined_chars = resource_url[len(url_base) - 1:len(url_base) + 1]
    if joined_chars.count('/') != 1:
        raise Exception('Configured "FRONTEND_BASE" and "resource_path" do not form a valid url')

    notification_strings = NOTIFICATION_STRINGS.get(notification_type)
    if notification_strings is None:
        raise Exception('Unsupported notification_type')

    # Default message parts; specialized per notification type below.
    greeting = 'Hello,<br/>'
    notification = notification_strings.get('notification', '').format(resource_url=resource_url,
                                                                       resource_name=options.get('resource_name'),
                                                                       sender=sender)
    comment = notification_strings.get('comment', '')
    end_note = notification_strings.get('end_note', '')
    salutation = '<br/>Thanks,<br/>Amundsen Team'

    if notification_type == NotificationType.METADATA_REQUESTED:
        options_comment = options.get('comment')
        need_resource_description = options.get('description_requested')
        need_fields_descriptions = options.get('fields_requested')

        # Tailor the request sentence to what was asked for.
        if need_resource_description and need_fields_descriptions:
            notification = notification + 'and requests improved table and column descriptions.<br/>'
        elif need_resource_description:
            notification = notification + 'and requests an improved table description.<br/>'
        elif need_fields_descriptions:
            notification = notification + 'and requests improved column descriptions.<br/>'
        else:
            notification = notification + 'and requests more information about that resource.<br/>'

        if options_comment:
            comment = ('<br/>{sender} has included the following information with their request:'
                       '<br/>{comment}<br/>').format(sender=sender, comment=options_comment)

    if notification_type == NotificationType.DATA_ISSUE_REPORTED:
        greeting = 'Hello data owner,<br>'
        data_issue_url = options.get('data_issue_url')
        # The comment template for this type carries a {data_issue_url} slot.
        comment = comment.format(data_issue_url=data_issue_url)

    return '{greeting}{notification}{comment}{end_note}{salutation}'.format(greeting=greeting,
                                                                            notification=notification,
                                                                            comment=comment,
                                                                            end_note=end_note,
                                                                            salutation=salutation)
7,672
def rank(config, path, metric, revision_index, limit, threshold, descending):
    """
    Rank command ordering files, methods or functions using metrics.

    :param config: The configuration
    :type config: :class:'wily.config.WilyConfig'

    :param path: The path to the file
    :type path ''str''

    :param metric: Name of the metric to report on
    :type metric: ''str''

    :param revision_index: Version of git repository to revert to.
    :type revision_index: ``str``

    :param limit: Limit the number of items in the table
    :type limit: ``int``

    :param threshold: For total values beneath the threshold return a non-zero exit code
    :type threshold: ``int``

    :param descending: Sort in descending order of the metric value.
    :type descending: ``bool``

    :return: Sorted table of all files in path, sorted in order of metric.
    """
    logger.debug("Running rank command")
    data = []

    operator, metric = resolve_metric_as_tuple(metric)
    operator = operator.name

    state = State(config)

    # Resolve which revision to rank: latest by default, otherwise look the
    # requested revision up in the wily cache.
    if not revision_index:
        target_revision = state.index[state.default_archiver].last_revision
    else:
        rev = resolve_archiver(state.default_archiver).cls(config).find(revision_index)
        logger.debug(f"Resolved {revision_index} to {rev.key} ({rev.message})")
        try:
            target_revision = state.index[state.default_archiver][rev.key]
        except KeyError:
            logger.error(
                f"Revision {revision_index} is not in the cache, make sure you have run wily build."
            )
            exit(1)

    logger.info(
        f"-----------Rank for {metric.description} for {format_revision(target_revision.revision.key)} by {target_revision.revision.author_name} on {format_date(target_revision.revision.date)}.------------"
    )

    if path is None:
        files = target_revision.get_paths(config, state.default_archiver, operator)
        logger.debug(f"Analysing {files}")
    else:
        # Resolve target paths when the cli has specified --path
        if config.path != DEFAULT_PATH:
            targets = [str(Path(config.path) / Path(path))]
        else:
            targets = [path]

        # Expand directories to paths
        files = [
            os.path.relpath(fn, config.path)
            for fn in radon.cli.harvest.iter_filenames(targets)
        ]
        logger.debug(f"Targeting - {files}")

    # Collect the metric value per file; files missing from the index are
    # skipped silently (debug-logged) rather than failing the command.
    for item in files:
        for archiver in state.archivers:
            try:
                logger.debug(
                    f"Fetching metric {metric.name} for {operator} in {str(item)}"
                )
                val = target_revision.get(
                    config, archiver, operator, str(item), metric.name
                )
                value = val
                data.append((item, value))
            except KeyError:
                logger.debug(f"Could not find file {item} in index")

    # Sort by ideal value
    data = sorted(data, key=op.itemgetter(1), reverse=descending)

    if limit:
        data = data[:limit]

    # Tack on the total row at the end
    total = metric.aggregate(rev[1] for rev in data)
    data.append(["Total", total])

    headers = ("File", metric.description)
    print(
        tabulate.tabulate(
            headers=headers, tabular_data=data, tablefmt=DEFAULT_GRID_STYLE
        )
    )

    # Non-zero exit when the aggregated total falls below the threshold.
    if threshold and total < threshold:
        logger.error(
            f"Total value below the specified threshold: {total} < {threshold}"
        )
        exit(1)
7,673
def step(parents: be.Population, fitness: be.Fitness) -> tuple:
    """
    The step function defines how an algorithm generation will be conducted.
    This function must receive a population and a fitness object and return
    another population. In this case we will define the parameters of the
    algorithm within the function itself and use report objects to monitor
    the evolution of the population.

    In this algorithm the main steps consists of:

    1. Get elite -> Elite
    2. Apply tournament selection -> Best individuals
    3. Apply one point cross over to best individuals -> Offspring
    4. Mutate offspring
    5. Evaluate offspring
    6. Annihilate worst individuals in offspring and replace them with the best.
    7. Merge elite and offspring -> Population for next generation
    """
    # Put parameters (hard-coded algorithm configuration for this step)
    recombination_schema = 'one_point_i'      # Alternatives: 'n_point_i' or 'uniform_i'
    mutation_schema = 'random_resetting'      # Alternatives: 'creep'
    mutation_probability = 0.1
    max_mutation_events = 2
    ranking_selection_schema = 'tournament'   # Alternatives: 'roulette' or 'sus'
    tournament_k = 2
    tournament_w = 1
    tournament_replacement = False
    elitism_percentage = 0.1

    # Get elite (top fraction of parents kept unchanged)
    elite = be.survivor_selection(population=parents, schema='elitism', select=elitism_percentage)

    # Apply selection to get the mating pool (sized to refill the population)
    mating_pool = be.ranking_selection(
        population=parents, n=len(parents) - len(elite),
        schema=ranking_selection_schema,
        w=tournament_w, k=tournament_k, replacement=tournament_replacement)

    # Generate offspring
    offspring = be.recombination(population=mating_pool, n=len(mating_pool), schema=recombination_schema)

    # Mutate offspring (in place)
    be.mutation(population=offspring, probability=mutation_probability, schema=mutation_schema,
                max_mutation_events=max_mutation_events)

    # Evaluate offspring
    be.evaluate_parallel(population=offspring, fitness_function=fitness)

    # Merge elite and offspring
    next_generation = be.merge_populations(offspring, elite)

    report.create_report(population=next_generation, population_name='Population',
                         increment_generation=True)

    # With this indicator we keep the best solution of each generation
    return next_generation, be.SaveBestSolution(next_generation)
7,674
def asset_dividend_record(self, **kwargs):
    """Asset Dividend Record (USER_DATA)

    Query asset dividend record.

    GET /sapi/v1/asset/assetDividend

    https://binance-docs.github.io/apidocs/spot/en/#asset-dividend-record-user_data

    Keyword Args:
        asset (str, optional)
        startTime (int, optional)
        endTime (int, optional)
        limit (int, optional): Default 20, max 500
        recvWindow (int, optional): The value cannot be greater than 60000
    """
    url_path = "/sapi/v1/asset/assetDividend"
    return self.sign_request("GET", url_path, kwargs)
7,675
def copy(engine, fromm, to, project: 'Project'):
    """
    Copy files from the hosts file system using a Docker container running root.

    See AbstractEngine.path_copy for general usage.

    :param engine: engine used to run the detached copy container
    :param fromm: source path on the host; must exist
    :param to: destination path; must lie inside the project and its parent
        directory must already exist
    :param project: project used for the path-containment check
    :raises PermissionError: if `to` escapes the project
    :raises OSError: if `fromm` or the parent of `to` does not exist
    :raises ExecError: if the container's `cp` exits non-zero
    """
    if not path_in_project(to, project):
        raise PermissionError(f"Tried to copy into a path that is not within the project: {fromm} -> {to}")
    if not os.path.exists(fromm):
        raise OSError(f"Tried to copy a directory/file that does not exist: {fromm}")
    if not os.path.exists(os.path.dirname(to)):
        raise OSError(f"Tried to copy into a path that does not exist: {to}")

    # Mount source read-only and destination read-write, then let `cp -a`
    # inside the container preserve ownership/permissions.
    command = Command({
        'image': IMAGE,
        'command': 'cp -a /copy_from/. /copy_to/',
        'additional_volumes': {'fromm': {
            'host': fromm,
            'container': '/copy_from',
            'mode': 'ro'
        }, 'to': {
            'host': to,
            'container': '/copy_to',
            'mode': 'rw'
        }}
    })
    command.validate()
    (exit_code, output) = engine.cmd_detached(project, command, run_as_root=True)
    if exit_code != 0:
        raise ExecError(f"Error copying the directory ({str(exit_code)}) {fromm} -> {to}: {output}")
7,676
def heuristic(node_1, node_2):
    """
    Manhattan-distance heuristic, appropriate when only 4 directions
    of movement are possible.
    """
    x1, y1 = node_1
    x2, y2 = node_2
    return abs(x1 - x2) + abs(y1 - y2)
7,677
def exercise_2(inputs):  # DO NOT CHANGE THIS LINE
    """
    Output should be the name of the class.

    :param inputs: unused; present to satisfy the exercise harness signature.
    :return: the `Party` class object itself (not an instance of it).
    """
    output = Party
    return output  # DO NOT CHANGE THIS LINE
7,678
def undistort(img, mtx, dist):
    """Undistort an image using the camera matrix and distortion coefficients.

    The optimal new camera matrix is computed with alpha=0, which crops away
    edge pixels that would otherwise contain invalid regions — it's okay to
    remove some pixels at image corners.
    """
    height, width = img.shape[:2]
    size = (width, height)
    new_mtx, _roi = cv2.getOptimalNewCameraMatrix(mtx, dist, size, 0, size)
    return cv2.undistort(img, mtx, dist, None, new_mtx)
7,679
def get_order(order_id, sandbox=False):
    """Get a single order using the Sell Fulfillment API.

    :param order_id: eBay order identifier to look up.
    :param sandbox: If True, query the sandbox environment instead of production.
    :return: whatever `single_api_call` returns for the
        'sell_fulfillment_get_order' call; tax breakdown details are requested
        via the TAX_BREAKDOWN field group.
    """
    return single_api_call('sell_fulfillment_get_order', order_id=order_id,
                           field_groups='TAX_BREAKDOWN', sandbox=sandbox)
7,680
def extend_dict(x, *y):
    """Similar to Object.assign() / _.extend() in Javascript, using 'dict.update()'

    Args:
        x (dict): the base dict; a copy is merged into, `x` is never mutated.
        *y (dict, iter): any number of dictionary or iterable key/value pairs
            to be sequentially merged into the copy. None entries are skipped.

    Returns:
        dict: the merged copy.
    """
    merged = x.copy()
    for extra in y:
        if extra is None:
            continue
        merged.update(extra)
    return merged
7,681
def open_browser(page=None, browser=None, force=False):
    """Open a web browser tab on `page`.

    Raises RuntimeError when stdin is not a TTY (unless `force` is set) or
    when no page is given.  With `browser=None` the system default browser
    is used; otherwise the named browser is resolved via the webbrowser
    registry.
    """
    if not force and not sys.stdin.isatty():
        raise RuntimeError(
            "[-] use --force to open browser when stdin is not a TTY")
    if page is None:
        raise RuntimeError("[-] not page specified")
    which = browser if browser is not None else "system default"
    logger.info(f"[+] opening browser '{which}' for {page}")
    if browser is None:
        webbrowser.open(page, new=1)
    else:
        webbrowser.get(browser).open_new_tab(page)
7,682
def ErrorCriteria(errors):
    """Monitor the number of unexpected errors logged in the cluster.

    Posts an alert when the cluster total exceeds the threshold of five
    errors for this time period; posts a warning when the total is between
    one and the threshold.  Returns (alerts, warnings).
    """
    ERROR_ALERT_THRESHOLD = 5
    alerts, warnings = [], []
    total = errors['cluster_total']
    if total > ERROR_ALERT_THRESHOLD:
        alerts.append(CLUSTER_TOKEN)
    elif total > 0:
        warnings.append(CLUSTER_TOKEN)
    return alerts, warnings
7,683
def bdnyc_skyplot(): """ Create a sky plot of the database objects """ # Load the database db = astrodb.Database('./database.db') t = db.query('SELECT id, ra, dec, shortname FROM sources', fmt='table') # Convert to Pandas data frame data = t.to_pandas() data.index = data['id'] # Remove objects without RA/Dec num_missing = np.sum(pd.isnull(data['ra'])) if num_missing > 0: warning_message = 'Note: {} objects had missing coordinate information and were removed.'.format(num_missing) data = data[pd.notnull(data['ra'])] else: warning_message = '' # Coerce to numeric data['ra'] = pd.to_numeric(data['ra']) data['dec'] = pd.to_numeric(data['dec']) # Coordinate conversion c = SkyCoord(ra=data['ra'] * u.degree, dec=data['dec'] * u.degree) pi = np.pi proj = 'hammer' data['x'], data['y'] = projection(c.ra.radian - pi, c.dec.radian, use=proj) data['l'], data['b'] = c.galactic.l, c.galactic.b # Make the plots p1 = make_sky_plot(data, proj) data['x'], data['y'] = projection(c.galactic.l.radian - pi, c.galactic.b.radian, use=proj) p2 = make_sky_plot(data, proj) tab1 = Panel(child=p1, title="Equatorial") tab2 = Panel(child=p2, title="Galactic") tabs = Tabs(tabs=[tab1, tab2]) script, div = components(tabs) return render_template('skyplot.html', script=script, plot=div, warning=warning_message)
7,684
def test_image_link_592(): """ Test case 592: (part 1) Collapsed: """ # Arrange source_markdown = """![foo][] [foo]: /url "title" """ expected_tokens = [ "[para(1,1):]", "[image(1,1):collapsed:/url:title:foo::::foo:::::]", "[end-para:::True]", "[BLANK(2,1):]", '[link-ref-def(3,1):True::foo:: :/url:: :title:"title":]', "[BLANK(4,1):]", ] expected_gfm = """<p><img src="/url" alt="foo" title="title" /></p>""" # Act & Assert act_and_assert(source_markdown, expected_gfm, expected_tokens)
7,685
def get_model_config(model_name, dataset, params):
    """Map model name to model network configuration.

    Looks `model_name` up in the registry for `dataset` and builds the
    configuration with the given params.  Raises ValueError for an unknown
    model name.
    """
    model_map = _get_model_map(dataset.name)
    if model_name in model_map:
        return model_map[model_name](params=params)
    raise ValueError('Invalid model name \'%s\' for dataset \'%s\'' %
                     (model_name, dataset.name))
7,686
def post_search(request):
    """Allow text matching search.

    Renders the search form; when a 'query' GET parameter is present and
    valid, runs a weighted full-text search over Post titles and bodies and
    shows results ranked by relevance.
    """
    form = SearchForm()
    query = None
    results = []
    if 'query' in request.GET:  # check if result is submitted by looking for query
        form = SearchForm(request.GET)
        if form.is_valid():
            query = form.cleaned_data['query']
            # results = Post.objects.annotate(search=SearchVector('title','body'),).filter(search=query)
            # a search is more relevant if the search term is in the title
            """ Search weights are D,C,B and A corresponding to 0.1,0.2,0.4 and 1.0 """
            search_vector = SearchVector('title', weight='A') + SearchVector('body',weight='B')
            search_query = SearchQuery(query)
            # filter results to display only the ones ranking higher than 0.3
            results = Post.objects.annotate(search=search_vector,rank=SearchRank(search_vector,search_query)
            ).filter(rank__gte=0.3).order_by('-rank')
    return render(request,'blog/post/search.html',
                {'form':form,
                'query':query,
                'results':results})
7,687
def PrimacyCodingNumeric_receptor_activity_monte_carlo_numba_generator(conc_gen):
    """ generates a function that calculates the receptor activity for a given
    concentration generator

    The function body is produced by substituting `conc_gen` into a source
    template and exec-ing it, then JIT-compiling the result with numba.
    NOTE(review): `conc_gen` is spliced into code that is exec'd — callers
    must only pass trusted, code-literal strings.
    """
    func_code = receptor_activity_monte_carlo_numba_template.format(
        CONCENTRATION_GENERATOR=conc_gen)
    # make sure all necessary objects are in the scope of the exec'd code
    scope = {'np': np, 'nlargest_indices_numba': nlargest_indices_numba} 
    exec(func_code, scope)
    # the template is expected to define a callable named 'function'
    func = scope['function']
    return numba.jit(nopython=NUMBA_NOPYTHON, nogil=NUMBA_NOGIL)(func)
7,688
def subtoken_counts(proposed, ground_truth):
    """
    Compute subtoken overlap statistics between two token strings.

    Returns a tuple (num_precise, num_proposed, num_ground_truth), where
    num_precise is the size of the intersection of the proposed and
    ground-truth subtoken sets.
    """
    truth_set = set(compute_subtokens(ground_truth))
    proposed_set = set(compute_subtokens(proposed))
    overlap = proposed_set & truth_set
    return len(overlap), len(proposed_set), len(truth_set)
7,689
def welcome():
    """ Build a randomized welcome reply from the module's phrase pools. """
    # Draw the four phrase parts in the same order as before so the RNG
    # stream is consumed identically.
    hello = random.choice(_HELLO_)
    nick = random.choice(_NICK_NAME_)
    greeting = random.choice(_WELCOME_)
    proposal = random.choice(_PROPOSAL_)
    return "{} {}, {} ! {} ?".format(hello, nick, greeting, proposal)
7,690
def EDCN(linear_feature_columns,
         dnn_feature_columns,
         bridge_type='attention_pooling',
         tau=0.1,
         use_dense_features=True,
         cross_num=2,
         cross_parameterization='vector',
         l2_reg_linear=1e-5,
         l2_reg_embedding=1e-5,
         l2_reg_cross=1e-5,
         l2_reg_dnn=0,
         seed=10000,
         dnn_dropout=0,
         dnn_use_bn=False,
         dnn_activation='relu',
         task='binary'):
    """Instantiates the Enhanced Deep&Cross Network architecture.

    :param linear_feature_columns: An iterable containing all the features used by linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
    :param bridge_type: The type of bridge interaction, one of 'pointwise_addition', 'hadamard_product',
        'concatenation', 'attention_pooling'
    :param tau: Positive float, the temperature coefficient to control distribution of field-wise gating unit
    :param use_dense_features: Whether to use dense features, if True, dense feature will be projected to
        sparse embedding space
    :param cross_num: positive integet,cross layer number
    :param cross_parameterization: str, ``"vector"`` or ``"matrix"``, how to parameterize the cross network.
    :param l2_reg_linear: float. L2 regularizer strength applied to linear part
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
    :param l2_reg_cross: float. L2 regularizer strength applied to cross net
    :param l2_reg_dnn: float. L2 regularizer strength applied to DNN
    :param seed: integer ,to use as random seed.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not DNN
    :param dnn_activation: Activation function to use in DNN
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
    :return: A Keras model instance.
    """
    if cross_num == 0:
        raise ValueError("Cross layer num must > 0")

    # Select the bridge module that fuses the cross and deep branches.
    if bridge_type == 'pointwise_addition':
        BridgeLayer = tf.keras.layers.Add
    elif bridge_type == 'hadamard_product':
        BridgeLayer = tf.keras.layers.Multiply
    elif bridge_type == 'concatenation':
        BridgeLayer = ConcatenationBridge
    elif bridge_type == 'attention_pooling':
        BridgeLayer = AttentionPoolingLayer
    else:
        raise NotImplementedError

    print('EDCN brige type: ', bridge_type)

    features = build_input_features(dnn_feature_columns)
    inputs_list = list(features.values())

    linear_logit = get_linear_logit(features,
                                    linear_feature_columns,
                                    seed=seed,
                                    prefix='linear',
                                    l2_reg=l2_reg_linear)

    sparse_embedding_list, dense_value_list = input_from_feature_columns(
        features, dnn_feature_columns, l2_reg_embedding, seed)

    # project dense value to sparse embedding space, generate a new field feature
    if use_dense_features:
        sparse_embedding_dim = sparse_embedding_list[0].shape[-1]
        dense_value_feild = concat_func(dense_value_list)
        dense_value_feild = DNN([sparse_embedding_dim], dnn_activation,
                                l2_reg_dnn, dnn_dropout,
                                dnn_use_bn)(dense_value_feild)
        dense_value_feild = tf.expand_dims(dense_value_feild, axis=1)
        sparse_embedding_list.append(dense_value_feild)

    deep_in = sparse_embedding_list
    cross_in = sparse_embedding_list
    field_size = len(sparse_embedding_list)
    cross_dim = field_size * cross_in[0].shape[-1]

    # Alternate cross / deep towers, exchanging information through the
    # bridge layer after every level.
    # NOTE(review): the cross tower consumes `deep_in` and the deep tower
    # consumes `cross_in` — confirm this crossover is intentional and not a
    # variable swap.
    for i in range(cross_num):
        deep_in = RegulationLayer(tau)(deep_in)
        cross_in = RegulationLayer(tau)(cross_in)
        cross_out = CrossNet(1, parameterization=cross_parameterization,
                             l2_reg=l2_reg_cross)(deep_in)
        deep_out = DNN([cross_dim], dnn_activation, l2_reg_dnn,
                       dnn_dropout, dnn_use_bn, seed=seed)(cross_in)
        bridge_out = BridgeLayer()([cross_out, deep_out])
        # Split the bridged representation back into per-field slices for the
        # next level's regulation gates.
        bridge_out_list = tf.split(tf.expand_dims(bridge_out, axis=1),
                                   field_size, axis=-1)
        deep_in = bridge_out_list
        cross_in = bridge_out_list

    # Final prediction stacks the outputs of the last level plus the bridge.
    stack_out = tf.keras.layers.Concatenate()(
        [cross_out, deep_out, bridge_out])
    final_logit = tf.keras.layers.Dense(1, use_bias=False,
                                        kernel_initializer=tf.keras.initializers.glorot_normal(seed))(stack_out)

    final_logit = add_func([final_logit, linear_logit])
    output = PredictionLayer(task)(final_logit)

    model = tf.keras.models.Model(inputs=inputs_list, outputs=output)

    return model
7,691
def test_no_pass_confirm(db_session, nopassconfirm_app):
    """Register without password confirm option.

    With the no-password-confirm app configuration, a registration that
    submits only email and password must not trigger the
    "Passwords don't match!" error, and the stored user's password must
    verify against the submitted one.
    """
    assert db_session.query(User).count() == 0
    res = nopassconfirm_app.get('/register')
    res.form['email'] = DEFAULT_USER['email']
    res.form['password'] = DEFAULT_USER['password']
    res = res.form.submit(extra_environ={'REMOTE_ADDR': '0.0.0.0'})
    assert 'Passwords don\'t match!' not in HTMLParser().unescape(res.body.decode('unicode_escape'))
    transaction.commit()

    user = db_session.query(User).filter(User.email == DEFAULT_USER['email']).one()
    # User should not have active account at this moment
    # NOTE(review): the assertion only checks `is_active is not None`, which
    # does not actually prove the account is inactive — confirm intent.
    assert user.is_active is not None
    assert user.check_password(DEFAULT_USER['password'])
7,692
def cart_to_polar(arr_c):
    """Return cartesian vectors in their polar representation.

    Parameters
    ----------
    arr_c: array, shape (a1, a2, ..., d)
        Cartesian vectors, with last axis indexing the dimension (d in 1..3).

    Returns
    -------
    arr_p: array, shape of arr_c
        Polar vectors, using (radius, inclination, azimuth) convention.

    Raises
    ------
    Exception
        If the last axis has more than three components.
    """
    dim = arr_c.shape[-1]
    if dim == 1:
        # Nothing to convert in 1D; just hand back a copy.
        return arr_c.copy()
    if dim not in (2, 3):
        raise Exception('Invalid vector for polar representation')
    arr_p = np.empty_like(arr_c)
    arr_p[..., 0] = vector_mag(arr_c)
    if dim == 2:
        arr_p[..., 1] = np.arctan2(arr_c[..., 1], arr_c[..., 0])
    else:
        arr_p[..., 1] = np.arccos(arr_c[..., 2] / arr_p[..., 0])
        arr_p[..., 2] = np.arctan2(arr_c[..., 1], arr_c[..., 0])
    return arr_p
7,693
def createList(listSize):
    """
    Creates list block that creates input instances for each element and an
    output instance for connecting to the resulting list.

    List size is limited to 300 elements. Larger lists will be truncated.
    NOTE(review): the truncation described above is not visible in this
    function — presumably enforced inside the ListBlock itself; confirm.

    :param listSize: The size of the list of point inputs that will be created
    :return: A dict with "inputs" (the list of input instances) and "output"
        (the output of the list block)
    """
    listInst = psc.createInstance("ListBlock", "ListInstance")
    inputInstances = [None] * listSize
    # Tell the block how many elements to expect.
    psc.connect(psc.Constant((listSize, 0, 0)), listInst.size)
    # Collect the dynamically named element inputs (element0, element1, ...).
    for i in range(listSize):
        inputInstances[i] = getattr(listInst, "element" + str(i))
    return {"inputs":inputInstances, "output":listInst.out}
7,694
def get_ground_weather_one_place(dir_path): """ 1地点の地上気象データを取得する Args: dir_path(string) : ディレクトリパス Returns: DataFrame : ファイルの読込結果 """ # 地上気象データのファイル一覧取得 file_paths = read_csv.get_file_paths(dir_path) # 気象データを読み込み、DataFrameに格納する ground_df = None for file_path in file_paths: # 地上気象データ読み込み df = read_csv.read_ground(file_path) # 指定した行のデータを抽出 df1 = wdfproc.extract_row_isin(df, ('時', '時'), [9, 21]) # DataFrameに追加する if ground_df is None: ground_df = df1 else: ground_df = ground_df.append(df1) # 地点名を取得する dirname = os.path.basename(dir_path) elements = name_handle.elements_from_dirname_ground(dirname) place_name = elements['name'] # 列名を変更する ground_df = wdfproc.rename_column_ground(ground_df, place_name) return ground_df
7,695
def trash_description(spl, garbage, keyword, description="description_1"):
    """Move rows whose description column matches `keyword` into `garbage`.

    The keyword is treated as a regular expression against the chosen
    description column ("description_1" OR "description_2"); NaN entries
    never match.  Returns (spl, garbage, relocate): the cleaned frame, the
    garbage frame with the matching rows appended, and the relocated rows.
    """
    # Compute the match mask once and reuse it for both partitions.
    matches = spl[description].str.contains(keyword, na=False, regex=True)
    relocate = spl[matches]
    spl = spl[~matches]
    garbage = pd.concat([garbage, relocate], ignore_index=True, sort=False)
    return (spl, garbage, relocate)
7,696
def to_gray_example():
    """Show an example of the to_gray function.

    Prints a framed banner with the example source code, then executes the
    same calls so their output appears directly below.
    """
    print('\n' + '=' * 42)
    # The literal below mirrors the calls executed afterwards.
    print("""print(to_gray('#FFFFFF'))
print(to_gray('#000000'))
print(to_gray('#435612'))
print(to_gray('#130303'))
print(to_gray('#777787'))
print(to_gray('#808080'))
print(to_gray('#A9A9A9'))""")
    print('-' * 42)
    print(to_gray('#FFFFFF'))
    print(to_gray('#000000'))
    print(to_gray('#435612'))
    print(to_gray('#130303'))
    print(to_gray('#777787'))
    print(to_gray('#808080'))
    print(to_gray('#A9A9A9'))
    print('=' * 42)
7,697
def run_analysis(
        model,
        target,
        metric='integral',
        style='barplot',
        excluded_params=None
):
    """
    Perform sensitivity analysis to identify critical parameters, species or
    reactions in the complex biological network.

    The sensitivity S(y,x) was calculated according to the following equation:
    S(y,x) = d ln(yi) / d ln (xj), where yi is the signaling metric and xj is
    each nonzero species, parameter value or reaction rate.

    Paremters
    ---------
    model : module
        Model for sensitivity analysis.

    target : str
        - 'reaction'
        - 'initial_condition'
        - 'parameter'

    metric : str (default: 'integral')
        - 'maximum' : The maximum value.
        - 'minimum' : The minimum value.
        - 'duration' : The time it takes to decline below 10% of its maximum.
        - 'integral' : The integral of concentration over the observation time.

    style : str (default: 'barplot')
        - 'barplot'
        - 'heatmap'

    excluded_params : list of strings, optional
        For parameter sensitivity analysis.  Defaults to no exclusions.
        (Fixed: the previous mutable-list default argument is replaced by
        None to avoid a shared default object.)

    Example
    -------
    >>> from biomass.models import Nakakuki_Cell_2010
    >>> from biomass import run_analysis
    >>> run_analysis(
            Nakakuki_Cell_2010,
            target='parameter',
            excluded_params=[
                'a', 'Vn', 'Vc', 'Ligand', 'EGF', 'HRG', 'no_ligand'
            ]
        )

    """
    if excluded_params is None:
        excluded_params = []
    warnings.filterwarnings('ignore')
    if target == 'reaction':
        ReactionSensitivity(model).analyze(metric=metric, style=style)
    elif target == 'initial_condition':
        InitialConditionSensitivity(model).analyze(metric=metric, style=style)
    elif target == 'parameter':
        ParameterSensitivity(
            model, excluded_params
        ).analyze(metric=metric, style=style)
    else:
        raise ValueError(
            "Available targets are: 'reaction', 'initial_condition' , 'parameter'"
        )
7,698
def update_job(**kwargs):
    """Update job.

    Keyword arguments:
    id -- Job ID
    summary -- Job summary
    location -- Location job was advertised
    programs -- Programs the job is specified for
    levels -- Levels job is intended for [Junior, Intermediate, Senior]
    openings -- Number of job openings
    index -- Boolean to indicate whether to index or not (default True)
    """
    summary = kwargs['summary']
    location = kwargs['location'].lower()
    levels = kwargs['levels']

    # Resolve program names to Program objects, logging unknown ones.
    programs = []
    for program in kwargs['programs']:
        uw_program = Program.get_program(program)
        if uw_program:
            programs.append(uw_program)
        else:
            logger.error(COMPONENT, 'Error processing program: {}'.format(program))

    # Coerce openings to a non-negative int; any parse failure falls back to 0.
    openings = 0
    try:
        if kwargs['openings']:
            openings = int(kwargs['openings']) or 0
    except Exception:
        pass

    # BUG FIX: the original read `if index in kwargs` — a membership test on
    # the boolean False rather than the key 'index' — so the caller's flag was
    # always ignored. Honour the flag with the documented default of True.
    index = kwargs.get('index', True)

    job = Job.objects(id=kwargs['id']).first()

    remaining = job.openings

    # Job posting has decreased, some positions filled up
    if openings < job.openings:
        remaining = openings

    filtered_summary = engine.filter_summary(summary)
    summary_keywords = engine.get_keywords(filtered_summary, programs)

    # Normalize both summaries (lowercase, strip non-word chars) for comparison.
    filtered_summary_compare = re.sub(r'\W+', '', filtered_summary.lower().strip()).strip()
    job_summary_compare = re.sub(r'\W+', '', job.summary.lower().strip()).strip()

    employer = Employer.objects(jobs=kwargs['id']).first()

    # Job summary is not the same. In this case the employer most likely changed the job
    if not filtered_summary_compare == job_summary_compare:
        if openings >= 1:
            logger.info(COMPONENT, 'Job: {}: different summary detected, deprecating and creating new job..'
                        .format(kwargs['id']))

            job.update(set__deprecated=True)

            location = Location(name=location)
            keywords = [Keyword(keyword=k['keyword'], types=k['types']) for k in summary_keywords]

            # Assume new job so number of remaining positions is same as openings
            new_job = Job(title=job.title, summary=filtered_summary, year=job.year,
                          term=job.term, location=[location], openings=openings,
                          remaining=openings, levels=levels, programs=programs,
                          url=job.url, keywords=keywords)
            new_job.save()

            employer.update(push__jobs=new_job)

            if index:
                elastic.delete_employer_waterlooworks(employer)
                elastic.delete_job_waterlooworks(employer, job)
                elastic.index_employer_waterlooworks(employer)
                elastic.index_job_waterlooworks(employer, new_job)
        else:
            logger.info(COMPONENT, 'Job: {}: different summary detected but invalid openings: {}, ignoring..'
                        .format(job.title, openings))
    else:
        logger.info(COMPONENT, 'Job: {}: updating for current term'.format(kwargs['id']))

        location = Location(name=location)

        job.update(add_to_set__location=location, set__remaining=remaining,
                   set__levels=list(set(levels + job.levels)),
                   set__programs=list(set(programs + job.programs)),
                   set__last_indexed=datetime.now())

        if index:
            elastic.update_job_waterlooworks(employer, job)
7,699