content: string (lengths 22 to 815k)
id: int64 (0 to 4.91M)
def eig_min(a, eps=1e-7, kmax=1e3, log=False): """ :param a: matrix to find min eigenvalue of :param eps: desired precision :param kmax: max number of iterations allowed :param log: whether to log the iterations """ mu_1 = eig_max_abs(a, eps, kmax, log) return mu_1 - eig_max_abs(mu_1 * np.eye(a.shape[0]) - a, eps, kmax, log)
8,200
def relacao(lista): """Receives a list of real numbers and returns another list of size 3 in which (i) the first element is the count of numbers greater than zero, (ii) the second element is the count of numbers less than zero and (iii) the last element is the count of zeros in the initial list. Args: lista (list): list received to be processed by the function Returns: list: list of size three in the order (greater than, less than and equal to zero) """ maior = menor = igual = 0 for i in lista: if i > 0: maior += 1 elif i < 0: menor += 1 else: igual += 1 return [maior, menor, igual]
8,201
def require_python(minimum): """Python version check.""" if sys.hexversion < minimum: hversion = hex(minimum)[2:] if len(hversion) % 2 != 0: hversion = "0" + hversion split = list(hversion) parts = [] while split: parts.append(int("".join((split.pop(0), split.pop(0))), 16)) major, minor, micro, release = parts if release == 0xF0: print( "Python {0}.{1}.{2} or higher is required".format( major, minor, micro ) ) else: print( "Python {0}.{1}.{2} ({3}) or higher is required".format( major, minor, micro, hex(release)[2:] ) ) sys.exit(1)
8,202
def _input_to_dictionary(input_): """Convert. Args: input_: GraphQL "data" dictionary structure from mutation Returns: result: Dict of inputs """ # 'column' is a dict of DB model 'non string' column names and their types column = { 'idx_user': DATA_INT, 'enabled': DATA_INT } result = utils.input_to_dictionary(input_, column=column) return result
8,203
def creature_ability_116(field, player, opponent, virtual, target, itself): """ Fanfare: Put Coco and Mimi into your hand. """ put_card_in_hand(field,player,virtual,name="Mimi",card_category="Spell") put_card_in_hand(field, player, virtual, name="Coco", card_category="Spell")
8,204
def validate(request): """Validate an authentication request.""" email_token = request.GET.get('a') client_token = request.GET.get('b') user = authenticate(email_token=email_token, counter_token=client_token) if user: login(request, user) return redirect(request.GET.get('success', '/')) else: return HttpResponseForbidden()
8,205
def strip_chr(bt): """Strip 'chr' from chromosomes for BedTool object Parameters ---------- bt : pybedtools.BedTool BedTool to strip 'chr' from. Returns ------- out : pybedtools.BedTool New BedTool with 'chr' stripped from chromosome names. """ try: df = pd.read_table(bt.fn, header=None, dtype=str) # If the try fails, I assume that's because the file has a trackline. Note # that I don't preserve the trackline (I'm not sure how pybedtools keeps # track of it anyway). except pd.parser.CParserError: df = pd.read_table(bt.fn, header=None, skiprows=1, dtype=str) df[0] = df[0].apply(lambda x: x[3:]) s = '\n'.join(df.astype(str).apply(lambda x: '\t'.join(x), axis=1)) + '\n' out = pbt.BedTool(s, from_string=True) return out
8,206
def fit_slice(fitter, sliceid, lbda_range=[5000, 8000], nslices=5, **kwargs): """ """ fitvalues = fitter.fit_slice(lbda_ranges=lbda_range, metaslices=nslices, sliceid=sliceid, **kwargs) return fitvalues
8,207
def test(monkeypatch, is_windows, mode): """Test function. :param monkeypatch: pytest fixture. :param bool is_windows: Monkeypatch terminal_io.IS_WINDOWS :param str mode: Scenario to test for. """ monkeypatch.setattr('terminaltables.terminal_io.IS_WINDOWS', is_windows) kernel32 = MockKernel32() # Title. if mode == 'ascii': title = 'Testing terminaltables.' elif mode == 'unicode': title = u'Testing terminaltables with unicode: 世界你好蓝色' else: title = b'Testing terminaltables with bytes.' # Run. assert set_terminal_title(title, kernel32) if not is_windows: return # Verify. if mode == 'ascii': assert kernel32.setConsoleTitleA_called assert not kernel32.setConsoleTitleW_called elif mode == 'unicode': assert not kernel32.setConsoleTitleA_called assert kernel32.setConsoleTitleW_called else: assert kernel32.setConsoleTitleA_called assert not kernel32.setConsoleTitleW_called
8,208
def test_all_shorthand(count): """ scope.all is a shorthand for creating a scope and running things in it """ async def foo(mark): count(mark) @ayo.run_as_main() async def main(run): run.all(foo(1), foo(2), foo(3)) assert count == 3, "All coroutines have been called exactly once"
8,209
def create_project(name=None, id=None, description=None, clientRequestToken=None, sourceCode=None, toolchain=None, tags=None): """ Creates a project, including project resources. This action creates a project based on a submitted project request. A set of source code files and a toolchain template file can be included with the project request. If these are not provided, an empty project is created. See also: AWS API Documentation Exceptions :example: response = client.create_project( name='string', id='string', description='string', clientRequestToken='string', sourceCode=[ { 'source': { 's3': { 'bucketName': 'string', 'bucketKey': 'string' } }, 'destination': { 'codeCommit': { 'name': 'string' }, 'gitHub': { 'name': 'string', 'description': 'string', 'type': 'string', 'owner': 'string', 'privateRepository': True|False, 'issuesEnabled': True|False, 'token': 'string' } } }, ], toolchain={ 'source': { 's3': { 'bucketName': 'string', 'bucketKey': 'string' } }, 'roleArn': 'string', 'stackParameters': { 'string': 'string' } }, tags={ 'string': 'string' } ) :type name: string :param name: [REQUIRED]\nThe display name for the project to be created in AWS CodeStar.\n :type id: string :param id: [REQUIRED]\nThe ID of the project to be created in AWS CodeStar.\n :type description: string :param description: The description of the project, if any. :type clientRequestToken: string :param clientRequestToken: A user- or system-generated token that identifies the entity that requested project creation. This token can be used to repeat the request. :type sourceCode: list :param sourceCode: A list of the Code objects submitted with the project request. If this parameter is specified, the request must also include the toolchain parameter.\n\n(dict) --Location and destination information about the source code files provided with the project request. The source code is uploaded to the new project source repository after project creation.\n\nsource (dict) -- [REQUIRED]The location where the source code files provided with the project request are stored. AWS CodeStar retrieves the files during project creation.\n\ns3 (dict) -- [REQUIRED]Information about the Amazon S3 location where the source code files provided with the project request are stored.\n\nbucketName (string) --The Amazon S3 bucket name where the source code files provided with the project request are stored.\n\nbucketKey (string) --The Amazon S3 object key where the source code files provided with the project request are stored.\n\n\n\n\n\ndestination (dict) -- [REQUIRED]The repository to be created in AWS CodeStar. Valid values are AWS CodeCommit or GitHub. After AWS CodeStar provisions the new repository, the source code files provided with the project request are placed in the repository.\n\ncodeCommit (dict) --Information about the AWS CodeCommit repository to be created in AWS CodeStar. This is where the source code files provided with the project request will be uploaded after project creation.\n\nname (string) -- [REQUIRED]The name of the AWS CodeCommit repository to be created in AWS CodeStar.\n\n\n\ngitHub (dict) --Information about the GitHub repository to be created in AWS CodeStar. This is where the source code files provided with the project request will be uploaded after project creation.\n\nname (string) -- [REQUIRED]Name of the GitHub repository to be created in AWS CodeStar.\n\ndescription (string) --Description for the GitHub repository to be created in AWS CodeStar. 
This description displays in GitHub after the repository is created.\n\ntype (string) -- [REQUIRED]The type of GitHub repository to be created in AWS CodeStar. Valid values are User or Organization.\n\nowner (string) -- [REQUIRED]The GitHub username for the owner of the GitHub repository to be created in AWS CodeStar. If this repository should be owned by a GitHub organization, provide its name.\n\nprivateRepository (boolean) -- [REQUIRED]Whether the GitHub repository is to be a private repository.\n\nissuesEnabled (boolean) -- [REQUIRED]Whether to enable issues for the GitHub repository.\n\ntoken (string) -- [REQUIRED]The GitHub user\'s personal access token for the GitHub repository.\n\n\n\n\n\n\n\n\n :type toolchain: dict :param toolchain: The name of the toolchain template file submitted with the project request. If this parameter is specified, the request must also include the sourceCode parameter.\n\nsource (dict) -- [REQUIRED]The Amazon S3 location where the toolchain template file provided with the project request is stored. AWS CodeStar retrieves the file during project creation.\n\ns3 (dict) -- [REQUIRED]The Amazon S3 bucket where the toolchain template file provided with the project request is stored.\n\nbucketName (string) --The Amazon S3 bucket name where the source code files provided with the project request are stored.\n\nbucketKey (string) --The Amazon S3 object key where the source code files provided with the project request are stored.\n\n\n\n\n\nroleArn (string) --The service role ARN for AWS CodeStar to use for the toolchain template during stack provisioning.\n\nstackParameters (dict) --The list of parameter overrides to be passed into the toolchain template during stack provisioning, if any.\n\n(string) --\n(string) --\n\n\n\n\n\n :type tags: dict :param tags: The tags created for the project.\n\n(string) --\n(string) --\n\n\n\n :rtype: dict ReturnsResponse Syntax { 'id': 'string', 'arn': 'string', 'clientRequestToken': 'string', 'projectTemplateId': 'string' } Response Structure (dict) -- id (string) -- The ID of the project. arn (string) -- The Amazon Resource Name (ARN) of the created project. clientRequestToken (string) -- A user- or system-generated token that identifies the entity that requested project creation. projectTemplateId (string) -- Reserved for future use. Exceptions CodeStar.Client.exceptions.ProjectAlreadyExistsException CodeStar.Client.exceptions.LimitExceededException CodeStar.Client.exceptions.ValidationException CodeStar.Client.exceptions.ProjectCreationFailedException CodeStar.Client.exceptions.InvalidServiceRoleException CodeStar.Client.exceptions.ProjectConfigurationException CodeStar.Client.exceptions.ConcurrentModificationException :return: { 'id': 'string', 'arn': 'string', 'clientRequestToken': 'string', 'projectTemplateId': 'string' } :returns: CodeStar.Client.exceptions.ProjectAlreadyExistsException CodeStar.Client.exceptions.LimitExceededException CodeStar.Client.exceptions.ValidationException CodeStar.Client.exceptions.ProjectCreationFailedException CodeStar.Client.exceptions.InvalidServiceRoleException CodeStar.Client.exceptions.ProjectConfigurationException CodeStar.Client.exceptions.ConcurrentModificationException """ pass
8,210
def test_chain_rewrite_save_last(): """Take chain of length 5, save last node. This saves no memory, and is an edge case that should raise an exception in the rewriter.""" tf.reset_default_graph() tf_dev = tf.device('/cpu:0') tf_dev.__enter__() n = 5 a0, a1, a2, a3, a4 = make_chain_tanh(n) try: grad = memory_saving_gradients.gradients([a4], [a0], checkpoints=[a4])[0] except Exception: return else: if not REMOVE_ASSERTS: assert False, "Should've been 'no checkpoints nodes found' exception"
8,211
def build_routes(app): """Register routes to given app instance.""" app.config.update({ 'APISPEC_SPEC': APISpec( title=SERVICE_NAME, openapi_version=OPENAPI_VERSION, version=API_VERSION, plugins=[MarshmallowPlugin()], ), 'APISPEC_SWAGGER_URL': API_SPEC_URL, }) app.register_blueprint(cache_blueprint) app.register_blueprint(dataset_blueprint) swaggerui_blueprint = get_swaggerui_blueprint( SWAGGER_URL, API_SPEC_URL, config={'app_name': 'Renku Service'} ) app.register_blueprint(swaggerui_blueprint, url_prefix=SWAGGER_URL) docs = FlaskApiSpec(app) docs.register(upload_file_view, blueprint=CACHE_BLUEPRINT_TAG) docs.register(list_uploaded_files_view, blueprint=CACHE_BLUEPRINT_TAG) docs.register(project_clone, blueprint=CACHE_BLUEPRINT_TAG) docs.register(list_projects_view, blueprint=CACHE_BLUEPRINT_TAG) docs.register(create_dataset_view, blueprint=DATASET_BLUEPRINT_TAG) docs.register(add_file_to_dataset_view, blueprint=DATASET_BLUEPRINT_TAG) docs.register(list_datasets_view, blueprint=DATASET_BLUEPRINT_TAG) docs.register(list_dataset_files_view, blueprint=DATASET_BLUEPRINT_TAG)
8,212
def execute_sync(function, sync_type): """ Synchronize with the disassembler for safe database access. Modified from https://github.com/vrtadmin/FIRST-plugin-ida """ @functools.wraps(function) def wrapper(*args, **kwargs): output = [None] # # this inline function definition is technically what will execute # in the context of the main thread. we use this thunk to capture # any output the function may want to return to the user. # def thunk(): output[0] = function(*args, **kwargs) return 1 if is_mainthread(): thunk() else: idaapi.execute_sync(thunk, sync_type) # return the output of the synchronized execution return output[0] return wrapper
8,213
def mk_living_arrangements(data_id, data): # measurement group 11 """ transforms a f-living-arrangements.json form into the triples used by insertMeasurementGroup to store each measurement that is in the form :param data_id: unique id from the json form :param data: data array from the json form :return: The list of (typeid,valType,value) triples that are used by insertMeasurementGroup to add the measurements """ return [(220, 2, data_id), (95, 6, lwh.mk_category(data['alone'], ['Alone', 'With someone'])), (96, 5, lwh.mk_category(data['arrange'], ['House', 'Apartment', 'Independent living unit', 'Other'])), (97, 2, data['othertext'])]
8,214
def command_report(subargv): """Output a report from the results of the jobs command.""" # Directory in which the reports are # [positional] reports: Argument # Comparison file compare: Argument & config = default(None) # Weights file to compute the score weights: Argument & config = default(None) # Path to the HTML file to generate html: Argument = default(None) # Compare the configuration's individual GPUs compare_gpus: Argument & bool = default(False) # Whether to penalize variance in the score (defaults to False) penalize_variance: Argument & bool = default(False) # Price of the configuration, to compute score/price ratio price: Argument & float = default(None) # Title to give to the report title: Argument = default(None) reports = os.path.realpath(os.path.expanduser(reports)) if os.path.isdir(reports): results = summarize(reports, filter=_filter, group=_group) else: results = json.load(open(reports)) make_report( results, compare=compare, weights=weights, html=html, compare_gpus=compare_gpus, price=price, title=title, penalize_variance=penalize_variance, )
8,215
def process_reporter(data_inserter): """ Process device aircraft data """ total_inserted, errors = data_inserter() print(f'INFO: Total inserted records: {total_inserted}') if errors: for (record, err) in errors: record_json = json.dumps(record) if record else 'NotFound' joined_errors = json.dumps(err) print(f'ERROR: Error inserting {record_json}, Errors : {joined_errors}')
8,216
def ids_in(table): """Returns the ids in the given dataframe, either as a list of ints or a single int.""" entity, id_colname = get_entity_and_id_colname(table) # Series.to_list() converts to a list of Python int rather than numpy.int64 # Conversion to the list type and the int type are both necessary for the shared functions ids = table[id_colname].to_list() ids = process_singleton_ids(ids, entity) return ids
8,217
def transformer_ae_base_tpu(): """Base config adjusted for TPU.""" hparams = transformer_ae_base() transformer.update_hparams_for_tpu(hparams) hparams.batch_size = 512 return hparams
8,218
def test_minor_bump(admin_client, transactional_db, studies, worker): """ Test that the major version number is bumped upon publish """ release = { 'name': 'First Release', 'studies': ['SD_00000001'], 'author': 'bob', 'is_major': True, } resp = admin_client.post('http://testserver/releases', data=release) assert Release.objects.count() == 1 res = resp.json() assert res['version'] == '0.0.0' worker.work(burst=True) resp = admin_client.get('http://testserver/releases/'+res['kf_id']) res = resp.json() resp = admin_client.post('http://testserver/releases/' + res['kf_id']+'/publish') worker.work(burst=True) resp = admin_client.get('http://testserver/releases/'+res['kf_id']) res = resp.json() assert res['version'] == '1.0.0' assert str(Release.objects.first().version) == '1.0.0'
8,219
def attribute_volume(tree, altitudes, area=None): """ Volume of each node of the given tree. The volume :math:`V(n)` of a node :math:`n` is defined recursively as: .. math:: V(n) = area(n) * | altitude(n) - altitude(parent(n)) | + \sum_{c \in children(n)} V(c) :param tree: input tree :param altitudes: node altitudes of the input tree :param area: area of the nodes of the input hierarchy (provided by :func:`~higra.attribute_area` on `tree`) :return: a 1d array """ if area is None: area = hg.attribute_area(tree) height = np.abs(altitudes[tree.parents()] - altitudes) height = height * area volume_leaves = np.zeros(tree.num_leaves(), dtype=np.float64) return hg.accumulate_and_add_sequential(tree, height, volume_leaves, hg.Accumulators.sum)
8,220
def save_plot( fig, filepath=None, format="png", interactive=False, return_filepath=False ): """Saves fig to filepath if specified, or to a default location if not. Args: fig (Figure): Figure to be saved. filepath (str or Path, optional): Location to save file. Default is with filename "test_plot". format (str): Extension for figure to be saved as. Ignored if interactive is True and fig is of type plotly.Figure. Defaults to 'png'. interactive (bool, optional): If True and fig is of type plotly.Figure, saves the fig as interactive instead of static, and format will be set to 'html'. Defaults to False. return_filepath (bool, optional): Whether to return the final filepath the image is saved to. Defaults to False. Returns: String representing the final filepath the image was saved to if return_filepath is set to True. Defaults to None. """ plotly_ = import_or_raise("plotly", error_msg="Cannot find dependency plotly") graphviz_ = import_or_raise( "graphviz", error_msg="Please install graphviz to visualize trees." ) matplotlib = import_or_raise( "matplotlib", error_msg="Cannot find dependency matplotlib" ) plt_ = matplotlib.pyplot axes_ = matplotlib.axes is_plotly = False is_graphviz = False is_plt = False is_seaborn = False format = format if format else "png" if isinstance(fig, plotly_.graph_objects.Figure): is_plotly = True elif isinstance(fig, graphviz_.Source): is_graphviz = True elif isinstance(fig, plt_.Figure): is_plt = True elif isinstance(fig, axes_.SubplotBase): is_seaborn = True if not filepath: extension = "html" if interactive and is_plotly else format filepath = os.path.join(os.getcwd(), f"test_plot.{extension}") filepath = _file_path_check( filepath, format=format, interactive=interactive, is_plotly=is_plotly ) if is_plotly and interactive: fig.write_html(file=filepath) elif is_plotly and not interactive: fig.write_image(file=filepath, engine="kaleido") elif is_graphviz: filepath_, format_ = os.path.splitext(filepath) fig.format = "png" filepath = f"{filepath_}.png" fig.render(filename=filepath_, view=False, cleanup=True) elif is_plt: fig.savefig(fname=filepath) elif is_seaborn: fig = fig.figure fig.savefig(fname=filepath) if return_filepath: return filepath
8,221
def updateCalibrationCoefs(inputCsvFile, outputCsvFile): """read summary .csv file and update coefs for those with poor calibration Look through all processed accelerometer files, and find participants that did not have good calibration data. Then assigns the calibration coefs from previous good use of a given device. Output will be a new .csv file to support reprocessing of uncalibrated files with new pre-specified calibration coefs. :param str inputCsvFile: Summary CSV of processed dataset :param str outputCsvFile: Output CSV of files to be reprocessed with new calibration info :return: New file written to <outputCsvFile> :rtype: void :Example: >>> import accUtils >>> accUtils.updateCalibrationCoefs("data/summary-all-files.csv", "study/files-recalibration.csv") <CSV of files to be reprocessed written to "study/files-recalibration.csv"> """ d = pd.read_csv(inputCsvFile) # select participants with good spread of stationary values for calibration goodCal = d.loc[(d['quality-calibratedOnOwnData'] == 1) & (d['quality-goodCalibration'] == 1)] # now only select participants whose data was NOT calibrated on a good spread of stationary values badCal = d.loc[(d['quality-calibratedOnOwnData'] == 1) & (d['quality-goodCalibration'] == 0)] # sort files by start time, which makes selection of most recent value easier goodCal = goodCal.sort_values(['file-startTime']) badCal = badCal.sort_values(['file-startTime']) calCols = ['calibration-xOffset(g)', 'calibration-yOffset(g)', 'calibration-zOffset(g)', 'calibration-xSlope(g)', 'calibration-ySlope(g)', 'calibration-zSlope(g)', 'calibration-xTemp(C)', 'calibration-yTemp(C)', 'calibration-zTemp(C)', 'calibration-meanDeviceTemp(C)'] # print output CSV file with suggested calibration parameters noOtherUses = 0 nextUses = 0 previousUses = 0 f = open(outputCsvFile, 'w') f.write('fileName,calOffset,calSlope,calTemp,meanTemp\n') for ix, row in badCal.iterrows(): # first get current 'bad' file participant, device, startTime = row[['file-name', 'file-deviceID', 'file-startTime']] device = int(device) # get calibration values from most recent previous use of this device # (when it had a 'good' calibration) prevUse = goodCal[calCols][(goodCal['file-deviceID'] == device) & (goodCal['file-startTime'] < startTime)].tail(1) try: ofX, ofY, ofZ, slpX, slpY, slpZ, tmpX, tmpY, tmpZ, calTempAvg = prevUse.iloc[0] previousUses += 1 except Exception: nextUse = goodCal[calCols][(goodCal['file-deviceID'] == device) & (goodCal['file-startTime'] > startTime)].head(1) if len(nextUse) < 1: print('no other uses for this device at all: ', str(device), str(participant)) noOtherUses += 1 continue nextUses += 1 ofX, ofY, ofZ, slpX, slpY, slpZ, tmpX, tmpY, tmpZ, calTempAvg = nextUse.iloc[0] # now construct output out = participant + ',' out += str(ofX) + ' ' + str(ofY) + ' ' + str(ofZ) + ',' out += str(slpX) + ' ' + str(slpY) + ' ' + str(slpZ) + ',' out += str(tmpX) + ' ' + str(tmpY) + ' ' + str(tmpZ) + ',' out += str(calTempAvg) f.write(out + '\n') f.close() print('previousUses', previousUses) print('nextUses', nextUses) print('noOtherUses', noOtherUses) print('Reprocessing for ', str(previousUses + nextUses), 'participants written to:', outputCsvFile)
8,222
def stats_roll(): """ creates 4 random ints between 1 and 6 for each item in starting_stats, drops the lowest and calculates the total """ starting_stats = {'Strength': 0, 'Dexterity': 0, 'Constitution': 0, 'Intelligence': 0, 'Wisdom': 0, 'Charisma': 0} num = 0 for key, value in starting_stats.items(): # print the keys of starting_stats num = num + 1 print(F"\n{num} - {key}") count = 0 rolls = [] # create the list of roll results while count < 4: rolls.append(roll()) count += 1 rolls.sort() # sort rolls into ascending order print(F"You rolled: {rolls}") rolls.pop(0) # drop the first (lowest) number in rolls total = 0 for i in rolls: total = total + i print("Your three highest rolls were {}, for a total of {}.".format(rolls, total)) starting_stats[key] = total print("\n\nYour stats are:\n") for key, value in starting_stats.items(): # print the keys and values of starting_stats print(F"{key} = {value}")
8,223
def test_image_equality(): """Images with the same pixels should equal each other, as long as Pillow doesn't break """ a = Image.new("RGBA", (1, 1)) b = Image.new("RGBA", (1, 1)) assert a == b b.putpixel((0, 0), (1, 0, 0, 1)) assert a != b a.putpixel((0, 0), (1, 0, 0, 1)) assert a == b
8,224
def devstack(args): """ Start the devstack lms or studio server """ parser = argparse.ArgumentParser(prog='paver devstack') parser.add_argument('system', type=str, nargs=1, help="lms or studio") parser.add_argument('--fast', action='store_true', default=False, help="Skip updating assets") parser.add_argument('--optimized', action='store_true', default=False, help="Run with optimized assets") parser.add_argument('--settings', type=str, default=DEFAULT_SETTINGS, help="Settings file") parser.add_argument('--asset-settings', type=str, default=None, help=ASSET_SETTINGS_HELP) args = parser.parse_args(args) settings = args.settings asset_settings = args.asset_settings if args.asset_settings else settings if args.optimized: settings = OPTIMIZED_SETTINGS asset_settings = OPTIMIZED_ASSETS_SETTINGS sh(django_cmd('cms', settings, 'reindex_course', '--setup')) run_server( args.system[0], fast=args.fast, settings=settings, asset_settings=asset_settings, )
8,225
def write_jenkins_file(): """Write changed_ext_attrs and changed_scripts to jenkins file. $eas will contain the changed extension attributes, $scripts will contain the changed scripts If there are no changes, the variable will be set to 'None' """ if len(changed_ext_attrs) == 0: contents = "eas=" + "None" else: contents = "eas=" + slack_emoji + changed_ext_attrs[0] + '\\n' + '\\' for changed_ext_attr in changed_ext_attrs[1:]: contents = contents + '\n' + slack_emoji + changed_ext_attr + '\\n' + '\\' if len(changed_scripts) == 0: contents = contents.rstrip('\\') + '\n' + "scripts=" + "None" else: contents = contents.rstrip('\\') + '\n' + "scripts=" + slack_emoji + changed_scripts[0] + '\\n' + '\\' for changed_script in changed_scripts[1:]: contents = contents + '\n' + slack_emoji + changed_script + '\\n' + '\\' with open('jenkins.properties', 'w') as f: f.write(contents)
8,226
def _parse_config_result(response, source) -> None: """Checks if the source received in manifest is in the trusted repository list response from config agent @param response: String response received from configuration agent for the command requested @param source: the repository path where the package is supposed to be fetched from """ logger.debug("") if response is None: raise DispatcherException( 'Source verification failed. Failure fetching trusted repository.') for line in response.strip().splitlines(): trusted_source = line.strip() if _is_source_match_trusted_repo(trusted_source, canonicalize_uri(source)): return logger.debug(f"Source '{source}' is not in the trusted repositories") raise DispatcherException( 'Source verification failed. Source is not in the trusted repository.')
8,227
def populate_canary(canary_id, protocol, domain, dns, filename, rdir, settings): """Create actual canary URI / URL.""" if protocol not in ['unc', 'http', 'https']: raise ValidationError('Unknown protocol specified') if dns: domain = f"{canary_id}.{domain}" else: domain = f"{settings.nginx_domain}.{domain}" if protocol == 'unc': if not rdir: canary = f"\\\\{domain}\\templates\\{filename}" else: canary = f"\\\\{domain}\\templates\\{rdir}\\{filename}" else: if not rdir: canary = f"{protocol}://{domain}/images/{filename}" else: canary = f"{protocol}://{domain}/images/{rdir}/{filename}" return canary
8,228
def get_wastewater_location_data(): """Read in data of wastewater facility location data. :return: dataframe of wastewater location values """ data = pkg_resources.resource_filename('interflow', 'input_data/WW_Facility_Loc.csv') # return dataframe return pd.read_csv(data, dtype={'CWNS_NUMBER': str})
8,229
def generate_accounts(seeds): """Create private keys and addresses for all seeds. """ return { seed: { 'privatekey': encode_hex(sha3(seed)), 'address': encode_hex(privatekey_to_address(sha3(seed))), } for seed in seeds }
8,230
def get_annotation_df( state: State, piece: Piece, root_type: PitchType, tonic_type: PitchType, ) -> pd.DataFrame: """ Get a df containing the labels of the given state. Parameters ---------- state : State The state containing harmony annotations. piece : Piece The piece which was used as input when creating the given state. root_type : PitchType The pitch type to use for chord root labels. tonic_type : PitchType The pitch type to use for key tonic annotations. Returns ------- annotation_df : pd.DataFrame[type] A DataFrame containing the harmony annotations from the given state. """ labels_list = [] chords, changes = state.get_chords() estimated_chord_labels = np.zeros(len(piece.get_inputs()), dtype=int) for chord, start, end in zip(chords, changes[:-1], changes[1:]): estimated_chord_labels[start:end] = chord keys, changes = state.get_keys() estimated_key_labels = np.zeros(len(piece.get_inputs()), dtype=int) for key, start, end in zip(keys, changes[:-1], changes[1:]): estimated_key_labels[start:end] = key chord_label_list = hu.get_chord_label_list(root_type, use_inversions=True) key_label_list = hu.get_key_label_list(tonic_type) prev_est_key_string = None prev_est_chord_string = None for duration, note, est_chord_label, est_key_label in zip( piece.get_duration_cache(), piece.get_inputs(), estimated_chord_labels, estimated_key_labels, ): if duration == 0: continue est_chord_string = chord_label_list[est_chord_label] est_key_string = key_label_list[est_key_label] # No change in labels if est_chord_string == prev_est_chord_string and est_key_string == prev_est_key_string: continue if est_key_string != prev_est_key_string: labels_list.append( { "label": est_key_string, "mc": note.onset[0], "mc_onset": note.mc_onset, "mn_onset": note.onset[1], } ) if est_chord_string != prev_est_chord_string: labels_list.append( { "label": est_chord_string, "mc": note.onset[0], "mc_onset": note.mc_onset, "mn_onset": note.onset[1], } ) prev_est_key_string = est_key_string prev_est_chord_string = est_chord_string return pd.DataFrame(labels_list)
8,231
def prettify(elem): """Return a pretty-printed XML string for the Element.""" rough_string = ET.tostring(elem, "utf-8") reparsed = minidom.parseString(rough_string) return reparsed.toprettyxml(indent=" ")
8,232
def plot_confusion_matrix_full(model,X_train, y_train, X_val, y_val): """Plot normalized and non-normalized confusion matrices for the train and validation sets Parameters ---------- model: model Model passed into function X_train : Numpy Array X_train data y_train : Numpy Array Train target X_val : Numpy Array X_val data y_val : Numpy Array Val target Returns ------- None """ from sklearn.metrics import ConfusionMatrixDisplay import matplotlib.pyplot as plt # Plot non-normalized and normalized confusion matrices titles_options = [ ("Train - Confusion matrix, without normalization", None, X_train, y_train), ("Train - Normalized confusion matrix", "true", X_train, y_train), ("Validate - Confusion matrix, without normalization", None, X_val, y_val), ("Validate - Normalized confusion matrix", "true", X_val, y_val), ] for title, normalize, X, y in titles_options: disp = ConfusionMatrixDisplay.from_estimator( model, X, y, display_labels=model.classes_, cmap=plt.cm.Blues, normalize=normalize, ) disp.ax_.set_title(title) print(title) print(disp.confusion_matrix) plt.show() return
8,233
def parse_new_multipart_upload(data): """ Parser for new multipart upload response. :param data: Response data for new multipart upload. :return: Returns a upload id. """ root = S3Element.fromstring('InitiateMultipartUploadResult', data) return root.get_child_text('UploadId')
8,234
def mech_name_for_species(mech1_csv_str, mech2_csv_str, ich): """ build dictionaries to get the name for a given InCHI string """ mech1_inchi_dct = mechparser.mechanism.species_inchi_name_dct( mech1_csv_str) mech2_inchi_dct = mechparser.mechanism.species_inchi_name_dct( mech2_csv_str) if ich in mech1_inchi_dct: mech1_name = mech1_inchi_dct[ich] else: mech1_name = 'Not in Mechanism' if ich in mech2_inchi_dct: mech2_name = mech2_inchi_dct[ich] else: mech2_name = 'Not in Mechanism' return mech1_name, mech2_name
8,235
def get_api(api, cors_handler, marshal=None, resp_model=None, parser=None, json_resp=True): """Returns default API decorator for GET request. :param api: Flask rest_plus API :param cors_handler: CORS handler :param marshal: The API marshaller, e.g. api.marshal_list_with :param resp_model: The API response model """ funcs = [ cors_handler, no_cache, log_header(), ] if json_resp: funcs.append(as_json) funcs.append( api.doc(responses={ 403: 'Not Authorized', 404: 'Resource does not exist', }), ) if parser: funcs.insert(-1, api.doc(parser=parser)) if marshal and resp_model: funcs.insert(-1, marshal(resp_model)) return utils.compose(*funcs)
8,236
def construct_chain_config_params(args): """ Helper function for constructing the kwargs to initialize a ChainConfig object. """ yield 'network_id', args.network_id if args.data_dir is not None: yield 'data_dir', args.data_dir if args.nodekey_path and args.nodekey: raise ValueError("Cannot provide both nodekey_path and nodekey") elif args.nodekey_path is not None: yield 'nodekey_path', args.nodekey_path elif args.nodekey is not None: yield 'nodekey', decode_hex(args.nodekey)
8,237
def fetch_total_n_items(num_items, uniform_distribution=False): """Get num_items files from internet archive in our dirty categories list""" logger.info(f"Fetching info for {num_items} internetarchive items...") categories_weights = CATEGORIES_WEIGHTS if uniform_distribution: categories_weights = [1/len(DIRTY_CATEGORIES) for x in range(len(DIRTY_CATEGORIES))] how_many_of_each_cat = [math.ceil(w * num_items) for w in categories_weights] logger.info(" ".join([f"{cat}:{quant}" for cat, quant in zip(DIRTY_CATEGORIES, how_many_of_each_cat)])) total_items = [] for amount, category in zip(how_many_of_each_cat, DIRTY_CATEGORIES): query = make_category_query(category) try: total_items.extend(fetch_items_in_query(query, amount)) except Exception as e: logger.error(f"Failed to fetch info for \"{query}\" from internetarchive") return total_items
8,238
def ListLigandsInfo(MolName, ChainIDs): """List ligand information across all chains.""" if not (OptionsInfo["All"] or OptionsInfo["Ligands"]): return ListSelectionResiduesInfo(MolName, ChainIDs, "Ligands")
8,239
def full_formation_file_call(fit_directory): """full_formation_file_call Casm query to generate composition of species "A", formation energy, DFT hull distance, cluster expansion energies and cluster expansion hull distance. Args: fit_directory (str): absolute path to the current genetic fit directory. Returns: none. """ os.chdir(fit_directory) os.system( "casm query -k comp formation_energy hull_dist clex clex_hull_dist -o full_formation_energies.txt" )
8,240
def german_weekday_name(date): """Return the german weekday name for a given date.""" days = [u'Montag', u'Dienstag', u'Mittwoch', u'Donnerstag', u'Freitag', u'Samstag', u'Sonntag'] return days[date.weekday()]
8,241
def load_data(database_filepath): """ Input: database_filepath - path of the cleaned data file Output: X and Y for model training Category names """ # load data from database engine = create_engine('sqlite:///{}'.format(database_filepath)) df = pd.read_sql("SELECT * FROM df_clean", engine) X = df['message'] Y = df.iloc[0:, 4:] category_names = Y.columns return X, Y, category_names
8,242
def real_spherical_harmonics(phi, theta, l, m): """Real spherical harmonics, also known as tesseral spherical harmonics with the Condon-Shortley phase. Only for scalar phi and theta!!! """ from scipy.special import lpmn, factorial if m == 0: y = np.sqrt( (2 * l + 1) / (4 * np.pi) ) * lpmn(m, l, np.cos(theta))[0][-1][-1] elif m < 0: y = (-1)**m * np.sqrt(2) * np.sqrt( (2 * l + 1) / (4 * np.pi) * \ factorial(l - np.abs(m)) / factorial(l + np.abs(m)) ) * lpmn(np.abs(m), l, np.cos(theta))[0][-1][-1] * np.sin(np.abs(m) * phi) elif m > 0: y = (-1)**m * np.sqrt(2) * np.sqrt( (2 * l + 1) / (4 * np.pi) * \ factorial(l - np.abs(m)) / factorial(l + np.abs(m)) ) * lpmn(np.abs(m), l, np.cos(theta))[0][-1][-1] * np.cos(np.abs(m) * phi) return y
8,243
def get_replace_function(replace_multiple: bool) -> Callable: """given bool:replace_multiple flag, return replace function from modifier """ if replace_multiple: return distend.modifier.replace_multiple else: return distend.modifier.replace_single
8,244
def setup_restart(signum=signal.SIGHUP, restart_callback=None): """Install a signal handler that calls :func:`restart` when :const:`SIGHUP` is received. Parameters ---------- signum : int, optional Signal number for the signal handler to install restart_callback : callable If specified, this is called before execing. This can be used to cleanly wind up state. If it returns a true value, the restart is not done. This can be used if the callback just schedules cleanup and :func:`restart_process` in an asynchronous manner. """ def restart_thread(): logger = logging.getLogger(__file__) logger.warn("Received signal %d, restarting", signum) if restart_callback is not None: if restart_callback(): return restart_process() def restart_handler(signum, frame): # It's not safe to log directly from a signal handler, so start # a separate thread to do the shutdown. thread = threading.Thread(target=restart_thread) thread.daemon = True thread.start() signal.signal(signum, restart_handler)
8,245
def accuracy(X,Y,w): """ First, evaluate the classifier on training data. """ n_correct = 0 for i in range(len(X)): if predict(w, X[i]) == Y[i]: n_correct += 1 return n_correct * 1.0 / len(X)
8,246
def reduce_matrix(indices_to_remove: List[int], matrix: np.ndarray) -> np.ndarray: """ Removes indices from indices_to_remove from binary associated to indexing of matrix, producing a new transition matrix. To do so, it assigns all transition probabilities as the given state in the remaining indices binary, with the removed binary in state 0. This is an assumption on the noise made because it is likely that unmeasured qubits will be in that state. :param indices_to_remove: Binary index of state matrix is mapping to be removed. :type indices_to_remove: List[int] :param matrix: Transition matrix where indices correspond to some binary state, to have some dimension removed. :type matrix: np.ndarray :return: Transition matrix with removed entries. :rtype: np.ndarray """ new_n_qubits = int(log2(matrix.shape[0])) - len(indices_to_remove) if new_n_qubits == 0: return np.array([]) bin_map = dict() mat_dim = 1 << new_n_qubits for index in range(mat_dim): # get current binary bina = list(int_to_binary(index, new_n_qubits)) # add 0's to fetch old binary to set values from for i in sorted(indices_to_remove): bina.insert(i, 0) # get index of values bin_map[index] = binary_to_int(tuple(bina)) new_mat = np.zeros((mat_dim,) * 2, dtype=float) for i in range(len(new_mat)): old_row_index = bin_map[i] for j in range(len(new_mat)): old_col_index = bin_map[j] new_mat[i, j] = matrix[old_row_index, old_col_index] return new_mat
8,247
def kdj(df, n=9): """ Stochastic oscillator KDJ N-day RSV = (day-N close - N-day lowest low) / (N-day highest high - N-day lowest low) * 100% Current-day K = 2/3 * previous-day K + 1/3 * current-day RSV = SMA(RSV, M1) Current-day D = 2/3 * previous-day D + 1/3 * current-day K = SMA(K, M2) Current-day J = 3 * current-day K - 2 * current-day D """ _kdj = pd.DataFrame() _kdj['date'] = df['date'] rsv = (df.close - df.low.rolling(n).min()) / (df.high.rolling(n).max() - df.low.rolling(n).min()) * 100 _kdj['k'] = sma(rsv, 3) _kdj['d'] = sma(_kdj.k, 3) _kdj['j'] = 3 * _kdj.k - 2 * _kdj.d return _kdj
8,248
def identity_func(x): """The identity (a.k.a. transparent) function that returns its input as is.""" return x
8,249
def vapp(ctx, operation, vdc, vapp, catalog, template, network, mode, vm_name, cust_file, media, disk_name, count, cpu, ram, ip): """Operations with vApps""" if vdc == '': vdc = ctx.obj['vdc'] vca = _getVCA_vcloud_session(ctx) if not vca: print_error('User not authenticated or token expired', ctx) return if 'list' == operation: headers = ['vApp', "VMs", "Status", "Deployed", "Description"] table = [] the_vdc = vca.get_vdc(vdc) if the_vdc: table1 = [] for entity in the_vdc.get_ResourceEntities().ResourceEntity: if entity.type_ == 'application/vnd.vmware.vcloud.vApp+xml': the_vapp = vca.get_vapp(the_vdc, entity.name) vms = [] if the_vapp and the_vapp.me.Children: for vm in the_vapp.me.Children.Vm: vms.append(vm.name) table1.append([entity.name, _as_list(vms), status_code[the_vapp.me.get_status()](), 'yes' if the_vapp.me.deployed else 'no', the_vapp.me.Description]) table = sorted(table1, key=operator.itemgetter(0), reverse=False) print_table("Available vApps in '%s' for '%s' profile:" % (vdc, ctx.obj['profile']), 'vapps', headers, table, ctx) elif 'create' == operation: for x in xrange(1, count + 1): vapp_name = vapp if count > 1: vapp_name += '-' + str(x) print_message("creating vApp '%s' in VDC '%s'" " from template '%s' in catalog '%s'" % (vapp_name, vdc, template, catalog), ctx) task = vca.create_vapp(vdc, vapp_name, template, catalog, vm_name=vm_name) if task: display_progress(task, ctx, vca.vcloud_session.get_vcloud_headers()) else: ctx.obj['response'] = vca.response print_error("can't create the vApp", ctx) return the_vdc = vca.get_vdc(vdc) the_vapp = vca.get_vapp(the_vdc, vapp_name) if ((vm_name is not None) and ((ctx.obj['service_version'] == "1.0") or (ctx.obj['service_version'] == "1.5") or (ctx.obj['service_version'] == "5.1") or (ctx.obj['service_version'] == "5.5"))): if vm_name is not None: print_message( "setting VM name to '%s'" % (vm_name), ctx) task = the_vapp.modify_vm_name(1, vm_name) if task: display_progress(task, ctx, vca.vcloud_session.get_vcloud_headers()) else: ctx.obj['response'] = the_vapp.response print_error("can't set VM name", ctx) return the_vapp = vca.get_vapp(the_vdc, vapp_name) if vm_name is not None: print_message( "setting computer name for VM '%s'" % (vm_name), ctx) task = the_vapp.customize_guest_os(vm_name, computer_name=vm_name) if task: display_progress(task, ctx, vca.vcloud_session.get_vcloud_headers()) else: ctx.obj['response'] = the_vapp.response print_error("can't set computer name", ctx) the_vapp = vca.get_vapp(the_vdc, vapp_name) if cpu is not None: print_message( "configuring '%s' vCPUs for VM '%s', vApp '%s'" % (cpu, vm_name, vapp_name), ctx) task = the_vapp.modify_vm_cpu(vm_name, cpu) if task: display_progress(task, ctx, vca.vcloud_session.get_vcloud_headers()) else: ctx.obj['response'] = the_vapp.response print_error("can't configure virtual CPUs", ctx) the_vapp = vca.get_vapp(the_vdc, vapp_name) if ram is not None: print_message("configuring '%s' MB of memory" " for VM '%s', vApp '%s'" % (ram, vm_name, vapp_name), ctx) task = the_vapp.modify_vm_memory(vm_name, ram) if task: display_progress(task, ctx, vca.vcloud_session.get_vcloud_headers()) else: ctx.obj['response'] = the_vapp.response print_error("can't configure RAM", ctx) the_vapp = vca.get_vapp(the_vdc, vapp_name) if '' != network: print_message("disconnecting VM from networks" " pre-defined in the template", ctx) task = the_vapp.disconnect_vms() if task: display_progress(task, ctx, vca.vcloud_session.get_vcloud_headers()) else: ctx.obj['response'] = the_vapp.response 
print_error("can't disconnect VM from networks", ctx) return print_message("disconnecting vApp from networks" " pre-defined in the template", ctx) task = the_vapp.disconnect_from_networks() if task: display_progress(task, ctx, vca.vcloud_session.get_vcloud_headers()) else: ctx.obj['response'] = the_vapp.response print_error("can't disconnect vApp from networks", ctx) return nets = filter(lambda n: n.name == network, vca.get_networks(vdc)) if len(nets) == 1: print_message( "connecting vApp to network" " '%s' with mode '%s'" % (network, mode), ctx) task = the_vapp.connect_to_network( nets[0].name, nets[0].href) if task: display_progress( task, ctx, vca.vcloud_session.get_vcloud_headers()) else: ctx.obj['response'] = the_vapp.response print_error("can't connect the vApp to the network", ctx) return print_message("connecting VM to network '%s'" " with mode '%s'" % (network, mode), ctx) task = the_vapp.connect_vms( nets[0].name, connection_index=0, ip_allocation_mode=mode.upper(), mac_address=None, ip_address=ip) if task: display_progress( task, ctx, vca.vcloud_session.get_vcloud_headers()) else: ctx.obj['response'] = the_vapp.response print_error( "can't connect the VM to the network", ctx) elif 'delete' == operation: print("deleting vApp '%s' from VDC '%s'" % (vapp, vdc)) task = vca.delete_vapp(vdc, vapp) if task: display_progress(task, ctx, vca.vcloud_session.get_vcloud_headers()) else: ctx.obj['response'] = vca.response print_error("can't delete vApp", ctx) elif 'deploy' == operation: print("deploying vApp '%s' to VDC '%s'" % (vapp, vdc)) the_vdc = vca.get_vdc(vdc) the_vapp = vca.get_vapp(the_vdc, vapp) task = the_vapp.deploy() if task: display_progress(task, ctx, vca.vcloud_session.get_vcloud_headers()) else: ctx.obj['response'] = the_vapp.response print_error("can't deploy vApp", ctx) elif 'undeploy' == operation: print_message("undeploying vApp '%s' from VDC '%s'" % (vapp, vdc), ctx) the_vdc = vca.get_vdc(vdc) the_vapp = vca.get_vapp(the_vdc, vapp) task = the_vapp.undeploy() if task: display_progress(task, ctx, vca.vcloud_session.get_vcloud_headers()) else: ctx.obj['response'] = the_vapp.response print_error("can't undeploy vApp", ctx) elif 'customize' == operation: print("customizing VM '%s'" "in vApp '%s' in VDC '%s'" % (vm_name, vapp, vdc)) the_vdc = vca.get_vdc(vdc) the_vapp = vca.get_vapp(the_vdc, vapp) if the_vdc and the_vapp and cust_file: print_message("uploading customization script", ctx) task = the_vapp.customize_guest_os(vm_name, cust_file.read()) if task: display_progress(task, ctx, vca.vcloud_session.get_vcloud_headers()) print_message("deploying and starting the vApp", ctx) task = the_vapp.force_customization(vm_name) if task: display_progress(task, ctx, vca.vcloud_session.get_vcloud_headers()) else: ctx.obj['response'] = the_vapp.response print_error("can't customize vApp", ctx) else: ctx.obj['response'] = the_vapp.response print_error("can't customize vApp", ctx) elif ('info' == operation or 'power.off' == operation or 'power.on' == operation or 'delete' == operation): the_vdc = vca.get_vdc(vdc) if the_vdc: the_vapp = vca.get_vapp(the_vdc, vapp) if the_vapp and the_vapp.me: if 'info' == operation: print_vapp_details(ctx, the_vapp) else: task = None if 'power.on' == operation: task = the_vapp.poweron() elif 'power.off' == operation: task = the_vapp.poweroff() elif 'delete' == operation: task = the_vapp.delete() if task: display_progress( task, ctx, vca.vcloud_session.get_vcloud_headers()) else: ctx.obj['response'] = the_vapp.response print_error("can't operate with the 
vApp", ctx) global checker_status checker_status = False _save_property(ctx.obj['profile'], 'vdc', vdc) else: ctx.obj['response'] = vca.response print_error("vApp '%s' not found" % vapp, ctx) elif 'insert' == operation or 'eject' == operation: the_vdc = vca.get_vdc(vdc) if the_vdc: the_vapp = vca.get_vapp(the_vdc, vapp) if the_vapp: the_media = vca.get_media(catalog, media) task = the_vapp.vm_media(vm_name, the_media, operation) if task: display_progress(task, ctx, vca.vcloud_session.get_vcloud_headers()) else: ctx.obj['response'] = the_vapp.response print_error("can't insert or eject media", ctx) elif 'connect' == operation: print_message("connecting vApp '%s', VM '%s' in VDC '%s' to network '%s'" % (vapp, vm_name, vdc, network), ctx) elif 'disconnect' == operation: print_message("disconnecting vApp '%s', VM '%s' in VDC '%s' from network '%s'" % (vapp, vm_name, vdc, network), ctx) # if '' != network: # print_message("disconnecting vApp from networks" # " pre-defined in the template", ctx) # task = the_vapp.disconnect_from_networks() # if task: # display_progress(task, ctx, # vca.vcloud_session.get_vcloud_headers()) # else: # ctx.obj['response'] = the_vapp.response # print_error("can't disconnect vApp from networks", ctx) elif 'attach' == operation or 'detach' == operation: the_vdc = vca.get_vdc(vdc) if the_vdc: the_vapp = vca.get_vapp(the_vdc, vapp) if the_vapp: link = filter(lambda link: link.get_name() == disk_name, vca.get_diskRefs(the_vdc)) if len(link) == 1: if 'attach' == operation: task = the_vapp.attach_disk_to_vm(vm_name, link[0]) else: task = the_vapp.detach_disk_from_vm(vm_name, link[0]) if task: display_progress( task, ctx, vca.vcloud_session.get_vcloud_headers()) else: ctx.obj['response'] = the_vapp.response print_error("can't attach or detach disk", ctx) elif len(link) == 0: print_error("disk not found", ctx) elif len(link) > 1: print_error( "more than one disk found with the same name", ctx) else: print_message('not implemented', ctx)
8,250
def make_choice_validator( choices, default_key=None, normalizer=None): """ Returns a callable that accepts the choices provided. Choices should be provided as a list of 2-tuples, where the first element is a string that should match user input (the key); the second being the value associated with the key. The callable by default will match, upon complete match the first value associated with the result will be returned. Partial matches are supported. If a default is provided, that value will be returned if the user provided input is empty, i.e. the value that is mapped to the empty string. Finally, a normalizer function can be passed. This normalizes all keys and validation value. """ def normalize_all(_choices): # normalize all the keys for easier comparison if normalizer: _choices = [(normalizer(key), value) for key, value in choices] return _choices choices = normalize_all(choices) def choice_validator(value): if normalizer: value = normalizer(value) if not value and default_key: value = choices[default_key][0] results = [] for choice, mapped in choices: if value == choice: return mapped if choice.startswith(value): results.append((choice, mapped)) if len(results) == 1: return results[0][1] elif not results: raise ValueError('Invalid choice.') else: raise ValueError( 'Choice ambiguous between (%s)' % ', '.join( k for k, v in normalize_all(results)) ) return choice_validator
8,251
def main(): """ Simple tester for the classifier """ # define the sample data sd = SampleData(2, 5, [-3, 3], [0.1, 0.1]) # initialize a new classifier with some sample data clf = Classifier(sd.get_points(500)) # sample some points and see how many the classifier classifies correctly sample_size = 50; correct = 0 for k in range(0, sample_size): p, i = sd.get_points(1); i = i[0] if clf.classify(p) == i: correct += 1 print("Classified", correct, "of", sample_size, "correctly.")
8,252
def find_best_polycomp_parameters(samples, num_of_coefficients_range, samples_per_chunk_range, max_error, algorithm, delta_coeffs=1, delta_samples=1, period=None, callback=None, max_iterations=0): """Performs an optimized search of the best configuration in the parameter space given by "num_of_coefficients_space" and "samples_per_chunk_space".""" optimization_start_time = time.clock() x_range = num_of_coefficients_range y_range = samples_per_chunk_range midpoint_x, midpoint_y = [int(np.mean(k)) for k in (x_range, y_range)] param_points = PointCache(samples=samples, max_allowable_error=max_error, algorithm=algorithm, period=period) # The logic of this code is the following: # # 1. Start from a point (x, y) # 2. Sample the point and all its neighbours # 3. Move to the best point among the nine that have been sampled # 4. Repeat from point 2. until the best point is the current one # # Many points will be sampled more than once, but we use a # "PointCache" object to do all the sampling, so that only newer # points need to be recalculated every time. num_of_steps = 1 dx = delta_coeffs dy = delta_samples while True: ring_of_points = [(-dx, -dy), (0, -dy), (dx, -dy), (-dx, 0), (0, 0), (dx, 0), (-dx, dy), (0, dy), (dx, dy)] ring_of_configurations = [] for dx, dy in ring_of_points: cur_x = midpoint_x + dx cur_y = midpoint_y + dy if cur_x < x_range[0] or cur_x > x_range[1]: continue if cur_y < y_range[0] or cur_y > y_range[1]: continue chunks, params = param_points.get_point(cur_x, cur_y) if callback is not None: callback(cur_x, cur_y, params, num_of_steps) ring_of_configurations.append((cur_x, cur_y, chunks, params)) ring_of_configurations.sort(key=lambda p: p[3].compr_data_size) best_x, best_y, best_chunks, best_params = ring_of_configurations[0] # If we have ran too much iterations, stop bothering and exit the loop num_of_steps += 1 if (max_iterations > 0) and num_of_steps > max_iterations: break # If we're centered on the best value, let's explore a # narrower space around it if (best_x, best_y) == (midpoint_x, midpoint_y): repeat = False # Can the ring be shrunk any further? If so, shrink it and # keep iterating if (dx > 1) or (dy > 1): # If dx == dy, we prefer to reduce dy first if dy > dx: dy = dy // 2 else: dx = dx // 2 repeat = True if repeat: continue else: break midpoint_x, midpoint_y = best_x, best_y return (best_params, list(param_points.parameter_space.values()), num_of_steps)
8,253
def moving_average(data, window_size=100): #used this approach https://stackoverflow.com/questions/11352047/finding-moving-average-from-data-points-in-python """ Calculates a moving average for all the data Args: data: set of values window_size: number of data points to consider in window Returns: Moving average of the data """ cumsum_vec = np.cumsum(np.insert(data, 0, 0)) ma_vec = (cumsum_vec[window_size:] - cumsum_vec[:-window_size]) / window_size return ma_vec
8,254
def _change_TRAVDV_to_TRAVdashDV(s:str): """ Reconciles mixcr name like TRAV29/DV5*01 to tcrdist2 name TRAV29DV5*01 Parameters ---------- s : str Examples -------- >>> _change_TRAVDV_to_TRAVdashDV('TRAV29DV5*01') 'TRAV29/DV5*01' >>> _change_TRAVDV_to_TRAVdashDV('TRAV38-2DV8*01') 'TRAV38-2/DV8*01' >>> _change_TRAVDV_to_TRAVdashDV('TRDV*01') 'TRDV*01' Notes ----- This reconciles such gene names to match the tcrdist2 reference db. see database for more details: repertoire_db.RefGeneSet(db_file = "gammadelta_db.tsv").all_genes """ if isinstance(s, str): m = re.match(pattern = "(TRAV[0-9]+)(DV.*)", string = s) m2 = re.match(pattern = "(TRAV[0-9]+-[1-2])(DV.*)", string = s) if m: new_s = "/".join(m.groups()) return(new_s) elif m2: new_s = "/".join(m2.groups()) return(new_s) else: return(s) else: return(np.NaN)
8,255
def fixMayapy2011SegFault(): """ # Have all the checks inside here, in case people want to insert this in their # userSetup... it's currently not always on """ pass
8,256
def gen_event_type_entry_str(event_type_name, event_type, event_config): """ return string like: {"cpu-cycles", PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES}, """ return '{"%s", %s, %s},\n' % (event_type_name, event_type, event_config)
8,257
def encode_rotate_authentication_key_script(new_key: bytes) -> Script: """# Summary Rotates the transaction sender's authentication key to the supplied new authentication key. May be sent by any account. # Technical Description Rotate the `account`'s `DiemAccount::DiemAccount` `authentication_key` field to `new_key`. `new_key` must be a valid ed25519 public key, and `account` must not have previously delegated its `DiemAccount::KeyRotationCapability`. # Parameters | Name | Type | Description | | ------ | ------ | ------------- | | `account` | `&signer` | Signer reference of the sending account of the transaction. | | `new_key` | `vector<u8>` | New ed25519 public key to be used for `account`. | # Common Abort Conditions | Error Category | Error Reason | Description | | ---------------- | -------------- | ------------- | | `Errors::INVALID_STATE` | `DiemAccount::EKEY_ROTATION_CAPABILITY_ALREADY_EXTRACTED` | `account` has already delegated/extracted its `DiemAccount::KeyRotationCapability`. | | `Errors::INVALID_ARGUMENT` | `DiemAccount::EMALFORMED_AUTHENTICATION_KEY` | `new_key` was an invalid length. | # Related Scripts * `Script::rotate_authentication_key_with_nonce` * `Script::rotate_authentication_key_with_nonce_admin` * `Script::rotate_authentication_key_with_recovery_address` """ return Script( code=ROTATE_AUTHENTICATION_KEY_CODE, ty_args=[], args=[TransactionArgument__U8Vector(value=new_key)], )
8,258
def retrieval_visualizations(model, savefig=True): """ Plots incremental retrieval contexts and supports, as heatmaps, and prints recalled items. **Required model attributes**: - item_count: specifies number of items encoded into memory - context: vector representing an internal contextual state - experience: adding a new trace to the memory model - activations: function returning item activations given a vector probe - outcome_probabilities: function returning item supports given a set of activations **Also** uses savefig: boolean deciding whether figures are saved (True) or displayed """ retrieval_contexts, retrieval_supports, recall = retrieval_states(model) plot_states(retrieval_contexts, 'Retrieval Contexts', savefig=savefig) plot_states(retrieval_supports, 'Supports For Each Item At Each Increment of Retrieval', savefig=savefig) return recall
8,259
def save_image(image, path, filename, format):
    """Saves the image file in the specified format to the specified path

    Args:
        image (np.array): The image data
        path (str): The path to the folder in which to save
        filename (str): the name of the image file
        format (str): The format in which to save the image
    """
    supported_extensions = ['jpeg', 'jpg', 'png']
    if format.lower() in supported_extensions:
        if os.path.exists(path):
            path_to_file = os.path.join(path, f'{filename}.{format}')
            try:
                image.save(path_to_file)
            except Exception:
                raise Exception("Couldn't write to image")
        else:
            raise OSError(f"the path {path} doesn't exist")
    else:
        raise Exception(f'Invalid file extension {format}')
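# Minimal usage sketch (not from the original source). Since the function calls image.save,
# the image is assumed to be a Pillow Image; the output folder must already exist.
import os
from PIL import Image

img = Image.new('RGB', (64, 64), color='red')   # placeholder example image
os.makedirs('output', exist_ok=True)
save_image(img, 'output', 'red_square', 'png')  # writes output/red_square.png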
8,260
def get_force_charge() -> str:
    """
    Gets the command object for the force charge command

    Returns:
        The command object as a json string
    """
    force_charge = Path('force_charge.json').read_text()
    return force_charge
8,261
def start(update, context):
    """Send a message when the command /start is issued."""
    context.bot.send_message(chat_id=update.effective_chat.id, text=TEXT + '')
8,262
def asin(x: float32) -> float:
    """
    Return arcsin of x in radians. Inputs are automatically clamped to [-1.0, 1.0].
    """
    ...
8,263
def prune_motifs(ts, sorted_dic_list, r):
    """
    :param ts: 1-dimensional time-series either resulting from the PCA method or the original 1-dimensional time-series
    :type ts: 1d array
    :param sorted_dic_list: list of motif dictionaries returned from the emd algorithm, ordered by relevance
    :type sorted_dic_list: list of dic
    :param r: maximum distance to the center of the motif
    :type r: float
    :return: list of dictionaries with the most relevant motifs
    :rtype: list of dic
    """
    pruned_motif_dic_list = [sorted_dic_list[0]]
    first_center_ts = extract_ts_from_pointers(ts, sorted_dic_list[0]['center_ts_pointers'])
    pruned_center_ts_list = [first_center_ts]
    for motif_dic in sorted_dic_list[1:]:
        cur_center_ts = extract_ts_from_pointers(ts, motif_dic['center_ts_pointers'])
        dist_list = dtwdist.compute_dwt_dist_between_ts_and_list(cur_center_ts, pruned_center_ts_list, 2 * r)
        dist_test_list = [dist <= 2 * r for dist in dist_list]
        if sum(dist_test_list) == 0:
            pruned_motif_dic_list.append(motif_dic)
            pruned_center_ts_list.append(cur_center_ts)
        else:
            continue
    return pruned_motif_dic_list
8,264
def xor_columns(col, parity):
    """ XOR a column with the parity values from the state """
    result = []
    for i in range(len(col)):
        result.append(col[i] ^ parity[i])
    return result
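# Minimal usage sketch (not from the original source) with two equal-length bit lists.
col = [1, 0, 1, 1]
parity = [0, 0, 1, 0]
print(xor_columns(col, parity))  # [1, 0, 0, 1]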
8,265
def initiate_default_resource_metadata(aws_resource):
    """
    :type aws_resource: BaseAWSObject
    """
    if not isinstance(aws_resource, BaseAWSObject):
        raise TypeError

    try:
        metadata = aws_resource.Metadata
        if not isinstance(metadata, dict):
            raise TypeError("`troposphere.BaseAWSObject.Metadata` is not a dict!")
    except Exception as e:
        if "is not a dict!" in str(e):
            raise e
        metadata = {}

    metadata.setdefault(TROPOSPHERE_METADATA_FIELD_NAME, {})
    aws_resource.Metadata = metadata
    return metadata
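# Minimal usage sketch (not from the original source). TROPOSPHERE_METADATA_FIELD_NAME is a
# module constant defined elsewhere; the bucket name and the "owner" field are illustrative.
from troposphere import s3

bucket = s3.Bucket("MyBucket")
metadata = initiate_default_resource_metadata(bucket)
metadata[TROPOSPHERE_METADATA_FIELD_NAME]["owner"] = "data-team"  # hypothetical metadata entry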
8,266
def token_vault_single(chain, team_multisig, token, freeze_ends_at, token_vault_balances) -> Contract:
    """Another token vault deployment with a single customer."""
    total = 1000

    args = [
        team_multisig,
        freeze_ends_at,
        token.address,
        total,
        0  # Disable the tap
    ]
    contract, hash = chain.provider.deploy_contract('TokenVault', deploy_args=args)
    return contract
8,267
def test_namechooser__NameSuffix__1():
    """`NameSuffix` conforms to INameChooser."""
    assert verifyObject(INameSuffix, NameSuffix())
8,268
def pixel_pick():
    """Pick the value from a pixel.

    Args:
        body parameters:
            catalog (str): catalog to query
            asset_id (str): asset id to query
            lng (float): longitude coordinate
            lat (float): latitude coordinate

    Returns:
        {'val': val, 'x': x, 'y': y} if the pixel is in the valid range,
        otherwise {'val': 'out of range', 'x': x, 'y': y}
    """
    try:
        picker_data = json.loads(flask.request.get_data())
        LOGGER.debug(str(picker_data))
        catalog_entry = queries.find_catalog_by_id(
            picker_data["catalog"], picker_data["asset_id"])
        r = gdal.OpenEx(catalog_entry.local_path, gdal.OF_RASTER)
        b = r.GetRasterBand(1)
        gt = r.GetGeoTransform()
        inv_gt = gdal.InvGeoTransform(gt)

        # transform lat/lng to raster coordinate space
        wgs84_srs = osr.SpatialReference()
        wgs84_srs.ImportFromEPSG(4326)
        raster_srs = osr.SpatialReference()
        raster_srs.ImportFromWkt(r.GetProjection())

        # put in x/y order
        raster_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
        wgs84_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)

        # Create a coordinate transformation
        wgs84_to_raster_trans = osr.CoordinateTransformation(wgs84_srs, raster_srs)
        point = ogr.Geometry(ogr.wkbPoint)
        point.AddPoint(picker_data['lng'], picker_data['lat'])
        error_code = point.Transform(wgs84_to_raster_trans)
        if error_code != 0:  # error
            return "error on transform", 500

        # convert to raster space
        x_coord, y_coord = [
            int(p) for p in gdal.ApplyGeoTransform(
                inv_gt, point.GetX(), point.GetY())]
        if (x_coord < 0 or y_coord < 0 or
                x_coord >= b.XSize or y_coord >= b.YSize):
            response_dict = {
                'val': 'out of range',
                'x': x_coord,
                'y': y_coord
            }
        else:
            # must cast the right type for json
            val = r.ReadAsArray(x_coord, y_coord, 1, 1)[0, 0]
            if numpy.issubdtype(val, numpy.integer):
                val = int(val)
            else:
                val = float(val)

            # create the response
            response_dict = {
                'val': val,
                'x': x_coord,
                'y': y_coord
            }

            # and replace with no-data if set
            nodata = b.GetNoDataValue()
            if nodata is not None:
                if numpy.isclose(val, nodata):
                    response_dict['val'] = 'nodata'
        response = flask.jsonify(response_dict)
        response.headers.add('Access-Control-Allow-Origin', '*')
        return response
    except Exception as e:
        LOGGER.exception('something bad happened')
        return str(e), 500
8,269
def read_eep_track(fp, colnames=None):
    """ read MIST eep tracks """
    # read lines
    f = open(fp, "r+")
    s = f.readlines()

    # get info
    MIST_version = re.split(r"\s+", s[0].strip())[-1]
    MESA_revision = re.split(r"\s*", s[1].strip())[-1]

    Yinit, Zinit, FeH, aFe, vvcrit = re.split(r"\s*", s[4].strip())[1:]
    Yinit = np.float(Yinit)
    Zinit = np.float(Zinit)
    FeH = np.float(FeH)
    aFe = np.float(aFe)
    vvcrit = np.float(vvcrit)

    initial_mass, N_pts, N_EEP, N_col, phase, type_ = \
        re.split(r"\s*", s[7].strip())[1:]
    initial_mass = np.float(initial_mass)
    N_pts = np.int(N_pts)
    N_EEP = np.int(N_EEP)
    N_col = np.int(N_col)

    # get eep info
    EEPs = [np.int(_) for _ in re.split(r"\s+", s[8].strip())[2:]]
    eep = np.arange(EEPs[0], EEPs[-1] + 1)

    # read table and add columns
    t = Table.read(s[11:], format="ascii.commented_header")
    # _eep
    t.add_column(Column(eep, "_eep"))
    # _lgmass
    t.add_column(Column(np.ones(len(t), ) * np.log10(initial_mass), "_lgmass"))
    # _lgage
    t.add_column(Column(np.log10(t["star_age"].data), "_lgage"))
    # _feh
    t.add_column(Column(np.ones(len(t), ) * FeH, "_feh"))

    # add meta info
    meta = OrderedDict(
        MIST_version=MIST_version,
        MESA_revision=MESA_revision,
        Yinit=Yinit,
        Zinit=Zinit,
        FeH=FeH,
        aFe=aFe,
        vvcrit=vvcrit,
        initial_mass=initial_mass,
        N_pts=N_pts,
        N_EEP=N_EEP,
        N_col=N_col,
        phase=phase,
        type_=type_,
        EEPs=EEPs,
        INTERP=("_INTERP" in fp)
    )
    t.meta = meta

    if colnames is None:
        return t
    else:
        for colname in colnames:
            try:
                assert colname in t.colnames
            except AssertionError as ae:
                raise(ae("{} not in track.colnames!!!".format(colname)))
        return t
8,270
def fontwidth(string, font='sans-serif'):
    """Function: Returns the px width of a string assuming a base size of 16px."""
    _fontwidth = json.load(open(os.path.join(abs_path(), 'fonts.json'), encoding='utf-8'))
    codes_len = 127
    default_width = 32
    default_width_idx = 120
    for _fontrow in _fontwidth:
        # use numpy directly instead of the removed pandas `pd.np` alias
        _fontrow['widths'] = np.array(_fontrow['widths'], dtype=float)
        _fontrow['widths'] = np.insert(_fontrow['widths'], 0, np.zeros(default_width))

    # Add the first font stack at the end, making it the default
    _fontwidth.append(_fontwidth[0])

    # Convert all characters to ASCII codes. Treat Unicode as single char
    # (frombuffer replaces the deprecated fromstring for binary data)
    codes = np.frombuffer(string.encode('ascii', 'replace'), dtype=np.uint8)
    # Drop everything that's out of bounds. We'll adjust for them later
    valid = codes[codes < codes_len]
    # Get the font
    for row in _fontwidth:
        if font in row['family']:
            break
    # Compute and return the width, defaulting unknowns to 'x' (char 120)
    widths = row['widths']
    return widths[valid].sum() + widths[default_width_idx] * (len(codes) - len(valid))
8,271
def create_model():
    """ResNet34 inspired analog model.

    Returns:
        nn.Modules: created model
    """
    block_per_layers = (3, 4, 6, 3)
    base_channel = 16
    channel = (base_channel, 2 * base_channel, 4 * base_channel)

    l0 = nn.Sequential(
        nn.Conv2d(3, channel[0], kernel_size=3, stride=1, padding=1),
        nn.BatchNorm2d(channel[0]),
        nn.ReLU()
    )

    l1 = nn.Sequential(*concatenate_layer_blocks(channel[0], channel[0], block_per_layers[0],
                                                 first_layer=True))
    l2 = nn.Sequential(*concatenate_layer_blocks(channel[0], channel[1], block_per_layers[1]))
    l3 = nn.Sequential(*concatenate_layer_blocks(channel[1], channel[2], block_per_layers[2]))

    l4 = nn.Sequential(
        nn.AdaptiveAvgPool2d((1, 1)),
        nn.Flatten(),
        nn.Linear(channel[2], N_CLASSES)
    )

    return nn.Sequential(l0, l1, l2, l3, l4)
8,272
def kv_get(key: Union[str, bytes], *, namespace: Optional[str] = None) -> bytes:
    """Fetch the value of a binary key."""
    if isinstance(key, str):
        key = key.encode()
    assert isinstance(key, bytes)
    return global_state_client.kv_get(key, namespace)
8,273
def mode(dev, target):
    """Gets or sets the active mode."""
    click.echo("Current mode: %s" % dev.mode_readable)
    if target:
        click.echo("Setting mode: %s" % target)
        dev.mode = target
8,274
def decrypt_file(key: bytes, infile: Path, outfile: Path):
    """Decrypt infile and save as outfile"""
    plaintext = read_decrypted(key, infile)
    outfile.open('wb').write(plaintext)
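# Minimal usage sketch (not from the original source). read_decrypted and the expected key
# format are defined elsewhere in the module; the all-zero 32-byte key is only a placeholder.
from pathlib import Path

key = bytes(32)  # placeholder key; real code would load a proper secret
decrypt_file(key, Path('secret.bin'), Path('secret.txt'))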
8,275
def construct_model(data, local_settings, covariate_multipliers, covariate_data_spec):
    """Makes a Cascade model from EpiViz-AT settings and data.

    Args:
        data: An object with both ``age_specific_death_rate`` and ``locations``.
        local_settings: A settings object from ``cascade_plan``.
        covariate_multipliers (List[EpiVizCovariateMultiplier]): descriptions of
            covariate multipliers.
        covariate_data_spec (List[EpiVizCovariate]): the covariates themselves.
            Some covariates aren't used by covariate multipliers but are included
            to calculate hold outs.

    Returns:
        cascade.model.Model: The model to fit.
    """
    ev_settings = local_settings.settings
    parent_location_id = local_settings.parent_location_id
    default_age_time = dict()
    default_age_time["age"] = np.linspace(0, 100, 21)
    default_age_time["time"] = np.linspace(1990, 2015, 6)
    for kind in ["age", "time"]:
        default_grid = getattr(ev_settings.model, f"default_{kind}_grid")
        if default_grid is not None:
            default_age_time[kind] = np.sort(np.array(default_grid, dtype=np.float))

    # Use this age and time when a smooth grid doesn't depend on age and time.
    single_age = default_age_time["age"][:1]
    single_time = [default_age_time["time"][len(default_age_time["time"]) // 2]]
    single_age_time = (single_age, single_time)

    nonzero_rates = [smooth.rate for smooth in ev_settings.rate]

    children = list(data.locations.successors(parent_location_id))
    model = Model(
        nonzero_rates=nonzero_rates,
        parent_location=parent_location_id,
        child_location=children,
        covariates=covariates_list(covariate_data_spec),
        weights=None,
    )

    construct_model_rates(default_age_time, single_age_time, ev_settings, model)
    # No random effects if there is only one child.
    if children and len(children) > 1:
        construct_model_random_effects(default_age_time, single_age_time, ev_settings, model)
    construct_model_covariates(default_age_time, single_age_time, covariate_multipliers, model)

    asdr = data.age_specific_death_rate
    if ev_settings.model.constrain_omega:
        constrain_omega(
            default_age_time, asdr, ev_settings, model, parent_location_id, children
        )

    return model
8,276
def test_cli_config_template(cli):
    """Verify the --template option works correctly."""
    filename = 'build-magic_template.yaml'
    current = Path().cwd().resolve()
    res = cli.invoke(build_magic, ['--template'])
    assert current.joinpath(filename).exists()
    os.remove(filename)
    assert res.exit_code == ExitCode.PASSED
8,277
def active():
    """Print currently active element to screen."""
    o(driver.switch_to.active_element.text.split("\n")[0])
8,278
def test_owe_unsupported_group(dev, apdev):
    """Opportunistic Wireless Encryption and unsupported group"""
    try:
        run_owe_unsupported_group(dev, apdev)
    finally:
        dev[0].request("VENDOR_ELEM_REMOVE 13 *")
8,279
def set_off():
    """ Turns OFF the lamp. """
    unicorn.set_status(False)
    return OK
8,280
def get_all_active_bets():
    """
    Gets all the active bets for all active discord ritoman users
    """
    return session.query(LoLBets).filter(LoLBets.completed == false()).all()
8,281
def get_quantize_pos_min_diffs(inputs, f_min, f_max, q_min, q_max, bit_width):
    """Get quantize pos which makes min difference between float and quantized."""
    with tf.name_scope("GetQuantizePosMinDiffs"):
        min_scale_inv = tf.math.divide(f_min, q_min)
        max_scale_inv = tf.math.divide(f_max, q_max)
        float_scale_inv = tf.math.maximum(min_scale_inv, max_scale_inv)
        non_overflow_pos = get_quantize_pos_non_overflow(inputs, f_min, f_max, q_min, q_max)

        def calc_pos():
            diffs = []
            for i in range(5):
                with tf.name_scope("FakeQuantizeWithScale_{}".format(i)):
                    # fake quantize
                    scale = tf.math.pow(2.0, non_overflow_pos + i, name="scale")
                    quantized = dpu_symmetry_quantize(inputs, scale, q_min, q_max)
                    dequantized = dpu_symmetry_dequantize(quantized, scale, q_min, q_max)
                    diff = tf.pow(inputs - dequantized, 2)
                    diff = tf.reduce_sum(diff)
                    diffs.append(diff)
            pos_offset = tf.argmin(diffs)
            quantize_pos = non_overflow_pos + tf.cast(pos_offset, tf.float32)
            return quantize_pos

        return tf.cond(float_scale_inv < 1e-9, lambda: 127.0, calc_pos)
8,282
def save_color_map(map_name, output_path):
    """
    Gets 256 colors from the color map and saves the [0, 1] RGB values of those colors
    to the corresponding tab-separated file in the output_path.
    """
    file_name = os.path.join(output_path, f'{map_name}.tsv')
    cmap = plt.get_cmap(map_name)
    colors = None
    if isinstance(cmap, LinearSegmentedColormap):
        if cmap.N == 256:
            colors = cmap(np.arange(0, 256))[:, :3]
    elif isinstance(cmap, ListedColormap):
        colors = cmap.colors
    if colors is not None:
        df = pd.DataFrame(columns=['R', 'G', 'B'], data=colors)
        df.to_csv(file_name, sep='\t', index=False)
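# Minimal usage sketch (not from the original source) writing the 'viridis' listed colormap
# to a temporary directory.
import tempfile

out_dir = tempfile.mkdtemp()
save_color_map('viridis', out_dir)   # writes <out_dir>/viridis.tsv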
8,283
def _non_string_elements(x):
    """
    Simple helper to check that all values of x are string. Returns all non string elements as (position, element).
    :param x: Iterable
    :return: [(int, !String), ...]
    """
    problems = []
    for i in range(0, len(x)):
        if not isinstance(x[i], str):
            problems.append((i, x[i]))
    return problems
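# Minimal usage sketch (not from the original source).
print(_non_string_elements(["a", 3, "b", None]))  # [(1, 3), (3, None)]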
8,284
def configure_dirs(base_path: str, config_name: str, dataset_name: str) -> str:
    """
    Performs configuration of directories for storing vectors
    :param base_path:
    :param config_name:
    :param dataset_name:
    :return: Full configuration path
    """
    base_path = Path(base_path)
    base_path.mkdir(exist_ok=True)
    full_path = base_path / config_name
    full_path.mkdir(exist_ok=True)
    full_path = full_path / dataset_name
    full_path.mkdir(exist_ok=True)
    return str(full_path)
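# Minimal usage sketch (not from the original source); the directory names are illustrative.
path = configure_dirs("vectors", "bert-base", "imdb")
print(path)  # vectors/bert-base/imdb -- all three directories now exist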
8,285
def test_wet_bulb_temperature_1d():
    """Test wet bulb calculation with 1d list."""
    pressures = [1013, 1000, 990] * units.hPa
    temperatures = [25, 20, 15] * units.degC
    dewpoints = [20, 15, 10] * units.degC
    val = wet_bulb_temperature(pressures, temperatures, dewpoints)
    truth = [21.44487, 16.73673, 12.06554] * units.degC  # 21.58, 16.86, 12.18 from NWS Calculator
    assert_array_almost_equal(val, truth, 5)
8,286
def generate_output_file(final_model, out_name):
    """
    This function takes as input both the final model created with the building algorithm and
    the output filename given by the user (if not defined, it is "macrocomplex" by default).
    Eventually, it returns the file saved in either ".pdb" or ".mmcif" format.
    """
    out_name = str(out_name.strip())

    # If the output file is too big, we save it in ".mmcif" format
    if len(list(final_model[0].get_atoms())) > 99999 or len(list(final_model[0].get_chains())) > 62:
        mmcif_IO = MMCIFIO()
        mmcif_IO.set_structure(final_model[0])
        mmcif_IO.save(out_name + ".cif")
    # Otherwise, save it in ".pdb" format
    else:
        pdb_IO = PDBIO()
        pdb_IO.set_structure(final_model[0])
        pdb_IO.save(out_name + ".pdb")
8,287
def config_entry_version_fixture():
    """Define a config entry version fixture."""
    return 2
8,288
def edge_dfs(G, source=None, orientation=None): """A directed, depth-first-search of edges in `G`, beginning at `source`. Yield the edges of G in a depth-first-search order continuing until all edges are generated. Parameters ---------- G : graph A directed/undirected graph/multigraph. source : node, list of nodes The node from which the traversal begins. If None, then a source is chosen arbitrarily and repeatedly until all edges from each node in the graph are searched. orientation : None | 'original' | 'reverse' | 'ignore' (default: None) For directed graphs and directed multigraphs, edge traversals need not respect the original orientation of the edges. When set to 'reverse' every edge is traversed in the reverse direction. When set to 'ignore', every edge is treated as undirected. When set to 'original', every edge is treated as directed. In all three cases, the yielded edge tuples add a last entry to indicate the direction in which that edge was traversed. If orientation is None, the yielded edge has no direction indicated. The direction is respected, but not reported. Yields ------ edge : directed edge A directed edge indicating the path taken by the depth-first traversal. For graphs, `edge` is of the form `(u, v)` where `u` and `v` are the tail and head of the edge as determined by the traversal. For multigraphs, `edge` is of the form `(u, v, key)`, where `key` is the key of the edge. When the graph is directed, then `u` and `v` are always in the order of the actual directed edge. If orientation is not None then the edge tuple is extended to include the direction of traversal ('forward' or 'reverse') on that edge. Examples -------- >>> nodes = [0, 1, 2, 3] >>> edges = [(0, 1), (1, 0), (1, 0), (2, 1), (3, 1)] >>> list(nx.edge_dfs(nx.Graph(edges), nodes)) [(0, 1), (1, 2), (1, 3)] >>> list(nx.edge_dfs(nx.DiGraph(edges), nodes)) [(0, 1), (1, 0), (2, 1), (3, 1)] >>> list(nx.edge_dfs(nx.MultiGraph(edges), nodes)) [(0, 1, 0), (1, 0, 1), (0, 1, 2), (1, 2, 0), (1, 3, 0)] >>> list(nx.edge_dfs(nx.MultiDiGraph(edges), nodes)) [(0, 1, 0), (1, 0, 0), (1, 0, 1), (2, 1, 0), (3, 1, 0)] >>> list(nx.edge_dfs(nx.DiGraph(edges), nodes, orientation="ignore")) [(0, 1, 'forward'), (1, 0, 'forward'), (2, 1, 'reverse'), (3, 1, 'reverse')] >>> list(nx.edge_dfs(nx.MultiDiGraph(edges), nodes, orientation="ignore")) [(0, 1, 0, 'forward'), (1, 0, 0, 'forward'), (1, 0, 1, 'reverse'), (2, 1, 0, 'reverse'), (3, 1, 0, 'reverse')] Notes ----- The goal of this function is to visit edges. It differs from the more familiar depth-first traversal of nodes, as provided by :func:`~networkx.algorithms.traversal.depth_first_search.dfs_edges`, in that it does not stop once every node has been visited. In a directed graph with edges [(0, 1), (1, 2), (2, 1)], the edge (2, 1) would not be visited if not for the functionality provided by this function. 
See Also -------- :func:`~networkx.algorithms.traversal.depth_first_search.dfs_edges` """ nodes = list(G.nbunch_iter(source)) if not nodes: return directed = G.is_directed() kwds = {"data": False} if G.is_multigraph() is True: kwds["keys"] = True # set up edge lookup if orientation is None: def edges_from(node): return iter(G.edges(node, **kwds)) elif not directed or orientation == "original": def edges_from(node): for e in G.edges(node, **kwds): yield e + (FORWARD,) elif orientation == "reverse": def edges_from(node): for e in G.in_edges(node, **kwds): yield e + (REVERSE,) elif orientation == "ignore": def edges_from(node): for e in G.edges(node, **kwds): yield e + (FORWARD,) for e in G.in_edges(node, **kwds): yield e + (REVERSE,) else: raise nx.NetworkXError("invalid orientation argument.") # set up formation of edge_id to easily look up if edge already returned if directed: def edge_id(edge): # remove direction indicator return edge[:-1] if orientation is not None else edge else: def edge_id(edge): # single id for undirected requires frozenset on nodes return (frozenset(edge[:2]),) + edge[2:] # Basic setup check_reverse = directed and orientation in ("reverse", "ignore") visited_edges = set() visited_nodes = set() edges = {} # start DFS for start_node in nodes: stack = [start_node] while stack: current_node = stack[-1] if current_node not in visited_nodes: edges[current_node] = edges_from(current_node) visited_nodes.add(current_node) try: edge = next(edges[current_node]) except StopIteration: # No more edges from the current node. stack.pop() else: edgeid = edge_id(edge) if edgeid not in visited_edges: visited_edges.add(edgeid) # Mark the traversed "to" node as to-be-explored. if check_reverse and edge[-1] == REVERSE: stack.append(edge[0]) else: stack.append(edge[1]) yield edge
8,289
async def _md5_by_reading(filepath: str, chunk_size: int = DEFAULT_BUFFER_SIZE) -> str:
    """
    Compute md5 of a filepath.
    """
    file_hash = hashlib.md5()
    async with async_open(filepath, "rb") as reader:
        async for chunk in reader.iter_chunked(chunk_size):
            file_hash.update(chunk)
    return file_hash.hexdigest()
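# Minimal usage sketch (not from the original source); the coroutine must be driven by an
# event loop, and "data.bin" is a placeholder for an existing file.
import asyncio

digest = asyncio.run(_md5_by_reading("data.bin"))
print(digest)  # hex md5 of data.bin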
8,290
def client_authenticator_factory(mechanism, password_manager):
    """Create a client authenticator object for given SASL mechanism and
    password manager.

    :Parameters:
        - `mechanism`: name of the SASL mechanism ("PLAIN", "DIGEST-MD5" or "GSSAPI").
        - `password_manager`: name of the password manager object providing
          authentication credentials.
    :Types:
        - `mechanism`: `str`
        - `password_manager`: `PasswordManager`

    :return: new authenticator.
    :returntype: `sasl.core.ClientAuthenticator`"""
    authenticator = all_mechanisms_dict[mechanism][0]
    return authenticator(password_manager)
8,291
def wait_for_cell_data_connection(
        log, ad, state, timeout_value=EventDispatcher.DEFAULT_TIMEOUT):
    """Wait for data connection status to be expected value for default
    data subscription.

    Wait for the data connection status to be DATA_STATE_CONNECTED
    or DATA_STATE_DISCONNECTED.

    Args:
        log: Log object.
        ad: Android Device Object.
        state: Expected status: True or False.
            If True, it will wait for status to be DATA_STATE_CONNECTED.
            If False, it will wait for status to be DATA_STATE_DISCONNECTED.
        timeout_value: wait for cell data timeout value.
            This is optional, default value is EventDispatcher.DEFAULT_TIMEOUT

    Returns:
        True if success.
        False if failed.
    """
    sub_id = get_default_data_sub_id(ad)
    return wait_for_cell_data_connection_for_subscription(log, ad, sub_id,
                                                          state, timeout_value)
8,292
def _parse_lists(config_parser: configparser.ConfigParser, section: str = '') -> t.Dict:
    """Parses multiline blocks in *.cfg files as lists."""
    config = dict(config_parser.items(section))

    for key, val in config.items():
        if '/' in val and 'parameters' not in section:
            config[key] = parse_mars_syntax(val)
        elif '\n' in val:
            config[key] = _splitlines(val)

    return config
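# Minimal usage sketch (not from the original source). parse_mars_syntax and _splitlines are
# helpers defined elsewhere in the module; the config text and expected output are illustrative.
import configparser

parser = configparser.ConfigParser()
parser.read_string("""
[selection]
variables =
    temperature
    pressure
""")
print(_parse_lists(parser, "selection"))
# e.g. {'variables': ['temperature', 'pressure']}, assuming _splitlines drops blank lines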
8,293
def get_filename_pair(filename):
    """
    Given the name of a VASF data file (*.rsd) or parameter file (*.rsp) return
    a tuple of (parameters_filename, data_filename). It doesn't matter if the
    filename is a fully qualified path or not.

    - assumes extensions are all caps or all lower
    """
    param_filename = data_filename = filename[:-1]

    if filename[-1:].isupper():
        data_filename += 'D'
        param_filename += 'P'
    else:
        data_filename += 'd'
        param_filename += 'p'

    return (param_filename, data_filename)
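# Minimal usage sketch (not from the original source).
print(get_filename_pair('scan01.rsd'))   # ('scan01.rsp', 'scan01.rsd')
print(get_filename_pair('SCAN01.RSP'))   # ('SCAN01.RSP', 'SCAN01.RSD')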
8,294
def parseArgs(app): """ Args parser """ if not os.path.isdir(app.iconCacheDir): os.system("mkdir -p "+app.iconCacheDir) try: opts, args = getopt.getopt(sys.argv[1:], "ha:r:lt:pbs", ["help", "append=", "remove=", 'list', 'daemonize', 'type', 'list-plugins', 'list-types', 'config=', 'value=', 'list-config', 'force-new', 'service', 'list-browsers', 'debug']) except Exception as err: print("Error: "+str(err)+", Try --help for usage\n\n") usage() sys.exit(2) Type = None Variable = None ForceNew = False # turn off debugging & error reporting when showing usage dialog #try: # if "-h" in opts[0] or "--help" in opts[0]: # #except: # pass app.Logging.silent = True if len(opts) > 0: if "--debug" in opts[0]: app.Logging.silent = False app.db = libnbnotify.database.Database() app.loadConfig() app.loadPasswords() for o, a in opts: if o in ('-h', '--help'): usage() sys.exit(2) if o in ('-p', '--list-plugins', '--list-types'): files = os.listdir(libnbnotify.plugins.__path__[0]) print("Avaliable plugins:") for file in files: if file[-4:] == ".pyc": continue if file == "__init__.py": continue print("+ "+file[:-3]) sys.exit(0) ### Web Browsers support if o == "--list-browsers" or o == "-b": files = os.listdir(libnbnotify.browser.__path__[0]) print("Avaliable browsers:") for file in files: if file[-4:] == ".pyc": continue if file == "__init__.py": continue print("+ "+file[:-3]) print("\nExample usage: nbnotify --service chromium.default.facebook to add facebook session from 'default' profile of Chromium browser") sys.exit(0) if o == "--service" or o == "-s": if len(args) == 0: usage() sys.exit(0) app.cli = True app.doPluginsLoad() if app.addService(args[0]) == False: print("Cannot add service, check logs or enable --debug option") else: print("Added.") app.Config.save() sys.exit(0) ### Configuration editor if o == "--config": Variable = a if o == "--force-new": ForceNew = True if o == "--value": if Variable == None: print("You must specify --config=section:option first") sys.exit(0) Var = Variable.split(":") if len(Var) == 2: if str(app.configGetKey(Var[0], Var[1])) != "False" or ForceNew == True: app.configSetKey(Var[0], Var[1], a) print(Var[0] + ":"+Var[1]+" = "+a) app.saveConfiguration() else: print(Var[0] + ":"+Var[1]+" does not exists in config file, use --force-new to force creation of new variable.") sys.exit(0) if o in ('-l', '--list'): links = app.configGetSection('links') if links == False: print("No links in database.") sys.exit(0) for link in links: print(links[link]) sys.exit(0) if o in "--list-config": i = 0 for var in app.Config.Config: i = i + 1 section = app.configGetSection(var) print(str(i)+". 
"+str(var)) for option in section: print("==> "+option+" = "+str(app.configGetKey(var, option))+";") print("\n") sys.exit(0) ### Links management if o in ('-t', '--type'): Type = a if o in ('-a', '--append'): app.cli = True app.doPluginsLoad() if not Type == None: if os.path.isfile(libnbnotify.plugins.__path__[0]+"/"+Type+".py"): app.setType(a, Type) print("Type changed to \""+str(Type)+"\" for link: "+str(a)) else: print("Invalid type "+a+" - does not exists in plugins list.") app.addPage(a) app.saveConfiguration() sys.exit(0) if o in ('-r', '--remove'): links = app.configGetSection('links') if links == False: print("No links in database.") sys.exit(0) pos = None for link in links: if links[link] == a: pos = link break if pos is not None: links.pop(pos) print("Removed.") app.saveConfiguration() else: print("Link not found, nothing changed.") sys.exit(0) if o == '--daemonize': if not os.path.isdir("/tmp/.nbnotify"): os.system("mkdir /tmp/.nbnotify") daemonize(stdout='/tmp/.nbnotify/.out', stderr='/tmp/.nbnotify/.err') if app.Logging.silent == True: print(app.Logging.session) app.Logging.silent = False app.doPluginsLoad() app.main()
8,295
def test_random(client):
    """Testing shows.random"""
    response = client.get("/shows/random")
    assert response.status_code == 302
    assert response.location
8,296
async def get_reposet(client, headers, reposet_id):
    """Get the reposet by id."""
    url = f"https://api.athenian.co/v1/reposet/{reposet_id}"
    return await do_request(client, url, headers)
8,297
def conditions(converted: str) -> bool: """Conditions function is used to check the message processed. Uses the keywords to do a regex match and trigger the appropriate function which has dedicated task. Args: converted: Takes the voice recognized statement as argument. Returns: bool: Boolean True only when asked to sleep for conditioned sleep message. """ sys.stdout.write(f'\r{converted}') converted_lower = converted.lower() todo_checks = ['to do', 'to-do', 'todo'] if any(word in converted_lower for word in keywords.date()) and \ not any(word in converted_lower for word in keywords.avoid()): current_date() elif any(word in converted_lower for word in keywords.time()) and \ not any(word in converted_lower for word in keywords.avoid()): place = '' for word in converted.split(): if word[0].isupper(): place += word + ' ' elif '.' in word: place += word + ' ' if place: current_time(place) else: current_time() elif any(word in converted_lower for word in keywords.weather()) and \ not any(word in converted_lower for word in keywords.avoid()): place = '' for word in converted.split(): if word[0].isupper(): place += word + ' ' elif '.' in word: place += word + ' ' weather_cond = ['tomorrow', 'day after', 'next week', 'tonight', 'afternoon', 'evening'] if any(match in converted_lower for match in weather_cond): if place: weather_condition(msg=converted, place=place) else: weather_condition(msg=converted) elif place: weather(place) else: weather() elif any(word in converted_lower for word in keywords.system_info()): system_info() elif any(word in converted for word in keywords.ip_info()) or 'IP' in converted.split(): if 'public' in converted_lower: if not internet_checker(): speaker.say("You are not connected to the internet sir!") return False if ssid := get_ssid(): ssid = f'for the connection {ssid} ' else: ssid = '' if public_ip := json_load(urlopen('http://ipinfo.io/json')).get('ip'): output = f"My public IP {ssid}is {public_ip}" elif public_ip := json_loads(urlopen('http://ip.jsontest.com').read()).get('ip'): output = f"My public IP {ssid}is {public_ip}" else: output = 'I was unable to fetch the public IP sir!' 
else: ip_address = vpn_checker().split(':')[-1] output = f"My local IP address for {gethostname()} is {ip_address}" sys.stdout.write(f'\r{output}') speaker.say(output) elif any(word in converted_lower for word in keywords.wikipedia()): wikipedia_() elif any(word in converted_lower for word in keywords.news()): news() elif any(word in converted_lower for word in keywords.report()): report() elif any(word in converted_lower for word in keywords.robinhood()): robinhood() elif any(word in converted_lower for word in keywords.repeat()): repeater() elif any(word in converted_lower for word in keywords.location()): location() elif any(word in converted_lower for word in keywords.locate()): locate(converted) elif any(word in converted_lower for word in keywords.gmail()): gmail() elif any(word in converted_lower for word in keywords.meaning()): meaning(converted.split()[-1]) elif any(word in converted_lower for word in keywords.delete_todo()) and \ any(word in converted_lower for word in todo_checks): delete_todo() elif any(word in converted_lower for word in keywords.list_todo()): todo() elif any(word in converted_lower for word in keywords.add_todo()) and \ any(word in converted_lower for word in todo_checks): add_todo() elif any(word in converted_lower for word in keywords.delete_db()): delete_db() elif any(word in converted_lower for word in keywords.create_db()): create_db() elif any(word in converted_lower for word in keywords.distance()) and \ not any(word in converted_lower for word in keywords.avoid()): """the loop below differentiates between two places and one place with two words eg: New York will be considered as one word and New York and Las Vegas will be considered as two words""" check = converted.split() # str to list places = [] for word in check: if word[0].isupper() or '.' in word: # looks for words that start with uppercase try: next_word = check[check.index(word) + 1] # looks if words after an uppercase word is also one if next_word[0].isupper(): places.append(f"{word + ' ' + check[check.index(word) + 1]}") else: if word not in ' '.join(places): places.append(word) except IndexError: # catches exception on lowercase word after an upper case word if word not in ' '.join(places): places.append(word) """the condition below assumes two different words as two places but not including two words starting upper case right next to each other""" if len(places) >= 2: start = places[0] end = places[1] elif len(places) == 1: start = None end = places[0] else: start, end = None, None distance(start, end) elif any(word in converted_lower for word in conversation.form()): speaker.say("I am a program, I'm without form.") elif any(word in converted_lower for word in keywords.geopy()): # tries to look for words starting with an upper case letter place = '' for word in converted.split(): if word[0].isupper(): place += word + ' ' elif '.' in word: place += word + ' ' # if no words found starting with an upper case letter, fetches word after the keyword 'is' eg: where is Chicago if not place: keyword = 'is' before_keyword, keyword, after_keyword = converted.partition(keyword) place = after_keyword.replace(' in', '').strip() locate_places(place.strip()) elif any(word in converted_lower for word in keywords.directions()): place = '' for word in converted.split(): if word[0].isupper(): place += word + ' ' elif '.' 
in word: place += word + ' ' place = place.replace('I ', '').strip() if place: directions(place) else: speaker.say("I can't take you to anywhere without a location sir!") directions(place=None) elif any(word in converted_lower for word in keywords.webpage()) and \ not any(word in converted_lower for word in keywords.avoid()): converted = converted.replace(' In', 'in').replace(' Co. Uk', 'co.uk') host = (word for word in converted.split() if '.' in word) webpage(host) elif any(word in converted_lower for word in keywords.kill_alarm()): kill_alarm() elif any(word in converted_lower for word in keywords.alarm()): alarm(converted_lower) elif any(word in converted_lower for word in keywords.google_home()): google_home() elif any(word in converted_lower for word in keywords.jokes()): jokes() elif any(word in converted_lower for word in keywords.reminder()): reminder(converted_lower) elif any(word in converted_lower for word in keywords.notes()): notes() elif any(word in converted_lower for word in keywords.github()): auth = HTTPBasicAuth(git_user, git_pass) response = get('https://api.github.com/user/repos?type=all&per_page=100', auth=auth).json() result, repos, total, forked, private, archived, licensed = [], [], 0, 0, 0, 0, 0 for i in range(len(response)): total += 1 forked += 1 if response[i]['fork'] else 0 private += 1 if response[i]['private'] else 0 archived += 1 if response[i]['archived'] else 0 licensed += 1 if response[i]['license'] else 0 repos.append({response[i]['name'].replace('_', ' ').replace('-', ' '): response[i]['clone_url']}) if 'how many' in converted: speaker.say(f'You have {total} repositories sir, out of which {forked} are forked, {private} are private, ' f'{licensed} are licensed, and {archived} archived.') else: [result.append(clone_url) if clone_url not in result and re.search(rf'\b{word}\b', repo.lower()) else None for word in converted_lower.split() for item in repos for repo, clone_url in item.items()] if result: github(target=result) else: speaker.say("Sorry sir! 
I did not find that repo.") elif any(word in converted_lower for word in keywords.txt_message()): number = '-'.join([str(s) for s in re.findall(r'\b\d+\b', converted)]) send_sms(number) elif any(word in converted_lower for word in keywords.google_search()): phrase = converted.split('for')[-1] if 'for' in converted else None google_search(phrase) elif any(word in converted_lower for word in keywords.tv()): television(converted) elif any(word in converted_lower for word in keywords.apps()): apps(converted.split()[-1]) elif any(word in converted_lower for word in keywords.music()): if 'speaker' in converted_lower: music(converted) else: music() elif any(word in converted_lower for word in keywords.volume()): if 'mute' in converted_lower: level = 0 elif 'max' in converted_lower or 'full' in converted_lower: level = 100 else: level = re.findall(r'\b\d+\b', converted) # gets integers from string as a list level = int(level[0]) if level else 50 # converted to int for volume volume_controller(level) speaker.say(f"{choice(ack)}!") elif any(word in converted_lower for word in keywords.face_detection()): face_recognition_detection() elif any(word in converted_lower for word in keywords.speed_test()): speed_test() elif any(word in converted_lower for word in keywords.bluetooth()): bluetooth(phrase=converted_lower) elif any(word in converted_lower for word in keywords.brightness()) and 'lights' not in converted_lower: speaker.say(choice(ack)) if 'set' in converted_lower or re.findall(r'\b\d+\b', converted_lower): level = re.findall(r'\b\d+\b', converted_lower) # gets integers from string as a list if not level: level = ['50'] # pass as list for brightness, as args must be iterable Thread(target=set_brightness, args=level).start() elif 'decrease' in converted_lower or 'reduce' in converted_lower or 'lower' in converted_lower or \ 'dark' in converted_lower or 'dim' in converted_lower: Thread(target=decrease_brightness).start() elif 'increase' in converted_lower or 'bright' in converted_lower or 'max' in converted_lower or \ 'brighten' in converted_lower or 'light up' in converted_lower: Thread(target=increase_brightness).start() elif any(word in converted_lower for word in keywords.lights()): if not vpn_checker().startswith('VPN'): lights(converted=converted_lower) elif any(word in converted_lower for word in keywords.guard_enable() or keywords.guard_disable()): if any(word in converted_lower for word in keywords.guard_enable()): logger.info('Enabled Security Mode') speaker.say(f"Enabled security mode sir! I will look out for potential threats and keep you posted. " f"Have a nice {part_of_day()}, and enjoy yourself sir!") speaker.runAndWait() guard() elif any(word in converted_lower for word in keywords.flip_a_coin()): playsound('indicators/coin.mp3') sleep(0.5) speaker.say(f"""{choice(['You got', 'It landed on', "It's"])} {choice(['heads', 'tails'])} sir""") elif any(word in converted_lower for word in keywords.facts()): speaker.say(getFact(False)) elif any(word in converted_lower for word in keywords.meetings()): if os.path.isfile('meetings'): meeting_reader() else: if os.environ.get('called_by_offline'): speaker.say("Meetings file is not ready yet. Please try again in a minute or two.") return False meeting = ThreadPool(processes=1).apply_async(func=meetings) speaker.say("Please give me a moment sir! 
Let me check your calendar.") speaker.runAndWait() try: speaker.say(meeting.get(timeout=60)) except ThreadTimeoutError: logger.error('Unable to read the calendar within 60 seconds.') speaker.say("I wasn't able to read your calendar within the set time limit sir!") speaker.runAndWait() elif any(word in converted_lower for word in keywords.voice_changer()): voice_changer(converted) elif any(word in converted_lower for word in keywords.system_vitals()): system_vitals() elif any(word in converted_lower for word in keywords.vpn_server()): if vpn_server_check(): speaker.say('An operation for VPN Server is already in progress sir! Please wait and retry.') elif 'start' in converted_lower or 'trigger' in converted_lower or 'initiate' in converted_lower or \ 'enable' in converted_lower or 'spin up' in converted_lower: Thread(target=vpn_server, args=['START']).start() speaker.say('VPN Server has been initiated sir! Login details will be sent to you shortly.') elif 'stop' in converted_lower or 'shut' in converted_lower or 'close' in converted_lower or \ 'disable' in converted_lower: Thread(target=vpn_server, args=['STOP']).start() speaker.say('VPN Server will be shutdown sir!') else: speaker.say("I don't understand the request sir! You can ask me to enable or disable the VPN server.") elif any(word in converted_lower for word in keywords.personal_cloud()): if 'enable' in converted_lower or 'initiate' in converted_lower or 'kick off' in converted_lower or \ 'start' in converted_lower: Thread(target=personal_cloud.enable).start() speaker.say("Personal Cloud has been triggered sir! I will send the login details to your phone number " "once the server is up and running.") elif 'disable' in converted_lower or 'stop' in converted_lower: Thread(target=personal_cloud.disable).start() speaker.say(choice(ack)) else: speaker.say("I didn't quite get that sir! Please tell me if I should enable or disable your server.") elif any(word in converted_lower for word in conversation.greeting()): speaker.say('I am spectacular. I hope you are doing fine too.') elif any(word in converted_lower for word in conversation.capabilities()): speaker.say('There is a lot I can do. For example: I can get you the weather at any location, news around ' 'you, meanings of words, launch applications, create a to-do list, check your emails, get your ' 'system configuration, tell your investment details, locate your phone, find distance between ' 'places, set an alarm, play music on smart devices around you, control your TV, tell a joke, send' ' a message, set reminders, scan and clone your GitHub repositories, and much more. Time to ask,.') elif any(word in converted_lower for word in conversation.languages()): speaker.say("Tricky question!. I'm configured in python, and I can speak English.") elif any(word in converted_lower for word in conversation.whats_up()): speaker.say("My listeners are up. There is nothing I cannot process. So ask me anything..") elif any(word in converted_lower for word in conversation.what()): speaker.say("I'm just a pre-programmed virtual assistant, trying to become a natural language UI.") elif any(word in converted_lower for word in conversation.who()): speaker.say("I am Jarvis. A virtual assistant designed by Mr.Raauv.") elif any(word in converted_lower for word in conversation.about_me()): speaker.say("I am Jarvis. 
A virtual assistant designed by Mr.Raauv.") speaker.say("I'm just a pre-programmed virtual assistant, trying to become a natural language UI.") speaker.say("I can seamlessly take care of your daily tasks, and also help with most of your work!") elif any(word in converted_lower for word in keywords.sleep()): if 'pc' in converted_lower or 'computer' in converted_lower or 'imac' in converted_lower or \ 'screen' in converted_lower: pc_sleep() else: speaker.say("Activating sentry mode, enjoy yourself sir!") if greet_check: greet_check.pop('status') return True elif any(word in converted_lower for word in keywords.restart()): if 'pc' in converted_lower or 'computer' in converted_lower or 'imac' in converted_lower: logger.info(f'JARVIS::Restart for {host_info("model")} has been requested.') restart(target='PC') else: logger.info('JARVIS::Self reboot has been requested.') if 'quick' in converted_lower or 'fast' in converted_lower: restart(quick=True) else: restart() elif any(word in converted_lower for word in keywords.kill()) and \ not any(word in converted_lower for word in keywords.avoid()): raise KeyboardInterrupt elif any(word in converted_lower for word in keywords.shutdown()): shutdown() elif any(word in converted_lower for word in keywords.chatbot()): chatter_bot() else: logger.info(f'Received the unrecognized lookup parameter: {converted}') Thread(target=unrecognized_dumper, args=[converted]).start() # writes to training_data.yaml in a thread if alpha(converted): if google_maps(converted): if google(converted): # if none of the conditions above are met, opens a google search on default browser sys.stdout.write(f"\r{converted}") if google_maps.has_been_called: google_maps.has_been_called = False speaker.say("I have also opened a google search for your request.") else: speaker.say(f"I heard {converted}. Let me look that up.") speaker.runAndWait() speaker.say("I have opened a google search for your request.") search = str(converted).replace(' ', '+') unknown_url = f"https://www.google.com/search?q={search}" web_open(unknown_url)
8,298
def detect_os_flavour(os_type):
    """Detect Linux flavours and return the current version"""
    if os_type:
        # linux
        try:
            return platform.linux_distribution()[0]
        except Exception:  # Python 3 syntax; the original used the Python 2 "except Exception, e" form
            return None
    else:
        # windows
        return platform.platform()
8,299