def binary_to_string(bin_string: str):
    """
    >>> binary_to_string("01100001")
    'a'
    >>> binary_to_string("a")
    Traceback (most recent call last):
    ...
    ValueError: not a binary number
    >>> binary_to_string("")
    Traceback (most recent call last):
    ...
    ValueError: no input was given
    >>> binary_to_string("39")
    Traceback (most recent call last):
    ...
    ValueError: not a binary number
    >>> binary_to_string(1010)
    Traceback (most recent call last):
    ...
    TypeError: not a string
    """
    if not isinstance(bin_string, str):
        raise TypeError("not a string")
    if not bin_string:
        raise ValueError("no input was given")
    if not all(char in "01" for char in bin_string):
        raise ValueError("not a binary number")
    return "".join([chr(int(i, 2)) for i in bin_string.split()])
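A quick usage sketch (added; not part of the original corpus entry). Note that the "01" validator rejects spaces, so despite the trailing split() only a single unspaced byte string is accepted in practice:

    assert binary_to_string("01100001") == "a"
    assert binary_to_string(format(ord("z"), "08b")) == "z"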
6,300
def put_lifecycle_configuration(FileSystemId=None, LifecyclePolicies=None):
    """
    Enables lifecycle management by creating a new LifecycleConfiguration object. A LifecycleConfiguration object defines when files in an Amazon EFS file system are automatically transitioned to the lower-cost EFS Infrequent Access (IA) storage class. A LifecycleConfiguration applies to all files in a file system.
    Each Amazon EFS file system supports one lifecycle configuration, which applies to all files in the file system. If a LifecycleConfiguration object already exists for the specified file system, a PutLifecycleConfiguration call modifies the existing configuration. A PutLifecycleConfiguration call with an empty LifecyclePolicies array in the request body deletes any existing LifecycleConfiguration and disables lifecycle management.
    In the request, specify the following:
    This operation requires permissions for the elasticfilesystem:PutLifecycleConfiguration operation.
    To apply a LifecycleConfiguration object to an encrypted file system, you need the same AWS Key Management Service (AWS KMS) permissions as when you created the encrypted file system.
    See also: AWS API Documentation

    Exceptions

    :example: response = client.put_lifecycle_configuration(
        FileSystemId='string',
        LifecyclePolicies=[
            {
                'TransitionToIA': 'AFTER_7_DAYS'|'AFTER_14_DAYS'|'AFTER_30_DAYS'|'AFTER_60_DAYS'|'AFTER_90_DAYS'
            },
        ]
    )

    :type FileSystemId: string
    :param FileSystemId: [REQUIRED]
        The ID of the file system for which you are creating the LifecycleConfiguration object (String).

    :type LifecyclePolicies: list
    :param LifecyclePolicies: [REQUIRED]
        An array of LifecyclePolicy objects that define the file system's LifecycleConfiguration object. A LifecycleConfiguration object tells lifecycle management when to transition files from the Standard storage class to the Infrequent Access storage class.

        (dict) --Describes a policy used by EFS lifecycle management to transition files to the Infrequent Access (IA) storage class.

            TransitionToIA (string) --A value that describes the period of time that a file is not accessed, after which it transitions to the IA storage class. Metadata operations such as listing the contents of a directory don't count as file access events.

    :rtype: dict

    Returns

    Response Syntax
    {
        'LifecyclePolicies': [
            {
                'TransitionToIA': 'AFTER_7_DAYS'|'AFTER_14_DAYS'|'AFTER_30_DAYS'|'AFTER_60_DAYS'|'AFTER_90_DAYS'
            },
        ]
    }

    Response Structure
    (dict) --
        LifecyclePolicies (list) --
            An array of lifecycle management policies. Currently, EFS supports a maximum of one policy per file system.
            (dict) --
                Describes a policy used by EFS lifecycle management to transition files to the Infrequent Access (IA) storage class.
                TransitionToIA (string) --
                    A value that describes the period of time that a file is not accessed, after which it transitions to the IA storage class. Metadata operations such as listing the contents of a directory don't count as file access events.

    Exceptions
    EFS.Client.exceptions.BadRequest
    EFS.Client.exceptions.InternalServerError
    EFS.Client.exceptions.FileSystemNotFound
    EFS.Client.exceptions.IncorrectFileSystemLifeCycleState

    :return: {
        'LifecyclePolicies': [
            {
                'TransitionToIA': 'AFTER_7_DAYS'|'AFTER_14_DAYS'|'AFTER_30_DAYS'|'AFTER_60_DAYS'|'AFTER_90_DAYS'
            },
        ]
    }

    :returns:
        FileSystemId (string) -- [REQUIRED]
            The ID of the file system for which you are creating the LifecycleConfiguration object (String).
        LifecyclePolicies (list) -- [REQUIRED]
            An array of LifecyclePolicy objects that define the file system's LifecycleConfiguration object. A LifecycleConfiguration object tells lifecycle management when to transition files from the Standard storage class to the Infrequent Access storage class.
            (dict) --Describes a policy used by EFS lifecycle management to transition files to the Infrequent Access (IA) storage class.
                TransitionToIA (string) --A value that describes the period of time that a file is not accessed, after which it transitions to the IA storage class. Metadata operations such as listing the contents of a directory don't count as file access events.
    """
    pass
6,301
def plot_height(dset):
    """Plots ash top height from VOLCAT.
    Does not save figure - quick image creation."""
    fig = plt.figure('Ash_Top_Height')
    title = 'Ash Top Height (km)'
    ax = fig.add_subplot(1, 1, 1)
    plot_gen(dset, ax, val='height', time=None, plotmap=True, title=title)
6,302
def clean_axis(axis):
    """Remove all ticks and spines from a matplotlib axis.

    Args:
        axis: The matplotlib Axes object to strip.

    Returns:
        None; the axis is modified in place.
    """
    axis.get_xaxis().set_ticks([])
    axis.get_yaxis().set_ticks([])
    for spine in axis.spines.values():
        spine.set_visible(False)
6,303
def create_fake_record():
    """Create records for demo purposes."""
    fake = Faker()
    data_to_use = {
        "access": {
            "record": "public",
            "files": "public",
        },
        "files": {
            "enabled": False,
        },
        "pids": {
        },
        "metadata": {
            "resource_type": fake_resource_type(),
            "creators": [{
                "person_or_org": {
                    "family_name": fake.last_name(),
                    "given_name": fake.first_name(),
                    "type": "personal",
                    "identifiers": [{
                        "scheme": "orcid",
                        "identifier": "0000-0002-1825-0097",
                    }],
                },
                "affiliations": [{
                    "name": fake.company(),
                    "identifiers": [{
                        "scheme": "ror",
                        "identifier": "03yrm5c26",
                    }]
                }]
            } for i in range(4)],
            "title": fake.company() + "'s gallery",
            "additional_titles": [{
                "title": "a research data management platform",
                "type": "subtitle",
                "lang": "eng"
            }, {
                "title": fake.company() + "'s gallery",
                "type": "alternativetitle",
                "lang": "eng"
            }],
            "publisher": "InvenioRDM",
            "publication_date": fake_edtf_level_0(),
            "subjects": [{
                "subject": fake.word(),
                "identifier": "03yrm5c26",
                "scheme": "ror"
            }, {
                "subject": fake.word(),
                "identifier": "03yrm5c26",
                "scheme": "ror"
            }],
            "contributors": [{
                "person_or_org": {
                    "family_name": fake.last_name(),
                    "given_name": fake.first_name(),
                    "type": "personal",
                },
                "affiliations": [{
                    "name": fake.company(),
                    "identifiers": [{
                        "scheme": "ror",
                        "identifier": "03yrm5c26",
                    }]
                }],
                "role": "rightsholder"
            } for i in range(3)],
            # "dates": [{
            #     # No end date to avoid computations based on start
            #     "date": fake.date(pattern='%Y-%m-%d'),
            #     "description": "Random test date",
            #     "type": "other"
            # }],
            # TODO: Add when we have PIDs for languages vocabulary
            # "languages": [{"id": "eng"}],
            # "related_identifiers": [{
            #     "identifier": "10.9999/rdm.9999988",
            #     "scheme": "doi",
            #     "relation_type": "requires",
            #     "resource_type": fake_resource_type()
            # }],
            "sizes": [
                "11 pages"
            ],
            "formats": [
                "application/pdf"
            ],
            "version": "v0.0.1",
            # "rights": [{
            #     "rights": "Berkeley Software Distribution 3",
            #     "uri": "https://opensource.org/licenses/BSD-3-Clause",
            #     "identifier": "03yrm5c26",
            #     "scheme": "ror",
            # }],
            "description": fake.text(max_nb_chars=3000),
            "additional_descriptions": [{
                "description": fake.text(max_nb_chars=200),
                "type": "methods",
                "lang": "eng"
            } for i in range(2)],
            "funding": [{
                "funder": {
                    "name": "European Commission",
                    "identifier": "03yrm5c26",
                    "scheme": "ror"
                },
                "award": {
                    "title": "OpenAIRE",
                    "number": "246686",
                    "identifier": "0000-0002-1825-0097",
                    "scheme": "orcid"
                }
            }],
            # "locations": [{
            #     'geometry': {
            #         'type': 'Point',
            #         'coordinates': [
            #             float(fake.latitude()), float(fake.longitude())
            #         ]
            #     },
            #     "place": fake.location_on_land()[2],
            #     "description": "Random place on land...",
            #     'identifiers': [{
            #         'scheme': 'ror',
            #         'identifier': '03yrm5c26',
            #     }, {
            #         'scheme': 'orcid',
            #         'identifier': '0000-0002-1825-0097',
            #     }]
            # }, {
            #     'geometry': {
            #         'type': 'MultiPoint',
            #         'coordinates': [
            #             [float(fake.latitude()), float(fake.longitude())],
            #             [float(fake.latitude()), float(fake.longitude())]
            #         ]
            #     },
            #     "place": fake.location_on_land()[2],
            # }],
            "references": [{
                "reference": "Reference to something et al.",
                "identifier": "0000000114559647",
                "scheme": "isni"
            }],
            "identifiers": [{
                "identifier": "ark:/123/456",
                "scheme": "ark"
            }],
        }
    }
    return json.loads(json.dumps(data_to_use))
6,304
def load_texture_pair(filename):
    """Function that loads two versions of a texture, for left/right movement."""
    return [
        arcade.load_texture(filename),
        arcade.load_texture(filename, flipped_horizontally=True)
    ]
6,305
def aw_mover_set_state(id: int, state: int, model_num: int) -> None:
    """
    Sets the state of a mover. Triggers the mover set state event.

    Args:
        id (int): The mover ID.
        state (int): The state.
        model_num (int): The model number.

    Raises:
        Exception: If the mover state could not be set.
    """
    rc = SDK.aw_mover_set_state(id, state, model_num)
    if rc:
        raise Exception(f"Failed to set mover state: {rc}")
6,306
def bytes2bson(val: bytes) -> Tuple[bytes, bytes]:
    """Encode bytes as BSON Binary / Generic."""
    assert isinstance(val, (bytes, bytearray))
    return BSON_BINARY, pack_i32(len(val)) + BSON_BINARY_GENERIC + val
6,307
def create_overlay(path: str, preprocessed: np.ndarray,
                   segmentation_result: Optional[SegmentationResult],
                   cells_results: Optional[np.ndarray],
                   results: List[Tuple[float, int, str]]):
    """
    Creates and saves an overlaid image.
    """
    # create path if needed
    if not os.path.exists(cli_overlays):
        os.makedirs(cli_overlays)

    # get out path
    name = os.path.splitext(os.path.basename(path))[0]
    out_path = os.path.join(cli_overlays, name + '-overlay.png')

    # create boxes data
    if segmentation_result is not None and cells_results is not None:
        boxes = [
            (
                (seg.y, seg.x),
                (seg.y + seg.mask.shape[1], seg.x + seg.mask.shape[0]),
                f'{cell_classifier.classes[cell_result.argmax()]} {cell_result.max() * 100:4.2f}'
            )
            for seg, cell_result in zip(segmentation_result.segments, cells_results)
        ]
    else:
        boxes = []

    # create image
    img = overlay.draw_overlay(preprocessed, boxes, results)

    # save image
    cv.imwrite(out_path, img)

    # print info
    echo_verbose(f'\tSaved overlaid image to {out_path}')
6,308
def tabindex(field, index):
    """Set the tab index on the filtered field."""
    field.field.widget.attrs["tabindex"] = index
    return field
6,309
def compute_GridData(xvals, yvals, f, ufunc=0, **keyw):
    """Evaluate a function of 2 variables and store the results in a GridData.

    Computes a function 'f' of two variables on a rectangular grid using
    'tabulate_function', then stores the results into a 'GridData' so that it
    can be plotted. After calculation the data are written to a file; no copy
    is kept in memory. Note that this is quite different than 'Func' (which
    tells gnuplot to evaluate the function).

    Arguments:

        'xvals' -- a 1-d array with dimension 'numx'
        'yvals' -- a 1-d array with dimension 'numy'
        'f' -- the function to plot--a callable object for which 'f(x,y)'
            returns a number.
        'ufunc=<bool>' -- evaluate 'f' as a ufunc?

    Other keyword arguments are passed to the 'GridData' constructor.

    'f' should be a callable object taking two arguments. 'f(x,y)' will be
    computed at all grid points obtained by combining elements from 'xvals'
    and 'yvals'.

    If called with 'ufunc=1', then 'f' should be a function that is composed
    entirely of ufuncs, and it will be passed the 'xvals' and 'yvals' as
    rectangular matrices.

    Thus if you have a function 'f' and two vectors 'xvals' and 'yvals' and a
    Gnuplot instance called 'g', you can plot the function by typing
    'g.splot(compute_GridData(xvals, yvals, f))'.
    """
    xvals = utils.float_array(xvals)
    yvals = utils.float_array(yvals)

    # evaluate function:
    data = tabulate_function(f, xvals, yvals, ufunc=ufunc)

    return Gnuplot.GridData(data, xvals, yvals, **keyw)
6,310
def decorator_with_option(
    decorator_fn,
):
    """Wraps a decorator to correctly forward decorator options.

    `decorator_with_option` is applied on decorators. Usage:

    ```
    @jax3d.utils.decorator_with_option
    def my_decorator(fn, x=None, y=None):
        ...
    ```

    The decorated decorator can then be used with or without options, or
    called directly.

    ```
    @my_decorator(x, y=y)
    def fn():
        ...

    @my_decorator
    def fn():
        ...

    fn = my_decorator(fn, x, y=y)
    ```

    Args:
        decorator_fn: The decorator with signature `(fn, *option, **option_kwargs)`

    Returns:
        The `decorator_fn` which now can be used as decorator with option.
    """
    @functools.wraps(decorator_fn)
    def decorated(*args: Any, **kwargs: Any) -> Any:
        fn = args[0] if args else None
        if not isinstance(fn, collections.abc.Callable):
            def decorated_with_options(fn):
                return decorator_fn(fn, *args, **kwargs)
            return decorated_with_options

        return decorator_fn(fn, *args[1:], **kwargs)

    return decorated
6,311
def main():
    """Main program"""
    disable_loggers(DISABLE_LOGGERS)

    try:
        docker_hub = DockerHub(http_get, DOCKER_HUB_USERNAME,
                               DOCKER_HUB_PASSWORD, DOCKER_HUB_REPO)
    except PermissionError:
        log.error('Could not log into Docker hub')
        sys.exit(1)

    global fleet  # pylint: disable=global-statement, invalid-name
    fleet = Fleet(docker_hub, socket_connections, event_stream)

    socket_io.run(
        web_app,
        host='0.0.0.0',
        port=5000
    )
6,312
def zones_logs(self):
    """API core commands for Cloudflare API"""
    self.add('VOID', "zones", "logs")
    self.add('AUTH_UNWRAPPED', "zones", "logs/received")
    self.add('AUTH_UNWRAPPED', "zones", "logs/received/fields")
6,313
def chebyshev_parameters(rxn_dstr, a_units='moles'):
    """ Parses the data string for a reaction in the reactions block
        for the lines containing the Chebyshev fitting parameters,
        then reads the parameters from these lines.

        :param rxn_dstr: data string for species in reaction block
        :type rxn_dstr: str
        :return params: Chebyshev fitting parameters
        :rtype: dict[param: value]
    """
    original_rxn_dstr = rxn_dstr
    rxn_dstr = apf.remove(COMMENTS_PATTERN, rxn_dstr)

    tcheb_pattern = (
        'TCHEB' + app.zero_or_more(app.SPACE) + app.escape('/') +
        app.zero_or_more(app.SPACE) + app.capturing(app.NUMBER) +
        app.one_or_more(app.SPACE) + app.capturing(app.NUMBER) +
        app.zero_or_more(app.SPACE) + app.escape('/')
    )
    pcheb_pattern = (
        'PCHEB' + app.zero_or_more(app.SPACE) + app.escape('/') +
        app.zero_or_more(app.SPACE) + app.capturing(app.NUMBER) +
        app.one_or_more(app.SPACE) + app.capturing(app.NUMBER) +
        app.zero_or_more(app.SPACE) + app.escape('/')
    )
    cheb_pattern = (
        app.not_preceded_by(app.one_of_these(['T', 'P'])) + 'CHEB' +
        app.zero_or_more(app.SPACE) + app.escape('/') +
        app.capturing(app.one_or_more(app.WILDCARD2)) + app.escape('/')
    )

    cheb_params_raw = apf.all_captures(cheb_pattern, rxn_dstr)

    if cheb_params_raw:
        params = {}

        # Get the temp and pressure limits;
        # add the Chemkin default values if they don't exist
        cheb_temps = apf.first_capture(tcheb_pattern, rxn_dstr)
        cheb_pressures = apf.first_capture(pcheb_pattern, rxn_dstr)
        if cheb_temps is None:
            cheb_temps = ('300.00', '2500.00')
            print(
                'No Chebyshev temperature limits specified' +
                ' for the below reaction.' +
                f' Assuming 300 and 2500 K. \n \n {original_rxn_dstr}\n')
        if cheb_pressures is None:
            cheb_pressures = ('0.001', '100.00')
            print(
                'No Chebyshev pressure limits specified' +
                ' for the below reaction.' +
                f' Assuming 0.001 and 100 atm. \n \n {original_rxn_dstr}\n')

        # Get all the numbers from the CHEB parameters
        cheb_params = []
        for cheb_line in cheb_params_raw:
            cheb_params.extend(cheb_line.split())

        # Get the cheb array dimensions N and M, which are the first two
        # entries of the CHEB params
        cheb_n = int(math.floor(float(cheb_params[0])))
        cheb_m = int(math.floor(float(cheb_params[1])))

        # Start on the third value (after N and M)
        # and get all the polynomial coefficients
        coeffs = []
        for idx, coeff in enumerate(cheb_params[2:]):
            # extra coefficients are allowed but ignored
            if idx+1 > (cheb_n*cheb_m):
                break
            coeffs.append(coeff)
        assert len(coeffs) == (cheb_n*cheb_m), (
            f'For the below reaction, there should be {cheb_n*cheb_m}' +
            ' Chebyshev polynomial' +
            f' coefficients, but there are only {len(coeffs)}.' +
            f' \n \n {original_rxn_dstr}\n'
        )
        alpha = np.array(list(map(float, coeffs)))

        params['t_limits'] = [float(val) for val in cheb_temps]
        params['p_limits'] = [float(val) for val in cheb_pressures]
        params['alpha_elm'] = alpha.reshape([cheb_n, cheb_m])
        params['a_units'] = a_units

    else:
        params = None

    return params
6,314
def zip_varlen(*iterables):
    """Variable length zip() function."""
    iters = [iter(it) for it in iterables]
    while True:
        # broken when an empty tuple is given by _one_pass
        val = tuple(_one_pass(iters))
        if val:
            yield val
        else:
            break
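The _one_pass helper is not included in this entry; a plausible sketch (an assumption, not the original implementation) yields one value from each iterator that is not yet exhausted:

    def _one_pass(iters):
        # iterate over a copy so exhausted iterators can be removed in place
        for it in list(iters):
            try:
                yield next(it)
            except StopIteration:
                iters.remove(it)

    # e.g. list(zip_varlen("ab", "xyz")) -> [('a', 'x'), ('b', 'y'), ('z',)]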
6,315
def _gen_matrix(n, *args):
    """Supports more matrix construction routines.

    1. Usual construction (from a 2d list or a single scalar).
    2. From a 1-D array of n*n elements (glsl style).
    3. From a list of n-D vectors (glsl style).
    """
    if len(args) == n * n:  # initialize with n*n scalars
        data = [[args[k * n + i] for i in range(n)] for k in range(n)]
        return ti.Matrix(data, float)

    if len(args) == n:  # initialize with n vectors
        # Matrix.rows() will do implicit type inference
        data = [list(x) for x in args]
        return ti.Matrix(data, float)

    if len(args) == 1:  # initialize with a scalar, a matrix or a 1d list
        x = args[0]
        if isinstance(x, ti.Matrix):
            return x
        if hasattr(x, "__len__") and len(x) == n * n:
            data = [[x[k * n + i] for i in range(n)] for k in range(n)]
            return ti.Matrix(data, float)

    return ti.types.matrix(n, n, float)(*args)
6,316
def temperature(analog_pin, power_pin=None, ground_pin=None, R=20000, n=100):
    """Function for computing thermistor temperature

    Parameters
    ----------
    analog_pin: :obj:'pyb.Pin'
        Any pin connected to an analog to digital converter on a pyboard
    power_pin: :obj:'pyb.Pin', optional
        Used if a digital pin is to be used to power the thermistor.
        Note that the thermistor may also be powered by the 3.3V pin.
        In that case, this argument is not required.
    ground_pin: :obj:'pyb.Pin', optional
        Used if a digital pin is used to ground the thermistor.
        Note that the thermistor may also be grounded by the GND pin.
        In that case, this argument is not required.
    R: float, optional
        Value of the fixed resistor in the resistor divider. Default is 20,000 ohm
    n: int, optional
        Number of readings to make--returns average of middle two quartiles.
        Defaults to 100.

    Returns
    -------
    float
        Temperature (Celsius degrees)
    """
    # Define constants for conversion
    A = 0.001125308852122
    B = 0.000234711863267
    C = 0.000000085663516

    # Allocate array for storing temperature readings
    T = arr.array('f', [0]*n)

    # Turn on the power if necessary, then wait a moment
    if power_pin is not None:
        power_pin.off()
        time.sleep_ms(1)

    # Turn off the ground if necessary, then wait a moment
    if ground_pin is not None:
        ground_pin.off()
        time.sleep_ms(1)

    # Loop through readings, computing thermistor resistance
    # and temperature, then storing in array
    for i in range(n):
        # if possible, switch current on pins to ensure no net accumulation
        # of charge if this is in parallel with pins that have a capacitance
        if power_pin is not None:
            power_pin.on()
            ontick = time.ticks_us()
            time.sleep_us(1000)
            count = analog_pin.read()
            power_pin.off()
            offtick = time.ticks_us()
            time_on = time.ticks_diff(offtick, ontick)
            power_pin.off()
        else:
            # no switched power pin: read the ADC directly (the original code
            # left `count` unassigned on this path)
            count = analog_pin.read()
        if ground_pin is not None:
            ground_pin.on()
            time.sleep_us(time_on)
            ground_pin.off()
        # calculate resistance and temperature,
        # being careful not to cause an overload
        if count > 0:
            if count < 4095:
                R_t = ((count/4095)*R)/(1 - count/4095)
                T[i] = 1/((A + B*(math.log(R_t))) + C*((math.log(R_t))**3)) - 273.15
            else:
                T[i] = -55
        else:
            T[i] = 150

    # Turn the power back off if possible
    if power_pin is not None:
        power_pin.off()

    # Define and analyze the middle two quartiles
    upper_index = math.ceil(3*n/4)
    lower_index = math.floor(n/4)
    sampled_length = (upper_index - lower_index)
    T_mean_of_mid_quartiles = sum(sorted(T)[lower_index:upper_index])/sampled_length

    return T_mean_of_mid_quartiles
6,317
def submit_form():
    """
    Submits survey data to SQL database.
    """
    if request.method == "POST":
        data = request.data
        if data:
            data = json.loads(data.decode("utf-8").replace("'", '"'))
            student, enrollments, tracks = processSurveyData(data)

            insert = 'INSERT INTO student_info.student (net_id, student_name, pass_word, start_semester, current_semester,' \
                     'expected_graduation, total_semesters) VALUES("%s", "%s", "%s", "%s", "%s", "%s", %i)' % (student)
            insert += ' ON DUPLICATE KEY UPDATE student_name="%s", pass_word="%s", start_semester="%s", current_semester="%s", ' \
                      'expected_graduation="%s", total_semesters=%i' % tuple(student[1:])
            engine.raw_operation(insert)

            for e in enrollments:
                insert = 'INSERT INTO student_info.enrollments VALUES("%s", "%s", "%s", "%s", %i)' % tuple(e)
                insert += ' ON DUPLICATE KEY UPDATE semester="%s", semester_taken="%s", rating=%i' % tuple(e[2:])
                engine.raw_operation(insert)

            for t in tracks:
                insert = 'INSERT INTO student_info.track VALUES("%s", "%s", %i, "%s")' % tuple(t)
                insert += ' ON DUPLICATE KEY UPDATE interest = "{}", credit_hours={}'.format(t[1], t[2])
                engine.raw_operation(insert)

            if 'DeletedMajor' in data or 'DeletedMinor' in data:
                tracks_to_delete = data['DeletedMajor'] + data['DeletedMinor']
                for track in tracks_to_delete:
                    engine.drop_rows('DELETE FROM student_info.track WHERE '
                                     'track.net_id = "%s" AND track.field_name = "%s"' % (student[0], track))

            return 'Successful submission'
    return "Invalid input"
6,318
def get_target_external_resource_ids(relationship_type, ctx_instance):
    """Gets a list of target node ids connected via a relationship to a node.

    :param relationship_type: A string representing the type of relationship.
    :param ctx_instance: The Cloudify ctx node instance context.
    :returns: a list of security group ids.
    """
    ids = []

    if not getattr(ctx_instance, 'relationships', []):
        ctx.logger.info('Skipping attaching relationships, '
                        'because none are attached to this node.')
        return ids

    for r in ctx_instance.relationships:
        if relationship_type in r.type:
            ids.append(
                r.target.instance.runtime_properties[
                    constants.EXTERNAL_RESOURCE_ID])

    return ids
6,319
def split_multibody(beam, tstep, mb_data_dict, ts):
    """
    split_multibody

    This function splits a structure at a certain time step into its different bodies

    Args:
        beam (:class:`~sharpy.structure.models.beam.Beam`): structural information of the multibody system
        tstep (:class:`~sharpy.utils.datastructures.StructTimeStepInfo`): timestep information of the multibody system
        mb_data_dict (dict): Dictionary including the multibody information
        ts (int): time step number

    Returns:
        MB_beam (list(:class:`~sharpy.structure.models.beam.Beam`)): each entry represents a body
        MB_tstep (list(:class:`~sharpy.utils.datastructures.StructTimeStepInfo`)): each entry represents a body
    """
    MB_beam = []
    MB_tstep = []

    for ibody in range(beam.num_bodies):
        ibody_beam = None
        ibody_tstep = None
        ibody_beam = beam.get_body(ibody=ibody)
        ibody_tstep = tstep.get_body(beam, ibody_beam.num_dof, ibody=ibody)
        ibody_beam.FoR_movement = mb_data_dict['body_%02d' % ibody]['FoR_movement']

        if ts == 1:
            ibody_beam.ini_info.pos_dot *= 0
            ibody_beam.timestep_info.pos_dot *= 0
            ibody_tstep.pos_dot *= 0
            ibody_beam.ini_info.psi_dot *= 0
            ibody_beam.timestep_info.psi_dot *= 0
            ibody_tstep.psi_dot *= 0

        MB_beam.append(ibody_beam)
        MB_tstep.append(ibody_tstep)

    return MB_beam, MB_tstep
6,320
def test_biomass_open_production(model, reaction_id):
    """
    Expect biomass production in complete medium.

    Using flux balance analysis this test optimizes the model for growth using
    a complete medium i.e. unconstrained boundary reactions. Any non-zero
    growth rate is accepted to pass this test.

    Implementation:
    Calculate the solution of FBA with the biomass reaction set as objective
    function and after removing any constraints from all boundary reactions.
    """
    ann = test_biomass_open_production.annotation
    helpers.open_boundaries(model)
    ann["data"][reaction_id] = helpers.run_fba(model, reaction_id)
    outcome = ann["data"][reaction_id] > 1e-07
    ann["metric"][reaction_id] = 1.0 - float(outcome)
    ann["message"][reaction_id] = wrapper.fill(
        """Using the biomass reaction {} this is the growth rate that can be
        achieved when the model is simulated on a complete medium i.e. with
        all the boundary reactions unconstrained: {}""".format(
            reaction_id, ann["data"][reaction_id]
        )
    )
    assert outcome, ann["message"][reaction_id]
6,321
def qname_decode(ptr, message, raw=False):
    """Read a QNAME from pointer and respect labels."""
    def _rec(name):
        ret = []
        while name and name[0] > 0:
            length = int(name[0])
            if (length & 0xC0) == 0xC0:
                offset = (length & 0x03) << 8 | int(name[1])
                comps, _ = _rec(message[offset:])
                ret += comps
                name = name[1:]
                break

            ret.append(name[1 : 1 + length])
            name = name[length + 1 :]

        return ret, name

    name_components, rest = _rec(ptr)
    if raw:
        return name_components, rest[1:]

    return ".".join([x.decode("utf-8") for x in name_components]), rest[1:]
6,322
def coordinates_within_board(n: int, x: int, y: int) -> bool:
    """Are the given coordinates inside the board?"""
    return 0 <= x < n and 0 <= y < n
6,323
def check_for_header(header, include_dirs, define_macros):
    """Check for the existence of a header file by creating a small
    program which includes it and see if it compiles."""
    program = "#include <%s>\n" % header
    sys.stdout.write("Checking for <%s>... " % header)
    success = see_if_compiles(program, include_dirs, define_macros)
    if success:
        sys.stdout.write("OK\n")
    else:
        sys.stdout.write("Not found\n")
    return success
6,324
def make_dvh_metric_diff_plots(df_dvh_metrics: pd.DataFrame, constants: ModelParameters):
    """
    Generates box plots to visualize distribution of DVH point differences

    Args:
        df_dvh_metrics: Set of DVH metrics
        constants: Model constants
    """
    # Prep dvh metrics for analysis
    df_to_plot = df_dvh_metrics.unstack((0, 2, 3)).melt()
    df_to_plot.drop(columns=[None], axis=1, inplace=True)  # Drops prediction name

    # Merge the melted data
    df_to_plot.dropna(axis=0, inplace=True)
    df_to_plot.set_index('Metric', inplace=True)
    sns.reset_defaults()

    # Iterate through each type of DVH metric (e.g., D_mean, D_99)
    for m in df_to_plot.index.unique():
        constants.reset_plot_fonts()
        data_to_plot = df_to_plot.loc[m].copy(deep=True)
        data_to_plot.replace(constants.structure_printing, inplace=True)
        data_to_plot.replace(constants.optimization_short_hands_dict, inplace=True)

        # Set plot titles
        if m in ['D_99', 'D_95']:
            title = r'Better $\longrightarrow$'
            data_to_plot.value *= -1  # Correct the negative values that were used previously
            alternative_hyp = 'greater'
        else:
            title = r'$\longleftarrow$ Better'
            alternative_hyp = 'less'

        # Prepare data (split on OAR and target criteria)
        if m in ['mean', 'D_0.1_cc']:  # OAR criteria
            limits = [-45, 12]
            plt.figure(figsize=(constants.line_width / 2.06, 3))
            structure_order = constants.rois_plotting_order['oars']
            plt.xticks(np.arange(-40, 11, 10))
        else:  # Target criteria
            plt.figure(figsize=(constants.line_width / 3.15, 1.735))
            limits = [-10.25, 8]
            structure_order = constants.rois_plotting_order['targets']
            plt.xticks(np.arange(-10, 6, 5))

        # Do Mann-Whitney U test to test difference between prediction and plans
        pred_values = data_to_plot[data_to_plot['Dose_type'] == 'Prediction']
        plan_values = data_to_plot[data_to_plot['Dose_type'] != 'Prediction']
        p_values = pd.DataFrame(plan_values.groupby(['Structure']).apply(
            lambda x: mannwhitneyu(
                x.value,
                pred_values[pred_values['Structure'] == x.iloc[0, 1]].value,
                alternative=alternative_hyp
            )[1]))  # [1] retrieves the p value from the Mann-Whitney U test

        # Prepare p value to print in figure
        p_values['p'] = p_values.applymap(lambda x: '{:.3f}'.format(x))
        p_values['equal'] = '$P=$'
        p_values = p_values['equal'].str.cat(p_values['p'])
        p_values = p_values.replace({'$P=$0.000': '$P<$0.001'})

        # Generate box plot
        number_of_structures = plan_values.Structure.unique().shape[0]
        y = np.arange(-0.5, number_of_structures - 0.5, 0.0001)
        ax = sns.boxplot(data=data_to_plot, x='value', y='Structure', showfliers=False,
                         hue='Dose_type', linewidth=1,
                         hue_order=[*constants.optimization_short_hands_dict.values()],
                         order=structure_order, zorder=2, boxprops={'zorder': 2})
        ax.set_ylim((number_of_structures - 0.5, -0.5))
        ax.fill_betweenx(y, limits[0], limits[1],
                         where=np.round(y).__mod__(2) == 1,
                         facecolor='grey', alpha=0.15, zorder=1)

        # Add p values to right axis
        ax2 = ax.twinx()  # instantiate a second axes that shares the same x-axis
        ax2.set_yticks(ax.get_yticks())
        ax2.set_yticklabels(p_values.values.squeeze())
        ax2.set_ylim(ax.get_ylim())

        # Format figure and save
        ax.axvline(0, ls='--', color='black', linewidth=1.5, zorder=-1)
        ax.set_title(title)
        ax.set_xlim(limits)
        ax.set_xlabel(f'{constants.dvh_metric_axis_dict[m]} (Gy)')
        ax.set_ylabel(None)
        save_plot(constants, f'{m} error', 5, ax=ax)
6,325
def test_get_labeled_stats(client):
    """Test get all papers classified as prior documents"""
    response = client.get("/api/projects/project-id/labeled_stats")
    json_data = response.get_json()

    assert isinstance(json_data, dict)
    assert "n_prior" in json_data
    assert json_data["n_prior"] == 2
6,326
def hook_name_to_env_name(name, prefix='HOOKS'):
    """
    >>> hook_name_to_env_name('foo.bar_baz')
    'HOOKS_FOO_BAR_BAZ'
    >>> hook_name_to_env_name('foo.bar_baz', 'PREFIX')
    'PREFIX_FOO_BAR_BAZ'
    """
    return '_'.join([prefix, name.upper().replace('.', '_')])
6,327
def get_coefficients():
    """Returns the global scaling dictionary."""
    global COEFFICIENTS
    if COEFFICIENTS is None:
        COEFFICIENTS = TransformedDict()
        COEFFICIENTS["[length]"] = 1.0 * u.meter
        COEFFICIENTS["[mass]"] = 1.0 * u.kilogram
        COEFFICIENTS["[time]"] = 1.0 * u.year
        COEFFICIENTS["[temperature]"] = 1.0 * u.degK
        COEFFICIENTS["[substance]"] = 1.0 * u.mole
    return COEFFICIENTS
6,328
def get_distance_from_guide_alignment(data, guide_data, reference_index_key="position",
                                      minus_strand=False):
    """Calculate the distance of input data alignment to the guide alignment.

    :param data: input data with at least "raw_start", "raw_length", and
        reference_index_key fields
    :param guide_data: guide alignment data
    :param reference_index_key: key to grab reference index from data
    :param minus_strand: boolean option if data is aligned to minus strand
    :return: modified data with "guide_delta" field
    """
    variant_data = data.sort_values(by=reference_index_key)
    if minus_strand:
        guide_data = guide_data[::-1]
    distance_to_guide = []
    variant_index = 0
    len_variant_data = len(variant_data)
    v_position = variant_data.iloc[variant_index][reference_index_key]
    for i, guide in enumerate(guide_data.itertuples()):
        if getattr(guide, "reference_index") >= v_position:
            if getattr(guide, "reference_index") == v_position:
                guide_index = i
            else:
                guide_index = i - 1
            v_position_middle = (variant_data.iloc[variant_index]["raw_start"]
                                 + (variant_data.iloc[variant_index]["raw_length"] / 2))
            guide_middle_position = np.round(
                (guide_data.iloc[guide_index]["raw_start"]
                 + (guide_data.iloc[guide_index]["raw_length"] / 2)))
            distance_to_guide.append(v_position_middle - guide_middle_position)
            variant_index += 1
            if variant_index < len_variant_data:
                v_position = variant_data.iloc[variant_index][reference_index_key]
            else:
                break

    distance = pd.DataFrame(distance_to_guide, columns=['guide_delta'])
    final_data = pd.concat([variant_data, distance], axis=1)
    return final_data
6,329
def get_info(font):
    """Currently wraps the infoFont call, but I would like to add a JSON
    representation of this data to better display the individual details
    of the font."""
    return pyfiglet.FigletFont.infoFont(font)
6,330
def gevent_pywsgi_write(self, data):
    """
    Monkey-patched version of the `gevent.pywsgi.WSGIHandler.write` method
    that ensures that the passed `data` (which may be a unicode string) is
    encoded to UTF8 bytes prior to being written.
    """
    if self.code in (304, 204) and data:
        raise self.ApplicationError('The %s response must have no body' % self.code)

    # Guard against unicode strings being concatenated with `bytes`.
    if isinstance(data, str):
        data = data.encode("utf-8")

    if self.headers_sent:
        self._write(data)
    else:
        if not self.status:
            raise self.ApplicationError("The application did not call start_response()")
        self._write_with_headers(data)
6,331
def readline_skip_comments(f):
    """
    Read a new line while skipping comments.
    """
    l = f.readline().strip()
    while len(l) > 0 and l[0] == '#':
        l = f.readline().strip()
    return l
6,332
def test_addDropShadowComplex_BGGreen():
    """
    Manual Check

    Desired Output: A square image 512x512 pixels with a black shadow
    to the bottom right on a green background
    """
    io.saveImage(
        OUTPUT + "/test_addDropShadowComplex_BGGreen.png",
        effects.addDropShadowComplex(IMAGE, 5, 50, [50, 50], "#00ff00", "#000000"),
    )
6,333
def _verify_weight_parameters(weight_parameters):
    """Verifies the format of the input `weight_parameters`.

    Checks that the input is a 2-tuple of tensors of equal shape.

    Args:
        weight_parameters: The parameters to check.

    Raises:
        RuntimeError: If the input is not a 2-tuple of tensors with equal shape.

    Returns:
        The input `weight_parameters`.
    """
    if len(weight_parameters) != 2:
        raise RuntimeError("Incorrect number of weight parameters. Expected "
                           "2 tensors, got {}".format(len(weight_parameters)))
    if weight_parameters[0].shape != weight_parameters[1].shape:
        raise RuntimeError("Expected theta and log alpha parameter tensor "
                           "to be same shape. Got shapes {} and {}"
                           .format(weight_parameters[0].get_shape().as_list(),
                                   weight_parameters[1].get_shape().as_list()))
    return weight_parameters
6,334
def send_notification(lira_url, auth_dict, notification):
    """Send a notification to a given Lira.

    Args:
        lira_url (str): A typical Lira url, e.g. https://pipelines.dev.data.humancellatlas.org/
        auth_dict (dict): Dictionary contains credentials for authenticating with Lira.
            It should have 'method' and 'value' as keys.
        notification (dict): A dict of notification content.

    Returns:
        requests.Response: The response object returned by Lira.
    """
    if auth_dict['method'] == 'token':
        response = requests.post(
            url=harmonize_url(lira_url) + 'notifications',
            json=notification,
            params={'auth': auth_dict['value']['auth_token']},
        )
    else:
        auth = HTTPSignatureAuth(
            key_id=auth_dict['value']['hmac_key_id'],
            key=auth_dict['value']['hmac_key_value'].encode('utf-8'),
        )
        response = requests.post(
            url=harmonize_url(lira_url) + 'notifications',
            json=notification,
            auth=auth
        )
    return response
6,335
def beam():
    """
    \b
    (ASCII-art "Beam Pilot" banner)

    Beam Pilot - Cosmos Infrastructure Manager
    """
6,336
def fill_missing_node_names(tree):
    """Names nodes in the tree without a name.

    Parameters
    ----------
    tree : empress.Tree
        Input tree with potentially unnamed nodes (i.e. nodes' .name
        attributes can be None).

    Notes
    -----
    The tree is modified in place: every unnamed node is assigned a name
    of the form 'EmpressNode<N>'.
    """
    current_unlabeled_node = 0
    new_names = np.full(tree.B.size, None, dtype=object)
    for node_idx in tree.postorder(include_self=True):
        if tree.bp_tree.name(node_idx) is None:
            new_name = 'EmpressNode{}'.format(current_unlabeled_node)
            new_names[node_idx] = new_name
            current_unlabeled_node += 1
        else:
            new_names[node_idx] = tree.bp_tree.name(node_idx)
    tree.bp_tree.set_names(new_names)
6,337
def calc_vfactor(atm="H2", LJPparam=None):
    """
    Args:
        atm: molecule the atmosphere consists of: "H2", "O2", or "N2"
        LJPparam: Custom Lennard-Jones Potential Parameters (d (cm) and epsilon/kB)

    Returns:
        vfactor: dynamic viscosity factor for Rosner eta = viscosity*T**0.66
        Trange: applicable temperature range (K, K)

    Note:
        The dynamic viscosity is from the Rosner book (3-2-12) and caption in p106
        Hirschfelder et al. (1954) within Trange.
    """
    from exojax.spec.molinfo import molmass
    from exojax.utils.constants import kB, m_u

    mu = molmass(atm)
    if LJPparam is None:
        LJPparam_d, LJPparam_epsilon_per_kB = get_LJPparam()
        epsilon_per_kB = LJPparam_epsilon_per_kB[atm]
        d = LJPparam_d[atm]
    else:
        epsilon_per_kB = LJPparam[0]
        d = LJPparam[1]

    vfactor = 5.0/16.0*np.sqrt(np.pi*kB*mu*m_u)/(np.pi*d*d)/1.22*(1.0/epsilon_per_kB)**0.16
    Trange = [3.0*epsilon_per_kB, 200.0*epsilon_per_kB]

    return vfactor, Trange
6,338
def _crc16_checksum(bytes):
    """Returns the CRC-16 checksum of bytearray bytes

    Ported from Java implementation at:
    http://introcs.cs.princeton.edu/java/61data/CRC16CCITT.java.html

    Initial value changed to 0x0000 to match Stellar configuration.
    """
    crc = 0x0000
    polynomial = 0x1021

    for byte in bytes:
        for i in range(8):
            bit = (byte >> (7 - i) & 1) == 1
            c15 = (crc >> 15 & 1) == 1
            crc <<= 1
            if c15 ^ bit:
                crc ^= polynomial

    return crc & 0xFFFF
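With init 0x0000 and polynomial 0x1021 this matches the CRC-16/XMODEM parameterization, whose standard check value over the ASCII digits "123456789" is 0x31C3 -- a quick added sanity check:

    assert _crc16_checksum(b"123456789") == 0x31C3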
6,339
def create_readme(target_path, context):
    """Create README.rst for test project

    :param target_path: The path to the outer directory where the package
        directory is contained
    :param context: Jinja context used to render template
    """
    target_path = os.path.abspath(target_path)
    template_path = templates.project_root.get_path()
    create_file_from_template(template_path, target_path, 'README.rst', context)
6,340
def is_absolute_href(href):
    """Determines if an HREF is absolute or not.

    Args:
        href (str): The HREF to consider.

    Returns:
        bool: True if the given HREF is absolute, False if it is relative.
    """
    parsed = urlparse(href)
    return parsed.scheme != '' or os.path.isabs(parsed.path)
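A few added examples of the expected behaviour (both URL schemes and absolute filesystem paths count as absolute):

    assert is_absolute_href("https://example.com/catalog.json")
    assert is_absolute_href("/data/catalog.json")
    assert not is_absolute_href("./catalog.json")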
6,341
def method_get_func(model, fields="__all__", need_user=False, **kwargs) -> Callable:
    """Generate a GET accessor for a model."""
    async def list(page: Dict[str, int] = Depends(paging_query_depend),
                   user: User = Depends(create_current_active_user(need_user))):
        """
        get
        :return:
        """
        table = model.__table__
        # Page `page_number` with `page_size` rows per page; defaults to the first page.
        if fields == "__all__":
            query = table.select().offset(
                (page['page_number'] - 1) * page['page_size']).limit(
                page['page_size'])
        else:
            query = table.select([getattr(model.__table__.c, i) for i in fields]).offset(
                (page['page_number'] - 1) * page['page_size']).limit(
                page['page_size'])
        paginate_obj = await AdminDatabase().database.fetch_all(query)
        query2 = select([func.count(table.c.id)])
        total_page = await AdminDatabase().database.fetch_val(query2)
        print("Note: consider the doubled cost of querying twice")
        return {
            "page_count": int(math.ceil(total_page * 1.0 / page['page_size'])),
            "rows_total": total_page,
            "page_number": page['page_number'],
            "page_size": page['page_size'],
            "data": paginate_obj
        }

    return list
6,342
def get_mysqlops_connections():
    """Get a connection to mysqlops for reporting

    Returns:
        A mysql connection
    """
    (reporting_host, port, _, _) = get_mysql_connection('mysqlopsdb001')
    reporting = HostAddr(''.join((reporting_host, ':', str(port))))
    return connect_mysql(reporting, 'scriptrw')
6,343
def _get_params(conv_layer, bn_layer, relu_layer=None):
    """Retrieve conv_bn params within wrapped layers."""
    if 'use_bias' in conv_layer['config']:
        if conv_layer['config']['use_bias']:
            raise ValueError(
                'use_bias should not be set to True in a Conv layer when followed '
                'by BatchNormalization. The bias in the Conv would be redundant '
                'with the one in the BatchNormalization.')

        del conv_layer['config']['use_bias']

    if 'name' in bn_layer['config']:
        del bn_layer['config']['name']

    # TODO(pulkitb): remove key conflicts
    params = dict(
        list(conv_layer['config'].items()) + list(bn_layer['config'].items()))

    if relu_layer is not None:
        params['post_activation'] = keras.layers.deserialize(relu_layer)

    return params
6,344
def parse_amconll(fil, validate: bool = True) -> Iterable[AMSentence]:
    """
    Reads a file and returns a generator over AM sentences.

    :param fil:
    :return:
    """
    expect_header = True
    new_sentence = True
    entries = []
    attributes = dict()
    for line in fil:
        line = line.rstrip("\n")
        if line.strip() == "":
            # sentence finished
            if len(entries) > 0:
                sent = AMSentence(entries, attributes)
                if validate:
                    sent.check_validity()
                yield sent
            new_sentence = True

        if new_sentence:
            expect_header = True
            attributes = dict()
            entries = []
            new_sentence = False
            if line.strip() == "":
                continue

        if expect_header:
            if line.startswith("#"):
                key, val = line[1:].split(":", maxsplit=1)
                attributes[key] = val
            else:
                expect_header = False

        if not expect_header:
            fields = line.split("\t")
            assert len(fields) == 12 or len(fields) == 13
            if len(fields) == 12:  # id + entry but no token ranges
                entries.append(
                    Entry(fields[1], fields[2], fields[3], fields[4], fields[5],
                          fields[6], fields[7], fields[8], int(fields[9]),
                          fields[10], bool(fields[11]), None))
            elif len(fields) == 13:
                entries.append(
                    Entry(fields[1], fields[2], fields[3], fields[4], fields[5],
                          fields[6], fields[7], fields[8], int(fields[9]),
                          fields[10], bool(fields[11]), fields[12]))
6,345
def reverse(segment):
    """Reverses the track"""
    return segment.reverse()
6,346
def rmse(y, y_pred):
    """Returns the root mean squared error between
    ground truths and predictions.
    """
    return np.sqrt(mse(y, y_pred))
6,347
def _remove_comment_rel_rev(connection, rel_id):
    """Removes relationships revision for comment.

    Args:
        connection: An instance of SQLAlchemy connection.
        rel_id: Id of comment relationship.
    Returns:
        -
    """
    utils.add_to_objects_without_revisions(
        connection,
        rel_id,
        "Relationship",
        action="deleted",
    )
6,348
def cdm_cluster_location_command(client: PolarisClient, args: Dict[str, Any]):
    """
    Find the CDM GeoLocation of a CDM Cluster.

    :type client: ``PolarisClient``
    :param client: Rubrik Polaris client to use

    :type args: ``dict``
    :param args: arguments obtained from demisto.args()

    :return: CommandResult object
    """
    cluster_id = validate_required_arg("clusterId", args.get('clusterId'))

    raw_response = client.get_cdm_cluster_location(cluster_id)

    if raw_response == "No Location Configured":
        return CommandResults(readable_output=MESSAGES['NO_RESPONSE'])

    hr_content = {"Location": raw_response}
    hr = tableToMarkdown("CDM Cluster Location", hr_content, headers="Location",
                         removeNull=True)
    context = {
        "ClusterId": cluster_id.lower(),
        "Cluster": {
            "Location": raw_response
        }
    }

    return CommandResults(outputs_prefix=OUTPUT_PREFIX['CDM_CLUSTER'],
                          outputs_key_field="ClusterId",
                          readable_output=hr,
                          outputs=context,
                          raw_response=raw_response)
6,349
def prepare_cases(cases, cutoff=25):
    """Clean cases per day for Rt estimation."""
    new_cases = cases.diff()

    smoothed = new_cases.rolling(7,
                                 win_type='gaussian',
                                 min_periods=1,
                                 center=True).mean(std=2).round()

    idx_start = np.searchsorted(smoothed, cutoff)
    smoothed = smoothed.iloc[idx_start:]
    original = new_cases.loc[smoothed.index]

    return original, smoothed
6,350
def load_data():
    """Load data into the database."""
    if not args.populate:
        log.info("Data is loaded in Memgraph.")
        return
    log.info("Loading data into Memgraph.")
    try:
        memgraph.drop_database()
        load_twitch_data(memgraph)
    except Exception as e:
        log.info("Data loading error: %s", e)
6,351
def wrap_profile_folders(mount_point: Path, output_folder: Path) -> None:
    """
    Load some tiffs with cuCIM and OpenSlide, save them, and run line_profile.

    :return: None.
    """
    def wrap_profile_folders() -> None:
        profile_folders(mount_point, output_folder)

    lp = LineProfiler()
    lp.add_function(profile_cucim)
    lp.add_function(profile_openslide)
    lp.add_function(profile_folder)
    lp_wrapper = lp(wrap_profile_folders)
    lp_wrapper()
    with open("outputs/profile_folders.txt", "w", encoding="utf-8") as f:
        lp.print_stats(f)
6,352
def utility(board):
    """
    Returns 1 if X has won the game, -1 if O has won, 0 otherwise.
    """
    if winner(board) == 'X':
        return 1
    elif winner(board) == 'O':
        return -1
    else:
        return 0
6,353
def shard(group, num_shards):
    """Breaks the group apart into num_shards shards.

    Args:
        group: a breakdown, perhaps returned from categorize_files.
        num_shards: The number of shards into which to break down the group.

    Returns:
        A list of shards.
    """
    shards = []
    for i in range(num_shards):
        shards.append(LanguageBreakdown())

    pos = 0
    for kind, files in group.kinds.items():
        for filename in files:
            shards[pos].kinds[kind].append(filename)
            pos = (pos + 1) % num_shards
    return shards
6,354
def is_bond_member(yaml, ifname):
    """Returns True if this interface is a member of a BondEthernet."""
    if "bondethernets" not in yaml:
        return False
    for _bond, iface in yaml["bondethernets"].items():
        if "interfaces" not in iface:
            continue
        if ifname in iface["interfaces"]:
            return True
    return False
6,355
def add_nexus_nodes(generator, vds_file_path):
    """Add in the additional information to make this into a standard nexus
    format file:-
    (a) create the standard structure under the 'entry' group with a subgroup
        for each dataset. 'set_bases' lists the data sets we make here.
    (b) save a dataset for each axis in each of the dimensions of the scan
        representing the demand position at every point in the scan.
    """
    # create the axes dimensions attribute, a comma separated list giving size
    # of the axis dimensions padded with . for the detector dimensions and
    # multidimensional dimensions
    pad_dims = []
    for d in generator.dimensions:
        if len(d.axes) == 1:
            pad_dims.append("%s_set" % d.axes[0])
        else:
            pad_dims.append(".")
    pad_dims += ["."] * 2  # assume a 2 dimensional detector

    with h5py.File(vds_file_path, "r+", libver="latest") as vds:
        for data, node in zip(set_data, set_bases):
            # create a group for this entry
            vds.require_group(node)
            # points to the axis demand data sets
            vds[node].attrs["axes"] = pad_dims
            vds[node].attrs["NX_class"] = ["NXdata"]
            # points to the detector dataset for this entry
            vds[node].attrs["signal"] = data.split("/")[-1]
            # a hard link from this entry 'signal' to the actual data
            vds[node + data] = vds[data]

            axis_sets = {}
            # iterate the axes in each dimension of the generator to create
            # the axis information nodes
            for i, d in enumerate(generator.dimensions):
                for axis in d.axes:
                    # add signal data dimension for axis
                    axis_indices = "{}_set_indices".format(axis)
                    vds[node].attrs[axis_indices] = i

                    # demand positions for axis
                    axis_set = "{}_set".format(axis)
                    if axis_sets.get(axis_set):
                        # link to the first entry's demand list
                        vds[node + axis_set] = axis_sets[axis_set]
                    else:
                        # create the demand list for the first entry only
                        axis_demands = d.get_positions(axis)
                        vds.create_dataset(node + axis_set, data=axis_demands)
                        vds[node + axis_set].attrs["units"] = generator.units[axis]
                    axis_sets[axis_set] = vds[node + axis_set]

        vds["entry"].attrs["NX_class"] = ["NXentry"]
6,356
def create_table(dataset_id, table_id, project=None):
    """Creates a simple table in the given dataset.

    If no project is specified, then the currently active project is used.
    """
    bigquery_client = bigquery.Client(project=project)
    dataset_ref = bigquery_client.dataset(dataset_id)
    table_ref = dataset_ref.table(table_id)

    table = bigquery.Table(table_ref)

    # Set the table schema
    table.schema = (
        bigquery.SchemaField('Name', 'STRING'),
        bigquery.SchemaField('Age', 'INTEGER'),
        bigquery.SchemaField('Weight', 'FLOAT'),
    )

    table = bigquery_client.create_table(table)

    print('Created table {} in dataset {}.'.format(table_id, dataset_id))
6,357
def get_id(group):
    """Get the GO identifier from a list of GO term properties.

    Finds the first match to the id pattern.

    Args:
        group (List[str])

    Returns:
        str
    """
    return first_match(group, go_id).split(':', 1)[1].strip()
6,358
def generateLogNormalVariate(mu, sigma):
    """RV generated using rejection method"""
    while True:
        u1 = random.uniform(0, 1)
        u2 = random.uniform(0, 1)
        x = -1*math.log(u1)
        if u2 > math.exp(-1*math.pow((x-1), 2)/2):
            continue
        # x is now a half-normal variate; attach a random sign so the exponent
        # is a full N(0, 1) draw (the standard final step of the rejection
        # method, missing from the original)
        if random.uniform(0, 1) < 0.5:
            x = -x
        return math.exp(mu+(sigma*x))
6,359
def test_a_list_of_sub_resource_generates_okay():
    """
    Test that we generate subresources as expected when they are a list
    """
    data = {
        "author": [
            {
                "name": "This is the subresource"
            },
            {
                "name": "This is another subresource"
            }
        ],
        "slug": "this-is-the-resource",
        "another_thing": "this-is-also-the-resource"
    }

    instance = SubResourcePeopleResource(**data)
    assert isinstance(instance.author, list)
    assert instance.author[0].name == 'This is the subresource'
    assert instance.author[1].name == 'This is another subresource'
    assert instance.slug == 'this-is-the-resource'
6,360
def replace_color_codes(text, replacement):
    """Replace ANSI color sequences in a given string.

    Args:
        text (str): Original string to replace color codes in.
        replacement (str): String to replace color codes with.

    Returns:
        str: Mutated string after the replacement.
    """
    return re.sub(COLOR_CODE_REGEX, replacement, text)
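The COLOR_CODE_REGEX constant is not shown in this entry; a plausible definition (an assumption, not the original) would match CSI/SGR escape sequences such as "\x1b[31m":

    import re

    COLOR_CODE_REGEX = re.compile(r"\x1b\[[0-9;]*m")
    # replace_color_codes("\x1b[31mred\x1b[0m", "") -> "red"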
6,361
def get_uti_for_extension(extension):
    """get UTI for a given file extension"""
    if not extension:
        return None

    # accepts extension with or without leading period
    if extension[0] == ".":
        extension = extension[1:]

    if (OS_VER, OS_MAJOR) <= (10, 16):
        # https://developer.apple.com/documentation/coreservices/1448939-uttypecreatepreferredidentifierf
        with objc.autorelease_pool():
            uti = CoreServices.UTTypeCreatePreferredIdentifierForTag(
                CoreServices.kUTTagClassFilenameExtension, extension, None
            )
            if uti:
                return uti
            # on MacOS 10.12, HEIC files are not supported and
            # UTTypeCopyPreferredTagWithClass will return None for HEIC
            if extension.lower() == "heic":
                return "public.heic"
            return None

    uti = _get_uti_from_ext_dict(extension)
    if uti:
        return uti

    uti = _get_uti_from_mdls(extension)
    if uti:
        # cache the UTI
        EXT_UTI_DICT[extension.lower()] = uti
        UTI_EXT_DICT[uti] = extension.lower()
        return uti

    return None
6,362
def test_compose_2() -> None:
    """Verify composition of Sim2 objects works for non-identity input."""
    aSb = Sim2(R=rotmat2d(np.deg2rad(90)), t=np.array([1, 2]), s=4)
    bSc = Sim2(R=rotmat2d(np.deg2rad(-45)), t=np.array([3, 4]), s=0.5)

    aSc = aSb.compose(bSc)
    # Via composition: 90 + -45 = 45 degrees
    assert aSc.theta_deg == 45.0
    # Via composition: 4 * 0.5 = 2.0
    assert aSc.scale == 2.0
6,363
def test_one_parameter_marked_only(param):
    """Case with parametrized argument and mark applied to the single one param"""
6,364
def load_testingData(tempTrainingVectors, tempTestingVectors):
    """
    TODO: Merge load_testingData() and load_trainingData() functions

    This reads file DSL-StrongPasswordData.csv and returns the testing data
    in an ndarray of shape tempTestingVectors*noOfFeatures and target ndarray
    of shape (tempTestingVectors*noOfTotalClasses)*1.
    """
    dataset = np.empty([0, noOfFeatures])
    target = np.empty(0)

    file = open(datasetPath)
    reader = csv.reader(file)
    next(reader)  # Skip the header row

    for i in range(noOfTotalClasses):
    # for i in range(noOfTotalClasses+1):
        # Skip s002
        # if i == 0:
        #     for j in range(noOfTotalVectors):
        #         tempData = next(reader)    # Discard one vector
        #     continue

        for j in range(tempTrainingVectors):  # Discard training vectors now
            tempData = next(reader)           # Discard one vector

        for j in range(tempTestingVectors):
            tempData = next(reader)           # Read one vector
            currentSubject = tempData[0]      # Save subject's name
            for k in range(3):                # Discard first 3 values
                del tempData[0]
            tempData = list(map(float, tempData))
            tempData = np.array(tempData, ndmin=2)
            dataset = np.append(dataset, tempData, axis=0)
            target = np.append(target, [currentSubject], axis=0)

        # Discard the rest of the unused vectors now
        for j in range(noOfTotalVectors - tempTrainingVectors - tempTestingVectors):
            tempData = next(reader)           # Discard one vector

    return dataset, target
6,365
async def anext(*args):
    """Retrieve the next item from the async generator by calling its
    __anext__() method. If default is given, it is returned if the iterator
    is exhausted, otherwise StopAsyncIteration is raised.
    """
    if len(args) < 1:
        raise TypeError(
            f"anext expected at least 1 arguments, got {len(args)}")
    aiterable, default, has_default = args[0], None, False
    if len(args) > 2:
        raise TypeError(f"anext expected at most 2 arguments, got {len(args)}")
    if len(args) == 2:
        default = args[1]
        has_default = True

    try:
        return await aiterable.__anext__()
    except (StopAsyncIteration, CancelledError) as exc:
        if has_default:
            return default
        raise StopAsyncIteration() from exc
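A small added demo exercising both the one- and two-argument forms (the two-argument form returns the default once the generator is exhausted):

    import asyncio
    from asyncio import CancelledError  # also needed by anext's except clause above

    async def _demo():
        async def gen():
            yield 1

        agen = gen()
        assert await anext(agen) == 1
        assert await anext(agen, "done") == "done"

    asyncio.run(_demo())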
6,366
def __prepare_arguments_for_d3_data(db_arguments, edge_type):
    """
    :param db_arguments:
    :param edge_type:
    :return:
    """
    all_ids = []
    nodes = []
    edges = []
    extras = {}
    LOG.debug("Enter private function to prepare arguments for d3")

    # for each argument edges will be added as well as the premises
    for argument in db_arguments:
        counter = 1
        # we have an argument with:
        # 1) one premise and no undercut is done on this argument
        # 2) at least two premises, one conclusion, or an undercut is done
        #    on this argument
        db_premises = DBDiscussionSession.query(Premise).filter(
            Premise.premisegroup_uid == argument.premisegroup_uid,
            Premise.is_disabled == False).all()
        db_undercuts = DBDiscussionSession.query(Argument).filter_by(
            argument_uid=argument.uid).all()

        # target of the edge (case 1) or last edge (case 2)
        target = 'argument_' + str(argument.argument_uid)
        if argument.conclusion_uid is not None:
            target = 'statement_' + str(argument.conclusion_uid)

        if len(db_premises) == 1 and len(db_undercuts) == 0:
            __add_edge_to_dict(edges, argument, counter, db_premises[0], target, edge_type)
        else:
            __add_edge_and_node_to_dict(edges, nodes, all_ids, argument, counter,
                                        db_premises, target, edge_type)

    return all_ids, nodes, edges, extras
6,367
def delete_file(work_path, file_path):
    """We are only interested in maps, delete any other un-needed files."""
    print(" * Deleting " + file_path)
    (work_path / file_path).unlink()
6,368
def get_service_gateway(client: VirtualNetworkClient = None,
                        compartment_id: str = None,
                        vcn_id: str = None) -> List[ServiceGateway]:
    """
    Returns a complete, unfiltered list of Service Gateways of a vcn in the
    compartment.
    """
    service_gateway = []

    # returns ServiceGateway models, not route tables
    service_gateway_raw = client.list_service_gateways(compartment_id=compartment_id,
                                                       vcn_id=vcn_id)
    service_gateway.extend(service_gateway_raw.data)
    while service_gateway_raw.has_next_page:
        service_gateway_raw = client.list_service_gateways(
            compartment_id=compartment_id,
            vcn_id=vcn_id,
            page=service_gateway_raw.next_page)
        service_gateway.extend(service_gateway_raw.data)

    return service_gateway
6,369
def get_road_network_data(city='Mumbai'):
    """Load the road network edge list for a city and return node coordinates
    and a sparse weighted graph (largest connected component only)."""
    data = pd.read_csv("./RoadNetwork/"+city+"/"+city+"_Edgelist.csv")
    size = data.shape[0]

    X = np.array(data[['XCoord', 'YCoord']])
    u, v = np.array(data['START_NODE'], dtype=np.int32), np.array(data['END_NODE'], dtype=np.int32)
    w = np.array(data['LENGTH'], dtype=np.float64)
    w = w/np.max(w) + 1e-6

    G = sp.sparse.csr_matrix((w, (u, v)), shape=(size, size))

    n, labels = sp.sparse.csgraph.connected_components(G)
    if n == 1:
        # return coordinates alongside the graph, for consistency with the
        # multi-component branch below (the original returned only G here)
        return X, G

    # If there is more than one connected component,
    # return the largest connected component
    count_size_comp = np.bincount(labels)
    z = np.argmax(count_size_comp)
    indSelect = np.where(labels == z)
    Gtmp = G[indSelect].transpose()[indSelect]
    Gtmp = make_undirected(Gtmp)

    return X[indSelect], Gtmp
6,370
def main_json(config, in_metadata, out_metadata):
    """
    Alternative main function
    -------------

    This function launches the app using configuration written in two json
    files: config.json and input_metadata.json.
    """
    # 1. Instantiate and launch the App
    logger.info("1. Instantiate and launch the App")
    from apps.jsonapp import JSONApp
    app = JSONApp()
    result = app.launch(process_fastqc, config, in_metadata, out_metadata)

    # 2. The App has finished
    logger.info("2. Execution finished; see " + out_metadata)

    return result
6,371
def test_datasource(request, dataset, dataset_fixture):
    """Test ZarrDataSource.

    Data is saved to file and opened by the data source."""
    odc_dataset_ = request.getfixturevalue(dataset_fixture)
    group_name = list(dataset.keys())[0]
    source = ZarrDataSource(BandInfo(odc_dataset_, group_name))
    with source.open() as band_source:
        ds = band_source.read()
        assert np.array_equal(ds.squeeze(), dataset[group_name].values.squeeze())
6,372
def _infection_active(state_old, state_new):
    """
    Parameters
    ----------
    state_old : dict or pd.Series
        Dictionary or pd.Series with the keys "s", "e", "i", and "r".
    state_new : dict or pd.Series
        Same type requirements as for the `state_old` argument in this
        function apply.

    Returns
    -------
    infection_active : bool
        True if the event that occurred between `state_old` and `state_new`
        was a transition from E to I. False otherwise.
    """
    return state_new["s"] == state_old["s"] and \
        state_new["e"] == state_old["e"] - 1 and \
        state_new["i"] == state_old["i"] + 1 and \
        state_new["r"] == state_old["r"]
6,373
def isnum(txt):
    """Return True if `txt` can be parsed as a float."""
    try:
        float(txt)
        return True
    except TypeError:
        return False
    except ValueError:
        return False
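A few added examples: anything float() accepts passes, and both TypeError (e.g. None) and ValueError (e.g. non-numeric text) are treated as "not a number":

    assert isnum("3.14") and isnum("-2")
    assert not isnum("abc") and not isnum(None)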
6,374
def xhr(func):
    """A decorator to check for CSRF on POST/PUT/DELETE using a <form> element
    and JS to execute automatically (see #40 for a proof-of-concept).

    When an attacker uses a <form> to downvote a comment, the browser
    *should* add a `Content-Type: ...` header with three possible values:

    * application/x-www-form-urlencoded
    * multipart/form-data
    * text/plain

    If the header is not sent or requests `application/json`, the request is
    not forged (XHR is restricted by CORS separately).
    """
    def dec(self, env, req, *args, **kwargs):
        if req.content_type and not req.content_type.startswith("application/json"):
            raise Forbidden("CSRF")
        return func(self, env, req, *args, **kwargs)

    return dec
6,375
def updateStyle(style, **kwargs):
    """Update a copy of a dict or the dict for the given style"""
    if not isinstance(style, dict):
        style = getStyle(style)  # look up the style by name
    style = style.copy()
    style.update(**kwargs)
    return style
6,376
def lift2(f, a, b):
    """Apply f => (a -> b -> c) -> f a -> f b -> f c"""
    return a.map(f).apply_to(b)
6,377
def project_version(file_path=settings.PROJECT_VERSION_FILE):
    """Project version, or None if the version file cannot be read or parsed."""
    try:
        with open(file_path) as file_obj:
            version = file_obj.read()
        return parse_version(version)
    except Exception:
        return None
6,378
def readBinary(fileName): """Read a binary FIXSRC file.""" with FIXSRC(fileName, "rb", numpy.zeros((0, 0, 0, 0))) as fs: fs.readWrite() return fs.fixSrc
6,379
def short_whitelist(whitelist): """A condensed version of the whitelist.""" for x in ["guid-4", "guid-5"]: whitelist.remove(x) return whitelist
6,380
def extract_paths(actions): """ <Purpose> Given a list of actions, it extracts all the absolute and relative paths from all the actions. <Arguments> actions: A list of actions from a parsed trace <Returns> absolute_paths: a list with all absolute paths extracted from the actions relative_paths: a list with all relative paths extracted from the actions """ absolute_paths = [] relative_paths = [] actions_with_path = ['open', 'creat', 'statfs', 'access', 'stat', 'link', 'unlink', 'chdir', 'rmdir', 'mkdir'] for action in actions: # get the name of the syscall and remove the "_syscall" part at the end. action_name = action[0][:action[0].find("_syscall")] # we only care about actions containing paths if action_name not in actions_with_path: continue # we only care about paths that exist action_result = action[2] if action_result == (-1, 'ENOENT'): continue path = action[1][0] if path.startswith("/"): if path not in absolute_paths: absolute_paths.append(path) else: if path not in relative_paths: relative_paths.append(path) # get the second path of link if action_name == "link": path = action[1][1] if path.startswith("/"): if path not in absolute_paths: absolute_paths.append(path) else: if path not in relative_paths: relative_paths.append(path) return absolute_paths, relative_paths
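# An illustrative call with a hand-built action list in the shape the parser
# is assumed to produce (syscall name, argument tuple, result tuple); the
# values below are hypothetical.
actions = [
    ("open_syscall", ("/etc/hosts", ["O_RDONLY"], 0), (3, None)),
    ("stat_syscall", ("conf/app.cfg",), (0, None)),
    ("unlink_syscall", ("/tmp/missing",), (-1, 'ENOENT')),  # skipped: ENOENT
]
absolute_paths, relative_paths = extract_paths(actions)
print(absolute_paths)  # ['/etc/hosts']
print(relative_paths)  # ['conf/app.cfg']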
6,381
def str2int(s):
    """converts a string to an integer with the same bit pattern, treating
    the first character as the most significant byte (big-endian)"""
    r = 0
    for c in s:
        r <<= 8
        r += ord(c)
    return r
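# A quick check of the byte ordering, plus the stdlib equivalent for bytes:
assert str2int("\x01\x02") == 0x0102
assert int.from_bytes(b"\x01\x02", "big") == 0x0102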
6,382
def load_operations_from_docstring(docstring):
    """Return a dictionary of OpenAPI operations parsed from a docstring.
    """
    doc_data = load_yaml_from_docstring(docstring)
    return {
        key: val for key, val in iteritems(doc_data)
        if key in PATH_KEYS or key.startswith('x-')
    }
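# A hedged illustration of the kind of docstring this parses; it assumes
# load_yaml_from_docstring reads the YAML block after the "---" marker and
# that PATH_KEYS contains the HTTP verbs.
def get_pet(pet_id):
    """Fetch a pet.
    ---
    get:
        description: Get a pet by ID
        responses:
            200:
                description: the requested pet
    """

operations = load_operations_from_docstring(get_pet.__doc__)
# -> {'get': {'description': 'Get a pet by ID', 'responses': {...}}}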
6,383
def inference_model(model, img):
    """Inference image(s) with the segmentor.

    Args:
        model (nn.Module): The loaded segmentor.
        img (str/ndarray): The image filename or loaded image.

    Returns:
        result (list of dict): The segmentation results that contain: ...
    """
    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    # build the data pipeline
    if isinstance(img, str):
        if cfg.data.test.pipeline[0]['type'] != 'LoadImageFromFile':
            cfg.data.test.pipeline.insert(0, dict(type='LoadImageFromFile'))
        data = dict(img_info=dict(filename=img), img_prefix=None)
    else:
        if cfg.data.test.pipeline[0]['type'] == 'LoadImageFromFile':
            cfg.data.test.pipeline.pop(0)
        data = dict(img=img)
    test_pipeline = Compose(cfg.data.test.pipeline)
    data = test_pipeline(data)
    data = collate([data], samples_per_gpu=1)
    if next(model.parameters()).is_cuda:
        # scatter to specified GPU
        data = scatter(data, [device])[0]

    # forward the model
    with torch.no_grad():
        scores = model(return_loss=False, **data)

        model_out = scores[0]
        m_shape = model_out.shape
        mask = np.zeros([m_shape[0], m_shape[1], 3], dtype=np.uint8)
        for i in range(3):
            mask[:, :, i] = model_out

        ann = sly.Annotation.from_img_path(img)

        for idx, class_name in enumerate(model.CLASSES, 1):  # curr_col2cls.items():
            mask_bools = np.all(mask == idx, axis=2)  # exact match (3-channel img & rgb color)
            if mask_bools.sum() == 0:  # raise
                continue
            bitmap = sly.Bitmap(data=mask_bools)
            obj_class = g.meta.get_obj_class(class_name)
            # obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap)
            ann = ann.add_label(sly.Label(bitmap, obj_class))
            #  clear used pixels in mask to check missing colors, see below

    return ann.to_json()
6,384
def run_egad(go, nw, **kwargs):
    """EGAD running function

    Wrapper to lower level functions for EGAD

    EGAD measures modularity of gene lists in co-expression networks.

    This was translated from the MATLAB version, which does tiled Cross Validation

    The useful kwargs are:
    int - nFold : Number of CV folds to do, default is 3,
    int - {min,max}_count : limits for number of terms in each gene list,
        these are exclusive values

    Arguments:
        go {pd.DataFrame} -- dataframe of genes x terms with values in [0,1],
            where 1 means the gene is included in that gene list
        nw {pd.DataFrame} -- dataframe of co-expression network, genes x genes
        **kwargs

    Returns:
        pd.DataFrame -- dataframe of terms x metrics where the metrics are
        ['AUC', 'AVG_NODE_DEGREE', 'DEGREE_NULL_AUC', 'P_Value']
    """
    assert nw.shape[0] == nw.shape[1], 'Network is not square'
    assert np.all(nw.index == nw.columns), 'Network index and columns are not in the same order'

    nw_mask = nw.isna().sum(axis=1) != nw.shape[1]
    nw = nw.loc[nw_mask, nw_mask].astype(float)
    np.fill_diagonal(nw.values, 1)
    return _runNV(go, nw, **kwargs)
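# A hedged usage sketch with toy inputs; real runs would use genome-scale
# annotation and co-expression matrices, and the kwarg values here are only
# examples.
genes = ['g1', 'g2', 'g3', 'g4']
go = pd.DataFrame([[1, 0], [1, 0], [0, 1], [0, 1]],
                  index=genes, columns=['termA', 'termB'])
nw = pd.DataFrame(np.random.rand(4, 4), index=genes, columns=genes)
nw = (nw + nw.T) / 2  # symmetrize so it resembles a co-expression network

metrics = run_egad(go, nw, nFold=2)
print(metrics[['AUC', 'P_Value']])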
6,385
def prepare_new(): """ Handles a request to add a prepare project configuration. This is the first step in a two-step process. This endpoint generates a form to request information about the new project from the user. Once the form is submitted a request is sent to begin editing the new config. """ issues = {} issue_list = github_call( "GET", f'repos/{app.config["GITHUB_ORG"]}/{editor_types["registry"]["repo"]}/issues', params={"state": "open", "labels": "new ontology"}, ) for issue in issue_list: number = issue["number"] title = issue["title"] logger.debug(f"Got issue: {number}, {title}") issues[number] = title return render_template( "prepare_new_config.jinja2", login=g.user.github_login, issueList=issues )
6,386
def generic_plot(xy_curves, title, save_path, x_label=None, y_label=None, formatter=None, use_legend=True, use_grid=True, close=True, grid_spacing=20, yaxis_sci=False):
    """
    :param xy_curves: iterable of curve objects with x, y, style, label and optional color attributes
    :param title: plot title
    :param save_path: where to save the figure; pass None to skip saving
    :param x_label: optional x-axis label
    :param y_label: optional y-axis label
    :param formatter: optional callable used as the x-axis major tick formatter
    :param use_legend: whether to draw the legend
    :param use_grid: whether to draw the grid
    :return: the matplotlib figure
    """
    fig, ax = plt.subplots()
    plt.title(title)
    plt.grid(use_grid)
    for curve in xy_curves:
        if curve.color is not None:
            ax.plot(curve.x, curve.y, curve.style, label=curve.label, color=curve.color)
        else:
            ax.plot(curve.x, curve.y, curve.style, label=curve.label)

    if formatter is not None:
        ax.xaxis.set_major_formatter(plt.FuncFormatter(formatter))

    ax.xaxis.set_major_locator(MultipleLocator(grid_spacing))
    ax.yaxis.set_major_formatter(FormatStrFormatter('%.2e'))

    if x_label is not None:
        plt.xlabel(x_label)

    if y_label is not None:
        plt.ylabel(y_label)

    #ax.margins(0.05)

    if use_legend:
        ax.legend()

    """if yaxis_sci:
        ax.ticklabel_format(axis='y', style='sci', scilimits=(0,0), useOffset=None)"""

    if save_path is not None:
        plt.savefig(save_path, bbox_inches='tight', transparent=True)

    if close:
        plt.close('all')

    return fig
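# A hedged usage sketch; the Curve holder is hypothetical and simply provides
# the x/y/style/label/color attributes the function reads.
from collections import namedtuple
import numpy as np

Curve = namedtuple('Curve', ['x', 'y', 'style', 'label', 'color'])

t = np.linspace(0, 100, 200)
curves = [
    Curve(t, np.sin(t / 10), '-', 'sin', None),
    Curve(t, np.cos(t / 10), '--', 'cos', 'tab:orange'),
]
fig = generic_plot(curves, 'Demo', 'demo.png', x_label='t', y_label='value')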
6,387
def dump(module, path, **kwargs):
    """Serialize *module* as PVL text to the provided *path*.

    :param module: a ``PVLModule`` or ``dict``-like object to serialize.
    :param path: an :class:`os.PathLike`
    :param ``**kwargs``: the keyword arguments to pass to :func:`dumps()`.

    If *path* is an :class:`os.PathLike`, it will be opened and the
    serialized module will be written into that file via the
    :func:`pathlib.Path.write_text()` function, and this function will
    return what that function returns.

    If *path* is not an :class:`os.PathLike`, it will be assumed to be an
    already-opened file object, and ``.write()`` will be applied on that
    object to write the serialized module, and this function will return
    what that function returns.
    """
    try:
        p = Path(path)
        return p.write_text(dumps(module, **kwargs))

    except TypeError:
        # Not an os.PathLike, maybe it is an already-opened file object
        try:
            if isinstance(path, io.TextIOBase):
                return path.write(dumps(module, **kwargs))
            else:
                return path.write(dumps(module, **kwargs).encode())
        except AttributeError:
            # Not a path, not an already-opened file.
            raise TypeError(
                "Expected an os.PathLike or an already-opened "
                "file object for writing, but got neither."
            )
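# A short usage sketch; pvl.PVLModule is assumed from the pvl library, and
# the module contents are illustrative.
import pvl

module = pvl.PVLModule(foo='bar', answer=42)
dump(module, 'out.lbl')             # via an os.PathLike
with open('out2.lbl', 'w') as f:
    dump(module, f)                 # via an already-opened file object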
6,388
def get_user_labels(client: Client, *_): """ Returns all user Labels Args: client: Client """ labels = client.get_user_labels_request() contents = [] for label in labels: contents.append({ 'Label': label }) context = { 'Exabeam.UserLabel(val.Label && val.Label === obj.Label)': contents } human_readable = tableToMarkdown('Exabeam User Labels', contents) return human_readable, context, labels
6,389
def hdf5_sample(sample, request): """Fixture which provides the filename of a HDF5 tight-binding model.""" return sample(request.param)
6,390
def get_graph_embedding_features(fn='taxi_all.txt'):
    """
    Get graph embedding vectors generated by LINE.
    Rows are sorted by node id (the first column), which is then dropped.
    """
    ge = []
    with open(fn, 'r') as fin:
        fin.readline()  # skip the header line
        for line in fin:
            ls = line.strip().split(" ")
            ge.append([float(i) for i in ls])
    ge = np.array(ge)
    ge = ge[np.argsort(ge[:,0])]
    return ge[:,1:]
6,391
def sub_0_tron(D, Obj, W0, eta=1e0, C=1.0, rtol=5e-2, atol=1e-4,
               verbose=False):
    """Solve the Sub_0 problem with TRON+CG, as in lelm-imf."""
    W, f_call = W0.copy(), (f_valp, f_grad, f_hess)

    tron(f_call, W.reshape(-1), n_iterations=5, rtol=rtol, atol=atol,
         args=(Obj, D, eta, C), verbose=verbose)

    return W
6,392
def find_flavor_name(nova_connection: NovaConnection, flavor_id: str):
    """
    Find all flavor names from nova_connection matching the id flavor_id

    :param nova_connection: NovaConnection
    :param flavor_id: str flavor id to find
    :return: list of flavor names
    """
    flavor_list = []
    for flavor in nova_connection.connection.flavors.list():
        flavor_info = dict(flavor.to_dict())
        if 'id' in flavor_info and 'name' in flavor_info and flavor_info['id'] == flavor_id:
            flavor_list.append(flavor_info['name'])
    return flavor_list
6,393
def entity_decode(txt):
    """decode simple entities"""
    # TODO: find out what ones twitter considers defined,
    # or if sgmllib.entitydefs is enough...
    # Note: "&amp;" must be replaced last, otherwise "&amp;gt;" would be
    # double-decoded to ">" instead of "&gt;".
    return txt.replace("&gt;", ">").replace("&lt;", "<").replace("&amp;", "&")
6,394
def setup_configs():
    """ Sets up the default log and config paths """
    log_dir = '/var/log/cerberus'
    athos_log = os.path.join(log_dir, 'cerberus.log')
    conf_dir = '/etc/cerberus'
    conf_file = os.path.join(conf_dir, 'topology.json')
    rollback_dir = '/etc/cerberus/rollback'
    failed_dir = '/etc/cerberus/failed'
    umbrella_json = os.path.join(conf_dir, 'umbrella.json')
    default_umbrella = resource_filename(__name__, '/etc/cerberus/umbrella.json')
    default_conf = resource_filename(__name__, '/etc/cerberus/topology.json')
    try:
        if not os.path.exists(log_dir):
            print(f"Creating log dir: {log_dir}")
            os.makedirs(log_dir)
        if not os.path.isfile(athos_log):
            open(athos_log, 'a').close()
        if not os.path.exists(conf_dir):
            print(f"Creating config dir: {conf_dir}")
            os.makedirs(conf_dir)
            os.makedirs(rollback_dir)
            os.makedirs(failed_dir)
        if not os.path.exists(rollback_dir):
            print(f"Creating rollback dir: {rollback_dir}")
            os.makedirs(rollback_dir)
        if not os.path.exists(failed_dir):
            print(f"Creating failed config dir: {failed_dir}")
            os.makedirs(failed_dir)
        if not os.path.isfile(conf_file):
            print("Setting up default config for topology")
            print(f"Copying: {default_conf} to {conf_file}")
            copyfile(default_conf, conf_file)
        if not os.path.isfile(umbrella_json):
            print("Setting up default p4 umbrella json")
            print(f"Copying: {default_umbrella} to {umbrella_json}")
            copyfile(default_umbrella, umbrella_json)
    except OSError as exc_info:
        if exc_info.errno == errno.EACCES:
            print(f"Permission denied when creating {exc_info.filename}\n" +
                  "Are you running as root?")
        else:
            raise
    except Exception as exc_info:
        print(f"Error setting up default configs.\nError:\n{exc_info}")
6,395
def __format_focal_length_tuple(_tuple):
    """Format a FocalLength tuple as a short printable string.

    The position after the decimal point is ignored because it is usually
    not very essential for focal length.
    """
    if isinstance(_tuple, tuple):
        numerator = _tuple[0]
        divisor = _tuple[1]
    else:
        numerator = _tuple.numerator
        divisor = _tuple.denominator
    if numerator == 0:
        return get_zero_value_ersatz()
    if numerator % 10 == 0 and divisor % 10 == 0:
        # example: change 110/10 -> 11
        numerator = numerator // 10
        divisor = divisor // 10
    if divisor == 1:
        # example: change 8/1 to 8mm
        _string = f"{numerator}mm"
    else:
        # example: 524/10 -> 52mm
        _string = f"{numerator//divisor}mm"
    return _string
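# Quick checks of the three formatting branches; the function is pure, so
# these run standalone for any nonzero input.
assert __format_focal_length_tuple((524, 10)) == "52mm"  # decimal part dropped
assert __format_focal_length_tuple((110, 10)) == "11mm"  # 110/10 reduced first
assert __format_focal_length_tuple((8, 1)) == "8mm"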
6,396
def submit_resume_file(request): """ Submit resume """ resume_file = request.FILES['json_file'] # print('resume file=%s' % resume_file) file_content = resume_file.read() data = json.loads(file_content.decode('utf-8')) response = create_resume(data, request.user) return response
6,397
def read_from_env(export_format=None, export_file_path=None, deep_scan=False, sev=None):
    """
    Collect requirements from the environment, scan them and show reports
    """
    print(stylize('Started Scanning .....', colored.fg("green")))
    print('\n')
    data_dict = {}
    secure_data_dict = []
    data_dict['pyraider'] = []
    data_dict['version'] = '1.0.14'
    vul_package_count = 0
    if deep_scan:
        data = scan_vulnerabilities()
    else:
        data = scan_light_vulnerabilities()
    dists = [d for d in pkg_resources.working_set]
    for pkg in dists:
        convert_str = str(pkg)
        package = convert_str.split()
        req_name = package[0].lower()
        req_version = package[1]
        scanned_data = scanned_vulnerable_data(data, req_name, req_version, sev)
        if bool(scanned_data):
            vul_package_count += 1
            if not export_format:
                show_vulnerablities(scanned_data, sev)
            if export_format in ('json', 'csv', 'html'):
                data_dict['pyraider'].append(scanned_data)
    if not export_format:
        show_secure_packages(secure_data_dict)
    if export_format == 'json':
        export_to_json(data_dict, export_file_path)
    elif export_format == 'csv':
        export_to_csv(data_dict, export_file_path)
    elif export_format == 'html':
        export_to_html(data_dict, export_file_path)
    if vul_package_count == 0:
        print(stylize('No known vulnerabilities found', colored.fg("green")))
6,398
def inner_E_vals(vec):
    """
    Return a list of the mean-centered terms of the expectation sum, without
    dividing by the length or by length minus one.

    This is meant to be used in conjunction with an inner product of two
    inner_E_vals() lists to compute variance or covariance.
    """
    out = [None] * len(vec)
    dm = data_mean(vec)
    for i, item in enumerate(vec):
        out[i] = item - dm
    return out
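# A small worked example: the inner product of two centered lists, divided
# by n - 1, is the sample covariance (data_mean is assumed to be the plain
# arithmetic mean).
x = [1.0, 2.0, 3.0, 4.0]
y = [2.0, 4.0, 6.0, 8.0]
cx, cy = inner_E_vals(x), inner_E_vals(y)
cov_xy = sum(a * b for a, b in zip(cx, cy)) / (len(x) - 1)
print(cov_xy)  # 3.333..., the sample covariance of x and y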
6,399