content
stringlengths 22
815k
| id
int64 0
4.91M
|
---|---|
def test_assistant_config_no_config_file(fs):
    """Test whether an `AssistantConfig` object raises
    `AssistantConfigNotFoundError` when there's no assistant configuration
    found in the search path.

    :param fs: pyfakefs fixture providing an empty fake filesystem, so no
        config file can possibly be found.
    """
    # The constructor itself is expected to raise; binding the instance to an
    # unused local (as before) served no purpose.
    with pytest.raises(AssistantConfigNotFoundError):
        AssistantConfig()
| 8,400 |
def is_open(state: int) -> bool:
    """Return whether the given cell state is the OPEN state."""
    # NOTE(review): the previous docstring said "position (x, y)", but the
    # argument is a state id compared against states_id.OPEN — confirm intent.
    return state == states_id.OPEN
| 8,401 |
def gen_model_for_cp2k_simple(molten_salt_system):
    """
    Description:
        generate initial configuration for pure halide salt by pymatgen module, and transfer cif format to pdb format by Openbabel module
    Args:
        molten_salt_system: name of the simulated system; also used as the
            base name for the generated .cif/.pdb files
    Returns:
        no return (writes <system>.cif and <system>.pdb into
        dir_molten_salt_system as a side effect)
    """
    # Relies on module-level state set elsewhere: the working directory and
    # the target atom count for the supercell.
    global dir_molten_salt_system
    global total_atoms
    os.chdir(dir_molten_salt_system)
    ICSD_para_list = get_info_from_ICSD_file(molten_salt_system)
    spacegroup, unit_cell_list, element_list, fraction_coords_list, Z_number = get_structure_parameters(ICSD_para_list)
    print(spacegroup, unit_cell_list, element_list, fraction_coords_list, Z_number)
    # use pymatgen module to build initial configuration
    lattice = pmg.Lattice.from_parameters(a=unit_cell_list[0], b=unit_cell_list[1], c=unit_cell_list[2], alpha=unit_cell_list[3], beta=unit_cell_list[4], gamma=unit_cell_list[5])
    struct = pmg.Structure.from_spacegroup(spacegroup, lattice, element_list, fraction_coords_list)
    # Cubic replication factor: cube root of (target atoms / atoms per cell),
    # rounded — so the actual atom count only approximates total_atoms.
    Nx = Ny = Nz = round(pow(total_atoms/struct.num_sites, 1/3))
    # make supercell, cif format
    struct.make_supercell([Nx, Ny, Nz])
    # write structure into a cif file
    w = cif.CifWriter(struct)
    w.write_file(molten_salt_system+'.cif')
    # Convert cif -> pdb via the obabel CLI (must be on PATH).
    # NOTE(review): command is built by string concatenation — unsafe if the
    # system name ever contains shell metacharacters; consider subprocess.run.
    os.system('obabel -icif '+molten_salt_system+'.cif -opdb -O '+molten_salt_system+'.pdb')
    # clean up the converted file, e.g. delete excess bonds/angles/impropers
    modify_pdb(molten_salt_system)
    # if initial configuration is nonorthogonal, then make a transformation by put the nonorthogonal system into a big orthogonal box using packmol module and get a orthogonal pdb file which is used by cp2k
    if unit_cell_list[3] != 90 or unit_cell_list[4] != 90 or unit_cell_list[5] != 90:
        print('nonorthogonal system! need transfored to orthogonal system!')
        change2orthogonal(molten_salt_system, unit_cell_list, Nx, Ny, Nz)
| 8,402 |
def sendJabber(sender,
               password,
               receivers,
               body,
               senderDomain=NOTIFY_IM_DOMAIN_SENDER,
               receiverDomain=NOTIFY_IM_DOMAIN_RECEIVER):
    """
    Sends an instant message to the inputted receivers from the
    given user.  The senderDomain is an override to be used
    when no domain is supplied, same for the receiverDomain.

    :param      sender          <str>
    :param      password        <str>
    :param      receivers       <list> [ <str>, .. ]
    :param      body            <str>
    :param      senderDomain    <str>
    :param      receiverDomain  <str>

    :return     <bool> success  (True if at least one message was sent)
    """
    # Imported lazily so the xmpp dependency is only required when IM
    # notification is actually used.
    import xmpp

    # make sure there is a proper domain as part of the sender
    if '@' not in sender:
        sender += '@' + senderDomain

    # create a jabber user connection
    user = xmpp.protocol.JID(sender)

    # create a connection to an xmpp client
    client = xmpp.Client(user.getDomain(), debug=[])
    connection = client.connect(secure=0, use_srv=False)
    if not connection:
        text = 'Could not create a connection to xmpp (%s)' % sender
        err = errors.NotifyError(text)
        logger.error(err)
        return False

    # authenticate the session
    auth = client.auth(user.getNode(), password, user.getResource())
    if not auth:
        # NOTE(review): this logs the plaintext password — consider redacting.
        text = 'Jabber not authenticated: (%s, %s)' % (sender, password)
        err = errors.NotifyError(text)
        logger.error(err)
        return False

    count = 0

    # send the message to the inputted receivers
    for receiver in receivers:
        if '@' not in receiver:
            receiver += '@' + receiverDomain

        # create the message
        msg = xmpp.protocol.Message(receiver, body)

        # create the html message
        html_http = {'xmlns': 'http://jabber.org/protocol/xhtml-im'}
        html_node = xmpp.Node('html', html_http)
        # NOTE(review): looks Python 2-era. Under Python 3, interpolating the
        # encoded bytes below yields a literal "b'...'" in the XHTML body, and
        # the body is not XML-escaped either — confirm target runtime.
        enc_msg = body.encode('utf-8')
        xml = '<body xmlns="http://www.w3.org/1999/xhtml">%s</body>' % enc_msg
        html_node.addChild(node=xmpp.simplexml.XML2Node(xml))
        msg.addChild(node=html_node)
        client.send(msg)
        count += 1
    return count > 0
| 8,403 |
def loss_plot(model, model_str: str, color: str):
    """Plot training/validation loss (left) and MAE (right) curves.

    Args:
        model: object exposing a Keras-style ``history`` mapping with keys
            ``loss``/``val_loss``/``mae``/``val_mae``.
        model_str: label prefix shown in the legends.
        color: matplotlib color shared by the solid (train) and dashed
            (validation) curves of each panel.
    """
    fig, (loss_axis, mae_axis) = plt.subplots(1, 2, figsize=(18, 6))
    panels = (("loss", "loss", loss_axis), ("mae", "MAE", mae_axis))
    for history_key, legend_label, axis in panels:
        axis.plot(
            model.history[history_key],
            label=f"{model_str} {legend_label}",
            color=color,
        )
        axis.plot(
            model.history[f"val_{history_key}"],
            label=f"{model_str} val {legend_label}",
            color=color,
            ls="--",
        )
        axis.legend()
| 8,404 |
def variant_sanity_check(m):
    """Convenience function. Given an initialized model try and do a sanity check test with it.

    Verifies each variant produced by ``m.get_variants`` is consistent with
    the module-level reference sequence ``ref[1]['seq']``.
    """
    ref_seq = ref[1]['seq']
    # Fixed seed keeps the generated variants reproducible across runs.
    pos, stop, refs, alts, p = m.get_variants(ref_seq, seed=10)
    if len(pos) == 0:
        raise SkipTest('The defaults do not yield any variants to test')
    for p, s, r, a in zip(pos, stop, refs, alts):
        # The REF allele must start at the reported position on the reference.
        assert r[0] == ref_seq[p]
        if len(r) != len(a):
            # For indels, the ALT allele shares the same anchor base.
            assert a[0] == ref_seq[p]
        # End coordinate equals start plus REF allele length.
        assert s == p + len(r)
| 8,405 |
def directory_log_summary(config):
    """
    Summarise the input/output directories and key mobsim settings from a
    MATSim config as a list of text log lines, for jobs submitted via the
    Bitsim Orchestration.

    :param config: nested mapping mirroring the MATSim config structure.
    :return: list of log lines (strings).
    """
    mobsim_name = config['controler']['mobsim']
    lines = [f"Date:{date.today()}"]
    # Input file paths.
    lines.append("{:=^100s}".format("input files"))
    lines.append(f"network_path:{config['network']['inputNetworkFile']}")
    lines.append(f"plans_path:{config['plans']['inputPlansFile']}")
    lines.append(f"schedule_path:{config['transit']['transitScheduleFile']}")
    lines.append(f"vehicles_path:{config['transit']['vehiclesFile']}")
    # Output directory.
    lines.append("{:=^100s}".format("output directory"))
    lines.append(f"output_directory:{config['controler']['outputDirectory']}")
    # Mobsim settings — capacity factors live under the section named by
    # the configured mobsim.
    lines.append("{:=^100s}".format("mobsim setting"))
    lines.append(f"mobsim:{mobsim_name}")
    lines.append(f"Flow_Capacity_Factor:{config[mobsim_name]['flowCapacityFactor']}")
    lines.append(f"Storage_Capacity_Factor:{config[mobsim_name]['storageCapacityFactor']}")
    return lines
| 8,406 |
def search():
    """
    Searches for users with their name. Excludes the logged in user.

    Expects a JSON request body containing a ``search_term`` key; returns a
    ``{"result": [...]}`` payload of public user data with HTTP 200.
    """
    data = json.loads(request.data)
    search_term = data['search_term']
    # Resolve the requester from the JWT so they can be excluded below.
    this_user = interface.get_user_by_id(get_jwt_identity())
    users = interface.search_users(search_term)
    result = [user.get_public_data() for user in users if not user.id == this_user.id]
    return {"result": result}, 200
| 8,407 |
def test_export_excel():
    """Test set the data in an excel file.

    Round-trips tests/tracking.txt through Load.export(fmt="excel") and
    checks each filter option (keys, indexes, ids, and combinations) against
    the equivalent pandas selection on the raw file.
    """
    reference = pandas.read_csv("tests/tracking.txt", sep='\t')
    tracking = load.Load("tests/tracking.txt")
    # Full export: must match the raw file exactly.
    tracking.export("tests/test.xlsx", fmt="excel")
    test = pandas.read_excel("tests/test.xlsx")
    pandas.testing.assert_frame_equal(reference, test)
    # Column subset via keys=.
    tracking.export("tests/test.xlsx", keys=["imageNumber"], fmt="excel")
    test = pandas.read_excel("tests/test.xlsx")
    pandas.testing.assert_frame_equal(reference[["imageNumber"]], test)
    # Row subset by frame index via indexes=.
    tracking.export("tests/test.xlsx", indexes=[1], fmt="excel")
    test = pandas.read_excel("tests/test.xlsx")
    pandas.testing.assert_frame_equal(
        reference[reference.imageNumber == 1].reset_index(drop=True), test)
    # Row subset by object id via ids=.
    tracking.export("tests/test.xlsx", ids=[0], fmt="excel")
    test = pandas.read_excel("tests/test.xlsx")
    pandas.testing.assert_frame_equal(
        reference[reference.id == 0].reset_index(drop=True), test)
    # Combined filter: id AND frame index.
    tracking.export("tests/test.xlsx", ids=[0], indexes=[0], fmt="excel")
    test = pandas.read_excel("tests/test.xlsx")
    pandas.testing.assert_frame_equal(reference[(reference.id == 0) & (
        reference.imageNumber == 0)].reset_index(drop=True), test)
| 8,408 |
def get_optics_mode(optics_mode, energy=energy):
    """Return magnet strengths of a given optics mode.

    Strengths are linearly interpolated in energy between the 150 MeV
    (injection) settings and the 3 GeV settings.
    """
    if optics_mode != 'M0':
        raise _pyacc_acc.AcceleratorException('Optics mode not recognized.')
    # 2019-08-01 Murilo
    # tunes fitted to [19.20433 7.31417] for new dipoles segmented model
    low_energy = {
        'qf': 1.65380213538720,
        'qd': -0.00097311784326,
        'qs': 0.0,
        'sf': 11.32009586848142,
        'sd': 10.37672159358045,
    }
    high_energy = {
        'qf': 1.65458216649285,
        'qd': -0.11276026973021,
        'qs': 0.0,
        'sf': 11.30745884748409,
        'sd': 10.52221952522381,
    }
    # Interpolation fraction: 0 at 150 MeV, 1 at 3 GeV.
    frac = (energy - 0.15e9) / (3e9 - 0.15e9)
    return {
        name: low_energy[name] + frac * (high_energy[name] - low_energy[name])
        for name in low_energy
    }
| 8,409 |
def load_stock_order():
    """Load order csv files from ./auto_order_dir and rename each imported
    file to a backup ending '.yyyy-mm-dd HH_MM_SS<ext>.bak'.

    Returns:
        DataFrame indexed by zero-padded 6-digit stock code with columns
        final_position / ref_price / wap_mode, or None when no csv exists.

    Raises:
        ValueError: on duplicate stock codes or missing required columns.
    """
    base_dir = './auto_order_dir'
    file_name_list = os.listdir(base_dir)
    if not file_name_list:
        log.info('No file')
    # BUG FIX: data_df was previously only bound inside conditional branches,
    # raising NameError when the directory contained no csv files.
    data_df = None
    for file_name in file_name_list:
        file_base_name, file_extension = os.path.splitext(file_name)
        if file_extension != '.csv':
            continue
        file_path = os.path.join(base_dir, file_name)
        data_df_tmp = pd.read_csv(file_path, index_col='CodeDigit', header=0, skipinitialspace=True)
        # BUG FIX: normalize EVERY file's index to zero-padded 6-digit codes;
        # the original only normalized the first file read.
        data_df_tmp.index = ['%06d' % stock_code for stock_code in data_df_tmp.index]
        if data_df is None:
            data_df = data_df_tmp
        else:
            # DataFrame.append was removed in pandas 2.0; concat is equivalent.
            data_df = pd.concat([data_df, data_df_tmp])
        backup_file_name = file_base_name + datetime.now().strftime('%Y-%m-%d %H_%M_%S') + file_extension + '.bak'
        os.rename(file_path, os.path.join(base_dir, backup_file_name))
    if data_df is not None:
        has_error = False
        # 重复数据检测
        for name, index in data_df.groupby(level=0).groups.items():
            if len(index) > 1:
                has_error = True
                log.error('%s 存在%d条重复数据', name, len(index))
        # Required input columns before renaming to internal names.
        col_name_set = set(data_df.columns)
        for col_name in {'Lot', 'TargetPrice', 'Algo'}:
            if col_name not in col_name_set:
                has_error = True
                log.error('stock_target_df should has %s column', col_name)
        if has_error:
            raise ValueError('csv 文件存在格式或内容问题')
        data_df.rename(columns={
            'Lot': 'final_position',
            'TargetPrice': 'ref_price',
            'Algo': 'wap_mode',
        }, inplace=True)
    return data_df
| 8,410 |
def _get_last_block_in_previous_epoch(
    constants: ConsensusConstants,
    sub_height_to_hash: Dict[uint32, bytes32],
    sub_blocks: Dict[bytes32, SubBlockRecord],
    prev_sb: SubBlockRecord,
) -> SubBlockRecord:
    """
    Retrieves the last block (not sub-block) in the previous epoch, which is infused before the last sub-block in
    the epoch. This will be used for difficulty adjustment.
    Args:
        constants: consensus constants being used for this chain
        sub_height_to_hash: sub-block height to header hash map for sub-blocks in peak path
        sub_blocks: dict from header hash to sub-block of all relevant sub-blocks
        prev_sb: last-sub-block in the current epoch.
          prev epoch surpassed  prev epoch started                  epoch sur.  epoch started
           v                       v                                v         v
      |.B...B....B. B....B...|......B....B.....B...B.|.B.B.B..|..B...B.B.B...|.B.B.B. B.|........
            PREV EPOCH                 CURR EPOCH                               NEW EPOCH
     The sub-blocks selected for the timestamps are the last sub-block which is also a block, and which is infused
     before the final sub-block in the epoch. Block at height 0 is an exception.
    # TODO: check edge cases here
    """
    # Worst-case height reachable in the next epoch; used to locate the epoch
    # boundary heights below.
    height_in_next_epoch = prev_sb.sub_block_height + constants.MAX_SUB_SLOT_SUB_BLOCKS + 3
    # First height at/above which the (next) epoch is "surpassed".
    height_epoch_surpass: uint32 = uint32(height_in_next_epoch - (height_in_next_epoch % constants.EPOCH_SUB_BLOCKS))
    height_prev_epoch_surpass: uint32 = uint32(height_epoch_surpass - constants.EPOCH_SUB_BLOCKS)
    if (height_in_next_epoch - height_epoch_surpass) > (3 * constants.MAX_SUB_SLOT_SUB_BLOCKS):
        raise ValueError(
            f"Height at {prev_sb.sub_block_height + 1} should not create a new epoch, it is far past the epoch barrier"
        )
    if height_prev_epoch_surpass == 0:
        # The genesis block is an edge case, where we measure from the first block in epoch (height 0), as opposed to
        # the last sub-block in the previous epoch, which would be height -1
        return _get_blocks_at_height(sub_height_to_hash, sub_blocks, prev_sb, uint32(0))[0]
    # If the prev slot is the first slot, the iterations start at 0
    # We will compute the timestamps of the last block in epoch, as well as the total iterations at infusion
    first_sb_in_epoch: SubBlockRecord
    prev_slot_start_iters: uint128
    prev_slot_time_start: uint64
    # Fetch a window of sub-blocks straddling the previous epoch boundary.
    fetched_blocks = _get_blocks_at_height(
        sub_height_to_hash,
        sub_blocks,
        prev_sb,
        uint32(height_prev_epoch_surpass - constants.MAX_SUB_SLOT_SUB_BLOCKS - 1),
        uint32(2 * constants.MAX_SUB_SLOT_SUB_BLOCKS + 1),
    )
    # This is the last sb in the slot at which we surpass the height. The last block in epoch will be before this.
    fetched_index: int = constants.MAX_SUB_SLOT_SUB_BLOCKS
    last_sb_in_slot: SubBlockRecord = fetched_blocks[fetched_index]
    fetched_index += 1
    assert last_sb_in_slot.sub_block_height == height_prev_epoch_surpass - 1
    curr_b: SubBlockRecord = fetched_blocks[fetched_index]
    assert curr_b.sub_block_height == height_prev_epoch_surpass
    # Wait until the slot finishes with a challenge chain infusion at start of slot
    # Note that there are no overflow blocks at the start of new epochs
    # NOTE(review): the first loop iteration re-reads the same fetched_index
    # (increment happens after the read), so curr_b repeats once before
    # advancing; last_sb_in_slot still ends up trailing curr_b — confirm this
    # read-then-increment order is intentional.
    while curr_b.sub_epoch_summary_included is None:
        last_sb_in_slot = curr_b
        curr_b = fetched_blocks[fetched_index]
        fetched_index += 1
    # Backtrack to find the last block before the signage point
    curr_b = sub_blocks[last_sb_in_slot.prev_hash]
    while curr_b.total_iters > last_sb_in_slot.sp_total_iters(constants) or not curr_b.is_block:
        curr_b = sub_blocks[curr_b.prev_hash]
    return curr_b
| 8,411 |
def get_current_user():
    """Return the name and username of the user identified by the JWT."""
    current = User.get_one_by_field('id', value=get_jwt_identity())
    return jsonify({
        'name': current['name'],
        'username': current['username'],
    })
| 8,412 |
def test_dynamism2a():
    """
    Expected captured output / result:

    parent.val 17
    parent.val 18
    parent.val 29
    [17, 18, 29]
    """
    n = Node()
    n1 = Node()
    n2 = Node()

    res = []
    # Reaction handler: record the current parent's val for every event,
    # or None when n has no parent.
    def func(*events):
        for ev in events:
            if n.parent:
                res.append(n.parent.val)
            else:
                res.append(None)
    # Dynamic (string-named) reaction on the nested property 'parent.val'.
    n.reaction(func, 'parent.val')
    loop.iter()

    with loop:  # does not get trigger, because n1.val was not set
        n.set_parent(n1)
        n.set_val(42)
    with loop:
        n1.set_val(17)
        n2.set_val(27)  # n2 is not the parent — should not be recorded
    with loop:
        n1.set_val(18)
        n2.set_val(28)
    with loop:  # does not trigger
        n.set_parent(n2)
    with loop:
        n1.set_val(19)  # old parent — ignored after reparenting
        n2.set_val(29)
    with loop:
        n.set_parent(None)
    with loop:  # no parent — nothing recorded
        n1.set_val(11)
        n2.set_val(21)
    print(res)
| 8,413 |
def create_kv_store(vm_name, vmdk_path, opts):
    """ Create the metadata kv store for a volume

    Initializes the store as DETACHED, stamping the creating VM's name and
    the (UTC) creation time alongside the provided volume options.
    Returns whatever kv.create returns for the given vmdk path.
    """
    vol_meta = {kv.STATUS: kv.DETACHED,
                kv.VOL_OPTS: opts,
                kv.CREATED: time.asctime(time.gmtime()),
                kv.CREATED_BY: vm_name}
    return kv.create(vmdk_path, vol_meta)
| 8,414 |
def main():
    """ Main Program: run the snake game loop until the player quits,
    hits a wall, or collides with the snake's own body. """
    pygame.init()

    # Set the height and width of the screen
    size = [SCREEN_WIDTH, SCREEN_HEIGHT]
    screen = pygame.display.set_mode(size)
    pygame.display.set_caption("SSSSSnake")

    # Loop until the user clicks the close button.
    done = False

    # Used to manage how fast the screen updates
    clock = pygame.time.Clock()
    items = [vector(5, 5, 0)]
    grid = Grid(35, 35)
    snake = Snake(grid)

    # Arrow-key codes mapped to direction vectors (left, up, right, down).
    key_directions = {
        276: vector(-1, 0, 0),
        273: vector(0, -1, 0),
        275: vector(1, 0, 0),
        274: vector(0, 1, 0),
    }

    # -------- Main Program Loop -----------
    while not done:
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                # BUG FIX: 'newdir' used to be referenced even when a
                # non-arrow key was pressed, raising NameError on the first
                # such keypress.
                newdir = key_directions.get(event.key)
                # Ignore reversals: the new direction may not point straight
                # back into the neck segment.
                if newdir is not None and snake.head + newdir != snake.poslist[-2]:
                    snake.dir = newdir
            if event.type == pygame.QUIT:
                done = True
        # Game over on wall hit or self-collision.
        if grid.outside(snake.head + snake.dir) or snake.head + snake.dir in snake.poslist:
            break
        screen.fill(WHITE)
        snake.move(grid, items)
        grid.draw(screen, snake, items)

        # ALL CODE TO DRAW SHOULD GO ABOVE THIS COMMENT

        # Speed up as the snake grows.
        clock.tick(7 + (snake.length - 3) / 15)

        # Go ahead and update the screen with what we've drawn.
        pygame.display.flip()

    # Be IDLE friendly. If you forget this line, the program will 'hang'
    # on exit.
    pygame.quit()
| 8,415 |
def mapdict(itemfunc, dictionary):
    """Dictionary analogue of the builtin ``map``.

    *itemfunc* receives each ``(key, value)`` pair and returns the
    (possibly transformed) pair to store in the resulting dictionary.
    """
    return {
        new_key: new_value
        for new_key, new_value in map(itemfunc, dictionary.items())
    }
| 8,416 |
def get_backup_temp():
    """
    This is the function for if the BMP280 malfunctions.

    Reads temperature from the BNO055 sensor; on failure falls through to
    the next backup source via get_backup_temp_2().
    """
    try:
        temp = BNO055.temperature
        logging.warning("Got backup temperature")
        return temp
    except RuntimeError:
        # Sensor not wired up / bus error — try the second backup sensor.
        logging.error("BNO055 not connected")
        return get_backup_temp_2()
    except Exception as error:
        # Any other failure: log it and still fall back rather than crash.
        logging.error(error)
        temp = get_backup_temp_2()
        return temp
| 8,417 |
def test_graph(example):
    """
    Use our example data to see that the dot output produced matches what we
    expect.

    ``example`` is a path prefix; ``<example>.json`` is the input state and
    ``<example>.dot`` the expected rendering.
    """
    # The examples won't be byte-for-byte identical with what we produce unless
    # we sort the lines
    with open(example + ".dot") as f:
        expect = "".join([l.strip() for l in f.readlines()])

    with open(example + ".json") as f:
        g = salt_state_graph.Graph(f)
        got = "".join(g.render("dot").splitlines())

    assert got.strip() == expect.strip()
| 8,418 |
def on_key_event(event):
    """
    Keyboard interaction: close the figure when a vowel key is pressed,
    otherwise prompt the user to try again.

    :param event: matplotlib key press event
    :return: None
    """
    key = event.key
    # Strip an "alt+" modifier prefix, keeping only the base key name.
    if key.find("alt") == 0:
        key = key.split("+")[1]
    cur_axis = plt.gca()
    # BUG FIX: set membership instead of `key in "aeiou"`, which was a
    # substring test and therefore (wrongly) True for the empty string.
    if key in {"a", "e", "i", "o", "u"}:
        cur_axis.set_title("Well done!")
        plt.pause(1)
        plt.close()
    else:
        # BUG FIX: added the missing space after the key name.
        cur_axis.set_title(key + " is not a vowel: try again to find a vowel ...")
        plt.draw()
| 8,419 |
def density_matrix(M, row_part, col_part):
    """
    Given a matrix M with row and column group labels, build a block matrix
    whose (i, j) entry is the proportion of 1-entries of M restricted to the
    rows of group i and the columns of group j.

    Raises ValueError when M is empty or label lengths don't match M's shape.
    """
    n_rows, n_cols = M.shape
    if n_rows <= 0 or n_cols <= 0:
        raise ValueError("Matrix M has dimensions with 0 or negative value.")
    if n_rows != len(row_part):
        raise ValueError("Row labels must be the same length as the number of rows in M.")
    if n_cols != len(col_part):
        raise ValueError("Column labels must be the same length as the number of columns in M.")

    # Counter preserves first-seen order of the distinct group labels.
    row_groups = Counter(row_part).keys()
    col_groups = Counter(col_part).keys()
    row_labels = np.array(row_part)
    col_labels = np.array(col_part)
    # Index arrays selecting the members of each group.
    rows_by_group = [np.where(row_labels == g)[0] for g in row_groups]
    cols_by_group = [np.where(col_labels == g)[0] for g in col_groups]

    blocks = []
    for rows in rows_by_group:
        row_densities = []
        for cols in cols_by_group:
            block_sum = np.sum(M[rows][:, cols])
            row_densities.append(block_sum / float(len(rows) * len(cols)))
        blocks.append(row_densities)
    return blocks
| 8,420 |
def isAddressInMyAddressBookSubscriptionsListOrWhitelist(address):
    """
    Am I subscribed to this address, is it in my addressbook or whitelist?

    Returns True on the first match (addressbook, then enabled whitelist,
    then enabled subscriptions), otherwise False.
    """
    if isAddressInMyAddressBook(address):
        return True

    # sqlQuery returns a list of matching rows; non-empty means a hit.
    queryreturn = sqlQuery(
        '''SELECT address FROM whitelist where address=?'''
        ''' and enabled = '1' ''',
        address)
    if queryreturn != []:
        return True

    queryreturn = sqlQuery(
        '''select address from subscriptions where address=?'''
        ''' and enabled = '1' ''',
        address)
    if queryreturn != []:
        return True
    return False
| 8,421 |
def GetDateTimeFromTimeStamp(timestamp, tzinfo=None):
    """Returns the datetime object for a UNIX timestamp.

    Args:
      timestamp: A UNIX timestamp in int or float seconds since the epoch
        (1970-01-01T00:00:00.000000Z).
      tzinfo: A tzinfo object for the timestamp timezone or None for the local
        timezone.

    Returns:
      The datetime object for a UNIX timestamp.
    """
    # fromtimestamp handles the tz conversion (local when tzinfo is None).
    converted = datetime.datetime.fromtimestamp(timestamp, tzinfo)
    return converted
| 8,422 |
def success(parsed_args):
    """Log a success message for an FVCOM boundary-condition file run and
    build the corresponding nowcast system message type.

    :param :py:class:`argparse.Namespace` parsed_args:

    :return: Nowcast system message type
    :rtype: str
    """
    model_config = parsed_args.model_config
    run_type = parsed_args.run_type
    run_day = parsed_args.run_date.format("YYYY-MM-DD")
    logger.info(
        f"FVCOM {model_config} {run_type} run boundary condition "
        f"file for {run_day} "
        f"created on {parsed_args.host_name}"
    )
    return f"success {model_config} {run_type}"
| 8,423 |
def _kv_to_dict(kv_string):
"""
Simple splitting of a key value string to dictionary in "Name: <Key>, Values: [<value>]" form
:param kv_string: String in the form of "key:value"
:return Dictionary of values
"""
dict = {}
if ":" not in kv_string:
log.error(f'Keyvalue parameter not in the form of "key:value"')
raise ValueError
kv = kv_string.split(':')
dict['Name'] = f'tag:{kv[0]}'
dict['Values'] = [kv[1]]
return dict
| 8,424 |
def get_column_names(df: pd.DataFrame) -> List[str]:
    """Generate one ``PID_<i>`` column name per DataFrame column.

    Args:
        df: DataFrame

    Returns:
        List of columns (e.g. PID_xx), numbered from 1.

    Raises:
        IndexError: if the DataFrame has no columns.
    """
    n_columns = df.shape[1]
    if n_columns <= 0:
        raise IndexError("Please ensure the DataFrame isn't empty!")
    return [f"PID_{index}" for index in range(1, n_columns + 1)]
| 8,425 |
def get_default(schema, key):
    """Get default value for key in voluptuous schema.

    Returns None when the key's default is vol.UNDEFINED, and also —
    implicitly — when the key is not present in the schema at all.
    """
    for k in schema.keys():
        if k == key:
            if k.default == vol.UNDEFINED:
                return None
            # voluptuous stores defaults as zero-argument callables.
            return k.default()
| 8,426 |
async def send_websocket_messages_from_queue():
    """ runs in the background and sends messages to all clients.
    This is a janus queue, so it can be filled by sync code"""
    try:
        while True:
            # Blocks until a message is available on the async side of the
            # janus queue.
            item = await app['message_queue'].async_q.get()
            if len (app['websockets']) == 0:
                log.debug(f"{datetime.datetime.now()}: there is message to send but no clients")
            for subscriber in app['websockets']:
                await subscriber.send_str(item) #assume is it json.dumps already
                log.info (f"{datetime.datetime.now()}: sent message from queue ")
    except asyncio.CancelledError:
        # Normal shutdown: the task was cancelled during app teardown.
        pass
    finally:
        print("Cleanup 2")
| 8,427 |
def get_font_loader_class():
    """Get the font loader associated to the current platform

    Returns:
        FontLoader: specialized version of the font loader class.

    Raises:
        NotImplementedError: on platforms with no font loader implementation.
    """
    if sys.platform.startswith("linux"):
        return FontLoaderLinux

    # BUG FIX: the previous check `"win" in sys.platform` also matched
    # "darwin" (macOS), wrongly selecting the Windows loader there.
    if sys.platform in ("win32", "cygwin"):
        return FontLoaderWindows

    raise NotImplementedError(
        "This operating system ({}) is not currently supported".format(sys.platform)
    )
| 8,428 |
def _map_tensor_names(original_tensor_name):
"""
Tensor name mapping
"""
global_tensor_map = {
"model/wte": "word_embedder/w",
"model/wpe": "position_embedder/w",
"model/ln_f/b": "transformer_decoder/beta",
"model/ln_f/g": "transformer_decoder/gamma",
}
if original_tensor_name in global_tensor_map:
return global_tensor_map[original_tensor_name]
original_tensor_name_split = original_tensor_name.split('/')
layer_tensor_map = {
"ln_1/b": "beta",
"ln_1/g": "gamma",
"ln_2/b": "past_poswise_ln/beta",
"ln_2/g": "past_poswise_ln/gamma",
"mlp/c_fc/b": "ffn/conv1/bias",
"mlp/c_fc/w": "ffn/conv1/kernel",
"mlp/c_proj/b": "ffn/conv2/bias",
"mlp/c_proj/w": "ffn/conv2/kernel",
"attn/c_proj/b": "self_attention/multihead_attention/output/bias",
"attn/c_proj/w": "self_attention/multihead_attention/output/kernel",
}
layer_num = int(original_tensor_name_split[1][1:])
layer_feature = '/'.join(original_tensor_name.split('/')[2:])
if layer_feature in layer_tensor_map:
layer_feature_ = layer_tensor_map[layer_feature]
tensor_name_ = '/'.join(
[
'transformer_decoder',
'layer_{}'.format(layer_num),
layer_feature_
])
return tensor_name_
else:
return original_tensor_name
| 8,429 |
def setplot(plotdata=None):
#--------------------------
    """
    Specify what is to be plotted at each frame.
    Input:  plotdata, an instance of pyclaw.plotters.data.ClawPlotData.
    Output: a modified version of plotdata.

    NOTE(review): relies on module-level color limits cmin, cmax and
    cmax_land being defined elsewhere in this file — confirm before reuse.
    """
    from clawpack.visclaw import colormaps, geoplot
    from numpy import linspace

    if plotdata is None:
        from clawpack.visclaw.data import ClawPlotData
        plotdata = ClawPlotData()

    plotdata.clearfigures()  # clear any old figures,axes,items data
    plotdata.format = 'binary'

    # Format seconds as H:MM:SS for frame titles.
    def timeformat(t):
        from numpy import mod
        hours = int(t/3600.)
        tmin = mod(t,3600.)
        min = int(tmin/60.)
        sec = int(mod(tmin,60.))
        timestr = '%s:%s:%s' % (hours,str(min).zfill(2),str(sec).zfill(2))
        return timestr

    def title_hours(current_data):
        from pylab import title
        t = current_data.t
        timestr = timeformat(t)
        title('%s after earthquake' % timestr)

    #-----------------------------------------
    # Figure for surface
    #-----------------------------------------
    plotfigure = plotdata.new_plotfigure(name='Computational domain', figno=0)
    plotfigure.kwargs = {'figsize':(8,7)}
    plotfigure.show = True

    # Set up for axes in this figure:
    plotaxes = plotfigure.new_plotaxes('pcolor')
    plotaxes.title = 'Surface'
    plotaxes.scaled = True

    # After-axes hook: square aspect, time title, plain tick labels.
    def aa(current_data):
        from pylab import ticklabel_format, xticks, gca, cos, pi, savefig
        gca().set_aspect(1.)
        title_hours(current_data)
        ticklabel_format(useOffset=False)
        xticks(rotation=20)

    plotaxes.afteraxes = aa

    # Water
    plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
    #plotitem.plot_var = geoplot.surface
    plotitem.plot_var = geoplot.surface_or_depth
    plotitem.pcolor_cmap = geoplot.tsunami_colormap
    plotitem.pcolor_cmin = cmin
    plotitem.pcolor_cmax = cmax
    plotitem.add_colorbar = True
    plotitem.colorbar_shrink = 0.7
    plotitem.amr_celledges_show = [0,0,0]
    plotitem.amr_patchedges_show = [0,0,0,0]
    plotitem.amr_data_show = [1,1,1,1,1,0,0]

    # Land
    plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
    plotitem.plot_var = geoplot.land
    plotitem.pcolor_cmap = geoplot.land_colors
    plotitem.pcolor_cmin = 0.0
    plotitem.pcolor_cmax = cmax_land
    plotitem.add_colorbar = False
    plotitem.amr_celledges_show = [0]
    plotitem.amr_patchedges_show = [0,0,0,0]
    plotitem.amr_data_show = [1,1,1,1,1,0,0]

    # add contour lines of bathy if desired:
    plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
    plotitem.show = False
    plotitem.plot_var = geoplot.topo
    plotitem.contour_levels = linspace(-3000,-3000,1)
    plotitem.amr_contour_colors = ['y']  # color on each level
    plotitem.kwargs = {'linestyles':'solid','linewidths':2}
    plotitem.amr_contour_show = [1,0,0]
    plotitem.celledges_show = 0
    plotitem.patchedges_show = 0

    #-----------------------------------------
    # Figure for coastal area
    #-----------------------------------------
    x1,x2,y1,y2 = [-0.005, 0.016, -0.01, 0.01]
    plotfigure = plotdata.new_plotfigure(name="coastal area", figno=11)
    plotfigure.show = True
    plotfigure.kwargs = {'figsize': (6,7)}

    # Set up for axes in this figure:
    plotaxes = plotfigure.new_plotaxes()
    plotaxes.scaled = False
    plotaxes.xlimits = [x1, x2]
    plotaxes.ylimits = [y1, y2]

    # Variant of aa() that also draws a dashed white reference box after 5 min.
    def aa_withbox(current_data):
        from pylab import plot
        x1,x2,y1,y2 = (-0.009259, 0.013796, -0.005093, 0.005000)
        if current_data.t > 5*60.:
            plot([x1,x1,x2,x2,x1], [y1,y2,y2,y1,y1], 'w--')
        aa(current_data)

    plotaxes.afteraxes = aa_withbox

    # Water
    plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
    plotitem.plot_var = geoplot.surface
    #plotitem.plot_var = geoplot.surface_or_depth
    plotitem.pcolor_cmap = geoplot.tsunami_colormap
    plotitem.pcolor_cmin = cmin
    plotitem.pcolor_cmax = cmax
    plotitem.add_colorbar = True
    plotitem.colorbar_shrink = 0.4
    plotitem.amr_celledges_show = [0,0,0]
    plotitem.patchedges_show = 0

    # Land
    plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
    plotitem.plot_var = geoplot.land
    plotitem.pcolor_cmap = geoplot.land_colors
    plotitem.pcolor_cmin = 0.0
    plotitem.pcolor_cmax = cmax_land
    plotitem.add_colorbar = False
    plotitem.amr_celledges_show = [0]
    plotitem.patchedges_show = 0

    # add contour lines of bathy if desired:
    plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
    #plotitem.show = False
    plotitem.plot_var = geoplot.topo
    plotitem.contour_levels = [-2,-1,0,1,2]
    plotitem.amr_contour_colors = ['yellow']  # color on each level
    plotitem.kwargs = {'linestyles':'solid','linewidths':1}
    plotitem.amr_contour_show = [0,0,1,0]
    plotitem.celledges_show = 0
    plotitem.patchedges_show = 0

    # Plots of timing (CPU and wall time):

    def make_timing_plots(plotdata):
        import os
        from clawpack.visclaw import plot_timing_stats
        try:
            timing_plotdir = plotdata.plotdir + '/_timing_figures'
            os.system('mkdir -p %s' % timing_plotdir)
            units = {'comptime':'hours', 'simtime':'hours', 'cell':'billions'}
            plot_timing_stats.make_plots(outdir=plotdata.outdir, make_pngs=True,
                                          plotdir=timing_plotdir, units=units)
            os.system('cp %s/timing.* %s' % (plotdata.outdir, timing_plotdir))
        except:
            # Best-effort: timing plots are optional diagnostics.
            print('*** Error making timing plots')

    otherfigure = plotdata.new_otherfigure(name='timing',
                    fname='_timing_figures/timing.html')
    otherfigure.makefig = make_timing_plots

    #-----------------------------------------
    # Parameters used only when creating html and/or latex hardcopy
    # e.g., via pyclaw.plotters.frametools.printframes:

    plotdata.printfigs = True                # print figures
    plotdata.print_format = 'png'            # file format
    plotdata.print_framenos = 'all'          # list of frames to print
    plotdata.print_gaugenos = 'all'          # list of gauges to print
    plotdata.print_fignos = 'all'            # list of figures to print
    plotdata.html = True                     # create html files of plots?
    plotdata.html_homelink = '../README.html'   # pointer for top of index
    plotdata.latex = True                    # create latex file of plots?
    plotdata.latex_figsperline = 2           # layout of plots
    plotdata.latex_framesperline = 1         # layout of plots
    plotdata.latex_makepdf = False           # also run pdflatex?
    plotdata.parallel = True                 # make multiple frame png's at once

    return plotdata
| 8,430 |
def test_skeleton(opts):
    """
    Template of unittest for skeleton.py

    :param opts: mapping parameters as dictionary
    :return: file content as string
    """
    # Substitute the mapping directly into the named template.
    return get_template("test_skeleton").substitute(opts)
| 8,431 |
def get_classpath(obj):
    """
    Return the full dotted path (module plus class name) of obj's class,
    for instance ``kgof.density.IsotropicNormal``, as a string.
    """
    klass = type(obj)
    return "{}.{}".format(klass.__module__, klass.__name__)
| 8,432 |
def bill_content(bill_version: str) -> str:
    """
    Return the bill text, broken down by the way the XML was structured,
    serialized as a JSON array string.

    Args:
        bill_version (str): bill_version_id used as a fk on the BillContent table

    Returns:
        str: String json array of bills
    """
    contents = get_bill_contents(bill_version)
    return json.dumps([content.to_dict() for content in contents])
| 8,433 |
def has_default(column: Column) -> bool:
    """Return True if the column has a server-side or SQLAlchemy default value."""
    # bool() collapses the truthy default object to a plain bool, replacing
    # the redundant `if ...: return True / else: return False` pattern.
    return bool(has_server_default(column) or column.default)
| 8,434 |
def iterations_median(benchmark_result):
    """A function to calculate the median of the amount of iterations.

    Parameters
    ----------
    benchmark_result : list of list of list of namedtuple
        The result from a benchmark.

    Returns
    -------
    numpy.ndarray
        A 2D array containing the median of the amount of iterations for every
        algorithm-problem pair. Note that the indices of a certain
        algorithm-problem pair in the benchmark_result will be the same as the
        indices one needs to get the results for that pair.
    """
    # Delegates to the shared reducer; field index 1 of each result tuple is
    # presumably the iteration count — TODO confirm against _func_on_data.
    return _func_on_data(benchmark_result, statistics.median, 1)
| 8,435 |
def _make_list(input_list, proj_ident):
    """Used by other functions; returns a new list with every item of
    input_list converted to a serializable form (Idents belonging to
    proj_ident become bare numbers, foreign Idents become [proj, num],
    and anything unrecognized is stringified)."""
    if not input_list:
        return []
    converted = []
    for element in input_list:
        # Order of checks matters: None/'' and the bool singletons must be
        # caught before the generic stringify fallback.
        if element is None:
            converted.append(None)
        elif element == '':
            converted.append('')
        elif isinstance(element, list):
            converted.append(_make_list(element, proj_ident))
        elif isinstance(element, dict):
            converted.append(_make_dictionary(element, proj_ident))
        elif element is True:
            converted.append(True)
        elif element is False:
            converted.append(False)
        elif isinstance(element, skiboot.Ident):
            if element.proj == proj_ident:
                converted.append(element.num)
            else:
                # Ident from another project: keep the full [proj, num] pair.
                converted.append([element.proj, element.num])
        else:
            converted.append(str(element))
    return converted
| 8,436 |
def another_method():
    """
    Another method!

    :return: None (placeholder with no behavior yet)
    """
    return None
| 8,437 |
def impala_service(docker_ip, docker_services):
    """Ensure that Impala service is up and responsive.

    Fixture-style helper: sleeps before and after the readiness poll to
    work around connection-reset races while the container starts.
    """
    runner = CliEntryBasedTestRunner(impala_plugin)
    # give the server some time to start before checking if it is ready
    # before adding this sleep there were intermittent fails of the CI/CD with error:
    # requests.exceptions.ConnectionError:
    #   ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))
    # More info: https://stackoverflow.com/questions/383738/104-connection-reset-by-peer-socket-error-or-when-does-closing-a-socket-resu
    time.sleep(3)
    docker_services.wait_until_responsive(
        timeout=90.0, pause=0.3, check=lambda: _is_responsive(runner)
    )
    # Extra settling time after the first successful response.
    time.sleep(10)
| 8,438 |
def monthly_ndvi():
    """Get monthly mean NDVI per case-study city from MOD13C2 products.

    Returns:
        pandas.DataFrame indexed by city id, one column per month (1-12),
        holding the mean scaled NDVI of valid pixels inside each city AOI.
    """
    mod13c2_dir = os.path.join(DATA_DIR, 'raw', 'MOD13C2')
    months = [m for m in range(1, 13)]
    cities = [city.id for city in CASE_STUDIES]
    ndvi = pd.DataFrame(index=cities, columns=months)
    for city, month in product(CASE_STUDIES, months):
        aoi = city.aoi['geometry']
        raster_path = os.path.join(
            mod13c2_dir, 'mod13c2_{}.tif'.format(str(month).zfill(2))
        )
        # Mask MOD13C2 data based on the AOI
        with rasterio.open(raster_path) as src:
            data, _ = mask(src, [aoi], crop=True)
        pixels = data[0, :, :].ravel()
        # MOD13C2 valid range: -2000 - 10000
        pixels = pixels[pixels >= -2000]
        pixels = pixels[pixels <= 10000]
        # MOD13C2 scale factor: 0.0001
        # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # use the concrete float64 dtype instead.
        pixels = pixels.astype(np.float64)
        pixels = pixels * 0.0001
        ndvi.at[(city.id, month)] = pixels.mean()
    return ndvi
| 8,439 |
def dvds_s(P, s):
    """Derivative of specific volume [m^3 kg K / kg kJ] with respect to
    specific entropy at constant pressure.

    Uses the chain rule (dv/ds)_P = (dv/dT)_P / (ds/dT)_P evaluated at the
    temperature corresponding to (P, s).
    """
    temperature = T_s(P, s)
    return dvdT(P, temperature) / dsdT(P, temperature)
| 8,440 |
def serialize(_cls=None, *, ctor_args: Tuple[str, ...] = ()):
    """Class decorator to register a Proxy class for serialization.

    Works both bare (``@serialize``) and parametrised
    (``@serialize(ctor_args=...)``).

    Args:
    - ctor_args: names of the attributes to pass to the constructor when deserializing
    """
    global _registry

    def register(cls):
        try:
            declared = cls._serialize
        except AttributeError:
            # no declaration at all: default to an empty tuple
            cls._serialize = ()
        else:
            if not isinstance(declared, (tuple, list)):
                raise EncodeError(f"Expected tuple or list for _serialize, got {type(declared)} for {cls}")
        _registry[cls.__name__] = (cls, ctor_args)
        return cls

    return register if _cls is None else register(_cls)
| 8,441 |
def runcmd(args):
    """
    Run a given program/shell command and return its output.

    Error Handling
    ==============
    If the spawned process returns a nonzero exit status, it will print the
    program's ``STDOUT``, pass its ``STDERR`` to ``die`` and cause
    Python to exit with a return status of 1.
    """
    proc = subprocess.Popen(
        args,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    # communicate() drains both pipes before the process exits, avoiding the
    # deadlock possible with wait()+read() on large output, and lets each
    # stream be read exactly once.
    stdout, stderr = proc.communicate()
    # BUG FIX: the old check was ``proc.wait() == 1``, which treated any
    # other nonzero exit status (2, 127, ...) as success, contradicting the
    # docstring.  Treat every nonzero status as failure.
    if proc.returncode != 0:
        print(stdout.decode())
        die(stderr.decode())
    return stdout
| 8,442 |
def get_as_by_asn(asn_):
    """Return the AS registered under the given ASN.

    Args:
        asn_: ASN (autonomous system number) to look up.

    Returns:
        The matching Asn object.

    Raises:
        exceptions.AsnDoesNotExistException: if no AS with that ASN exists.
    """
    try:
        as_ = Asn.get_by_asn(asn_)
    except AsnNotFoundError as e:
        # translate the storage-layer error into the API-layer exception
        raise exceptions.AsnDoesNotExistException(str(e))
    return as_
| 8,443 |
def spoken_form(text: str) -> str:
    """Convert ``text`` into a format compatible with speech lists."""
    # TODO: Replace numeric digits with spoken digits
    without_apostrophes = text.replace("'", " ")
    return _RE_NONALPHABETIC_CHAR.sub(" ", without_apostrophes).strip()
| 8,444 |
def get_default_group_type():
    """Get the default group type.

    Resolves the ``default_group_type`` config option to a full group-type
    dict; returns an empty dict when the option is unset or the named type
    cannot be found.
    """
    name = CONF.default_group_type
    grp_type = {}
    if name is not None:
        ctxt = context.get_admin_context()
        try:
            grp_type = get_group_type_by_name(ctxt, name)
        except exception.GroupTypeNotFoundByName:
            # Couldn't find group type with the name in default_group_type
            # flag, record this issue and move on
            LOG.exception('Default group type is not found. '
                          'Please check default_group_type config.')
    return grp_type
| 8,445 |
def toHVal(op: Any, suggestedType: Optional[HdlType]=None):
    """Convert a python value or hdl value/signal object to a hdl
    value/signal object.

    Ints honour ``suggestedType`` when given, otherwise must fit a signed
    32-bit range; other python types are converted via
    ``defaultPyConversions``.
    """
    if isinstance(op, (Value, SignalItem)):
        return op
    if isinstance(op, InterfaceBase):
        return op._sig
    if isinstance(op, int):
        if suggestedType is not None:
            return suggestedType.fromPy(op)
        if op >= 1 << 31:
            raise TypeError(
                "Number %d is too big to fit in 32 bit integer of HDL"
                " use Bits type instead" % op)
        if op < -(1 << 31):
            raise TypeError(
                "Number %d is too small to fit in 32 bit integer"
                " of HDL use Bits type instead" % op)
    hType = defaultPyConversions.get(type(op))
    if hType is None:
        raise TypeError("Unknown hardware type for %s" % (op.__class__))
    return hType.fromPy(op)
| 8,446 |
def _make_readiness_probe(port: int) -> Dict[str, Any]:
"""Generate readiness probe.
Args:
port (int): service port.
Returns:
Dict[str, Any]: readiness probe.
"""
return {
"httpGet": {
"path": "/openmano/tenants",
"port": port,
},
"periodSeconds": 10,
"timeoutSeconds": 5,
"successThreshold": 1,
"failureThreshold": 3,
}
| 8,447 |
def compute_embeddings_and_distances_from_region_adjacency(g,info, metric='euclidean', norm_type = 2, n_jobs=1):
    """
    This method runs local graph clustering for each node in the region adjacency graph.
    Returns the embeddings for each node in a matrix X. Each row corresponds to an embedding
    of a node in the region adjacency graph. It also returns the pairwise distance matrix Z.
    For example, component Z[i,j] is the distance between nodes i and j.

    Parameters
    ----------
    g: GraphLocal

    info: list of lists
        Each element of the list is another list with two elements.
        The first element is the indices of the a segment, while the second element
        is the vector representation of that segment.

    Parameters (optional)
    ---------------------
    metric: str
        Default = 'euclidean'
        Metric for measuring distances among nodes.
        For details check:
        https://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise_distances.html

    norm_type: int
        Default = 2
        Norm for normalization of the embeddings.

    n_jobs: int
        Default = 1
        Number of jobs to be run in parallel

    Returns
    -------
    X: csc matrix
        The embeddings matrix. Each row corresponds to an embedding of a node in the region adjacency graph.

    Z: 2D np.ndarray
        The pairwise distance matrix Z. For example, component Z[i,j]
        is the distance between nodes i and j.
    """
    # Build the embeddings in CSC form: one column per segment, rows are
    # graph vertices; A/IA/JA are the data/indices/indptr arrays.
    sum_ = 0
    JA = [0]
    IA = []
    A = []
    for data in info:
        # normalize each segment's representation with the requested norm
        vec = data[1]/np.linalg.norm(data[1],norm_type)
        how_many = len(data[0])
        sum_ += how_many
        JA.append(sum_)
        IA.extend(list(data[0]))
        A.extend(list(vec))
    X = sp.sparse.csc_matrix((A, IA, JA), shape=(g._num_vertices, len(info)))
    X = X.transpose()
    # BUG FIX: the metric and n_jobs parameters were previously ignored --
    # the call hard-coded metric='euclidean' and n_jobs=6.
    Z = pairwise_distances(X, metric=metric, n_jobs=n_jobs)
    return X, Z
| 8,448 |
def test_get_worst_rating_shortterm_with_explicit_rating_provider(
    rtg_inputs_shortterm,
):
    """Test computation of worst ratings on a security (line-by-line) basis."""
    actual = rtg.get_worst_ratings(
        rtg_inputs_shortterm,
        rating_provider_input=["SP", "Moody", "Fitch"],
        tenor="short-term",
    )
    # expected worst short-term rating per security, NaN where no provider
    # rated the line
    expectations = pd.Series(
        data=["A-2", "B", "A-1", "D", "B", np.nan, "A-2", "A-3"], name="worst_rtg"
    )
    pd.testing.assert_series_equal(actual, expectations)
| 8,449 |
def _parse_assoc(lexer: shlex.shlex) -> AssociativeArray:
    """Parse an associative Bash array of the form ``([key]=value ...)``."""
    assert lexer.get_token() == "("
    parsed = {}
    while True:
        tok = lexer.get_token()
        assert tok != lexer.eof
        if tok == ")":
            break
        # each entry is the token sequence:  [ key ] = value
        assert tok == "["
        name = lexer.get_token()
        assert lexer.get_token() == "]"
        assert lexer.get_token() == "="
        parsed[name] = _parse_string(lexer.get_token())
    return parsed
| 8,450 |
def test_html_whitelist_caption():
    """The caption element represents the title of its parent table.

    Verifies that <caption> survives the HTML whitelist and appears
    (whitespace-collapsed) in the sanitised output.
    """
    check_html_output_contains_text("""
        <p>The caption provides context to the table.</p>
        <table>
            <caption>
                Table 1. This shows the possible results of flipping two coins.
            </caption>
            <tbody>
            <tr>
                <th></th>
                <th>H</th>
                <th>T</th>
            </tr>
            </tbody>
        </table>
    """, "<caption>Table 1. This shows the possible results of flipping two coins.</caption>")
| 8,451 |
def get_addon_by_name(addon_short_name):
    """Return the available Addon object matching the short name, or None."""
    return next(
        (addon for addon in osf_settings.ADDONS_AVAILABLE
         if addon.short_name == addon_short_name),
        None,
    )
| 8,452 |
def test_setter_factory():
    """Test using a factory setter.

    The setter is built by ``conditional``: the command issued on assignment
    is ``1`` when ``driver.state`` is truthy and ``2`` otherwise, regardless
    of the value assigned to the feature.
    """
    class FactoryTester(DummyParent):

        state = False

        feat = Feature(setter=conditional('1 if driver.state else 2', True))

    driver = FactoryTester()
    driver.feat = None
    # state is False -> the conditional evaluates to 2
    assert driver.d_set_cmd == 2
    driver.state = True
    driver.feat = True
    # state is True -> the conditional evaluates to 1
    assert driver.d_set_cmd == 1
| 8,453 |
def _normpdf(x):
"""Probability density function of a univariate standard Gaussian
distribution with zero mean and unit variance.
"""
return 1.0 / np.sqrt(2.0 * np.pi) * np.exp(-(x * x) / 2.0)
| 8,454 |
def scalar_mult(x, y):
    """Complex product of matrices/vectors/scalars stored as (real, imag)
    pairs: (a+bi)(c+di) = (ac - bd) + (ad + bc)i.
    """
    y = y.to(x)
    x_re, x_im = real(x), imag(x)
    y_re, y_im = real(y), imag(y)
    product_re = x_re * y_re - x_im * y_im
    product_im = x_re * y_im + x_im * y_re
    return to_complex(product_re, product_im)
| 8,455 |
def train_trajectory_encoder(trajectories):
    """
    Train a fixed neural-network encoder that maps variable-length
    trajectories (of states) into fixed length vectors, trained to reconstruct
    said trajectories.
    Returns TrajectoryEncoder.

    Parameters:
        trajectories (List of np.ndarray): A list of trajectories, each of shape
            (?, D), where D is dimension of a state.
    Returns:
        encoder (TrajectoryEncoder).
    """
    # NOTE: BATCH_SIZE and EPOCHS are module-level constants.
    state_dim = trajectories[0].shape[1]
    network = TrajectoryEncoder(state_dim)
    optimizer = th.optim.Adam(network.parameters())
    num_trajectories = len(trajectories)
    # integer division: a trailing partial batch is dropped each epoch
    num_batches_per_epoch = num_trajectories // BATCH_SIZE
    # Copy trajectories as we are about to shuffle them in-place
    trajectories = [x for x in trajectories]
    for epoch in range(EPOCHS):
        random.shuffle(trajectories)
        total_loss = 0
        for batch_i in range(num_batches_per_epoch):
            batch_trajectories = trajectories[batch_i * BATCH_SIZE:(batch_i + 1) * BATCH_SIZE]
            # VAE-style reconstruction objective on the whole batch
            loss = network.vae_reconstruct_loss(batch_trajectories)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        print("Epoch {}, Avrg loss {}".format(epoch, total_loss / num_batches_per_epoch))
    return network
| 8,456 |
def flatten_list(x):
    """Flatten a nested list.

    Parameters
    ----------
    x : list
        nested list of lists to flatten (a non-list is wrapped as [x])

    Returns
    -------
    list
        flattened input
    """
    # base case: a non-list leaf becomes a one-element list
    if not isinstance(x, list):
        return [x]
    flattened = []
    for item in x:
        flattened.extend(flatten_list(item))
    return flattened
| 8,457 |
def check_vpg_statuses(url, session, verify):
    """
    Return a list of VPGs which meet the SLA and a list of those which don't
    """
    meeting, missing = [], []
    for vpg in get_api(url, session, "vpgs", verify):
        vpg_name = vpg['VpgName']
        if vpg_statuses(vpg['Status']) == vpg_statuses.meeting_sla:
            meeting.append(vpg_name)
        else:
            missing.append(vpg_name)
    return meeting, missing
| 8,458 |
def get_hits(adj_matrix, EPSILON = 0.001):
    """Compute HITS hub and authority scores by power iteration.

    Arguments:
        adj_matrix {float[][]} -- input adjacency matrix, e.g. [[1, 0], [0, 1]]

    Keyword Arguments:
        EPSILON {float} -- convergence threshold on the total absolute change
            of both score vectors between iterations (default: {0.001})

    Returns:
        [(float[], float[])] -- (hubs, authorities), each normalized to sum 1
    """
    # initialize to all 1's
    hubs = np.ones(adj_matrix.shape[0])
    authorities = np.ones(adj_matrix.shape[0])
    while True:
        # a = A.T h,  h = A a
        new_authorities = np.dot(adj_matrix.T, hubs)
        new_hubs = np.dot(adj_matrix, authorities)
        # normalize so each score vector sums to 1
        new_authorities = new_authorities / new_authorities.sum()
        new_hubs = new_hubs / new_hubs.sum()
        # BUG FIX: the previous convergence test summed *signed* differences,
        # which cancel to ~0 as soon as both vectors are normalized (both sum
        # to 1), so the loop always stopped after two iterations regardless
        # of actual convergence.  Use the sum of absolute changes instead.
        diff = np.abs(new_hubs - hubs).sum() + np.abs(new_authorities - authorities).sum()
        hubs, authorities = new_hubs, new_authorities
        if diff < EPSILON:
            return (new_hubs, new_authorities)
| 8,459 |
def quicksort(seq):
    """
    Sort a list of numbers recursively via quicksort.

    ``partition`` splits ``seq`` into (elements below the pivot, the pivot,
    elements above the pivot); both sides are sorted recursively and the
    three pieces are concatenated.
    """
    # base case: zero or one element is already sorted
    if len(seq) <= 1:
        return seq
    below, pivot, above = partition(seq)
    return quicksort(below) + [pivot] + quicksort(above)
| 8,460 |
def scan_resource(
    location_rid,
    scanners,
    timeout=DEFAULT_TIMEOUT,
    with_timing=False,
    with_threading=True,
):
    """
    Return a tuple of:
        (location, rid, scan_errors, scan_time, scan_results, timings)
    by running the `scanners` Scanner objects for the file or directory resource
    with id `rid` at `location` provided as a `location_rid` tuple of (location,
    rid) for up to `timeout` seconds. If `with_threading` is False, threading is
    disabled.

    The returned tuple has these values:
    - `location` and `rid` are the original arguments.
    - `scan_errors` is a list of error strings.
    - `scan_results` is a mapping of scan results from all scanners.
    - `scan_time` is the duration in seconds to run all scans for this resource.
    - `timings` is a mapping of scan {scanner.name: execution time in seconds}
      tracking the execution duration each each scan individually.
      `timings` is empty unless `with_timing` is True.

    All these values MUST be serializable/pickable because of the way multi-
    processing/threading works.
    """
    # wall-clock start; converted to an elapsed duration just before return
    scan_time = time()
    location, rid = location_rid
    results = {}
    scan_errors = []
    timings = {} if with_timing else None
    if not with_threading:
        interruptor = fake_interruptible
    else:
        interruptor = interruptible
    # The timeout is a soft deadline for a scanner to stop processing
    # and start returning values. The kill timeout is otherwise there
    # as a gatekeeper for runaway processes.
    # run each scanner in sequence in its own interruptible
    for scanner in scanners:
        if with_timing:
            start = time()
        try:
            # pass a deadline that the scanner can opt to honor or not
            if timeout:
                deadline = time() + int(timeout / 2.5)
            else:
                deadline = sys.maxsize
            runner = partial(scanner.function, location, deadline=deadline)
            error, values_mapping = interruptor(runner, timeout=timeout)
            if error:
                msg = 'ERROR: for scanner: ' + scanner.name + ':\n' + error
                scan_errors.append(msg)
            # the return value of a scanner fun MUST be a mapping
            if values_mapping:
                results.update(values_mapping)
        except Exception:
            # a scanner crash is recorded as an error string, never raised,
            # so one bad scanner does not abort the whole resource scan
            msg = 'ERROR: for scanner: ' + scanner.name + ':\n' + traceback.format_exc()
            scan_errors.append(msg)
        finally:
            if with_timing:
                timings[scanner.name] = time() - start
    scan_time = time() - scan_time
    return location, rid, scan_errors, scan_time, results, timings
| 8,461 |
def insert_dummy():
    """
    Fill the existing database with dummy data read from dummyData.sql.

    Each line of dummyData.sql is executed as one statement; the batch is
    committed only after every line succeeds.  On a MySQL error the error is
    echoed and the process exits with status 1.
    """
    cnx = get_db()
    cursor = cnx.cursor()
    click.echo("Insert dummy")
    try:
        # NOTE: each statement must fit on a single line of dummyData.sql;
        # multi-line SQL would be split apart and fail to execute.
        with current_app.open_resource('dummyData.sql', 'r') as f:
            try:
                for line in f.readlines():
                    click.echo("line: {}".format(line))
                    cursor.execute(line)
                cnx.commit()
            except mysql.connector.Error as err:
                click.echo("Failed inserting dummy data: {}".format(err))
                exit(1)
    finally:
        # close the cursor on every path (the original leaked it on errors)
        cursor.close()
| 8,462 |
def label_generator(df_well, df_tops, column_depth, label_name):
    """
    Generate Formation (or other) Labels to Well Dataframe
    (useful for machine learning and EDA purpose)

    Input:
    df_well is your well dataframe (that originally doesn't have the intended label)
    df_tops is your label dataframe (this dataframe should ONLY have 2 columns)
      1st column is the label name (e.g. formation top names)
      2nd column is the depth of each label name
    column_depth is the name of depth column on your df_well dataframe
    label_name is the name of label that you want to produce (e.g. FM. LABEL)

    Output:
    df_well is your dataframe that now has the labels (e.g. FM. LABEL)
    """
    import numpy as np
    import pandas as pd

    # generate list of formation depths and top names
    fm_tops = df_tops.iloc[:,0]
    fm_depths = df_tops.iloc[:,1]

    # create the label column, initiated with NaNs; object dtype so that the
    # string labels below can be assigned without a dtype upcast (assigning
    # strings into a float column is deprecated in recent pandas)
    df_well[label_name] = pd.Series(np.nan, index=df_well.index, dtype=object)

    indexes = []
    topnames = []
    for j in range(len(fm_depths)):
        # search index at which the DEPTH in the well df equals to OR
        # larger than the DEPTH of each pick in the pick df
        if (df_well[column_depth].iloc[-1] > fm_depths[j]):
            index = df_well.index[(df_well[column_depth] >= fm_depths[j])][0]
            top = fm_tops[j]
            indexes.append(index)
            topnames.append(top)

    # BUG FIX: the original chained assignment df[label].loc[indexes] = ...
    # writes to a temporary under pandas copy-on-write and is silently lost;
    # a single .loc with (rows, column) writes to the frame itself.
    df_well.loc[indexes, label_name] = topnames

    # forward-fill so every row below a top carries that top's name
    # (ffill() replaces the deprecated fillna(method='ffill'))
    df_well = df_well.ffill()
    return df_well
| 8,463 |
def tclexec(tcl_code):
    """Evaluate a Tcl snippet, recording both the request and its result in
    the global g[TCL] state, and return the result."""
    g[TCL][REQUEST] = tcl_code
    result = tkeval(tcl_code)
    g[TCL][RESULT] = result
    return result
| 8,464 |
def cleanFiles(direct, CWD=None):
    """
    removes the year and trailing white space, if there is a year
    direct holds the file name for the file of the contents of the directory
    (kept for backward compatibility; it is not used by this function)
    CWD defaults to the current working directory at *call* time
    (the old default ``os.getcwd()`` was captured once at import time)
    @return list of the cleaned data
    """
    if CWD is None:
        CWD = os.getcwd()
    SUBDIR = CWD + "output/"  # change directory to ouput folder
    contents = os.listdir(SUBDIR)
    LOGDIR = CWD + "log/"  # change directory for logging
    # context manager guarantees the log is closed even if listdir contents
    # trigger an exception mid-loop (the original leaked the handle)
    with open(f"{LOGDIR}log.txt", "w") as log:
        for i in range(0, len(contents)):
            contents[i] = contents[i].strip("\n")  # remove trailing \n
            if (
                "(" in contents[i] or ")" in contents[i]
            ):  # if '(' or ')'exists in the file name to signify if there is a year
                old = contents[i]  # holds the name of the movie for logging purposes
                # truncate to remove the " (YYYY)" suffix: 7 characters
                contents[i] = contents[i][:-7]
                log.write(
                    f"Removing date from {old} -> {contents[i]})\n"
                )  # writes to the log file
    return contents
| 8,465 |
def test_labels(test_project_data):
    """A list of labels that correspond to SEED_LABELS.

    Creates one Label row per seed name, attached to the project fixture,
    and returns them in SEED_LABELS order.
    """
    labels = []
    for label in SEED_LABELS:
        labels.append(Label.objects.create(name=label, project=test_project_data))
    return labels
| 8,466 |
def unravel_index_2d(indices, dims):
    """Unravel index, for 2D inputs only.
    See Numpy's unravel.

    Args:
        indices: <int32> [num_elements], coordinates into 2D row-major tensor.
        dims: (N, M), dimensions of the 2D tensor.

    Returns:
        coordinates: <int32> [2, num_elements], row (1st) and column (2nd) indices.
    """
    num_cols = dims[1]
    rows = tf.floordiv(indices, num_cols)
    cols = tf.floormod(indices, num_cols)
    return tf.stack([rows, cols], axis=0)
| 8,467 |
def np_xcycwh_to_xy_min_xy_max(bbox: np.array) -> np.array:
    """
    Convert bbox from shape [xc, yc, w, h] to [xmin, ymin, xmax, ymax]
    Args:
        bbox: (n, 4) array of n boxes in center/size form
    Returns:
        (n, 4) array of the converted boxes in corner form
    """
    centers = bbox[:, :2]
    half_sizes = bbox[:, 2:] / 2
    top_left = centers - half_sizes
    bottom_right = centers + half_sizes
    return np.concatenate([top_left, bottom_right], axis=-1)
| 8,468 |
def wmedian(spec, wt, cfwidth=100):
    """ Performs a weighted median filtering of a 1d spectrum
    Operates using a cumulative sum curve

    Parameters
    ----------
    spec : numpy.ndarray
        Input 1d spectrum to be filtered
    wt : numpy.ndarray
        A spectrum of equal length as the input array to provide the weights.
    cfwidth : int or float
        Window size for the continuum filter, for the SVD computation.
        Default to 100.

    Returns
    -------
    numpy.ndarray
        The weighted-median-filtered spectrum, same length as the input.
    """
    # ignore the warning (feature not a bug)
    old_settings = np.seterr(divide='ignore')
    spec = np.pad(spec, (cfwidth, cfwidth), 'constant', constant_values=0)
    wt = np.abs(wt)
    # pad the weights with tiny (non-zero) values so padded samples barely count
    wt = np.pad(wt, (cfwidth, cfwidth), 'constant',
                constant_values=(np.min(wt) / 1000., np.min(wt) / 1000.))
    # do some striding for speed
    swin = rolling_window(spec, cfwidth)  # create window container array
    wwin = rolling_window(wt, cfwidth)  # create window container array
    # sort based on data
    srt = np.argsort(swin, axis=-1)
    ind = np.ogrid[0:swin.shape[0], 0:swin.shape[1]]
    sdata = swin[ind[0], srt]
    swt = wwin[ind[0], srt]
    # calculate accumulated weights
    awt = np.cumsum(swt, axis=-1)
    # new weightsort for normalization and consideration of data
    nw = (awt - 0.5 * swt) / awt[:, -1][:, np.newaxis]
    # find the midpoint in the new weight sort
    s = np.argmin(np.abs(nw - 0.5), axis=-1)
    sl = np.arange(len(s))
    nws = nw[sl, s]
    nws1 = nw[sl, s - 1]
    # linear interpolation between the two samples bracketing weight 0.5
    f1 = (nws - 0.5) / (nws - nws1)
    f2 = (0.5 - nws1) / (nws - nws1)
    wmed = sdata[sl, s - 1] * f1 + sdata[sl, s] * f2
    width = cfwidth // 2
    wmed = wmed[width:-width - 1]
    # BUG FIX: np.seterr's first positional parameter is ``all``; the old
    # call np.seterr(old_settings['divide']) forced *every* error class to
    # the saved divide setting.  Restore the full saved state instead.
    np.seterr(**old_settings)
    return wmed
| 8,469 |
def execute_command(cmd, logfile):
    """
    Run a given program/shell command non-interactively and
    return the output of the command if there is some.

    The command is executed under ``/bin/bash -c`` via pexpect with a 300s
    timeout; the child pty is sized to match the current terminal
    (``stty size``) so wide output is not rewrapped.  All child output is
    also teed to ``logfile``.  Returns the captured output, or None on any
    pexpect error.
    """
    try:
        rows, columns = subprocess.check_output(["stty", "size"]).decode().split()
        child = pexpect.spawn(
            "/bin/bash",
            ["-c", cmd.strip()],
            logfile=logfile,
            encoding="utf-8",
            timeout=300,
        )
        child.setwinsize(int(rows), int(columns))
        # block until the command finishes (EOF on the pty)
        child.expect(pexpect.EOF)
        child.close()
        return child.before
    except pexpect.exceptions.ExceptionPexpect as e:
        print(e)
        print("Error in command: " + cmd)
        return None
| 8,470 |
def _read(filename, format=None, **kwargs):
    """
    Reads a single event file into a ObsPy Catalog object.

    Delegates to the plugin machinery; the format actually used (auto
    detected, or forced via ``format``) is recorded on every event as
    ``event._format``.
    """
    catalog, format = _read_from_plugin('event', filename, format=format,
                                        **kwargs)
    for event in catalog:
        event._format = format
    return catalog
| 8,471 |
def compute_encrypted_key_powers(s, k):
    """
    Compute the powers of the custody key s, encrypted using Paillier. The validator
    (outsourcer) gives these to the provider so they can compute the proof of custody
    for them.

    Returns a list of k + 2 encrypted values: Enc(s^0), Enc(s^1), ...,
    with exponentiation performed modulo r.
    """
    encrypted_powers = []
    power = 1
    for _ in range(k + 2):
        encrypted_powers.append(encrypt(power))
        power = power * s % r
    return encrypted_powers
| 8,472 |
def idiosyncratic_var_vector(returns, idiosyncratic_var_matrix):
    """
    Get the idiosyncratic variance vector

    Parameters
    ----------
    returns : DataFrame
        Returns for each ticker and date
    idiosyncratic_var_matrix : DataFrame
        Idiosyncratic variance matrix

    Returns
    -------
    idiosyncratic_var_vector : DataFrame
        Idiosyncratic variance Vector
    """
    # the vector is the diagonal of the (square) variance matrix,
    # re-indexed by the tickers of the returns frame
    diagonal = np.diag(idiosyncratic_var_matrix)
    return pd.DataFrame(diagonal, index=returns.columns)
| 8,473 |
def get_ticker_quote_type(ticker: str) -> str:
    """Returns the quote type of ticker symbol

    Parameters
    ----------
    ticker : str
        ticker symbol of organization

    Returns
    -------
    str
        quote type of ticker, or "" when yfinance reports none
    """
    info = yf.Ticker(ticker).info
    return info.get("quoteType", "")
| 8,474 |
def sensitivity_score(y_true, y_pred):
    """
    Compute classification sensitivity score

    Classification sensitivity (also named true positive rate or recall) measures
    the proportion of actual positives (class 1) that are correctly identified as
    positives. It is defined as follows:

                       TP
    sensitivity = ---------
                   TP + FN

    Parameters
    ----------
    y_true : numpy array
        1D labels array of ground truth labels
    y_pred : numpy array
        1D labels array of predicted labels

    Returns
    -------
    Score value (float)
    """
    # Sensitivity is mathematically identical to recall, so delegate to
    # sklearn's recall_score
    return recall_score(y_true, y_pred)
| 8,475 |
def report_tacacs_failure(username: str, existing_fail_count: int, existing_fail_times: list, redis: Redis) -> None:
    """
    Given a failure count and list of timestamps, increment them and stash the results in Redis

    :param username: Who dun goofed?
    :param existing_fail_count: How many failures were in the data (before this one we're reporting)
    :param existing_fail_times: List of failure times (before this one)
    :param redis: What instantiated Redis object/connection are we using
    :return:
    """
    # Update the timestamps list with a failure for right now
    existing_fail_times.append(datetime.now())
    # Pickle this list for Redis insertion
    failure_timestamps = dumps(existing_fail_times)
    # Setup our failed dict we're stashing in Redis:
    failed_dict = {"failure_count": existing_fail_count + 1, "failure_timestamps": failure_timestamps}
    # BUG FIX: hmset was deprecated in redis-py 3.5 and removed in 4.0;
    # hset with mapping= is the supported equivalent.
    redis.hset("naas_failures_" + username, mapping=failed_dict)
| 8,476 |
def wrap_compute_softmax(topi_compute):
    """Wrap softmax topi compute

    The returned closure reads the softmax axis from the op attributes and
    applies ``topi_compute`` to the first input along that axis.
    """
    def _compute_softmax(attrs, inputs, out_type):
        softmax_axis = attrs.get_int("axis")
        return [topi_compute(inputs[0], softmax_axis)]
    return _compute_softmax
| 8,477 |
def post(url: str, **kwargs: Any) -> dict:
    """Helper function for performing a POST request.

    Args:
        url: target URL.
        **kwargs: forwarded verbatim to ``requests.post``.

    Returns:
        dict: response as produced by ``__make_request``.
    """
    return __make_request(requests.post, url, **kwargs)
| 8,478 |
def vote92(path):
    """Reports of voting in the 1992 U.S. Presidential election.

    Survey data containing self-reports of vote choice in the 1992 U.S.
    Presidential election, with numerous covariates, from the 1992 American
    National Election Studies.

    A data frame with 909 observations on the following 10 variables.

    `vote`
        a factor with levels `Perot` `Clinton` `Bush`

    `dem`
        a numeric vector, 1 if the respondent reports identifying with the
        Democratic party, 0 otherwise.

    `rep`
        a numeric vector, 1 if the respondent reports identifying with the
        Republican party, 0 otherwise

    `female`
        a numeric vector, 1 if the respondent is female, 0 otherwise

    `persfinance`
        a numeric vector, -1 if the respondent reports that their personal
        financial situation has gotten worse over the last 12 months, 0 for
        no change, 1 if better

    `natlecon`
        a numeric vector, -1 if the respondent reports that national
        economic conditions have gotten worse over the last 12 months, 0 for
        no change, 1 if better

    `clintondis`
        a numeric vector, squared difference between respondent's
        self-placement on a scale measure of political ideology and the
        respondent's placement of the Democratic candidate, Bill Clinton

    `bushdis`
        a numeric vector, squared ideological distance of the respondent
        from the Republican candidate, President George H.W. Bush

    `perotdis`
        a numeric vector, squared ideological distance of the respondent
        from the Reform Party candidate, Ross Perot

    Alvarez, R. Michael and Jonathan Nagler. 1995. Economics, issues and the
    Perot candidacy: Voter choice in the 1992 Presidential election.
    *American Journal of Political Science*. 39:714-44.

    Miller, Warren E., Donald R. Kinder, Steven J. Rosenstone and the
    National Election Studies. 1999. *National Election Studies, 1992:
    Pre-/Post-Election Study*. Center for Political Studies, University of
    Michigan: Ann Arbor, Michigan.

    Inter-University Consortium for Political and Social Research. Study
    Number 1112. http://dx.doi.org/10.3886/ICPSR01112.

    Args:
        path: str.
            Path to directory which either stores file or otherwise file will
            be downloaded and extracted there.
            Filename is `vote92.csv`.

    Returns:
        Tuple of np.ndarray `x_train` with 909 rows and 9 columns and
        dictionary `metadata` of column headers (feature names).
    """
    import pandas as pd
    path = os.path.expanduser(path)
    filename = 'vote92.csv'
    # download the CSV on first use only; subsequent calls read the cache
    if not os.path.exists(os.path.join(path, filename)):
        url = 'http://dustintran.com/data/r/pscl/vote92.csv'
        maybe_download_and_extract(path, url,
                                   save_file_name='vote92.csv',
                                   resume=False)
    data = pd.read_csv(os.path.join(path, filename), index_col=0,
                       parse_dates=True)
    x_train = data.values
    metadata = {'columns': data.columns}
    return x_train, metadata
| 8,479 |
def root_title(inst, attr, value):
    """
    Require a title for the defined API.
    """
    # any truthy title satisfies the validator
    if value:
        return
    raise InvalidRootNodeError("RAML File does not define an API title.")
| 8,480 |
def csr_full_col_slices(arr_data, arr_indices, arr_indptr, indptr, row):
    """
    Gather the selected rows of a CSR matrix when every column dimension is
    a full slice with a step of one.

    It might be worth it to make two passes over the array and use static
    arrays instead of lists.
    """
    out_indices = []
    out_data = []
    for i, r in enumerate(row, 1):
        start, stop = arr_indptr[r], arr_indptr[r + 1]
        row_cols = arr_indices[start:stop]
        out_indices.extend(row_cols)
        out_data.extend(arr_data[start:stop])
        # running prefix sum of nonzeros per gathered row
        indptr[i] = indptr[i - 1] + len(row_cols)
    return (np.array(out_data), np.array(out_indices), indptr)
| 8,481 |
def slide_period(scraping_period, vacancies):
    """Move upper period boundary to the value equal to the timestamp of the
    last found vacancy."""
    if not vacancies:  # e.g. when the API reports total == 0
        return None
    period_start, period_end = scraping_period
    log(f'Change upper date {strtime_from_unixtime(period_end)}')
    return period_start, define_oldest_vacancy_unixtime(vacancies)
| 8,482 |
def merge_to_many(gt_data, oba_data, tolerance):
    """
    Merge gt_data dataframe and oba_data dataframe using the nearest value between columns 'gt_data.GT_DateTimeOrigUTC' and
    'oba_data.Activity Start Date and Time* (UTC)'. Before merging, the data is grouped by 'GT_Collector' on gt_data and
    each row on gt_data will be paired with one or none of the rows on oba_data grouped by userId.
    :param tolerance: maximum allowed difference (seconds) between 'gt_data.GT_DateTimeOrigUTC' and
    'oba_data.Activity Start Date and Time* (UTC)'.
    NOTE(review): `tolerance` is currently unused by this implementation;
    matching is done purely by the [orig, dest] time window -- confirm intent.
    :param gt_data: dataframe with preprocessed data from ground truth XLSX data file
    :param oba_data: dataframe with preprocessed data from OBA firebase export CSV data file
    :return: tuple (merged_df, matches_df, all_unmatched_trips_df).
    Relies on the module-level `command_line_args` (repeatGtRows flag) and
    `constants.OBA_UNMATCHED_NEW_COLUMNS_ORDER`.
    """
    # List of unique collectors and and unique users
    list_collectors = gt_data['GT_Collector'].unique()
    list_oba_users = oba_data['User ID'].unique()
    # Create empty dataframes to be returned
    merged_df = pd.DataFrame()
    matches_df = pd.DataFrame()
    all_unmatched_trips_df = pd.DataFrame()
    list_total_trips = []
    for collector in list_collectors:
        print("Merging data for collector ", collector)
        # Create dataframe for a collector on list_collectors
        gt_data_collector = gt_data[gt_data["GT_Collector"] == collector]
        # Make sure dataframe is sorted by 'ClosesTime'
        gt_data_collector.sort_values('GT_DateTimeOrigUTC', inplace=True)
        # Add total trips per collector
        list_total_trips.append(len(gt_data_collector))
        # NOTE(review): this counter appears unused
        i = 0
        for oba_user in list_oba_users:
            # Create a dataframe with the oba_user activities only
            oba_data_user = oba_data[oba_data["User ID"] == oba_user]
            # Make sure dataframes is sorted by 'Activity Start Date and Time* (UTC)'
            oba_data_user.sort_values('Activity Start Date and Time* (UTC)', inplace=True)
            # Create df for OBA trips without GT Data match
            oba_unmatched_trips_df = oba_data_user.copy()
            # Iterate over each trip of one collector to match it with zero to many activities of an oba_data_user
            for index, row in gt_data_collector.iterrows():
                # all activities starting within [trip origin, trip destination]
                bunch_of_matches = oba_data_user[(oba_data_user['Activity Start Date and Time* (UTC)'] >=
                                                  row['GT_DateTimeOrigUTC']) &
                                                 (oba_data_user['Activity Start Date and Time* (UTC)'] <=
                                                  row['GT_DateTimeDestUTC'])
                                                 ]
                # Get the size of bunch_of_matches to create a repeated dataframe to concatenate with
                if bunch_of_matches.empty:
                    len_bunch = 1
                else:
                    len_bunch = bunch_of_matches.shape[0]
                # Remove matched rows from unmatched trips df
                oba_unmatched_trips_df = pd.merge(oba_unmatched_trips_df, bunch_of_matches, indicator=True, how='outer').\
                    query('_merge=="left_only"').drop('_merge', axis=1)
                subset_df = gt_data_collector.loc[[index], :]
                # Repeat the firs row `len_bunch` times.
                new_df = pd.DataFrame(np.repeat(subset_df.values, len_bunch, axis=0))
                new_df.columns = gt_data_collector.columns
                # Add backup Start Time Columns
                new_df['GT_DateTimeOrigUTC_Backup'] = new_df['GT_DateTimeOrigUTC']
                # Remove (Fill with NaN) repeated GT rows unless required no to
                if len_bunch > 1 and not command_line_args.repeatGtRows:
                    new_df.loc[1:, new_df.columns.difference(['GT_DateTimeOrigUTC', 'GT_LatOrig', 'GT_LonOrig',
                                                              'GT_TourID', 'GT_TripID'])] = np.NaN
                temp_merge = pd.concat([new_df.reset_index(drop=True), bunch_of_matches.reset_index(drop=True)],
                                       axis=1)
                # Make sure the bunch of matches has the 'User Id' even for the empty rows
                temp_merge["User ID"] = oba_user
                # Merge running matches with current set of found matches
                merged_df = pd.concat([merged_df, temp_merge], ignore_index=True)
                # Add oba_user and number of many matches to the matches_df
                subset_df["User ID"] = oba_user[-4:]
                subset_df["GT_NumberOfTransitions"] = 0 if bunch_of_matches.empty else len_bunch
                matches_df = pd.concat([matches_df, subset_df], ignore_index=True)
            # Reorder the OBA columns
            oba_unmatched_trips_df= oba_unmatched_trips_df[constants.OBA_UNMATCHED_NEW_COLUMNS_ORDER]
            # Add Collector and device to unmatched trips
            oba_unmatched_trips_df['User ID'] = oba_user[-4:]
            # oba_unmatched_trips_df['GT_Collector'] = collector
            oba_unmatched_trips_df.insert(loc=0, column='GT_Collector', value=collector)
            # Append the unmatched trips per collector/device to the all unmatched df
            all_unmatched_trips_df = pd.concat([all_unmatched_trips_df, oba_unmatched_trips_df], ignore_index=True)
    return merged_df, matches_df, all_unmatched_trips_df
| 8,483 |
def returnDevPage():
    """
    Return page for the development input.

    :return: rendered dev.html web page
    """
    return render_template("dev.html")
| 8,484 |
def main():
    """Setup.py entry point.

    Declares package metadata and installs the ``ucs-detect`` console script.
    """
    setuptools.setup(
        name='ucs_detect',
        version='0.0.4',
        description=(
            "Detects the Unicode Version of an interactive terminal for export"),
        long_description=codecs.open(
            _get_here('README.rst'), 'rb', 'utf8').read(),
        author='Jeff Quast',
        author_email='contact@jeffquast.com',
        # Fixed: the two version clauses must be comma-separated
        # ('>=1.17.6,<2'); the previous '>=1.17.6<2' is not a valid PEP 508
        # specifier.  Also made this a proper tuple instead of a bare string.
        install_requires=('blessed>=1.17.6,<2',),
        license='MIT',
        packages=['ucs_detect'],
        url='https://github.com/jquast/ucs-detect',
        package_data={
            '': ['LICENSE', '*.rst'],
        },
        zip_safe=True,
        classifiers=[
            'Intended Audience :: Developers',
            'Natural Language :: English',
            'Development Status :: 5 - Production/Stable',
            'Environment :: Console',
            'License :: OSI Approved :: MIT License',
            'Operating System :: POSIX',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3.5',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
            'Topic :: Software Development :: Libraries',
            'Topic :: Software Development :: Localization',
            'Topic :: Software Development :: Internationalization',
            'Topic :: Terminals',
            'Topic :: Text Processing :: General',
        ],
        entry_points={
            'console_scripts': ['ucs-detect=ucs_detect:main'],
        },
        keywords=[
            'cjk',
            'combining',
            'console',
            'eastasian',
            # Fixed: a missing comma here previously fused this entry with the
            # next one into the single keyword 'emojiemulator'.
            'emoji',
            'emulator',
            'terminal',
            'unicode',
            'wcswidth',
            'wcwidth',
            'xterm',
        ],
    )
| 8,485 |
def toeplitz(c, r=None):
    """Construct a Toeplitz matrix.

    A Toeplitz matrix has constant diagonals. Its first column is ``c`` and
    its first row is ``r``; when ``r`` is omitted, ``r == conjugate(c)`` is
    assumed (yielding a Hermitian result if ``c[0]`` is real).

    Args:
        c (cupy.ndarray): First column of the matrix; flattened to 1-D
            regardless of its original shape.
        r (cupy.ndarray, optional): First row of the matrix; flattened to
            1-D. ``r[0]`` is ignored — the first row of the result is
            ``[c[0], r[1:]]``. Defaults to ``conjugate(c)``.

    Returns:
        cupy.ndarray: The Toeplitz matrix, with dtype ``(c[0] + r[0]).dtype``.

    .. seealso:: :func:`cupyx.scipy.linalg.circulant`
    .. seealso:: :func:`cupyx.scipy.linalg.hankel`
    .. seealso:: :func:`cupyx.scipy.linalg.solve_toeplitz`
    .. seealso:: :func:`cupyx.scipy.linalg.fiedler`
    .. seealso:: :func:`scipy.linalg.toeplitz`
    """
    first_col = c.ravel()
    if r is None:
        first_row = first_col.conjugate()
    else:
        first_row = r.ravel()
    # The helper expects the column reversed and the row without its first
    # element (which duplicates c[0]).
    return _create_toeplitz_matrix(first_col[::-1], first_row[1:])
| 8,486 |
def get_humbug_integrations(args: argparse.Namespace) -> None:
    """
    Print the list of Humbug integrations matching the CLI filters
    (``--id``, ``--group``, ``--journal``) as JSON.
    """
    session = SessionLocal()
    try:
        # Collect the optional filters first, then apply them in one pass.
        conditions = []
        if args.id is not None:
            conditions.append(HumbugEvent.id == args.id)
        if args.group is not None:
            conditions.append(HumbugEvent.group_id == args.group)
        if args.journal is not None:
            conditions.append(HumbugEvent.journal_id == args.journal)

        query = session.query(HumbugEvent)
        for condition in conditions:
            query = query.filter(condition)

        response = HumbugIntegrationListResponse(
            integrations=[
                HumbugIntegrationResponse(
                    id=humbug_event.id,
                    group_id=humbug_event.group_id,
                    journal_id=humbug_event.journal_id,
                    created_at=humbug_event.created_at,
                    updated_at=humbug_event.updated_at,
                )
                for humbug_event in query.all()
            ]
        )
        print(response.json())
    except Exception as e:
        print(str(e))
    finally:
        session.close()
| 8,487 |
def download_and_unzip(url, zip_path, csv_path, data_folder):
    """Download a zipped csv file and extract it.

    Args:
        url: Web address of the zip archive.
        zip_path: Local path the archive is downloaded to.
        csv_path: Expected path of the extracted csv file.
        data_folder: Folder in which the data is stored.
    """
    download_from_url(url, zip_path)
    unzip(zip_path, csv_path, data_folder)
    print('Done.')
| 8,488 |
def generate_config(config_fp, **kwargs):
    r"""
    Creates a config file.

    :param config_fp: Filepath to the config
    :param \*\*kwargs: Source for the config data
    """
    # ``kwargs`` is already a plain dict of the caller's keyword arguments,
    # so the previous element-by-element copy loop was unnecessary.
    with open(config_fp, 'w') as c:
        _dump(kwargs, c, indent=2)
| 8,489 |
def download_private_id_set_from_gcp(public_storage_bucket, storage_base_path):
    """Download the private ID set file from cloud storage into the artifacts dir.

    Args:
        public_storage_bucket (google.cloud.storage.bucket.Bucket): google storage
            bucket where private_id_set.json is stored.
        storage_base_path (str): The storage base path of the bucket.

    Returns:
        str: full local path of the private ID set file, or '' if it could not
        be created.
    """
    if storage_base_path:
        storage_id_set_path = os.path.join(storage_base_path, 'content/private_id_set.json')
    else:
        storage_id_set_path = 'content/private_id_set.json'

    artifacts_dir = '/home/runner/work/content-private/content-private/content/artifacts'
    private_id_set_path = artifacts_dir + '/private_id_set.json'
    if not os.path.exists(artifacts_dir):
        os.mkdir(artifacts_dir)

    # Pull the file from the bucket if present; otherwise create an empty one.
    if id_set_file_exists_in_bucket(public_storage_bucket, storage_id_set_path):
        public_storage_bucket.blob(storage_id_set_path).download_to_filename(private_id_set_path)
    else:
        create_empty_id_set_in_artifacts(private_id_set_path)

    return private_id_set_path if os.path.exists(private_id_set_path) else ''
| 8,490 |
def get_inflows_from_parent_model(parent_reach_data, inset_reach_data,
                                  mf2005_parent_sfr_outputfile, mf6_parent_sfr_budget_file,
                                  inset_grid, active_area=None):
    """Get places in an inset model SFR network where the parent SFR network crosses
    the inset model boundary, using common line ID numbers from parent and inset reach datasets.
    MF2005 or MF6 supported; if either dataset contains only reach numbers (is MODFLOW-6),
    the reach numbers are used as segment numbers, with each segment only having one reach.

    Parameters
    ----------
    parent_reach_data : str (filepath) or DataFrame
        SFR reach data for parent model. Must include columns:
        line_id : int; unique identifier for hydrography line that each reach is based on
        rno : int; unique identifier for each reach. Optional if iseg and ireach columns are included.
        iseg : int; unique identifier for each segment. Optional if rno is included.
        ireach : int; unique identifier for each reach. Optional if rno is included.
        geometry : shapely.geometry object representing location of each reach
    inset_reach_data : str (filepath) or DataFrame
        SFR reach data for inset model. Same columns as parent_reach_data,
        except a geometry column isn't needed. line_id values must correspond to
        same source hydrography as those in parent_reach_data.
    mf2005_parent_sfr_outputfile : str (filepath)
        Modflow-2005 style SFR text file budget output.
    mf6_parent_sfr_budget_file : str (filepath)
        Modflow-6 style SFR binary budget output
    inset_grid : flopy.discretization.StructuredGrid instance describing model grid
        Must be in same coordinate system as geometries in parent_reach_data.
        Required only if active_area is None.
    active_area : shapely.geometry.Polygon object
        Describes the area of the inset model where SFR is applied. Used to find
        inset reaches from parent model. Must be in same coordinate system as
        geometries in parent_reach_data. Required only if inset_grid is None.

    Returns
    -------
    inflows : DataFrame
        Columns:
        parent_segment : parent model segment
        parent_reach : parent model reach
        parent_rno : parent model reach number
        line_id : unique identifier for hydrography line that each reach is based on

    Notes
    -----
    NOTE(review): despite the documented return value, this function currently
    falls off the end and returns ``None`` — the ``inflows`` DataFrame is never
    assembled from ``locations`` and ``df``. The trailing ``j=2`` looks like a
    debugging leftover. Confirm intended behavior before relying on this.
    """
    # Locate inset reaches where parent SFR lines cross the inset boundary,
    # matched via the shared line_id values.
    locations = get_inflow_locations_from_parent_model(parent_reach_data=parent_reach_data,
                                                       inset_reach_data=inset_reach_data,
                                                       inset_grid=inset_grid,
                                                       active_area=active_area)
    # Read the parent model's SFR budget output (MF2005 text format here;
    # the MF6 stage file is deliberately not requested).
    df = read_sfr_output(mf2005_sfr_outputfile=mf2005_parent_sfr_outputfile,
                         mf6_sfr_stage_file=None,
                         mf6_sfr_budget_file=mf6_parent_sfr_budget_file,
                         model=None)
    # NOTE(review): apparent debugging leftover; function ends without a return.
    j=2
| 8,491 |
def test_load_simulated(test_file):
    """Check that simulation info is joined onto subarray and telescope events."""
    from ctapipe.io.tableloader import TableLoader

    _, dl1_file = test_file

    with TableLoader(dl1_file, load_simulated=True) as loader:
        subarray_table = loader.read_subarray_events()
        assert "true_energy" in subarray_table.colnames

        telescope_table = loader.read_telescope_events([25])
        assert "true_energy" in telescope_table.colnames
| 8,492 |
def matmul_op_select(x1_shape, x2_shape, transpose_x1, transpose_x2):
    """Select the matmul primitive appropriate for the operand ranks.

    1-D operands cannot be transposed, so their transpose flags are cleared;
    two 1-D inputs use elementwise Mul, rank <= 2 pairs use MatMul, and any
    higher-rank combination falls through to BatchMatMul.
    """
    x1_dim, x2_dim = len(x1_shape), len(x2_shape)

    # A 1-D operand has no meaningful transpose; normalize the flags once.
    if x1_dim == 1:
        transpose_x1 = False
    if x2_dim == 1:
        transpose_x2 = False

    if x1_dim == 1 and x2_dim == 1:
        return P.Mul()
    if x1_dim <= 2 and x2_dim <= 2:
        return P.MatMul(transpose_x1, transpose_x2)
    return P.BatchMatMul(transpose_x1, transpose_x2)
| 8,493 |
def test_set_proxy_windows():
    """
    Verify that on Windows the proxy module writes the expected registry
    values and imports the IE proxy configuration via netsh.
    """
    settings_key = "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Internet Settings"
    expected_reg_calls = [
        call(
            hive="HKEY_CURRENT_USER",
            key=settings_key,
            vname="ProxyServer",
            vdata=(
                "http=192.168.0.1:3128;https=192.168.0.1:3128;ftp=192.168.0.1:3128;"
            ),
        ),
        call(
            hive="HKEY_CURRENT_USER",
            key=settings_key,
            vname="ProxyEnable",
            vdata=1,
            vtype="REG_DWORD",
        ),
        call(
            hive="HKEY_CURRENT_USER",
            key=settings_key,
            vname="ProxyOverride",
            vdata="<local>;.moo.com;.salt.com",
        ),
    ]
    reg_mock = MagicMock()
    cmd_mock = MagicMock()
    with patch.dict(proxy.__grains__, {"os": "Windows"}), patch.dict(
        proxy.__utils__, {"reg.set_value": reg_mock}
    ), patch.dict(proxy.__salt__, {"cmd.run": cmd_mock}):
        result = proxy.set_proxy_win(
            server="192.168.0.1",
            port=3128,
            bypass_hosts=[".moo.com", ".salt.com"],
        )
        reg_mock.assert_has_calls(expected_reg_calls)
        cmd_mock.assert_called_once_with("netsh winhttp import proxy source=ie")
        assert result
| 8,494 |
def Align4(i):
    """Round up to the nearest multiple of 4. See unit tests."""
    # Adding 3 and clearing the two low bits rounds up to a multiple of 4;
    # equivalent to the ((i-1) | 3) + 1 formulation for all Python ints.
    return (i + 3) & ~3
| 8,495 |
def poggendorff_parameters(illusion_strength=0, difference=0):
    """Compute parameters for the Poggendorff illusion.

    Parameters
    ----------
    illusion_strength : float
        Strength of the line tilt biasing the perception of a discontinuous
        single line: orientation of the lines in degrees, 0 being vertical,
        larger magnitudes rotating clockwise (sign does not change the tilt).
    difference : float
        Objective magnitude of the discontinuity: vertical displacement of the
        right line relative to the left (positive = higher up).

    Returns
    -------
    dict
        Dictionary of parameters of the Poggendorff illusion.
    """
    vertical_offset = difference

    # Orientation of the oblique segments; 0 strength means vertical (90 deg).
    tilt = 90 - illusion_strength
    if illusion_strength < 0:
        tilt = -tilt

    # Left oblique segment, anchored at the origin.
    (left_x1, left_y1, left_x2, left_y2), _, _ = _coord_line(
        x1=0, y1=0, angle=-tilt, length=0.75
    )
    # Right oblique segment, shifted vertically by the objective difference.
    (right_x1, right_y1, right_x2, right_y2), _, _ = _coord_line(
        x1=0, y1=vertical_offset, angle=180 - tilt, length=0.75
    )

    return {
        "Illusion": "Poggendorff",
        "Illusion_Strength": illusion_strength,
        "Difference": difference,
        "Illusion_Type": "Congruent" if illusion_strength > 0 else "Incongruent",
        "Left_x1": left_x1,
        "Left_y1": left_y1,
        "Left_x2": left_x2,
        "Left_y2": left_y2,
        "Right_x1": right_x1,
        "Right_y1": right_y1,
        "Right_x2": right_x2,
        "Right_y2": right_y2,
        "Angle": tilt,
        "Rectangle_Height": 1.75,
        "Rectangle_Width": 0.5,
        "Rectangle_y": 0,
    }
| 8,496 |
def download_blobs(blobs: List[storage.Blob]) -> List[Tuple[str, str]]:
    """Download each blob into /tmp and return (basename, local path) pairs."""
    downloaded = []
    for blob in blobs:
        path_parts = blob.name.split("/")
        # Flatten everything after the top-level prefix into one /tmp filename.
        local_name = "-".join(path_parts[1:])
        local_path = f"/tmp/{local_name}"
        blob.download_to_filename(local_path)
        downloaded.append((path_parts[-1], local_path))
    return downloaded
| 8,497 |
def _test_kron_col_single_matrix(n, k, m):
    """Run one randomized matrix check of utils.kron_col()."""
    left = np.random.random((n, k))
    right = np.random.random((m, k))

    product = roi.utils.kron_col(left, right)

    # Column-wise Kronecker product: rows multiply, columns are preserved.
    assert product.ndim == 2
    assert product.shape[0] == left.shape[0] * right.shape[0]
    assert product.shape[1] == left.shape[1]
    for row in range(n):
        assert np.allclose(product[row * m:(row + 1) * m], left[row] * right)
| 8,498 |
def testMarkov2(X, ns, alpha, verbose=True):
"""Test second-order Markovianity of symbolic sequence X with ns symbols.
Null hypothesis:
first-order MC <=>
p(X[t+1] | X[t], X[t-1]) = p(X[t+1] | X[t], X[t-1], X[t-2])
cf. Kullback, Technometrics (1962), Table 10.2.
Args:
x: symbolic sequence, symbols = [0, 1, 2, ...]
ns: number of symbols
alpha: significance level
Returns:
p: p-value of the Chi2 test for independence
"""
if verbose:
print("\nSECOND-ORDER MARKOVIANITY:")
n = len(X)
f_ijkl = np.zeros((ns,ns,ns,ns))
f_ijk = np.zeros((ns,ns,ns))
f_jkl = np.zeros((ns,ns,ns))
f_jk = np.zeros((ns,ns))
for t in range(n-3):
i = X[t]
j = X[t+1]
k = X[t+2]
l = X[t+3]
f_ijkl[i,j,k,l] += 1.0
f_ijk[i,j,k] += 1.0
f_jkl[j,k,l] += 1.0
f_jk[j,k] += 1.0
T = 0.0
for i, j, k, l in np.ndindex(f_ijkl.shape):
f = f_ijkl[i,j,k,l]*f_ijk[i,j,k]*f_jkl[j,k,l]*f_jk[j,k]
if (f > 0):
num_ = f_ijkl[i,j,k,l]*f_jk[j,k]
den_ = f_ijk[i,j,k]*f_jkl[j,k,l]
T += (f_ijkl[i,j,k,l]*np.log(num_/den_))
T *= 2.0
df = ns*ns*(ns-1)*(ns-1)
#p = chi2test(T, df, alpha)
p = chi2.sf(T, df, loc=0, scale=1)
if verbose:
print(f"p: {p:.2e} | t: {T:.3f} | df: {df:.1f}")
return p
| 8,499 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.