Dataset column schema (columns that can be null were marked ⌀ in the original dump and are noted as "nullable" below):

| column | type | range / values |
| --- | --- | --- |
| hexsha | string | length 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 239 |
| max_stars_repo_name | string | length 5 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | 1 to 10 items |
| max_stars_count | int64 (nullable) | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string (nullable) | length 24 |
| max_stars_repo_stars_event_max_datetime | string (nullable) | length 24 |
| max_issues_repo_path | string | length 3 to 239 |
| max_issues_repo_name | string | length 5 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | 1 to 10 items |
| max_issues_count | int64 (nullable) | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string (nullable) | length 24 |
| max_issues_repo_issues_event_max_datetime | string (nullable) | length 24 |
| max_forks_repo_path | string | length 3 to 239 |
| max_forks_repo_name | string | length 5 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | 1 to 10 items |
| max_forks_count | int64 (nullable) | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string (nullable) | length 24 |
| max_forks_repo_forks_event_max_datetime | string (nullable) | length 24 |
| content | string | length 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
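Each record below pairs this repository metadata with the raw file text in `content`. A minimal sketch of reading such a dump, assuming it has been exported as JSON Lines with one record per line (the file name is hypothetical):

import json

with open('code_dump.jsonl') as fh:  # hypothetical export of this dump
    for line in fh:
        row = json.loads(line)
        # Each record carries repository metadata plus the raw file text in 'content'.
        print(row['hexsha'][:8], row['max_stars_repo_name'],
              row['max_stars_repo_path'], row['size'], row['alphanum_fraction'])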
hexsha 4a1c8500e99ab2c202368f0456ac1983694bf256 | size 26,574 | ext py | lang Python
max_stars: helios/pipeViewer/pipe_view/model/query_set.py @ debjyoti0891/map | head abdae67964420d7d36255dcbf83e4240a1ef4295 | licenses ["MIT"] | stars 44 | 2019-12-13T06:39:13.000Z to 2022-03-29T23:09:28.000Z
max_issues: helios/pipeViewer/pipe_view/model/query_set.py @ debjyoti0891/map | head abdae67964420d7d36255dcbf83e4240a1ef4295 | licenses ["MIT"] | issues 222 | 2020-01-14T21:58:56.000Z to 2022-03-31T20:05:12.000Z
max_forks: helios/pipeViewer/pipe_view/model/query_set.py @ debjyoti0891/map | head abdae67964420d7d36255dcbf83e4240a1ef4295 | licenses ["MIT"] | forks 19 | 2020-01-03T19:03:22.000Z to 2022-01-09T08:36:20.000Z
content:
import copy
import logging
import sys
import time
from . import content_options as content
from logging import info, debug, warn, error
# #Formerly known as Ordered_Dict
class QuerySet:
# For sorting elements with no clock
__DEFAULT_T_OFF = 0
# # Get the Ordered Dict initialized. Note: in order to force-populate the
# Ordered Dict upon initialization, both optional parameters must be provided
def __init__(self, layout_context, d = None, l = None):
self.__layout_context = layout_context
self.__handle = self.__layout_context.dbhandle
# keeps track of the ranges we've already queried
self.old_hc = 0
# stores elements that need ranges of data
self.__range_pairs = []
self.__continued_transactions = {}
# A count of the number of times this Layout Context has requested to
# move to a different HC. NOTE: not an index of the current HC, nor
# number of stabbing queries performed
self.__stab_index = 0
# Don't use this unless you know what you are doing. Read below.
if (d is not None) and (l is not None) and (isinstance(d, dict) and isinstance(l, list)):
self.__t_off_sorted = d
# Instantiate a blank dictionary and list to
else:
# This will be a series of nested dictionaries, with t_offsets (in
# HC's) and loc_id's as their respective keys. The inmost values
# will be lists of Element Values
self.__t_off_sorted = {}
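# Illustrative shape only (keys and pairs below are hypothetical):
#   self.__t_off_sorted == {0: {12: [pair_a, pair_b]}, 500: {7: [pair_c]}}
# i.e. {t_offset_in_hcs: {loc_id: [Element_Value, ...]}}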
# # Adds a pair to the query set and stashes in correct location
# @profile
def AddPair(self, pair):
e = pair.GetElement()
# Recompute t_off in terms of plain HC's
####clock = self.GetClock(pair)
lmgr = self.__layout_context.dbhandle.database.location_manager
loc_str = e.GetProperty('LocationString')
variables = self.__layout_context.GetLocationVariables()
loc, _, clock = lmgr.getLocationInfo(loc_str, variables)
t_off_property = e.GetProperty('t_offset', period = pair.GetClockPeriod())
# Warn about invalid locations for content types which DO require transactions
if loc == lmgr.INVALID_LOCATION_ID and e.GetProperty('Content') not in content.NO_TRANSACTIONS_REQUIRED:
print('Warning: No collected location matching "{}" (using variables:{})' \
.format(loc_str, variables), file = sys.stderr)
if clock == self.__handle.database.location_manager.NO_CLOCK:
# Makes the assumption that there will always be something else at
# t_offset of 0. If not, then this could stand to be optimized
t_off = self.__DEFAULT_T_OFF
period = -1
else:
period = self.__handle.database.clock_manager.getClockDomain(clock).tick_period
t_off = period * t_off_property
pair.SetClockPeriod(period)
#import pdb; pdb.set_trace()
if e.GetQueryFrame(period):
self.__range_pairs.append(pair)
else:
if self.GetID(pair) == -1 and e.GetProperty('Content') not in content.NO_DATABASE_REQUIRED:
pair.SetMissingLocation()
else:
pair.SetVal('')
if t_off in self.__t_off_sorted:
self.__t_off_sorted[t_off] = self.__AddAtLoc(pair, self.__t_off_sorted[t_off])
else:
self.__t_off_sorted[t_off] = self.__AddAtLoc(pair)
# Update this pair to indicate that it was added with this t_off and location
# This will be recalled when deleting this pair
pair.SetLocationAndTimingInformation(t_off_property, lmgr.getLocationString(loc))
# # Helper method to AddPair()
# @profile
def __AddAtLoc(self, pair, sub_dict = None):
if sub_dict:
if self.GetID(pair) in sub_dict:
if pair not in sub_dict[self.GetID(pair)]:
sub_dict[self.GetID(pair)].append(pair)
else:
sub_dict[self.GetID(pair)] = [pair]
return sub_dict
else:
return {self.GetID(pair):[pair]}
# # Used for re-sorting an Element's location within t_off_sorted{},
# before the Element's Properties have actually changed
def __ForceAddSingleQueryPair(self, pair, t_off_in, id):
e = pair.GetElement()
# Recompute t_off in terms of plain HC's
####clock = self.GetClock(pair)
lmgr = self.__layout_context.dbhandle.database.location_manager
loc_str = e.GetProperty('LocationString')
loc, _, clock = lmgr.getLocationInfo(loc_str, self.__layout_context.GetLocationVariables())
if clock == self.__handle.database.location_manager.NO_CLOCK:
# Makes the assumption that there will always be something else at
# t_offset of 0. If not, then this could stand to be optimized
t_off = self.__DEFAULT_T_OFF
else:
period = self.__handle.database.clock_manager.getClockDomain(clock).tick_period
t_off = period * t_off_in
pair.SetClockPeriod(period)
if id == -1 and e.GetProperty('Content') not in content.NO_DATABASE_REQUIRED:
pair.SetMissingLocation()
else:
pair.SetVal('')
if t_off in self.__t_off_sorted:
self.__t_off_sorted[t_off] = self.__ForceAddAtLoc(pair, id, self.__t_off_sorted[t_off])
else:
self.__t_off_sorted[t_off] = self.__ForceAddAtLoc(pair, id)
# Update this pair to indicate that it was added with this t_off and location
# This will be recalled when deleting this pair
pair.SetLocationAndTimingInformation(t_off_in, lmgr.getLocationString(loc))
# Makes sure that draw_order and t_off_sorted have the same Element
# Value instance after this operation, and it is in the correct
# location within draw_order
# for i in range(len(self.__draw_order)):
# if self.__draw_order[i] == pair:
# self.__draw_order[i] = pair
# return True
# return False
return True
# # Helper method to __ForceAddSingleQueryPair()
def __ForceAddAtLoc(self, pair, id, sub_dict = None):
if sub_dict:
if id in sub_dict:
if pair not in sub_dict[id]:
sub_dict[id].append(pair)
else:
sub_dict[id] = [pair]
return sub_dict
else:
return {id:[pair]}
# # Removes the Element-Value associated with the provided Element from
# both draw_order and the t_off_sorted, without leaving loose ends.
# @param pair Element_Value pair. The element in this pair must not have
# had its location or t_offset changed since it was added, otherwise it will
# not be found in the expected __t_offset_sorted bucket.
def DeletePair(self, pair):
# In the case of Resorting an Element in t_off_sorted, draw order
# delete range pair_entry if we have one
e = pair.GetElement()
# Get the properties related to rendering/sorting at the time this pair was added
prev_locstr = pair.GetDisplayLocationString() # get the fully-resolved (no variables) location string
prev_t_off = pair.GetDisplayTOffset()
if e.GetQueryFrame(pair.GetClockPeriod()):
for r_pair in self.__range_pairs:
if r_pair == pair:
self.__range_pairs.remove(pair)
break
else:
# Recompute t_off in terms of plain HC's
# #clock = self.GetClock(pair)
lmgr = self.__layout_context.dbhandle.database.location_manager
if prev_locstr is not None:
loc, _, clock = lmgr.getLocationInfo(prev_locstr, {})
else:
loc = lmgr.INVALID_LOCATION_ID
clock = lmgr.NO_CLOCK
####t_off = e.GetProperty('t_offset')
if clock == lmgr.NO_CLOCK:
# Makes the assumption that there will always be something else at
# t_offset of 0. If not, then this could stand to be optimized
t_off = self.__DEFAULT_T_OFF
else:
t_off = self.__handle.database.clock_manager.getClockDomain(clock).tick_period * prev_t_off
# Note that we could ignore missing t_offs here, but then we might
# have stale links in another t_off bucket. This guarantees that the
# proper pair was removed by requiring it to be in the expected
# bucket
temp = self.__t_off_sorted[t_off].get(loc)
if not temp:
return
for p in temp:
if p == e:
temp.remove(p)
if len(self.__t_off_sorted[t_off][loc]) == 0:
del self.__t_off_sorted[t_off][loc]
if len(self.__t_off_sorted[t_off]) == 0:
del self.__t_off_sorted[t_off]
def CheckLocationVariablesChanged(self):
loc_vars_status = self.__layout_context.GetLocationVariablesChanged()
if loc_vars_status:
self.__layout_context.AckLocationVariablesChanged()
return loc_vars_status
# # Returns the internal ID which maps to the given Element's Location
# String, per the Location Manager
def GetID(self, pair):
el = pair.GetElement()
if not el.LocationHasVars():
return self.__layout_context.dbhandle.database.location_manager.getLocationInfoNoVars(el.GetProperty('LocationString'))[0]
else:
return self.__layout_context.dbhandle.database.location_manager.getLocationInfo(el.GetProperty('LocationString'), self.__layout_context.GetLocationVariables(), self.CheckLocationVariablesChanged())[0]
# # Returns the clock ID which maps to the given Element's location
# string, per the Location Manager
def GetClock(self, pair):
el = pair.GetElement()
if el.LocationHasVars():
return self.__layout_context.dbhandle.database.location_manager.getLocationInfo(el.GetProperty('LocationString'), self.__layout_context.GetLocationVariables(), self.CheckLocationVariablesChanged())[2]
else:
return self.__layout_context.dbhandle.database.location_manager.getLocationInfoNoVars(el.GetProperty('LocationString'))[2]
# # When an element has its LocationString (therefore LocationID) or
# its t_offset changed, it needs to be resorted in the
# dictionary. This method is called, and executes, BEFORE the new
# property is assigned to the Element
def ReSort(self, pair, t_off, id):
self.DeletePair(pair)
res = self.__ForceAddSingleQueryPair(pair, t_off, id)
if res:
logging.debug('An Element was properly resorted')
else:
logging.warning("""An Element was unable to be resorted properly,
there is now a discrepancy between the Element Values stored in
t_off_sorted{} and draw_order[] inside Ordered Dict""")
# # Update the val of an Element Value when the Element's 'Content'
# property is changed
def ReValue(self, pair):
e = pair.GetElement()
if e.GetQueryFrame(pair.GetClockPeriod()):
pair.ClearTimedValues()
self.__layout_context.GoToHC(self.__layout_context.hc)
else:
if e.GetProperty('Content') in content.NO_TRANSACTIONS_REQUIRED:
# Recompute t_off in terms of plain HC's
clock = self.GetClock(pair)
t_off = e.GetProperty('t_offset')
if clock == self.__handle.database.location_manager.NO_CLOCK:
# Makes the assumption that there will always be something else at
# t_offset of 0. If not, then this could stand to be optimized
t_off = self.__DEFAULT_T_OFF
else:
t_off = self.__handle.database.clock_manager.getClockDomain(clock).tick_period * t_off
temp = self.__t_off_sorted[t_off][self.GetID(pair)]
for pair_tmp in temp:
if pair_tmp == e:
pair.SetVal(content.ProcessContent(e.GetProperty('Content'),
None,
e,
self.__handle,
self.__layout_context.hc,
self.__layout_context.GetLocationVariables()),
self.__stab_index,
)
return
else:
self.__layout_context.GoToHC(self.__layout_context.hc)
# @profile
def Update(self):
'''
This is where all Element Values get re-synchronized to the current hc
not sure if everything here is final, or best implemented
'''
self.__stab_index = self.__stab_index + 1
# Cached variable lookups
no_trans = content.NO_TRANSACTIONS_REQUIRED
stab_index = self.__stab_index
handle = self.__handle
hc = self.__layout_context.hc
loc_vars = self.__layout_context.GetLocationVariables()
ordered_ticks = [hc + toff for toff in self.__t_off_sorted]
# Clear all continued transactions so that we don't accidentally draw garbage
self.__continued_transactions.clear()
# add intermediate values to make sure Line-type elements have what they need
bottom_of_pair = 100000000000000000
top_of_pair = -100000000000
for pair in self.__range_pairs:
e = pair.GetElement()
e.SetTime(hc) # Always set time because it is used for drawing the schedule group
period = pair.GetClockPeriod()
if period == -1:
# unset/invalid
continue
qframe = e.GetQueryFrame(period)
# #print 'QUERY FRAME @ hc={} FOR {} = {}. Period = {}'.format(hc, e, qframe, period)
curr_time = qframe[0] + hc
end_time = qframe[1] + hc
curr_time = curr_time - curr_time % period
end_time = end_time - end_time % period
while curr_time <= end_time:
timed_val = pair.GetTimedVal(curr_time)
if not timed_val or not timed_val[0]:
ordered_ticks.append(curr_time)
if curr_time > top_of_pair:
top_of_pair = curr_time
if curr_time < bottom_of_pair:
bottom_of_pair = curr_time
curr_time += period
if len(ordered_ticks) == 0:
return # Nothing to update
ordered_ticks = sorted(set(ordered_ticks))
next_tick_idx = [0]
total_callbacks = [0]
total_useful_callbacks = [0]
total_updates = [0]
# @profile
def callback(t, tapi):
total_callbacks[0] += 1
next_tick = next_tick_idx[0]
if len(ordered_ticks) == 0:
return
next_t = ordered_ticks[next_tick]
# Show tick info
# #print 'On t=', t, ' @idx ', next_tick
# #print' next t=', next_t
if t < next_t:
# print ' ignored callback at t={}. t < next_t ({})'.format(t, next_t)
return # Ignore this t because there is no entry in ordered_ticks
# #in future, do this for all clocks.
# if t % period != 0:
# return
next_tick_idx[0] += 1
total_useful_callbacks[0] += 1
# get data for range_pairs
updated = 0
GetID = self.GetID
for range_pair_idx, range_pair in enumerate(self.__range_pairs):
period = range_pair.GetClockPeriod()
if period == -1:
# unset/invalid
continue
e = range_pair.GetElement()
frame = e.GetQueryFrame(period)
tick_start = frame[0] + self.__layout_context.hc
tick_end = frame[1] + self.__layout_context.hc
if tick_start <= t <= tick_end:
timed_val = range_pair.GetTimedVal(t)
if not timed_val or not timed_val[0]:
updated += 1
loc_id = GetID(range_pair)
content_type = e.GetProperty('Content')
# Update element content based on transaction
# If there is no data for this tick, this will return None
trans_proxy = self.__layout_context.dbhandle.api.getTransactionProxy(loc_id)
if trans_proxy is not None and trans_proxy.isValid():
if range_pair_idx in self.__continued_transactions:
old_interval, _, last = self.__continued_transactions[range_pair_idx]
if last and t >= old_interval[1]:
del self.__continued_transactions[range_pair_idx]
if range_pair_idx in self.__continued_transactions:
old_interval, old_processed_val, last = self.__continued_transactions[range_pair_idx]
old_left = old_interval[0]
new_interval = (old_left, trans_proxy.getRight())
# Fix for ARGOS-158/ARGOS-164
# There's a corner case where a heartbeat occurs in the middle of a clock period. We would ordinarily skip over it, and
# consequently miss the last part of a continued transaction. If a continued transaction ends before the next clock period begins,
# we add it to the ordered_ticks list so that we can catch the next part of it.
if new_interval[1] < new_interval[0] + period:
ordered_ticks.insert(next_tick_idx[0], new_interval[1])
self.__range_pairs[range_pair_idx].SetTimedVal(old_left, (old_processed_val, new_interval))
if not trans_proxy.isContinued():
self.__continued_transactions[range_pair_idx][0] = new_interval
self.__continued_transactions[range_pair_idx][2] = True
else:
processed_val = content.ProcessContent(content_type,
trans_proxy,
e,
handle,
hc,
loc_vars)
interval = (trans_proxy.getLeft(), trans_proxy.getRight())
if trans_proxy.isContinued():
self.__continued_transactions[range_pair_idx] = [interval, copy.copy(processed_val), False]
# Fix for ARGOS-158/ARGOS-164
# There's a corner case where a heartbeat occurs in the middle of a clock period. We would ordinarily skip over it, and
# consequently miss the last part of a continued transaction. If a continued transaction ends before the next clock period begins,
# we add it to the ordered_ticks list so that we can catch the next part of it.
if interval[1] < interval[0] + period:
ordered_ticks.insert(next_tick_idx[0], interval[1])
else:
range_pair.SetTimedVal(interval[0], (processed_val, interval))
if trans_proxy.getLeft() != t:
if t % period == 0:
original_start = trans_proxy.getLeft()
range_pair.SetTimedVal(t, (original_start, (t, t))) # placeholder
if not range_pair.GetTimedVal(original_start):
info('Unable to make full query.')
else:
if t % period == 0:
range_pair.SetTimedVal(t, (None, (t, t))) # placeholder
# Query at this time and update all elements for which a transaction
# exists.
# assert t-hc in self.__t_off_sorted, 'bad tick {0}'.format(t)
if t - hc in self.__t_off_sorted:
ids = self.__t_off_sorted[t - hc] # #.keys()
# #print 'IDs @ {0} = {1}'.format(t, ids)
# Dump all locations in a row with location transaction IDs coded into ASCII
# #for locid in xrange(0, self.__layout_context.dbhandle.database.location_manager.getMaxLocationID()):
# # trans_proxy = self.__layout_context.dbhandle.api.getTransactionProxy(locid)
# # if trans_proxy is not None and trans_proxy.isValid():
# # sys.stdout.write('{:1s}'.format(chr((0x21 + trans_proxy.getTransactionID())
# # % (ord('~') - ord('!'))
# # )))
# # else:
# # sys.stdout.write('_')
# #print ''
for loc_id, els in self.__t_off_sorted[t - hc].items():
for pair in els:
e = pair.GetElement()
content_type = e.GetProperty('Content')
if content_type in no_trans:
# Update this element, which is not dependent on a transaction
pair.SetVal(content.ProcessContent(content_type,
None,
e,
handle,
hc,
loc_vars),
stab_index)
else:
# Update element content based on transaction
# If there is no data for this tick, this will return None
trans_proxy = self.__layout_context.dbhandle.api.getTransactionProxy(loc_id)
if loc_id == -1:
pair.SetMissingLocation()
elif trans_proxy is not None and trans_proxy.isValid():
pair.SetVal(content.ProcessContent(content_type,
trans_proxy,
e,
handle,
hc,
loc_vars),
stab_index)
else:
# There is no transaction here. It might be a fake query response or
# a genuine empty transaction.
# If previously, there was no transaction at this location,
# assume this is still the case. If an element changes locations
# and then points to a location that is valid but has no transaction
# it is the responsibility of AddElement-related methods to clear
# the 'no location' value so that it doesn't persist
if pair.GetVal() is not content.OverrideState('no loc'):
pair.SetVal(content.OverrideState('no trans'))
updated += 1
total_updates[0] += updated
logging.debug('Querying from {} to {}'.format(ordered_ticks[0], ordered_ticks[-1]))
t_start = time.monotonic()
try:
self.__layout_context.dbhandle.query(ordered_ticks[0], ordered_ticks[-1], callback, True)
logging.debug("Done with db query")
except Exception as ex:
logging.debug('Exception while querying!: {}'.format(ex))
raise
finally:
logging.debug('{0}s: Query+Update for {1} elements. {2} callbacks ({3} useful)' \
.format(time.monotonic() - t_start, total_updates[0], total_callbacks[0], total_useful_callbacks[0]))
logging.debug(' {}'.format(self.__layout_context.dbhandle.api))
node_states = self.__layout_context.dbhandle.api.getNodeStates().decode('utf-8').split('\n')
for ns in node_states:
logging.debug(' {}'.format(ns))
logging.debug('Done')
# print 'Node 0 dump:\n'
# print self.__layout_context.dbhandle.api.getNodeDump(0, 890, 905, 40);
# # For debug purposes
def __repr__(self):
return self.__str__()
def __str__(self):
return '<Ordered_Dict>'
def GetElementDump(self):
res = ''
for t_off in self.__t_off_sorted:
res += str(t_off) + '\t'
for loc in self.__t_off_sorted[t_off]:
res += str(loc) + '\t'
for e in self.__t_off_sorted[t_off][loc]:
res += repr(e) + ', '
res += '\n\t'
res += '\n'
return res
avg_line_length 50.234405 | max_line_length 212 | alphanum_fraction 0.543765
hexsha 4a1c85280f06ebfcf1ca1cd229d2ce7c1852ebbc | size 35,882 | ext py | lang Python
max_stars: launchdarkly_api/model/defaults.py @ launchdarkly/api-client-python | head b72bd94fb65ac57bd95df5767aebcdaff50e5cb6 | licenses ["Apache-2.0"] | stars 6 | 2020-02-06T20:17:25.000Z to 2021-12-28T20:13:34.000Z
max_issues: launchdarkly_api/model/defaults.py @ launchdarkly/api-client-python | head b72bd94fb65ac57bd95df5767aebcdaff50e5cb6 | licenses ["Apache-2.0"] | issues 7 | 2019-02-18T21:51:47.000Z to 2021-09-03T17:49:33.000Z
max_forks: launchdarkly_api/model/defaults.py @ launchdarkly/api-client-python | head b72bd94fb65ac57bd95df5767aebcdaff50e5cb6 | licenses ["Apache-2.0"] | forks 6 | 2019-08-02T16:10:31.000Z to 2021-05-23T17:47:03.000Z
content:
# -*- coding: utf-8 -*-
"""
LaunchDarkly REST API
# Overview ## Authentication All REST API resources are authenticated with either [personal or service access tokens](https://docs.launchdarkly.com/home/account-security/api-access-tokens), or session cookies. Other authentication mechanisms are not supported. You can manage personal access tokens on your [Account settings](https://app.launchdarkly.com/settings/tokens) page. LaunchDarkly also has SDK keys, mobile keys, and client-side IDs that are used by our server-side SDKs, mobile SDKs, and client-side SDKs, respectively. **These keys cannot be used to access our REST API**. These keys are environment-specific, and can only perform read-only operations (fetching feature flag settings). | Auth mechanism | Allowed resources | Use cases | | ----------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | -------------------------------------------------- | | [Personal access tokens](https://docs.launchdarkly.com/home/account-security/api-access-tokens) | Can be customized on a per-token basis | Building scripts, custom integrations, data export | | SDK keys | Can only access read-only SDK-specific resources and the firehose, restricted to a single environment | Server-side SDKs, Firehose API | | Mobile keys | Can only access read-only mobile SDK-specific resources, restricted to a single environment | Mobile SDKs | | Client-side ID | Single environment, only flags marked available to client-side | Client-side JavaScript | > #### Keep your access tokens and SDK keys private > > Access tokens should _never_ be exposed in untrusted contexts. Never put an access token in client-side JavaScript, or embed it in a mobile application. LaunchDarkly has special mobile keys that you can embed in mobile apps. If you accidentally expose an access token or SDK key, you can reset it from your [Account Settings](https://app.launchdarkly.com/settings#/tokens) page. > > The client-side ID is safe to embed in untrusted contexts. It's designed for use in client-side JavaScript. ### Via request header The preferred way to authenticate with the API is by adding an `Authorization` header containing your access token to your requests. The value of the `Authorization` header must be your access token. Manage personal access tokens from the [Account Settings](https://app.launchdarkly.com/settings/tokens) page. ### Via session cookie For testing purposes, you can make API calls directly from your web browser. If you're logged in to the application, the API will use your existing session to authenticate calls. If you have a [role](https://docs.launchdarkly.com/home/team/built-in-roles) other than Admin, or have a [custom role](https://docs.launchdarkly.com/home/team/custom-roles) defined, you may not have permission to perform some API calls. You will receive a `401` response code in that case. > ### Modifying the Origin header causes an error > > LaunchDarkly validates that the Origin header for any API request authenticated by a session cookie matches the expected Origin header. The expected Origin header is `https://app.launchdarkly.com`. > > If the Origin header does not match what's expected, LaunchDarkly returns an error. This error can prevent the LaunchDarkly app from working correctly. > > Any browser extension that intentionally changes the Origin header can cause this problem. 
For example, the `Allow-Control-Allow-Origin: *` Chrome extension changes the Origin header to `http://evil.com` and causes the app to fail. > > To prevent this error, do not modify your Origin header. > > LaunchDarkly does not require origin matching when authenticating with an access token, so this issue does not affect normal API usage. ## Representations All resources expect and return JSON response bodies. Error responses will also send a JSON body. Read [Errors](#section/Errors) for a more detailed description of the error format used by the API. In practice this means that you always get a response with a `Content-Type` header set to `application/json`. In addition, request bodies for `PUT`, `POST`, `REPORT` and `PATCH` requests must be encoded as JSON with a `Content-Type` header set to `application/json`. ### Summary and detailed representations When you fetch a list of resources, the response includes only the most important attributes of each resource. This is a _summary representation_ of the resource. When you fetch an individual resource (for example, a single feature flag), you receive a _detailed representation_ containing all of the attributes of the resource. The best way to find a detailed representation is to follow links. Every summary representation includes a link to its detailed representation. ### Links and addressability The best way to navigate the API is by following links. These are attributes in representations that link to other resources. The API always uses the same format for links: - Links to other resources within the API are encapsulated in a `_links` object. - If the resource has a corresponding link to HTML content on the site, it is stored in a special `_site` link. Each link has two attributes: an href (the URL) and a type (the content type). For example, a feature resource might return the following: ```json { \"_links\": { \"parent\": { \"href\": \"/api/features\", \"type\": \"application/json\" }, \"self\": { \"href\": \"/api/features/sort.order\", \"type\": \"application/json\" } }, \"_site\": { \"href\": \"/features/sort.order\", \"type\": \"text/html\" } } ``` From this, you can navigate to the parent collection of features by following the `parent` link, or navigate to the site page for the feature by following the `_site` link. Collections are always represented as a JSON object with an `items` attribute containing an array of representations. Like all other representations, collections have `_links` defined at the top level. Paginated collections include `first`, `last`, `next`, and `prev` links containing a URL with the respective set of elements in the collection. ## Updates Resources that accept partial updates use the `PATCH` verb, and support the [JSON Patch](https://datatracker.ietf.org/doc/html/rfc6902) format. Some resources also support the [JSON Merge Patch](https://datatracker.ietf.org/doc/html/rfc7386) format. In addition, some resources support optional comments that can be submitted with updates. Comments appear in outgoing webhooks, the audit log, and other integrations. ### Updates via JSON Patch [JSON Patch](https://datatracker.ietf.org/doc/html/rfc6902) is a way to specify the modifications to perform on a resource. For example, in this feature flag representation: ```json { \"name\": \"New recommendations engine\", \"key\": \"engine.enable\", \"description\": \"This is the description\", ... 
} ``` You can change the feature flag's description with the following patch document: ```json [{ \"op\": \"replace\", \"path\": \"/description\", \"value\": \"This is the new description\" }] ``` JSON Patch documents are always arrays. You can specify multiple modifications to perform in a single request. You can also test that certain preconditions are met before applying the patch: ```json [ { \"op\": \"test\", \"path\": \"/version\", \"value\": 10 }, { \"op\": \"replace\", \"path\": \"/description\", \"value\": \"The new description\" } ] ``` The above patch request tests whether the feature flag's `version` is `10`, and if so, changes the feature flag's description. Attributes that aren't editable, like a resource's `_links`, have names that start with an underscore. ### Updates via JSON Merge Patch The API also supports the [JSON Merge Patch](https://datatracker.ietf.org/doc/html/rfc7386) format, as well as the [Update feature flag](/tag/Feature-flags#operation/patchFeatureFlag) resource. JSON Merge Patch is less expressive than JSON Patch but in many cases, it is simpler to construct a merge patch document. For example, you can change a feature flag's description with the following merge patch document: ```json { \"description\": \"New flag description\" } ``` ### Updates with comments You can submit optional comments with `PATCH` changes. The [Update feature flag](/tag/Feature-flags#operation/patchFeatureFlag) resource supports comments. To submit a comment along with a JSON Patch document, use the following format: ```json { \"comment\": \"This is a comment string\", \"patch\": [{ \"op\": \"replace\", \"path\": \"/description\", \"value\": \"The new description\" }] } ``` To submit a comment along with a JSON Merge Patch document, use the following format: ```json { \"comment\": \"This is a comment string\", \"merge\": { \"description\": \"New flag description\" } } ``` ### Updates via semantic patches The API also supports the Semantic patch format. A semantic `PATCH` is a way to specify the modifications to perform on a resource as a set of executable instructions. JSON Patch uses paths and a limited set of operations to describe how to transform the current state of the resource into a new state. Semantic patch allows you to be explicit about intent using precise, custom instructions. In many cases, semantic patch instructions can also be defined independently of the current state of the resource. This can be useful when defining a change that may be applied at a future date. For example, in this feature flag configuration in environment Production: ```json { \"name\": \"Alternate sort order\", \"kind\": \"boolean\", \"key\": \"sort.order\", ... \"environments\": { \"production\": { \"on\": true, \"archived\": false, \"salt\": \"c29ydC5vcmRlcg==\", \"sel\": \"8de1085cb7354b0ab41c0e778376dfd3\", \"lastModified\": 1469131558260, \"version\": 81, \"targets\": [ { \"values\": [ \"Gerhard.Little@yahoo.com\" ], \"variation\": 0 }, { \"values\": [ \"1461797806429-33-861961230\", \"438580d8-02ee-418d-9eec-0085cab2bdf0\" ], \"variation\": 1 } ], \"rules\": [], \"fallthrough\": { \"variation\": 0 }, \"offVariation\": 1, \"prerequisites\": [], \"_site\": { \"href\": \"/default/production/features/sort.order\", \"type\": \"text/html\" } } } } ``` You can add a date you want a user to be removed from the feature flag's user targets. 
For example, “remove user 1461797806429-33-861961230 from the user target for variation 0 on the Alternate sort order flag in the production environment on Wed Jul 08 2020 at 15:27:41 pm”. This is done using the following: ```json { \"comment\": \"update expiring user targets\", \"instructions\": [ { \"kind\": \"removeExpireUserTargetDate\", \"userKey\": \"userKey\", \"variationId\": \"978d53f9-7fe3-4a63-992d-97bcb4535dc8\" }, { \"kind\": \"updateExpireUserTargetDate\", \"userKey\": \"userKey2\", \"variationId\": \"978d53f9-7fe3-4a63-992d-97bcb4535dc8\", \"value\": 1587582000000 }, { \"kind\": \"addExpireUserTargetDate\", \"userKey\": \"userKey3\", \"variationId\": \"978d53f9-7fe3-4a63-992d-97bcb4535dc8\", \"value\": 1594247266386 } ] } ``` Here is another example. In this feature flag configuration: ```json { \"name\": \"New recommendations engine\", \"key\": \"engine.enable\", \"environments\": { \"test\": { \"on\": true } } } ``` You can change the feature flag's description with the following patch document as a set of executable instructions. For example, “add user X to targets for variation Y and remove user A from targets for variation B for test flag”: ```json { \"comment\": \"\", \"instructions\": [ { \"kind\": \"removeUserTargets\", \"values\": [\"438580d8-02ee-418d-9eec-0085cab2bdf0\"], \"variationId\": \"852cb784-54ff-46b9-8c35-5498d2e4f270\" }, { \"kind\": \"addUserTargets\", \"values\": [\"438580d8-02ee-418d-9eec-0085cab2bdf0\"], \"variationId\": \"1bb18465-33b6-49aa-a3bd-eeb6650b33ad\" } ] } ``` > ### Supported semantic patch API endpoints > > - [Update feature flag](/tag/Feature-flags#operation/patchFeatureFlag) > - [Update expiring user targets on feature flag](/tag/Feature-flags#operation/patchExpiringUserTargets) > - [Update expiring user target for flags](/tag/User-settings#operation/patchExpiringFlagsForUser) > - [Update expiring user targets on segment](/tag/Segments#operation/patchExpiringUserTargetsForSegment) ## Errors The API always returns errors in a common format. Here's an example: ```json { \"code\": \"invalid_request\", \"message\": \"A feature with that key already exists\", \"id\": \"30ce6058-87da-11e4-b116-123b93f75cba\" } ``` The general class of error is indicated by the `code`. The `message` is a human-readable explanation of what went wrong. The `id` is a unique identifier. Use it when you're working with LaunchDarkly support to debug a problem with a specific API call. ### HTTP Status - Error Response Codes | Code | Definition | Desc. | Possible Solution | | ---- | ----------------- | ------------------------------------------------------------------------------------------- | ---------------------------------------------------------------- | | 400 | Bad Request | A request that fails may return this HTTP response code. | Ensure JSON syntax in request body is correct. | | 401 | Unauthorized | User doesn't have permission to an API call. | Ensure your SDK key is good. | | 403 | Forbidden | User does not have permission for operation. | Ensure that the user or access token has proper permissions set. | | 409 | Conflict | The API request could not be completed because it conflicted with a concurrent API request. | Retry your request. | | 429 | Too many requests | See [Rate limiting](/#section/Rate-limiting). | Wait and try again later. | ## CORS The LaunchDarkly API supports Cross Origin Resource Sharing (CORS) for AJAX requests from any origin. If an `Origin` header is given in a request, it will be echoed as an explicitly allowed origin. 
Otherwise, a wildcard is returned: `Access-Control-Allow-Origin: *`. For more information on CORS, see the [CORS W3C Recommendation](http://www.w3.org/TR/cors). Example CORS headers might look like: ```http Access-Control-Allow-Headers: Accept, Content-Type, Content-Length, Accept-Encoding, Authorization Access-Control-Allow-Methods: OPTIONS, GET, DELETE, PATCH Access-Control-Allow-Origin: * Access-Control-Max-Age: 300 ``` You can make authenticated CORS calls just as you would make same-origin calls, using either [token or session-based authentication](#section/Authentication). If you’re using session auth, you should set the `withCredentials` property for your `xhr` request to `true`. You should never expose your access tokens to untrusted users. ## Rate limiting We use several rate limiting strategies to ensure the availability of our APIs. Rate-limited calls to our APIs will return a `429` status code. Calls to our APIs will include headers indicating the current rate limit status. The specific headers returned depend on the API route being called. The limits differ based on the route, authentication mechanism, and other factors. Routes that are not rate limited may not contain any of the headers described below. > ### Rate limiting and SDKs > > LaunchDarkly SDKs are never rate limited and do not use the API endpoints defined here. LaunchDarkly uses a different set of approaches, including streaming/server-sent events and a global CDN, to ensure availability to the routes used by LaunchDarkly SDKs. > > The client-side ID is safe to embed in untrusted contexts. It's designed for use in client-side JavaScript. ### Global rate limits Authenticated requests are subject to a global limit. This is the maximum number of calls that can be made to the API per ten seconds. All personal access tokens on the account share this limit, so exceeding the limit with one access token will impact other tokens. Calls that are subject to global rate limits will return the headers below: | Header name | Description | | ------------------------------ | -------------------------------------------------------------------------------- | | `X-Ratelimit-Global-Remaining` | The maximum number of requests the account is permitted to make per ten seconds. | | `X-Ratelimit-Reset` | The time at which the current rate limit window resets in epoch milliseconds. | We do not publicly document the specific number of calls that can be made globally. This limit may change, and we encourage clients to program against the specification, relying on the two headers defined above, rather than hardcoding to the current limit. ### Route-level rate limits Some authenticated routes have custom rate limits. These also reset every ten seconds. Any access tokens hitting the same route share this limit, so exceeding the limit with one access token may impact other tokens. Calls that are subject to route-level rate limits will return the headers below: | Header name | Description | | ----------------------------- | ----------------------------------------------------------------------------------------------------- | | `X-Ratelimit-Route-Remaining` | The maximum number of requests to the current route the account is permitted to make per ten seconds. | | `X-Ratelimit-Reset` | The time at which the current rate limit window resets in epoch milliseconds. | A _route_ represents a specific URL pattern and verb. 
For example, the [Delete environment](/tag/Environments#operation/deleteEnvironment) endpoint is considered a single route, and each call to delete an environment counts against your route-level rate limit for that route. We do not publicly document the specific number of calls that can be made to each endpoint per ten seconds. These limits may change, and we encourage clients to program against the specification, relying on the two headers defined above, rather than hardcoding to the current limits. ### IP-based rate limiting We also employ IP-based rate limiting on some API routes. If you hit an IP-based rate limit, your API response will include a `Retry-After` header indicating how long to wait before re-trying the call. Clients must wait at least `Retry-After` seconds before making additional calls to our API, and should employ jitter and backoff strategies to avoid triggering rate limits again. ## OpenAPI (Swagger) We have a [complete OpenAPI (Swagger) specification](https://app.launchdarkly.com/api/v2/openapi.json) for our API. You can use this specification to generate client libraries to interact with our REST API in your language of choice. This specification is supported by several API-based tools such as Postman and Insomnia. In many cases, you can directly import our specification to ease use in navigating the APIs in the tooling. ## Client libraries We auto-generate multiple client libraries based on our OpenAPI specification. To learn more, visit [GitHub](https://github.com/search?q=topic%3Alaunchdarkly-api+org%3Alaunchdarkly&type=Repositories). ## Method Overriding Some firewalls and HTTP clients restrict the use of verbs other than `GET` and `POST`. In those environments, our API endpoints that use `PUT`, `PATCH`, and `DELETE` verbs will be inaccessible. To avoid this issue, our API supports the `X-HTTP-Method-Override` header, allowing clients to \"tunnel\" `PUT`, `PATCH`, and `DELETE` requests via a `POST` request. For example, if you wish to call one of our `PATCH` resources via a `POST` request, you can include `X-HTTP-Method-Override:PATCH` as a header. ## Beta resources We sometimes release new API resources in **beta** status before we release them with general availability. Resources that are in beta are still undergoing testing and development. They may change without notice, including becoming backwards incompatible. We try to promote resources into general availability as quickly as possible. This happens after sufficient testing and when we're satisfied that we no longer need to make backwards-incompatible changes. We mark beta resources with a \"Beta\" callout in our documentation, pictured below: > ### This feature is in beta > > To use this feature, pass in a header including the `LD-API-Version` key with value set to `beta`. Use this header with each call. To learn more, read [Beta resources](/#section/Beta-resources). ### Using beta resources To use a beta resource, you must include a header in the request. If you call a beta resource without this header, you'll receive a `403` response. Use this header: ``` LD-API-Version: beta ``` ## Versioning We try hard to keep our REST API backwards compatible, but we occasionally have to make backwards-incompatible changes in the process of shipping new features. These breaking changes can cause unexpected behavior if you don't prepare for them accordingly. Updates to our REST API include support for the latest features in LaunchDarkly. 
We also release a new version of our REST API every time we make a breaking change. We provide simultaneous support for multiple API versions so you can migrate from your current API version to a new version at your own pace. ### Setting the API version per request You can set the API version on a specific request by sending an `LD-API-Version` header, as shown in the example below: ``` LD-API-Version: 20191212 ``` The header value is the version number of the API version you'd like to request. The number for each version corresponds to the date the version was released. In the example above the version `20191212` corresponds to December 12, 2019. ### Setting the API version per access token When creating an access token, you must specify a specific version of the API to use. This ensures that integrations using this token cannot be broken by version changes. Tokens created before versioning was released have their version set to `20160426` (the version of the API that existed before versioning) so that they continue working the same way they did before versioning. If you would like to upgrade your integration to use a new API version, you can explicitly set the header described above. > ### Best practice: Set the header for every client or integration > > We recommend that you set the API version header explicitly in any client or integration you build. > > Only rely on the access token API version during manual testing. # noqa: E501
The version of the OpenAPI document: 2.0
Contact: support@launchdarkly.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from launchdarkly_api.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from launchdarkly_api.exceptions import ApiAttributeError
class Defaults(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'on_variation': (int,), # noqa: E501
'off_variation': (int,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'on_variation': 'onVariation', # noqa: E501
'off_variation': 'offVariation', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, on_variation, off_variation, *args, **kwargs): # noqa: E501
"""Defaults - a model defined in OpenAPI
Args:
on_variation (int):
off_variation (int):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.on_variation = on_variation
self.off_variation = off_variation
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, on_variation, off_variation, *args, **kwargs): # noqa: E501
"""Defaults - a model defined in OpenAPI
Args:
on_variation (int):
off_variation (int):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.on_variation = on_variation
self.off_variation = off_variation
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
avg_line_length 132.405904 | max_line_length 24,467 | alphanum_fraction 0.6372
hexsha 4a1c859043ec3a4a85781bbf8514ec9ad35c1784 | size 3,703 | ext py | lang Python
max_stars: wxgui/debug.py @ pshchelo/vampy | head 85555daf3a04bf36e81d6cd64f66bb1e4b341860 | licenses ["BSD-3-Clause"] | stars 1 | 2021-04-19T09:48:04.000Z to 2021-04-19T09:48:04.000Z
max_issues: wxgui/debug.py @ pshchelo/vampy | head 85555daf3a04bf36e81d6cd64f66bb1e4b341860 | licenses ["BSD-3-Clause"] | issues null | dates null
max_forks: wxgui/debug.py @ pshchelo/vampy | head 85555daf3a04bf36e81d6cd64f66bb1e4b341860 | licenses ["BSD-3-Clause"] | forks null | dates null
content:
#!/usr/bin/env python
'''Frame for single image debugging
'''
import wx
import matplotlib as mplt
mplt.use('WXAgg', warn=False)
from matplotlib import cm
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg as NavigationToolbar2
from matplotlib.figure import Figure
from resources import MICROSCOPE
import widgets
from calc import smooth
class ImageDebugFrame(wx.Frame):
def __init__(self, parent, id, img, out, extra_out):
wx.Frame.__init__(self, parent, id, size=(800,600), title = 'Single Image Debug')
self.statusbar = widgets.PlotStatusBar(self)
self.SetStatusBar(self.statusbar)
panel = wx.Panel(self, -1)
pansizer = wx.BoxSizer(wx.VERTICAL)
self.figure = Figure(facecolor = widgets.rgba_wx2mplt(panel.GetBackgroundColour()))
self.canvas = FigureCanvas(panel, -1, self.figure)
self.canvas.mpl_connect('motion_notify_event', self.statusbar.SetPosition)
pansizer.Add(self.canvas, 1, wx.GROW)
navtoolbar = NavigationToolbar2(self.canvas)
navtoolbar.Realize()
pansizer.Add(navtoolbar, 0, wx.GROW)
profile = extra_out['profile']
self.profileplot = self.figure.add_subplot(221, title = 'Axis brightness profile')
self.profileplot.plot(profile)
order = parent.analysispanel.GetParams()['order']
window = parent.analysispanel.GetParams()['window']
smoothmode = parent.analysispanel.GetParams()['smoothing']
grad = abs(smooth.smooth1d(profile, smoothmode, order, window, diff=1))
multiplier = profile.max()/grad.max()/2
grad *= multiplier
self.profileplot.plot(grad)
self.profileplot.axvline(extra_out['pip'], color = 'blue')
self.profileplot.axvline(extra_out['asp'], color = 'yellow')
self.profileplot.axvline(extra_out['ves'], color = 'green')
self.imgplot = self.figure.add_subplot(222, title = 'Image')
refs = extra_out['refs']
for ref in refs:
self.imgplot.plot([ref[0][1]], [ref[0][0]], 'yo') # due to format of refs
self.imgplot.imshow(img, aspect = 'equal', extent = None, cmap = cm.get_cmap('gray'))
self.pipprofile1 = self.figure.add_subplot(223, title = 'Left pipette section')
xleft = refs[0][0][1]
pipprofile1 = img[:,xleft]
self.pipprofile1.plot(pipprofile1)
self.pipprofile2 = self.figure.add_subplot(224, title = 'Right pipette section')
xright = refs[-1][0][1]
pipprofile2 = img[:,xright]
self.pipprofile2.plot(pipprofile2)
# self.gradpipprofile1 = self.figure.add_subplot(325, title = 'Left pipette section gradient')
# self.gradpipprofile1.plot(utils.get_gradient(pipprofile1, 3))
#
# self.gradpipprofile2 = self.figure.add_subplot(326, title = 'Right pipette section gradient')
# self.gradpipprofile2.plot(utils.get_gradient(pipprofile2, 3))
#
panel.SetSizer(pansizer)
panel.Fit()
title = '%s : %s - Image %s - %s'%(parent.imagedate, parent.imagedir, parent.imgpanel.GetImgNo(), self.GetTitle())
self.SetTitle(title)
self.SetFrameIcons(MICROSCOPE, (16,24,32))
self.canvas.draw()
def SetFrameIcons(self, artid, sizes):
ib = wx.IconBundle()
for size in sizes:
ib.AddIcon(wx.ArtProvider.GetIcon(artid, size = (size,size)))
self.SetIcons(ib)
avg_line_length 41.606742 | max_line_length 123 | alphanum_fraction 0.63138
hexsha 4a1c87732c7b62e0147abcabc405b248f772ccd6 | size 130 | ext py | lang Python
max_stars: pantheon-hermes/pantheon/hermes/gpu/nvidia/gtx.py @ CaptainBriot/hermes | head b603b63b76b1e708364839367632fd3ba6dace97 | licenses ["MIT"] | stars null | dates null
max_issues: pantheon-hermes/pantheon/hermes/gpu/nvidia/gtx.py @ CaptainBriot/hermes | head b603b63b76b1e708364839367632fd3ba6dace97 | licenses ["MIT"] | issues 2 | 2017-12-24T20:13:15.000Z to 2017-12-25T20:57:55.000Z
max_forks: pantheon-hermes/pantheon/hermes/gpu/nvidia/gtx.py @ CaptainBriot/pantheon-hermes | head b603b63b76b1e708364839367632fd3ba6dace97 | licenses ["MIT"] | forks null | dates null
content:
from .. import gpu
class Nvidia1070(gpu.BaseGPU):
model = 'GeForce GTX 1070'
mem = 1300
clock = 100
power = 100
avg_line_length 14.444444 | max_line_length 30 | alphanum_fraction 0.615385
hexsha 4a1c877858500c664101f51835bf5e58004af0e9 | size 970 | ext py | lang Python
max_stars: project_templates/stack_package/example_dds/python/lsst/example/dds/__init__.py @ lsst/templates | head ea9ea300299eabc482468de6582c71ae3146f671 | licenses ["Adobe-Glyph"] | stars 6 | 2016-07-19T20:55:01.000Z to 2019-03-29T02:34:00.000Z
max_issues: project_templates/stack_package/example_dds/python/lsst/example/dds/__init__.py @ lsst/templates | head ea9ea300299eabc482468de6582c71ae3146f671 | licenses ["Adobe-Glyph"] | issues 51 | 2015-12-09T20:50:59.000Z to 2022-03-29T17:12:52.000Z
max_forks: project_templates/stack_package/example_dds/python/lsst/example/dds/__init__.py @ lsst/templates | head ea9ea300299eabc482468de6582c71ae3146f671 | licenses ["Adobe-Glyph"] | forks 31 | 2015-05-06T08:56:44.000Z to 2021-03-29T20:18:16.000Z
content:
# This file is part of example_dds.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from .version import * # Generated by sconsUtils
| 42.173913 | 72 | 0.764948 |
4a1c887f4139d63d1b87243ef3b9e57746dba52e | 12,743 | py | Python |
4_pose_estimation/utils/dataloader.py | ryotaro137/pytorch_advanced | af212d250162d598546fc45b2823b441b013223e | ["MIT"] | 1 | 2021-04-11T00:12:41.000Z | 2021-04-11T00:12:41.000Z |
4_pose_estimation/utils/dataloader.py | ryotaro137/pytorch_advanced | af212d250162d598546fc45b2823b441b013223e | ["MIT"] | null | null | null |
4_pose_estimation/utils/dataloader.py | ryotaro137/pytorch_advanced | af212d250162d598546fc45b2823b441b013223e | ["MIT"] | 2 | 2021-04-11T00:12:24.000Z | 2021-05-22T10:06:24.000Z |
# Chapter 4: data augmentation for pose estimation
# Parts of the implementation use the repository below as a reference
# https://github.com/tensorboy/pytorch_Realtime_Multi-Person_Pose_Estimation/
# Released under the MIT license
import os
import os.path as osp
import json
from PIL import Image
import cv2
import numpy as np
from scipy import misc, ndimage
import torch
import torch.utils.data as data
from utils.data_augumentation import Compose, get_anno, add_neck, aug_scale, aug_rotate, aug_croppad, aug_flip, remove_illegal_joint, Normalize_Tensor, no_Normalize_Tensor
def putGaussianMaps(center, accumulate_confid_map, params_transform):
'''Convert a keypoint location into a Gaussian confidence map.'''
crop_size_y = params_transform['crop_size_y']
crop_size_x = params_transform['crop_size_x']
stride = params_transform['stride']
sigma = params_transform['sigma']
grid_y = crop_size_y / stride
grid_x = crop_size_x / stride
start = stride / 2.0 - 0.5
y_range = [i for i in range(int(grid_y))]
x_range = [i for i in range(int(grid_x))]
xx, yy = np.meshgrid(x_range, y_range)
xx = xx * stride + start
yy = yy * stride + start
d2 = (xx - center[0]) ** 2 + (yy - center[1]) ** 2
exponent = d2 / 2.0 / sigma / sigma
mask = exponent <= 4.6052
cofid_map = np.exp(-exponent)
cofid_map = np.multiply(mask, cofid_map)
accumulate_confid_map += cofid_map
accumulate_confid_map[accumulate_confid_map > 1.0] = 1.0
return accumulate_confid_map
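# putGaussianMaps evaluates exp(-d^2 / (2*sigma^2)) on the stride-downsampled output
# grid, truncates it where the exponent exceeds 4.6052 (about ln(100), i.e. below 1% of
# the peak) and clips the accumulated map to 1.0 where maps of several people overlap.
# Minimal sketch of a call (the parameter values below are illustrative, not the
# training configuration):
# params = {'crop_size_x': 368, 'crop_size_y': 368, 'stride': 8, 'sigma': 7.0}
# heat = np.zeros((368 // 8, 368 // 8))
# heat = putGaussianMaps(np.array([184.0, 184.0]), heat, params)  # peak near the centre of the 46x46 grid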
def putVecMaps(centerA, centerB, accumulate_vec_map, count, params_transform):
'''Compute the Part Affinity Field (PAF) vector map.'''
centerA = centerA.astype(float)
centerB = centerB.astype(float)
stride = params_transform['stride']
crop_size_y = params_transform['crop_size_y']
crop_size_x = params_transform['crop_size_x']
grid_y = crop_size_y / stride
grid_x = crop_size_x / stride
thre = params_transform['limb_width'] # limb width
centerB = centerB / stride
centerA = centerA / stride
limb_vec = centerB - centerA
norm = np.linalg.norm(limb_vec)
if (norm == 0.0):
# print 'limb is too short, ignore it...'
return accumulate_vec_map, count
limb_vec_unit = limb_vec / norm
# print 'limb unit vector: {}'.format(limb_vec_unit)
# To make sure not beyond the border of this two points
min_x = max(int(round(min(centerA[0], centerB[0]) - thre)), 0)
max_x = min(int(round(max(centerA[0], centerB[0]) + thre)), grid_x)
min_y = max(int(round(min(centerA[1], centerB[1]) - thre)), 0)
max_y = min(int(round(max(centerA[1], centerB[1]) + thre)), grid_y)
range_x = list(range(int(min_x), int(max_x), 1))
range_y = list(range(int(min_y), int(max_y), 1))
xx, yy = np.meshgrid(range_x, range_y)
ba_x = xx - centerA[0] # the vector from (x,y) to centerA
ba_y = yy - centerA[1]
limb_width = np.abs(ba_x * limb_vec_unit[1] - ba_y * limb_vec_unit[0])
mask = limb_width < thre # mask is 2D
vec_map = np.copy(accumulate_vec_map) * 0.0
vec_map[yy, xx] = np.repeat(mask[:, :, np.newaxis], 2, axis=2)
vec_map[yy, xx] *= limb_vec_unit[np.newaxis, np.newaxis, :]
mask = np.logical_or.reduce(
(np.abs(vec_map[:, :, 0]) > 0, np.abs(vec_map[:, :, 1]) > 0))
accumulate_vec_map = np.multiply(
accumulate_vec_map, count[:, :, np.newaxis])
accumulate_vec_map += vec_map
count[mask == True] += 1
mask = count == 0
count[mask == True] = 1
accumulate_vec_map = np.divide(accumulate_vec_map, count[:, :, np.newaxis])
count[mask == True] = 0
return accumulate_vec_map, count
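# putVecMaps writes the unit vector pointing from joint A to joint B into every output
# cell whose perpendicular distance to the A-B segment is below params_transform['limb_width']
# (within a bounding box around the segment); 'count' records how many limbs touched each
# cell so overlapping limbs are averaged rather than summed.
# Illustrative sketch (the coordinates are made up, same params dict as above):
# paf = np.zeros((46, 46, 2)); cnt = np.zeros((46, 46), dtype=np.uint32)
# paf, cnt = putVecMaps(np.array([100.0, 100.0]), np.array([200.0, 100.0]), paf, cnt, params)
# # horizontal limb -> the affected cells hold the unit vector (1.0, 0.0)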
def get_ground_truth(meta, mask_miss):
"""アノテーションとマスクデータから正しい答えを求める"""
# 初期設定
params_transform = dict()
params_transform['stride'] = 8 # 画像サイズを変更したくない場合は1にする
params_transform['mode'] = 5
params_transform['crop_size_x'] = 368
params_transform['crop_size_y'] = 368
params_transform['np'] = 56
params_transform['sigma'] = 7.0
params_transform['limb_width'] = 1.0
stride = params_transform['stride']
mode = params_transform['mode']
crop_size_y = params_transform['crop_size_y']
crop_size_x = params_transform['crop_size_x']
num_parts = params_transform['np']
nop = meta['numOtherPeople']
# Image (grid) size
grid_y = crop_size_y / stride
grid_x = crop_size_x / stride
channels = (num_parts + 1) * 2
# Variables to store the outputs
heatmaps = np.zeros((int(grid_y), int(grid_x), 19))
pafs = np.zeros((int(grid_y), int(grid_x), 38))
mask_miss = cv2.resize(mask_miss, (0, 0), fx=1.0 / stride, fy=1.0 /
stride, interpolation=cv2.INTER_CUBIC).astype(
np.float32)
mask_miss = mask_miss / 255.
mask_miss = np.expand_dims(mask_miss, axis=2)
# Mask variables
heat_mask = np.repeat(mask_miss, 19, axis=2)
paf_mask = np.repeat(mask_miss, 38, axis=2)
# Blur the pin-point keypoint coordinates into Gaussian distributions
for i in range(18):
if (meta['joint_self'][i, 2] <= 1):
center = meta['joint_self'][i, :2]
gaussian_map = heatmaps[:, :, i]
heatmaps[:, :, i] = putGaussianMaps(
center, gaussian_map, params_transform)
# coco_data only
for j in range(nop):
if (meta['joint_others'][j, i, 2] <= 1):
center = meta['joint_others'][j, i, :2]
gaussian_map = heatmaps[:, :, i]
heatmaps[:, :, i] = putGaussianMaps(
center, gaussian_map, params_transform)
# pafs
mid_1 = [2, 9, 10, 2, 12, 13, 2, 3, 4,
3, 2, 6, 7, 6, 2, 1, 1, 15, 16]
mid_2 = [9, 10, 11, 12, 13, 14, 3, 4, 5,
17, 6, 7, 8, 18, 1, 15, 16, 17, 18]
thre = 1
for i in range(19):
# limb
count = np.zeros((int(grid_y), int(grid_x)), dtype=np.uint32)
if (meta['joint_self'][mid_1[i] - 1, 2] <= 1 and meta['joint_self'][mid_2[i] - 1, 2] <= 1):
centerA = meta['joint_self'][mid_1[i] - 1, :2]
centerB = meta['joint_self'][mid_2[i] - 1, :2]
vec_map = pafs[:, :, 2 * i:2 * i + 2]
# print vec_map.shape
pafs[:, :, 2 * i:2 * i + 2], count = putVecMaps(centerA=centerA,
centerB=centerB,
accumulate_vec_map=vec_map,
count=count, params_transform=params_transform)
# coco_data only
for j in range(nop):
if (meta['joint_others'][j, mid_1[i] - 1, 2] <= 1 and meta['joint_others'][j, mid_2[i] - 1, 2] <= 1):
centerA = meta['joint_others'][j, mid_1[i] - 1, :2]
centerB = meta['joint_others'][j, mid_2[i] - 1, :2]
vec_map = pafs[:, :, 2 * i:2 * i + 2]
pafs[:, :, 2 * i:2 * i + 2], count = putVecMaps(centerA=centerA,
centerB=centerB,
accumulate_vec_map=vec_map,
count=count, params_transform=params_transform)
# background
heatmaps[:, :, -1] = np.maximum(1 - np.max(heatmaps[:, :, :18], axis=2), 0.)
# Convert to Tensors
heat_mask = torch.from_numpy(heat_mask)
heatmaps = torch.from_numpy(heatmaps)
paf_mask = torch.from_numpy(paf_mask)
pafs = torch.from_numpy(pafs)
return heat_mask, heatmaps, paf_mask, pafs
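# Shapes produced above (368x368 crop, stride 8 -> 46x46 grid): 'heatmaps' has 19
# channels (18 keypoints including the added neck, plus one background channel) and
# 'pafs' has 38 channels (x/y components for the 19 limbs listed in mid_1/mid_2).
# 'heat_mask'/'paf_mask' broadcast the COCO mask_miss region to the same channel counts
# so that unannotated people can be excluded from the loss.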
def make_datapath_list(rootpath):
"""
Create lists of file paths to the image data, annotation data, and mask data for training and validation.
"""
# Load the annotation JSON file
json_path = osp.join(rootpath, 'itop_coco_extract.json')
with open(json_path) as data_file:
data_this = json.load(data_file)
data_json = data_this['root']
# Store the indices
num_samples = len(data_json)
train_indexes = []
val_indexes = []
for count in range(num_samples):
if 'train' in data_json[count]["img_paths"]:
train_indexes.append(count)
else:
val_indexes.append(count)
# Store the image file paths
train_img_list = list()
val_img_list = list()
for idx in train_indexes:
img_path = os.path.join(rootpath, data_json[idx]['img_paths'])
train_img_list.append(img_path)
for idx in val_indexes:
img_path = os.path.join(rootpath, data_json[idx]['img_paths'])
val_img_list.append(img_path)
# Store the paths to the mask data
train_mask_list = []
val_mask_list = []
for idx in train_indexes:
img_idx = data_json[idx]['img_paths'][-16:-4]
if data_json[idx]['type'] == 1:
anno_path = "./data/mask/MaskImg" + '.jpg'
else:
anno_path = "./data/mask/train2014/mask_COCO_train2014_" + img_idx+'.jpg'
train_mask_list.append(anno_path)
for idx in val_indexes:
img_idx = data_json[idx]['img_paths'][-16:-4]
if data_json[idx]['type'] == 1:
anno_path = "./data/mask/MaskImg" + '.jpg'
else:
anno_path = "./data/mask/val2014/mask_COCO_val2014_" + img_idx+'.jpg'
val_mask_list.append(anno_path)
# Store the annotation data
train_meta_list = list()
val_meta_list = list()
for idx in train_indexes:
train_meta_list.append(data_json[idx])
for idx in val_indexes:
val_meta_list.append(data_json[idx])
return train_img_list, train_mask_list, val_img_list, val_mask_list, train_meta_list, val_meta_list
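# Hedged usage sketch (the rootpath below is an assumption about the local data layout):
# train_img_list, train_mask_list, val_img_list, val_mask_list, train_meta_list, val_meta_list = \
#     make_datapath_list(rootpath='./data/')
# The three train_* lists are index-aligned and are consumed together by the
# COCOkeypointsDataset class defined below.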
class DataTransform():
"""
Preprocessing class for the images, masks, and annotations.
It behaves differently at training time and at inference time.
At training time it applies data augmentation.
"""
def __init__(self):
self.data_transform = {
'train': Compose([
get_anno(),  # store the annotations from the JSON file in a dictionary
add_neck(),  # reorder the annotation data and add a neck annotation
aug_scale(),  # scaling
aug_rotate(),  # rotation
aug_croppad(),  # cropping
aug_flip(),  # horizontal flip
remove_illegal_joint(),  # remove annotations that fall outside the image
Normalize_Tensor()  # normalize the color channels and convert to a tensor
]),
'val': Compose([
get_anno(),  # store the annotations from the JSON file in a dictionary
add_neck(),  # reorder the annotation data and add a neck annotation
aug_scale(),  # scaling
aug_rotate(),  # rotation
aug_croppad(),  # cropping
aug_flip(),  # horizontal flip
remove_illegal_joint(),  # remove annotations that fall outside the image
Normalize_Tensor()  # normalize the color channels and convert to a tensor
])
}
def __call__(self, phase, meta_data, img, mask_miss):
"""
Parameters
----------
phase : 'train' or 'val'
Specifies the preprocessing mode.
"""
meta_data, img, mask_miss = self.data_transform[phase](
meta_data, img, mask_miss)
return meta_data, img, mask_miss
class COCOkeypointsDataset(data.Dataset):
"""
Class that creates the Dataset for MSCOCO Cocokeypoints. Inherits from PyTorch's Dataset class.
Attributes
----------
img_list : list
List of paths to the images
anno_list : list
List of paths to the annotations
phase : 'train' or 'test'
Sets whether this is for training or for testing.
transform : object
Instance of the preprocessing class
"""
def __init__(self, img_list, mask_list, meta_list, phase, transform):
self.img_list = img_list
self.mask_list = mask_list
self.meta_list = meta_list
self.phase = phase
self.transform = transform
def __len__(self):
'''Return the number of images.'''
return len(self.img_list)
def __getitem__(self, index):
img, heatmaps, heat_mask, pafs, paf_mask = self.pull_item(index)
return img, heatmaps, heat_mask, pafs, paf_mask
def pull_item(self, index):
'''Get the image as Tensor data together with its annotation and mask.'''
# 1. Load the image
image_file_path = self.img_list[index]
img = cv2.imread(image_file_path)  # [height][width][color BGR]
# 2. Load the mask and the annotation
mask_miss = cv2.imread(self.mask_list[index])
meta_data = self.meta_list[index]
# 3. Preprocess the image
meta_data, img, mask_miss = self.transform(
self.phase, meta_data, img, mask_miss)
# 4. Get the ground-truth annotation data
mask_miss_numpy = mask_miss.numpy().transpose((1, 2, 0))
heat_mask, heatmaps, paf_mask, pafs = get_ground_truth(
meta_data, mask_miss_numpy)
# 5. The mask data is RGB (1,1,1) or (0,0,0), so drop the last dimension
heat_mask = heat_mask[:, :, :, 0]
paf_mask = paf_mask[:, :, :, 0]
# 6. The channel dimension is last, so reorder the dimensions
# e.g. paf_mask: torch.Size([46, 46, 38])
# → torch.Size([38, 46, 46])
paf_mask = paf_mask.permute(2, 0, 1)
heat_mask = heat_mask.permute(2, 0, 1)
pafs = pafs.permute(2, 0, 1)
heatmaps = heatmaps.permute(2, 0, 1)
return img, heatmaps, heat_mask, pafs, paf_mask
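# Hedged end-to-end sketch (batch size and shapes assume the default 368x368 crop and
# stride 8; nothing here is taken from the original training script):
# train_dataset = COCOkeypointsDataset(train_img_list, train_mask_list, train_meta_list,
#                                      phase='train', transform=DataTransform())
# train_loader = data.DataLoader(train_dataset, batch_size=8, shuffle=True)
# img, heatmaps, heat_mask, pafs, paf_mask = next(iter(train_loader))
# # img: [8, 3, 368, 368], heatmaps: [8, 19, 46, 46], pafs: [8, 38, 46, 46]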
| 33.71164 | 171 | 0.587774 |
4a1c889601234d29e9fa7706bb9f84541f39b694 | 12,023 | py | Python |
tests/api_workflow/mocked_api_workflow_client.py | dmarx/lightly | d23b639d33f68ce2e986000516770e8f2ee29453 | ["MIT"] | null | null | null |
tests/api_workflow/mocked_api_workflow_client.py | dmarx/lightly | d23b639d33f68ce2e986000516770e8f2ee29453 | ["MIT"] | null | null | null |
tests/api_workflow/mocked_api_workflow_client.py | dmarx/lightly | d23b639d33f68ce2e986000516770e8f2ee29453 | ["MIT"] | null | null | null |
import unittest
from io import IOBase
from requests import Response
from lightly.openapi_generated.swagger_client.models.dataset_create_request import DatasetCreateRequest
from lightly.openapi_generated.swagger_client.models.dataset_data import DatasetData
from lightly.openapi_generated.swagger_client.api.datasets_api import DatasetsApi
import lightly
from lightly.api.api_workflow_client import ApiWorkflowClient
from typing import *
from lightly.openapi_generated.swagger_client import ScoresApi, CreateEntityResponse, SamplesApi, SampleCreateRequest, \
InitialTagCreateRequest, ApiClient, VersioningApi, QuotaApi, TagArithmeticsRequest, TagBitMaskResponse, \
SampleWriteUrls, SampleData
from lightly.openapi_generated.swagger_client.api.embeddings_api import EmbeddingsApi
from lightly.openapi_generated.swagger_client.api.jobs_api import JobsApi
from lightly.openapi_generated.swagger_client.api.mappings_api import MappingsApi
from lightly.openapi_generated.swagger_client.api.samplings_api import SamplingsApi
from lightly.openapi_generated.swagger_client.api.tags_api import TagsApi
from lightly.openapi_generated.swagger_client.models.async_task_data import AsyncTaskData
from lightly.openapi_generated.swagger_client.models.dataset_embedding_data import DatasetEmbeddingData
from lightly.openapi_generated.swagger_client.models.job_result_type import JobResultType
from lightly.openapi_generated.swagger_client.models.job_state import JobState
from lightly.openapi_generated.swagger_client.models.job_status_data import JobStatusData
from lightly.openapi_generated.swagger_client.models.job_status_data_result import JobStatusDataResult
from lightly.openapi_generated.swagger_client.models.sampling_create_request import SamplingCreateRequest
from lightly.openapi_generated.swagger_client.models.tag_data import TagData
from lightly.openapi_generated.swagger_client.models.write_csv_url_data import WriteCSVUrlData
class MockedEmbeddingsApi(EmbeddingsApi):
def __init__(self, api_client):
EmbeddingsApi.__init__(self, api_client=api_client)
self.embeddings = [DatasetEmbeddingData(id="embedding_id_xyz", name="embedding_name_xxyyzz",
is_processed=True, created_at=0)]
def get_embeddings_csv_write_url_by_id(self, dataset_id: str, **kwargs):
assert isinstance(dataset_id, str)
response_ = WriteCSVUrlData(signed_write_url="signed_write_url_valid", embedding_id="embedding_id_xyz")
return response_
def get_embeddings_by_dataset_id(self, dataset_id, **kwargs) -> List[DatasetEmbeddingData]:
assert isinstance(dataset_id, str)
return self.embeddings
class MockedSamplingsApi(SamplingsApi):
def trigger_sampling_by_id(self, body: SamplingCreateRequest, dataset_id, embedding_id, **kwargs):
assert isinstance(body, SamplingCreateRequest)
assert isinstance(dataset_id, str)
assert isinstance(embedding_id, str)
response_ = AsyncTaskData(job_id="155")
return response_
class MockedJobsApi(JobsApi):
def __init__(self, *args, **kwargs):
self.no_calls = 0
JobsApi.__init__(self, *args, **kwargs)
def get_job_status_by_id(self, job_id, **kwargs):
assert isinstance(job_id, str)
self.no_calls += 1
if self.no_calls > 3:
result = JobStatusDataResult(type=JobResultType.SAMPLING, data="sampling_tag_id_xyz")
response_ = JobStatusData(id="id_", status=JobState.FINISHED, wait_time_till_next_poll=0,
created_at=1234, finished_at=1357, result=result)
else:
result = None
response_ = JobStatusData(id="id_", status=JobState.RUNNING, wait_time_till_next_poll=0.001,
created_at=1234, result=result)
return response_
class MockedTagsApi(TagsApi):
def create_initial_tag_by_dataset_id(self, body, dataset_id, **kwargs):
assert isinstance(body, InitialTagCreateRequest)
assert isinstance(dataset_id, str)
response_ = CreateEntityResponse(id="xyz")
return response_
def get_tag_by_tag_id(self, dataset_id, tag_id, **kwargs):
assert isinstance(dataset_id, str)
assert isinstance(tag_id, str)
response_ = TagData(id=tag_id, dataset_id=dataset_id, prev_tag_id="initial-tag", bit_mask_data="0x80bda23e9",
name='second-tag', tot_size=15, created_at=1577836800, changes=dict())
return response_
def get_tags_by_dataset_id(self, dataset_id, **kwargs):
tag_1 = TagData(id='inital_tag_id', dataset_id=dataset_id, prev_tag_id=None,
bit_mask_data="0xF", name='initial-tag', tot_size=4,
created_at=1577836800, changes=dict())
tag_2 = TagData(id='query_tag_id_xyz', dataset_id=dataset_id, prev_tag_id="initial-tag",
bit_mask_data="0xF", name='query_tag_name_xyz', tot_size=4,
created_at=1577836800, changes=dict())
tag_3 = TagData(id='preselected_tag_id_xyz', dataset_id=dataset_id, prev_tag_id="initial-tag",
bit_mask_data="0x1", name='preselected_tag_name_xyz', tot_size=4,
created_at=1577836800, changes=dict())
tag_4 = TagData(id='sampled_tag_xyz', dataset_id=dataset_id, prev_tag_id="preselected_tag_id_xyz",
bit_mask_data="0x3", name='sampled_tag_xyz', tot_size=4,
created_at=1577836800, changes=dict())
tags = [tag_1, tag_2, tag_3, tag_4]
no_tags_to_return = getattr(self, "no_tags", 4)
tags = tags[:no_tags_to_return]
return tags
def perform_tag_arithmetics(self, body: TagArithmeticsRequest, dataset_id, **kwargs):
return TagBitMaskResponse(bit_mask_data="0x2")
class MockedScoresApi(ScoresApi):
def create_or_update_active_learning_score_by_tag_id(self, body, dataset_id, tag_id, **kwargs) -> \
CreateEntityResponse:
if len(body.scores) > 0 and not isinstance(body.scores[0], float):
raise AttributeError
response_ = CreateEntityResponse(id="sampled_tag_id_xyz")
return response_
class MockedMappingsApi(MappingsApi):
def __init__(self, *args, **kwargs):
sample_names = [f'img_{i}.jpg' for i in range(100)]
sample_names.reverse()
self.sample_names = sample_names
MappingsApi.__init__(self, *args, **kwargs)
def get_sample_mappings_by_dataset_id(self, dataset_id, field, **kwargs):
return self.sample_names
class MockedSamplesApi(SamplesApi):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.sample_create_requests: List[SampleCreateRequest] = []
def get_samples_by_dataset_id(self, dataset_id, **kwargs) -> List[SampleData]:
samples = []
for i, body in enumerate(self.sample_create_requests):
sample = SampleData(id=f'{i}_xyz', dataset_id='dataset_id_xyz', file_name=body.file_name)
samples.append(sample)
return samples
def create_sample_by_dataset_id(self, body, dataset_id, **kwargs):
assert isinstance(body, SampleCreateRequest)
response_ = CreateEntityResponse(id="xyz")
self.sample_create_requests.append(body)
return response_
def get_sample_image_write_url_by_id(self, dataset_id, sample_id, is_thumbnail, **kwargs):
url = f"{sample_id}_write_url"
return url
def get_sample_image_read_url_by_id(self, dataset_id, sample_id, type, **kwargs):
url = f"{sample_id}_write_url"
return url
def get_sample_image_write_urls_by_id(self, dataset_id, sample_id, **kwargs) -> SampleWriteUrls:
thumb_url = f"{sample_id}_thumb_write_url"
full_url = f"{sample_id}_full_write_url"
ret = SampleWriteUrls(full=full_url, thumb=thumb_url)
return ret
class MockedDatasetsApi(DatasetsApi):
def __init__(self, api_client):
no_datasets = 3
self.default_datasets = [DatasetData(name=f"dataset_{i}", id=f"dataset_{i}_id", last_modified_at=i,
type="", img_type="full", size_in_bytes=-1, n_samples=-1, created_at=-1)
for i in range(no_datasets)]
self.reset()
def reset(self):
self.datasets = self.default_datasets
def get_datasets(self, **kwargs):
return self.datasets
def create_dataset(self, body: DatasetCreateRequest, **kwargs):
assert isinstance(body, DatasetCreateRequest)
id = body.name + "_id"
dataset = DatasetData(id=id, name=body.name, last_modified_at=len(self.datasets) + 1,
type="", size_in_bytes=-1, n_samples=-1, created_at=-1)
self.datasets += [dataset]
response_ = CreateEntityResponse(id=id)
return response_
def get_dataset_by_id(self, dataset_id):
return next(dataset for dataset in self.default_datasets if dataset_id == dataset.id)
def delete_dataset_by_id(self, dataset_id, **kwargs):
datasets_without_that_id = [dataset for dataset in self.datasets if dataset.id != dataset_id]
assert len(datasets_without_that_id) == len(self.datasets) - 1
self.datasets = datasets_without_that_id
class MockedVersioningApi(VersioningApi):
def get_latest_pip_version(self, **kwargs):
return "1.0.8"
def get_minimum_compatible_pip_version(self, **kwargs):
return "1.0.0"
class MockedQuotaApi(QuotaApi):
def get_quota_maximum_dataset_size(self, **kwargs):
return "60000"
def mocked_request_put(dst_url: str, data=IOBase) -> Response:
assert isinstance(dst_url, str)
assert isinstance(data, IOBase)
response_ = Response()
response_.status_code = 200
return response_
class MockedApiClient(ApiClient):
def request(self, method, url, query_params=None, headers=None,
post_params=None, body=None, _preload_content=True,
_request_timeout=None):
raise ValueError("ERROR: calling ApiClient.request(), but this should be mocked.")
def call_api(self, resource_path, method,
path_params=None, query_params=None, header_params=None,
body=None, post_params=None, files=None,
response_type=None, auth_settings=None, async_req=None,
_return_http_data_only=None, collection_formats=None,
_preload_content=True, _request_timeout=None):
raise ValueError("ERROR: calling ApiClient.call_api(), but this should be mocked.")
class MockedApiWorkflowClient(ApiWorkflowClient):
def __init__(self, *args, **kwargs):
lightly.api.api_workflow_client.ApiClient = MockedApiClient
lightly.api.version_checking.VersioningApi = MockedVersioningApi
ApiWorkflowClient.__init__(self, *args, **kwargs)
self.samplings_api = MockedSamplingsApi(api_client=self.api_client)
self.jobs_api = MockedJobsApi(api_client=self.api_client)
self.tags_api = MockedTagsApi(api_client=self.api_client)
self.embeddings_api = MockedEmbeddingsApi(api_client=self.api_client)
self.mappings_api = MockedMappingsApi(api_client=self.api_client)
self.scores_api = MockedScoresApi(api_client=self.api_client)
self.samples_api = MockedSamplesApi(api_client=self.api_client)
self.datasets_api = MockedDatasetsApi(api_client=self.api_client)
self.quota_api = MockedQuotaApi(api_client=self.api_client)
lightly.api.api_workflow_client.requests.put = mocked_request_put
self.wait_time_till_next_poll = 0.001 # for api_workflow_sampling
class MockedApiWorkflowSetup(unittest.TestCase):
def setUp(self, token="token_xyz", dataset_id="dataset_id_xyz") -> None:
self.api_workflow_client = MockedApiWorkflowClient(token=token, dataset_id=dataset_id)
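# Hedged usage sketch: concrete test cases subclass MockedApiWorkflowSetup so every test
# gets a fully mocked client (the test class below is hypothetical):
# class TestSomeWorkflow(MockedApiWorkflowSetup):
#     def test_tags(self):
#         tags = self.api_workflow_client.tags_api.get_tags_by_dataset_id('dataset_id_xyz')
#         assert len(tags) == 4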
| 46.420849 | 120 | 0.713882 |
4a1c88b7de5f651ff18825680f2b76fb515a584a | 1,320 | py | Python |
rallyrolebot/api/bot_activity_mappings.py | Ju99ernaut/RallyRoleBot | 068a66fce957c2d65fa8d121fa96abe8c7b00e2d | ["MIT"] | null | null | null |
rallyrolebot/api/bot_activity_mappings.py | Ju99ernaut/RallyRoleBot | 068a66fce957c2d65fa8d121fa96abe8c7b00e2d | ["MIT"] | null | null | null |
rallyrolebot/api/bot_activity_mappings.py | Ju99ernaut/RallyRoleBot | 068a66fce957c2d65fa8d121fa96abe8c7b00e2d | ["MIT"] | null | null | null |
import data
from cogs.update_cog import running_bots, update_activity
from fastapi import APIRouter, Depends, HTTPException
from .dependencies import owner_or_admin
from .models import BotActivityMapping
from constants import *
import config
config.parse_args()
router = APIRouter(
prefix="/mappings/bot_activity",
tags=["bot_avatar"],
dependencies=[Depends(owner_or_admin)],
responses={404: {"description": "Not found"}},
)
@router.get("/{guildId}", response_model=BotActivityMapping)
async def read_mapping(guildId: str):
bot_instance = data.get_bot_instance(guildId)
if not bot_instance:
return {}
activity_text = bot_instance[BOT_ACTIVITY_TEXT_KEY]
activity_type = bot_instance[BOT_ACTIVITY_TYPE_KEY]
return {"activity_text": activity_text, "activity_type": activity_type}
@router.post("", response_model=BotActivityMapping)
async def add_mapping(mapping: BotActivityMapping, guildId: str):
bot_instance = data.get_bot_instance(guildId)
if not bot_instance:
raise HTTPException(status_code=404, detail="Bot config not found")
error = await update_activity(bot_instance, mapping.activity_type, mapping.activity_text)
if error:
raise HTTPException(status_code=500, detail="Error changing bot activity")
return {"success": 1}
| 29.333333 | 93 | 0.757576 |
4a1c8948c74453991c33d100e993cd03fcec2129 | 132 | py | Python |
findyour3d/contact/urls.py | hqpr/findyour3d | 8ad3d2cb7bd0adfd080bb2314df1c78b94d3973a | ["MIT"] | null | null | null |
findyour3d/contact/urls.py | hqpr/findyour3d | 8ad3d2cb7bd0adfd080bb2314df1c78b94d3973a | ["MIT"] | null | null | null |
findyour3d/contact/urls.py | hqpr/findyour3d | 8ad3d2cb7bd0adfd080bb2314df1c78b94d3973a | ["MIT"] | 1 | 2020-11-26T10:52:20.000Z | 2020-11-26T10:52:20.000Z |
from django.conf.urls import url
from . import views
urlpatterns = [
url(regex=r'^add/$', view=views.contact, name='form'),
]
| 16.5 | 58 | 0.674242 |
4a1c8a1110961eb26419058dab1a14f7a179733c | 2,091 | py | Python |
run/old_scripts/run_plot_train_history.py | olavosamp/semiauto-video-annotation | b1a46f9c0ad3bdcedab76b4cd730747ee2afd2fd | ["MIT"] | null | null | null |
run/old_scripts/run_plot_train_history.py | olavosamp/semiauto-video-annotation | b1a46f9c0ad3bdcedab76b4cd730747ee2afd2fd | ["MIT"] | 20 | 2019-07-15T21:49:29.000Z | 2020-01-09T14:35:03.000Z |
run/old_scripts/run_plot_train_history.py | olavosamp/semiauto-video-annotation | b1a46f9c0ad3bdcedab76b4cd730747ee2afd2fd | ["MIT"] | null | null | null |
import numpy as np
from pathlib import Path
import libs.utils as utils
import libs.dirs as dirs
from libs.vis_functions import plot_model_history
iteration = int(input("Enter iteration number.\n"))
epochs = int(input("Enter number of epochs.\n"))
rede = 1
datasetName = "full_dataset_softmax"
savedModelsFolder = Path(dirs.saved_models) / \
"{}_rede_{}/iteration_{}".format(datasetName, rede, iteration)
historyPath = savedModelsFolder \
/ "history_{}_no_finetune_{}_epochs_rede_{}_iteration_{}.pickle".format(datasetName, epochs, rede, iteration)
resultsFolder = Path(dirs.results) / historyPath.stem
nameEnd = "history_{}_epochs_rede_{}_iteration_{}.pdf".format(epochs, rede, iteration)
lossName = "loss_" + nameEnd
accName = "accuracy_" + nameEnd
f1Name = "f1_" + nameEnd
if not(historyPath.is_file()):
print("History file does not exist.\nFile:\n", historyPath)
print("\nExiting program.")
exit()
dirs.create_folder(resultsFolder)
history = utils.load_pickle(historyPath)
print(history.keys())
valLoss = history['loss-val']
trainLoss = history['loss-train']
trainAcc = history['acc-train']
valAcc = history['acc-val']
trainF1 = np.array((history['f1-train']))[:, 0]
valF1 = np.array((history['f1-val']))[:, 0]
plot_model_history([trainLoss, valLoss], data_labels=["Train Loss", "Val Loss"], xlabel="Epochs",
ylabel="Loss", title="Training loss history", save_path=resultsFolder / lossName,
show=False)
plot_model_history([trainAcc, valAcc], data_labels=["Train Acc", "Val Acc"], xlabel="Epochs",
ylabel="Acc", title="Training accuracy history", save_path=resultsFolder / accName,
show=False)
plot_model_history([trainF1, valF1], data_labels=["Train F1", "Val F1"], xlabel="Epochs",
ylabel="F1", title="Training F1 history", save_path=resultsFolder / f1Name,
show=False)
print("\nSaved results to ", resultsFolder)
| 37.339286 | 113 | 0.653754 |
4a1c8a936e5cee9afa234315181a7be87ecbba2f | 809 | py | Python |
procuret/term_rate/term_rate.py | Procuret/procuret-python | 2f49cbd3454e33986c84a6c32c0f0ab8f60d4b82 | ["MIT"] | null | null | null |
procuret/term_rate/term_rate.py | Procuret/procuret-python | 2f49cbd3454e33986c84a6c32c0f0ab8f60d4b82 | ["MIT"] | null | null | null |
procuret/term_rate/term_rate.py | Procuret/procuret-python | 2f49cbd3454e33986c84a6c32c0f0ab8f60d4b82 | ["MIT"] | 1 | 2020-10-28T14:26:21.000Z | 2020-10-28T14:26:21.000Z |
"""
Procuret Python
Term Rate Module
author: hugh@blinkybeach.com
"""
from procuret.data.codable import Codable, CodingDefinition as CD
from decimal import Decimal
class TermRate(Codable):
path = '/term-rate'
coding_map = {
'supplier_entity_id': CD(int),
'periods': CD(int),
'periods_in_year': CD(Decimal)
}
def __init__(
self,
supplier_entity_id: int,
periods: int,
periods_in_year: Decimal
) -> None:
self._supplier_entity_id = supplier_entity_id
self._periods = periods
self._periods_in_year = periods_in_year
return
periods = property(lambda s: s._periods)
periods_in_year = property(lambda s: s._periods_in_year)
supplier_entity_id = property(lambda s: s._supplier_entity_id)
| 22.472222 | 66 | 0.663782 |
4a1c8ae6984ae9ec1b616c78517f0e10ca2113e3
| 19,020 |
py
|
Python
|
tests/FeatureEngineering_QA.py
|
SeanBenner/RetroFit
|
1417775c2154c2127b3dedaf133f8f21d5f1adfa
|
[
"MIT"
] | null | null | null |
tests/FeatureEngineering_QA.py
|
SeanBenner/RetroFit
|
1417775c2154c2127b3dedaf133f8f21d5f1adfa
|
[
"MIT"
] | null | null | null |
tests/FeatureEngineering_QA.py
|
SeanBenner/RetroFit
|
1417775c2154c2127b3dedaf133f8f21d5f1adfa
|
[
"MIT"
] | null | null | null |
# QA: Test FE0_AutoLags
import timeit
import datatable as dt
import polars as pl
import retrofit
from retrofit import FeatureEngineering as fe
## No Group Example: datatable
data = dt.fread("C:/Users/Bizon/Documents/GitHub/BenchmarkData.csv")
t_start = timeit.default_timer()
Output = fe.FE0_AutoLags(data=data, ArgsList=None, LagPeriods=1, LagColumnNames='Leads', DateColumnName='CalendarDateColumn', ByVariables=None, ImputeValue=-1, Sort=True, Processing='datatable', InputFrame='datatable', OutputFrame='datatable')
t_end = timeit.default_timer()
print(t_end - t_start)
data1 = Output['data']
ArgsList = Output['ArgsList']
del Output
print(data1.names)
print(ArgsList)
# # Args
# ArgsList=None
# LagPeriods=1
# LagColumnNames='Leads'
# DateColumnName='CalendarDateColumn'
# ByVariables=None
# ImputeValue=-1
# Sort=True
# Processing='datatable'
# InputFrame='datatable'
# OutputFrame='datatable'
## No Group Example: polars
data = pl.read_csv("C:/Users/Bizon/Documents/GitHub/BenchmarkData.csv")
t_start = timeit.default_timer()
Output = fe.FE0_AutoLags(data=data, ArgsList=None, LagPeriods=1, LagColumnNames='Leads', DateColumnName='CalendarDateColumn', ByVariables=None, ImputeValue=-1.0, Sort=True, Processing='polars', InputFrame='polars', OutputFrame='polars')
t_end = timeit.default_timer()
print(t_end - t_start)
data2 = Output['data']
ArgsList = Output['ArgsList']
del Output
print(data2.columns)
print(ArgsList)
# # Args
# data=data
# LagPeriods=1
# LagColumnNames='Weekly_Sales'
# DateColumnName='CalendarDateColumn'
# ByVariables=['MarketingSegment','MarketingSegment2','MarketingSegment3', 'Label']
# ImputeValue=-1.0
# Sort=True
# Processing='polars'
# InputFrame='polars'
# OutputFrame='polars'
## Group Example, Single Lag: datatable
data = dt.fread("C:/Users/Bizon/Documents/GitHub/BenchmarkData.csv")
t_start = timeit.default_timer()
Output = fe.FE0_AutoLags(data=data, ArgsList=None, LagPeriods=1, LagColumnNames='Leads', DateColumnName='CalendarDateColumn', ByVariables=['MarketingSegments','MarketingSegments2','MarketingSegments3', 'Label'], ImputeValue=-1, Sort=True, Processing='datatable', InputFrame='datatable', OutputFrame='datatable')
t_end = timeit.default_timer()
print(t_end - t_start)
data1 = Output['data']
ArgsList = Output['ArgsList']
del Output
print(data1.names)
print(ArgsList)
# # Args
# ArgsList=None
# LagPeriods=1
# LagColumnNames='Leads'
# DateColumnName='CalendarDateColumn'
# ByVariables=['MarketingSegment','MarketingSegment2','MarketingSegment3', 'Label']
# ImputeValue=-1
# Sort=True
# Processing='datatable'
# InputFrame='datatable'
# OutputFrame='datatable'
## Group Example: polars
data = pl.read_csv("C:/Users/Bizon/Documents/GitHub/BenchmarkData.csv")
t_start = timeit.default_timer()
Output = fe.FE0_AutoLags(data=data, ArgsList=None, LagPeriods=1, LagColumnNames='Leads', DateColumnName='CalendarDateColumn', ByVariables=['MarketingSegments','MarketingSegments2','MarketingSegments3', 'Label'], ImputeValue=-1.0, Sort=True, Processing='polars', InputFrame='polars', OutputFrame='polars')
t_end = timeit.default_timer()
print(t_end - t_start)
data2 = Output['data']
ArgsList = Output['ArgsList']
del Output
print(data2.columns)
print(ArgsList)
# # Args
# ArgsList=None
# LagPeriods=1
# LagColumnNames='Leads'
# DateColumnName='CalendarDateColumn'
# ByVariables=['MarketingSegment','MarketingSegment2','MarketingSegment3', 'Label']
# ImputeValue=-1.0
# Sort=True
# Processing='polars'
# InputFrame='polars'
# OutputFrame='polars'
## Group and Multiple Periods and LagColumnNames: datatable
data = dt.fread("C:/Users/Bizon/Documents/GitHub/BenchmarkData.csv")
t_start = timeit.default_timer()
Output = fe.FE0_AutoLags(data=data, ArgsList=None, LagPeriods=[1,3,5], LagColumnNames=['Leads','XREGS1'], DateColumnName='CalendarDateColumn', ByVariables=['MarketingSegments','MarketingSegments2','MarketingSegments3', 'Label'], ImputeValue=-1, Sort=True, Processing='datatable', InputFrame='datatable', OutputFrame='datatable')
t_end = timeit.default_timer()
print(t_end - t_start)
data1 = Output['data']
ArgsList = Output['ArgsList']
del Output
print(data1.names)
print(ArgsList)
# # Args
# ArgsList=None
# LagPeriods=[1,3,5]
# LagColumnNames=['Leads','XREGS1']
# DateColumnName='CalendarDateColumn'
# ByVariables=['MarketingSegment','MarketingSegment2','MarketingSegment3', 'Label']
# ImputeValue=-1
# Sort=True
# Processing='datatable'
# InputFrame='datatable'
# OutputFrame='datatable'
## Group and Multiple Periods and LagColumnNames: polars
data = pl.read_csv("C:/Users/Bizon/Documents/GitHub/BenchmarkData.csv")
t_start = timeit.default_timer()
Output = fe.FE0_AutoLags(data=data, ArgsList=None, LagPeriods=[1,3,5], LagColumnNames=['Leads','XREGS1'], DateColumnName='CalendarDateColumn', ByVariables=['MarketingSegments','MarketingSegments2','MarketingSegments3', 'Label'], ImputeValue=-1.0, Sort=True, Processing='polars', InputFrame='polars', OutputFrame='polars')
t_end = timeit.default_timer()
print(t_end - t_start)
data2 = Output['data']
ArgsList = Output['ArgsList']
del Output
print(data2.columns)
print(ArgsList)
# # Args
# ArgsList=None
# LagPeriods=[1,3,5]
# LagColumnNames=['Leads','XREGS1']
# DateColumnName='CalendarDateColumn'
# ByVariables=['MarketingSegment','MarketingSegment2','MarketingSegment3', 'Label']
# ImputeValue=-1.0
# Sort=True
# Processing='polars'
# InputFrame='polars'
# OutputFrame='polars'
#########################################################################################################
#########################################################################################################
# QA FE0_AutoRollStats
import timeit
import datatable as dt
import polars as pl
from retrofit import FeatureEngineering as fe
## No Group Example
data = dt.fread("C:/Users/Bizon/Documents/GitHub/BenchmarkData.csv")
t_start = timeit.default_timer()
Output = fe.FE0_AutoRollStats(data=data, ArgsList=None, RollColumnNames='Leads', DateColumnName='CalendarDateColumn', ByVariables=None, MovingAvg_Periods=[3,5,7], MovingSD_Periods=[3,5,7], MovingMin_Periods=[3,5,7], MovingMax_Periods=[3,5,7], ImputeValue=-1, Sort=True, Processing='datatable', InputFrame='datatable', OutputFrame='datatable')
t_end = timeit.default_timer()
print(t_end - t_start)
data = Output['data']
ArgsList = Output['ArgsList']
del Output
print(data.names)
print(ArgsList)
# # Args
# ArgsList=None
# RollColumnNames='Leads'
# DateColumnName='CalendarDateColumn'
# ByVariables=None
# MovingAvg_Periods=[3,5,7]
# MovingSD_Periods=[3,5,7]
# MovingMin_Periods=[3,5,7]
# MovingMax_Periods=[3,5,7]
# ImputeValue=-1
# Sort=True
# Processing='datatable'
# InputFrame='datatable'
# OutputFrame='datatable'
## No Group Example: polars
data = pl.read_csv("C:/Users/Bizon/Documents/GitHub/BenchmarkData.csv")
t_start = timeit.default_timer()
Output = fe.FE0_AutoRollStats(data=data, ArgsList=None, RollColumnNames='Leads', DateColumnName='CalendarDateColumn', ByVariables=None, MovingAvg_Periods=[3,5,7], MovingSD_Periods=[3,5,7], MovingMin_Periods=[3,5,7], MovingMax_Periods=[3,5,7], ImputeValue=-1, Sort=True, Processing='polars', InputFrame='polars', OutputFrame='polars')
t_end = timeit.default_timer()
print(t_end - t_start)
data = Output['data']
ArgsList = Output['ArgsList']
del Output
print(data.columns)
print(ArgsList)
# # Args
# ArgsList=None
# RollColumnNames='Leads'
# DateColumnName='CalendarDateColumn'
# ByVariables=None
# MovingAvg_Periods=[3,5,7]
# MovingSD_Periods=[3,5,7]
# MovingMin_Periods=[3,5,7]
# MovingMax_Periods=[3,5,7]
# ImputeValue=-1
# Sort=True
# Processing='polars'
# InputFrame='polars'
# OutputFrame='polars'
## Group and Multiple Periods and RollColumnNames:
data = dt.fread("C:/Users/Bizon/Documents/GitHub/BenchmarkData.csv")
t_start = timeit.default_timer()
Output = fe.FE0_AutoRollStats(data=data, ArgsList=None, RollColumnNames=['Leads','XREGS1'], DateColumnName='CalendarDateColumn', ByVariables=['MarketingSegments','MarketingSegments2','MarketingSegments3', 'Label'], MovingAvg_Periods=[3,5,7], MovingSD_Periods=[3,5,7], MovingMin_Periods=[3,5,7], MovingMax_Periods=[3,5,7], ImputeValue=-1, Sort=True, Processing='datatable', InputFrame='datatable', OutputFrame='datatable')
t_end = timeit.default_timer()
print(t_end - t_start)
data = Output['data']
ArgsList = Output['ArgsList']
del Output
print(data.names)
print(ArgsList)
# # Args
# ArgsList=None
# RollColumnNames=['Leads','XREGS1']
# DateColumnName='CalendarDateColumn'
# ByVariables=['MarketingSegment','MarketingSegment2','MarketingSegment3', 'Label']
# MovingAvg_Periods=[3,5,7]
# MovingSD_Periods=[3,5,7]
# MovingMin_Periods=[3,5,7]
# MovingMax_Periods=[3,5,7]
# ImputeValue=-1
# Sort=True
# Processing='datatable'
# InputFrame='datatable'
# OutputFrame='datatable'
## No Group Example:
data = dt.fread("C:/Users/Bizon/Documents/GitHub/BenchmarkData.csv")
t_start = timeit.default_timer()
Output = fe.FE0_AutoRollStats(data=data, ArgsList=None, RollColumnNames='Leads', DateColumnName='CalendarDateColumn', ByVariables=None, MovingAvg_Periods=[3,5,7], MovingSD_Periods=[3,5,7], MovingMin_Periods=[3,5,7], MovingMax_Periods=[3,5,7], ImputeValue=-1, Sort=True, Processing='datatable', InputFrame='datatable', OutputFrame='datatable')
t_end = timeit.default_timer()
print(t_end - t_start)
data = Output['data']
ArgsList = Output['ArgsList']
del Output
print(data.names)
print(ArgsList)
# # Args
# ArgsList=None
# RollColumnNames='Leads'
# DateColumnName='CalendarDateColumn'
# ByVariables=None
# MovingAvg_Periods=[3,5,7]
# MovingSD_Periods=[3,5,7]
# MovingMin_Periods=[3,5,7]
# MovingMax_Periods=[3,5,7]
# ImputeValue=-1
# Sort=True
# Processing='datatable'
# InputFrame='datatable'
# OutputFrame='datatable'
#########################################################################################################
#########################################################################################################
# QA FE0_AutoDiff
import timeit
import datatable as dt
from datatable import sort, f, by
import retrofit
from retrofit import FeatureEngineering as fe
## Group Example:
data = dt.fread("C:/Users/Bizon/Documents/GitHub/BenchmarkData.csv")
t_start = timeit.default_timer()
Output = fe.FE0_AutoDiff(data=data, ArgsList=None, DateColumnName = 'CalendarDateColumn', ByVariables = ['MarketingSegments','MarketingSegments2','MarketingSegments3', 'Label'], DiffNumericVariables = 'Leads', DiffDateVariables = 'CalendarDateColumn', DiffGroupVariables = None, NLag1 = 0, NLag2 = 1, Sort=True, Processing = 'datatable', InputFrame = 'datatable', OutputFrame = 'datatable')
t_end = timeit.default_timer()
print(t_end - t_start)
data = Output['data']
ArgsList = Output['ArgsList']
del Output
print(data.names)
print(ArgsList)
# # Args
# ArgsList=None
# DateColumnName = 'CalendarDateColumn'
# ByVariables = ['MarketingSegment','MarketingSegment2','MarketingSegment3', 'Label']
# DiffNumericVariables = 'Leads'
# DiffDateVariables = 'CalendarDateColumn'
# DiffGroupVariables = None
# NLag1 = 0
# NLag2 = 1
# Sort=True
# Processing = 'datatable'
# InputFrame = 'datatable'
# OutputFrame = 'datatable'
## Group Example: datatable
data = dt.fread("C:/Users/Bizon/Documents/GitHub/BenchmarkData.csv")
t_start = timeit.default_timer()
Output = fe.FE0_AutoDiff(data=data, ArgsList=None, DateColumnName = 'CalendarDateColumn', ByVariables = ['MarketingSegments','MarketingSegments2','MarketingSegments3', 'Label'], DiffNumericVariables = 'Leads', DiffDateVariables = 'CalendarDateColumn', DiffGroupVariables = None, NLag1 = 0, NLag2 = 1, Sort=True, Processing = 'datatable', InputFrame = 'datatable', OutputFrame = 'datatable')
t_end = timeit.default_timer()
print(t_end - t_start)
data = Output['data']
ArgsList = Output['ArgsList']
del Output
print(data.names)
print(ArgsList)
# # Args
# ArgsList=None
# DateColumnName = 'CalendarDateColumn'
# ByVariables = ['MarketingSegment','MarketingSegment2','MarketingSegment3', 'Label']
# DiffNumericVariables = 'Leads'
# DiffDateVariables = 'CalendarDateColumn'
# DiffGroupVariables = None
# NLag1 = 0
# NLag2 = 1
# Sort=True
# Processing = 'datatable'
# InputFrame = 'datatable'
# OutputFrame = 'datatable'
## No Group Example:
data = dt.fread("C:/Users/Bizon/Documents/GitHub/BenchmarkData.csv")
t_start = timeit.default_timer()
Output = fe.FE0_AutoDiff(data=data, ArgsList=None, DateColumnName = 'CalendarDateColumn', ByVariables = None, DiffNumericVariables = 'Leads', DiffDateVariables = 'CalendarDateColumn', DiffGroupVariables = None, NLag1 = 0, NLag2 = 1, Sort=True, Processing = 'datatable', InputFrame = 'datatable', OutputFrame = 'datatable')
t_end = timeit.default_timer()
print(t_end - t_start)
data = Output['data']
ArgsList = Output['ArgsList']
del Output
print(data.names)
print(ArgsList)
# # Args
# ArgsList=None
# DateColumnName = 'CalendarDateColumn'
# ByVariables = None
# DiffNumericVariables = 'Leads'
# DiffDateVariables = 'CalendarDateColumn'
# DiffGroupVariables = None
# NLag1 = 0
# NLag2 = 1
# Sort=True
# Processing = 'datatable'
# InputFrame = 'datatable'
# OutputFrame = 'datatable'
#########################################################################################################
#########################################################################################################
# QA FE1_AutoCalendarVariables
import timeit
import datatable as dt
from datatable import sort, f, by
import retrofit
from retrofit import FeatureEngineering as fe
# FE1_AutoCalendarVariables
data = dt.fread("C:/Users/Bizon/Documents/GitHub/BenchmarkData.csv")
t_start = timeit.default_timer()
Output = fe.FE1_AutoCalendarVariables(data=data, ArgsList=None, DateColumnNames = 'CalendarDateColumn', CalendarVariables = ['wday','mday','wom','month','quarter','year'], Processing = 'datatable', InputFrame = 'datatable', OutputFrame = 'datatable')
t_end = timeit.default_timer()
print(t_end - t_start)
print(data.names)
#########################################################################################################
#########################################################################################################
# Example: datatable
import timeit
import datatable as dt
import retrofit
from retrofit import FeatureEngineering as fe
data = dt.fread("C:/Users/Bizon/Documents/GitHub/BenchmarkData.csv")
t_start = timeit.default_timer()
Output = fe.FE1_DummyVariables(
data=data,
ArgsList=None,
CategoricalColumnNames=['MarketingSegments','MarketingSegments2'],
Processing='datatable',
InputFrame='datatable',
OutputFrame='datatable')
t_end = timeit.default_timer()
print(t_end - t_start)
data = Output['data']
ArgsList = Output['ArgsList']
# Example: polars
import retrofit
from retrofit import FeatureEngineering as fe
import polars as pl
data = pl.read_csv("C:/Users/Bizon/Documents/GitHub/BenchmarkData.csv")
t_start = timeit.default_timer()
Output = fe.FE1_DummyVariables(
data=data,
ArgsList=None,
CategoricalColumnNames=['MarketingSegments','MarketingSegments2'],
Processing='polars',
InputFrame='polars',
OutputFrame='polars')
t_end = timeit.default_timer()
print(t_end - t_start)
data = Output['data']
ArgsList = Output['ArgsList']
#########################################################################################################
#########################################################################################################
# FE2_AutoDataParition
import timeit
import datatable as dt
import polars as pl
import retrofit
from retrofit import FeatureEngineering as fe
from retrofit import utils as u
# datatable random Example
data = dt.fread("C:/Users/Bizon/Documents/GitHub/BenchmarkData.csv")
t_start = timeit.default_timer()
DataSets = fe.FE2_AutoDataParition(
data=data,
ArgsList=None,
DateColumnName='CalendarDateColumn',
PartitionType='random',
Ratios=[0.70,0.20,0.10],
Sort = False,
ByVariables=None,
Processing='datatable',
InputFrame='datatable',
OutputFrame='datatable')
t_end = timeit.default_timer()
print(t_end - t_start)
TrainData = DataSets['TrainData']
ValidationData = DataSets['ValidationData']
TestData = DataSets['TestData']
ArgsList = DataSets['ArgsList']
# data=data
# ArgsList=None
# DateColumnName='CalendarDateColumn'
# PartitionType='random'
# Ratios=[0.70,0.20,0.10]
# Sort = False
# ByVariables=None
# Processing='datatable'
# InputFrame='datatable'
# OutputFrame='datatable'
# polars random Example
data = pl.read_csv("C:/Users/Bizon/Documents/GitHub/BenchmarkData.csv")
t_start = timeit.default_timer()
DataSets = fe.FE2_AutoDataParition(
data=data,
ArgsList=None,
DateColumnName='CalendarDateColumn',
PartitionType='random',
Ratios=[0.70,0.20,0.10],
ByVariables=None,
Sort = False,
Processing='polars',
InputFrame='polars',
OutputFrame='polars')
t_end = timeit.default_timer()
print(t_end - t_start)
TrainData = DataSets['TrainData']
ValidationData = DataSets['ValidationData']
TestData = DataSets['TestData']
ArgsList = DataSets['ArgsList']
# data=data
# ArgsList=None
# DateColumnName='CalendarDateColumn'
# PartitionType='random'
# Ratios=[0.70,0.20,0.10]
# Sort = False
# ByVariables=None
# Processing='polars'
# InputFrame='polars'
# OutputFrame='polars'
# datatable time Example
data = dt.fread("C:/Users/Bizon/Documents/GitHub/BenchmarkData.csv")
t_start = timeit.default_timer()
DataSets = fe.FE2_AutoDataParition(
data=data,
ArgsList=None,
DateColumnName='CalendarDateColumn',
PartitionType='time',
Ratios=[0.70,0.20,0.10],
Sort = True,
ByVariables=None,
Processing='datatable',
InputFrame='datatable',
OutputFrame='datatable')
t_end = timeit.default_timer()
print(t_end - t_start)
TrainData = DataSets['TrainData']
ValidationData = DataSets['ValidationData']
TestData = DataSets['TestData']
ArgsList = DataSets['ArgsList']
# data=data
# ArgsList=None
# DateColumnName='CalendarDateColumn'
# PartitionType='time'
# Ratios=[0.70,0.20,0.10]
# Sort = False
# ByVariables=None
# Processing='datatable'
# InputFrame='datatable'
# OutputFrame='datatable'
# polars time Example
data = pl.read_csv("C:/Users/Bizon/Documents/GitHub/BenchmarkData.csv")
t_start = timeit.default_timer()
DataSets = fe.FE2_AutoDataParition(
data=data,
ArgsList=None,
DateColumnName='CalendarDateColumn',
PartitionType='time',
Ratios=[0.70,0.20,0.10],
ByVariables=None,
Sort = True,
Processing='polars',
InputFrame='polars',
OutputFrame='polars')
t_end = timeit.default_timer()
print(t_end - t_start)
TrainData = DataSets['TrainData']
ValidationData = DataSets['ValidationData']
TestData = DataSets['TestData']
ArgsList = DataSets['ArgsList']
# data=data
# ArgsList=None
# DateColumnName='CalendarDateColumn'
# PartitionType='time'
# Ratios=[0.70,0.20,0.10]
# Sort = False
# ByVariables=None
# Processing='polars'
# InputFrame='polars'
# OutputFrame='polars'
# Cast string columns to categorical and sort by date (polars)
for col in data.columns:
if data[col].dtype == pl.Utf8:
data[col] = data[col].cast(pl.Categorical)
data.sort('CalendarDateColumn', reverse=False, in_place=True)
| 33.964286 | 421 | 0.71572 |
4a1c8aeea785e250202c2c08969fff022b626c9f | 2,402 | py | Python |
ApplicantClassifier.py | SilverQ/naive-bayes-applicant-classifier | b900e1dedc5fedf96834def230cb25453c4ae372 | ["MIT"] | null | null | null |
ApplicantClassifier.py | SilverQ/naive-bayes-applicant-classifier | b900e1dedc5fedf96834def230cb25453c4ae372 | ["MIT"] | null | null | null |
ApplicantClassifier.py | SilverQ/naive-bayes-applicant-classifier | b900e1dedc5fedf96834def230cb25453c4ae372 | ["MIT"] | null | null | null |
# from naiveBayesClassifier import tokenizer
from naiveBayesClassifier.tokenizer import Tokenizer
from naiveBayesClassifier.trainer import Trainer
from naiveBayesClassifier.classifier import Classifier
import csv
import numpy as np
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--train')
parser.add_argument('--test')
parser.add_argument('--result')
args = parser.parse_args()
arg_train = args.train if args.train else 'data/nm_kind_wo_person.txt'
arg_test = args.test if args.test else 'data/nm_test2.txt'
arg_result = args.result if args.result else 'data/nm_test_wo_person_result2.csv'
# https://github.com/muatik/naive-bayes-classifier
tokenizer = Tokenizer()
# https://kkamikoon.tistory.com/119
# related to the tokenizer positional argument error
ApplicantTrainer = Trainer(Tokenizer)
file_encoding = 'cp1252'
# 6 classes
exam01_train = 'data/nm_kind.csv'
exam01_test = 'data/nm_test2.txt'
exam01_result = 'data/nm_test_result2.csv'
# 5 classes without person
exam02_train = 'data/nm_kind_wo_person.txt'
exam02_test = 'data/nm_test2.txt'
exam02_result = 'data/nm_test_wo_person_result2.csv'
with open(arg_train, newline='', encoding=file_encoding) as csvfile:
reader = csv.DictReader(csvfile, delimiter='\t')
for applicant in reader:
# print(applicant['nm'], applicant['kind_code'])
ApplicantTrainer.train(text=applicant['nm'], className=applicant['kind_code'])
ApplicantClassifier = Classifier(ApplicantTrainer.data, tokenizer)
# classification = ApplicantClassifier.classify("sam univ")
# print(classification)
results = []
# with open('data/nm_test.txt') as testfile:
# for applicant in testfile:
# classification = np.array(ApplicantClassifier.classify(applicant))
# # print(applicant, classification[:, 1], classification[0][0])
# results.append((applicant, classification[0][0]))
# print(applicant)
#
# print(results[:3])
results = []
with open(arg_test, newline='', mode='rt') as testfile:
for line in testfile:
# print(line.strip())
classification = np.array(ApplicantClassifier.classify(line.strip()))
# print(classification[0][0])
results.append((line.strip(), classification[0][0]))
with open(arg_result, newline='', mode='w', encoding='utf-8') as write_file:
writer = csv.writer(write_file, delimiter='\t')
writer.writerows(results)
| 33.361111 | 86 | 0.738135 |
4a1c8b01b8c186d362d16ca57de681adc31d0182 | 60,412 | py | Python |
jax/_src/random.py | lumip/jax | 597879752889277adeaf36c26816b532918fee53 | ["ECL-2.0", "Apache-2.0"] | null | null | null |
jax/_src/random.py | lumip/jax | 597879752889277adeaf36c26816b532918fee53 | ["ECL-2.0", "Apache-2.0"] | 2 | 2022-01-31T13:20:35.000Z | 2022-02-14T13:20:49.000Z |
jax/_src/random.py | lumip/jax | 597879752889277adeaf36c26816b532918fee53 | ["ECL-2.0", "Apache-2.0"] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Any, Optional, Sequence, Union
import warnings
import numpy as np
from jax import lax
from jax import core
from jax import numpy as jnp
from jax._src import dtypes
from jax.core import NamedShape
from jax._src.api import jit, vmap
from jax._src.numpy.lax_numpy import _constant_like, _convert_and_clip_integer, _check_arraylike
from jax.lib import xla_bridge
from jax.lib import xla_client
from jax.lib import cuda_prng
from jax.numpy.linalg import cholesky, svd, eigh
from jax.interpreters import ad
from jax.interpreters import batching
from jax.interpreters import xla
from jax._src.util import prod
Array = Any
RealArray = Array
IntegerArray = Array
# TODO: Import or define these to match
# https://github.com/numpy/numpy/blob/main/numpy/typing/_dtype_like.py.
DTypeLikeInt = Any
DTypeLikeFloat = Any
_UINT_DTYPES = {8: jnp.uint8, 16: jnp.uint16, 32: jnp.uint32, 64: jnp.uint64}
def PRNGKey(seed: int) -> jnp.ndarray:
"""Create a pseudo-random number generator (PRNG) key given an integer seed.
Args:
seed: a 64- or 32-bit integer used as the value of the key.
Returns:
A PRNG key, which is modeled as an array of shape (2,) and dtype uint32. The
key is constructed from a 64-bit seed by effectively bit-casting to a pair
of uint32 values (or from a 32-bit seed by first padding out with zeros).
"""
# Avoid overflowerror in X32 mode by first converting ints to int64.
# This breaks JIT invariance of PRNGKey for large ints, but supports the
# common use-case of instantiating PRNGKey with Python hashes in X32 mode.
if isinstance(seed, int):
seed_arr = jnp.asarray(np.int64(seed))
else:
seed_arr = jnp.asarray(seed)
if seed_arr.shape:
raise TypeError(f"PRNGKey seed must be a scalar; got {seed!r}.")
if not np.issubdtype(seed_arr.dtype, np.integer):
raise TypeError(f"PRNGKey seed must be an integer; got {seed!r}")
convert = lambda k: lax.reshape(lax.convert_element_type(k, np.uint32), [1])
k1 = convert(lax.shift_right_logical(seed_arr, lax._const(seed_arr, 32)))
k2 = convert(jnp.bitwise_and(seed_arr, np.uint32(0xFFFFFFFF)))
return lax.concatenate([k1, k2], 0)
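# Example (follows from the construction above: the seed's high and low 32-bit halves
# become the two uint32 words of the key):
# PRNGKey(0) -> [0, 0]
# PRNGKey(1) -> [0, 1]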
def _is_prng_key(key: jnp.ndarray) -> bool:
try:
return key.shape == (2,) and key.dtype == np.uint32
except AttributeError:
return False
### utilities
def _make_rotate_left(dtype):
if not jnp.issubdtype(dtype, np.integer):
raise TypeError("_rotate_left only accepts integer dtypes.")
nbits = np.array(jnp.iinfo(dtype).bits, dtype)
def _rotate_left(x, d):
if lax.dtype(d) != dtype:
d = lax.convert_element_type(d, dtype)
if lax.dtype(x) != dtype:
x = lax.convert_element_type(x, dtype)
return lax.shift_left(x, d) | lax.shift_right_logical(x, nbits - d)
return _rotate_left
def _bit_stats(bits):
"""This is a debugging function to compute the statistics of bit fields."""
return np.array([list(map(int, np.binary_repr(x, 64))) for x in bits]).mean(0)
### hash function and split
def _threefry2x32_abstract_eval(*args):
if any(a.dtype != jnp.uint32 for a in args):
raise TypeError("Arguments to threefry2x32 must have uint32 type, got {}"
.format(args))
if all(isinstance(arg, core.ShapedArray) for arg in args):
shape = lax._broadcasting_shape_rule(*args)
named_shape = core.join_named_shapes(*(a.named_shape for a in args))
aval = core.ShapedArray(shape, jnp.dtype(jnp.uint32), named_shape=named_shape)
else:
aval = core.UnshapedArray(jnp.dtype(jnp.uint32))
return (aval,) * 2
rotate_left = _make_rotate_left(np.uint32)
def apply_round(v, rot):
v = v[:]
v[0] = v[0] + v[1]
v[1] = rotate_left(v[1], rot)
v[1] = v[0] ^ v[1]
return v
def rotate_list(xs):
return xs[1:] + xs[:1]
def rolled_loop_step(i, state):
x, ks, rotations = state
for r in rotations[0]:
x = apply_round(x, r)
new_x = [x[0] + ks[0], x[1] + ks[1] + jnp.asarray(i + 1, dtype=np.uint32)]
return new_x, rotate_list(ks), rotate_list(rotations)
def _threefry2x32_lowering(key1, key2, x1, x2, use_rolled_loops=True):
"""Apply the Threefry 2x32 hash.
Args:
keypair: a pair of 32bit unsigned integers used for the key.
count: an array of dtype uint32 used for the counts.
Returns:
An array of dtype uint32 with the same shape as `count`.
"""
x = [x1, x2]
rotations = [np.array([13, 15, 26, 6], dtype=np.uint32),
np.array([17, 29, 16, 24], dtype=np.uint32)]
ks = [key1, key2, key1 ^ key2 ^ np.uint32(0x1BD11BDA)]
x[0] = x[0] + ks[0]
x[1] = x[1] + ks[1]
if use_rolled_loops:
x, _, _ = lax.fori_loop(0, 5, rolled_loop_step, (x, rotate_list(ks), rotations))
else:
for r in rotations[0]:
x = apply_round(x, r)
x[0] = x[0] + ks[1]
x[1] = x[1] + ks[2] + np.uint32(1)
for r in rotations[1]:
x = apply_round(x, r)
x[0] = x[0] + ks[2]
x[1] = x[1] + ks[0] + np.uint32(2)
for r in rotations[0]:
x = apply_round(x, r)
x[0] = x[0] + ks[0]
x[1] = x[1] + ks[1] + np.uint32(3)
for r in rotations[1]:
x = apply_round(x, r)
x[0] = x[0] + ks[1]
x[1] = x[1] + ks[2] + np.uint32(4)
for r in rotations[0]:
x = apply_round(x, r)
x[0] = x[0] + ks[2]
x[1] = x[1] + ks[0] + np.uint32(5)
return tuple(x)
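# The code above implements the Threefry-2x32 block cipher: an initial key injection
# followed by 20 rounds (five groups of four), alternating between the rotation
# schedules (13, 15, 26, 6) and (17, 29, 16, 24), with another key injection plus a
# round counter after each group; use_rolled_loops selects the lax.fori_loop form
# instead of the fully unrolled form.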
def _threefry2x32_gpu_translation_rule(c, k1, k2, x1, x2):
shape = lax.broadcast_shapes(
c.get_shape(k1).dimensions(), c.get_shape(k2).dimensions(),
c.get_shape(x1).dimensions(), c.get_shape(x2).dimensions())
rank = len(shape)
if 0 in shape:
zeros = xla_client.ops.Broadcast(
xla_bridge.constant(c, np.array(0, np.uint32)), shape)
return xla_client.ops.Tuple(c, [zeros, zeros])
def _broadcast(x):
ndims = c.get_shape(x).rank()
return xla_client.ops.BroadcastInDim(x, shape,
tuple(range(rank - ndims, rank)))
return cuda_prng.threefry2x32(
c, (_broadcast(k1), _broadcast(k2)), (_broadcast(x1), _broadcast(x2)))
threefry2x32_p = core.Primitive("threefry2x32")
threefry2x32_p.multiple_results = True
threefry2x32_p.def_impl(partial(xla.apply_primitive, threefry2x32_p))
threefry2x32_p.def_abstract_eval(_threefry2x32_abstract_eval)
batching.defbroadcasting(threefry2x32_p)
xla.translations_with_avals[threefry2x32_p] = xla.lower_fun(
partial(_threefry2x32_lowering, use_rolled_loops=False),
multiple_results=True, with_avals=True)
xla.backend_specific_translations['cpu'][threefry2x32_p] = xla.lower_fun(
partial(_threefry2x32_lowering, use_rolled_loops=True),
multiple_results=True)
if cuda_prng:
xla.backend_specific_translations['gpu'][threefry2x32_p] = \
_threefry2x32_gpu_translation_rule
@jit
def threefry_2x32(keypair, count):
"""Apply the Threefry 2x32 hash.
Args:
keypair: a pair of 32bit unsigned integers used for the key.
count: an array of dtype uint32 used for the counts.
Returns:
An array of dtype uint32 with the same shape as `count`.
"""
key1, key2 = keypair
if not lax.dtype(key1) == lax.dtype(key2) == lax.dtype(count) == np.uint32:
msg = "threefry_2x32 requires uint32 arguments, got {}"
raise TypeError(msg.format([lax.dtype(x) for x in [key1, key2, count]]))
odd_size = count.size % 2
if odd_size:
x = list(jnp.split(jnp.concatenate([count.ravel(), np.uint32([0])]), 2))
else:
x = list(jnp.split(count.ravel(), 2))
x = threefry2x32_p.bind(key1, key2, x[0], x[1])
out = jnp.concatenate(x)
assert out.dtype == np.uint32
return lax.reshape(out[:-1] if odd_size else out, count.shape)
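# Illustrative usage sketch (hypothetical values): hashing a small block of
# counter words under a fixed key pair. Inputs must be uint32; the result has
# the same shape as `count`. This helper is never invoked by the module.
def _example_threefry_usage():
  keypair = (np.uint32(0x12345678), np.uint32(0x9ABCDEF0))
  count = jnp.arange(8, dtype=np.uint32)
  return threefry_2x32(keypair, count)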
def split(key: jnp.ndarray, num: int = 2) -> jnp.ndarray:
"""Splits a PRNG key into `num` new keys by adding a leading axis.
Args:
key: a PRNGKey (an array with shape (2,) and dtype uint32).
num: optional, a positive integer indicating the number of keys to produce
(default 2).
Returns:
An array with shape (num, 2) and dtype uint32 representing `num` new keys.
"""
return _split(key, int(num)) # type: ignore
@partial(jit, static_argnums=(1,))
def _split(key, num) -> jnp.ndarray:
counts = lax.iota(np.uint32, num * 2)
return lax.reshape(threefry_2x32(key, counts), (num, 2))
def fold_in(key: jnp.ndarray, data: int) -> jnp.ndarray:
"""Folds in data to a PRNG key to form a new PRNG key.
Args:
key: a PRNGKey (an array with shape (2,) and dtype uint32).
data: a 32bit integer representing data to be folded in to the key.
Returns:
A new PRNGKey that is a deterministic function of the inputs and is
statistically safe for producing a stream of new pseudo-random values.
"""
return _fold_in(key, jnp.uint32(data))
@jit
def _fold_in(key, data):
return threefry_2x32(key, PRNGKey(data))
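# Illustrative sketch of the intended key discipline built from `split` and
# `fold_in`: split to derive fresh subkeys, fold_in to derive a stream per
# static integer id. The helper name is hypothetical and is not called here.
def _example_key_threading(seed=0, num_streams=3):
  key = PRNGKey(seed)
  key, subkey = split(key)                       # keep `key` for future use
  streams = [fold_in(subkey, i) for i in range(num_streams)]
  return key, streams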
@partial(jit, static_argnums=(1, 2))
def _random_bits(key, bit_width, shape):
"""Sample uniform random bits of given width and shape using PRNG key."""
if not _is_prng_key(key):
raise TypeError("_random_bits got invalid prng key.")
if bit_width not in (8, 16, 32, 64):
raise TypeError("requires 8-, 16-, 32- or 64-bit field width.")
shape = core.as_named_shape(shape)
for name, size in shape.named_items:
real_size = lax.psum(1, name)
if real_size != size:
raise ValueError(f"The shape of axis {name} was specified as {size}, "
f"but it really is {real_size}")
axis_index = lax.axis_index(name)
key = fold_in(key, axis_index)
size = prod(shape.positional)
max_count = int(np.ceil(bit_width * size / 32))
nblocks, rem = divmod(max_count, jnp.iinfo(np.uint32).max)
if not nblocks:
bits = threefry_2x32(key, lax.iota(np.uint32, rem))
else:
keys = split(key, nblocks + 1)
subkeys, last_key = keys[:-1], keys[-1]
blocks = vmap(threefry_2x32, in_axes=(0, None))(subkeys, lax.iota(np.uint32, jnp.iinfo(np.uint32).max))
last = threefry_2x32(last_key, lax.iota(np.uint32, rem))
bits = lax.concatenate([blocks.ravel(), last], 0)
dtype = _UINT_DTYPES[bit_width]
if bit_width == 64:
bits = [lax.convert_element_type(x, dtype) for x in jnp.split(bits, 2)]
bits = lax.shift_left(bits[0], dtype(32)) | bits[1]
elif bit_width in [8, 16]:
# this is essentially bits.view(dtype)[:size]
bits = lax.bitwise_and(
np.uint32(np.iinfo(dtype).max),
lax.shift_right_logical(
lax.broadcast(bits, (1,)),
lax.mul(
np.uint32(bit_width),
lax.broadcasted_iota(np.uint32, (32 // bit_width, 1), 0)
)
)
)
bits = lax.reshape(bits, (np.uint32(max_count * 32 // bit_width),), (1, 0))
bits = lax.convert_element_type(bits, dtype)[:size]
return lax.reshape(bits, shape)
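# A NumPy sketch of the 8/16-bit packing above: on a little-endian host the
# shift-and-mask sequence is essentially `bits.view(dtype)[:size]`, as the
# inline comment notes. Purely illustrative; never called by the module.
def _example_bits_as_bytes():
  bits = np.arange(4, dtype=np.uint32)   # 16 bytes of raw random output
  return bits.view(np.uint8)[:6]         # reinterpret as 8-bit samples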
### random samplers
def _check_shape(name, shape: Union[Sequence[int], NamedShape], *param_shapes):
shape = core.as_named_shape(shape)
if param_shapes:
shape_ = lax.broadcast_shapes(shape.positional, *param_shapes)
if shape.positional != shape_:
msg = ("{} parameter shapes must be broadcast-compatible with shape "
"argument, and the result of broadcasting the shapes must equal "
"the shape argument, but got result {} for shape argument {}.")
raise ValueError(msg.format(name, shape_, shape))
def uniform(key: jnp.ndarray,
shape: Union[Sequence[int], NamedShape] = (),
dtype: DTypeLikeFloat = dtypes.float_,
minval: RealArray = 0.,
maxval: RealArray = 1.) -> jnp.ndarray:
"""Sample uniform random values in [minval, maxval) with given shape/dtype.
Args:
key: a PRNGKey used as the random key.
shape: optional, a tuple of nonnegative integers representing the result
shape. Default ().
dtype: optional, a float dtype for the returned values (default float64 if
jax_enable_x64 is true, otherwise float32).
minval: optional, a minimum (inclusive) value broadcast-compatible with shape for the range (default 0).
maxval: optional, a maximum (exclusive) value broadcast-compatible with shape for the range (default 1).
Returns:
A random array with the specified shape and dtype.
"""
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `uniform` must be a float dtype, "
f"got {dtype}")
dtype = dtypes.canonicalize_dtype(dtype)
shape = core.as_named_shape(shape)
return _uniform(key, shape, dtype, minval, maxval) # type: ignore
@partial(jit, static_argnums=(1, 2))
def _uniform(key, shape, dtype, minval, maxval) -> jnp.ndarray:
_check_shape("uniform", shape)
if not jnp.issubdtype(dtype, np.floating):
raise TypeError("uniform only accepts floating point dtypes.")
minval = lax.convert_element_type(minval, dtype)
maxval = lax.convert_element_type(maxval, dtype)
minval = lax.broadcast_to_rank(minval, shape.positional_rank)
maxval = lax.broadcast_to_rank(maxval, shape.positional_rank)
finfo = jnp.finfo(dtype)
nbits, nmant = finfo.bits, finfo.nmant
if nbits not in (16, 32, 64):
    raise TypeError("uniform only accepts 16-, 32- or 64-bit dtypes.")
bits = _random_bits(key, nbits, shape)
# The strategy here is to randomize only the mantissa bits with an exponent of
# 1 (after applying the bias), then shift and scale to the desired range. The
# bit-level transformation we use relies on Numpy and XLA having bit-for-bit
# equivalent float representations, which might not be true on all platforms.
float_bits = lax.bitwise_or(
lax.shift_right_logical(bits, np.array(nbits - nmant, lax.dtype(bits))),
np.array(1., dtype).view(_UINT_DTYPES[nbits]))
floats = lax.bitcast_convert_type(float_bits, dtype) - np.array(1., dtype)
return lax.max(
minval,
lax.reshape(floats * (maxval - minval) + minval, shape.positional))
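# A NumPy sketch of the bit-level strategy described above, assuming float32
# (23 mantissa bits): OR-ing random bits into the mantissa of 1.0 yields a
# value in [1, 2), and subtracting 1 maps it onto [0, 1). Illustrative only;
# `raw_bits` is an arbitrary example value.
def _example_mantissa_trick(raw_bits=0x7FFFFF):
  one_bits = np.array(1., np.float32).view(np.uint32)             # 0x3F800000
  float_bits = (one_bits | np.uint32(raw_bits & 0x7FFFFF)).view(np.float32)
  return float_bits - np.float32(1.)                              # in [0, 1)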
def randint(key: jnp.ndarray,
shape: Sequence[int],
minval: IntegerArray,
maxval: IntegerArray,
dtype: DTypeLikeInt = dtypes.int_):
"""Sample uniform random values in [minval, maxval) with given shape/dtype.
Args:
key: a PRNGKey used as the random key.
shape: a tuple of nonnegative integers representing the shape.
minval: int or array of ints broadcast-compatible with ``shape``, a minimum
(inclusive) value for the range.
maxval: int or array of ints broadcast-compatible with ``shape``, a maximum
(exclusive) value for the range.
dtype: optional, an int dtype for the returned values (default int64 if
jax_enable_x64 is true, otherwise int32).
Returns:
A random array with the specified shape and dtype.
"""
dtype = dtypes.canonicalize_dtype(dtype)
shape = core.canonicalize_shape(shape)
return _randint(key, shape, minval, maxval, dtype)
@partial(jit, static_argnums=(1, 4))
def _randint(key, shape, minval, maxval, dtype):
_check_shape("randint", shape, np.shape(minval), np.shape(maxval))
if not jnp.issubdtype(dtype, np.integer):
raise TypeError(f"randint only accepts integer dtypes, got {dtype}")
_check_arraylike("randint", minval, maxval)
minval = jnp.asarray(minval)
maxval = jnp.asarray(maxval)
if not jnp.issubdtype(minval.dtype, np.integer):
minval = minval.astype(int)
if not jnp.issubdtype(maxval.dtype, np.integer):
maxval = maxval.astype(int)
# Flag where maxval is greater than the maximum value of dtype
# in order to handle cases like randint(key, shape, 0, 256, 'uint8')
maxval_out_of_range = lax.gt(
maxval, _convert_and_clip_integer(jnp.array(jnp.iinfo(dtype).max, dtype), maxval.dtype))
minval = _convert_and_clip_integer(minval, dtype)
maxval = _convert_and_clip_integer(maxval, dtype)
minval = lax.broadcast_to_rank(minval, len(shape))
maxval = lax.broadcast_to_rank(maxval, len(shape))
nbits = jnp.iinfo(dtype).bits
if nbits not in (8, 16, 32, 64):
raise TypeError(f"randint only accepts 8-, 16-, 32-, or 64-bit dtypes, got {dtype}")
# This algorithm is biased whenever (maxval - minval) is not a power of 2.
# We generate double the number of random bits required by the dtype so as to
# reduce that bias.
k1, k2 = split(key)
rbits = lambda key: _random_bits(key, nbits, shape)
higher_bits, lower_bits = rbits(k1), rbits(k2)
unsigned_dtype = _UINT_DTYPES[nbits]
span = lax.convert_element_type(maxval - minval, unsigned_dtype)
# Ensure that span=1 when maxval <= minval, so minval is always returned;
# https://github.com/google/jax/issues/222
span = lax.select(maxval <= minval, lax.full_like(span, 1), span)
# When maxval is out of range, the span has to be one larger.
# If span is already the maximum representable value, this will wrap to zero,
# causing remainders below to have no effect, which is the correct semantics.
span = lax.select(
maxval_out_of_range & (maxval > minval),
lax.add(span, lax._const(span, 1)),
span)
# To compute a remainder operation on an integer that might have twice as many
# bits as we can represent in the native unsigned dtype, we compute a
# multiplier equal to 2**nbits % span. To avoid overflow, we use the identity:
# (a * b) % N = [(a % N) * (b % N)] % N
multiplier = lax.rem(lax._const(span, 2 ** (nbits // 2)), span)
multiplier = lax.rem(lax.mul(multiplier, multiplier), span)
random_offset = lax.add(lax.mul(lax.rem(higher_bits, span), multiplier),
lax.rem(lower_bits, span))
random_offset = lax.rem(random_offset, span)
return lax.add(minval, lax.convert_element_type(random_offset, dtype))
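# A plain-integer sketch of the modular identity used above: reducing the
# 2*nbits-wide value (higher * 2**nbits + lower) modulo `span` using only the
# residues of its pieces. The constants are arbitrary example values; the code
# above builds the multiplier in two steps to avoid overflow.
def _example_randint_mod_identity(higher=0xDEADBEEF, lower=0x12345678,
                                  span=1000, nbits=32):
  multiplier = (2 ** nbits) % span
  folded = ((higher % span) * multiplier + (lower % span)) % span
  assert folded == ((higher << nbits) + lower) % span
  return folded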
def shuffle(key: jnp.ndarray, x: Array, axis: int = 0) -> jnp.ndarray:
"""Shuffle the elements of an array uniformly at random along an axis.
Args:
key: a PRNGKey used as the random key.
x: the array to be shuffled.
axis: optional, an int axis along which to shuffle (default 0).
Returns:
A shuffled version of x.
"""
msg = ("jax.random.shuffle is deprecated and will be removed in a future release. "
"Use jax.random.permutation")
warnings.warn(msg, FutureWarning)
return _shuffle(key, x, axis) # type: ignore
def permutation(key: jnp.ndarray, x: Array) -> jnp.ndarray:
"""
Permute elements of an array along its first axis or return a permuted range.
If `x` is a multi-dimensional array, it is only shuffled along its
first index.
  Args:
key: a PRNGKey used as the random key.
x: the array or integer range to be shuffled.
Returns:
    A shuffled version of x, or a permuted array range if x is an integer.
"""
if not np.ndim(x):
# scalar case, must be a concrete integer
if not np.issubdtype(lax.dtype(x), np.integer):
raise TypeError("x must be an integer or at least 1-dimensional")
x = int(x) # type: ignore[assignment]
return _shuffle(key, jnp.arange(x), 0)
elif np.ndim(x) == 1:
return _shuffle(key, x, 0)
else:
assert isinstance(x, jnp.ndarray)
ind = _shuffle(key, jnp.arange(x.shape[0]), 0) # type: ignore[attribute-error]
return x[ind]
@partial(jit, static_argnums=(2,))
def _shuffle(key, x, axis) -> jnp.ndarray:
# On parallel architectures, Fisher-Yates is more expensive than doing
# multiple sorts. This algorithm is based on one developed and analyzed by
# tjablin@. We sort according to randomly-generated 32bit keys, but those keys
# may have collisions. If we repeat the process, using fresh 32bit keys for
# each sort, then whenever all pairs of elements have been assigned distinct
# keys at some iteration (or equivalently when the strings formed by
# concatenating the successive keys for each element are all distinct) then we
# are guaranteed to have a perfect sample (assuming that either the sort is
# stable or that any bias is not value-dependent). Since checking uniqueness
# at runtime may be expensive, we use a heuristic static stop criterion
# developed by tjablin@. See tensorflow/compiler/tf2xla/random_ops.cc for more
# info, and for the original implementation of this algorithm. See also
# Section 2 of http://people.csail.mit.edu/costis/6896sp11/lec5s.pdf for
# another analysis (where the keys are generated one bit at a time).
exponent = 3 # see tjablin@'s analysis for explanation of this parameter
uint32max = jnp.iinfo(np.uint32).max
num_rounds = int(np.ceil(exponent * np.log(max(1, x.size)) / np.log(uint32max)))
for _ in range(num_rounds):
key, subkey = split(key)
sort_keys = _random_bits(subkey, 32, x.shape)
_, x = lax.sort_key_val(sort_keys, x, axis)
return x
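# Illustrative usage sketch for the public wrappers over _shuffle: an integer
# argument permutes arange(n), an array argument is shuffled along axis 0.
# The helper and its seed are hypothetical and never called here.
def _example_permutation_usage(seed=0):
  k1, k2 = split(PRNGKey(seed))
  return permutation(k1, 10), permutation(k2, jnp.arange(6.).reshape(3, 2))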
def choice(key: jnp.ndarray,
a: IntegerArray,
shape: Sequence[int] = (),
replace: bool = True,
p=None) -> jnp.ndarray:
"""Generates a random sample from a given 1-D array.
Args:
key: a PRNGKey used as the random key.
a : 1D array or int. If an ndarray, a random sample is generated from
its elements. If an int, the random sample is generated as if a were
arange(a).
shape : tuple of ints, optional. Output shape. If the given shape is,
e.g., ``(m, n)``, then ``m * n`` samples are drawn. Default is (),
in which case a single value is returned.
replace : boolean. Whether the sample is with or without replacement.
default is True.
p : 1-D array-like, The probabilities associated with each entry in a.
If not given the sample assumes a uniform distribution over all
entries in a.
Returns:
An array of shape `shape` containing samples from `a`.
"""
if not isinstance(shape, Sequence):
raise TypeError("shape argument of jax.random.choice must be a sequence, "
f"got {shape}")
if np.ndim(a) not in [0, 1]:
raise ValueError("a must be an integer or 1-dimensional")
_check_arraylike("choice", a)
if np.ndim(a) == 0:
a = core.concrete_or_error(int, a, "The error occurred in jax.random.choice()")
else:
a = jnp.asarray(a)
n_inputs = int(a) if np.ndim(a) == 0 else len(a) # type: ignore[arg-type]
n_draws = prod(shape)
if n_draws == 0:
return jnp.zeros(shape, dtype=lax.dtype(a))
if n_inputs <= 0:
raise ValueError("a must be greater than 0 unless no samples are taken")
if not replace and n_draws > n_inputs:
raise ValueError("Cannot take a larger sample than population when 'replace=False'")
if p is None:
if replace:
ind = randint(key, shape, 0, n_inputs)
result = ind if np.ndim(a) == 0 else a[ind] # type: ignore[index]
else:
result = permutation(key, a)[:n_draws]
else:
if p.shape != (n_inputs,):
raise ValueError("p must be None or match the shape of a")
if replace:
p_cuml = jnp.cumsum(p)
r = p_cuml[-1] * (1 - uniform(key, shape))
ind = jnp.searchsorted(p_cuml, r)
result = ind if np.ndim(a) == 0 else a[ind] # type: ignore[index]
else:
# Gumbel top-k trick: https://timvieira.github.io/blog/post/2019/09/16/algorithms-for-sampling-without-replacement/
g = -gumbel(key, (n_inputs,)) - jnp.log(p)
ind = jnp.argsort(g)[:n_draws]
result = ind if np.ndim(a) == 0 else a[ind] # type: ignore[index]
return result.reshape(shape)
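# Illustrative usage sketch: weighted draws with and without replacement. The
# 4-way probability vector is an arbitrary example; without replacement the
# Gumbel top-k path above is taken. Never called by the module.
def _example_choice_usage(seed=0):
  k1, k2 = split(PRNGKey(seed))
  p = jnp.array([0.1, 0.2, 0.3, 0.4])
  with_repl = choice(k1, 4, shape=(6,), p=p)
  without_repl = choice(k2, 4, shape=(2,), replace=False, p=p)
  return with_repl, without_repl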
def normal(key: jnp.ndarray,
shape: Union[Sequence[int], NamedShape] = (),
dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:
"""Sample standard normal random values with given shape and float dtype.
Args:
key: a PRNGKey used as the random key.
shape: optional, a tuple of nonnegative integers representing the result
shape. Default ().
dtype: optional, a float dtype for the returned values (default float64 if
jax_enable_x64 is true, otherwise float32).
Returns:
A random array with the specified shape and dtype.
"""
if not dtypes.issubdtype(dtype, np.inexact):
raise ValueError(f"dtype argument to `normal` must be a float or complex dtype, "
f"got {dtype}")
dtype = dtypes.canonicalize_dtype(dtype)
shape = core.as_named_shape(shape)
return _normal(key, shape, dtype) # type: ignore
@partial(jit, static_argnums=(1, 2))
def _normal(key, shape, dtype) -> jnp.ndarray:
if dtypes.issubdtype(dtype, np.complexfloating):
sqrt2 = np.array(np.sqrt(2), dtype)
key_re, key_im = split(key)
real_dtype = np.array(0, dtype).real.dtype
_re = _normal_real(key_re, shape, real_dtype)
_im = _normal_real(key_im, shape, real_dtype)
return (_re + 1j * _im) / sqrt2
else:
return _normal_real(key, shape, dtype) # type: ignore
@partial(jit, static_argnums=(1, 2))
def _normal_real(key, shape, dtype) -> jnp.ndarray:
_check_shape("normal", shape)
lo = np.nextafter(np.array(-1., dtype), np.array(0., dtype), dtype=dtype)
hi = np.array(1., dtype)
u = uniform(key, shape, dtype, lo, hi) # type: ignore[arg-type]
return np.array(np.sqrt(2), dtype) * lax.erf_inv(u)
def multivariate_normal(key: jnp.ndarray,
mean: RealArray,
cov: RealArray,
shape: Optional[Sequence[int]] = None,
dtype: DTypeLikeFloat = dtypes.float_,
method: str = 'cholesky') -> jnp.ndarray:
"""Sample multivariate normal random values with given mean and covariance.
Args:
key: a PRNGKey used as the random key.
mean: a mean vector of shape ``(..., n)``.
cov: a positive definite covariance matrix of shape ``(..., n, n)``. The
batch shape ``...`` must be broadcast-compatible with that of ``mean``.
shape: optional, a tuple of nonnegative integers specifying the result
batch shape; that is, the prefix of the result shape excluding the last
axis. Must be broadcast-compatible with ``mean.shape[:-1]`` and
``cov.shape[:-2]``. The default (None) produces a result batch shape by
broadcasting together the batch shapes of ``mean`` and ``cov``.
dtype: optional, a float dtype for the returned values (default float64 if
jax_enable_x64 is true, otherwise float32).
method: optional, a method to compute the factor of ``cov``.
      Must be one of 'svd', 'eigh', or 'cholesky'. Default 'cholesky'.
Returns:
A random array with the specified dtype and shape given by
``shape + mean.shape[-1:]`` if ``shape`` is not None, or else
``broadcast_shapes(mean.shape[:-1], cov.shape[:-2]) + mean.shape[-1:]``.
"""
if method not in {'svd', 'eigh', 'cholesky'}:
raise ValueError("method must be one of {'svd', 'eigh', 'cholesky'}")
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `multivariate_normal` must be a float "
f"dtype, got {dtype}")
dtype = dtypes.canonicalize_dtype(dtype)
if shape is not None:
shape = core.canonicalize_shape(shape)
return _multivariate_normal(key, mean, cov, shape, dtype, method) # type: ignore
@partial(jit, static_argnums=(3, 4, 5))
def _multivariate_normal(key, mean, cov, shape, dtype, method) -> jnp.ndarray:
if not np.ndim(mean) >= 1:
msg = "multivariate_normal requires mean.ndim >= 1, got mean.ndim == {}"
raise ValueError(msg.format(np.ndim(mean)))
if not np.ndim(cov) >= 2:
msg = "multivariate_normal requires cov.ndim >= 2, got cov.ndim == {}"
raise ValueError(msg.format(np.ndim(cov)))
n = mean.shape[-1]
if np.shape(cov)[-2:] != (n, n):
msg = ("multivariate_normal requires cov.shape == (..., n, n) for n={n}, "
"but got cov.shape == {shape}.")
raise ValueError(msg.format(n=n, shape=np.shape(cov)))
if shape is None:
shape = lax.broadcast_shapes(mean.shape[:-1], cov.shape[:-2])
else:
_check_shape("normal", shape, mean.shape[:-1], cov.shape[:-2])
if method == 'svd':
(u, s, _) = svd(cov)
factor = u * jnp.sqrt(s)
elif method == 'eigh':
(w, v) = eigh(cov)
factor = v * jnp.sqrt(w)
else: # 'cholesky'
factor = cholesky(cov)
normal_samples = normal(key, shape + mean.shape[-1:], dtype)
return mean + jnp.einsum('...ij,...j->...i', factor, normal_samples)
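# Illustrative usage sketch: a batch of draws from a 2-d Gaussian with an
# arbitrary positive definite covariance; the default Cholesky factorization
# is used. The helper is hypothetical and never called here.
def _example_multivariate_normal_usage(seed=0):
  key = PRNGKey(seed)
  mean = jnp.array([0., 1.])
  cov = jnp.array([[2.0, 0.3],
                   [0.3, 0.5]])
  return multivariate_normal(key, mean, cov, shape=(4,))   # shape (4, 2)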
def truncated_normal(key: jnp.ndarray,
lower: RealArray,
upper: RealArray,
shape: Optional[Union[Sequence[int], NamedShape]] = None,
dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:
"""Sample truncated standard normal random values with given shape and dtype.
Args:
key: a PRNGKey used as the random key.
lower: a float or array of floats representing the lower bound for
truncation. Must be broadcast-compatible with ``upper``.
upper: a float or array of floats representing the upper bound for
truncation. Must be broadcast-compatible with ``lower``.
shape: optional, a tuple of nonnegative integers specifying the result
shape. Must be broadcast-compatible with ``lower`` and ``upper``. The
default (None) produces a result shape by broadcasting ``lower`` and
``upper``.
dtype: optional, a float dtype for the returned values (default float64 if
jax_enable_x64 is true, otherwise float32).
Returns:
A random array with the specified dtype and shape given by ``shape`` if
``shape`` is not None, or else by broadcasting ``lower`` and ``upper``.
Returns values in the open interval ``(lower, upper)``.
"""
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `truncated_normal` must be a float "
f"dtype, got {dtype}")
dtype = dtypes.canonicalize_dtype(dtype)
if shape is not None:
shape = core.as_named_shape(shape)
return _truncated_normal(key, lower, upper, shape, dtype) # type: ignore
@partial(jit, static_argnums=(3, 4))
def _truncated_normal(key, lower, upper, shape, dtype) -> jnp.ndarray:
if shape is None:
shape = lax.broadcast_shapes(np.shape(lower), np.shape(upper))
else:
_check_shape("truncated_normal", shape, np.shape(lower), np.shape(upper))
sqrt2 = np.array(np.sqrt(2), dtype)
lower = lax.convert_element_type(lower, dtype)
upper = lax.convert_element_type(upper, dtype)
a = lax.erf(lower / sqrt2)
b = lax.erf(upper / sqrt2)
if not jnp.issubdtype(dtype, np.floating):
raise TypeError("truncated_normal only accepts floating point dtypes.")
u = uniform(key, shape, dtype, minval=a, maxval=b)
out = sqrt2 * lax.erf_inv(u)
# Clamp the value to the open interval (lower, upper) to make sure that
# rounding (or if we chose `a` for `u`) doesn't push us outside of the range.
return jnp.clip(
out,
lax.nextafter(lax.stop_gradient(lower), np.array(np.inf, dtype=dtype)),
lax.nextafter(lax.stop_gradient(upper), np.array(-np.inf, dtype=dtype)))
def bernoulli(key: jnp.ndarray,
p: RealArray = np.float32(0.5),
shape: Optional[Union[Sequence[int], NamedShape]] = None) -> jnp.ndarray:
"""Sample Bernoulli random values with given shape and mean.
Args:
key: a PRNGKey used as the random key.
p: optional, a float or array of floats for the mean of the random
variables. Must be broadcast-compatible with ``shape``. Default 0.5.
shape: optional, a tuple of nonnegative integers representing the result
shape. Must be broadcast-compatible with ``p.shape``. The default (None)
produces a result shape equal to ``p.shape``.
Returns:
A random array with boolean dtype and shape given by ``shape`` if ``shape``
is not None, or else ``p.shape``.
"""
dtype = dtypes.canonicalize_dtype(lax.dtype(p))
if shape is not None:
shape = core.as_named_shape(shape)
if not jnp.issubdtype(dtype, np.floating):
msg = "bernoulli probability `p` must have a floating dtype, got {}."
raise TypeError(msg.format(dtype))
p = lax.convert_element_type(p, dtype)
return _bernoulli(key, p, shape) # type: ignore
@partial(jit, static_argnums=(2,))
def _bernoulli(key, p, shape) -> jnp.ndarray:
if shape is None:
# TODO: Use the named part of `p` as well
shape = np.shape(p)
else:
_check_shape("bernoulli", shape, np.shape(p))
return uniform(key, shape, lax.dtype(p)) < p
def beta(key: jnp.ndarray,
a: RealArray,
b: RealArray,
shape: Optional[Sequence[int]] = None,
dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:
"""Sample Beta random values with given shape and float dtype.
Args:
key: a PRNGKey used as the random key.
a: a float or array of floats broadcast-compatible with ``shape``
representing the first parameter "alpha".
b: a float or array of floats broadcast-compatible with ``shape``
representing the second parameter "beta".
shape: optional, a tuple of nonnegative integers specifying the result
shape. Must be broadcast-compatible with ``a`` and ``b``. The default
(None) produces a result shape by broadcasting ``a`` and ``b``.
dtype: optional, a float dtype for the returned values (default float64 if
jax_enable_x64 is true, otherwise float32).
Returns:
A random array with the specified dtype and shape given by ``shape`` if
``shape`` is not None, or else by broadcasting ``a`` and ``b``.
"""
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `beta` must be a float "
f"dtype, got {dtype}")
dtype = dtypes.canonicalize_dtype(dtype)
if shape is not None:
shape = core.canonicalize_shape(shape)
return _beta(key, a, b, shape, dtype)
def _beta(key, a, b, shape, dtype):
if shape is None:
shape = lax.broadcast_shapes(np.shape(a), np.shape(b))
else:
_check_shape("beta", shape, np.shape(a), np.shape(b))
a = lax.convert_element_type(a, dtype)
b = lax.convert_element_type(b, dtype)
key_a, key_b = split(key)
a = jnp.broadcast_to(a, shape)
b = jnp.broadcast_to(b, shape)
gamma_a = gamma(key_a, a, shape, dtype)
gamma_b = gamma(key_b, b, shape, dtype)
return gamma_a / (gamma_a + gamma_b)
def cauchy(key: jnp.ndarray,
shape: Sequence[int] = (),
dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:
"""Sample Cauchy random values with given shape and float dtype.
Args:
key: a PRNGKey used as the random key.
shape: optional, a tuple of nonnegative integers representing the result
shape. Default ().
dtype: optional, a float dtype for the returned values (default float64 if
jax_enable_x64 is true, otherwise float32).
Returns:
A random array with the specified shape and dtype.
"""
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `cauchy` must be a float "
f"dtype, got {dtype}")
dtype = dtypes.canonicalize_dtype(dtype)
shape = core.canonicalize_shape(shape)
return _cauchy(key, shape, dtype)
@partial(jit, static_argnums=(1, 2))
def _cauchy(key, shape, dtype):
_check_shape("cauchy", shape)
u = uniform(key, shape, dtype, minval=jnp.finfo(dtype).eps, maxval=1.)
pi = _constant_like(u, np.pi)
return lax.tan(lax.mul(pi, lax.sub(u, _constant_like(u, 0.5))))
def dirichlet(key: jnp.ndarray,
alpha: RealArray,
shape: Optional[Sequence[int]] = None,
dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:
"""Sample Dirichlet random values with given shape and float dtype.
Args:
key: a PRNGKey used as the random key.
alpha: an array of shape ``(..., n)`` used as the concentration
parameter of the random variables.
shape: optional, a tuple of nonnegative integers specifying the result
batch shape; that is, the prefix of the result shape excluding the last
element of value ``n``. Must be broadcast-compatible with
``alpha.shape[:-1]``. The default (None) produces a result shape equal to
``alpha.shape``.
dtype: optional, a float dtype for the returned values (default float64 if
jax_enable_x64 is true, otherwise float32).
Returns:
A random array with the specified dtype and shape given by
``shape + (alpha.shape[-1],)`` if ``shape`` is not None, or else
``alpha.shape``.
"""
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `dirichlet` must be a float "
f"dtype, got {dtype}")
dtype = dtypes.canonicalize_dtype(dtype)
if shape is not None:
shape = core.canonicalize_shape(shape)
return _dirichlet(key, alpha, shape, dtype)
@partial(jit, static_argnums=(2, 3))
def _dirichlet(key, alpha, shape, dtype):
if not np.ndim(alpha) >= 1:
msg = "dirichlet requires alpha.ndim >= 1, got alpha.ndim == {}"
raise ValueError(msg.format(np.ndim(alpha)))
if shape is None:
shape = np.shape(alpha)[:-1]
else:
_check_shape("dirichlet", shape, np.shape(alpha)[:-1])
alpha = lax.convert_element_type(alpha, dtype)
gamma_samples = gamma(key, alpha, shape + np.shape(alpha)[-1:], dtype)
return gamma_samples / jnp.sum(gamma_samples, axis=-1, keepdims=True)
def exponential(key: jnp.ndarray,
shape: Sequence[int] = (),
dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:
"""Sample Exponential random values with given shape and float dtype.
Args:
key: a PRNGKey used as the random key.
shape: optional, a tuple of nonnegative integers representing the result
shape. Default ().
dtype: optional, a float dtype for the returned values (default float64 if
jax_enable_x64 is true, otherwise float32).
Returns:
A random array with the specified shape and dtype.
"""
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `exponential` must be a float "
f"dtype, got {dtype}")
dtype = dtypes.canonicalize_dtype(dtype)
shape = core.canonicalize_shape(shape)
return _exponential(key, shape, dtype)
@partial(jit, static_argnums=(1, 2))
def _exponential(key, shape, dtype):
_check_shape("exponential", shape)
u = uniform(key, shape, dtype)
# taking 1 - u to move the domain of log to (0, 1] instead of [0, 1)
return lax.neg(lax.log1p(lax.neg(u)))
def _gamma_one(key, alpha):
# Ref: A simple method for generating gamma variables, George Marsaglia and Wai Wan Tsang
  # The algorithm can also be found in:
# https://en.wikipedia.org/wiki/Gamma_distribution#Generating_gamma-distributed_random_variables
zero = _constant_like(alpha, 0)
one = _constant_like(alpha, 1)
minus_one = _constant_like(alpha, -1)
one_over_two = _constant_like(alpha, 0.5)
one_over_three = _constant_like(alpha, 1. / 3.)
squeeze_const = _constant_like(alpha, 0.0331)
dtype = lax.dtype(alpha)
key, subkey = split(key)
# for alpha < 1, we boost alpha to alpha + 1 and get a sample according to
# Gamma(alpha) ~ Gamma(alpha+1) * Uniform()^(1 / alpha)
boost = lax.select(lax.ge(alpha, one),
one,
lax.pow(uniform(subkey, (), dtype=dtype), lax.div(one, alpha)))
alpha = lax.select(lax.ge(alpha, one), alpha, lax.add(alpha, one))
d = lax.sub(alpha, one_over_three)
c = lax.div(one_over_three, lax.sqrt(d))
def _cond_fn(kXVU):
_, X, V, U = kXVU
# TODO: use lax.cond when its batching rule is supported
# The reason is to avoid evaluating second condition which involves log+log
# if the first condition is satisfied
cond = lax.bitwise_and(lax.ge(U, lax.sub(one, lax.mul(squeeze_const, lax.mul(X, X)))),
lax.ge(lax.log(U), lax.add(lax.mul(X, one_over_two),
lax.mul(d, lax.add(lax.sub(one, V),
lax.log(V))))))
return cond
def _body_fn(kXVU):
def _next_kxv(kxv):
key = kxv[0]
key, subkey = split(key)
x = normal(subkey, (), dtype=dtype)
v = lax.add(one, lax.mul(x, c))
return key, x, v
key = kXVU[0]
key, x_key, U_key = split(key, 3)
_, x, v = lax.while_loop(lambda kxv: lax.le(kxv[2], zero), _next_kxv, (x_key, zero, minus_one))
X = lax.mul(x, x)
V = lax.mul(lax.mul(v, v), v)
U = uniform(U_key, (), dtype=dtype)
return key, X, V, U
# initial state is chosen such that _cond_fn will return True
_, _, V, _ = lax.while_loop(_cond_fn, _body_fn, (key, zero, one, _constant_like(alpha, 2)))
z = lax.mul(lax.mul(d, V), boost)
return lax.select(lax.eq(z, zero), jnp.finfo(z.dtype).tiny, z)
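# A NumPy Monte Carlo sketch of the boosting identity used above for
# alpha < 1:  Gamma(alpha) =d Gamma(alpha + 1) * Uniform()**(1 / alpha).
# Both estimates should approach alpha (the mean of Gamma(alpha, 1)).
# Assumes numpy >= 1.17 for default_rng; illustrative only, never called.
def _example_gamma_boost_check(alpha=0.5, n=100_000, seed=0):
  rng = np.random.default_rng(seed)
  direct = rng.gamma(alpha, size=n)
  boosted = rng.gamma(alpha + 1.0, size=n) * rng.uniform(size=n) ** (1.0 / alpha)
  return direct.mean(), boosted.mean()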
def _gamma_grad(sample, a):
samples = jnp.reshape(sample, -1)
alphas = jnp.reshape(a, -1)
if xla_bridge.get_backend().platform == 'cpu':
grads = lax.map(lambda args: lax.random_gamma_grad(*args), (alphas, samples))
else:
grads = vmap(lax.random_gamma_grad)(alphas, samples)
return grads.reshape(np.shape(a))
def _gamma_impl(key, a, use_vmap=False):
a_shape = jnp.shape(a)
# split key to match the shape of a
key_ndim = jnp.ndim(key) - 1
key = jnp.reshape(key, (-1, 2))
key = vmap(split, in_axes=(0, None))(key, prod(a_shape[key_ndim:]))
keys = jnp.reshape(key, (-1, 2))
alphas = jnp.reshape(a, -1)
if use_vmap:
samples = vmap(_gamma_one)(keys, alphas)
else:
samples = lax.map(lambda args: _gamma_one(*args), (keys, alphas))
return jnp.reshape(samples, a_shape)
def _gamma_batching_rule(batched_args, batch_dims):
k, a = batched_args
bk, ba = batch_dims
size = next(t.shape[i] for t, i in zip(batched_args, batch_dims) if i is not None)
k = batching.bdim_at_front(k, bk, size)
a = batching.bdim_at_front(a, ba, size)
return random_gamma_p.bind(k, a), 0
random_gamma_p = core.Primitive('random_gamma')
random_gamma_p.def_impl(_gamma_impl)
random_gamma_p.def_abstract_eval(lambda key, a: core.raise_to_shaped(a))
ad.defjvp2(random_gamma_p, None, lambda tangent, ans, key, a: tangent * _gamma_grad(ans, a))
xla.translations_with_avals[random_gamma_p] = xla.lower_fun(
partial(_gamma_impl, use_vmap=True),
multiple_results=False, with_avals=True)
xla.backend_specific_translations['cpu'][random_gamma_p] = xla.lower_fun(
partial(_gamma_impl, use_vmap=False),
multiple_results=False)
batching.primitive_batchers[random_gamma_p] = _gamma_batching_rule
def gamma(key: jnp.ndarray,
a: RealArray,
shape: Optional[Sequence[int]] = None,
dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:
"""Sample Gamma random values with given shape and float dtype.
Args:
key: a PRNGKey used as the random key.
a: a float or array of floats broadcast-compatible with ``shape``
representing the parameter of the distribution.
shape: optional, a tuple of nonnegative integers specifying the result
shape. Must be broadcast-compatible with ``a``. The default (None)
produces a result shape equal to ``a.shape``.
dtype: optional, a float dtype for the returned values (default float64 if
jax_enable_x64 is true, otherwise float32).
Returns:
A random array with the specified dtype and with shape given by ``shape`` if
``shape`` is not None, or else by ``a.shape``.
"""
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `gamma` must be a float "
f"dtype, got {dtype}")
dtype = dtypes.canonicalize_dtype(dtype)
if shape is not None:
shape = core.canonicalize_shape(shape)
return _gamma(key, a, shape, dtype)
@partial(jit, static_argnums=(2, 3))
def _gamma(key, a, shape, dtype):
if shape is None:
shape = np.shape(a)
else:
_check_shape("gamma", shape, np.shape(a))
a = lax.convert_element_type(a, dtype)
if np.shape(a) != shape:
a = jnp.broadcast_to(a, shape)
return random_gamma_p.bind(key, a)
@partial(jit, static_argnums=(2, 3, 4))
def _poisson_knuth(key, lam, shape, dtype, max_iters):
# Knuth's algorithm for generating Poisson random variates.
# Reference:
# https://en.wikipedia.org/wiki/Poisson_distribution#Generating_Poisson-distributed_random_variables
def body_fn(carry):
i, k, rng, log_prod = carry
rng, subkey = split(rng)
k = lax.select(log_prod > -lam, k + 1, k)
u = uniform(subkey, shape, np.float32)
return i + 1, k, rng, log_prod + jnp.log(u)
def cond_fn(carry):
i, log_prod = carry[0], carry[3]
return (log_prod > -lam).any() & (i < max_iters)
k_init = lax.full_like(lam, 0, dtype, shape)
log_rate_init = lax.full_like(lam, 0, np.float32, shape)
k = lax.while_loop(cond_fn, body_fn, (0, k_init, key, log_rate_init))[1]
return (k - 1).astype(dtype)
@partial(jit, static_argnums=(2, 3, 4))
def _poisson_rejection(key, lam, shape, dtype, max_iters):
# Transformed rejection due to Hormann.
# Reference:
# http://citeseer.ist.psu.edu/viewdoc/citations;jsessionid=1BEB35946CC807879F55D42512E5490C?doi=10.1.1.48.3054.
log_lam = lax.log(lam)
b = 0.931 + 2.53 * lax.sqrt(lam)
a = -0.059 + 0.02483 * b
inv_alpha = 1.1239 + 1.1328 / (b - 3.4)
v_r = 0.9277 - 3.6224 / (b - 2)
def body_fn(carry):
i, k_out, accepted, key = carry
key, subkey_0, subkey_1 = split(key, 3)
u = uniform(subkey_0, shape, lam.dtype) - 0.5
v = uniform(subkey_1, shape, lam.dtype)
u_shifted = 0.5 - abs(u)
k = lax.floor((2 * a / u_shifted + b) * u + lam + 0.43)
s = lax.log(v * inv_alpha / (a / (u_shifted * u_shifted) + b))
t = -lam + k * log_lam - lax.lgamma(k + 1)
accept1 = (u_shifted >= 0.07) & (v <= v_r)
reject = (k < 0) | ((u_shifted < 0.013) & (v > u_shifted))
accept2 = s <= t
accept = accept1 | (~reject & accept2)
k_out = lax.select(accept, k, k_out)
accepted |= accept
return i + 1, k_out, accepted, key
def cond_fn(carry):
i, k_out, accepted, key = carry
return (~accepted).any() & (i < max_iters)
k_init = lax.full_like(lam, -1, lam.dtype, shape)
accepted = lax.full_like(lam, False, jnp.bool_, shape)
k = lax.while_loop(cond_fn, body_fn, (0, k_init, accepted, key))[1]
return k.astype(dtype)
@partial(jit, static_argnums=(2, 3))
def _poisson(key, lam, shape, dtype):
# The implementation matches TensorFlow and NumPy:
# https://github.com/tensorflow/tensorflow/blob/v2.2.0-rc3/tensorflow/core/kernels/random_poisson_op.cc
# https://github.com/numpy/numpy/blob/v1.18.3/numpy/random/src/distributions/distributions.c#L574
# For lambda < 10, we use the Knuth algorithm; otherwise, we use transformed
# rejection sampling.
use_knuth = lam < 10
lam_knuth = lax.select(use_knuth, lam, lax.full_like(lam, 0.0))
# The acceptance probability for rejection sampling maxes out at 89% as
# λ -> ∞, so pick some arbitrary large value.
lam_rejection = lax.select(use_knuth, lax.full_like(lam, 1e5), lam)
max_iters = dtype.type(jnp.iinfo(dtype).max) # insanely conservative
result = lax.select(
use_knuth,
_poisson_knuth(key, lam_knuth, shape, dtype, max_iters),
_poisson_rejection(key, lam_rejection, shape, dtype, max_iters),
)
return lax.select(lam == 0, jnp.zeros_like(result), result)
def poisson(key: jnp.ndarray,
lam: RealArray,
shape: Sequence[int] = (),
dtype: DTypeLikeInt = dtypes.int_) -> jnp.ndarray:
"""Sample Poisson random values with given shape and integer dtype.
Args:
key: a PRNGKey used as the random key.
lam: rate parameter (mean of the distribution), must be >= 0.
shape: optional, a tuple of nonnegative integers representing the result
shape. Default ().
    dtype: optional, an integer dtype for the returned values (default int64 if
jax_enable_x64 is true, otherwise int32).
Returns:
A random array with the specified shape and dtype.
"""
dtype = dtypes.canonicalize_dtype(dtype)
shape = core.canonicalize_shape(shape)
if np.shape(lam) != shape:
lam = jnp.broadcast_to(lam, shape)
lam = lax.convert_element_type(lam, np.float32)
return _poisson(key, lam, shape, dtype)
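# Illustrative usage sketch: a single call covers both regimes, since the
# lam < 10 (Knuth) versus lam >= 10 (transformed rejection) split above is
# resolved elementwise. The rates are arbitrary example values.
def _example_poisson_usage(seed=0):
  key = PRNGKey(seed)
  lam = jnp.array([0.5, 3.0, 50.0])
  return poisson(key, lam, shape=(3,))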
def gumbel(key: jnp.ndarray,
shape: Sequence[int] = (),
dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:
"""Sample Gumbel random values with given shape and float dtype.
Args:
key: a PRNGKey used as the random key.
shape: optional, a tuple of nonnegative integers representing the result
shape. Default ().
dtype: optional, a float dtype for the returned values (default float64 if
jax_enable_x64 is true, otherwise float32).
Returns:
A random array with the specified shape and dtype.
"""
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `gumbel` must be a float "
f"dtype, got {dtype}")
dtype = dtypes.canonicalize_dtype(dtype)
shape = core.canonicalize_shape(shape)
return _gumbel(key, shape, dtype)
@partial(jit, static_argnums=(1, 2))
def _gumbel(key, shape, dtype):
_check_shape("gumbel", shape)
return -jnp.log(-jnp.log(
uniform(key, shape, dtype, minval=jnp.finfo(dtype).tiny, maxval=1.)))
def categorical(key: jnp.ndarray,
logits: RealArray,
axis: int = -1,
shape: Optional[Sequence[int]] = None) -> jnp.ndarray:
"""Sample random values from categorical distributions.
Args:
key: a PRNGKey used as the random key.
logits: Unnormalized log probabilities of the categorical distribution(s) to sample from,
so that `softmax(logits, axis)` gives the corresponding probabilities.
axis: Axis along which logits belong to the same categorical distribution.
shape: Optional, a tuple of nonnegative integers representing the result shape.
Must be broadcast-compatible with ``np.delete(logits.shape, axis)``.
The default (None) produces a result shape equal to ``np.delete(logits.shape, axis)``.
Returns:
A random array with int dtype and shape given by ``shape`` if ``shape``
is not None, or else ``np.delete(logits.shape, axis)``.
"""
if axis >= 0:
axis -= len(logits.shape)
batch_shape = tuple(np.delete(logits.shape, axis))
if shape is None:
shape = batch_shape
else:
shape = tuple(shape)
_check_shape("categorical", shape, batch_shape)
sample_shape = shape[:len(shape)-len(batch_shape)]
return jnp.argmax(gumbel(key, sample_shape + logits.shape, logits.dtype) + logits, axis=axis)
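# Illustrative usage sketch: class indices from two 3-way distributions, with
# and without extra sample dimensions prepended. The logits are arbitrary
# example values; the helper is never called by the module.
def _example_categorical_usage(seed=0):
  key = PRNGKey(seed)
  logits = jnp.log(jnp.array([[0.1, 0.6, 0.3],
                              [0.8, 0.1, 0.1]]))
  single = categorical(key, logits)                  # shape (2,)
  several = categorical(key, logits, shape=(5, 2))   # 5 draws per row
  return single, several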
def laplace(key: jnp.ndarray,
shape: Sequence[int] = (),
dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:
"""Sample Laplace random values with given shape and float dtype.
Args:
key: a PRNGKey used as the random key.
shape: optional, a tuple of nonnegative integers representing the result
shape. Default ().
dtype: optional, a float dtype for the returned values (default float64 if
jax_enable_x64 is true, otherwise float32).
Returns:
A random array with the specified shape and dtype.
"""
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `laplace` must be a float "
f"dtype, got {dtype}")
dtype = dtypes.canonicalize_dtype(dtype)
shape = core.canonicalize_shape(shape)
return _laplace(key, shape, dtype)
@partial(jit, static_argnums=(1, 2))
def _laplace(key, shape, dtype):
_check_shape("laplace", shape)
u = uniform(
key, shape, dtype, minval=-1. + jnp.finfo(dtype).epsneg, maxval=1.)
return lax.mul(lax.sign(u), lax.log1p(lax.neg(lax.abs(u))))
def logistic(key: jnp.ndarray,
shape: Sequence[int] = (),
dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:
"""Sample logistic random values with given shape and float dtype.
Args:
key: a PRNGKey used as the random key.
shape: optional, a tuple of nonnegative integers representing the result
shape. Default ().
dtype: optional, a float dtype for the returned values (default float64 if
jax_enable_x64 is true, otherwise float32).
Returns:
A random array with the specified shape and dtype.
"""
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `logistic` must be a float "
f"dtype, got {dtype}")
dtype = dtypes.canonicalize_dtype(dtype)
shape = core.canonicalize_shape(shape)
return _logistic(key, shape, dtype)
@partial(jit, static_argnums=(1, 2))
def _logistic(key, shape, dtype):
_check_shape("logistic", shape)
x = uniform(key, shape, dtype, minval=jnp.finfo(dtype).eps, maxval=1.)
return lax.log(lax.div(x, lax.sub(lax._const(x, 1), x)))
def pareto(key: jnp.ndarray,
b: RealArray,
shape: Optional[Sequence[int]] = None,
dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:
"""Sample Pareto random values with given shape and float dtype.
Args:
key: a PRNGKey used as the random key.
b: a float or array of floats broadcast-compatible with ``shape``
representing the parameter of the distribution.
shape: optional, a tuple of nonnegative integers specifying the result
shape. Must be broadcast-compatible with ``b``. The default (None)
produces a result shape equal to ``b.shape``.
dtype: optional, a float dtype for the returned values (default float64 if
jax_enable_x64 is true, otherwise float32).
Returns:
A random array with the specified dtype and with shape given by ``shape`` if
``shape`` is not None, or else by ``b.shape``.
"""
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `pareto` must be a float "
f"dtype, got {dtype}")
dtype = dtypes.canonicalize_dtype(dtype)
if shape is not None:
shape = core.canonicalize_shape(shape)
return _pareto(key, b, shape, dtype)
@partial(jit, static_argnums=(2, 3))
def _pareto(key, b, shape, dtype):
if shape is None:
shape = np.shape(b)
else:
_check_shape("pareto", shape)
b = lax.convert_element_type(b, dtype)
e = exponential(key, shape, dtype)
return lax.exp(e / b)
def t(key: jnp.ndarray,
df: RealArray,
shape: Sequence[int] = (),
dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:
"""Sample Student's t random values with given shape and float dtype.
Args:
key: a PRNGKey used as the random key.
df: a float or array of floats broadcast-compatible with ``shape``
representing the parameter of the distribution.
shape: optional, a tuple of nonnegative integers specifying the result
shape. Must be broadcast-compatible with ``df``. The default (None)
produces a result shape equal to ``df.shape``.
dtype: optional, a float dtype for the returned values (default float64 if
jax_enable_x64 is true, otherwise float32).
Returns:
A random array with the specified dtype and with shape given by ``shape`` if
``shape`` is not None, or else by ``df.shape``.
"""
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `t` must be a float "
f"dtype, got {dtype}")
dtype = dtypes.canonicalize_dtype(dtype)
shape = core.canonicalize_shape(shape)
return _t(key, df, shape, dtype)
@partial(jit, static_argnums=(2, 3))
def _t(key, df, shape, dtype):
if shape is None:
shape = np.shape(df)
else:
_check_shape("t", shape, np.shape(df))
df = lax.convert_element_type(df, dtype)
key_n, key_g = split(key)
n = normal(key_n, shape, dtype)
two = _constant_like(n, 2)
half_df = lax.div(df, two)
  g = gamma(key_g, half_df, shape, dtype)
return n * jnp.sqrt(half_df / g)
def rademacher(key: jnp.ndarray,
shape: Sequence[int],
dtype: DTypeLikeInt = dtypes.int_) -> jnp.ndarray:
"""Sample from a Rademacher distribution.
Args:
key: a PRNGKey key.
shape: The shape of the returned samples.
dtype: The type used for samples.
Returns:
A jnp.array of samples, of shape `shape`. Each element in the output has
    a 50% chance of being 1 or -1.
"""
dtype = dtypes.canonicalize_dtype(dtype)
shape = core.canonicalize_shape(shape)
return _rademacher(key, shape, dtype)
@partial(jit, static_argnums=(1, 2))
def _rademacher(key, shape, dtype):
bernoulli_samples = bernoulli(key=key, p=0.5, shape=shape)
return (2 * bernoulli_samples - 1).astype(dtype)
def maxwell(key: jnp.ndarray,
shape: Sequence[int] = (),
dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:
"""Sample from a one sided Maxwell distribution.
The scipy counterpart is `scipy.stats.maxwell`.
Args:
key: a PRNGKey key.
shape: The shape of the returned samples.
dtype: The type used for samples.
Returns:
A jnp.array of samples, of shape `shape`.
"""
# Generate samples using:
# sqrt(X^2 + Y^2 + Z^2), X,Y,Z ~N(0,1)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `maxwell` must be a float "
f"dtype, got {dtype}")
dtype = dtypes.canonicalize_dtype(dtype)
shape = core.canonicalize_shape(shape)
return _maxwell(key, shape, dtype)
@partial(jit, static_argnums=(1, 2))
def _maxwell(key, shape, dtype):
shape = shape + (3,)
norm_rvs = normal(key=key, shape=shape, dtype=dtype)
return jnp.linalg.norm(norm_rvs, axis=-1)
def double_sided_maxwell(key: jnp.ndarray,
loc: RealArray,
scale: RealArray,
shape: Sequence[int] = (),
dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:
"""Sample from a double sided Maxwell distribution.
  Samples are drawn as:
    loc + scale * sgn(U - 0.5) * one_sided_maxwell, where U ~ Uniform(0, 1).
Args:
key: a PRNGKey key.
loc: The location parameter of the distribution.
scale: The scale parameter of the distribution.
    shape: The shape prepended to the broadcast shape of the parameters
      ``loc`` and ``scale``.
dtype: The type used for samples.
Returns:
A jnp.array of samples.
"""
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `double_sided_maxwell` must be a float"
f" dtype, got {dtype}")
dtype = dtypes.canonicalize_dtype(dtype)
shape = core.canonicalize_shape(shape)
return _double_sided_maxwell(key, loc, scale, shape, dtype)
@partial(jit, static_argnums=(3, 4))
def _double_sided_maxwell(key, loc, scale, shape, dtype):
params_shapes = lax.broadcast_shapes(np.shape(loc), np.shape(scale))
if not shape:
shape = params_shapes
shape = shape + params_shapes
maxwell_key, rademacher_key = split(key)
maxwell_rvs = maxwell(maxwell_key, shape=shape, dtype=dtype)
# Generate random signs for the symmetric variates.
random_sign = rademacher(rademacher_key, shape=shape, dtype=dtype)
assert random_sign.shape == maxwell_rvs.shape
return random_sign * maxwell_rvs * scale + loc
def weibull_min(key: jnp.ndarray,
scale: RealArray,
concentration: RealArray,
shape: Sequence[int] = (),
dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:
"""Sample from a Weibull distribution.
The scipy counterpart is `scipy.stats.weibull_min`.
Args:
key: a PRNGKey key.
scale: The scale parameter of the distribution.
concentration: The concentration parameter of the distribution.
    shape: The shape of the returned samples; must be broadcast-compatible
      with ``scale`` and ``concentration``.
dtype: The type used for samples.
Returns:
A jnp.array of samples.
"""
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `weibull_min` must be a float "
f"dtype, got {dtype}")
dtype = dtypes.canonicalize_dtype(dtype)
shape = core.canonicalize_shape(shape)
return _weibull_min(key, scale, concentration, shape, dtype)
@partial(jit, static_argnums=(3, 4))
def _weibull_min(key, scale, concentration, shape, dtype):
random_uniform = uniform(
key=key, shape=shape, minval=0, maxval=1, dtype=dtype)
# Inverse weibull CDF.
return jnp.power(-jnp.log1p(-random_uniform), 1.0/concentration) * scale
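# A NumPy sketch of the inverse-CDF transform used above: if U ~ Uniform(0, 1),
# then scale * (-log(1 - U)) ** (1 / concentration) has the weibull_min law.
# Purely illustrative; the arguments are arbitrary example values.
def _example_weibull_inverse_cdf(u=0.5, scale=2.0, concentration=1.5):
  return scale * (-np.log1p(-u)) ** (1.0 / concentration)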
| avg_line_length: 37.923415 | max_line_length: 121 | alphanum_fraction: 0.675412 |
hexsha: 4a1c8b0b4b10c228bc48d7183480144734b19537 | size: 89 | ext: py | lang: Python
max_stars_repo: proxy/core/apps.py @ VitorCapuano/nac-proxy-investimento (9974ddee533d1601339df7a4d76b57677051350b) | licenses: ["Apache-2.0"] | max_stars_count: null
max_issues_repo: proxy/core/apps.py @ VitorCapuano/nac-proxy-investimento (9974ddee533d1601339df7a4d76b57677051350b) | licenses: ["Apache-2.0"] | max_issues_count: 3 | 2020-02-11T21:52:37.000Z to 2021-03-19T21:45:33.000Z
max_forks_repo: proxy/core/apps.py @ vcapss/nac-proxy-investimento (9974ddee533d1601339df7a4d76b57677051350b) | licenses: ["Apache-2.0"] | max_forks_count: null
from django.apps import AppConfig
class CoreConfig(AppConfig):
name = 'proxy.core'
| avg_line_length: 14.833333 | max_line_length: 33 | alphanum_fraction: 0.741573 |
hexsha: 4a1c8b7042d3fb948ba246f6a9443dc48912d420 | size: 144 | ext: py | lang: Python
max_stars_repo: lab2/mymetaclasses/lab2_task14/ModelField.py @ kinpa200296/python_labs (bb26c426cbe9bb27f45b8ee4c974c38db300468f) | licenses: ["MIT"] | max_stars_count: null
max_issues_repo, max_forks_repo: same path, repo and head | licenses: ["MIT"] | max_issues_count: null | max_forks_count: null
#!/usr/bin/env python
__author__ = 'kinpa200296'
class ModelField(object):
@classmethod
def convert(cls, value):
return None
| avg_line_length: 14.4 | max_line_length: 28 | alphanum_fraction: 0.666667 |
hexsha: 4a1c8c6ebdcc95293573babfed6c48d4304bb938 | size: 1744 | ext: py | lang: Python
max_stars_repo: imitation_agent.py @ kdliu00/ROAR (17856ee47d842ed8342c34154696a4b10b916891) | licenses: ["Apache-2.0"] | max_stars_count: null
max_issues_repo, max_forks_repo: same path, repo and head | licenses: ["Apache-2.0"] | max_issues_count: null | max_forks_count: null
from ROAR.agent_module.agent import Agent
from pathlib import Path
from ROAR.control_module.pid_controller import PIDController
from ROAR.planning_module.local_planner.simple_waypoint_following_local_planner import \
SimpleWaypointFollowingLocalPlanner
from ROAR.planning_module.behavior_planner.behavior_planner import BehaviorPlanner
from ROAR.planning_module.mission_planner.waypoint_following_mission_planner import WaypointFollowingMissionPlanner
from ROAR.utilities_module.data_structures_models import SensorsData
from ROAR.utilities_module.vehicle_models import VehicleControl, Vehicle
import logging
import cv2
import numpy as np
class ImitationAgent(Agent):
def __init__(self, model, steer_model, **kwargs):
super().__init__(**kwargs)
self.model = model
self.steer_model = steer_model
def run_step(self, vehicle: Vehicle,
sensors_data: SensorsData) -> VehicleControl:
super(ImitationAgent, self).run_step(vehicle=vehicle,
sensors_data=sensors_data)
image = sensors_data.front_rgb.data
input_image = np.array([cv2.resize(image, (160, 120),
interpolation=cv2.INTER_LANCZOS4)])
output = self.model.predict(input_image, batch_size=None)
steer_output = self.steer_model.predict(input_image, batch_size=None)
control = VehicleControl()
control.throttle = 0.7
if round(steer_output[0][0].item()) == 0:
print("Not steering")
control.steering = 0.0
else:
print("Steering")
control.steering = output[0][0].item()
print(f"Control: {control}\n")
return control
| avg_line_length: 38.755556 | max_line_length: 115 | alphanum_fraction: 0.694954 |
hexsha: 4a1c8c8916d6ab79fff383acf26b88b02492e49e | size: 3320 | ext: py | lang: Python
max_stars_repo: weboa/utils/Processing.py @ lonagi/weboa (01fcfffb945b0c77f9e365f07fafe33fe39d52cd) | licenses: ["Apache-2.0"] | max_stars_count: null
max_issues_repo, max_forks_repo: same path, repo and head | licenses: ["Apache-2.0"] | max_issues_count: null | max_forks_count: null
import sys, io, lesscpy, sass, glob
from shutil import copy2
from shutil import copytree as copytree2
from weboa import os, json
from weboa.utils import Meta, FileSystem
from .Printer import *
from six import StringIO
class Processing(Meta.meta,FileSystem.filesystem):
def __init__(self, path = "../", BUILDFOLDER = ""):
self.path = path
self.BUILDFOLDER = BUILDFOLDER
self.os = sys.platform
Meta.meta.Weboa_Add("build_folder",self.BUILDFOLDER)
Meta.meta.Weboa_Add("rpath", self.path)
if(self.os in ["Windows","win32","win64","win"]):
self.os = "Windows"
@staticmethod
def correct_path(path):
        if os.name == 'nt':
path = path.replace("/", "\\")
path = path.replace("\\\\","\\")
else:
path = path.replace("\\\\", "/")
path = path.replace("\\", "/")
return path
@staticmethod
def minify(folder,filetype):
for f in glob.glob(folder+"/*."+filetype):
Printer.log("Minify file " + f)
with open(f, "r") as file:
code = file.read()
code = code.replace("\n", " ")
with open(f, "w") as file:
file.write(code)
@staticmethod
def pre_css(_weboa, i, precss="less"):
with open(i, "r") as f:
prep = f.read()
try:
if precss == "less":
css = lesscpy.compile(StringIO(prep), minify=True)
elif precss in ("sass", "scss"):
css = sass.compile(string=prep, output_style="compressed")
except:
return False
with open(i[:-4] + "css", "w") as f:
f.write(css)
_weboa[precss][i] = os.stat(i).st_mtime
Processing.Weboa_Save(_weboa)
return True
@staticmethod
def Save_Path(_path):
try:
with open(".weboa", "r") as f:
dweboa = json.loads(f.read())
except FileNotFoundError:
Printer.log("Add .weboa file")
dweboa = Processing.Weboa_Init()
except json.decoder.JSONDecodeError:
Printer.warning("json .weboa file is empty!")
dweboa = Processing.Weboa_Init()
dweboa["path"] = _path
Processing.Weboa_Save(dweboa)
Printer.log("Save the project path")
def Folder_Create(self, foldername):
try:
os.mkdir(self.path+self.BUILDFOLDER+foldername)
return True
except FileExistsError:
Printer.warning(f"Folder {foldername} exists.")
return False
def File_Create(self, filename, text=""):
# Creating a file at specified location
with io.open(os.path.join(self.path, self.BUILDFOLDER)+filename, 'w', encoding="utf-8") as f:
f.write(text)
def copy(self, src, dst):
copy2(self.path + src, os.path.join(self.path, self.BUILDFOLDER) + dst)
def copytree(self, src, dst):
try:
copytree2(self.path + src, os.path.join(self.path, self.BUILDFOLDER) + dst)
except FileExistsError:
pass
def Trime(self, text):
return text.replace("\t", "").replace(" ", "")
def Delete_Lines(self, text):
return text.replace("\n", "")
| avg_line_length: 31.923077 | max_line_length: 101 | alphanum_fraction: 0.552711 |
hexsha: 4a1c8d0a117b77229367928a938c25d0b460989c | size: 2162 | ext: py | lang: Python
max_stars_repo: ProjectApplication/project_core/migrations/0052_countryuid_model.py @ code-review-doctor/project-application (d85b40b69572efbcda24ce9c40803f76d8ffd192) | licenses: ["MIT"] | max_stars_count: 5 | 2020-07-29T10:00:11.000Z to 2022-02-19T11:00:34.000Z
max_issues_repo: same path, repo and head | licenses: ["MIT"] | max_issues_count: 471 | 2019-09-20T14:37:28.000Z to 2022-03-25T14:16:34.000Z
max_forks_repo: same path, repo and head | licenses: ["MIT"] | max_forks_count: 5 | 2020-03-15T12:42:47.000Z to 2022-02-15T18:06:52.000Z
# Generated by Django 2.2.6 on 2019-10-22 15:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('project_core', '0051_createmodify_country'),
]
operations = [
migrations.RemoveField(
model_name='country',
name='source',
),
migrations.CreateModel(
name='CountryUid',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(auto_now_add=True, help_text='Date and time at which the entry was created')),
('modified_on', models.DateTimeField(auto_now=True, help_text='Date and time at which the entry was modified', null=True)),
('uid', models.CharField(help_text='Unique identifier', max_length=150, null=True)),
('created_by', models.ForeignKey(blank=True, help_text='User by which the entry was created', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='project_core_countryuid_created_by_related', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, help_text='User by which the entry was modified', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='project_core_countryuid_modified_by_related', to=settings.AUTH_USER_MODEL)),
('source', models.ForeignKey(help_text='Source of the UID', on_delete=django.db.models.deletion.PROTECT, to='project_core.Source')),
],
options={
'abstract': False,
'unique_together': {('uid', 'source')},
},
),
migrations.AddField(
model_name='country',
name='uid',
field=models.ForeignKey(default=None, help_text='Source of country name', on_delete=django.db.models.deletion.PROTECT, to='project_core.CountryUid'),
preserve_default=False,
),
]
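# Note (added): with a standard Django setup this migration would be applied via
# the usual management command; the exact invocation below is an assumption:
#   python manage.py migrate project_core 0052_countryuid_model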
| 50.27907 | 258 | 0.654487 |
4a1c8df21657a2bde287c3fceeed1329e9257f44
| 3,520 |
py
|
Python
|
cloudkitty/utils/validation.py
|
elastx/cloudkitty
|
9654373f12daad606bfabac70a48b68279d522bd
|
[
"Apache-2.0"
] | 97 |
2015-10-18T02:53:17.000Z
|
2022-03-07T05:15:39.000Z
|
cloudkitty/utils/validation.py
|
shanafang9/cloudkitty
|
911c90569ccb09ecf0d7aa11a5a707c8ebda09cf
|
[
"Apache-2.0"
] | 1 |
2017-11-29T15:39:27.000Z
|
2017-11-29T15:39:27.000Z
|
cloudkitty/utils/validation.py
|
shanafang9/cloudkitty
|
911c90569ccb09ecf0d7aa11a5a707c8ebda09cf
|
[
"Apache-2.0"
] | 54 |
2015-10-27T10:55:02.000Z
|
2022-02-18T08:23:19.000Z
|
# Copyright 2019 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Common utils for voluptuous schema validation"""
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import functools
import voluptuous
class DictTypeValidator(object):
"""Voluptuous helper validating dict key and value types.
When possible, keys and values will be converted to the required type.
This behaviour can be disabled through the `cast` param.
:param key_type: type of the dict keys
:param value_type: type of the dict values
:param cast: Set to False if you do not want to cast elements to the
required type.
:type cast: bool
:rtype: dict
"""
def __init__(self, key_type, value_type, cast=True):
if cast:
self._kval = voluptuous.Coerce(key_type)
self._vval = voluptuous.Coerce(value_type)
else:
def __type_validator(type_, elem):
if not isinstance(elem, type_):
raise voluptuous.Invalid(
"{e} is not of type {t}".format(e=elem, t=type_))
return elem
self._kval = functools.partial(__type_validator, key_type)
self._vval = functools.partial(__type_validator, value_type)
def __call__(self, item):
try:
return {self._kval(k): self._vval(v)
for k, v in dict(item).items()}
except (TypeError, ValueError):
raise voluptuous.Invalid(
"{} can't be converted to dict".format(item))
class IterableValuesDict(DictTypeValidator):
"""Voluptuous helper validating dicts with iterable values.
When possible, keys and elements of values will be converted to the
required type. This behaviour can be disabled through the `cast`
param.
:param key_type: type of the dict keys
:param value_type: type of the dict values
:param cast: Set to False if you do not want to convert elements to the
required type.
:type cast: bool
:rtype: dict
"""
def __init__(self, key_type, value_type, cast=True):
super(IterableValuesDict, self).__init__(key_type, value_type, cast)
# NOTE(peschk_l): Using type(it) to return an iterable of the same
# type as the passed argument.
self.__vval = lambda it: type(it)(self._vval(i) for i in it)
def __call__(self, item):
try:
for v in dict(item).values():
if not isinstance(v, Iterable):
raise voluptuous.Invalid("{} is not iterable".format(v))
return {self._kval(k): self.__vval(v) for k, v in item.items()}
except (TypeError, ValueError) as e:
raise voluptuous.Invalid(
"{} can't be converted to a dict: {}".format(item, e))
def get_string_type():
"""Returns ``basestring`` in python2 and ``str`` in python3."""
return str
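# Minimal usage sketch (added for illustration; the schemas below are examples,
# not taken from cloudkitty's own call sites).
if __name__ == "__main__":
    schema = voluptuous.Schema(DictTypeValidator(str, int))
    print(schema({"cpu": "4", "ram": 8}))          # {'cpu': 4, 'ram': 8}
    iter_schema = voluptuous.Schema(IterableValuesDict(str, float))
    print(iter_schema({"rates": ["1.5", 2]}))      # {'rates': [1.5, 2.0]}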
| 36.666667 | 78 | 0.64375 |
4a1c8e581c171110433c14cfd40bc3dc60426b2f
| 974 |
py
|
Python
|
selection/log_decorator.py
|
tlatkowski/deep-learning-gene-expression
|
47552e8902d9612c87244c26ccc8bfc503a9f9f2
|
[
"MIT"
] | 28 |
2017-11-29T10:41:41.000Z
|
2022-03-10T15:31:01.000Z
|
selection/log_decorator.py
|
NimritaKoul/deep-learning-gene-expression
|
47552e8902d9612c87244c26ccc8bfc503a9f9f2
|
[
"MIT"
] | 3 |
2018-10-27T11:08:00.000Z
|
2019-09-19T12:40:39.000Z
|
selection/log_decorator.py
|
NimritaKoul/deep-learning-gene-expression
|
47552e8902d9612c87244c26ccc8bfc503a9f9f2
|
[
"MIT"
] | 13 |
2017-11-29T10:41:44.000Z
|
2022-02-15T15:19:38.000Z
|
import logging
import os
import time
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class LogDecorator:
def __init__(self, name):
self.__name = name
self.__path = 'features/{}.csv'.format(self.__name)
def __call__(self, f, *args, **kwargs):
if not os.path.exists(self.__path):
logger.info('Running %s feature selection...', self.__name)
start = time.time()
else:
logger.info('Reading %s features from file : [%s]...', self.__name, self.__path)
def new_f(*args, **kwargs):
X = f(*args, **kwargs)
if not os.path.exists(self.__path):
logger.info('Saving %s features to [%s]...', self.__name, self.__path)
                logger.info('%s selection lasted [%s] ms...', self.__name, (time.time() - start) * 1000)
else:
logger.info('Read %s features from file : [%s]...', self.__name, self.__path)
return X
new_f.__name__ = f.__name__
return new_f
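# Usage sketch (added for illustration): the decorated function and the 'pca'
# selector name are made-up examples; the decorator only assumes the callable
# returns the selected feature matrix.
if __name__ == '__main__':
    @LogDecorator('pca')
    def select_features(X):
        return X
    select_features([[1, 2], [3, 4]])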
| 28.647059 | 91 | 0.620123 |
4a1c8e77b8b16cf80ecbb8fa50c8a58f736cfa71
| 1,283 |
py
|
Python
|
scr/trace_expansion.py
|
zmoitier/Scattering_BIE_QPAX
|
a4f9660f570c618f7a14585ab943b0f16c712632
|
[
"MIT"
] | null | null | null |
scr/trace_expansion.py
|
zmoitier/Scattering_BIE_QPAX
|
a4f9660f570c618f7a14585ab943b0f16c712632
|
[
"MIT"
] | null | null | null |
scr/trace_expansion.py
|
zmoitier/Scattering_BIE_QPAX
|
a4f9660f570c618f7a14585ab943b0f16c712632
|
[
"MIT"
] | null | null | null |
""" Trace expansion for ellipses
Author: Zoïs Moitier
Karlsruhe Institute of Technology, Germany
"""
from numpy import pi
from .analytic import eval_field
def trace_neumann_expansion(field):
"""
    u0, u1 = trace_neumann_expansion(field)
    Return the first two terms of the asymptotic expansion of the trace along the
ellipse.
Parameters
----------
field : Field
Field object from the dataclass
Returns
-------
u0 : Function
θ ↦ uⁱⁿᶜ(0, sin(θ))
u1 : Function
θ ↦ cos(θ) ∂ₓ uⁱⁿᶜ(0, sin(θ))
"""
return (
lambda θ: eval_field(field, 0, pi / 2 - θ),
lambda θ: eval_field(field, 0, pi / 2 - θ, d_ξ=1),
)
def trace_dirichlet_expansion(field):
"""
    u0, u1 = trace_dirichlet_expansion(field)
    Return the first two terms of the asymptotic expansion of the trace along the
ellipse.
Parameters
----------
field : Field
Field object from the dataclass
Returns
-------
u0 : Function
θ ↦ cos(θ) ∂ₓ uⁱⁿᶜ(0, sin(θ))
u1 : Function
θ ↦ cos(θ)² ∂ₓₓ uⁱⁿᶜ(0, sin(θ)) + sin(θ) ∂_y uⁱⁿᶜ(0, sin(θ))
"""
return (
lambda θ: eval_field(field, 0, pi / 2 - θ, d_ξ=1),
lambda θ: eval_field(field, 0, pi / 2 - θ, d_ξ=2),
)
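# Usage sketch (added; illustrative only).  Building ``field`` requires the
# package's ``analytic`` module, so its construction is left out here:
#
#   u0, u1 = trace_neumann_expansion(field)
#   u0(0.0), u1(pi / 4)    # evaluate the first two trace terms at given θ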
| 21.032787 | 80 | 0.564302 |
4a1c90b27c386d1f7c5c71d7038bebbbf7e74904
| 8,241 |
py
|
Python
|
virtual/lib/python3.6/site-packages/PIL/ImageColor.py
|
mzazakeith/flask-blog
|
2833404cc5e96ffdbfb767f35b9caf2bdcce7997
|
[
"MIT"
] | 4 |
2018-08-23T14:45:14.000Z
|
2021-05-22T16:12:33.000Z
|
virtual/lib/python3.6/site-packages/PIL/ImageColor.py
|
mzazakeith/flask-blog
|
2833404cc5e96ffdbfb767f35b9caf2bdcce7997
|
[
"MIT"
] | 13 |
2020-01-28T22:20:14.000Z
|
2022-03-11T23:20:14.000Z
|
virtual/lib/python3.6/site-packages/PIL/ImageColor.py
|
mzazakeith/flask-blog
|
2833404cc5e96ffdbfb767f35b9caf2bdcce7997
|
[
"MIT"
] | 2 |
2019-11-30T10:33:16.000Z
|
2021-02-03T06:29:40.000Z
|
#
# The Python Imaging Library
# $Id$
#
# map CSS3-style colour description strings to RGB
#
# History:
# 2002-10-24 fl Added support for CSS-style color strings
# 2002-12-15 fl Added RGBA support
# 2004-03-27 fl Fixed remaining int() problems for Python 1.5.2
# 2004-07-19 fl Fixed gray/grey spelling issues
# 2009-03-05 fl Fixed rounding error in grayscale calculation
#
# Copyright (c) 2002-2004 by Secret Labs AB
# Copyright (c) 2002-2004 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from . import Image
import re
def getrgb(color):
"""
Convert a color string to an RGB tuple. If the string cannot be parsed,
this function raises a :py:exc:`ValueError` exception.
.. versionadded:: 1.1.4
:param color: A color string
:return: ``(red, green, blue[, alpha])``
"""
color = color.lower()
rgb = colormap.get(color, None)
if rgb:
if isinstance(rgb, tuple):
return rgb
colormap[color] = rgb = getrgb(rgb)
return rgb
# check for known string formats
if re.match('#[a-f0-9]{3}$', color):
return (
int(color[1]*2, 16),
int(color[2]*2, 16),
int(color[3]*2, 16),
)
if re.match('#[a-f0-9]{4}$', color):
return (
int(color[1]*2, 16),
int(color[2]*2, 16),
int(color[3]*2, 16),
int(color[4]*2, 16),
)
if re.match('#[a-f0-9]{6}$', color):
return (
int(color[1:3], 16),
int(color[3:5], 16),
int(color[5:7], 16),
)
if re.match('#[a-f0-9]{8}$', color):
return (
int(color[1:3], 16),
int(color[3:5], 16),
int(color[5:7], 16),
int(color[7:9], 16),
)
m = re.match(r"rgb\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color)
if m:
return (
int(m.group(1)),
int(m.group(2)),
int(m.group(3))
)
m = re.match(r"rgb\(\s*(\d+)%\s*,\s*(\d+)%\s*,\s*(\d+)%\s*\)$", color)
if m:
return (
int((int(m.group(1)) * 255) / 100.0 + 0.5),
int((int(m.group(2)) * 255) / 100.0 + 0.5),
int((int(m.group(3)) * 255) / 100.0 + 0.5)
)
m = re.match(r"hsl\(\s*(\d+)\s*,\s*(\d+)%\s*,\s*(\d+)%\s*\)$", color)
if m:
from colorsys import hls_to_rgb
rgb = hls_to_rgb(
float(m.group(1)) / 360.0,
float(m.group(3)) / 100.0,
float(m.group(2)) / 100.0,
)
return (
int(rgb[0] * 255 + 0.5),
int(rgb[1] * 255 + 0.5),
int(rgb[2] * 255 + 0.5)
)
m = re.match(r"rgba\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$",
color)
if m:
return (
int(m.group(1)),
int(m.group(2)),
int(m.group(3)),
int(m.group(4))
)
raise ValueError("unknown color specifier: %r" % color)
def getcolor(color, mode):
"""
Same as :py:func:`~PIL.ImageColor.getrgb`, but converts the RGB value to a
greyscale value if the mode is not color or a palette image. If the string
cannot be parsed, this function raises a :py:exc:`ValueError` exception.
.. versionadded:: 1.1.4
    :param color: A color string
    :param mode: Image mode to convert the result to (e.g. "L", "RGB" or "RGBA")
    :return: ``(graylevel [, alpha]) or (red, green, blue[, alpha])``
"""
# same as getrgb, but converts the result to the given mode
color, alpha = getrgb(color), 255
if len(color) == 4:
color, alpha = color[0:3], color[3]
if Image.getmodebase(mode) == "L":
r, g, b = color
color = (r*299 + g*587 + b*114)//1000
if mode[-1] == 'A':
return (color, alpha)
else:
if mode[-1] == 'A':
return color + (alpha,)
return color
colormap = {
# X11 colour table from https://drafts.csswg.org/css-color-4/, with
# gray/grey spelling issues fixed. This is a superset of HTML 4.0
# colour names used in CSS 1.
"aliceblue": "#f0f8ff",
"antiquewhite": "#faebd7",
"aqua": "#00ffff",
"aquamarine": "#7fffd4",
"azure": "#f0ffff",
"beige": "#f5f5dc",
"bisque": "#ffe4c4",
"black": "#000000",
"blanchedalmond": "#ffebcd",
"blue": "#0000ff",
"blueviolet": "#8a2be2",
"brown": "#a52a2a",
"burlywood": "#deb887",
"cadetblue": "#5f9ea0",
"chartreuse": "#7fff00",
"chocolate": "#d2691e",
"coral": "#ff7f50",
"cornflowerblue": "#6495ed",
"cornsilk": "#fff8dc",
"crimson": "#dc143c",
"cyan": "#00ffff",
"darkblue": "#00008b",
"darkcyan": "#008b8b",
"darkgoldenrod": "#b8860b",
"darkgray": "#a9a9a9",
"darkgrey": "#a9a9a9",
"darkgreen": "#006400",
"darkkhaki": "#bdb76b",
"darkmagenta": "#8b008b",
"darkolivegreen": "#556b2f",
"darkorange": "#ff8c00",
"darkorchid": "#9932cc",
"darkred": "#8b0000",
"darksalmon": "#e9967a",
"darkseagreen": "#8fbc8f",
"darkslateblue": "#483d8b",
"darkslategray": "#2f4f4f",
"darkslategrey": "#2f4f4f",
"darkturquoise": "#00ced1",
"darkviolet": "#9400d3",
"deeppink": "#ff1493",
"deepskyblue": "#00bfff",
"dimgray": "#696969",
"dimgrey": "#696969",
"dodgerblue": "#1e90ff",
"firebrick": "#b22222",
"floralwhite": "#fffaf0",
"forestgreen": "#228b22",
"fuchsia": "#ff00ff",
"gainsboro": "#dcdcdc",
"ghostwhite": "#f8f8ff",
"gold": "#ffd700",
"goldenrod": "#daa520",
"gray": "#808080",
"grey": "#808080",
"green": "#008000",
"greenyellow": "#adff2f",
"honeydew": "#f0fff0",
"hotpink": "#ff69b4",
"indianred": "#cd5c5c",
"indigo": "#4b0082",
"ivory": "#fffff0",
"khaki": "#f0e68c",
"lavender": "#e6e6fa",
"lavenderblush": "#fff0f5",
"lawngreen": "#7cfc00",
"lemonchiffon": "#fffacd",
"lightblue": "#add8e6",
"lightcoral": "#f08080",
"lightcyan": "#e0ffff",
"lightgoldenrodyellow": "#fafad2",
"lightgreen": "#90ee90",
"lightgray": "#d3d3d3",
"lightgrey": "#d3d3d3",
"lightpink": "#ffb6c1",
"lightsalmon": "#ffa07a",
"lightseagreen": "#20b2aa",
"lightskyblue": "#87cefa",
"lightslategray": "#778899",
"lightslategrey": "#778899",
"lightsteelblue": "#b0c4de",
"lightyellow": "#ffffe0",
"lime": "#00ff00",
"limegreen": "#32cd32",
"linen": "#faf0e6",
"magenta": "#ff00ff",
"maroon": "#800000",
"mediumaquamarine": "#66cdaa",
"mediumblue": "#0000cd",
"mediumorchid": "#ba55d3",
"mediumpurple": "#9370db",
"mediumseagreen": "#3cb371",
"mediumslateblue": "#7b68ee",
"mediumspringgreen": "#00fa9a",
"mediumturquoise": "#48d1cc",
"mediumvioletred": "#c71585",
"midnightblue": "#191970",
"mintcream": "#f5fffa",
"mistyrose": "#ffe4e1",
"moccasin": "#ffe4b5",
"navajowhite": "#ffdead",
"navy": "#000080",
"oldlace": "#fdf5e6",
"olive": "#808000",
"olivedrab": "#6b8e23",
"orange": "#ffa500",
"orangered": "#ff4500",
"orchid": "#da70d6",
"palegoldenrod": "#eee8aa",
"palegreen": "#98fb98",
"paleturquoise": "#afeeee",
"palevioletred": "#db7093",
"papayawhip": "#ffefd5",
"peachpuff": "#ffdab9",
"peru": "#cd853f",
"pink": "#ffc0cb",
"plum": "#dda0dd",
"powderblue": "#b0e0e6",
"purple": "#800080",
"rebeccapurple": "#663399",
"red": "#ff0000",
"rosybrown": "#bc8f8f",
"royalblue": "#4169e1",
"saddlebrown": "#8b4513",
"salmon": "#fa8072",
"sandybrown": "#f4a460",
"seagreen": "#2e8b57",
"seashell": "#fff5ee",
"sienna": "#a0522d",
"silver": "#c0c0c0",
"skyblue": "#87ceeb",
"slateblue": "#6a5acd",
"slategray": "#708090",
"slategrey": "#708090",
"snow": "#fffafa",
"springgreen": "#00ff7f",
"steelblue": "#4682b4",
"tan": "#d2b48c",
"teal": "#008080",
"thistle": "#d8bfd8",
"tomato": "#ff6347",
"turquoise": "#40e0d0",
"violet": "#ee82ee",
"wheat": "#f5deb3",
"white": "#ffffff",
"whitesmoke": "#f5f5f5",
"yellow": "#ffff00",
"yellowgreen": "#9acd32",
}
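# Illustrative examples (added; not part of the Pillow source).  The values
# follow directly from the parsing rules above:
#
#   getrgb("#ff8000")              # -> (255, 128, 0)
#   getrgb("rgb(50%, 0%, 100%)")   # -> (128, 0, 255)
#   getrgb("hsl(120, 100%, 50%)")  # -> (0, 255, 0)
#   getcolor("red", "L")           # -> 76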
| 27.841216 | 78 | 0.517534 |
4a1c90d254cc4e8b98de12a39a101d0833b23748
| 3,168 |
py
|
Python
|
data/custom_dataset.py
|
ferodia/MichiGAN
|
a49acb49f9659d7538e62faa3ed08e46afb0ddae
|
[
"MIT"
] | 235 |
2020-07-21T20:47:15.000Z
|
2022-03-24T00:32:18.000Z
|
data/custom_dataset.py
|
ferodia/MichiGAN
|
a49acb49f9659d7538e62faa3ed08e46afb0ddae
|
[
"MIT"
] | 16 |
2020-08-04T06:56:37.000Z
|
2022-03-21T15:25:29.000Z
|
data/custom_dataset.py
|
ferodia/MichiGAN
|
a49acb49f9659d7538e62faa3ed08e46afb0ddae
|
[
"MIT"
] | 31 |
2020-07-21T01:53:22.000Z
|
2022-03-22T11:27:41.000Z
|
"""
Copyright (C) University of Science and Technology of China.
Licensed under the MIT License.
"""
from data.pix2pix_dataset import Pix2pixDataset
from data.image_folder import make_dataset
import random
import os
class CustomDataset(Pix2pixDataset):
""" Dataset that loads images from directories
Use option --label_dir, --image_dir, --instance_dir to specify the directories.
The images in the directories are sorted in alphabetical order and paired in order.
"""
@staticmethod
def modify_commandline_options(parser, is_train):
parser = Pix2pixDataset.modify_commandline_options(parser, is_train)
parser.set_defaults(preprocess_mode='resize_and_crop')
# parser.set_defaults(load_size=load_size)
# parser.set_defaults(crop_size=256)
# parser.set_defaults(display_winsize=256)
parser.set_defaults(label_nc=2)
parser.set_defaults(contain_dontcare_label=False)
# parser.add_argument('--data_dir', type=str, default='/mnt/lvdisk1/tzt/HairSynthesis/SPADE-master/datasets/FFHQ',
# help='path to the directory that contains training & val data')
parser.add_argument('--label_dir', type=str, default='train_labels',
help='path to the directory that contains label images')
parser.add_argument('--image_dir', type=str, default='train_images',
help='path to the directory that contains photo images')
parser.add_argument('--instance_dir', type=str, default='',
help='path to the directory that contains instance maps. Leave black if not exists')
parser.add_argument('--orient_dir', type=str, default='train_dense_orients',
help='path to the directory that contains orientation mask')
parser.add_argument('--clear', type=str, default='',
help='[ |clear_], clear_ means use the selected training data')
return parser
def get_paths(self, opt):
# combine data_dir and others
label_dir = os.path.join(opt.data_dir, opt.clear+opt.label_dir)
image_dir = os.path.join(opt.data_dir, opt.clear+opt.image_dir)
orient_dir = os.path.join(opt.data_dir, opt.clear+opt.orient_dir)
# label_dir = opt.label_dir
label_paths = make_dataset(label_dir, recursive=False, read_cache=True)
# image_dir = opt.image_dir
image_paths = make_dataset(image_dir, recursive=False, read_cache=True)
if len(opt.instance_dir) > 0:
instance_dir = opt.instance_dir
instance_paths = make_dataset(instance_dir, recursive=False, read_cache=True)
else:
instance_paths = []
if len(opt.orient_dir) > 0:
# orient_dir = opt.orient_dir
orient_paths = make_dataset(orient_dir, recursive=False, read_cache=True)
else:
orient_paths = []
        assert len(label_paths) == len(image_paths), \
            "The #images in %s and %s do not match. Is there something wrong?" % (label_dir, image_dir)
return label_paths, image_paths, instance_paths, orient_paths
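# Illustrative invocation (added; not from the repository).  The entry-point
# name (train.py), --dataset_mode and --data_dir values are assumptions; the
# remaining flags mirror the defaults declared in modify_commandline_options:
#
#   python train.py --dataset_mode custom --data_dir datasets/FFHQ \
#       --label_dir train_labels --image_dir train_images \
#       --orient_dir train_dense_orients --label_nc 2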
| 44.619718 | 122 | 0.665404 |
4a1c91182c71546899845ec9d2825fcbb3259610
| 1,393 |
py
|
Python
|
pantam_cli/action.py
|
flmnt/pantam
|
da47d977e69ec410d0642b5ade1f2323c1b6b350
|
[
"MIT"
] | 2 |
2020-10-04T10:29:43.000Z
|
2021-03-30T13:45:09.000Z
|
pantam_cli/action.py
|
flmnt/pantam
|
da47d977e69ec410d0642b5ade1f2323c1b6b350
|
[
"MIT"
] | null | null | null |
pantam_cli/action.py
|
flmnt/pantam
|
da47d977e69ec410d0642b5ade1f2323c1b6b350
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from os import getcwd
from pathlib import Path
import sys
from pantam_cli.utils.filesystem import (
create_file,
make_class_name,
load_pantamrc_file,
)
from pantam_cli.utils.messages import (
info_msg,
error_msg,
success_msg,
write_error,
write_msg,
NewLine,
)
from pantam_cli.utils.templates import action_template
from pantam_cli.utils import clear
def action(action_file: str) -> None:
"""Create action file"""
clear()
options = load_pantamrc_file()
try:
actions_folder = options["actions_folder"]
write_msg(info_msg("Creating %s/%s file..." % (actions_folder, action_file)))
cwd = Path(getcwd())
create_file(
cwd / actions_folder / action_file,
action_template(make_class_name(action_file)),
)
write_msg(
success_msg(" Done!"), NewLine.after,
)
except FileExistsError:
write_msg(
error_msg(" file exists, skipping"), NewLine.after,
)
def run_action(action_file: str) -> None:
"""CLI runner for action()"""
try:
action(action_file)
write_msg(success_msg("Your new action `%s` is ready!" % action_file))
except Exception as error:
write_error(error_msg(str(error)))
if __name__ == "__main__":
file_name = sys.argv[1]
run_action(file_name)
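# Example invocation (added; illustrative).  The action file name is arbitrary;
# running via "python -m" assumes the package is installed or importable:
#
#   python -m pantam_cli.action index.py
#   # or, from Python: run_action("index.py")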
| 24.017241 | 85 | 0.646088 |
4a1c91197664870a0644dbc6e9e3197b297a3c37
| 412 |
py
|
Python
|
.history/my_classes/FunctionParameters/KeywordArgs_20210630185109.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
.history/my_classes/FunctionParameters/KeywordArgs_20210630185109.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
.history/my_classes/FunctionParameters/KeywordArgs_20210630185109.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
''' [Keyword Arguments]
Positional parameters can, optionally be passed as named (keyword) arguments
def func(a, b, c):
# code
func(1, 2, 3) -> a = 1, b = 2, c = 3
func(a=1, c=3, b=2) -> a = 1, b = 2, c = 3
Using named arguments in this case is entirely up to the caller.
[Mandatory Keyword Arguments]
I can make keyword arguments mandatory
to do so, I can exhaust the positional parameters with *args, or put a bare * in the
parameter list so that everything after it becomes keyword-only, as sketched below.
'''
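# Sketch added to illustrate the note above: a bare * makes every parameter
# that follows it keyword-only.
def func(a, b, *, d):
    return a, b, d
func(1, 2, d=3)    # OK -> (1, 2, 3)
# func(1, 2, 3)    # TypeError: func() takes 2 positional arguments but 3 were given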
| 20.6 | 76 | 0.589806 |
4a1c918301acc361996b739c4dd63c9214eb993c
| 1,887 |
py
|
Python
|
mesos_viewer/popup.py
|
alimaken/mesos-viewer
|
1e6170cf7222b93e37d910477c735daf7f081bb9
|
[
"MIT"
] | 2 |
2019-05-01T08:20:23.000Z
|
2020-05-12T04:27:39.000Z
|
mesos_viewer/popup.py
|
alimaken/mesos-viewer
|
1e6170cf7222b93e37d910477c735daf7f081bb9
|
[
"MIT"
] | 1 |
2019-04-23T11:56:17.000Z
|
2019-04-23T11:56:17.000Z
|
mesos_viewer/popup.py
|
alimaken/mesos-viewer
|
1e6170cf7222b93e37d910477c735daf7f081bb9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import urwid
class Popup(urwid.WidgetWrap):
"""
Creates a popup menu on top of another BoxWidget.
Attributes:
selected -- Contains the item the user has selected by pressing <RETURN>,
or None if nothing has been selected.
"""
selected = None
def __init__(self, menu_list, attr, pos, body):
"""
menu_list -- a list of strings with the menu entries
attr -- a tuple (background, active_item) of attributes
pos -- a tuple (x, y), position of the menu widget
body -- widget displayed beneath the message widget
"""
content = [w for w in menu_list]
# Calculate width and height of the menu widget:
height = len(menu_list)
width = 0
for entry in menu_list:
base_widget = entry.base_widget
base_widget_len = (
(len(base_widget[0].text) + len(base_widget[1].text)) \
if type(base_widget) == urwid.container.Columns \
else len(base_widget.text))
if base_widget_len > width:
width = base_widget_len + 10
# Create the ListBox widget and put it on top of body:
self._listbox = urwid.AttrWrap(urwid.ListBox(content), attr[0])
overlay = urwid.Overlay(self._listbox, body, 'center',
width + 2, 'middle', height)
urwid.WidgetWrap.__init__(self, overlay)
def keypress(self, size, key):
"""
<RETURN> key selects an item, other keys will be passed to
the ListBox widget.
"""
if key == "enter":
(widget, foo) = self._listbox.get_focus()
(text, foo) = widget.get_text()
self.selected = text[1:] # Get rid of the leading space...
else:
return self._listbox.keypress(size, key)
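# Usage sketch (added; illustrative): the palette attribute names and the body
# widget are assumptions, not taken from the original application.
#
#   entries = [urwid.AttrWrap(urwid.Text(" Open"), None, "focus"),
#              urwid.AttrWrap(urwid.Text(" Quit"), None, "focus")]
#   popup = Popup(entries, ("popup_bg", "focus"), (0, 0), body=main_view)
#   # run it inside an urwid.MainLoop and read popup.selected afterwards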
| 32.534483 | 77 | 0.573397 |
4a1c92ce97507667e8bafa6b2f85245b538f7594
| 2,889 |
py
|
Python
|
cellfinder_core/tools/tiff.py
|
npeschke/cellfinder-core
|
7a86a7d2c879c94da529ec6140f7e5c3f02bf288
|
[
"BSD-3-Clause"
] | 5 |
2021-01-22T11:40:01.000Z
|
2021-09-10T07:16:05.000Z
|
cellfinder_core/tools/tiff.py
|
npeschke/cellfinder-core
|
7a86a7d2c879c94da529ec6140f7e5c3f02bf288
|
[
"BSD-3-Clause"
] | 38 |
2021-01-22T11:50:29.000Z
|
2022-03-11T11:04:06.000Z
|
cellfinder_core/tools/tiff.py
|
npeschke/cellfinder-core
|
7a86a7d2c879c94da529ec6140f7e5c3f02bf288
|
[
"BSD-3-Clause"
] | 12 |
2021-06-18T09:57:24.000Z
|
2022-03-06T13:03:18.000Z
|
import natsort
from os import listdir
from os.path import isfile, join
from imlib.cells.cells import Cell, UntypedCell
class TiffList(object):
"""This class represents list of tiff files. These tiff files are the
output from the cell extractor plugin
and are used as training and classification data.
"""
def __init__(self, ch1_list, channels, label=None):
"""A list of tiff files output by the cell extractor plugin to be
used in machine learning.
Expects file names to end with "ch[ch].tif", where [ch] is the
non-zero-padded channel index.
Given a list of tiff files for the first channel, it will find the
corresponding files for the
channels passed in the [channels] parameter.
:param ch1_list: List of the tiff files of the first channel.
:param channels: List of channels to use.
:param label: Label of the directory (e.g. 2 for cell, 1 for no cell).
Can be ignored on classification runs.
"""
self.ch1_list = natsort.natsorted(ch1_list)
self.label = label
self.channels = channels
def make_tifffile_list(self):
"""
:return: Returns the relevant tiff files as a list of TiffFile objects.
"""
files = [
f
for f in self.ch1_list
if f.lower().endswith("ch" + str(self.channels[0]) + ".tif")
]
tiff_files = [
TiffFile(tiffFile, self.channels, self.label) for tiffFile in files
]
return tiff_files
class TiffDir(TiffList):
"""A simplified version of TiffList that uses all tiff files without
any filtering.
"""
def __init__(self, tiff_dir, channels, label=None):
super(TiffDir, self).__init__(
[
join(tiff_dir, f)
for f in listdir(tiff_dir)
if f.lower().endswith("ch" + str(channels[0]) + ".tif")
],
channels,
label,
)
class TiffFile(object):
"""This class represents a multichannel tiff file, with one individual
file per channel.
"""
def __init__(self, path, channels, label=None):
self.path = path
self.channels = channels
self.label = label
def files_exist(self):
return all([isfile(tif) for tif in self.img_files])
def as_cell(self, force_typed=True):
if force_typed:
return (
Cell(self.path, -1)
if self.label is None
else Cell(self.path, self.label)
)
else:
return (
UntypedCell(self.path)
if self.label is None
else Cell(self.path, self.label)
)
@property
def img_files(self):
return [self.path[:-5] + str(ch) + ".tif" for ch in self.channels]
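# Usage sketch (added; illustrative -- the directory, channels and label are
# placeholders): wrap every "...ch1.tif" file in a directory as a typed Cell.
#
#   tiff_files = TiffDir("/data/extracted_cells", channels=[1, 2], label=2).make_tifffile_list()
#   cells = [t.as_cell() for t in tiff_files if t.files_exist()]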
| 29.783505 | 79 | 0.585324 |
4a1c9318f4801e7767efad6e4c6648816524b504
| 3,427 |
py
|
Python
|
ime/utils/tools.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1 |
2022-02-25T05:34:44.000Z
|
2022-02-25T05:34:44.000Z
|
ime/utils/tools.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | null | null | null |
ime/utils/tools.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1 |
2022-03-30T07:20:29.000Z
|
2022-03-30T07:20:29.000Z
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of common functions."""
import os
import random
import numpy as np
import torch
def seed_torch(seed=42):
"""Defines a function to fix the seed of different random variables for reproducability.
Args:
seed: an integer
"""
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def add_gaussian_noise(tensor, mean=0.1, std=1., device='cpu'):
"""Function that add noise to a given tensor.
Args:
tensor: An input tensor
mean: Gaussian noise mean
std: Gaussian noise std
device: device used to store tensor
Returns:
tensor: A new tensor with added noise
"""
return tensor + torch.randn(tensor.size()).to(device) * std + mean
class EarlyStopping:
"""Class to montior the progress of the model and stop early if no improvement on validation set."""
def __init__(self, patience=7, verbose=False, delta=0):
"""Initializes parameters for EarlyStopping class.
Args:
patience: an integer
verbose: a boolean
delta: a float
"""
self.patience = patience
self.verbose = verbose
self.counter = 0
self.best_score = None
self.early_stop = False
self.val_loss_min = np.Inf
self.delta = delta
def __call__(self, val_loss, model, path):
"""Checks if the validation loss is better than the best validation loss.
    If so, the model is saved.
    If not, the EarlyStopping counter is increased.
Args:
val_loss: a float representing validation loss
model: the trained model
path: a string representing the path to save the model
"""
score = -val_loss
if self.best_score is None:
self.best_score = score
self.save_checkpoint(val_loss, model, path)
elif score < self.best_score + self.delta:
self.counter += 1
print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
if self.counter >= self.patience:
self.early_stop = True
else:
self.early_stop = False
else:
self.best_score = score
self.save_checkpoint(val_loss, model, path)
self.counter = 0
def save_checkpoint(self, val_loss, model, path):
"""Saves the model and updates the best validation loss.
Args:
val_loss: a float representing validation loss
model: the trained model
path: a string representing the path to save the model
"""
if self.verbose:
print(
f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...'
)
torch.save(model.state_dict(), path + '/' + 'checkpoint.pth')
self.val_loss_min = val_loss
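# Illustrative training-loop sketch (added; the model, the fake validation
# losses and the checkpoint directory '.' are stand-ins, not project code).
if __name__ == '__main__':
  seed_torch(42)
  early_stopping = EarlyStopping(patience=2, verbose=True)
  model = torch.nn.Linear(4, 1)
  for epoch, val_loss in enumerate([1.0, 0.8, 0.85, 0.9, 0.95]):
    early_stopping(val_loss, model, path='.')
    if early_stopping.early_stop:
      print('stopping early at epoch', epoch)
      break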
| 29.042373 | 102 | 0.6904 |
4a1c93502a56d64bad555514380421cfc4b88db4
| 39,760 |
py
|
Python
|
lolstaticdata/champions/pull_champions_wiki.py
|
meraki-analytics/lolstaticdata
|
3ebd99feb58fa8ea69e1b3068a76aa3e8d803eab
|
[
"MIT"
] | 24 |
2020-05-16T16:33:19.000Z
|
2022-03-26T16:35:15.000Z
|
lolstaticdata/champions/pull_champions_wiki.py
|
meraki-analytics/lolstaticdata
|
3ebd99feb58fa8ea69e1b3068a76aa3e8d803eab
|
[
"MIT"
] | 17 |
2020-05-21T14:20:41.000Z
|
2021-04-26T12:42:23.000Z
|
lolstaticdata/champions/pull_champions_wiki.py
|
meraki-analytics/lolstaticdata
|
3ebd99feb58fa8ea69e1b3068a76aa3e8d803eab
|
[
"MIT"
] | 14 |
2020-05-24T18:09:11.000Z
|
2021-10-04T15:11:30.000Z
|
from typing import Tuple, List, Union, Iterator, Dict
import re
from bs4 import BeautifulSoup
from collections import Counter
from slpp import slpp as lua
from datetime import datetime
from ..common.modelcommon import (
DamageType,
Health,
HealthRegen,
Mana,
ManaRegen,
Armor,
MagicResistance,
AttackDamage,
AbilityPower,
AttackSpeed,
AttackRange,
Movespeed,
Lethality,
CooldownReduction,
GoldPer10,
HealAndShieldPower,
Lifesteal,
MagicPenetration,
Stat,
)
from ..common.utils import (
download_soup,
parse_top_level_parentheses,
grouper,
to_enum_like,
download_json,
)
from .modelchampion import (
Champion,
Stats,
Ability,
AttackType,
AttributeRatings,
Cooldown,
Cost,
Effect,
Price,
Resource,
Modifier,
Role,
Leveling,
Skin,
Chroma,
Description,
Rarities,
)
class UnparsableLeveling(Exception):
pass
class HTMLAbilityWrapper:
def __init__(self, soup):
self.soup = soup
self.table = self.soup.find_all(["th", "td"])
# Do a little html modification based on the "viewsource"
strip_table = [item.text.strip() for item in self.table]
start = strip_table.index("Parameter") + 3
self.table = self.table[start:]
self.data = {}
for i, (parameter, value, desc) in enumerate(grouper(self.table, 3)):
if not value:
continue
if i == 0: # parameter is '1' for some reason but it's the ability name
parameter = "name"
else:
parameter = parameter.text.strip()
# desc = desc.text.strip()
text = value.text.strip()
if text:
self.data[parameter] = value
def __getitem__(self, item):
return self.data[item].text.strip()
def __delitem__(self, item):
del self.data[item]
def get(self, item, backup=None):
try:
return self[item]
except KeyError:
return backup
def get_source(self, item, backup=None):
try:
return self.data[item]
except KeyError:
return backup
def __str__(self):
d = {}
for key in self.data:
d[key] = self[key]
return str(d)
class LolWikiDataHandler:
MISSING_SKILLS = {
"Annie": ["Command Tibbers"],
"Jinx": ["Switcheroo! 2"],
"Lillia": ["Prance"],
"Mordekaiser": ["Indestructible 2"],
"Nidalee": ["Aspect of the Cougar 2"],
"Pyke": ["Death from Below 2"],
"Rumble": ["Electro Harpoon 2"],
"Samira": ["splash coin"],
"Shaco": ["Command: Hallucinate"],
"Syndra": ["Force of Will 2"],
"Taliyah": ["Seismic Shove 2"],
}
def __init__(self, use_cache: bool = True):
self.use_cache = use_cache
def check_ability(self, data):
for x in data:
if data[x] in self.abil_test:
continue
else:
return False
def get_champions(self) -> Iterator[Champion]:
# Download the page source
url = "https://leagueoflegends.fandom.com/wiki/Module:ChampionData/data"
html = download_soup(url, self.use_cache)
soup = BeautifulSoup(html, "lxml")
# Pull the relevant champData from the html tags
spans = soup.find("pre", {"class": "mw-code mw-script"})
start = None
spans = spans.text.split("\n")
for i, span in enumerate(spans):
if str(span) == "return {":
start = i
spans[i] = "{"
split_stuff = re.compile("({)|(})")
spans = spans[start:]
for i, span in enumerate(spans):
if span in ["-- </pre>", "-- [[Category:Lua]]"]:
spans[i] = ""
spans = "".join(spans)
data = lua.decode(spans)
# Return the champData as a list of Champions
self.skin_data = self._get_skins()
for name, d in data.items():
print(name)
if name in [
"Kled & Skaarl",
"GnarBig",
"Mega Gnar",
]:
continue
if name in ["Kled"]:
# champion = self._render_champion_data(name, d)
d["skill_i"] = {1: d["skills"][1], 2: d["skills"][2]}
d["skill_q"] = {1: d["skills"][3], 2: d["skills"][4]}
d["skill_e"] = {1: d["skills"][6], 2: d["skills"][7]}
d["skill_r"] = {1: d["skills"][8], 2: d["skills"][9]}
if (
d["id"] == 9999
or d["date"] == "Upcoming"
or datetime.strptime(d["date"], "%Y-%m-%d") > datetime.today()
): # Champion not released yet
continue
champion = self._render_champion_data(name, d)
yield champion
def _render_champion_data(self, name: str, data: Dict) -> Champion:
adaptive_type = data["adaptivetype"]
if adaptive_type.upper() in ("PHYSICAL", "MIXED,PHYSICAL"):
adaptive_type = "PHYSICAL_DAMAGE"
if adaptive_type.upper() in ("MAGIC",):
adaptive_type = "MAGIC_DAMAGE"
if adaptive_type.upper() in ("MIXED",):
adaptive_type = "MIXED_DAMAGE"
if data["patch"][0] == "V":
patch = data["patch"][1:]
else:
patch = data["patch"]
sale = self._get_sale()
sale_price = 0
if name in sale:
if sale[name]["price"] != 0:
sale_price = int(sale[name]["price"])
champion = Champion(
id=data["id"],
key=data["apiname"],
name=name,
title=data["title"],
full_name=data.get("fullname", ""),
icon=None,
resource=Resource.from_string(data["resource"]),
attack_type=AttackType.from_string(data["rangetype"]),
adaptive_type=DamageType.from_string(adaptive_type),
stats=Stats(
health=Health(
flat=data["stats"]["hp_base"],
per_level=data["stats"]["hp_lvl"],
),
health_regen=HealthRegen(
flat=data["stats"]["hp5_base"],
per_level=data["stats"]["hp5_lvl"],
),
mana=Mana(
flat=data["stats"]["mp_base"],
per_level=data["stats"]["mp_lvl"],
),
mana_regen=ManaRegen(
flat=data["stats"]["mp5_base"],
per_level=data["stats"]["mp5_lvl"],
),
armor=Armor(
flat=data["stats"]["arm_base"],
per_level=data["stats"]["arm_lvl"],
),
magic_resistance=MagicResistance(
flat=data["stats"]["mr_base"],
per_level=data["stats"]["mr_lvl"],
),
attack_damage=AttackDamage(
flat=data["stats"]["dam_base"],
per_level=data["stats"]["dam_lvl"],
),
attack_speed=AttackSpeed(
flat=data["stats"]["as_base"],
per_level=data["stats"]["as_lvl"],
),
attack_speed_ratio=Stat(flat=data["stats"]["as_ratio"]),
attack_cast_time=Stat(
flat=data["stats"].get("attack_cast_time", 0.3)
), # I don't know if this default is correct, but going off the values the wiki provides, it seems reasonable.
attack_total_time=Stat(flat=data["stats"].get("attack_total_time", 1.6)), # ibid
attack_delay_offset=Stat(flat=data["stats"].get("attack_delay_offset", 0)),
attack_range=AttackRange(
flat=data["stats"]["range"],
per_level=data["stats"].get("range_lvl", 0),
),
critical_strike_damage=Stat(flat=data["stats"].get("crit_base", 200)),
critical_strike_damage_modifier=Stat(flat=data["stats"].get("crit_base", 1.0)),
movespeed=Movespeed(flat=data["stats"]["ms"]),
acquisition_radius=Stat(flat=data["stats"].get("acquisition_radius", 800)),
selection_radius=Stat(flat=data["stats"].get("selection_radius", 100)),
pathing_radius=Stat(flat=data["stats"].get("pathing_radius", 35)),
gameplay_radius=Stat(flat=data["stats"].get("gameplay_radius", 65)),
aram_damage_taken=Stat(flat=data["stats"].get("aram_dmg_taken", 1.0)),
aram_damage_dealt=Stat(flat=data["stats"].get("aram_dmg_dealt", 1.0)),
aram_healing=Stat(flat=data["stats"].get("aram_healing", 1.0)),
aram_shielding=Stat(flat=data["stats"].get("aram_shielding", 1.0)),
urf_damage_taken=Stat(flat=data["stats"].get("urf_dmg_taken", 1.0)),
urf_damage_dealt=Stat(flat=data["stats"].get("urf_dmg_dealt", 1.0)),
urf_healing=Stat(flat=data["stats"].get("urf_healing", 1.0)),
urf_shielding=Stat(flat=data["stats"].get("urf_shielding", 1.0)),
),
roles=sorted(
{
*(Role.from_string(r) for r in data["role"]),
*(
Role.from_string(role)
for role in (
data.get("herotype"),
data.get("alttype"),
)
if role is not None and role != ""
),
}
),
attribute_ratings=AttributeRatings(
damage=data["damage"],
toughness=data["toughness"],
control=data["control"],
mobility=data["mobility"],
utility=data["utility"],
ability_reliance=data["style"],
attack=data["attack"],
defense=data["defense"],
magic=data["magic"],
difficulty=data["difficulty"],
),
abilities=dict(
[
self._render_abilities(
champion_name=name,
abilities=[
self._pull_champion_ability(champion_name=name, ability_name=ability_name)
for ability_name in data["skill_i"].values()
if not (
name in LolWikiDataHandler.MISSING_SKILLS
and ability_name in LolWikiDataHandler.MISSING_SKILLS[name]
)
],
),
self._render_abilities(
champion_name=name,
abilities=[
self._pull_champion_ability(champion_name=name, ability_name=ability_name)
for ability_name in data["skill_q"].values()
if not (
name in LolWikiDataHandler.MISSING_SKILLS
and ability_name in LolWikiDataHandler.MISSING_SKILLS[name]
)
],
),
self._render_abilities(
champion_name=name,
abilities=[
self._pull_champion_ability(champion_name=name, ability_name=ability_name)
for ability_name in data["skill_w"].values()
if not (
name in LolWikiDataHandler.MISSING_SKILLS
and ability_name in LolWikiDataHandler.MISSING_SKILLS[name]
)
],
),
self._render_abilities(
champion_name=name,
abilities=[
self._pull_champion_ability(champion_name=name, ability_name=ability_name)
for ability_name in data["skill_e"].values()
if not (
name in LolWikiDataHandler.MISSING_SKILLS
and ability_name in LolWikiDataHandler.MISSING_SKILLS[name]
)
],
),
self._render_abilities(
champion_name=name,
abilities=[
self._pull_champion_ability(champion_name=name, ability_name=ability_name)
for ability_name in data["skill_r"].values()
if not (
name in LolWikiDataHandler.MISSING_SKILLS
and ability_name in LolWikiDataHandler.MISSING_SKILLS[name]
)
],
),
]
),
release_date=data["date"],
            release_patch=patch,  # leading "V" already removed above
patch_last_changed=data["changes"][1:], # remove the leading "V"
price=Price(rp=data["rp"], blue_essence=data["be"], sale_rp=sale_price),
lore="",
skins=self._get_champ_skin(name, sale),
)
# "nickname": "nickname",
# "disp_name": "dispName",
return champion
def _pull_champion_ability(self, champion_name, ability_name) -> HTMLAbilityWrapper:
ability_name = ability_name.replace(" ", "_")
# Pull the html from the wiki
# print(f" {ability_name}")
url = f"https://leagueoflegends.fandom.com/wiki/Template:Data_{champion_name}/{ability_name}"
# temporary fix for pyke passive
if url in "https://leagueoflegends.fandom.com/wiki/Template:Data_Pyke/Gift_of_the_Drowned_Ones":
url = "https://leagueoflegends.fandom.com/wiki/User:Dryan426/Sandbox"
html = download_soup(url, self.use_cache)
soup = BeautifulSoup(html, "lxml")
return HTMLAbilityWrapper(soup)
def _render_abilities(self, champion_name, abilities: List[HTMLAbilityWrapper]) -> Tuple[str, List[Ability]]:
inputs, abilities = abilities, [] # rename variables
skill_key = inputs[0]["skill"]
for data in inputs:
_skill_key = data["skill"]
if champion_name == "Aphelios" and data["name"] in (
"Calibrum",
"Severum",
"Gravitum",
"Infernum",
"Crescendum",
):
_skill_key = "I"
if champion_name == "Gnar" and data["name"] in ("Boulder Toss",):
_skill_key = "Q"
assert _skill_key == skill_key
if champion_name == "Pyke" and _skill_key == "I":
del data[
"Cost"
] # This is a weird one... There's an embedded table that doesn't get parsed right. It overwrites 'cost', but luckily that isn't an issue because 'cost' is empty.
if data.get("Cost") is not None:
raise ValueError(data)
nvalues = 5 if _skill_key in ("Q", "W", "E") else 3
if champion_name == "Aphelios" and _skill_key == "I":
nvalues = 6
elif champion_name == "Heimerdinger":
nvalues = None
elif champion_name == "Janna" and _skill_key == "I":
nvalues = 2
elif champion_name == "Sona" and _skill_key in ("Q", "W", "E"):
nvalues = None
elif champion_name == "Jayce":
nvalues = 6
elif champion_name == "Karma":
nvalues = None
elif champion_name == "Kindred" and _skill_key == "I":
nvalues = 2
elif champion_name == "Nidalee":
nvalues = None
elif champion_name == "Udyr":
nvalues = 6
elif champion_name == "Yuumi" and _skill_key == "Q":
nvalues = 6
ability_cost = data.get("cost")
cooldown = data.get("cooldown", data.get("static"))
damage_type = data.get("damagetype")
if damage_type is not None:
damage_type = to_enum_like(damage_type)
if "/" in damage_type:
damage_type = "MIXED_DAMAGE"
elif damage_type == "PHYSICAL":
damage_type = "PHYSICAL_DAMAGE"
elif damage_type == "MAGIC":
damage_type = "MAGIC_DAMAGE"
elif damage_type == "TRUE":
damage_type = "TRUE_DAMAGE"
elif damage_type == "PURE":
damage_type = "PURE_DAMAGE"
else:
damage_type = "OTHER_DAMAGE"
damage_type = DamageType.from_string(damage_type)
resource = data.get("costtype")
if resource is not None:
resource = to_enum_like(resource)
if resource in (
"MANA",
"NO_COST",
"HEALTH",
"MAXIMUM_HEALTH",
"ENERGY",
"CURRENT_HEALTH",
"HEALTH_PER_SECOND",
"MANA_PER_SECOND",
"CHARGE",
"FURY",
):
pass
elif resource in (
"MANA_+_4_FOCUS",
"MANA_+_4_FROST_STACKS",
"MANA_+_6_CHARGES",
"MANA_+_1_SAND_SOLDIER",
"MANA_+_40_/_45_/_50_/_55_/_60_PER_SECOND",
"MAXIMUM_HEALTH_+_50_/_55_/_60_/_65_/_70_MANA",
"MANA_+_1_TURRET_KIT",
"MANA_+_1_MISSILE",
"MANA_+_1_CHARGE",
"MANA_+_ALL_CHARGES",
):
resource = "MANA"
elif resource == "OF_CURRENT_HEALTH":
resource = "CURRENT_HEALTH"
elif resource == "%_OF_CURRENT_HEALTH":
resource = "CURRENT_HEALTH"
elif resource == "CURRENT_GRIT":
resource = "GRIT"
elif resource == "CURRENT_FURY":
resource = "FURY"
elif resource == "FURY_EVERY_0.5_SECONDS":
resource = "FURY"
else:
resource = "OTHER"
resource = Resource.from_string(resource)
projectile = data.get("projectile")
if projectile:
projectile = to_enum_like(projectile)
recharge_rate = data.get("recharge")
if recharge_rate:
_, recharge_rate = ParsingAndRegex.regex_simple_flat(recharge_rate, nvalues) # ignore units
effects = []
for ending in ["", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]:
description = data.get(f"description{ending}")
            while description and "  " in description:
                description = description.replace("  ", " ")
leveling = data.get_source(f"leveling{ending}")
leveling = self._render_levelings(leveling, nvalues) if leveling else []
if description or leveling:
effects.append(Effect(description=description, leveling=leveling))
ability = Ability(
name=data["name"],
icon=data.get(f"icon{ending}"),
effects=effects,
cost=self._render_ability_cost(ability_cost, nvalues) if ability_cost else None,
cooldown=self._render_ability_cooldown(cooldown, "static" in data.data, nvalues) if cooldown else None,
targeting=data.get("targeting"),
affects=data.get("affects"),
spellshieldable=data.get("spellshield"),
resource=resource,
damage_type=damage_type,
spell_effects=data.get("spelleffects"),
projectile=projectile,
on_hit_effects=data.get("onhiteffects"),
occurrence=data.get("occurrence"),
blurb=data.get("blurb"),
notes=data.get("notes") if data.get("notes") != "* No additional notes." else None,
missile_speed=data.get("missile_speed"),
recharge_rate=recharge_rate,
collision_radius=data.get("collision radius"),
tether_radius=data.get("tether radius"),
on_target_cd_static=data.get("ontargetcdstatic"),
inner_radius=data.get("inner radius"),
speed=data.get("speed"),
width=data.get("width"),
angle=data.get("angle"),
cast_time=data.get("cast time"),
effect_radius=data.get("effect radius"),
target_range=data.get("target range"),
)
if ability.notes is not None and ability.notes.startswith("*"):
ability.notes = ability.notes[1:].strip()
abilities.append(ability)
if skill_key == "I":
skill_key = "P"
# Check for duplicate abilities
hashes = []
unique_abilities = []
for ability in abilities:
h = hash(str(ability))
if h not in hashes:
hashes.append(h)
unique_abilities.append(ability)
return skill_key, unique_abilities
def _render_levelings(self, html: BeautifulSoup, nvalues: int) -> List[Leveling]:
# Do some pre-processing on the html
if not isinstance(html, str):
html = str(html)
html = html.replace("</dt>", "\n</dt>")
html = html.replace("</dd>", "\n</dd>")
html = BeautifulSoup(html, "lxml")
html = html.text.strip()
while "\n\n" in html:
html = html.replace("\n\n", "\n")
while " " in html:
html = html.replace(" ", " ")
levelings = html.replace("\xa0", " ")
# Get ready
results = []
# Let's parse!
initial_split = levelings.split("\n")
initial_split = [
lvling.strip()
for lvling in initial_split
if lvling.strip()
not in (
"Takedown scales with Aspect of the Cougar's rank",
"Swipe scales with Aspect of the Cougar's rank",
"Pounce scales with Aspect of the Cougar's rank",
"Cougar form's abilities rank up when Aspect of the Cougar does",
)
]
initial_split = list(grouper(initial_split, 2))
for attribute, data in initial_split:
if attribute.endswith(":"):
attribute = attribute[:-1]
result = self._render_leveling(attribute, data, nvalues)
results.append(result)
return results
def _render_leveling(self, attribute: str, data: str, nvalues: int) -> Leveling:
modifiers = self._render_modifiers(data, nvalues)
leveling = Leveling(
attribute=attribute,
modifiers=modifiers,
)
return leveling
def _render_modifiers(self, mods: str, nvalues: int) -> List[Modifier]:
modifiers = [] # type: List[Modifier]
try:
parsed_modifiers = ParsingAndRegex.split_modifiers(mods)
except Exception as error:
print("ERROR: FAILURE TO SPLIT MODIFIER")
print("ERROR:", error)
return modifiers
for lvling in parsed_modifiers:
try:
modifier = self._render_modifier(lvling, nvalues)
modifiers.append(modifier)
except Exception as error:
print(f"ERROR: FAILURE TO PARSE MODIFIER: {lvling}")
print("ERROR:", error)
while " " in lvling:
lvling = lvling.replace(" ", " ")
value = 0
if lvling.lower() == "Siphoning Strike Stacks".lower(): # Nasus
value = 1
if lvling.lower() == "increased by 3% per 1% of health lost in the past 4 seconds".lower(): # Ekko
value = 3
lvling = "% per 1% of health lost in the past 4 seconds"
modifier = Modifier(
values=[value for _ in range(nvalues)],
units=[lvling for _ in range(nvalues)],
)
modifiers.append(modifier)
return modifiers
def _render_modifier(self, mod: str, nvalues: int) -> Modifier:
units, values = ParsingAndRegex.get_modifier(mod, nvalues)
modifier = Modifier(
values=values,
units=units,
)
return modifier
def _render_ability_cost(self, mods: str, nvalues: int) -> Cost:
modifiers = self._render_modifiers(mods, nvalues)
cost = Cost(modifiers=modifiers)
return cost
def _render_ability_cooldown(self, mods: str, static_cooldown: bool, nvalues: int) -> Cooldown:
modifiers = self._render_modifiers(mods, nvalues)
cooldown = Cooldown(
modifiers=modifiers,
affected_by_cdr=not static_cooldown,
)
return cooldown
def _get_sale(self):
get_prices = re.compile(r"(\d+) (\d+)")
url = f"https://leagueoflegends.fandom.com/wiki/Sales"
# temporary fix for pyke passive
html = download_soup(url, False)
soup = BeautifulSoup(html, "lxml")
spans = soup.findAll("div", {"class": "skin_portrait skin-icon"})
sale = {}
for i in spans:
prices = get_prices.findall(i.text)
champion = i["data-champion"]
if not sale.get(champion):
sale[champion] = {}
sale[champion]["price"] = 0
skin = i["data-skin"]
if skin != "":
sale[champion][skin] = prices[0][1]
else:
sale[champion]["price"] = prices[0][1]
return sale
def _get_skin_id(self, id, skin_id):
if skin_id < 10:
id_test = str(id) + "00" + str(skin_id)
elif skin_id >= 10 and skin_id < 100:
id_test = str(id) + "0" + str(skin_id)
else:
id_test = str(id) + str(skin_id)
# If a single champion gets over 1k skin ids tell Dan he was wrong to think that it would never happen
return id_test
def _get_chroma_attribs(self, id, name):
if "chromas" in self.cdragDict[0]:
for c in self.cdragDict[0]["chromas"]:
if int(id) == c["id"]:
descriptions = []
rarities = []
if c["descriptions"]:
for desc in c["descriptions"]:
descriptions.append(Description(desc["description"], desc["region"]))
else:
descriptions.append(Description(None, None))
if c["rarities"]:
for rarity in c["rarities"]:
rarities.append(Rarities(rarity["rarity"], rarity["region"]))
else:
rarities.append(Rarities(None, None))
chroma = Chroma(
name=name,
id=c["id"],
chroma_path=self._get_skin_path(c["chromaPath"]),
colors=c["colors"],
descriptions=descriptions,
rarities=rarities,
)
return chroma
def _get_skins(self):
url = f"https://leagueoflegends.fandom.com/wiki/Module:SkinData/data"
html = download_soup(url, False)
soup = BeautifulSoup(html, "lxml")
# Pull the relevant champData from the html tags
spans = soup.find("pre", {"class": "mw-code mw-script"})
start = None
spans = spans.text.split("\n")
for i, span in enumerate(spans):
if str(span) == "return {":
start = i
spans[i] = "{"
spans = spans[start:]
test1 = re.compile("\w -- \w|.\w--\w|\w --\w|.\w--\s")
for i, span in enumerate(spans):
if span in ["-- </pre>", "-- [[Category:Lua]]"]:
spans[i] = ""
if re.search(test1, span):
test2 = re.search(test1, span)
spans[i] = span.replace(test2.group()[2] + test2.group()[3], " ")
span = spans[i]
comment_start = span.find("--")
# text = text.replace("-", " ")
if comment_start > -1:
spans[i] = span[:comment_start]
spans = "".join(spans)
skin_data = lua.decode(spans)
return skin_data
def _get_skin_path(self, path):
if "/assets/ASSETS" in path:
path = path.split("ASSETS")[1]
path = path.lower()
path = "https://raw.communitydragon.org/pbe/plugins/rcp-be-lol-game-data/global/default/assets" + path
return path
base_url = "http://raw.communitydragon.org/pbe/plugins/rcp-be-lol-game-data/global/default/v1"
# /lol-game-data/assets/v1/champion-chroma-images/32/32014.png
path = path.split("v1")[1]
return base_url + path
def _get_champ_skin(self, name, sale):
"""
Pulls champion skin data from wiki and cdragon
"""
champ_data = self.skin_data[name]["skins"]
skins = []
champ_id = self.skin_data[name]["id"]
cdragon = "http://raw.communitydragon.org/pbe/plugins/rcp-be-lol-game-data/global/default/v1/champions/{0}.json".format(
champ_id
)
cdrag_json = download_json(cdragon, False)
for s in champ_data:
# Default values for LOL Wiki attributes
if champ_data[s]["id"] == None:
continue
skin_ID = self._get_skin_id(champ_id, champ_data[s]["id"])
new_effects = False
new_recall = False
new_animations = False
new_voice = False
new_quotes = False
chromas = []
distribution = None
sets = []
format_name = s
voice_actors = []
splash_arist = []
loot_eligible = True
lore = None
cdragon_ids = []
self.cdragDict = [i for i in cdrag_json["skins"] if i["id"] == int(skin_ID)] # Cdragon Dict
for skin in cdrag_json["skins"]:
cdragon_ids.append(skin["id"])
if int(skin_ID) not in cdragon_ids:
continue
# cdragon attributes
is_base = self.cdragDict[0]["isBase"]
splash_path = self._get_skin_path(self.cdragDict[0]["splashPath"])
uncentered_splash_path = self._get_skin_path(self.cdragDict[0]["uncenteredSplashPath"])
tile_path = self._get_skin_path(self.cdragDict[0]["tilePath"])
load_screen_path = self._get_skin_path(self.cdragDict[0]["loadScreenPath"])
if "loadScreenVintagePath" in self.cdragDict[0]:
load_screen_vintage_path = self._get_skin_path(self.cdragDict[0]["loadScreenVintagePath"])
else:
load_screen_vintage_path = None
rarity = self.cdragDict[0]["rarity"][1:]
if "neweffects" in champ_data[s]:
new_effects = True
if "newrecall" in champ_data[s]:
new_recall = True
if "newanimations" in champ_data[s]:
new_animations = True
if "newquotes" in champ_data[s]:
new_quotes = True
if "newvoice" in champ_data[s]:
new_voice = True
if "chromas" in champ_data[s]:
for chroma in champ_data[s]["chromas"]:
chromas.append(
self._get_chroma_attribs(
self._get_skin_id(champ_id, champ_data[s]["chromas"][chroma]["id"]),
chroma,
)
)
if "distribution" in champ_data[s]:
distribution = champ_data[s]["distribution"]
if "set" in champ_data[s]:
for set in champ_data[s]["set"]:
sets.append(set)
if "formatname" in champ_data[s]:
format_name = champ_data[s]["formatname"]
if "voiceactor" in champ_data[s]:
for va in champ_data[s]["voiceactor"]:
voice_actors.append(va)
if "lore" in champ_data[s]:
lore = champ_data[s]["lore"]
if "splashartist" in champ_data[s]:
for sa in champ_data[s]["splashartist"]:
splash_arist.append(sa)
if "looteligible" in champ_data[s]:
loot_eligible = champ_data[s]["looteligible"]
if "release" in champ_data[s]:
if "N/A" in champ_data[s]["release"]:
timestamp = "0000-00-00"
else:
timestamp = champ_data[s]["release"]
sale_rp = 0
if name in sale:
if s in sale[name]:
sale_rp = sale[name][s]
skin = Skin(
name=s,
id=int(skin_ID),
availability=champ_data[s]["availability"],
format_name=format_name,
loot_eligible=loot_eligible,
cost=champ_data[s]["cost"],
sale=int(sale_rp),
release=timestamp,
distribution=distribution,
set=sets,
new_effects=new_effects,
new_animations=new_animations,
new_recall=new_recall,
voice_actor=voice_actors,
splash_artist=splash_arist,
chromas=chromas,
lore=lore,
new_quotes=new_quotes,
new_voice=new_voice,
is_base=is_base,
splash_path=splash_path,
uncentered_splash_path=uncentered_splash_path,
tile_path=tile_path,
load_screen_path=load_screen_path,
load_screen_vintage_path=load_screen_vintage_path,
rarity=rarity,
)
skins.append(skin)
return skins
class ParsingAndRegex:
rc_scaling = re.compile(r"(\(\+.+?\))")
r_number = r"(\d+\.?\d*)"
rc_number = re.compile(r_number)
rc_based_on_level = re.compile(r"(\d+\.?\d*) ?− ?(\d+\.?\d*) \(based on level\)")
@staticmethod
def regex_slash_separated(string: str, nvalues: int) -> Tuple[List[str], List[Union[int, float]]]:
for i in range(20, 1, -1):
regex = " / ".join([ParsingAndRegex.r_number for _ in range(i)])
result = re.findall(regex, string)
if result:
assert len(result) == 1
result = result[0]
parsed = " / ".join([f"{{{j}}}" for j in range(i)]).format(*result)
not_parsed = string.split(parsed)
values = [eval(r) for r in result]
# Special case...
if nvalues == 3 and len(values) == 5:
values = [values[0], values[2], values[4]]
if nvalues is not None and len(values) != nvalues:
print(f"WARNING: Unexpected number of modifier values: {values} (expected {nvalues})")
return not_parsed, values
raise ValueError(f"Could not parse slash-separated string: {string}")
@staticmethod
def parse_based_on_level(start, stop):
# e.g. 5 − 139 (based on level)
delta = (stop - start) / 17.0
values = [start + i * delta for i in range(18)]
return values
@staticmethod
def regex_simple_flat(string: str, nvalues: int) -> Tuple[List[str], List[Union[int, float]]]:
numbers = ParsingAndRegex.rc_number.findall(string)
if "/" in string:
return ParsingAndRegex.regex_slash_separated(string, nvalues)
elif len(ParsingAndRegex.rc_based_on_level.findall(string)) > 0:
level = ParsingAndRegex.rc_based_on_level.findall(string)
assert len(level) == 1
start, stop = level[0]
start, stop = eval(start), eval(stop)
values = ParsingAndRegex.parse_based_on_level(start, stop)
parsed = f"{start} − {stop} (based on level)"
not_parsed = string.split(parsed)
assert len(not_parsed) >= 2
if len(not_parsed) != 2: # see below
not_parsed = not_parsed[0], parsed.join(not_parsed[1:])
assert len(values) == 18
return not_parsed, values
elif len(numbers) - len(re.findall(r" per \d", string)) == 1 + string.count("(+ "):
number = numbers[0]
not_parsed = string.split(number)
assert len(not_parsed) >= 2
if len(not_parsed) != 2: # Fix e.g. `15 per 150 AP`
not_parsed = not_parsed[0], number.join(not_parsed[1:])
number = eval(number)
if nvalues is None:
nvalues = len(numbers)
values = [number for _ in range(nvalues)]
assert len(values) == nvalues
return not_parsed, values
raise UnparsableLeveling(f"Could not parse a simple flat value: {string}")
@staticmethod
def get_units(not_parsed: List[str]) -> str:
assert len(not_parsed) == 2
assert not_parsed[0] == ""
return not_parsed[1]
@staticmethod
def get_modifier(mod: str, nvalues: int) -> Tuple[List[str], List[Union[int, float]]]:
units, parsed = ParsingAndRegex.regex_simple_flat(mod, nvalues)
units = ParsingAndRegex.get_units(units)
units = [units for _ in range(len(parsed))]
return units, parsed
@staticmethod
def split_modifiers(mods: str) -> List[str]:
flat, scalings = ParsingAndRegex.get_scalings(mods)
if " + " in flat:
flat = flat.split(" + ")
else:
flat = [flat]
return flat + scalings
@staticmethod
def get_scalings(numbers: str):
scalings = ParsingAndRegex.rc_scaling.findall(numbers)
if scalings:
scalings = parse_top_level_parentheses(numbers)
scalings = [scaling for scaling in scalings if scaling != "(based on level)"]
for scaling in scalings:
numbers = numbers.replace(scaling, "").strip() # remove the scaling part of the string for processing later
scalings = [x.strip() for x in scalings]
for i, scaling in enumerate(scalings):
if scaling.startswith("(") and scaling.endswith(")"):
scaling = scaling[1:-1].strip()
if scaling.startswith("+"):
scaling = scaling[1:].strip()
scalings[i] = scaling
return numbers, scalings
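# Editor's sketch (not part of the original module): a quick demonstration of the
# helpers above on made-up stat strings, to show the expected return shapes.
def _demo_parsing_and_regex():
    # Slash-separated rank values: text around the numbers plus the numeric list.
    not_parsed, values = ParsingAndRegex.regex_simple_flat("10 / 20 / 30 units", 3)
    assert values == [10, 20, 30]
    assert ParsingAndRegex.get_units(not_parsed) == " units"
    # "x − y (based on level)" values are interpolated across the 18 champion levels.
    per_level = ParsingAndRegex.parse_based_on_level(5, 139)
    assert len(per_level) == 18 and per_level[0] == 5 and per_level[-1] == 139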
| 39.879639 | 179 | 0.511268 |
4a1c9554ec10485779e0e007f397ea17c597160a | 3,131 | py | Python | day/seven/test_main.py | stupoid/aoc-2020 | dc72f81c7e0150baeb208bf5470a4cb9d79864d9 | ["MIT"] | null | null | null | day/seven/test_main.py | stupoid/aoc-2020 | dc72f81c7e0150baeb208bf5470a4cb9d79864d9 | ["MIT"] | null | null | null | day/seven/test_main.py | stupoid/aoc-2020 | dc72f81c7e0150baeb208bf5470a4cb9d79864d9 | ["MIT"] | null | null | null |
from day.seven.main import (
generate_lookup,
generate_reverse_lookup,
get_bags_in_bag,
parse,
find_bag,
)
def test_parse():
input_file = open("day/seven/test_input.txt", "r")
rules = [parse(i.strip()) for i in input_file]
assert rules == [
["light red", (1, "bright white"), (2, "muted yellow")],
["dark orange", (3, "bright white"), (4, "muted yellow")],
["bright white", (1, "shiny gold")],
["muted yellow", (2, "shiny gold"), (9, "faded blue")],
["shiny gold", (1, "dark olive"), (2, "vibrant plum")],
["dark olive", (3, "faded blue"), (4, "dotted black")],
["vibrant plum", (5, "faded blue"), (6, "dotted black")],
["faded blue"],
["dotted black"],
]
def test_generate_lookup():
input_file = open("day/seven/test_input.txt", "r")
rules = [parse(i.strip()) for i in input_file]
assert generate_lookup(rules) == {
"light red": {"bright white": 1, "muted yellow": 2},
"dark orange": {"bright white": 3, "muted yellow": 4},
"bright white": {"shiny gold": 1},
"muted yellow": {"shiny gold": 2, "faded blue": 9},
"shiny gold": {"dark olive": 1, "vibrant plum": 2},
"dark olive": {"faded blue": 3, "dotted black": 4},
"vibrant plum": {"faded blue": 5, "dotted black": 6},
"faded blue": 0,
"dotted black": 0,
}
def test_generate_reverse_lookup():
input_file = open("day/seven/test_input.txt", "r")
rules = [parse(i.strip()) for i in input_file]
assert generate_reverse_lookup(rules) == {
"bright white": {"light red": 1, "dark orange": 3},
"muted yellow": {"light red": 2, "dark orange": 4},
"shiny gold": {"bright white": 1, "muted yellow": 2},
"faded blue": {"muted yellow": 9, "dark olive": 3, "vibrant plum": 5},
"dark olive": {"shiny gold": 1},
"vibrant plum": {"shiny gold": 2},
"dotted black": {"dark olive": 4, "vibrant plum": 6},
}
def test_find_bag():
input_file = open("day/seven/test_input.txt", "r")
rules = [parse(i.strip()) for i in input_file]
assert find_bag("shiny gold", generate_reverse_lookup(rules)) == {
"bright white",
"muted yellow",
"dark orange",
"light red",
}
assert find_bag("dark olive", generate_reverse_lookup(rules)) == {
"shiny gold",
"bright white",
"muted yellow",
"dark orange",
"light red",
}
def test_count_bags():
input_file = open("day/seven/test_input.txt", "r")
rules = [parse(i.strip()) for i in input_file]
lookup_table = generate_lookup(rules)
assert get_bags_in_bag("dark olive", lookup_table) == 7
assert get_bags_in_bag("faded blue", lookup_table) == 0
assert get_bags_in_bag("vibrant plum", lookup_table) == 11
assert get_bags_in_bag("shiny gold", lookup_table) == 32
input_file = open("day/seven/test_input_2.txt", "r")
rules = [parse(i.strip()) for i in input_file]
lookup_table = generate_lookup(rules)
assert get_bags_in_bag("shiny gold", lookup_table) == 126
| 35.988506 | 78 | 0.582881 |
4a1c95fde0b1322c8c643a4d68d290da609d9db3 | 6,241 | py | Python | Proyecto_parqueadero_Ean.py | Matieljimenez/Proyecto_parqueadero_Ean | b75d370eea6ad1760c0f248e4aee3d2660d7a7be | ["MIT"] | null | null | null | Proyecto_parqueadero_Ean.py | Matieljimenez/Proyecto_parqueadero_Ean | b75d370eea6ad1760c0f248e4aee3d2660d7a7be | ["MIT"] | null | null | null | Proyecto_parqueadero_Ean.py | Matieljimenez/Proyecto_parqueadero_Ean | b75d370eea6ad1760c0f248e4aee3d2660d7a7be | ["MIT"] | null | null | null |
print("¡¡BIENVENIDO AL PARQUEADERO DE LA UNIVERSIDAD EAN!! ")
usuario=int(input("Si eres empleado del parqueadero ingresa 1 si eres usuario 0:"))
if(usuario==1):
lista=["lorena","1234"]
while True:
empleado=input("Digite Usuario: ")
if(empleado==lista[0]):
while (empleado==lista[0])==True:
contraseña=input("Digite contraseña: ")
if(contraseña==lista[1]):
print("Bienvenido ")
break
else:
print("Contraseña incorrecta")
break
else:
print("Usuario incorrecto")
if(usuario==0):
n=input("Ingrese su nombre completo: ")
while True:
cd=input("Ingrese su número de cédula: ")
try:
cd=int(cd)
if(cd>0):
break
else:
print("El número de cedula debe ser positivo")
except:
print("Ingrese solo numero su cédula ")
while True:
tipo=input("1-->para carro , 2-->para moto , 3-->para bicicleta: ")
try:
tipo=int(tipo)
if(tipo==1 or tipo==2 or tipo==3):
break
else:
print("Número fuera del rango, ingrese un número nuevamente ")
except:
print("Ingrese solo números del 1 al 3")
if(tipo==1 or tipo==2):
placa=(input("Ingrese placa: "))
print(placa[0],placa[1],placa[2])
print(placa[3],placa[4],placa[5])
else:
registro=(input("Ingrese número de registro: "))
print(registro[0],registro[1],registro[2])
print(registro[3],registro[4],registro[5])
print(registro[6],registro[7])
from datetime import datetime
fecha_entrada=input("Ingrese la fecha separada por (/) de la entrada del vehiculo: ").split("/")
dia_e,mes_e,año_e=fecha_entrada
dia_e=int(dia_e)
fecha_salida=input("Ingrese la fecha separada por (/) de la salida del vehiculo: ").split("/")
dia_s,mes_s,año_s=fecha_salida
dia_s=int(dia_s)
dias=dia_s-dia_e
Hora_entrada=input("Ingrese la hora militar separada por (:) de la entrada del vehiculo ").split(":")
h_e,m_e=Hora_entrada
h_e=int(h_e)
m_e=int(m_e)
Hora_salida=input("Ingrese la hora militar separada por (:) de la salida del vehiculo ").split(":")
h_s,m_s=Hora_salida
h_s=int(h_s)
m_s=int(m_s)
hora_entrada=str(h_e)+":"+str(m_e)+":00"
hora_salida=str(h_s)+":"+str(m_s)+":00"
formato_hora='%H:%M:%S'
he=datetime.strptime(hora_entrada,formato_hora)
hs=datetime.strptime(hora_salida,formato_hora)
if dias==0:
total_horas=hs-he
h=str(total_horas)
horas=h.split(':')
horas_posicion=int(horas[0])
minutos_posicion=int(horas[1])
horas_minutos=horas_posicion*60
TotalMinutos=minutos_posicion+horas_minutos
print('Total tiempo transcurrido fue: '+str(TotalMinutos)+" Minutos")
else:
if dias==1:
hrealsalida=hora_salida.split(':')
hora_salida="23:59:59"
hs=datetime.strptime(hora_salida,formato_hora)
totalhoras=hs-he
seconds=int(totalhoras.total_seconds())
horasd2=int(hrealsalida[0])
horasd2*=3600
minutosd2=int(hrealsalida[1])
minutosd2*=60
segundosd2=int(hrealsalida[2])
totalsegundosd2=(horasd2+minutosd2+segundosd2+1)
seconds+=totalsegundosd2
timehoras=seconds//3600
segundosrestantes=seconds%3600
timeminutos=segundosrestantes//60
timeseconds=segundosrestantes%60
Horas_Minutos=timehoras*60
TotalMinutos=Horas_Minutos+timeminutos
print('Total tiempo transcurrido fue: '+str(TotalMinutos)+" Minutos")
else:
if dias>=2:
dias-=1
dias*=86400
totalhoras=hs-he
seconds=int(totalhoras.total_seconds())
seconds+=dias
timehoras=seconds//3600
segundosrestantes=seconds%3600
timeminutos=segundosrestantes//60
timeseconds=segundosrestantes%60
Horas_Minutos=timehoras*60
TotalMinutos=Horas_Minutos+timeminutos
print('Total tiempo transcurrido fue: '+str(TotalMinutos)+" Minutos")
cobro_carro=110
cobro_moto=80
cobro_cicla=30
cc=int(input("Ingrese su CC para saber si cuenta con un descuento 🤑 : "))
listaCc=[1003894791,1003894791,1003894791,1003894791]
listaCc.append(cc)
# cuenta cuantas cedulas iguales hay: print(listaCc.count(cc))
if(listaCc.count(cc)==5):
print("tiene descuento del 20% por su fidelidad",n)
if tipo==1:
total=TotalMinutos*cobro_carro-(TotalMinutos*cobro_carro*0.20)
elif tipo==2:
total=TotalMinutos*cobro_moto-(TotalMinutos*cobro_moto*0.20)
elif tipo==3:
total=TotalMinutos*cobro_cicla-(TotalMinutos*cobro_cicla*0.20)
print("El total a pagar es: "+str(total))
else:
print("no tine descuento: ",n)
if tipo==1:
total=TotalMinutos*cobro_carro
elif tipo==2:
total=TotalMinutos*cobro_moto
elif tipo==3:
total=TotalMinutos*cobro_cicla
print("El total a pagar es: "+str(total))
"""
#Lugar para parquear:
parqueo=("Ingrese el piso en el que desea parquear:")
listaparp1c=[[101,0],[102,1],103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,160]
listaparp1c.append()
listaparqp1m=[161,162,163,164,165,166,167,168,169,170]
listaparqp1m.append()
listaparp1b=[171,172,173,174,175,176,177,178,179,180]
listaparqp1b.append()
listaparqueop2c=[201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256,257,258,259,260]
listaparqueop2m=[261,262,263,264,265,266,267,268,269,270]
listaparqueop2b=[271,272,273,274,275,276,277,278,279,280]
listaparqueop3=[301,302,303],304,305,306,307,308,309,310,311,312,313,314,315,316,317,318,319,320,321,322,323,324,325,326,327,328,329,330,331,332,333,334,335,336,337,338,339,340,341,342,343,344,345,346,347,348,349,350,351,352,353,354,355,356,357,358,359,360]
listaparqueop3m=[361,362,363,364,365,366,367,368,369,370]
listaparqueop3b=[371,372,373,374,375,376,377,378,379,380]
"""
print("Dirijase al cajero para realizar el pago ")
print("Muchas gracias")
print("ESPERAMOS LE HAYA GUSTADO NUESTRO SERVICIO")
print("¡¡¡VUELVA PRONTO!!!, FELIZ DÍA ")
| 33.021164 | 257 | 0.683544 |
4a1c96368de5084a71fb5519afee0cd8fcc20697 | 8,685 | py | Python | src/griffe/agents/extensions/base.py | WillDaSilva/griffe | 301384e499d598c0475814c99f3a15d0b8feb587 | ["ISC"] | 29 | 2022-01-31T21:39:28.000Z | 2022-03-24T04:03:27.000Z | src/griffe/agents/extensions/base.py | WillDaSilva/griffe | 301384e499d598c0475814c99f3a15d0b8feb587 | ["ISC"] | 56 | 2022-01-31T20:41:23.000Z | 2022-03-31T19:03:07.000Z | src/griffe/agents/extensions/base.py | WillDaSilva/griffe | 301384e499d598c0475814c99f3a15d0b8feb587 | ["ISC"] | 5 | 2022-02-19T16:58:55.000Z | 2022-03-21T18:28:03.000Z |
"""This module contains the base classes for dealing with extensions."""
from __future__ import annotations
import ast
import enum
from collections import defaultdict
from inspect import isclass
from typing import TYPE_CHECKING, Any, Sequence, Type, Union
from griffe.agents.base import BaseInspector, BaseVisitor
from griffe.agents.nodes import ObjectNode
from griffe.exceptions import ExtensionNotLoadedError
from griffe.importer import dynamic_import
if TYPE_CHECKING:
from griffe.agents.inspector import Inspector
from griffe.agents.visitor import Visitor
class When(enum.Enum):
"""This enumeration contains the different times at which an extension is used.
Attributes:
before_all: For each node, before the visit/inspection.
before_children: For each node, after the visit has started, and before the children visit/inspection.
after_children: For each node, after the children have been visited/inspected, and before finishing the visit/inspection.
after_all: For each node, after the visit/inspection.
"""
before_all: int = 1
before_children: int = 2
after_children: int = 3
after_all: int = 4
class VisitorExtension(BaseVisitor):
"""The node visitor extension base class, to inherit from."""
when: When
def __init__(self) -> None:
"""Initialize the visitor extension."""
super().__init__()
self.visitor: Visitor = None # type: ignore[assignment]
def attach(self, visitor: Visitor) -> None:
"""Attach the parent visitor to this extension.
Parameters:
visitor: The parent visitor.
"""
self.visitor = visitor
def visit(self, node: ast.AST) -> None:
"""Visit a node.
Parameters:
node: The node to visit.
"""
getattr(self, f"visit_{node.kind}", lambda _: None)(node) # type: ignore[attr-defined]
class InspectorExtension(BaseInspector):
"""The object inspector extension base class, to inherit from."""
when: When
def __init__(self) -> None:
"""Initialize the inspector extension."""
super().__init__()
self.inspector: Inspector = None # type: ignore[assignment]
def attach(self, inspector: Inspector) -> None:
"""Attach the parent inspector to this extension.
Parameters:
inspector: The parent inspector.
"""
self.inspector = inspector
def inspect(self, node: ObjectNode) -> None:
"""Inspect a node.
Parameters:
node: The node to inspect.
"""
getattr(self, f"inspect_{node.kind}", lambda _: None)(node)
Extension = Union[VisitorExtension, InspectorExtension]
class Extensions:
"""This class helps iterating on extensions that should run at different times."""
def __init__(self, *extensions: Extension) -> None:
"""Initialize the extensions container.
Parameters:
*extensions: The extensions to add.
"""
self._visitors: dict[When, list[VisitorExtension]] = defaultdict(list)
self._inspectors: dict[When, list[InspectorExtension]] = defaultdict(list)
self.add(*extensions)
def add(self, *extensions: Extension) -> None:
"""Add extensions to this container.
Parameters:
*extensions: The extensions to add.
"""
for extension in extensions:
if isinstance(extension, VisitorExtension):
self._visitors[extension.when].append(extension)
else:
self._inspectors[extension.when].append(extension)
def attach_visitor(self, parent_visitor: Visitor) -> Extensions:
"""Attach a parent visitor to the visitor extensions.
Parameters:
parent_visitor: The parent visitor, leading the visit.
Returns:
Self, conveniently.
"""
for when in self._visitors.keys():
for visitor in self._visitors[when]:
visitor.attach(parent_visitor)
return self
def attach_inspector(self, parent_inspector: Inspector) -> Extensions:
"""Attach a parent inspector to the inspector extensions.
Parameters:
parent_inspector: The parent inspector, leading the inspection.
Returns:
Self, conveniently.
"""
for when in self._inspectors.keys():
for inspector in self._inspectors[when]:
inspector.attach(parent_inspector)
return self
@property
def before_visit(self) -> list[VisitorExtension]:
"""Return the visitors that run before the visit.
Returns:
Visitors.
"""
return self._visitors[When.before_all]
@property
def before_children_visit(self) -> list[VisitorExtension]:
"""Return the visitors that run before the children visit.
Returns:
Visitors.
"""
return self._visitors[When.before_children]
@property
def after_children_visit(self) -> list[VisitorExtension]:
"""Return the visitors that run after the children visit.
Returns:
Visitors.
"""
return self._visitors[When.after_children]
@property
def after_visit(self) -> list[VisitorExtension]:
"""Return the visitors that run after the visit.
Returns:
Visitors.
"""
return self._visitors[When.after_all]
@property
def before_inspection(self) -> list[InspectorExtension]:
"""Return the inspectors that run before the inspection.
Returns:
Inspectors.
"""
return self._inspectors[When.before_all]
@property
def before_children_inspection(self) -> list[InspectorExtension]:
"""Return the inspectors that run before the children inspection.
Returns:
Inspectors.
"""
return self._inspectors[When.before_children]
@property
def after_children_inspection(self) -> list[InspectorExtension]:
"""Return the inspectors that run after the children inspection.
Returns:
Inspectors.
"""
return self._inspectors[When.after_children]
@property
def after_inspection(self) -> list[InspectorExtension]:
"""Return the inspectors that run after the inspection.
Returns:
Inspectors.
"""
return self._inspectors[When.after_all]
builtin_extensions: set[str] = {
"hybrid",
}
def load_extension(extension: str | dict[str, Any] | Extension | Type[Extension]) -> Extension:
"""Load a configured extension.
Parameters:
extension: An extension, with potential configuration options.
Raises:
ExtensionNotLoadedError: When the extension cannot be loaded,
either because the module is not found, or because it does not expose
the Extension attribute. ImportError will bubble up so users can see
the traceback.
Returns:
An extension instance.
"""
if isinstance(extension, (VisitorExtension, InspectorExtension)):
return extension
if isclass(extension) and issubclass(extension, (VisitorExtension, InspectorExtension)): # type: ignore[arg-type]
return extension() # type: ignore[operator]
if isinstance(extension, dict):
import_path, options = next(iter(extension.items()))
else: # we consider it's a string
import_path = str(extension)
options = {}
if import_path in builtin_extensions:
import_path = f"griffe.agents.extensions.{import_path}"
try:
ext_module = dynamic_import(import_path)
except ModuleNotFoundError as error:
raise ExtensionNotLoadedError(f"Extension module '{import_path}' could not be found") from error
except ImportError as error:
raise ExtensionNotLoadedError(f"Error while importing extension module '{import_path}': {error}") from error
try:
return ext_module.Extension(**options)
except AttributeError as error: # noqa: WPS440
raise ExtensionNotLoadedError(f"Extension module '{import_path}' has no 'Extension' attribute") from error
def load_extensions(exts: Sequence[str | dict[str, Any] | Extension | Type[Extension]]) -> Extensions: # noqa: WPS231
"""Load configured extensions.
Parameters:
exts: A sequence of extension, with potential configuration options.
Returns:
An extensions container.
"""
extensions = Extensions()
for extension in exts:
extensions.add(load_extension(extension))
return extensions
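# Editor's sketch (hypothetical, not shipped with griffe): a minimal visitor extension
# wired in through `load_extensions`. The node kind string below assumes kinds follow
# the lowercased AST class name, as the dispatch in `VisitorExtension.visit` above
# suggests.
class _CountFunctionsExtension(VisitorExtension):
    when = When.after_children

    def __init__(self) -> None:
        super().__init__()
        self.count = 0

    def visit_functiondef(self, node: ast.AST) -> None:
        # Invoked once per function definition, after its children were visited.
        self.count += 1


# Usage sketch: `load_extensions([_CountFunctionsExtension])` returns an Extensions
# container whose `after_children_visit` list holds one configured instance.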
| 30.797872 | 129 | 0.653541 |
4a1c976973b2a67a260c3efe014cf5f2eba99d19 | 3,951 | py | Python | api/allennlp_demo/permalinks/api.py | jvstokes/allennlp-demo | f98a72118c3c9e27429d8111fd71e727b647ff89 | ["Apache-2.0"] | 1 | 2021-01-02T12:59:04.000Z | 2021-01-02T12:59:04.000Z | api/allennlp_demo/permalinks/api.py | Przegali/allennlp-demo | f03823d90468b84803a64a87a9cecdac05309838 | ["Apache-2.0"] | null | null | null | api/allennlp_demo/permalinks/api.py | Przegali/allennlp-demo | f03823d90468b84803a64a87a9cecdac05309838 | ["Apache-2.0"] | null | null | null |
from typing import Optional
import psycopg2
from flask import Flask, Response, jsonify, request
from werkzeug.exceptions import BadRequest, InternalServerError, NotFound
from allennlp_demo.permalinks.db import DemoDatabase, PostgresDemoDatabase
from allennlp_demo.permalinks.models import slug_to_int, int_to_slug
from allennlp_demo.common.logs import configure_logging
class PermaLinkService(Flask):
def __init__(self, name: str = "permalinks", db: Optional[DemoDatabase] = None):
super().__init__(name)
configure_logging(self)
self.db = db
if db is None:
self.logger.warning("No database, permalinks are disabled.")
@self.errorhandler(BadRequest)
def handle_400(err: BadRequest):
return jsonify({"error": str(err)}), 400
@self.errorhandler(InternalServerError)
def handle_500(err: InternalServerError) -> Response:
self.logger.error(err)
return jsonify({"error": "Something went wrong."}), 500
@self.route("/", methods=["GET"])
def info():
"""
The simplest of info routes. We can add more here later.
"""
return jsonify({"id": "permalinks"})
@self.route("/<string:slug>")
def get_permalink(slug: str) -> Response:
"""
Find a permalink by slug.
"""
# If we don't have a database configured, there are no permalinks.
if self.db is None:
raise BadRequest("Permalinks are not enabled")
link_id = slug_to_int(slug)
if link_id is None:
# Malformed slug
raise BadRequest(f"Unrecognized permalink: {slug}")
# Fetch the results from the database.
try:
link = self.db.get_result(link_id)
except psycopg2.Error:
self.logger.exception(f"Unable to get results from database: {link_id}")
raise InternalServerError("Database error")
if link is None:
raise NotFound(f"Permalink not found: {slug}")
return jsonify(link._asdict())
@self.route("/", methods=["POST"])
def create_permalink() -> Response:
"""
Creates a new permalink.
"""
# If we don't have a database configured, there are no permalinks.
if self.db is None:
raise BadRequest("Permalinks are not enabled")
request_data = request.json.get("request_data")
if not request_data:
raise BadRequest("Invalid request_data")
# Old models send this field. New models do not.
# TODO: Remove this once all models use the new serving mechanism.
model_name = request.json.get("model_name")
# New models send these fields, but old models do not.
# TODO: Once all models are served via the new mechanism these should be required.
model_id = request.json.get("model_id")
task_name = request.json.get("task_name")
try:
id = self.db.insert_request(
model_name=model_name,
request_data=request_data,
model_id=model_id,
task_name=task_name,
)
return jsonify(int_to_slug(id))
except psycopg2.Error as err:
self.logger.exception("Error saving permalink: %s", err)
raise InternalServerError("Unable to create permalink")
# noop post for image upload, we need an endpoint, but we don't need to save the image
@self.route("/noop", methods=["POST"])
def noop():
return ""
if __name__ == "__main__":
db = PostgresDemoDatabase.from_environment()
app = PermaLinkService(db=db)
app.run(host="0.0.0.0", port=8000)
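# Editor's sketch (hypothetical helper, not part of the original module): exercising
# the behaviour documented above with Flask's test client and no database configured.
def _smoke_test_permalinks_disabled():
    service = PermaLinkService(db=None)
    client = service.test_client()
    assert client.get("/").get_json() == {"id": "permalinks"}
    # Without a database, permalink lookups are rejected with 400 (BadRequest).
    assert client.get("/some-slug").status_code == 400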
| 36.583333 | 94 | 0.587952 |
4a1c978c9f6246ee22c7d2baa4f7a6053ea34aba | 1,097 | py | Python | kubernetes/test/test_v2beta2_horizontal_pod_autoscaler_spec.py | L3T/python | b6e4ae81a2afb49f668a142eb7d1c6e2571ef478 | ["Apache-2.0"] | 2 | 2020-06-21T08:03:18.000Z | 2020-06-21T09:53:29.000Z | kubernetes/test/test_v2beta2_horizontal_pod_autoscaler_spec.py | L3T/python | b6e4ae81a2afb49f668a142eb7d1c6e2571ef478 | ["Apache-2.0"] | null | null | null | kubernetes/test/test_v2beta2_horizontal_pod_autoscaler_spec.py | L3T/python | b6e4ae81a2afb49f668a142eb7d1c6e2571ef478 | ["Apache-2.0"] | 1 | 2020-12-10T07:28:08.000Z | 2020-12-10T07:28:08.000Z |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: release-1.16
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.v2beta2_horizontal_pod_autoscaler_spec import V2beta2HorizontalPodAutoscalerSpec # noqa: E501
from kubernetes.client.rest import ApiException
class TestV2beta2HorizontalPodAutoscalerSpec(unittest.TestCase):
"""V2beta2HorizontalPodAutoscalerSpec unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV2beta2HorizontalPodAutoscalerSpec(self):
"""Test V2beta2HorizontalPodAutoscalerSpec"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes.client.models.v2beta2_horizontal_pod_autoscaler_spec.V2beta2HorizontalPodAutoscalerSpec() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 27.425 | 132 | 0.758432 |
4a1c9932ca3aebc963adb17ea24cce0c0e4856c5 | 13,906 | py | Python | pyqldbsamples/insert_ion_types.py | simonz-bq/amazon-qldb-dmv-sample-python | ca27d40d992e63e5dec8e1a431517fc745f8185c | ["MIT-0"] | null | null | null | pyqldbsamples/insert_ion_types.py | simonz-bq/amazon-qldb-dmv-sample-python | ca27d40d992e63e5dec8e1a431517fc745f8185c | ["MIT-0"] | null | null | null | pyqldbsamples/insert_ion_types.py | simonz-bq/amazon-qldb-dmv-sample-python | ca27d40d992e63e5dec8e1a431517fc745f8185c | ["MIT-0"] | null | null | null |
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# This code expects that you have AWS credentials setup per:
# https://boto3.amazonaws.com/v1/documentation/api/latest/guide/quickstart.html
from datetime import datetime
from decimal import Decimal
from logging import basicConfig, getLogger, INFO
from amazon.ion.simple_types import IonPyBool, IonPyBytes, IonPyDecimal, IonPyDict, IonPyFloat, IonPyInt, IonPyList, \
IonPyNull, IonPySymbol, IonPyText, IonPyTimestamp
from amazon.ion.simpleion import loads
from amazon.ion.symbols import SymbolToken
from amazon.ion.core import IonType
from pyqldbsamples.create_table import create_table
from pyqldbsamples.insert_document import insert_documents
from pyqldbsamples.model.sample_data import convert_object_to_ion
from pyqldbsamples.connect_to_ledger import create_qldb_driver
logger = getLogger(__name__)
basicConfig(level=INFO)
TABLE_NAME = 'IonTypes'
def update_record_and_verify_type(transaction_executor, parameter, ion_object, ion_type):
"""
Update a record in the database table. Then query the value of the record and verify correct ion type saved.
:type transaction_executor: :py:class:`pyqldb.execution.executor.Executor`
:param transaction_executor: An Executor object allowing for execution of statements within a transaction.
:type parameter: :py:class:`amazon.ion.simple_types.IonPyValue`
:param parameter: The Ion value or Python native type that is convertible to Ion for filling in parameters of the
statement.
:type ion_object: :py:obj:`IonPyBool`/:py:obj:`IonPyBytes`/:py:obj:`IonPyDecimal`/:py:obj:`IonPyDict`
/:py:obj:`IonPyFloat`/:py:obj:`IonPyInt`/:py:obj:`IonPyList`/:py:obj:`IonPyNull`
/:py:obj:`IonPySymbol`/:py:obj:`IonPyText`/:py:obj:`IonPyTimestamp`
:param ion_object: The Ion object to verify against.
:type ion_type: :py:class:`amazon.ion.core.IonType`
:param ion_type: The Ion type to verify against.
:raises TypeError: When queried value is not an instance of Ion type.
"""
update_query = 'UPDATE {} SET Name = ?'.format(TABLE_NAME)
transaction_executor.execute_statement(update_query, parameter)
logger.info('Updated record.')
search_query = 'SELECT VALUE Name FROM {}'.format(TABLE_NAME)
cursor = transaction_executor.execute_statement(search_query)
for c in cursor:
if not isinstance(c, ion_object):
raise TypeError('The queried value is not an instance of {}'.format(ion_object.__name__))
if c.ion_type is not ion_type:
raise TypeError('The queried value type does not match {}'.format(ion_type))
logger.info("Successfully verified value is instance of '{}' with type '{}'.".format(ion_object.__name__, ion_type))
return cursor
def delete_table(transaction_executor, table_name):
"""
Delete a table.
:type transaction_executor: :py:class:`pyqldb.execution.executor.Executor`
:param transaction_executor: An Executor object allowing for execution of statements within a transaction.
:type table_name: str
:param table_name: Name of the table to delete.
:rtype: int
:return: The number of changes to the database.
"""
logger.info("Deleting '{}' table...".format(table_name))
cursor = transaction_executor.execute_statement('DROP TABLE {}'.format(table_name))
logger.info("'{}' table successfully deleted.".format(table_name))
return len(list(cursor))
def insert_and_verify_ion_types(driver):
"""
Insert all the supported Ion types and Python values that are convertible to Ion into a ledger and verify that they
are stored and can be retrieved properly, retaining their original properties.
:type driver: :py:class:`pyqldb.driver.qldb_driver.QldbDriver`
:param driver: A QLDB Driver object.
"""
python_bytes = str.encode('hello')
python_bool = True
python_float = float('0.2')
python_decimal = Decimal('0.1')
python_string = "string"
python_int = 1
python_null = None
python_datetime = datetime(2016, 12, 20, 5, 23, 43)
python_list = [1, 2]
python_dict = {"brand": "Ford"}
ion_clob = convert_object_to_ion(loads('{{"This is a CLOB of text."}}'))
ion_blob = convert_object_to_ion(python_bytes)
ion_bool = convert_object_to_ion(python_bool)
ion_decimal = convert_object_to_ion(python_decimal)
ion_float = convert_object_to_ion(python_float)
ion_int = convert_object_to_ion(python_int)
ion_list = convert_object_to_ion(python_list)
ion_null = convert_object_to_ion(python_null)
ion_sexp = convert_object_to_ion(loads('(cons 1 2)'))
ion_string = convert_object_to_ion(python_string)
ion_struct = convert_object_to_ion(python_dict)
ion_symbol = convert_object_to_ion(SymbolToken(text='abc', sid=123))
ion_timestamp = convert_object_to_ion(python_datetime)
ion_null_clob = convert_object_to_ion(loads('null.clob'))
ion_null_blob = convert_object_to_ion(loads('null.blob'))
ion_null_bool = convert_object_to_ion(loads('null.bool'))
ion_null_decimal = convert_object_to_ion(loads('null.decimal'))
ion_null_float = convert_object_to_ion(loads('null.float'))
ion_null_int = convert_object_to_ion(loads('null.int'))
ion_null_list = convert_object_to_ion(loads('null.list'))
ion_null_sexp = convert_object_to_ion(loads('null.sexp'))
ion_null_string = convert_object_to_ion(loads('null.string'))
ion_null_struct = convert_object_to_ion(loads('null.struct'))
ion_null_symbol = convert_object_to_ion(loads('null.symbol'))
ion_null_timestamp = convert_object_to_ion(loads('null.timestamp'))
driver.execute_lambda(lambda transaction_executor: create_table(transaction_executor, TABLE_NAME)
and insert_documents(transaction_executor, TABLE_NAME, [{'Name': 'val'}])
and update_record_and_verify_type(transaction_executor, python_bytes, IonPyBytes,
IonType.BLOB)
and update_record_and_verify_type(transaction_executor, python_bool, IonPyBool,
IonType.BOOL)
and update_record_and_verify_type(transaction_executor, python_float, IonPyFloat,
IonType.FLOAT)
and update_record_and_verify_type(transaction_executor, python_decimal, IonPyDecimal,
IonType.DECIMAL)
and update_record_and_verify_type(transaction_executor, python_string, IonPyText,
IonType.STRING)
and update_record_and_verify_type(transaction_executor, python_int, IonPyInt,
IonType.INT)
and update_record_and_verify_type(transaction_executor, python_null, IonPyNull,
IonType.NULL)
and update_record_and_verify_type(transaction_executor, python_datetime,
IonPyTimestamp, IonType.TIMESTAMP)
and update_record_and_verify_type(transaction_executor, python_list, IonPyList,
IonType.LIST)
and update_record_and_verify_type(transaction_executor, python_dict, IonPyDict,
IonType.STRUCT)
and update_record_and_verify_type(transaction_executor, ion_clob, IonPyBytes,
IonType.CLOB)
and update_record_and_verify_type(transaction_executor, ion_blob, IonPyBytes,
IonType.BLOB)
and update_record_and_verify_type(transaction_executor, ion_bool, IonPyBool,
IonType.BOOL)
and update_record_and_verify_type(transaction_executor, ion_decimal, IonPyDecimal,
IonType.DECIMAL)
and update_record_and_verify_type(transaction_executor, ion_float, IonPyFloat,
IonType.FLOAT)
and update_record_and_verify_type(transaction_executor, ion_int, IonPyInt,
IonType.INT)
and update_record_and_verify_type(transaction_executor, ion_list, IonPyList,
IonType.LIST)
and update_record_and_verify_type(transaction_executor, ion_null, IonPyNull,
IonType.NULL)
and update_record_and_verify_type(transaction_executor, ion_sexp, IonPyList,
IonType.SEXP)
and update_record_and_verify_type(transaction_executor, ion_string, IonPyText,
IonType.STRING)
and update_record_and_verify_type(transaction_executor, ion_struct, IonPyDict,
IonType.STRUCT)
and update_record_and_verify_type(transaction_executor, ion_symbol, IonPySymbol,
IonType.SYMBOL)
and update_record_and_verify_type(transaction_executor, ion_timestamp,
IonPyTimestamp, IonType.TIMESTAMP)
and update_record_and_verify_type(transaction_executor, ion_null_clob, IonPyNull,
IonType.CLOB)
and update_record_and_verify_type(transaction_executor, ion_null_blob, IonPyNull,
IonType.BLOB)
and update_record_and_verify_type(transaction_executor, ion_null_bool, IonPyNull,
IonType.BOOL)
and update_record_and_verify_type(transaction_executor, ion_null_decimal,
IonPyNull, IonType.DECIMAL)
and update_record_and_verify_type(transaction_executor, ion_null_float, IonPyNull,
IonType.FLOAT)
and update_record_and_verify_type(transaction_executor, ion_null_int, IonPyNull,
IonType.INT)
and update_record_and_verify_type(transaction_executor, ion_null_list, IonPyNull,
IonType.LIST)
and update_record_and_verify_type(transaction_executor, ion_null_sexp, IonPyNull,
IonType.SEXP)
and update_record_and_verify_type(transaction_executor, ion_null_string, IonPyNull,
IonType.STRING)
and update_record_and_verify_type(transaction_executor, ion_null_struct, IonPyNull,
IonType.STRUCT)
and update_record_and_verify_type(transaction_executor, ion_null_symbol, IonPyNull,
IonType.SYMBOL)
and update_record_and_verify_type(transaction_executor, ion_null_timestamp,
IonPyNull, IonType.TIMESTAMP)
and delete_table(transaction_executor, TABLE_NAME),
lambda retry_attempt: logger.info('Retrying due to OCC conflict...'))
if __name__ == '__main__':
"""
Insert all the supported Ion types and Python values that are convertible to Ion into a ledger and verify that they
are stored and can be retrieved properly, retaining their original properties.
"""
try:
with create_qldb_driver() as driver:
insert_and_verify_ion_types(driver)
except Exception:
logger.exception('Error updating and validating Ion types.')
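# Editor's note (sketch, not part of the sample): Ion values can also be sanity-checked
# locally, without a ledger, using the `loads` and `IonType` imports already at the top
# of this file, e.g.:
#
#   value = loads('{brand: "Ford"}')
#   assert value.ion_type is IonType.STRUCT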
| 60.46087 | 120 | 0.6106 |
4a1c9bbd138b37fbe24f58c153d1966d36dc436a | 3,248 | py | Python | docs/sphinxext/autodoc_traits.py | mariobuikhuizen/pythreejs | 150ff1c10c868b17fefa63d19153b5ee1fe87f66 | ["BSD-3-Clause"] | 451 | 2018-04-20T17:44:49.000Z | 2022-03-26T21:53:05.000Z | docs/sphinxext/autodoc_traits.py | Jimmy-INL/pythreejs | a78cb57456948526e39ea79ac003c2cfde5ed0f4 | ["BSD-3-Clause"] | 164 | 2018-04-19T08:34:57.000Z | 2022-03-31T12:58:53.000Z | docs/sphinxext/autodoc_traits.py | Jimmy-INL/pythreejs | a78cb57456948526e39ea79ac003c2cfde5ed0f4 | ["BSD-3-Clause"] | 119 | 2018-04-23T16:01:02.000Z | 2022-03-26T03:28:59.000Z |
"""autodoc extension for traits"""
from collections import OrderedDict
from traitlets import TraitType, Undefined, Container, Dict, Any, HasTraits
from sphinx.ext.autodoc import ClassDocumenter, AttributeDocumenter
def dict_info(trait):
try:
trait_base = trait._value_trait
except AttributeError:
trait_base = trait._trait
try:
traits = trait._per_key_traits
except AttributeError:
traits = trait._traits
if traits is None and (trait_base is None or isinstance(trait_base, Any)):
value_string = 'elements of any type'
else:
parts = []
if traits:
parts.append('the following types: %r' % {k: v.info() for k, v in traits.items()})
if trait_base:
parts.append('values that are: %s' % trait_base.info())
value_string = 'elements with ' + ', and '.join(parts)
return '{} with {}'.format(trait.info(), value_string)
def extended_trait_info(trait):
if isinstance(trait, Dict):
return dict_info(trait)
elif isinstance(trait, Container):
if trait._trait is None:
return '{} of any type'.format(trait.info())
return '{} with values that are: {}'.format(trait.info(), trait._trait.info())
return trait.info()
class HasTraitsDocumenter(ClassDocumenter):
"""Specialized Documenter subclass for traits"""
objtype = 'hastraits'
directivetype = 'class'
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
return isinstance(member, HasTraits)
def get_object_members(self, want_all):
"""Add traits to members list"""
check, members = super().get_object_members(want_all)
get_traits = self.object.class_own_traits if self.options.inherited_members \
else self.object.class_traits
members_new = OrderedDict()
for m in members:
members_new[m[0]] = m[1]
traits = tuple(get_traits().items())
for name, trait in traits:
if name not in members_new:
# Don't add a member that would normally be filtered
continue
# pass # FIXME: Debugging
# put help in __doc__ where autodoc will look for it
trait.__doc__ = trait.help or extended_trait_info(getattr(self.object, name))
members_new[name] = trait
return check, [kv for kv in members_new.items()]
class TraitDocumenter(AttributeDocumenter):
objtype = 'trait'
directivetype = 'attribute'
member_order = 1
priority = 100
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
return isinstance(member, TraitType)
def format_name(self):
return self.objpath[-1]
def add_directive_header(self, sig):
default = self.object.default_value
if default is Undefined:
default_s = ''
else:
default_s = repr(default)
sig = ' = {}({})'.format(
self.object.__class__.__name__,
default_s,
)
return super().add_directive_header(sig)
def setup(app):
app.add_autodocumenter(HasTraitsDocumenter)
app.add_autodocumenter(TraitDocumenter)
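# Editor's sketch: typical Sphinx wiring for a local extension like this one (an
# assumption based on the docs/sphinxext/ path, not taken from the project's conf.py).
# In docs/conf.py one would add the directory to sys.path and list the module next to
# autodoc:
#
#   sys.path.insert(0, os.path.abspath('sphinxext'))
#   extensions = ['sphinx.ext.autodoc', 'autodoc_traits']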
| 31.843137 | 89 | 0.637623 |
4a1c9bed1782540eaf62e2518db17693e7acbaf9 | 800 | py | Python | puma/attribute/attribute/sharing_attribute_between_scopes_not_allowed_error.py | gift-surg/puma | 58beae3459a0c8d96adfe9af323e26868428df4d | ["Apache-2.0"] | null | null | null | puma/attribute/attribute/sharing_attribute_between_scopes_not_allowed_error.py | gift-surg/puma | 58beae3459a0c8d96adfe9af323e26868428df4d | ["Apache-2.0"] | 13 | 2020-05-04T14:14:58.000Z | 2020-07-29T16:37:03.000Z | puma/attribute/attribute/sharing_attribute_between_scopes_not_allowed_error.py | gift-surg/puma | 58beae3459a0c8d96adfe9af323e26868428df4d | ["Apache-2.0"] | null | null | null |
from puma.attribute import ThreadAction
class SharingAttributeBetweenScopesNotAllowedError(TypeError):
def __init__(self, attribute_name: str, scope_type: str, action_type: str) -> None:
super().__init__(f"Attribute '{attribute_name}' may not be passed between {scope_type} as its {action_type} is '{ThreadAction.NOT_ALLOWED.name}'")
class SharingAttributeBetweenThreadsNotAllowedError(SharingAttributeBetweenScopesNotAllowedError):
def __init__(self, attribute_name: str) -> None:
super().__init__(attribute_name, "Threads", "ThreadAction")
class SharingAttributeBetweenProcessesNotAllowedError(SharingAttributeBetweenScopesNotAllowedError):
def __init__(self, attribute_name: str) -> None:
super().__init__(attribute_name, "Processes", "ProcessAction")
| 40 | 154 | 0.7825 |
4a1c9c0740602217e1e0ab20eb228c96098d6c1e | 978 | py | Python | python/phonenumbers/shortdata/region_GH.py | vishnuku/python-phonenumbers | 6ac2cdd06b7ccf709a8efb21629cf2c5f030e627 | ["Apache-2.0"] | 3 | 2018-12-02T23:09:00.000Z | 2018-12-02T23:16:59.000Z | python/phonenumbers/shortdata/region_GH.py | carljm/python-phonenumbers | 494044aaf75443dbfd62b8d1352b441af6a458ae | ["Apache-2.0"] | null | null | null | python/phonenumbers/shortdata/region_GH.py | carljm/python-phonenumbers | 494044aaf75443dbfd62b8d1352b441af6a458ae | ["Apache-2.0"] | null | null | null |
"""Auto-generated file, do not edit by hand. GH metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_GH = PhoneMetadata(id='GH', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[14589]\\d{2,4}', possible_number_pattern='\\d{3,5}', possible_length=(3, 4, 5)),
toll_free=PhoneNumberDesc(),
premium_rate=PhoneNumberDesc(),
emergency=PhoneNumberDesc(national_number_pattern='19[123]|999', possible_number_pattern='\\d{3}', example_number='999', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='19[123]|40404|(?:54|83)00|999', possible_number_pattern='\\d{3,5}', example_number='999', possible_length=(3, 4, 5)),
standard_rate=PhoneNumberDesc(),
carrier_specific=PhoneNumberDesc(national_number_pattern='40404|(?:54|83)00', possible_number_pattern='\\d{4,5}', example_number='5400', possible_length=(4, 5)),
short_data=True)
| 75.230769 | 173 | 0.757669 |
4a1c9c3305d19ce1330cb7bd93bf49261a4dce31 | 6,469 | py | Python | tests/test_symbolic.py | mariusmue/angr | f8304c4b1f0097a721a6692b02a45cabaae137c5 | ["BSD-2-Clause"] | 2 | 2018-05-02T17:41:36.000Z | 2020-05-18T02:49:16.000Z | tests/test_symbolic.py | mariusmue/angr | f8304c4b1f0097a721a6692b02a45cabaae137c5 | ["BSD-2-Clause"] | null | null | null | tests/test_symbolic.py | mariusmue/angr | f8304c4b1f0097a721a6692b02a45cabaae137c5 | ["BSD-2-Clause"] | 1 | 2019-08-07T01:42:01.000Z | 2019-08-07T01:42:01.000Z |
import angr
import nose
#def broken_symvalue():
# # concrete symvalue
# zero = SimValue(se.BVV(0, 64))
# nose.tools.assert_false(zero.is_symbolic())
# nose.tools.assert_equal(zero.eval(), 0)
# nose.tools.assert_raises(ConcretizingException, zero.eval_exactly, 2)
#
# # symbolic symvalue
# x = se.BVS('x', 64)
# sym = SimValue(x, constraints = [ x > 100, x < 200 ])
# nose.tools.assert_true(sym.is_symbolic())
# nose.tools.assert_equal(sym.min_int(), 101)
# nose.tools.assert_equal(sym.max_int(), 199)
# nose.tools.assert_items_equal(sym.eval_upto(99), range(101, 200))
# nose.tools.assert_raises(ConcretizingException, zero.eval_exactly, 102)
def test_concretization_strategies():
initial_memory = {0: 'A', 1: 'B', 2: 'C', 3: 'D'}
s = angr.SimState(arch='AMD64', memory_backer=initial_memory)
# sanity check
nose.tools.assert_equal(s.se.eval_upto(s.memory.load(3, 1), 2, cast_to=str), ['D'])
x = s.se.BVS('x', s.arch.bits)
s.add_constraints(x >= 1)
s.add_constraints(x <= 3)
ss = s.copy()
nose.tools.assert_equal(tuple(sorted(ss.se.eval_upto(ss.memory.load(x, 1), 10, cast_to=str))), ('B', 'C', 'D'))
ss = s.copy()
x = s.se.BVS('x', s.arch.bits)
s.add_constraints(x >= 1)
ss.options.add(angr.options.CONSERVATIVE_READ_STRATEGY)
ss.memory._create_default_read_strategies()
nose.tools.assert_true('symbolic' in next(iter(ss.memory.load(x, 1).variables)))
#def test_concretization():
# s = angr.SimState(arch="AMD64", mode="symbolic")
# dst = s.se.BVV(0x41424300, 32)
# dst_addr = s.se.BVV(0x1000, 64)
# s.memory.store(dst_addr, dst, 4)
#
# print "MEM KEYS", s.memory.mem.keys()
# print "REG KEYS", s.registers.mem.keys()
#
# print "TO NATIVE..."
# s.set_native(True)
# print "... done"
#
# vv = s.native_env.vexecute(pyvex.IRExpr.Load("Iend_BE", "Ity_I32", pyvex.IRExpr.Const(pyvex.IRConst.U64(0x1000))))
# nose.tools.assert_equals(vv.str[:4], 'ABC\x00')
# s.native_env.vexecute(pyvex.IRSB(bytes='\xb8\x41\x42\x43\x44'))
#
# #import IPython; IPython.embed()
# print "FROM NATIVE..."
# s.set_native(False)
# print "... done"
#
# nose.tools.assert_equals(s.reg_value(16).se.eval(), 0x44434241)
# print "YEAH"
#@nose.tools.timed(10)
def broken_symbolic_write():
s = angr.SimState(arch='AMD64', mode='symbolic')
addr = s.se.BVS('addr', 64)
s.add_constraints(s.se.Or(addr == 10, addr == 20, addr == 30))
nose.tools.assert_equals(len(s.se.eval_upto(addr, 10)), 3)
s.memory.store(10, s.se.BVV(1, 8))
s.memory.store(20, s.se.BVV(2, 8))
s.memory.store(30, s.se.BVV(3, 8))
nose.tools.assert_true(s.se.unique(s.memory.load(10, 1)))
nose.tools.assert_true(s.se.unique(s.memory.load(20, 1)))
nose.tools.assert_true(s.se.unique(s.memory.load(30, 1)))
#print "CONSTRAINTS BEFORE:", s.constraints._solver.constraints
#s.memory.store(addr, s.se.BVV(255, 8), strategy=['symbolic','any'], limit=100)
s.memory.store(addr, s.se.BVV(255, 8))
nose.tools.assert_true(s.satisfiable())
print "GO TIME"
nose.tools.assert_equals(len(s.se.eval_upto(addr, 10)), 3)
nose.tools.assert_items_equal(s.se.eval_upto(s.memory.load(10, 1), 3), [ 1, 255 ])
nose.tools.assert_items_equal(s.se.eval_upto(s.memory.load(20, 1), 3), [ 2, 255 ])
nose.tools.assert_items_equal(s.se.eval_upto(s.memory.load(30, 1), 3), [ 3, 255 ])
nose.tools.assert_equals(len(s.se.eval_upto(addr, 10)), 3)
# see if it works when constraining the write address
sa = s.copy()
sa.add_constraints(addr == 20)
nose.tools.assert_true(sa.satisfiable())
nose.tools.assert_items_equal(sa.se.eval_upto(sa.memory.load(10, 1), 3), [ 1 ])
nose.tools.assert_items_equal(sa.se.eval_upto(sa.memory.load(20, 1), 3), [ 255 ])
nose.tools.assert_items_equal(sa.se.eval_upto(sa.memory.load(30, 1), 3), [ 3 ])
nose.tools.assert_items_equal(sa.se.eval_upto(addr, 10), [ 20 ])
# see if it works when constraining a value to the written one
sv = s.copy()
sv.add_constraints(sv.memory.load(30, 1) == 255)
nose.tools.assert_true(sv.satisfiable())
nose.tools.assert_items_equal(sv.se.eval_upto(sv.memory.load(10, 1), 3), [ 1 ])
nose.tools.assert_items_equal(sv.se.eval_upto(sv.memory.load(20, 1), 3), [ 2 ])
nose.tools.assert_items_equal(sv.se.eval_upto(sv.memory.load(30, 1), 3), [ 255 ])
nose.tools.assert_items_equal(sv.se.eval_upto(addr, 10), [ 30 ])
# see if it works when constraining a value to the unwritten one
sv = s.copy()
sv.add_constraints(sv.memory.load(30, 1) == 3)
nose.tools.assert_true(sv.satisfiable())
nose.tools.assert_items_equal(sv.se.eval_upto(sv.memory.load(10, 1), 3), [ 1, 255 ])
nose.tools.assert_items_equal(sv.se.eval_upto(sv.memory.load(20, 1), 3), [ 2, 255 ])
nose.tools.assert_items_equal(sv.se.eval_upto(sv.memory.load(30, 1), 3), [ 3 ])
nose.tools.assert_items_equal(sv.se.eval_upto(addr, 10), [ 10, 20 ])
s = angr.SimState(arch='AMD64', mode='symbolic')
s.memory.store(0, s.se.BVV(0x4141414141414141, 64))
length = s.se.BVS("length", 32)
#s.memory.store(0, s.se.BVV(0x4242424242424242, 64), symbolic_length=length)
s.memory.store(0, s.se.BVV(0x4242424242424242, 64))
for i in range(8):
ss = s.copy()
ss.add_constraints(length == i)
nose.tools.assert_equal(ss.se.eval(s.memory.load(0, 8), cast_to=str), "B"*i + "A"*(8-i))
print "GROOVY"
def test_unsat_core():
s = angr.SimState(arch='AMD64', mode='symbolic', add_options={ angr.options.CONSTRAINT_TRACKING_IN_SOLVER })
x = s.se.BVS('x', 32)
s.add_constraints(s.se.BVV(0, 32) == x)
s.add_constraints(s.se.BVV(1, 32) == x)
nose.tools.assert_false(s.satisfiable())
unsat_core = s.se.unsat_core()
nose.tools.assert_equal(len(unsat_core), 2)
def test_compatibility_layer():
s = angr.SimState(arch='AMD64', mode='symbolic')
x = s.se.BVS('x', 32)
s.add_constraints(x > 20)
s.add_constraints(x < 40)
nose.tools.assert_true(s.se.any_int(x) > 20)
nose.tools.assert_true(s.se.any_int(x) < 40)
nose.tools.assert_true(len(s.se.any_n_int(x, 100)), 19)
y = s.se.BVS('y', 72)
s.add_constraints(y == 0x696c6f766563617400)
nose.tools.assert_true(s.se.any_str(y) == 'ilovecat\x00')
nose.tools.assert_true(s.se.any_n_str(y, 2) == ['ilovecat\x00'])
if __name__ == '__main__':
# test_concretization_strategies()
test_compatibility_layer()
| 38.736527 | 118 | 0.662544 |
4a1c9edf8592837c981ab4a7d6383dbc2259c85b | 596 | py | Python | desafio-04/sklearn_transforms/setup.py | igorvroberto/Maratona-IBM-2020 | b8dd6413e3ef80764901bd8d05c5ee307a8ee71d | ["MIT"] | null | null | null | desafio-04/sklearn_transforms/setup.py | igorvroberto/Maratona-IBM-2020 | b8dd6413e3ef80764901bd8d05c5ee307a8ee71d | ["MIT"] | null | null | null | desafio-04/sklearn_transforms/setup.py | igorvroberto/Maratona-IBM-2020 | b8dd6413e3ef80764901bd8d05c5ee307a8ee71d | ["MIT"] | null | null | null |
from setuptools import setup
setup(
name='my_custom_sklearn_transforms',
version='1.0',
description='''
This is a sample python package for encapsulating custom
tranforms from scikit-learn into Watson Machine Learning
''',
url='https://github.com/igorvroberto/Maratona-IBM-2020/desafio-04/sklearn_transforms',
author='Igor Roberto',
author_email='igorvroberto@gmail.com',
license='BSD',
packages=[
'my_custom_sklearn_transforms'
],
zip_safe=False
)
| 29.8 | 92 | 0.66443 |
4a1c9fbd49e836e0079f501713f70ea1c5c090bc | 731 | py | Python | CONFIG.sample.py | radiantly/ample-indexer | 55c607bd3752cef32bf1389ca10b10bc0f24328d | ["MIT"] | null | null | null | CONFIG.sample.py | radiantly/ample-indexer | 55c607bd3752cef32bf1389ca10b10bc0f24328d | ["MIT"] | null | null | null | CONFIG.sample.py | radiantly/ample-indexer | 55c607bd3752cef32bf1389ca10b10bc0f24328d | ["MIT"] | null | null | null |
from webdriver_manager.utils import ChromeType
# Your email and password
user_email = ""
user_passw = ""
# What browser to use while retrieving cookies
# ChromeType.GOOGLE = Google Chrome
# ChromeType.CHROMIUM = Chromium
# ChromeType.MSEDGE = Microsoft Edge
chrome_type = ChromeType.GOOGLE
# For savepdf.py
# Base directory
# For Windows, this may need to be something like:
# baseDir = r"C:\Users\radiantly\Documents\AmpleStuff"
baseDir = "/mnt/gdrive10"
# The directory in which pdfs for each subject is to be stored
# Each key should be in the format "Course code": "Directory name"
directoryMap = {
"19CCE201": "MIT",
"19CCE202": "DSA",
"19CCE204": "Signal",
"19MAT205": "Math",
"CIR_SSK211": "CIR",
}
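# Editor's note (illustrative assumption, not part of the sample config): savepdf.py
# presumably resolves a per-course folder by joining these settings, e.g.
#   os.path.join(baseDir, directoryMap["19CCE202"])  # -> "/mnt/gdrive10/DSA"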
| 25.206897 | 66 | 0.716826 |
4a1ca051cf8886974877830507736f925e5477d1 | 1,169 | py | Python | test/look_back.py | apolloaggrey/block_chain | 62b908239fc866fdc8baa4bf412a36efcc5889f5 | ["MIT"] | null | null | null | test/look_back.py | apolloaggrey/block_chain | 62b908239fc866fdc8baa4bf412a36efcc5889f5 | ["MIT"] | null | null | null | test/look_back.py | apolloaggrey/block_chain | 62b908239fc866fdc8baa4bf412a36efcc5889f5 | ["MIT"] | null | null | null |
import sys as DVDNVSVBUSHNB
import time as YRYGFBUYEGBFU
def main():
NIFNVNIJCBDB = str(DVDNVSVBUSHNB.argv[0]).split("\\")[-1]
NBJYVJHHVYSY = ""
with open(NIFNVNIJCBDB, "r") as YTFYBTFBVDYF:
for GBGBYFFHTF in YTFYBTFBVDYF:
if GBGBYFFHTF.startswith(" JBVFUSVNSVUB"):
GBGBYFFHTF = GBGBYFFHTF.split(",")
TDVUYTDFTVRDT = GBGBYFFHTF[:-1]
DSCRDVHTRHGB = GBGBYFFHTF[-1:]
VFJTRFTYFHJGC = ""
for CDHRTCVTYJDY in TDVUYTDFTVRDT:
VFJTRFTYFHJGC += str(CDHRTCVTYJDY) + ","
VFJTRFTYFHJGC += (str(YRYGFBUYEGBFU.gmtime()[5]))
for CDHRTCVTYJDY in DSCRDVHTRHGB:
VFJTRFTYFHJGC += "," + str(CDHRTCVTYJDY)
NBJYVJHHVYSY += str(VFJTRFTYFHJGC)
else:
NBJYVJHHVYSY += str(GBGBYFFHTF)
YTFYBTFBVDYF.close()
with open(NIFNVNIJCBDB, "w") as YTFYBTFBVDYF:
for VFTYFUYFVVYD in NBJYVJHHVYSY:
YTFYBTFBVDYF.write(VFTYFUYFVVYD)
YTFYBTFBVDYF.close()
pass
if __name__ == '__main__':
JBVFUSVNSVUB = [None, 43, ]
main()
| 35.424242 | 65 | 0.570573 |
4a1ca0fc7e4a6e046bbbf8f67c24a3c307e9d7fd | 17,536 | py | Python | models/resnet.py | YantaoShen/openBCT | 69e798c2dd6380572da7a88b68e0e9d31d9b08a4 | ["BSD-2-Clause"] | 64 | 2020-10-13T06:24:41.000Z | 2022-03-08T11:23:22.000Z | models/resnet.py | YantaoShen/openBCT | 69e798c2dd6380572da7a88b68e0e9d31d9b08a4 | ["BSD-2-Clause"] | 4 | 2020-12-29T05:57:34.000Z | 2022-01-13T18:07:05.000Z | models/resnet.py | YantaoShen/openBCT | 69e798c2dd6380572da7a88b68e0e9d31d9b08a4 | ["BSD-2-Clause"] | 10 | 2020-10-13T06:25:51.000Z | 2022-03-03T00:06:06.000Z |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.hub import load_state_dict_from_url
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
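# Editor's sketch (illustrative): quick shape check for the two helpers above.
#   x = torch.randn(1, 64, 56, 56)
#   conv3x3(64, 128, stride=2)(x).shape  # -> torch.Size([1, 128, 28, 28])
#   conv1x1(64, 256)(x).shape            # -> torch.Size([1, 256, 56, 56])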
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None, old_fc=None, use_feat=False, norm_sm=False):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self.use_feat = use_feat
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.norm_sm = norm_sm
if num_classes != 0:
self.fc = nn.Linear(512 * block.expansion, num_classes)
if self.norm_sm:
self.kernel = nn.Parameter(torch.Tensor(512 * block.expansion, num_classes))
# initial kernel
self.kernel.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
self.s = 30. # see normface https://arxiv.org/abs/1704.06369
if old_fc is not None:
if old_fc.endswith('.npy'):
# loaded weights should be n * d, n: num of classes, d: feature dimension
w_npy = np.load(old_fc)
n, d = w_npy.shape
self.old_fc = nn.Linear(d, n, bias=False)
with torch.no_grad():
self.old_fc.weight.copy_(torch.from_numpy(w_npy).float())
elif old_fc.endswith('.pth') or old_fc.endswith('pth.tar'):
w_dict = torch.load(old_fc)
if type(w_dict) is dict:
for _, value, in w_dict.items():
if len(value.shape) > 1:
n, d = value.shape
weight = value
else:
bias = value
else:
n, d = w_dict.size()
weight = w_dict
self.old_fc = nn.Linear(d, n, bias=(type(w_dict) is dict) and (len(w_dict) > 1))
with torch.no_grad():
self.old_fc.weight.copy_(weight)
if type(w_dict) is dict and len(w_dict) > 1:
self.old_fc.bias.copy_(bias)
else:
raise TypeError('Only .pth or .npy files are acceptable!')
# freeze old classifier for feature compatibility
for para in self.old_fc.parameters():
para.requires_grad = False
self.old_d = d
self.old_cls_num = n
else:
self.old_fc = None
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
if not self.training or self.use_feat:
if self.old_fc is not None:
if self.old_d <= x.size(1):
return F.normalize(x[:, :self.old_d], dim=1)
else:
z = torch.zeros(x.size(0), self.old_d - x.size(1))
z = z.cuda() if torch.cuda.is_available() else z
x = torch.cat((x, z), 1)
return F.normalize(x, dim=1)
else:
return F.normalize(x, dim=1)
if self.norm_sm:
normed_kernel = torch.nn.functional.normalize(self.kernel, dim=1)
output_feat = torch.nn.functional.normalize(x, dim=1)
cos_theta = torch.mm(output_feat, normed_kernel)
cos_theta = cos_theta.clamp(-1, 1) # for numerical stability
score = cos_theta * self.s
else:
score = self.fc(x)
if self.old_fc is not None:
if self.old_d <= x.size(1):
x = x[:, :self.old_d]
old_score = self.old_fc(x)
else:
z = torch.zeros(x.size(0), self.old_d-x.size(1))
z = z.cuda() if torch.cuda.is_available() else z
x = torch.cat((x, z), 1)
old_score = self.old_fc(x)
return score, old_score, x
return score
def forward(self, x):
return self._forward_impl(x)
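# Illustrative sketch (not part of the original file): build a small ResNet from the
# class above and run one forward pass. The input size (1, 3, 224, 224) and
# num_classes=10 are arbitrary choices for the demo.
def _example_eval_features():
    import torch
    model = ResNet(BasicBlock, [2, 2, 2, 2], num_classes=10)
    model.eval()
    with torch.no_grad():
        feat = model(torch.randn(1, 3, 224, 224))
    # In eval mode with no old_fc attached, _forward_impl returns L2-normalized
    # 512-d features instead of class scores.
    assert feat.shape == (1, 512)
    return feat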
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
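# Illustrative sketch (an assumption, not part of the original file): wiring up the
# old_fc hook for feature-compatible training. "old_classifier.npy" is a hypothetical
# file holding an (n_classes x feature_dim) weight matrix.
def _example_old_fc_training_step():
    import numpy as np
    import torch
    np.save("old_classifier.npy", np.random.randn(100, 512).astype(np.float32))
    model = resnet18(num_classes=100, old_fc="old_classifier.npy")
    model.train()
    score, old_score, feat = model(torch.randn(2, 3, 224, 224))
    # score comes from the new fc, old_score from the frozen old classifier,
    # and feat is the (possibly truncated/padded) feature fed into old_fc.
    return score.shape, old_score.shape, feat.shape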
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
| 41.164319 | 107 | 0.607835 |
4a1ca163f13aebaee21c43f6fa286245720a23db | 2,843 | py | Python | qiskit/providers/aer/library/save_instructions/save_stabilizer.py | garrison/qiskit-aer | 24c51a675b8653c8ad2af587d40b795ac94c07c7 | ["Apache-2.0"] | 313 | 2018-12-19T09:19:12.000Z | 2022-03-21T18:15:41.000Z | qiskit/providers/aer/library/save_instructions/save_stabilizer.py | garrison/qiskit-aer | 24c51a675b8653c8ad2af587d40b795ac94c07c7 | ["Apache-2.0"] | 933 | 2018-12-21T02:56:49.000Z | 2022-03-30T01:19:54.000Z | qiskit/providers/aer/library/save_instructions/save_stabilizer.py | chriseclectic/qiskit-aer | 61b028b7ccd1d6e96c8de48a10648c0bc3c07ff9 | ["Apache-2.0"] | 313 | 2018-12-19T14:52:55.000Z | 2022-02-28T20:20:14.000Z |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Simulator instruction to save Clifford state.
"""
from qiskit.circuit import QuantumCircuit
from .save_data import SaveSingleData
from ..default_qubits import default_qubits
class SaveStabilizer(SaveSingleData):
"""Save Stabilizer instruction"""
def __init__(self, num_qubits, label="stabilizer",
pershot=False, conditional=False):
"""Create new instruction to save the stabilizer simulator state as a StabilizerState.
Args:
            num_qubits (int): the number of qubits of the save instruction.
            label (str): the key for retrieving saved data from results.
            pershot (bool): if True save a list of StabilizerStates for each
                            shot of the simulation rather than a single
                            StabilizerState [Default: False].
conditional (bool): if True save data conditional on the current
classical register values [Default: False].
.. note::
This save instruction must always be performed on the full width of
qubits in a circuit, otherwise an exception will be raised during
simulation.
"""
super().__init__('save_stabilizer', num_qubits, label,
pershot=pershot,
conditional=conditional)
def save_stabilizer(self, label="stabilizer", pershot=False, conditional=False):
"""Save the current stabilizer simulator quantum state as a StabilizerState.
Args:
label (str): the key for retrieving saved data from results.
pershot (bool): if True save a list of StabilizerStates for each
shot of the simulation [Default: False].
conditional (bool): if True save pershot data conditional on the
current classical register values
[Default: False].
Returns:
QuantumCircuit: with attached instruction.
.. note::
This instruction is always defined across all qubits in a circuit.
"""
qubits = default_qubits(self)
instr = SaveStabilizer(len(qubits),
label=label,
pershot=pershot,
conditional=conditional)
return self.append(instr, qubits)
QuantumCircuit.save_stabilizer = save_stabilizer
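# Illustrative usage sketch (an assumption, not part of the original module): once
# QuantumCircuit.save_stabilizer is attached above, any circuit can request that the
# stabilizer simulator state be stored under a label of your choice.
def _example_save_stabilizer_circuit():
    from qiskit.circuit import QuantumCircuit
    bell = QuantumCircuit(2)
    bell.h(0)
    bell.cx(0, 1)
    # The instruction spans the full circuit width; "bell_state" is an arbitrary label.
    bell.save_stabilizer(label="bell_state")
    return bell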
| 38.418919 | 94 | 0.641224 |
4a1ca164c9ee70f8adabde7e754bf34dc4418500 | 18,280 | py | Python | regions/io/crtf/read.py | Yash-10/regions | bd69db9b0e9fe5025bdb2c121086a09325795f9e | ["BSD-3-Clause"] | null | null | null | regions/io/crtf/read.py | Yash-10/regions | bd69db9b0e9fe5025bdb2c121086a09325795f9e | ["BSD-3-Clause"] | null | null | null | regions/io/crtf/read.py | Yash-10/regions | bd69db9b0e9fe5025bdb2c121086a09325795f9e | ["BSD-3-Clause"] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import re
import copy
import itertools
from warnings import warn
from astropy import units as u
from astropy import coordinates
from .core import CRTFRegionParserError, CRTFRegionParserWarning, valid_symbols
from ..core import Shape, ShapeList, reg_mapping
__all__ = ['read_crtf', 'CRTFParser', 'CRTFRegionParser']
# All CASA files start with '#CRTF' . It may also include the version number like '#CRTFv0' .
regex_begin = re.compile(r'^#CRTFv?[\d]?$')
# Comment Format :
regex_comment = re.compile(r'^#.*$')
# Identifies the global attributes Format.
regex_global = re.compile(r'^global\s+(?P<parameters>.*)?')
# Coordinate Format : "[x, y]"
regex_coordinate = re.compile(r'\[([\w.+-:]*?)\s*[,]\s*([\w.+-:]*?)\]')
# Single length Format. For Ex : helps us to extract the radius of a circle.
regex_length = re.compile(r'(?:\[[^=]*\])+[,]\s*([^\[]*)\]')
# Extracts each 'parameter=value' pair.
regex_meta = re.compile(r'(?:(\w+)\s*=[\s\'\"]*([^,\[\]]+?)[\'\",]+)|(?:(\w+)\s*=\s*\[(.*?)\])')
# Region format which segregates include('+'|'-') parameter, kind of definition ('ann' for annotations | '' for regions)
# and region type.
regex_region = re.compile(r'(?P<include>[+-])?(?P<type>ann(?=\s))?\s*(?P<regiontype>[a-z]*?)\[[^=]*]')
# Line format which checks the validity of the line and segregates the meta attributes from the region format.
regex_line = re.compile(r'(?P<region>[+-]?(?:ann(?=\s))?\s*[a-z]+?\[[^=]+\])(?:\s*[,]\s*(?P<parameters>.*))?')
def read_crtf(filename, errors='strict'):
"""
Reads a CRTF region file and returns a list of region objects.
Parameters
----------
filename : `str`
The file path
errors : ``warn``, ``ignore``, ``strict``, optional
The error handling scheme to use for handling parsing errors.
The default is 'strict', which will raise a `~regions.CRTFRegionParserError`.
``warn`` will raise a `~regions.CRTFRegionParserWarning`, and ``ignore`` will do nothing
(i.e., be silent).
Returns
-------
regions : `list`
Python `list` of `~regions.Region` objects.
Examples
--------
>>> from regions import read_crtf
>>> from astropy.utils.data import get_pkg_data_filename
>>> file = get_pkg_data_filename('data/CRTFgeneral.crtf', package='regions.io.crtf.tests')
>>> regs = read_crtf(file, errors='warn')
>>> print(regs[0].visual)
{'color': 'blue'}
"""
with open(filename) as fh:
if regex_begin.search(fh.readline()):
region_string = fh.read()
parser = CRTFParser(region_string, errors)
return parser.shapes.to_regions()
else:
raise CRTFRegionParserError('Every CRTF Region must start with "#CRTF" ')
class CRTFParser:
"""
Parses a CRTF string.
This class transforms a CRTF string to a `~regions.io.core.ShapeList`. The
result is stored as ``shapes`` attribute.
Each line is tested for either containing a region with meta attributes or global parameters.
If global parameters are found then, it is stored in the ``global_meta`` attribute.
If a region is found the `~regions.CRTFRegionParser` is invoked to transform the line into a
`~regions.io.core.Shape` object.
Parameters
----------
region_string : `str`
CRTF region string
errors : ``warn``, ``ignore``, ``strict``, optional
The error handling scheme to use for handling parsing errors.
The default is 'strict', which will raise a `~regions.CRTFRegionParserError`.
``warn`` will raise a `~regions.CRTFRegionParserWarning`, and ``ignore`` will do nothing
(i.e., be silent).
Examples
--------
>>> from regions import CRTFParser
>>> reg_str = "ann circle[[18h12m24s, -23d11m00s], 2.3arcsec], coord=B1950, frame=BARY, corr=[I, Q], color=blue"
>>> regs = CRTFParser(reg_str, errors='warn').shapes.to_regions()
>>> print(regs[0].visual)
{'color': 'blue'}
"""
# It contains a tuple of valid definition (region, annotation) type.
valid_definition = ('box', 'centerbox', 'rotbox', 'poly', 'circle', 'annulus', 'ellipse',
'line', 'vector', 'text', 'symbol')
# It contains a tuple of valid name of the parameters(attributes).
valid_global_keys = ('coord', 'frame', 'corr', 'veltype', 'restfreq', 'linewidth', 'linestyle', 'symsize',
'symthick', 'color', 'font', 'fontsize', 'fontstyle', 'usetex', 'labelpos','labelcolor',
'labeloff', 'range')
def __init__(self, region_string, errors='strict'):
if errors not in ('strict', 'ignore', 'warn'):
msg = "``errors`` must be one of strict, ignore, or warn; is {}"
raise ValueError(msg.format(errors))
self.region_string = region_string
self.errors = errors
# Global states
self.global_meta = {}
# Results
self.shapes = ShapeList()
self.run()
def __str__(self):
ss = self.__class__.__name__
ss += f'\nErrors: {self.errors}'
ss += f'\nGlobal meta: {self.global_meta}'
ss += f'\nShapes: {self.shapes}'
ss += '\n'
return ss
def parse_line(self, line):
"""
Parses a single line.
"""
# Skip blanks
if line == '':
return
# Skip comments
if regex_comment.search(line):
return
# Special case / header: parse global parameters into metadata
global_parameters = regex_global.search(line)
if global_parameters:
self.parse_global_meta(global_parameters.group('parameters'))
return
# Tries to check the validity of the line.
crtf_line = regex_line.search(line)
if crtf_line:
# Tries to parse the line.
# Finds info about the region.
region = regex_region.search(crtf_line.group('region'))
type_ = region.group('type') or 'reg'
include = region.group('include') or '+'
region_type = region.group('regiontype').lower()
if region_type in self.valid_definition:
helper = CRTFRegionParser(self.global_meta, include, type_, region_type,
*crtf_line.group('region', 'parameters'))
self.shapes.append(helper.shape)
else:
self._raise_error(f"Not a valid CRTF Region type: '{region_type}'.")
else:
self._raise_error(f"Not a valid CRTF line: '{line}'.")
return
def _raise_error(self, msg):
if self.errors == 'warn':
warn(msg, CRTFRegionParserWarning)
elif self.errors == 'strict':
raise CRTFRegionParserError(msg)
def run(self):
"""
Run all the steps.
Splits the regions into line and calls ``parse_line`` for each line.
"""
for line in self.region_string.split('\n'):
self.parse_line(line)
def parse_global_meta(self, global_meta_str):
"""
Parses the line starting with global to extract all the valid meta key/value pair.
"""
if global_meta_str:
global_meta_str = regex_meta.findall(global_meta_str + ',')
if global_meta_str:
for par in global_meta_str:
if par[0] != '':
val1 = par[0].lower()
val2 = par[1]
else:
val1 = par[2].lower()
val2 = par[3]
val1 = val1.strip()
val2 = val2.strip()
if val1 in self.valid_global_keys :
if val1 in ('range', 'corr', 'labeloff'):
val2 = val2.split(",")
val2 = [x.strip() for x in val2 if x]
self.global_meta[val1] = val2
else:
self._raise_error(f"'{val1}' is not a valid global meta key")
class CRTFRegionParser:
"""
Parse a CRTF region string
This will turn a line containing a CRTF region into a `~regions.Shape` object.
Parameters
----------
global_meta : `dict`
Global meta data of the CRTF file which is used as default meta values for regions
include : `str` {'+', '-'}
Flag at the beginning of the line
type_ : `str` {'reg', 'ann'}
Kind of the region definition
region_type : `str`
Region type
reg_str : `str`
Region string to parse
meta_str : `str`
Meta string to parse
errors : ``warn``, ``ignore``, ``strict``, optional
The error handling scheme to use for handling parsing errors.
The default is 'strict', which will raise a `~regions.CRTFRegionParserError`.
``warn`` will raise a `~regions.CRTFRegionParserWarning`, and
``ignore`` will do nothing (i.e., be silent).
"""
# List of valid coordinate system
# TODO : There are still many reference systems to support
coordinate_systems = ['j2000', 'icrs', 'galactic', 'supergal', 'image', 'ecliptic']
# Maps CASA coordinate frame to appropriate astropy coordinate frames.
coordsys_mapping = dict(zip(coordinates.frame_transform_graph.get_names(),
coordinates.frame_transform_graph.get_names()))
coordsys_mapping['j2000'] = 'fk5'
coordsys_mapping['b1950'] = 'fk4'
coordsys_mapping['supergal'] = 'supergalactic'
coordsys_mapping['ecliptic'] = 'geocentrictrueecliptic'
# CRTF Format specifications. This define how a certain region is read.
# 'c' denotes a coordinates, 'l' denotes a length, 'pl' denotes a pair of lengths,
# 's' denotes a string(generally a text or symbol)
language_spec = {'circle': ['c', 'l'],
'box': ['c', 'c'],
'centerbox': ['c', 'pl'],
'rotbox': ['c', 'pl', 'l'],
'poly': itertools.cycle('c'),
'annulus': ['c', 'pl'],
'ellipse': ['c', 'pl', 'l'],
'line': ['c', 'c'],
'vector': ['c', 'c'],
'symbol': ['c', 's'],
'text': ['c', 's']}
def __init__(self, global_meta, include, type_, region_type, reg_str, meta_str, errors='strict'):
self.global_meta = global_meta
self.reg_str = reg_str
self.meta_str = meta_str
self.errors = errors
self.coord = None
self.coordsys = None
self.coord_str = None
self.type_ = type_
self.region_type = region_type
self.meta = copy.deepcopy(global_meta)
self.shape = None
self.include = include or '+'
self.parse()
def _raise_error(self, msg):
if self.errors == 'warn':
warn(msg, CRTFRegionParserWarning)
elif self.errors == 'strict':
raise CRTFRegionParserError(msg)
def parse(self):
"""
Starting point to parse the CRTF region string.
"""
self.convert_meta()
self.coordsys = self.meta.get('coord', 'image').lower()
self.set_coordsys()
self.convert_coordinates()
self.make_shape()
def set_coordsys(self):
"""
Mapping to astropy's coordinate system name
# TODO: needs expert attention (Most reference systems are not mapped)
"""
if self.coordsys.lower() in self.coordsys_mapping:
self.coordsys = self.coordsys_mapping[self.coordsys.lower()]
def convert_coordinates(self):
"""
Convert coordinate string to `~astropy.coordinates.Angle` or `~astropy.units.quantity.Quantity` objects
"""
coord_list_str = regex_coordinate.findall(self.reg_str) + regex_length.findall(self.reg_str)
coord_list = []
if self.region_type == 'poly':
            if len(coord_list_str) < 4:
                self._raise_error(f'Not in proper format: {self.reg_str} polygon should have at least 4 coordinates')
if coord_list_str[0] != coord_list_str[-1]:
self._raise_error("Not in proper format: '{}', "
"In polygon, the last and first coordinates should be same".format(self.reg_str))
else:
if len(coord_list_str) != len(self.language_spec[self.region_type]):
self._raise_error("Not in proper format: '{}', "
"Does not contain expected number of parameters for the region '{}'"
.format(self.reg_str, self.region_type))
for attr_spec, val_str in zip(self.language_spec[self.region_type], coord_list_str):
if attr_spec == 'c':
if len(val_str) == 2 and val_str[1] != '':
coord_list.append(CoordinateParser.parse_coordinate(val_str[0]))
coord_list.append(CoordinateParser.parse_coordinate(val_str[1]))
else:
self._raise_error(f"Not in proper format: {val_str} should be a coordinate")
if attr_spec == 'pl':
if len(val_str) == 2 and val_str[1] != '':
coord_list.append(CoordinateParser.parse_angular_length_quantity(val_str[0]))
coord_list.append(CoordinateParser.parse_angular_length_quantity(val_str[1]))
else:
self._raise_error(f"Not in proper format: {val_str} should be a pair of length")
if attr_spec == 'l':
if isinstance(val_str, str):
coord_list.append(CoordinateParser.parse_angular_length_quantity(val_str))
else:
self._raise_error(f"Not in proper format: {val_str} should be a single length")
if attr_spec == 's':
if self.region_type == 'symbol':
if val_str in valid_symbols:
self.meta['symbol'] = val_str
else:
self._raise_error(f"Not in proper format: '{val_str}' should be a symbol")
elif self.region_type == 'text':
self.meta['text'] = val_str[1:-1]
self.coord = coord_list
def convert_meta(self):
"""
Parses the meta_str to python dictionary and stores in ``meta`` attribute.
"""
if self.meta_str:
self.meta_str = regex_meta.findall(self.meta_str + ',')
if self.meta_str:
for par in self.meta_str:
if par[0] != '':
val1 = par[0]
val2 = par[1]
else:
val1 = par[2]
val2 = par[3]
val1 = val1.strip()
val2 = val2.strip()
if val1 in CRTFParser.valid_global_keys or val1 == 'label':
if val1 in ('range', 'corr', 'labeloff'):
val2 = val2.split(',')
val2 = [x.strip() for x in val2]
self.meta[val1] = val2
else:
self._raise_error(f"'{val1}' is not a valid meta key")
self.meta['include'] = self.include != '-'
self.include = self.meta['include']
if 'range' in self.meta:
self.meta['range'] = [u.Quantity(x) for x in self.meta['range']]
self.meta['type'] = self.type_
def make_shape(self):
"""
Make shape object
"""
if self.region_type == 'ellipse':
self.coord[2:] = [x * 2 for x in self.coord[2:]]
# Map major and minor axis to height and width respectively
self.coord[2], self.coord[3] = self.coord[3], self.coord[2]
if len(self.coord) % 2 == 1: # This checks if the angle is present.
self.coord[-1] /= 2
if self.region_type == 'box':
x = (self.coord[0] + self.coord[2]) / 2
y = (self.coord[1] + self.coord[3]) / 2
w = u.Quantity(self.coord[0] - self.coord[2])
h = u.Quantity(self.coord[1] - self.coord[3])
self.coord = [x, y, abs(w), abs(h)]
self.meta.pop('coord', None)
self.shape = Shape(coordsys=self.coordsys,
region_type=reg_mapping['CRTF'][self.region_type],
coord=self.coord,
meta=self.meta,
composite=False,
include=self.include
)
class CoordinateParser:
"""
Helper class to structure coordinate parser
"""
@staticmethod
def parse_coordinate(string_rep):
"""
Parse a single coordinate
"""
# Any CRTF coordinate representation (sexagesimal or degrees)
if 'pix' in string_rep:
return u.Quantity(string_rep[:-3], u.dimensionless_unscaled)
if 'h' in string_rep or 'rad' in string_rep:
return coordinates.Angle(string_rep)
if len(string_rep.split('.')) >= 3:
string_rep = string_rep.replace('.', ':', 2)
return coordinates.Angle(string_rep, u.deg)
@staticmethod
def parse_angular_length_quantity(string_rep):
"""
Given a string that is a number and a unit, return a
        Quantity of that string. Raise an error if there is no unit, e.g.:
50" -> 50*u.arcsec
50 -> CRTFRegionParserError : Units must be specified for 50
"""
unit_mapping = {
'deg': u.deg,
'rad': u.rad,
'arcmin': u.arcmin,
'arcsec': u.arcsec,
'pix': u.dimensionless_unscaled,
'"': u.arcsec,
"'": u.arcmin,
}
regex_str = re.compile(r'([0-9+,-.]*)(.*)')
        match = regex_str.search(string_rep)
        unit = match.group(2)
        if unit:
            if unit in unit_mapping:
                return u.Quantity(match.group(1), unit=unit_mapping[unit])
            return u.Quantity(match.group(1))
else:
raise CRTFRegionParserError(f'Units must be specified for {string_rep} ')
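# Illustrative sketch (not part of the original module): parse one CRTF region string
# with the classes above. The region string itself is an arbitrary example.
def _example_parse_crtf_line():
    reg_str = "circle[[18h12m24s, -23d11m00s], 2.3arcsec], coord=J2000, color=green"
    shapes = CRTFParser(reg_str, errors='warn').shapes
    # Each Shape keeps the coordinate frame, parsed coordinates and meta dict;
    # to_regions() converts the list into Region objects.
    return shapes.to_regions()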
| 38.083333 | 120 | 0.560449 |
4a1ca17e3c2ca28e5b76e91bd2eac22604816350 | 401 | py | Python | run.py | bigrepedge/labpipestest | 0a2f4ee5ecb8b9f57152ba670fe87d5871b0e606 | ["MIT"] | null | null | null | run.py | bigrepedge/labpipestest | 0a2f4ee5ecb8b9f57152ba670fe87d5871b0e606 | ["MIT"] | null | null | null | run.py | bigrepedge/labpipestest | 0a2f4ee5ecb8b9f57152ba670fe87d5871b0e606 | ["MIT"] | null | null | null |
import logging
import os
logging.basicConfig(level=logging.INFO)
from app import app, opc_ua_machine
try:
if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
opc_ua_machine.start(virtual=True)
    app.run(host='localhost', port=5000, debug=True, ssl_context=('cert.pem', 'privkey.pem'))  # serve over HTTPS using a self-signed certificate and private key
finally:
opc_ua_machine.stop()
| 26.733333 | 158 | 0.735661 |
4a1ca38133907a6f6c1a928d0605fdb61c629fe1 | 5,448 | py | Python | LocalMercurial/mercurial/sslutil.py | l2dy/machg | 45695dced17431c914c69528f898d398dd695efe | ["BSD-3-Clause"] | null | null | null | LocalMercurial/mercurial/sslutil.py | l2dy/machg | 45695dced17431c914c69528f898d398dd695efe | ["BSD-3-Clause"] | null | null | null | LocalMercurial/mercurial/sslutil.py | l2dy/machg | 45695dced17431c914c69528f898d398dd695efe | ["BSD-3-Clause"] | 1 | 2021-12-16T23:31:37.000Z | 2021-12-16T23:31:37.000Z |
# sslutil.py - SSL handling for mercurial
#
# Copyright 2005, 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
# Copyright 2006, 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import os
from mercurial import util
from mercurial.i18n import _
try:
# avoid using deprecated/broken FakeSocket in python 2.6
import ssl
ssl_wrap_socket = ssl.wrap_socket
CERT_REQUIRED = ssl.CERT_REQUIRED
except ImportError:
CERT_REQUIRED = 2
import socket, httplib
def ssl_wrap_socket(sock, key_file, cert_file,
cert_reqs=CERT_REQUIRED, ca_certs=None):
if not util.safehasattr(socket, 'ssl'):
raise util.Abort(_('Python SSL support not found'))
if ca_certs:
raise util.Abort(_(
'certificate checking requires Python 2.6'))
ssl = socket.ssl(sock, key_file, cert_file)
return httplib.FakeSocket(sock, ssl)
def _verifycert(cert, hostname):
'''Verify that cert (in socket.getpeercert() format) matches hostname.
    CRLs are not handled.
Returns error message if any problems are found and None on success.
'''
if not cert:
return _('no certificate received')
dnsname = hostname.lower()
def matchdnsname(certname):
return (certname == dnsname or
'.' in dnsname and certname == '*.' + dnsname.split('.', 1)[1])
san = cert.get('subjectAltName', [])
if san:
certnames = [value.lower() for key, value in san if key == 'DNS']
for name in certnames:
if matchdnsname(name):
return None
if certnames:
return _('certificate is for %s') % ', '.join(certnames)
# subject is only checked when subjectAltName is empty
for s in cert.get('subject', []):
key, value = s[0]
if key == 'commonName':
try:
# 'subject' entries are unicode
certname = value.lower().encode('ascii')
except UnicodeEncodeError:
return _('IDN in certificate not supported')
if matchdnsname(certname):
return None
return _('certificate is for %s') % certname
return _('no commonName or subjectAltName found in certificate')
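# Illustrative sketch (not part of the original module): _verifycert accepts the dict
# layout produced by ssl.SSLSocket.getpeercert(). Both sample certificates below are
# made up for the demo.
def _example_verifycert():
    san_cert = {'subjectAltName': [('DNS', '*.example.com')]}
    assert _verifycert(san_cert, 'www.example.com') is None   # wildcard SAN matches
    cn_cert = {'subject': (((u'commonName', u'example.com'),),)}
    assert _verifycert(cn_cert, 'other.org') is not None      # mismatch is reported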
# CERT_REQUIRED means fetch the cert from the server all the time AND
# validate it against the CA store provided in web.cacerts.
#
# We COMPLETELY ignore CERT_REQUIRED on Python <= 2.5, as it's totally
# busted on those versions.
def sslkwargs(ui, host):
cacerts = ui.config('web', 'cacerts')
hostfingerprint = ui.config('hostfingerprints', host)
if cacerts and not hostfingerprint:
cacerts = util.expandpath(cacerts)
if not os.path.exists(cacerts):
raise util.Abort(_('could not find web.cacerts: %s') % cacerts)
return {'ca_certs': cacerts,
'cert_reqs': CERT_REQUIRED,
}
return {}
class validator(object):
def __init__(self, ui, host):
self.ui = ui
self.host = host
def __call__(self, sock):
host = self.host
cacerts = self.ui.config('web', 'cacerts')
hostfingerprint = self.ui.config('hostfingerprints', host)
if cacerts and not hostfingerprint:
msg = _verifycert(sock.getpeercert(), host)
if msg:
raise util.Abort(_('%s certificate error: %s '
'(use --insecure to connect '
'insecurely)') % (host, msg))
self.ui.debug('%s certificate successfully verified\n' % host)
else:
if getattr(sock, 'getpeercert', False):
peercert = sock.getpeercert(True)
peerfingerprint = util.sha1(peercert).hexdigest()
nicefingerprint = ":".join([peerfingerprint[x:x + 2]
for x in xrange(0, len(peerfingerprint), 2)])
if hostfingerprint:
if peerfingerprint.lower() != \
hostfingerprint.replace(':', '').lower():
raise util.Abort(_('invalid certificate for %s '
'with fingerprint %s') %
(host, nicefingerprint))
self.ui.debug('%s certificate matched fingerprint %s\n' %
(host, nicefingerprint))
else:
strict = self.ui.config('web', 'strictCAverification') != 'off'
if strict:
self.ui.warn(_('warning: %s certificate '
'with fingerprint %s not verified '
'(check hostfingerprints or web.cacerts '
'config setting)\n') %
(host, nicefingerprint))
else: # python 2.5 ?
if hostfingerprint:
raise util.Abort(_("host fingerprint for %s can't be "
"verified (Python too old)") % host)
self.ui.warn(_("warning: certificate for %s can't be "
"verified (Python too old)\n") % host)
| 40.962406 | 83 | 0.557819 |
4a1ca4c963b621cc70abb1ea57bdf4b809ab0b5d | 15,055 | py | Python | bundle_conda.py | davidpross/napari | 11f70f559159632c2af27697046b3d75a529f452 | ["BSD-3-Clause"] | 1 | 2021-12-14T14:07:40.000Z | 2021-12-14T14:07:40.000Z | bundle_conda.py | maweigert/napari | 48cdf4d1c4bcf6f76603e90b1c0c7498e2aba6c0 | ["BSD-3-Clause"] | null | null | null | bundle_conda.py | maweigert/napari | 48cdf4d1c4bcf6f76603e90b1c0c7498e2aba6c0 | ["BSD-3-Clause"] | 1 | 2019-01-12T21:04:14.000Z | 2019-01-12T21:04:14.000Z |
"""
Create napari installers using `constructor`.
It creates a `construct.yaml` file with the needed settings
and then runs `constructor`.
For more information, see Documentation> Developers> Packaging.
Some environment variables we use:
CONSTRUCTOR_APP_NAME:
in case you want to build a non-default distribution that is not
named `napari`
CONSTRUCTOR_INSTALLER_DEFAULT_PATH_STEM:
The last component of the default installation path. Defaults to
{CONSTRUCTOR_APP_NAME}-app-{CONSTRUCTOR_INSTALLER_VERSION}
CONSTRUCTOR_INSTALLER_VERSION:
Version for the installer, separate from the app being installed.
This has an effect on the default install locations!
CONSTRUCTOR_TARGET_PLATFORM:
conda-style platform (as in `platform` in `conda info -a` output)
CONSTRUCTOR_USE_LOCAL:
whether to use the local channel (populated by `conda-build` actions)
CONSTRUCTOR_CONDA_EXE:
when the target platform is not the same as the host, constructor
needs a path to a conda-standalone (or micromamba) executable for
that platform. needs to be provided in this env var in that case!
CONSTRUCTOR_SIGNING_IDENTITY:
Apple ID Installer Certificate identity (common name) that should
be use to productsign the resulting PKG (macOS only)
CONSTRUCTOR_NOTARIZATION_IDENTITY:
Apple ID Developer Certificate identity (common name) that should
be use to codesign some binaries bundled in the pkg (macOS only)
CONSTRUCTOR_SIGNING_CERTIFICATE:
Path to PFX certificate to sign the EXE installer on Windows
CONSTRUCTOR_PFX_CERTIFICATE_PASSWORD:
Password to unlock the PFX certificate. This is not used here but
it might be needed by constructor.
"""
import json
import os
import platform
import re
import subprocess
import sys
import zipfile
from argparse import ArgumentParser
from distutils.spawn import find_executable
from pathlib import Path
from tempfile import NamedTemporaryFile
from textwrap import dedent
from ruamel import yaml
APP = os.environ.get("CONSTRUCTOR_APP_NAME", "napari")
# bump this when something in the installer infrastructure changes
# note that this will affect the default installation path across platforms!
INSTALLER_VERSION = os.environ.get("CONSTRUCTOR_INSTALLER_VERSION", "0.1")
INSTALLER_DEFAULT_PATH_STEM = os.environ.get(
"CONSTRUCTOR_INSTALLER_DEFAULT_PATH_STEM", f"{APP}-app-{INSTALLER_VERSION}"
)
HERE = os.path.abspath(os.path.dirname(__file__))
WINDOWS = os.name == 'nt'
MACOS = sys.platform == 'darwin'
LINUX = sys.platform.startswith("linux")
if os.environ.get("CONSTRUCTOR_TARGET_PLATFORM") == "osx-arm64":
ARCH = "arm64"
else:
ARCH = (platform.machine() or "generic").lower().replace("amd64", "x86_64")
if WINDOWS:
EXT, OS = 'exe', 'Windows'
elif LINUX:
EXT, OS = 'sh', 'Linux'
elif MACOS:
EXT, OS = 'pkg', 'macOS'
else:
raise RuntimeError(f"Unrecognized OS: {sys.platform}")
def _version():
with open(os.path.join(HERE, "napari", "_version.py")) as f:
match = re.search(r'version\s?=\s?\'([^\']+)', f.read())
if match:
return match.groups()[0].split('+')[0]
OUTPUT_FILENAME = f"{APP}-{_version()}-{OS}-{ARCH}.{EXT}"
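# Illustrative sketch (not part of the original script): the regex inside _version()
# pulls the version string out of napari/_version.py and drops any local "+..." suffix.
# The sample file content below is an assumption for the demo.
def _example_version_regex():
    sample = "version = '0.4.12.dev68+gdd3a2afd'\n"
    match = re.search(r'version\s?=\s?\'([^\']+)', sample)
    return match.groups()[0].split('+')[0]  # -> '0.4.12.dev68'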
clean_these_files = []
def _use_local():
"""
Detect whether we need to build Napari locally
(dev snapshots). This env var is set in the GHA workflow.
"""
return os.environ.get("CONSTRUCTOR_USE_LOCAL")
def _generate_background_images(installer_type, outpath="resources"):
if installer_type == "sh":
# shell installers are text-based, no graphics
return
from PIL import Image
import napari
logo_path = Path(napari.__file__).parent / "resources" / "logo.png"
logo = Image.open(logo_path, "r")
global clean_these_files
if installer_type in ("exe", "all"):
sidebar = Image.new("RGBA", (164, 314), (0, 0, 0, 0))
sidebar.paste(logo.resize((101, 101)), (32, 180))
output = Path(outpath, "napari_164x314.png")
sidebar.save(output, format="png")
clean_these_files.append(output)
banner = Image.new("RGBA", (150, 57), (0, 0, 0, 0))
banner.paste(logo.resize((44, 44)), (8, 6))
output = Path(outpath, "napari_150x57.png")
banner.save(output, format="png")
clean_these_files.append(output)
if installer_type in ("pkg", "all"):
background = Image.new("RGBA", (1227, 600), (0, 0, 0, 0))
background.paste(logo.resize((148, 148)), (95, 418))
output = Path(outpath, "napari_1227x600.png")
background.save(output, format="png")
clean_these_files.append(output)
def _get_condarc():
# we need defaults for tensorflow and others on windows only
defaults = "- defaults" if WINDOWS else ""
prompt = "[napari]({default_env}) "
contents = dedent(
f"""
channels: #!final
- napari
- conda-forge
{defaults}
repodata_fns: #!final
- repodata.json
auto_update_conda: false #!final
channel_priority: strict #!final
env_prompt: '{prompt}' #! final
"""
)
# the undocumented #!final comment is explained here
# https://www.anaconda.com/blog/conda-configuration-engine-power-users
with NamedTemporaryFile(delete=False, mode="w+") as f:
f.write(contents)
return f.name
def _constructor(version=_version(), extra_specs=None):
"""
Create a temporary `construct.yaml` input file and
run `constructor`.
Parameters
----------
version: str
Version of `napari` to be built. Defaults to the
one detected by `setuptools-scm` and written to
`napari/_version.py`. Run `pip install -e .` to
generate that file if it can't be found.
extra_specs: list of str
Additional packages to be included in the installer.
A list of conda spec strings (`python`, `python=3`, etc)
is expected.
"""
constructor = find_executable("constructor")
if not constructor:
raise RuntimeError("Constructor must be installed.")
if extra_specs is None:
extra_specs = []
# TODO: Temporary while pyside2 is not yet published for arm64
target_platform = os.environ.get("CONSTRUCTOR_TARGET_PLATFORM")
ARM64 = target_platform == "osx-arm64"
if ARM64:
napari = f"napari={version}=*pyqt*"
else:
napari = f"napari={version}=*pyside*"
base_specs = [
f"python={sys.version_info.major}.{sys.version_info.minor}.*",
"conda",
"mamba",
"pip",
]
napari_specs = [
napari,
f"napari-menu={version}",
f"python={sys.version_info.major}.{sys.version_info.minor}.*",
"conda",
"mamba",
"pip",
] + extra_specs
channels = (
["napari/label/nightly"]
+ (["andfoy"] if ARM64 else []) # TODO: temporary
+ ["napari/label/bundle_tools", "conda-forge"]
)
empty_file = NamedTemporaryFile(delete=False)
condarc = _get_condarc()
definitions = {
"name": APP,
"company": "Napari",
"reverse_domain_identifier": "org.napari",
"version": version,
"channels": channels,
"conda_default_channels": ["conda-forge"],
"installer_filename": OUTPUT_FILENAME,
"initialize_by_default": False,
"license_file": os.path.join(HERE, "resources", "bundle_license.rtf"),
"specs": base_specs,
"extra_envs": {f"napari-{version}": {"specs": napari_specs}},
"menu_packages": [
"napari-menu",
],
"extra_files": {
"resources/bundle_readme.md": "README.txt",
empty_file.name: ".napari_is_bundled_constructor",
condarc: ".condarc",
},
}
if _use_local():
definitions["channels"].insert(0, "local")
if LINUX:
definitions["default_prefix"] = os.path.join(
"$HOME", ".local", INSTALLER_DEFAULT_PATH_STEM
)
definitions["license_file"] = os.path.join(
HERE, "resources", "bundle_license.txt"
)
definitions["installer_type"] = "sh"
if MACOS:
# These two options control the default install location:
# ~/<default_location_pkg>/<pkg_name>
definitions["pkg_name"] = INSTALLER_DEFAULT_PATH_STEM
definitions["default_location_pkg"] = "Library"
definitions["installer_type"] = "pkg"
definitions["welcome_image"] = os.path.join(
HERE, "resources", "napari_1227x600.png"
)
welcome_text_tmpl = (
Path(HERE) / "resources" / "osx_pkg_welcome.rtf.tmpl"
).read_text()
welcome_file = Path(HERE) / "resources" / "osx_pkg_welcome.rtf"
clean_these_files.append(welcome_file)
welcome_file.write_text(
welcome_text_tmpl.replace("__VERSION__", version)
)
definitions["welcome_file"] = str(welcome_file)
definitions["conclusion_text"] = ""
definitions["readme_text"] = ""
signing_identity = os.environ.get("CONSTRUCTOR_SIGNING_IDENTITY")
if signing_identity:
definitions["signing_identity_name"] = signing_identity
notarization_identity = os.environ.get(
"CONSTRUCTOR_NOTARIZATION_IDENTITY"
)
if notarization_identity:
definitions["notarization_identity_name"] = notarization_identity
if WINDOWS:
definitions["conda_default_channels"].append("defaults")
definitions.update(
{
"welcome_image": os.path.join(
HERE, "resources", "napari_164x314.png"
),
"header_image": os.path.join(
HERE, "resources", "napari_150x57.png"
),
"icon_image": os.path.join(
HERE, "napari", "resources", "icon.ico"
),
"register_python_default": False,
"default_prefix": os.path.join(
'%LOCALAPPDATA%', INSTALLER_DEFAULT_PATH_STEM
),
"default_prefix_domain_user": os.path.join(
'%LOCALAPPDATA%', INSTALLER_DEFAULT_PATH_STEM
),
"default_prefix_all_users": os.path.join(
'%ALLUSERSPROFILE%', INSTALLER_DEFAULT_PATH_STEM
),
"check_path_length": False,
"installer_type": "exe",
}
)
signing_certificate = os.environ.get("CONSTRUCTOR_SIGNING_CERTIFICATE")
if signing_certificate:
definitions["signing_certificate"] = signing_certificate
if definitions.get("welcome_image") or definitions.get("header_image"):
_generate_background_images(
definitions.get("installer_type", "all"), outpath="resources"
)
clean_these_files.append("construct.yaml")
clean_these_files.append(empty_file.name)
clean_these_files.append(condarc)
# TODO: temporarily patching password - remove block when the secret has been fixed
# (I think it contains an ending newline or something like that, copypaste artifact?)
pfx_password = os.environ.get("CONSTRUCTOR_PFX_CERTIFICATE_PASSWORD")
if pfx_password:
os.environ[
"CONSTRUCTOR_PFX_CERTIFICATE_PASSWORD"
] = pfx_password.strip()
with open("construct.yaml", "w") as fin:
yaml.dump(definitions, fin, default_flow_style=False)
args = [constructor, "-v", "--debug", "."]
conda_exe = os.environ.get("CONSTRUCTOR_CONDA_EXE")
if target_platform and conda_exe:
args += ["--platform", target_platform, "--conda-exe", conda_exe]
env = os.environ.copy()
env["CONDA_CHANNEL_PRIORITY"] = "strict"
print(f"Calling {args} with these definitions:")
print(yaml.dump(definitions, default_flow_style=False))
subprocess.check_call(args, env=env)
return OUTPUT_FILENAME
def licenses():
try:
with open("info.json") as f:
info = json.load(f)
except FileNotFoundError:
print(
"!! Use `constructor --debug` to write info.json and get licenses",
file=sys.stderr,
)
raise
zipname = f"licenses.{OS}-{ARCH}.zip"
output_zip = zipfile.ZipFile(
zipname, mode="w", compression=zipfile.ZIP_DEFLATED
)
output_zip.write("info.json")
for package_id, license_info in info["_licenses"].items():
package_name = package_id.split("::", 1)[1]
for license_type, license_files in license_info.items():
for i, license_file in enumerate(license_files, 1):
arcname = (
f"{package_name}.{license_type.replace(' ', '_')}.{i}.txt"
)
output_zip.write(license_file, arcname=arcname)
output_zip.close()
return zipname
def main(extra_specs=None):
try:
_constructor(extra_specs=extra_specs)
finally:
for path in clean_these_files:
try:
os.unlink(path)
except OSError:
print("! Could not remove", path)
assert Path(OUTPUT_FILENAME).exists()
return OUTPUT_FILENAME
def cli(argv=None):
p = ArgumentParser(argv)
p.add_argument(
"--version",
action="store_true",
help="Print local napari version and exit.",
)
p.add_argument(
"--installer-version",
action="store_true",
help="Print installer version and exit.",
)
p.add_argument(
"--arch",
action="store_true",
help="Print machine architecture tag and exit.",
)
p.add_argument(
"--ext",
action="store_true",
help="Print installer extension for this platform and exit.",
)
p.add_argument(
"--artifact-name",
action="store_true",
help="Print computed artifact name and exit.",
)
p.add_argument(
"--extra-specs",
nargs="+",
help="One or more extra conda specs to add to the installer",
)
p.add_argument(
"--licenses",
action="store_true",
help="Post-process licenses AFTER having built the installer. "
"This must be run as a separate step.",
)
p.add_argument(
"--images",
action="store_true",
help="Generate background images from the logo (test only)",
)
return p.parse_args()
if __name__ == "__main__":
args = cli()
if args.version:
print(_version())
sys.exit()
if args.installer_version:
print(INSTALLER_VERSION)
sys.exit()
if args.arch:
print(ARCH)
sys.exit()
if args.ext:
print(EXT)
sys.exit()
if args.artifact_name:
print(OUTPUT_FILENAME)
sys.exit()
if args.licenses:
print(licenses())
sys.exit()
if args.images:
_generate_background_images()
sys.exit()
print('created', main(extra_specs=args.extra_specs))
| 33.455556 | 89 | 0.62916 |
4a1ca4f575298f94c5ac3b87d6385a265953b2c7 | 887 | py | Python | config.py | FelipeSPB/client_weather_api | 297a3801c76fed0abc338a4ebab8ddc499957e4a | ["Apache-2.0"] | null | null | null | config.py | FelipeSPB/client_weather_api | 297a3801c76fed0abc338a4ebab8ddc499957e4a | ["Apache-2.0"] | 2 | 2020-06-05T21:41:49.000Z | 2021-06-10T21:36:16.000Z | config.py | FelipeSPB/client_weather_api | 297a3801c76fed0abc338a4ebab8ddc499957e4a | ["Apache-2.0"] | null | null | null |
from pathlib import Path
import requests
def creating_folder():
folder = Path("config/")
folder.mkdir(parents=True, exist_ok=True)
def creating_command_install_requirements():
Path('config/install.bat').touch()
write = Path('./config/install.bat')
write.write_text('cd..\npip install -r requirements.txt')
def config_token():
Path('./api/models/token.py').touch()
    token = input('Type your Climatempo API token in double quotes (ex: "your_token"): ')
write = Path('./api/models/token.py')
write.write_text('token = {}\ntoken=str(token)'.format(token))
def config_run_server():
Path('config/run_server.bat').touch()
write = Path('./config/run_server.bat')
write.write_text('cd api/\npython app.py')
if __name__ == '__main__':
creating_folder()
creating_command_install_requirements()
config_token()
config_run_server()
| 28.612903 | 88 | 0.692221 |
4a1ca656960daf117f5af95379ff799560b6caaa | 972 | py | Python | models/download.py | alexlee-gk/citysim3d | 37206572100e037f211f9cf1f947108765351c3d | ["MIT"] | 27 | 2016-11-24T04:42:23.000Z | 2021-10-15T01:43:51.000Z | models/download.py | alexlee-gk/citysim3d | 37206572100e037f211f9cf1f947108765351c3d | ["MIT"] | 1 | 2020-05-02T15:34:25.000Z | 2020-05-02T15:34:25.000Z | models/download.py | alexlee-gk/citysim3d | 37206572100e037f211f9cf1f947108765351c3d | ["MIT"] | 8 | 2017-04-05T12:54:07.000Z | 2021-08-03T06:46:40.000Z |
#!/usr/bin/env python
import os
import subprocess
import argparse
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
import tarfile
assert "CITYSIM3D_DIR" in os.environ
parser = argparse.ArgumentParser()
parser.add_argument("--rsync", action="store_true")
args = parser.parse_args()
local_dir = os.path.expandvars("${CITYSIM3D_DIR}")
if args.rsync:
remote_files = "/var/www/citysim3d/models{,.mf}"
subprocess.check_call("rsync -azvu pabbeel@rll.berkeley.edu:%s %s" % (remote_files, local_dir), shell=True)
else:
print("downloading tar file (this might take a while)")
remote_fname = "http://rll.berkeley.edu/citysim3d/models.tar.gz"
local_fname = os.path.join(local_dir, "models.tar.gz")
urlinfo = urlopen(remote_fname)
with open(local_fname, "wb") as fh:
fh.write(urlinfo.read())
print("unpacking file")
with tarfile.open(local_fname) as tar:
tar.extractall(local_dir)
| 29.454545 | 111 | 0.720165 |
4a1ca6a6d2e56f344ac200e338aa9cf204e711f0 | 884 | py | Python | src/azure-cli/azure/cli/command_modules/marketplaceordering/generated/_client_factory.py | YuanyuanNi/azure-cli | 63844964374858bfacd209bfe1b69eb456bd64ca | ["MIT"] | 3,287 | 2016-07-26T17:34:33.000Z | 2022-03-31T09:52:13.000Z | src/azure-cli/azure/cli/command_modules/marketplaceordering/generated/_client_factory.py | YuanyuanNi/azure-cli | 63844964374858bfacd209bfe1b69eb456bd64ca | ["MIT"] | 19,206 | 2016-07-26T07:04:42.000Z | 2022-03-31T23:57:09.000Z | src/azure-cli/azure/cli/command_modules/marketplaceordering/generated/_client_factory.py | YuanyuanNi/azure-cli | 63844964374858bfacd209bfe1b69eb456bd64ca | ["MIT"] | 2,575 | 2016-07-26T06:44:40.000Z | 2022-03-31T22:56:06.000Z |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
def cf_marketplaceordering_cl(cli_ctx, *_):
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.mgmt.marketplaceordering import MarketplaceOrderingAgreements
return get_mgmt_service_client(cli_ctx,
MarketplaceOrderingAgreements)
def cf_marketplace_agreement(cli_ctx, *_):
return cf_marketplaceordering_cl(cli_ctx).marketplace_agreements
| 42.095238 | 78 | 0.640271 |
4a1ca6ca8e4b8dd24156c460547b3c02318564f5 | 6,035 | py | Python | arcade/examples/perf_test/stress_test_draw_shapes.py | LiorAvrahami/arcade | fce254a9eb89629de1f99d57a63759a2953184e9 | ["MIT"] | 1 | 2020-04-04T01:03:24.000Z | 2020-04-04T01:03:24.000Z | arcade/examples/perf_test/stress_test_draw_shapes.py | LiorAvrahami/arcade | fce254a9eb89629de1f99d57a63759a2953184e9 | ["MIT"] | 1 | 2019-08-11T18:47:27.000Z | 2019-08-12T03:02:11.000Z | arcade/examples/perf_test/stress_test_draw_shapes.py | LiorAvrahami/arcade | fce254a9eb89629de1f99d57a63759a2953184e9 | ["MIT"] | null | null | null |
"""
Drawing Shapes Stress Test
Simple program to test how fast we can draw simple shapes (lines).
Artwork from http://kenney.nl
If Python and Arcade are installed, this example can be run from the command line with:
python -m arcade.examples.perf_test.stress_test_draw_shapes
"""
import random
import arcade
import os
import timeit
import time
import collections
import pyglet
# --- Constants ---
START_COUNT = 10
STOP_COUNT = 200
SHAPE_INCREMENT = 10
RESULTS_FILE = "stress_test_draw_shapes.csv"
SCREEN_WIDTH = 1800
SCREEN_HEIGHT = 1000
SCREEN_TITLE = "Moving Sprite Stress Test"
TOP_MARGIN = 40
class FPSCounter:
def __init__(self):
self.time = time.perf_counter()
self.frame_times = collections.deque(maxlen=60)
def tick(self):
t1 = time.perf_counter()
dt = t1 - self.time
self.time = t1
self.frame_times.append(dt)
def get_fps(self):
total_time = sum(self.frame_times)
if total_time == 0:
return 0
else:
return len(self.frame_times) / sum(self.frame_times)
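# Illustrative sketch (not part of the original example): FPSCounter just averages the
# last 60 frame times. The simulated 10 ms frames below are an assumption for the demo.
def _example_fps_counter():
    fps = FPSCounter()
    for _ in range(10):
        time.sleep(0.01)  # pretend each frame takes about 10 ms
        fps.tick()
    return fps.get_fps()  # roughly 100 FPS for 10 ms frames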
class Line:
def __init__(self):
self.start_x = random.randrange(SCREEN_WIDTH)
self.start_y = random.randrange(SCREEN_HEIGHT - TOP_MARGIN)
self.end_x = random.randrange(SCREEN_WIDTH)
self.end_y = random.randrange(SCREEN_HEIGHT - TOP_MARGIN)
def draw(self):
arcade.draw_line(self.start_x, self.start_y,
self.end_x, self.end_y, arcade.color.WOOD_BROWN, 4)
class MyGame(arcade.Window):
""" Our custom Window Class"""
def __init__(self):
""" Initializer """
# Call the parent class initializer
super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
# Set the working directory (where we expect to find files) to the same
# directory this .py file is in. You can leave this out of your own
# code, but it is needed to easily run the examples using "python -m"
# as mentioned at the top of this program.
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
# Variables that will hold sprite lists
self.shape_list = []
self.processing_time = 0
self.draw_time = 0
self.program_start_time = timeit.default_timer()
self.sprite_count_list = []
self.fps_list = []
self.processing_time_list = []
self.drawing_time_list = []
self.last_fps_reading = 0
self.fps = FPSCounter()
arcade.set_background_color(arcade.color.AMAZON)
# Open file to save timings
self.results_file = open(RESULTS_FILE, "w")
def add_shapes(self):
        # Create the line shapes
for i in range(SHAPE_INCREMENT):
shape = Line()
# Add the coin to the lists
self.shape_list.append(shape)
def setup(self):
""" Set up the game and initialize the variables. """
pass
def on_draw(self):
""" Draw everything """
# Start timing how long this takes
draw_start_time = timeit.default_timer()
arcade.start_render()
for shape in self.shape_list:
shape.draw()
# Display info on sprites
output = f"Shape count: {len(self.shape_list):,}"
arcade.draw_text(output, 20, SCREEN_HEIGHT - 20, arcade.color.BLACK, 16)
# Display timings
output = f"Processing time: {self.processing_time:.3f}"
arcade.draw_text(output, 20, SCREEN_HEIGHT - 40, arcade.color.BLACK, 16)
output = f"Drawing time: {self.draw_time:.3f}"
arcade.draw_text(output, 20, SCREEN_HEIGHT - 60, arcade.color.BLACK, 16)
fps = self.fps.get_fps()
output = f"FPS: {fps:3.0f}"
arcade.draw_text(output, 20, SCREEN_HEIGHT - 80, arcade.color.BLACK, 16)
self.draw_time = timeit.default_timer() - draw_start_time
self.fps.tick()
def update(self, delta_time):
# Start update timer
start_time = timeit.default_timer()
# self.shape_list.update()
# Save the time it took to do this.
self.processing_time = timeit.default_timer() - start_time
# Total time program has been running
total_program_time = int(timeit.default_timer() - self.program_start_time)
# Print out stats, or add more sprites
if total_program_time > self.last_fps_reading:
self.last_fps_reading = total_program_time
# It takes the program a while to "warm up", so the first
# few seconds our readings will be off. So wait some time
# before taking readings
if total_program_time > 5:
# We want the program to run for a while before taking
# timing measurements. We don't want the time it takes
# to add new sprites to be part of that measurement. So
# make sure we have a clear second of nothing but
# running the sprites, and not adding the sprites.
if total_program_time % 2 == 1:
# Take timings
output = f"{total_program_time}, {len(self.shape_list)}, {self.fps.get_fps():.1f}, " \
f"{self.processing_time:.4f}, {self.draw_time:.4f}\n"
self.results_file.write(output)
print(output, end="")
if len(self.shape_list) >= STOP_COUNT:
pyglet.app.exit()
return
self.sprite_count_list.append(len(self.shape_list))
self.fps_list.append(round(self.fps.get_fps(), 1))
self.processing_time_list.append(self.processing_time)
self.drawing_time_list.append(self.draw_time)
                    # Now add more shapes
self.add_shapes()
def main():
""" Main method """
window = MyGame()
window.setup()
arcade.run()
if __name__ == "__main__":
main()
| 31.108247 | 106 | 0.611599 |
4a1ca6f79e897db232a0d945dc144bca6ef3b92b | 7,037 | py | Python | Yachu.py | SeminKim/YachtDiscord | 56f30559868c45b754ae37fdcb68de062a7b8a06 | ["MIT"] | 1 | 2021-01-04T09:15:25.000Z | 2021-01-04T09:15:25.000Z | Yachu.py | SeminKim/YachtDiscord | 56f30559868c45b754ae37fdcb68de062a7b8a06 | ["MIT"] | 2 | 2020-07-23T16:16:55.000Z | 2020-08-01T06:54:45.000Z | Yachu.py | SeminKim/YachtDiscord | 56f30559868c45b754ae37fdcb68de062a7b8a06 | ["MIT"] | null | null | null |
import random
import discord
class Yachu():
def __init__(self):
self.save_log = True # Change to False if you don't want logging
self.score = [0] * 15
self.dice = [0] * 5
self.locked = [False] * 5
self.phase = 0
self.isAlive = [True] * 12
self.turn = 0
print("새 야추게임 생성")
if self.save_log:
with open(f'data/log.txt', 'a') as f:
f.write(f'-------------------------NEW YACHU-------------------------\n')
return
def make_log(self):
if self.save_log:
with open(f'data/log.txt', 'a') as f:
f.write(f'TURN:{self.turn} PHASE:{self.phase} DICE:{self.dice}\n')
return
def lock(self, num):
if not 0 < num < 6: raise ValueError
self.locked[num - 1] = True
def unlock(self, num):
if not 0 < num < 6: raise ValueError
self.locked[num - 1] = False
def lockAll(self):
self.locked = [True] * 5
def unlockAll(self):
self.locked = [False] * 5
def isAllLocked(self):
return self.locked == [True] * 5
def __setDice__(self, s):
self.dice = s
def rollDice(self):
assert self.phase < 3
self.phase += 1
for i in range(5):
if not self.locked[i]:
self.dice[i] = random.randint(1, 6)
self.make_log()
return str(self.dice)
def getScoreBoard(self, name=None):
def valueFiller(ind):
if self.isAlive[ind - 1]:
return '0*'
else:
if ind < 7:
return str(self.score[ind - 1])
else:
return str(self.score[ind + 1])
if name is None:
embed = discord.Embed(title=f"점수판 ({self.turn}/12)", color=0xff0000)
else:
embed = discord.Embed(title=f"{name}님의 점수판 ({self.turn}/12)", color=0xff0000)
embed.add_field(name="1. Aces", value=valueFiller(1), inline=True)
embed.add_field(name="2. Deuces", value=valueFiller(2), inline=True)
embed.add_field(name="3. Threes", value=valueFiller(3), inline=True)
embed.add_field(name="4. Fours", value=valueFiller(4), inline=True)
embed.add_field(name="5. Fives", value=valueFiller(5), inline=True)
embed.add_field(name="6. Sixes", value=valueFiller(6), inline=True)
embed.add_field(
name=f'---------------------------------------\nSubtotal: {self.score[6]} Bonus: {self.score[7]}',
value="(63점 이상이면 보너스 35점)", inline=False)
embed.add_field(name="---------------------------------------", value="특수족보", inline=False)
embed.add_field(name="A. Choices", value=valueFiller(7), inline=True)
embed.add_field(name="B. Four Cards", value=valueFiller(8), inline=True)
embed.add_field(name="C. Full House", value=valueFiller(9), inline=True)
embed.add_field(name="D. S. Straight", value=valueFiller(10), inline=True)
embed.add_field(name="E. L. Straight", value=valueFiller(11), inline=True)
embed.add_field(name="F. Yacht", value=valueFiller(12), inline=True)
embed.add_field(name="---------------------------------------\nTotal", value=str(self.score[14]), inline=True)
return embed
def subtotal(self):
return sum(self.score[:6])
def checkBonus(self):
if self.subtotal() >= 63:
return 35
return 0
def diceSum(self):
temp = 0
for die in self.dice:
temp += die
return temp
def isFourCards(self):
tempDice = self.dice[:]
tempDice.sort()
return tempDice[0] == tempDice[1] == tempDice[2] == tempDice[3] or tempDice[1] == tempDice[2] == tempDice[3] == \
tempDice[4]
def isFullHouse(self):
tempDice = self.dice[:]
tempDice.sort()
return (tempDice[0] == tempDice[1] == tempDice[2] and tempDice[3] == tempDice[4]) \
or (tempDice[0] == tempDice[1] and tempDice[2] == tempDice[3] == tempDice[4])
def isSmallStraight(self):
numcount = [0] * 6
for i in range(6):
if i + 1 in self.dice: numcount[i] = 1
if numcount[0] * numcount[1] * numcount[2] * numcount[3] == 1: return True
if numcount[1] * numcount[2] * numcount[3] * numcount[4] == 1: return True
if numcount[2] * numcount[3] * numcount[4] * numcount[5] == 1: return True
return False
def isLargeStraight(self):
tempDice = self.dice[:]
tempDice.sort()
if tempDice == [1, 2, 3, 4, 5] or tempDice == [2, 3, 4, 5, 6]: return True
return False
def setScore(self, ind):
if ind < 6:
temp = 0
for i in self.dice:
if i == ind + 1: temp += i
self.score[ind] = temp
self.score[6] = self.subtotal()
self.score[7] = self.checkBonus()
elif ind == 6:
self.score[8] = self.diceSum()
elif ind == 7:
if self.isFourCards():
self.score[9] = self.diceSum()
else:
self.score[9] = 0
elif ind == 8:
if self.isFullHouse():
self.score[10] = self.diceSum()
else:
self.score[10] = 0
elif ind == 9:
if self.isSmallStraight():
self.score[11] = 15
else:
self.score[11] = 0
elif ind == 10:
if self.isLargeStraight():
self.score[12] = 30
else:
self.score[12] = 0
elif ind == 11:
if self.dice[0] == self.dice[1] == self.dice[2] == self.dice[3] == self.dice[4]:
self.score[13] = 50
else:
self.score[13] = 0
else:
return
self.score[14] = sum(self.score[6:14])
self.phase = 0
self.locked = [False] * 5
self.isAlive[ind] = False
self.turn += 1
def isAvailable(self, ind):
try:
if not 0 <= ind <= 11: return False
return self.isAlive[ind]
except:
return False
def getTurn(self):
return self.turn
def getPhase(self):
return self.phase
def getTotal(self):
return self.score[14]
# demo for console
'''
def main():
yachu = Yachu()
for i in range(12):
while yachu.phase < 3:
print(yachu.getScoreBoard())
print(yachu.rollDice())
if yachu.phase == 3:
            ind = int(input('Select a slot to score : '))
else:
            ind = int(input('Select a slot to score, 0 to re-roll : '))
if ind == 0:
for i in range(5): yachu.unlockAll()
            temp = input('Select dice to lock - ex) 1 2 4 : ').split()
for i in temp: yachu.lock(int(i))
else:
yachu.setScore(ind)
break
main()
'''
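# Minimal usage sketch (editor's addition; method names are taken from the class above):
#   game = Yachu()
#   game.rollDice()              # phase 1: roll all five dice
#   game.lock(1); game.lock(3)   # hold dice #1 and #3 for the next roll
#   game.rollDice()              # re-roll the unlocked dice
#   game.setScore(0)             # bank the current dice as "Aces" (index 0)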
| 31.275556 | 123 | 0.500355 |
4a1ca72fb30db0cd94eb6ef5562f3b2013902a44 | 10,325 | py | Python | test/tool_shed/functional/test_1430_repair_installed_repository.py | bopopescu/phyG | 023f505b705ab953f502cbc55e90612047867583 | ["CC-BY-3.0"]
stars: null | issues: null | forks: 1 (2020-07-25T21:03:18.000Z to 2020-07-25T21:03:18.000Z)
from tool_shed.base.twilltestcase import ShedTwillTestCase, common, os
import tool_shed.base.test_db_util as test_db_util
import logging
log = logging.getLogger( __name__ )
category_name = 'Test 1430 Repair installed repository'
category_description = 'Test script 1430 for repairing an installed repository.'
filter_repository_name = 'filter_1430'
column_repository_name = 'column_1430'
filter_repository_description = "Galaxy's filter tool for test 1430"
column_repository_description = 'Add a value as a new column'
filter_repository_long_description = '%s: %s' % ( filter_repository_name, filter_repository_description )
column_repository_long_description = '%s: %s' % ( column_repository_name, column_repository_description )
'''
In the Tool Shed:
1) Create and populate the filter_1430 repository
2) Create and populate the column_1430 repository
3) Upload a repository_dependencies.xml file to the column_1430 repository that creates a repository dependency on the filter_1430 repository.
In Galaxy:
1) Install the column_1430 repository, making sure to check the checkbox to Handle repository dependencies so that the filter
repository is also installed. Make sure to install the repositories in a specified section of the tool panel.
2) Uninstall the filter_1430 repository.
3) Repair the column_1430 repository.
4) Make sure the filter_1430 repository is reinstalled and the tool is loaded into the tool panel in the same section specified in step 1.
'''
class TestRepairRepository( ShedTwillTestCase ):
'''Test repairing an installed repository.'''
def test_0000_initiate_users_and_category( self ):
"""Create necessary user accounts and login as an admin user."""
self.logout()
self.login( email=common.admin_email, username=common.admin_username )
admin_user = test_db_util.get_user( common.admin_email )
assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
admin_user_private_role = test_db_util.get_private_role( admin_user )
self.create_category( name=category_name, description=category_description )
self.logout()
self.login( email=common.test_user_2_email, username=common.test_user_2_name )
test_user_2 = test_db_util.get_user( common.test_user_2_email )
assert test_user_2 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_2_email
test_user_2_private_role = test_db_util.get_private_role( test_user_2 )
self.logout()
self.login( email=common.test_user_1_email, username=common.test_user_1_name )
test_user_1 = test_db_util.get_user( common.test_user_1_email )
assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email
test_user_1_private_role = test_db_util.get_private_role( test_user_1 )
def test_0005_create_filter_repository( self ):
'''Create and populate the filter_1430 repository.'''
'''
This is step 1 - Create and populate the filter_1430 repository.
This repository will be depended on by the column_1430 repository.
'''
category = test_db_util.get_category_by_name( category_name )
repository = self.get_or_create_repository( name=filter_repository_name,
description=filter_repository_description,
long_description=filter_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
self.upload_file( repository,
filename='filtering/filtering_1.1.0.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Populate filter_1430 with version 1.1.0.',
strings_displayed=[],
strings_not_displayed=[] )
def test_0010_create_column_repository( self ):
'''Create and populate the column_1430 repository.'''
'''
This is step 2 - Create and populate the column_1430 repository.
This repository will depend on the filter_1430 repository.
'''
category = test_db_util.get_category_by_name( category_name )
repository = self.get_or_create_repository( name=column_repository_name,
description=column_repository_description,
long_description=column_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
self.upload_file( repository,
filename='column_maker/column_maker.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Populate column_1430 with tool definitions.',
strings_displayed=[],
strings_not_displayed=[] )
def test_0015_create_repository_dependency( self ):
'''Create a dependency on filter_1430.'''
'''
This is step 3 - Upload a repository_dependencies.xml file to the column_1430 repository that creates a repository
dependency on the filter_1430 repository.
'''
column_repository = test_db_util.get_repository_by_name_and_owner( 'column_1430', common.test_user_1_name )
filter_repository = test_db_util.get_repository_by_name_and_owner( 'filter_1430', common.test_user_1_name )
tool_shed_url = self.url
name = filter_repository.name
owner = filter_repository.user.username
changeset_revision = self.get_repository_tip( filter_repository )
repository_dependency_tuple = ( tool_shed_url, name, owner, changeset_revision )
filepath = self.generate_temp_path( '1430_repository_dependency' )
self.create_repository_dependency( column_repository, [ repository_dependency_tuple ], filepath=filepath )
def test_0020_install_column_repository( self ):
'''Install the column_1430 repository into Galaxy.'''
'''
This is step 1 (galaxy side) - Install the column_1430 repository, making sure to check the checkbox to
handle repository dependencies so that the filter_1430 repository is also installed. Make sure to install
the repositories in a specified section of the tool panel.
'''
self.galaxy_logout()
self.galaxy_login( email=common.admin_email, username=common.admin_username )
post_submit_strings_displayed = [ 'column_1430', 'filter_1430' ]
self.install_repository( 'column_1430',
common.test_user_1_name,
category_name,
new_tool_panel_section='repair',
post_submit_strings_displayed=post_submit_strings_displayed,
install_tool_dependencies=False,
install_repository_dependencies=True )
def test_0025_uninstall_filter_repository( self ):
'''Uninstall the filter_1430 repository from Galaxy.'''
'''
This is step 2 - Uninstall the filter_1430 repository.
'''
installed_repository = test_db_util.get_installed_repository_by_name_owner( 'filter_1430', common.test_user_1_name )
self.uninstall_repository( installed_repository, remove_from_disk=True )
strings_not_displayed = [ 'filter_1430',
"Galaxy's filter tool for test 1430",
installed_repository.installed_changeset_revision ]
self.display_galaxy_browse_repositories_page( strings_not_displayed=strings_not_displayed )
def test_0030_repair_column_repository( self ):
'''Repair the column_1430 repository.'''
'''
This is step 3 - Repair the column_1430 repository.
'''
column_repository = test_db_util.get_installed_repository_by_name_owner( 'column_1430', common.test_user_1_name )
self.repair_installed_repository( column_repository )
def test_0035_verify_tool_panel_section( self ):
'''Check the tool panel section after repairing.'''
'''
This is step 4 - Make sure the filter_1430 repository is reinstalled and the tool is loaded into the tool panel
in the same section specified in step 1.
'''
filter_repository = test_db_util.get_installed_repository_by_name_owner( 'filter_1430', common.test_user_1_name )
strings_displayed = [ 'filter_1430',
"Galaxy's filter tool for test 1430",
filter_repository.installed_changeset_revision ]
self.display_galaxy_browse_repositories_page( strings_displayed=strings_displayed )
# Uninstall the filter repository again, so that the tool panel section metadata gets populated.
self.uninstall_repository( filter_repository, remove_from_disk=True )
test_db_util.ga_refresh( filter_repository )
tool_panel_section_metadata = filter_repository.metadata[ 'tool_panel_section' ]
for tool_id in tool_panel_section_metadata:
for panel_section in tool_panel_section_metadata[ tool_id ]:
assert panel_section[ 'name' ] == 'repair', 'Tool %s in tool panel section %s after repair, expected %s.' % \
( tool_id, panel_section[ 'name' ], 'repair' )
| 57.044199 | 142 | 0.656659 |
4a1ca8842e2993f48cb70c8b03b37f181133c991 | 2,164 | py | Python | SPY-games/code.py | Ruchita102/ga-learner-dsmp-repo | 741b96019b2aaf730efd3b7d1b17965b4da5e743 | ["MIT"]
stars: null | issues: null | forks: null
# --------------
def read_file(path):
file=open(path,'r')
sentence=file.readline()
file.close()
return sentence
sample_message=read_file(file_path)
print(sample_message)
# --------------
#Code starts here
message_1=read_file(file_path_1)
message_2=read_file(file_path_2)
print(message_1,message_2)
def fuse_msg(message_a,message_b):
quotient=int(message_b)//int(message_a)
return str(quotient)
secret_msg_1=fuse_msg(message_1,message_2)
print(secret_msg_1)
# --------------
message_3=read_file(file_path_3)
print(message_3)
def substitute_msg(message_c):
if message_c== "red":
sub='Army General'
return sub
elif message_c== "Green":
sub='Data Scientist'
return sub
elif message_c== "Blue":
sub='Marine Biologist'
return sub
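# Note (editor's addition): substitute_msg returns None when message_c matches none
# of the branches above; the comparison is case-sensitive ('red' lower case,
# 'Green'/'Blue' capitalised), which presumably matches the exercise's input files.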
secret_msg_2=substitute_msg(message_3)
print(secret_msg_2)
# --------------
# File path for message 4 and message 5
file_path_4
file_path_5
#Code starts here
message_4=read_file(file_path_4 )
message_5=read_file(file_path_5 )
print(message_4,message_5)
def compare_msg(message_d ,message_e):
a_list=message_d.split()
b_list=message_e.split()
c_list=(x for x in a_list if x not in b_list)
final_msg=" ".join(c_list)
return final_msg
secret_msg_3=compare_msg(message_4,message_5)
print(secret_msg_3)
# --------------
#Code starts here
message_6=read_file(file_path_6)
print(message_6)
def extract_msg(message_f):
a_list=message_f.split()
even_word=lambda x:len(x)%2==0
b_list=list(filter(even_word,a_list))
final_msg=" ".join(b_list)
return final_msg
secret_msg_4=extract_msg(message_6)
print(secret_msg_4)
# --------------
#Secret message parts in the correct order
message_parts=[secret_msg_3, secret_msg_1, secret_msg_4, secret_msg_2]
final_path= user_data_dir + '/secret_message.txt'
#Code starts here
secret_msg=" ".join(message_parts)
print(secret_msg)
def write_file(secret_msg,path):
f=open(path,"a+")
f.write(secret_msg)
f.close()
write_file(secret_msg,final_path)
| 21.64 | 71 | 0.67329 |
4a1ca8847fb8d1c8c952447b9329ef53703253a0 | 12,762 | py | Python | fHDHR_web/api/tuners.py | alexmerm/fHDHR | 586ffe5540a69da1430bec3dbbdcc8e86232fd03 | ["WTFPL"]
stars: null | issues: null | forks: null
from flask import Response, request, redirect, abort, stream_with_context, session
import urllib.parse
import json
from fHDHR.exceptions import TunerError
class Tuners():
endpoints = ["/api/tuners"]
endpoint_name = "api_tuners"
endpoint_methods = ["GET", "POST"]
endpoint_default_parameters = {
"method": "status"
}
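    # Illustrative request shapes (editor's addition; host and channel values are
    # placeholders, the query parameters are the ones parsed in get() below):
    #   GET /api/tuners?method=status
    #   GET /api/tuners?method=stream&channel=5.1&origin=<origin>&duration=60
    #   GET /api/tuners?method=close&origin=<origin>&tuner=0
    #   GET /api/tuners?method=scan&origin=<origin>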
def __init__(self, fhdhr):
self.fhdhr = fhdhr
def __call__(self, *args):
return self.get(*args)
def get(self, *args):
base_url = request.url_root[:-1]
client_address = request.remote_addr
accessed_url = request.args.get('accessed', default=request.url, type=str)
method = request.args.get('method', default="stream", type=str)
tuner_number = request.args.get('tuner', default=None, type=str)
redirect_url = request.args.get('redirect', default=None, type=str)
origin_methods = self.fhdhr.origins.valid_origins
origin = request.args.get('origin', default=None, type=str)
if origin and origin not in origin_methods:
response = Response("Not Found", status=404)
response.headers["X-fHDHR-Error"] = "801 - Unknown Origin"
self.fhdhr.logger.error(response.headers["X-fHDHR-Error"])
abort(response)
if method == "stream":
channel_number = request.args.get('channel', None, type=str)
if not channel_number:
response = Response("Not Found", status=404)
response.headers["X-fHDHR-Error"] = "801 - Missing Channel"
self.fhdhr.logger.error(response.headers["X-fHDHR-Error"])
abort(response)
if origin:
if str(channel_number) in [str(x) for x in self.fhdhr.device.channels.get_channel_list("number", origin)]:
chan_obj = self.fhdhr.device.channels.get_channel_obj("number", channel_number, origin)
if not chan_obj:
response = Response("Not Found", status=404)
response.headers["X-fHDHR-Error"] = "801 - Unknown Channel"
self.fhdhr.logger.error(response.headers["X-fHDHR-Error"])
abort(response)
elif str(channel_number) in [str(x) for x in self.fhdhr.device.channels.get_channel_list("id", origin)]:
chan_obj = self.fhdhr.device.channels.get_channel_obj("id", channel_number, origin)
if not chan_obj:
response = Response("Not Found", status=404)
response.headers["X-fHDHR-Error"] = "801 - Unknown Channel"
self.fhdhr.logger.error(response.headers["X-fHDHR-Error"])
abort(response)
else:
response = Response("Not Found", status=404)
response.headers["X-fHDHR-Error"] = "801 - Unknown Channel"
self.fhdhr.logger.error(response.headers["X-fHDHR-Error"])
abort(response)
else:
if str(channel_number) in [str(x) for x in self.fhdhr.device.channels.get_channel_list("id")]:
chan_obj = self.fhdhr.device.channels.get_channel_obj("id", channel_number)
if not chan_obj:
response = Response("Not Found", status=404)
response.headers["X-fHDHR-Error"] = "801 - Unknown Channel"
self.fhdhr.logger.error(response.headers["X-fHDHR-Error"])
abort(response)
else:
response = Response("Not Found", status=404)
response.headers["X-fHDHR-Error"] = "801 - Unknown Channel"
self.fhdhr.logger.error(response.headers["X-fHDHR-Error"])
abort(response)
if not chan_obj.dict["enabled"]:
response = Response("Service Unavailable", status=503)
response.headers["X-fHDHR-Error"] = str("806 - Tune Failed: Channel Disabled")
self.fhdhr.logger.error(response.headers["X-fHDHR-Error"])
abort(response)
origin = chan_obj.origin
channel_number = chan_obj.number
channel_name = chan_obj.name
channel_callsign = chan_obj.callsign
self.fhdhr.logger.info("Client has requested stream for %s channel %s %s %s." %
(origin, channel_number, channel_name, channel_callsign))
stream_method = request.args.get('stream_method', default=self.fhdhr.origins.origins_dict[origin].stream_method, type=str)
if stream_method not in self.fhdhr.device.tuners.streaming_methods:
response = Response("Service Unavailable", status=503)
response.headers["X-fHDHR-Error"] = str("806 - Tune Failed: Invalid Stream Method")
self.fhdhr.logger.error(response.headers["X-fHDHR-Error"])
abort(response)
duration = request.args.get('duration', default=0, type=int)
transcode_quality = request.args.get('transcode', default=None, type=str)
valid_transcode_types = [None, "heavy", "mobile", "internet720", "internet480", "internet360", "internet240"]
if transcode_quality not in valid_transcode_types:
response = Response("Service Unavailable", status=503)
response.headers["X-fHDHR-Error"] = "802 - Unknown Transcode Profile"
self.fhdhr.logger.error(response.headers["X-fHDHR-Error"])
abort(response)
stream_args = {
"channel": channel_number,
"channel_name": channel_name,
"channel_callsign": channel_callsign,
"origin": origin,
"method": stream_method,
"duration": duration,
"origin_quality": self.fhdhr.config.dict["streaming"]["origin_quality"],
"transcode_quality": transcode_quality or self.fhdhr.config.dict["streaming"]["transcode_quality"],
"accessed": accessed_url,
"base_url": base_url,
"client": client_address,
"client_id": session["session_id"]
}
self.fhdhr.logger.info("Selected Stream Parameters: method=%s duration=%s origin_quality=%s transcode_quality=%s." %
(stream_method, duration, stream_args["origin_quality"], stream_args["transcode_quality"]))
if stream_method == "passthrough":
try:
stream_args = self.fhdhr.device.tuners.get_stream_info(stream_args)
except TunerError as e:
self.fhdhr.logger.info("A %s stream request for %s channel %s was rejected due to %s"
% (origin, stream_args["method"], str(stream_args["channel"]), str(e)))
response = Response("Service Unavailable", status=503)
response.headers["X-fHDHR-Error"] = str(e)
self.fhdhr.logger.error(response.headers["X-fHDHR-Error"])
abort(response)
self.fhdhr.logger.info("Passthrough method selected, no tuner will be used. Redirecting Client to %s" % stream_args["stream_info"]["url"])
return redirect(stream_args["stream_info"]["url"])
self.fhdhr.logger.info("Attempting to Select an available tuner for this stream.")
try:
if not tuner_number:
tunernum = self.fhdhr.device.tuners.first_available(origin, channel_number)
else:
tunernum = self.fhdhr.device.tuners.tuner_grab(tuner_number, origin, channel_number)
except TunerError as e:
self.fhdhr.logger.info("A %s stream request for channel %s was rejected due to %s"
% (stream_args["method"], str(stream_args["channel"]), str(e)))
response = Response("Service Unavailable", status=503)
response.headers["X-fHDHR-Error"] = str(e)
self.fhdhr.logger.error(response.headers["X-fHDHR-Error"])
abort(response)
tuner = self.fhdhr.device.tuners.tuners[origin][str(tunernum)]
self.fhdhr.logger.info("%s Tuner #%s to be used for stream." % (origin, tunernum))
try:
stream_args = self.fhdhr.device.tuners.get_stream_info(stream_args)
except TunerError as e:
self.fhdhr.logger.info("A %s stream request for %s channel %s was rejected due to %s"
% (origin, stream_args["method"], str(stream_args["channel"]), str(e)))
response = Response("Service Unavailable", status=503)
response.headers["X-fHDHR-Error"] = str(e)
self.fhdhr.logger.error(response.headers["X-fHDHR-Error"])
tuner.close()
abort(response)
self.fhdhr.logger.info("Preparing Stream...")
tuner.set_status(stream_args)
session["tuner_used"] = tunernum
try:
tuner.setup_stream(stream_args, tuner)
except TunerError as e:
response = Response("Service Unavailable", status=503)
response.headers["X-fHDHR-Error"] = str(e)
self.fhdhr.logger.error(response.headers["X-fHDHR-Error"])
tuner.close()
abort(response)
self.fhdhr.logger.info("Tuning Stream...")
return Response(stream_with_context(tuner.stream.get()), mimetype=stream_args["content_type"])
elif method == "close":
if not origin:
return "Missing Origin"
if not tuner_number or str(tuner_number) not in list(self.fhdhr.device.tuners.tuners[origin].keys()):
return "%s Invalid tuner" % str(tuner_number)
session["tuner_used"] = tuner_number
tuner = self.fhdhr.device.tuners.tuners[origin][str(tuner_number)]
tuner.close()
elif method == "scan":
if not origin:
for origin in list(self.fhdhr.device.tuners.tuners.keys()):
if not tuner_number:
tunernum = self.fhdhr.device.tuners.first_available(origin, None)
else:
tunernum = self.fhdhr.device.tuners.tuner_grab(tuner_number, origin, None)
tuner = self.fhdhr.device.tuners.tuners[origin][str(tunernum)]
tuner.channel_scan(origin=origin, grabbed=False)
else:
if not tuner_number:
tunernum = self.fhdhr.device.tuners.first_available(origin, None)
else:
tunernum = self.fhdhr.device.tuners.tuner_grab(tuner_number, origin, None)
tuner = self.fhdhr.device.tuners.tuners[origin][str(tunernum)]
tuner.channel_scan(origin=origin, grabbed=True)
elif method == "status":
if not origin:
if not tuner_number:
tuner_status = self.fhdhr.device.tuners.status()
else:
tuner_status = ["Invalid Tuner %s" % tuner_number]
else:
if not tuner_number:
tuner_status = self.fhdhr.device.tuners.status(origin)
elif str(tuner_number) in list(self.fhdhr.device.tuners.tuners[origin].keys()):
tuner_status = self.fhdhr.device.tuners.tuners[origin][str(tuner_number)].get_status()
else:
tuner_status = ["Invalid Tuner %s" % tuner_number]
tuner_status_json = json.dumps(tuner_status, indent=4)
return Response(status=200,
response=tuner_status_json,
mimetype='application/json')
else:
return "%s Invalid Method" % method
if redirect_url:
if "?" in redirect_url:
return redirect("%s&retmessage=%s" % (redirect_url, urllib.parse.quote("%s Success" % method)))
else:
return redirect("%s?retmessage=%s" % (redirect_url, urllib.parse.quote("%s Success" % method)))
else:
return "%s Success" % method
| 48.158491 | 154 | 0.558768 |
4a1ca9697d104014d8cea3a47f68e33f8be8c14c | 4,281 | py | Python | baccarat.py | lnbalon/open-casino | 2358b12fa2c3c6f17a06c261cf763a2709e4b034 | ["MIT"]
stars: null | issues: null | forks: null
import random
def shuffle_shoe(n_decks=8):
cards = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 'J', 'Q', 'K']
deck = cards * 4
shoe = deck * n_decks
random.shuffle(shoe)
return shoe
def deal_game(shoe):
# initialize a list to store cards for player and banker
player = []
banker = []
# define a card converter function
def card_converter(card):
return 10 if card in ['J', 'Q', 'K'] else card
# deal the first four cards
card1 = shoe.pop()
card2 = shoe.pop()
card3 = shoe.pop()
card4 = shoe.pop()
player.append(card_converter(card1))
banker.append(card_converter(card2))
player.append(card_converter(card3))
banker.append(card_converter(card4))
# test for player and banker pairs
if card1 == card3:
player_pair = 1
else:
player_pair = 0
if card2 == card4:
banker_pair = 1
else:
banker_pair = 0
# calculate the player score
player_score = sum(player) % 10
banker_score = sum(banker) % 10
# If either the player or banker is dealt a total of eight or nine,
# both the player and banker stand (i.e. a "Natural"). This rule
# overrules all others.
if player_score >= 8 or banker_score >= 8:
result = {'player': sum(player) % 10,
'banker': sum(banker) % 10,
'player_pair': player_pair,
'banker_pair': banker_pair}
return result
# If player has 6 or 7, he stands. Banker stands
# if he also has 6 or 7.
elif player_score >= 6 and banker_score >= 6:
result = {'player': sum(player) % 10,
'banker': sum(banker) % 10,
'player_pair': player_pair,
'banker_pair': banker_pair}
return result
# If a player stands, the banker can only draw a hand
# with a score of 5 and below.
elif player_score >= 6 and banker_score <= 5:
banker.append(card_converter(shoe.pop()))
# If the player_score is <=5 he draws another card.
elif player_score <= 5:
player_draw = card_converter(shoe.pop())
player.append(player_draw)
# If banker's first 2 hands totals <= 2, draw a card.
if banker_score <= 2:
banker.append(card_converter(shoe.pop()))
# If banker's first two cards totals 3 and if player_draw
# is in [1,2,3,4,5,6,7,9,10] banker draws.
elif banker_score == 3 and player_draw in [1, 2, 3, 4, 5, 6, 7, 9, 10]:
banker.append(card_converter(shoe.pop()))
# If banker's first two cards totals 4 and if player_draw
# is in [2,3,4,5,6,7] banker draws.
elif banker_score == 4 and player_draw in [2, 3, 4, 5, 6, 7]:
banker.append(card_converter(shoe.pop()))
# If banker's first two cards totals 5 and if player_draw
# is in [4,5,6,7] banker draws.
elif banker_score == 5 and player_draw in [4, 5, 6, 7]:
banker.append(card_converter(shoe.pop()))
# If banker's first two cards totals 6 and if player_draw
# is in [6,7] banker draws.
elif banker_score == 6 and player_draw in [6, 7]:
banker.append(card_converter(shoe.pop()))
# If banker score is 7 then he stands.
elif banker_score == 7:
pass
result = {'player': sum(player) % 10,
'banker': sum(banker) % 10,
'player_card': player,
'banker_card': banker,
'player_pair': player_pair,
'banker_pair': banker_pair}
return result
def simulator(number_shoe=10):
player_wins = 0
banker_wins = 0
ties = 0
while number_shoe > 0:
shoe = shuffle_shoe()
while len(shoe) > 10:
result = deal_game(shoe)
if result['player'] > result['banker']:
player_wins += 1
elif result['player'] < result['banker']:
banker_wins += 1
else:
ties += 1
number_shoe -= 1
total = player_wins + banker_wins + ties
return player_wins / total, banker_wins / total, ties / total
if __name__ == '__main__':
import sys
n_shoes = int(sys.argv[1])
print(simulator(number_shoe=n_shoes))
| 28.731544 | 79 | 0.5758 |
4a1ca9a2a1daa67ea7772e09aef562b0cc425898 | 3,017 | py | Python | finetuning-transformer-lm/utils.py | take-cheeze/models | 3ded8fd062c57f20f6154cac2dd0d998181de755 | ["MIT"]
stars: 112 (2018-04-18T07:13:03.000Z to 2022-03-11T03:36:34.000Z) | issues: 16 (2018-05-11T11:41:08.000Z to 2021-04-24T03:50:54.000Z) | forks: 45 (2018-04-18T07:13:06.000Z to 2021-12-22T03:46:18.000Z)
import os
import sys
import json
import time
from functools import partial
import numpy as np
# import tensorflow as tf
# from tensorflow.python.framework import function
from tqdm import tqdm
def encode_dataset(*splits, encoder):
encoded_splits = []
for split in splits[0]:
fields = []
for field in split:
if isinstance(field[0], str):
field = encoder.encode(field)
fields.append(field)
encoded_splits.append(fields)
return encoded_splits
def stsb_label_encoding(labels, nclass=6):
"""
Label encoding from Tree LSTM paper (Tai, Socher, Manning)
"""
Y = np.zeros((len(labels), nclass)).astype(np.float32)
for j, y in enumerate(labels):
for i in range(nclass):
if i == np.floor(y) + 1:
Y[j, i] = y - np.floor(y)
if i == np.floor(y):
Y[j, i] = np.floor(y) - y + 1
return Y
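# Worked example (editor's addition, not in the original module): for a similarity
# label of 2.4 the fractional part is split across the two neighbouring classes,
# so stsb_label_encoding([2.4]) returns approximately [[0., 0., 0.6, 0.4, 0., 0.]].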
def np_softmax(x, t=1):
x = x / t
x = x - np.max(x, axis=-1, keepdims=True)
ex = np.exp(x)
return ex / np.sum(ex, axis=-1, keepdims=True)
def make_path(f):
d = os.path.dirname(f)
if d and not os.path.exists(d):
os.makedirs(d)
return f
def _identity_init(shape, dtype, partition_info, scale):
n = shape[-1]
w = np.eye(n) * scale
if len([s for s in shape if s != 1]) == 2:
w = w.reshape(shape)
return w.astype(np.float32)
def identity_init(scale=1.0):
return partial(_identity_init, scale=scale)
def _np_init(shape, dtype, partition_info, w):
return w
def np_init(w):
return partial(_np_init, w=w)
class ResultLogger(object):
def __init__(self, path, *args, **kwargs):
if 'time' not in kwargs:
kwargs['time'] = time.time()
self.f_log = open(make_path(path), 'w')
self.f_log.write(json.dumps(kwargs) + '\n')
def log(self, **kwargs):
if 'time' not in kwargs:
kwargs['time'] = time.time()
self.f_log.write(json.dumps(kwargs) + '\n')
self.f_log.flush()
def close(self):
self.f_log.close()
def flatten(outer):
return [el for inner in outer for el in inner]
def remove_none(l):
return [e for e in l if e is not None]
def iter_data(
*datas,
n_batch=128,
truncate=False,
verbose=False,
max_batches=float("inf")):
n = len(datas[0])
if truncate:
n = (n // n_batch) * n_batch
n = min(n, max_batches * n_batch)
n_batches = 0
if verbose:
f = sys.stderr
else:
f = open(os.devnull, 'w')
    for i in tqdm(range(0, n, n_batch),
                  total=n // n_batch,
                  file=f, ncols=80, leave=False):
        if n_batches >= max_batches:
            # PEP 479: raising StopIteration inside a generator is a RuntimeError
            # on Python 3.7+, so end the generator with a plain return instead.
            return
if len(datas) == 1:
yield datas[0][i:i + n_batch]
else:
yield (d[i:i + n_batch] for d in datas)
n_batches += 1
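# Illustrative usage (editor's addition; X_train/Y_train are placeholder arrays):
#   for xb, yb in iter_data(X_train, Y_train, n_batch=32, truncate=True, verbose=True):
#       ...  # each iteration yields aligned n_batch-sized slices of the inputs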
| 23.387597 | 62 | 0.55585 |
4a1caa1eb1512628d47b8339ba4ff54366625f09 | 377 | py | Python | other/lambda_variables.py | mlobf/python_advanced | 9516f64c5a3540af2574e123eab9b36b5c242ec6 | ["BSD-3-Clause"]
stars: null | issues: null | forks: null
"""
Do lambda function with
a external paramet
Do not forget to put params in the end of the lambda
function.
"""
x = 5
# print(lambda x: x + 1)
# print(x)
# l = (lambda x: x + 1)(5)
# print(l)
items = [1, 2, 3, 4, 5, 6, 7, 8, 9]
lista_compras = []
contador = 0
for _ in items:
lista_compras.append(contador)
contador += 1
print(lista_compras)
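# Illustrative example (editor's addition), reusing the x defined above: the
# argument list goes immediately after the lambda expression.
# result = (lambda value, step: value + step)(x, 1)
# print(result)  # -> 6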
| 13.962963 | 57 | 0.596817 |
4a1caa3b95d30b7bd192f4ca055f347552c989cf | 1,014 | py | Python | accounts/admin.py | slk007/SahiGalat.com | 786688e07237f3554187b90e01149225efaa1713 | ["MIT"]
stars: null | issues: null | forks: null
from django.contrib import admin
from django.contrib.auth.models import Group
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.core.exceptions import ValidationError
from .forms import UserCreationForm, UserChangeForm
from .models import MyUser
class UserAdmin(BaseUserAdmin):
form = UserChangeForm
add_form = UserCreationForm
list_display = ('username', 'email', 'is_admin')
list_filter = ('is_admin',)
fieldsets = (
(None, {'fields': ('username', 'email', 'password')}),
# ('Personal info', {'fields': ('date_of_birth',)}),
('Permissions', {'fields': ('is_admin', 'is_staff')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username', 'email', 'password1', 'password2'),
}),
)
search_fields = ('username', 'email',)
ordering = ('username', 'email',)
filter_horizontal = ()
admin.site.register(MyUser, UserAdmin)
admin.site.unregister(Group)
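# Note (editor's addition): this admin wiring assumes the project sets
# AUTH_USER_MODEL = 'accounts.MyUser' in settings.py, which Django requires
# when registering a custom user model like the one above.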
| 27.405405 | 70 | 0.635108 |
4a1caab33b43a164bb39532a0cde2c94d985fad0 | 67,536 | py | Python | serial_scripts/asn4/test_asn4.py | atsgen/tf-test | 2748fcd81491450c75dadc71849d2a1c11061029 | ["Apache-2.0"]
stars: 5 (2020-09-29T00:36:57.000Z to 2022-02-16T06:51:32.000Z) | issues: 27 (2019-11-02T02:18:34.000Z to 2022-02-24T18:49:08.000Z) | forks: 20 (2019-11-28T16:02:25.000Z to 2022-01-06T05:56:58.000Z)
from __future__ import absolute_import
from builtins import str
from builtins import range
import test
import uuid
import re
import copy
from .base import ASN4Base
from tcutils.control.cn_introspect_utils import *
from tcutils.wrappers import preposttest_wrapper
from physical_router_fixture import PhysicalRouterFixture
from physical_device_fixture import PhysicalDeviceFixture
from vn_test import VNFixture
from vm_test import VMFixture
import control_node
from common.bgpaas.base import BaseBGPaaS
from serial_scripts.bgpaas.base import LocalASBase
from tcutils.util import skip_because,retry
cluster_use_local_asn = False
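# Note (editor's addition): if cluster_use_local_asn is ever set to True, a module
# level cluster_local_asn value must also be defined; it is referenced below but
# never assigned in this file.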
class TestAsn4(ASN4Base, BaseBGPaaS, LocalASBase):
def setUp(self):
super(TestAsn4, self).setUp()
def get_neighbor_info(self):
bgp_neigh_info = {}
for control_ip in self.inputs.bgp_ips:
cn_bgp_entry = self.cn_inspect[control_ip].get_cn_bgp_neigh_entry()
for entry in range(len(cn_bgp_entry)):
peer = cn_bgp_entry[entry]
peer_info = {
'peer_ip': peer['peer_address'], 'flap_count': peer['flap_count']}
bgp_neigh_info[peer['peer']] = peer_info
return bgp_neigh_info
def get_4byte_enable(self):
gsc_obj = self.connections.vnc_lib.global_system_config_read(
fq_name=['default-global-system-config'])
return gsc_obj.get_enable_4byte_as()
def set_4byte_enable(self, state):
if state in ['true','True',True]:
state = True
else:
state = False
self.logger.info("SET_4BYTE_ENABLE " + str(state ) )
gsc_obj = self.connections.vnc_lib.global_system_config_read(
fq_name=['default-global-system-config'])
gsc_obj.set_enable_4byte_as(state)
self.connections.vnc_lib.global_system_config_update(gsc_obj)
def get_global_asn(self):
existing_asn = self.connections.vnc_lib_fixture.get_global_asn()
return existing_asn
def set_global_asn(self, asn):
self.connections.vnc_lib_fixture.set_global_asn(asn)
def reset_bgp_router_asn(self, bgp_router_id, asn, local_asn):
self.set_4byte_enable(self.inputs.enable_4byte_as)
phy_router_obj = self.connections.vnc_lib.bgp_router_read(
id=bgp_router_id)
params = phy_router_obj.get_bgp_router_parameters()
existing_asn = params.get_autonomous_system()
params.set_autonomous_system(asn)
params.set_local_autonomous_system(local_asn)
phy_router_obj.set_bgp_router_parameters(params)
self.connections.vnc_lib.bgp_router_update(phy_router_obj)
def create_physical_device(self, router_params):
phy_device_fixture = PhysicalDeviceFixture(
router_params['name'], router_params['control_ip'],
role=None, peer_ip=router_params['control_ip'],
tunnel_ip=router_params['control_ip'],
ssh_username=router_params['ssh_username'],
ssh_password=router_params['ssh_password'],
connections=self.connections)
phy_device_fixture.setUp()
return phy_device_fixture
@retry(delay=25, tries=20)
def verify_bgp_peering(self,bgp_router_name):
result = True
for entry1 in self.inputs.bgp_ips:
self.cn_ispec = self.connections.get_control_node_inspect_handle(entry1)
cn_bgp_entry = self.cn_ispec.get_cn_bgp_neigh_entry(encoding='BGP')
if not cn_bgp_entry:
result = False
self.logger.error(
'Control Node %s does not have any BGP Peer' %
(entry1))
else:
for entry in cn_bgp_entry:
if entry['peer'] == bgp_router_name:
if entry['state'] != 'Established':
result = result and False
                            self.logger.error('Node %s peering info: peer %s (%s) is not Established, current state is %s' %
                                              (entry['local_address'], bgp_router_name, entry['peer'], entry['state']))
                        else:
                            self.logger.info('Node %s peering info: peer %s (%s) current state is %s' %
                                             (entry['local_address'], bgp_router_name, entry['peer'], entry['state']))
return result
def get_bgp_router_flap_count(self, bgp_router_name):
flap_info = {}
for entry1 in self.inputs.bgp_ips:
self.cn_ispec = ControlNodeInspect(entry1)
cn_bgp_entry = self.cn_ispec.get_cn_bgp_neigh_entry(encoding='BGP')
if not cn_bgp_entry:
result = False
self.logger.error(
'Control Node %s does not have any BGP Peer' %
(entry1))
else:
for entry in cn_bgp_entry:
if entry['peer'] == bgp_router_name:
flap_info[entry1] = entry['flap_count']
self.logger.info(
                        'Node %s peering info: peer %s (%s) current state is %s' %
(entry['local_address'], bgp_router_name, entry['peer'], entry['state']))
return flap_info
def create_bgp_router(self, router_params, phy_device_fixture):
phy_router_fixture = PhysicalRouterFixture(
router_params['name'], router_params['control_ip'],
model="mx",
vendor=router_params['vendor'],
asn=router_params['asn'],
ssh_username=router_params['ssh_username'],
ssh_password=router_params['ssh_password'],
mgmt_ip=router_params['control_ip'],
do_cleanup=False,
connections=self.connections)
phy_router_fixture.phy_device = phy_device_fixture
phy_router_fixture.setUp()
if phy_router_fixture.bgp_router_already_present:
params = phy_router_fixture.bgp_router.get_bgp_router_parameters()
existing_asn = params.get_autonomous_system()
params.set_autonomous_system(router_params['asn'])
params.set_router_type("router")
phy_router_fixture.bgp_router.set_bgp_router_parameters(params)
self.connections.vnc_lib.bgp_router_update(
phy_router_fixture.bgp_router)
self.addCleanup(self.reset_bgp_router_asn,
bgp_router_id=phy_router_fixture.bgp_router.uuid, asn=self.inputs.router_asn, local_asn=None)
return phy_router_fixture
def update_bgpaas_configuration(self, control_node_config, vm_config, bgpaas_fixt):
bgpaas_obj = self.connections.vnc_lib.bgp_as_a_service_read(
id=bgpaas_fixt.uuid)
four_byte_asn_enabled = self.get_4byte_enable()
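        # AS_TRANS (23456, defined in RFC 6793) is advertised in place of the real
        # 4-byte ASN whenever the cluster has 4-byte AS support disabled.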
if vm_config['asn'] > ASN4Base.AS_2BYTE_MAX and not four_byte_asn_enabled:
bgpaas_obj.set_autonomous_system(ASN4Base.AS_TRANS)
else:
bgpaas_obj.set_autonomous_system(vm_config['asn'])
session_attr = bgpaas_obj.get_bgpaas_session_attributes()
session_attr.local_autonomous_system = control_node_config['control_node_asn'][0]
bgpaas_obj.set_bgpaas_session_attributes(session_attr)
self.connections.vnc_lib.bgp_as_a_service_update(bgpaas_obj)
def reconfigure_bgpaas_vms(self, control_node_config, vm_fixt_list, vm_config_list):
test_vm = vm_fixt_list[0]
cmdList = []
for i, bgp_vm in enumerate(vm_config_list):
if i == 0:
continue
bgpaas_vm = vm_fixt_list[i]
autonomous_system = bgp_vm['asn']
vsrx_4b_capability = bgp_vm['4b_capability']
local_autonomous_system = control_node_config['control_node_asn'][0]
if not vsrx_4b_capability and local_autonomous_system > ASN4Base.AS_2BYTE_MAX:
cmdList.append(
'set protocols bgp group bgpaas peer-as %s' % ASN4Base.AS_TRANS)
else:
if local_autonomous_system:
cmdList.append(
'set protocols bgp group bgpaas peer-as ' + str(local_autonomous_system))
else:
# self.inputs.router_asn is 64512
cmdList.append(
'set protocols bgp group bgpaas peer-as ' + str(self.inputs.router_asn))
self.configure_vsrx(srv_vm=test_vm, dst_vm=bgpaas_vm, cmds=cmdList)
def configure_bgpaas_vms(self, control_node_config, vn_fixture, vm_fixt_list, vm_config_list=[]):
cluster_4b_capability = control_node_config['cluster_4b_capability']
test_vm = vm_fixt_list[0]
bgpaas_fixture_list = []
for i, bgp_vm in enumerate(vm_config_list):
if i == 0:
bgpaas_fixture_list.append(None)
continue
static_routes = bgp_vm['static_routes']
static_routes_aggr = bgp_vm['static_routes_aggr']
aggr_route = bgp_vm['aggr_route']
autonomous_system = bgp_vm['asn']
vsrx_4b_capability = bgp_vm['4b_capability']
local_autonomous_system = control_node_config['control_node_asn'][0]
bgpaas_vm = vm_fixt_list[i]
if not cluster_4b_capability and autonomous_system > ASN4Base.AS_2BYTE_MAX:
autonomous_system_ui = ASN4Base.AS_TRANS
else:
autonomous_system_ui = autonomous_system
bgpaas_fixture = self.create_bgpaas(
bgpaas_shared=True,
autonomous_system=autonomous_system_ui,
bgpaas_ip_address=bgpaas_vm.get_vm_ips()[0],
local_autonomous_system=local_autonomous_system)
bgpaas_fixture_list.append(bgpaas_fixture)
self.attach_port_to_bgpaas_obj(bgpaas_vm, bgpaas_fixture)
address_families = ['inet', 'inet6']
gw_ip = vn_fixture.get_subnets()[0]['gateway_ip']
dns_ip = vn_fixture.get_subnets()[0]['dns_server_address']
neighbors = [gw_ip, dns_ip]
self.logger.info('We will configure BGP on the vSRX')
local_ip = bgp_ip = bgpaas_vm.vm_ip
src_vm = test_vm
dst_vm = bgpaas_vm
cmdList = []
cmdList.extend(
('set routing-options router-id ' +
str(local_ip),
'set routing-options autonomous-system ' +
str(autonomous_system),
'set protocols bgp group bgpaas local-address ' +
str(bgp_ip)))
for family in address_families:
cmdList.append(
'set protocols bgp group bgpaas family ' +
str(family) +
' unicast')
for neighbor in neighbors:
cmdList.append(
'set protocols bgp group bgpaas neighbor ' + str(neighbor))
if not vsrx_4b_capability and local_autonomous_system > ASN4Base.AS_2BYTE_MAX:
cmdList.append(
'set protocols bgp group bgpaas peer-as %d' % ASN4Base.AS_TRANS)
else:
if local_autonomous_system:
cmdList.append(
'set protocols bgp group bgpaas peer-as ' + str(local_autonomous_system))
else:
# self.inputs.router_asn is 64512
cmdList.append(
'set protocols bgp group bgpaas peer-as ' + str(self.inputs.router_asn))
cmdList.extend(('set protocols bgp group bgpaas type external',
'set protocols bgp group bgpaas multihop',
'set protocols bgp group bgpaas export export-to-bgp',
'set protocols bgp group bgpaas export export_aggr',
'set protocols bgp group bgpaas hold-time 30',
'set protocols bgp group bgpaas traceoptions file bgp_log',
'set protocols bgp group bgpaas traceoptions file size 4294967295',
'set protocols bgp group bgpaas traceoptions file world-readable',
'set policy-options policy-statement export-to-bgp term allow_local from protocol static',
'set policy-options policy-statement export-to-bgp term allow_local then next-hop ' +
str(bgp_ip),
'set policy-options policy-statement export-to-bgp term allow_local then accept'))
for static_route in static_routes:
cmdList.append(
"set routing-options static route %s discard" % static_route)
cmdList.append(
"set policy-options policy-statement export_aggr from protocol aggregate")
cmdList.append(
"set policy-options policy-statement export_aggr then accept")
cmdList.append(
"set routing-options aggregate route %s policy export_static_aggr" % aggr_route)
cmdList.append(
"set policy-options policy-statement export_static_aggr from protocol static")
for static_route in static_routes_aggr:
cmdList.append(
"set routing-options static route %s discard" % static_route)
cmdList.append(
"set policy-options policy-statement export_static_aggr from route-filter %s exact" % static_route)
cmdList.append(
"set policy-options policy-statement export_static_aggr then accept")
if not vsrx_4b_capability:
cmdList.append(
"set protocols bgp group bgpaas disable-4byte-as")
self.configure_vsrx(srv_vm=test_vm, dst_vm=bgpaas_vm, cmds=cmdList)
bgpaas_vm.wait_for_ssh_on_vm()
agent = bgpaas_vm.vm_node_ip
ret = bgpaas_fixture.verify_in_control_node(
bgpaas_vm), 'BGPaaS Session not seen in the control-node'
if not ret:
assert False, 'BGPaaS Session not seen in the control-node'
return bgpaas_fixture_list
def configure_physical_devices(self, control_node_config, mx_config):
self.logger.info("Configure MX")
mx_control_ip_address = mx_config['mx_control_ip_address']
cluster_4b_capability = control_node_config['cluster_4b_capability']
mx_4b_capability = mx_config['mx_4b_capability']
mx_asn = mx_config['mx_asn']
mx_vrf_interfaces = mx_config['mx_vrf_interfaces']
cluster_group = None
control_node_asn = control_node_config['control_node_asn']
for device in self.inputs.physical_routers_data.items():
router_name = device[0]
if router_name != mx_config['mx_name']:
continue
flap_count_init1 = self.get_bgp_router_flap_count(router_name)
router_params = copy.deepcopy(device[1])
single_mx_bgp_router = True if len(
mx_control_ip_address) == 1 else False
for i, mx_ip in enumerate(mx_control_ip_address):
router_params['asn'] = ASN4Base.AS_TRANS if (
not cluster_4b_capability and mx_asn[i] > ASN4Base.AS_2BYTE_MAX) else mx_asn[i]
router_params['vrf_interface'] = mx_vrf_interfaces[i] + ".0"
router_params['rd'] = mx_control_ip_address[i].split(
"/")[0] + ":1"
router_params['tunnel_ip'] = mx_control_ip_address[i].split(
"/")[0]
router_params['control_ip'] = router_params['tunnel_ip']
router_params["ri_name"] = mx_config['ri_name']
router_params['mx_4b_capability'] = mx_4b_capability
router_params['name'] = mx_config['mx_name']
phy_device_fixture = self.create_physical_device(router_params)
self.phy_router_fixture = self.create_bgp_router(
router_params, phy_device_fixture)
if single_mx_bgp_router:
for cn_bgp_name in self.inputs.bgp_names:
fq_name = [u'default-domain', u'default-project',
u'ip-fabric', u'__default__', u'%s' % cn_bgp_name]
cn_node_obj = self.connections.vnc_lib.bgp_router_read(
fq_name=fq_name)
cn_node_obj.add_bgp_router(
self.phy_router_fixture.bgp_router)
self.connections.vnc_lib.bgp_router_update(cn_node_obj)
else:
cn_name = self.inputs.host_data[self.inputs.bgp_names[i]]['name']
fq_name = [u'default-domain', u'default-project',
u'ip-fabric', u'__default__', u'%s' % cn_name]
cn_node_obj = self.connections.vnc_lib.bgp_router_read(
fq_name=fq_name)
cn_node_obj.add_bgp_router(
self.phy_router_fixture.bgp_router)
self.connections.vnc_lib.bgp_router_update(cn_node_obj)
peer_config = {}
peer_config['peer_ips'] = self.inputs.bgp_control_ips
if cluster_use_local_asn:
peer_config['peer_asn'] = cluster_local_asn
else:
peer_config['peer_asn'] = control_node_asn[i]
router_params['asn'] = mx_asn[i]
router_params['group'] = None
router_params['cluster_group'] = None
router_params['test_group_name'] = mx_config['test_group_name']
router_params['test_bgp_proto_group_name'] = mx_config['test_bgp_proto_group_name']
self.logger.info("Configure mx_basic_bgp_test_configuration")
self.mx_basic_bgp_test_configuration(
router_params, peer_config, skip_cleanup=True)
self.verify_bgp_peering(router_name)
def configure_control_nodes(self, control_node_config, mx_config):
self.logger.info("Configure CONTROL_NODES")
existing_4b_capability = self.get_4byte_enable()
new_cluster_4b_capability = control_node_config['cluster_4b_capability']
new_cluster_global_asn = control_node_config['cluster_global_asn']
control_node_asn = control_node_config['control_node_asn']
if existing_4b_capability != new_cluster_4b_capability:
self.set_4byte_enable(new_cluster_4b_capability)
self.addCleanup(self.set_4byte_enable, self.inputs.enable_4byte_as)
existing_global_asn = self.get_global_asn()
if existing_global_asn != new_cluster_global_asn:
self.set_global_asn(new_cluster_global_asn)
for i, ctrl_node_name in enumerate(self.inputs.bgp_names):
ctrl_node_ip = self.inputs.host_data[ctrl_node_name]['control-ip']
ctrl_node_host_ip = self.inputs.host_data[ctrl_node_name]['host_ip']
ctrl_fixture = self.useFixture(
control_node.CNFixture(
connections=self.connections,
inputs=self.inputs,
router_name=ctrl_node_name,
router_ip=ctrl_node_ip
))
if ctrl_fixture.already_present:
fq_name = [u'default-domain', u'default-project',
u'ip-fabric', u'__default__', u'%s' % ctrl_node_name]
bgp_obj = self.connections.vnc_lib.bgp_router_read(
fq_name=fq_name)
router_params = bgp_obj.get_bgp_router_parameters()
existing_asn = router_params.get_autonomous_system()
existing_local_asn = router_params.get_local_autonomous_system()
router_params.set_autonomous_system(control_node_asn[i])
if cluster_use_local_asn:
router_params.set_local_autonomous_system(
cluster_local_asn)
else:
router_params.set_local_autonomous_system(None)
prev_local_asn = None
bgp_obj.set_bgp_router_parameters(router_params)
self.connections.vnc_lib.bgp_router_update(bgp_obj)
self.addCleanup(self.reset_bgp_router_asn,
bgp_router_id=bgp_obj.uuid,asn=self.inputs.router_asn,local_asn=prev_local_asn)
def create_vn(self, control_node_config):
vn_fixture = self.useFixture(VNFixture(connections=self.connections))
vn_fixture.verify_vn_in_api_server()
return vn_fixture
def create_vms(self, vn_fixture, vm_config_list=[]):
vm_fixt_list = []
for vm in vm_config_list:
vm_fixture = self.useFixture(
VMFixture(
project_name=self.inputs.project_name,
connections=self.connections,
vn_objs=[
vn_fixture.obj],
image_name=vm['image_name'],
vm_name=vm['vm_name']))
vm_fixt_list.append(vm_fixture)
return vm_fixt_list
def verify_vms(self,vm_fixt_list):
for vm_fixture in vm_fixt_list:
vm_fixture.verify_on_setup()
def verify_bgpaas_bgp_routes(self, control_node_config, vn_fixture, src_vm, bgpaas_vm, vm_config, expected_routes):
bgp_summary = self.get_config_via_netconf(
src_vm, bgpaas_vm, 'show bgp summary')
control_node_asn = control_node_config['control_node_asn'][0]
gw_ip = vn_fixture.get_subnets()[0]['gateway_ip']
dns_ip = vn_fixture.get_subnets()[0]['dns_server_address']
if control_node_asn > ASN4Base.AS_2BYTE_MAX and not vm_config['4b_capability']:
peer_asn = ASN4Base.AS_TRANS
else:
peer_asn = control_node_asn
gw_ip_state = re.search(r"%s\s+%d\s+.*Establ" %
(gw_ip, peer_asn), bgp_summary)
dns_ip_state = re.search(r"%s\s+%d\s+.*Establ" %
(dns_ip, peer_asn), bgp_summary)
if not (gw_ip_state and dns_ip_state):
assert False, 'BGPaaS Session is not in Established State'
routes_output = self.get_config_via_netconf(
src_vm, bgpaas_vm, 'show route receive-protocol bgp %s' % gw_ip)
for exp_route in expected_routes:
ret = re.search(exp_route, routes_output)
if ret:
self.logger.info("Route seen in BGPaaS: %s" % exp_route)
else:
assert False, "Route: %s not seen in BGPaaS" % exp_route
def peer_route_from_mx(self,control_node_config,mx_config,vn_fixture):
control_node_asn = control_node_config['control_node_asn']
vn_gw_ip = vn_fixture.get_subnets()[0]['gateway_ip']
control_mx_bgp_type = control_node_config['control,mx,bgp_type']
mx_asn = mx_config['mx_asn']
mx_static_routes = self.get_mx_configured_addnl_static_routes()
external_mx_static_routes = self.get_external_mx_configured_addnl_static_routes()
external_mx_aggr_route, external_mx_aggr_static_routes = self.get_external_mx_configured_aggr_routes()
mx_aggr_route, mx_aggr_static_routes = self.get_mx_configured_aggr_routes()
introspect_prefix_info = []
as_path = "-"
as4_path = "-"
vsrx_exp_routes = []
if control_node_config['cluster_4b_capability'] and control_mx_bgp_type == "internal":
as_path = "-"
as4_path = "-"
elif control_node_config['cluster_4b_capability'] and control_mx_bgp_type == "external":
as_path = "%s"%(mx_asn[0])
as4_path = "-"
elif not control_node_config['cluster_4b_capability']:
if mx_asn[0] > ASN4Base.AS_2BYTE_MAX and control_mx_bgp_type == "external":
as_path = "%s"%(ASN4Base.AS_TRANS)
as4_path = "%s"%(mx_asn[0])
elif control_mx_bgp_type == "external":
as_path = "%s"%(mx_asn[0])
as4_path = "-"
if control_mx_bgp_type == "internal":
for static_route in mx_static_routes:
vsrx_exp_routes.append(r'%s\s+%s\s+%d (I|\?)' %
(static_route, vn_gw_ip, control_node_asn[0]))
prefix_info = {'prefix': static_route,
'as_path': as_path, 'as4_path': as4_path}
introspect_prefix_info.append(prefix_info)
vsrx_exp_routes.append(r'%s\s+%s\s+%d (I|\?)' %
(mx_aggr_route, vn_gw_ip, control_node_asn[0]))
prefix_info = {
'prefix': mx_aggr_route, 'as_path': as_path, 'as4_path': as4_path}
introspect_prefix_info.append(prefix_info)
else:
for static_route in mx_static_routes:
vsrx_exp_routes.append(
r'%s\s+%s\s+%d %d' % (static_route, vn_gw_ip, control_node_asn[0], mx_asn[0]))
prefix_info = {
'prefix': static_route, 'as_path': as_path, 'as4_path': as4_path}
introspect_prefix_info.append(prefix_info)
vsrx_exp_routes.append(
r'%s\s+%s\s+%d %d' % (mx_aggr_route, vn_gw_ip, control_node_asn[0], mx_asn[0]))
prefix_info = {
'prefix': mx_aggr_route, 'as_path': as_path, 'as4_path': as4_path}
introspect_prefix_info.append(prefix_info)
return introspect_prefix_info,vsrx_exp_routes
def peer_route_to_mx(self,control_node_config,mx_config,vn_fixture,vm_fixt_list,vm_config_list):
control_mx_bgp_type = control_node_config['control,mx,bgp_type']
control_node_asn = control_node_config['control_node_asn']
mx_routes = []
for i, vm_config in enumerate(vm_config_list):
vm_ip = vm_fixt_list[i].vm_ips[0]
compute_data_ip = vm_fixt_list[i].vm_node_data_ip
if control_mx_bgp_type == "internal":
mx_routes.append(r'%s/32\s+%s\s+100\s+200\s+\?' %
(vm_ip, compute_data_ip))
else:
mx_routes.append(r'%s/32\s+%s\s+100\s+%d \?' %
(vm_ip, compute_data_ip, control_node_asn[0]))
if i == 0:
continue
if control_mx_bgp_type == "internal":
for static_route in vm_config['static_routes']:
mx_routes.append(r'%s\s+%s\s+100\s+%d I' %
(static_route, compute_data_ip, vm_config['asn']))
for static_route in vm_config['static_routes_aggr']:
mx_routes.append(r'%s\s+%s\s+100\s+%d I' %
(static_route, compute_data_ip, vm_config['asn']))
mx_routes.append(r'%s\s+%s\s+100\s+%d I' %
(vm_config['aggr_route'], compute_data_ip, vm_config['asn']))
else:
for static_route in vm_config['static_routes']:
mx_routes.append(r'%s\s+%s\s+%d %d I' % (static_route,
compute_data_ip, control_node_asn[0], vm_config['asn']))
for static_route in vm_config['static_routes_aggr']:
mx_routes.append(r'%s\s+%s\s+%d %d I' % (static_route,
compute_data_ip, control_node_asn[0], vm_config['asn']))
mx_routes.append(r'%s\s+%s\s+%d %d I' % (
vm_config['aggr_route'], compute_data_ip, control_node_asn[0], vm_config['asn']))
return mx_routes
def peer_route_from_vsrx(self,control_node_config,vn_fixture,vm_config,peer_vm_config):
control_node_asn = control_node_config['control_node_asn']
routes = []
peer_bgpaas_asn = peer_vm_config['asn']
vn_gw_ip = vn_fixture.get_subnets()[0]['gateway_ip']
introspect_prefix_info = []
as_path = "-"
as4_path = "-"
if control_node_config['cluster_4b_capability']:
as_path = "%s"%(peer_bgpaas_asn)
as4_path = "-"
else:
if peer_bgpaas_asn > ASN4Base.AS_2BYTE_MAX:
as_path = "%s"%(ASN4Base.AS_TRANS)
as4_path = "%s"%(peer_bgpaas_asn)
else:
as_path = "%s"%(peer_bgpaas_asn)
as4_path = "-"
for static_route in peer_vm_config['static_routes']:
routes.append(r'%s\s+%s\s+%d %d' % (static_route,
vn_gw_ip, control_node_asn[0], peer_bgpaas_asn))
prefix_info = {
'prefix': static_route, 'as_path': as_path, 'as4_path': as4_path}
introspect_prefix_info.append(prefix_info)
for static_route in peer_vm_config['static_routes_aggr']:
routes.append(r'%s\s+%s\s+%d %d' % (static_route,
vn_gw_ip, control_node_asn[0], peer_bgpaas_asn))
prefix_info = {
'prefix': static_route, 'as_path': as_path, 'as4_path': as4_path}
introspect_prefix_info.append(prefix_info)
routes.append(r'%s\s+%s\s+%d %d' % (
peer_vm_config['aggr_route'], vn_gw_ip, control_node_asn[0], peer_bgpaas_asn))
return introspect_prefix_info,routes
def peer_route_from_external_mx(self,control_node_config,mx_config,mx1_config,mx2_config):
mx1_asn = mx1_config['mx_asn']
mx2_asn = mx2_config['mx_asn']
if control_node_config['control_node_asn'] != mx_config['mx_asn']:
cluster_mx_external = True
else:
cluster_mx_external = False
if cluster_mx_external and control_node_config['cluster_4b_capability']:
ext_mx_as_path = "%s %s %s" % (
mx_config['mx_asn'][0], mx1_asn,mx2_asn)
ext_mx_as4_path = "-"
elif not cluster_mx_external and control_node_config['cluster_4b_capability']:
ext_mx_as_path = "%s %s" % (mx1_asn,mx2_asn)
ext_mx_as4_path = "-"
elif cluster_mx_external and not control_node_config['cluster_4b_capability']:
if mx_config['mx_asn'][0] > ASN4Base.AS_2BYTE_MAX:
mx_asn_t = "%d" % ASN4Base.AS_TRANS
else:
mx_asn_t = mx_config['mx_asn'][0]
if mx1_asn > ASN4Base.AS_2BYTE_MAX:
mx1_asn_t = "%d" % ASN4Base.AS_TRANS
else:
mx1_asn_t = mx1_asn
if mx2_asn > ASN4Base.AS_2BYTE_MAX:
mx2_asn_t = "%d" % ASN4Base.AS_TRANS
else:
mx2_asn_t = mx2_asn
ext_mx_as_path = "%s %s %s" % (mx_asn_t,mx1_asn_t,mx2_asn_t)
ext_mx_as4_path = "%s %s %s" % (mx_config['mx_asn'][0],mx1_asn,mx2_asn)
ext_mx_aggr_route,ext_mx_static_route = self.get_external_mx_configured_aggr_routes()
ext_mx_additional_routes = self.get_external_mx_configured_addnl_static_routes()
introspect_prefix_info = []
for route in ext_mx_static_route:
prefix_info = {
'prefix': route, 'as_path': ext_mx_as_path, 'as4_path': ext_mx_as4_path}
introspect_prefix_info.append(prefix_info)
for route in ext_mx_additional_routes:
prefix_info = {
'prefix': route, 'as_path': ext_mx_as_path, 'as4_path': ext_mx_as4_path}
introspect_prefix_info.append(prefix_info)
prefix_info = {
'prefix': ext_mx_aggr_route, 'as_path': ext_mx_as_path, 'as4_path': ext_mx_as4_path}
introspect_prefix_info.append(prefix_info)
return introspect_prefix_info
def generate_expected_routes(self, control_node_config, vn_fixture, mx_config, mx1_config, mx2_config, vm_fixt_list, vm_config_list):
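        """Assemble the expected route sets used by the verification steps.

        Returns a tuple of (prefix/AS-path info expected in the control-node
        introspect, routes expected on the MX, routes expected on the first
        vSRX BGPaaS VM, routes expected on the second vSRX BGPaaS VM).
        """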
control_node_asn = control_node_config['control_node_asn']
mx_asn = mx_config['mx_asn']
cluster_4byte_capability = control_node_config['cluster_4b_capability']
introspect_prefix_info_l = []
vsrx1_routes = []
vsrx2_routes = []
mx_routes = []
if len(vm_config_list) == 3:
# routes from other vsrx
introspect_prefix_info,route = self.peer_route_from_vsrx(control_node_config,vn_fixture,vm_config_list[1],vm_config_list[2])
vsrx1_routes.extend(route)
introspect_prefix_info_l.extend(introspect_prefix_info)
introspect_prefix_info,route = self.peer_route_from_vsrx(control_node_config,vn_fixture,vm_config_list[2],vm_config_list[1])
vsrx2_routes.extend(route)
introspect_prefix_info_l.extend(introspect_prefix_info)
introspect_prefix_info,exp_routes_from_mx = self.peer_route_from_mx(control_node_config,mx_config,vn_fixture)
introspect_prefix_info_l.extend(introspect_prefix_info)
vsrx1_routes.extend(exp_routes_from_mx)
if len(vm_config_list) == 3:
vsrx2_routes.extend(exp_routes_from_mx)
mx_routes = self.peer_route_to_mx(control_node_config,mx_config,vn_fixture,vm_fixt_list,vm_config_list)
if mx1_config:
introspect_prefix_info_ext_mx_routes = self.peer_route_from_external_mx(control_node_config,mx_config,mx1_config,mx2_config)
introspect_prefix_info_l.extend(introspect_prefix_info_ext_mx_routes)
return introspect_prefix_info_l, mx_routes, vsrx1_routes, vsrx2_routes
def verify_received_routes_in_mx(self, mx_config, peer_ips, expected_routes, bgp_type):
conn = self.phy_router_fixture.get_connection_obj(
'juniper', mx_config['control_ip'], 'root', 'c0ntrail123')
output_dict = {}
for control_ip in peer_ips:
cmd = 'show route receive-protocol bgp %s table %s' % (
control_ip, mx_config['ri_name'])
output_dict[control_ip] = conn.handle.cli(cmd)
route_seen_dict = {}
for route in expected_routes:
route_seen_dict[route] = []
for control_ip in peer_ips:
for route in expected_routes:
ret = re.search(route, output_dict[control_ip])
if ret:
route_seen_dict[route].append(control_ip)
for route in expected_routes:
if len(route_seen_dict[route]) == 0:
assert False, "Route: %s not seen in receive-protocol in MX" % route
elif bgp_type == "internal" and len(route_seen_dict[route]) != 2:
self.logger.info("iBGP Update not seen from 2 CN for Route: %s , count: %d" % (
route, len(route_seen_dict[route])))
elif bgp_type == "external" and len(route_seen_dict[route]) != 3:
self.logger.info("eBGP Update not seen from 3 CN for Route: %s , count: %d" % (
route, len(route_seen_dict[route])))
else:
self.logger.info("BGP Update Seen correctly for Route: %s , count: %d" % (
route, len(route_seen_dict[route])))
def verify_cn_instrospect(self, vn_fixture, prefixes):
ri_name = vn_fixture.ri_name
for prefix_info in prefixes:
prefix = prefix_info['prefix']
as_path = prefix_info['as_path']
if as_path == "-":
as_path = None
as4_path = prefix_info['as4_path']
if as4_path == "-":
as4_path = None
for cn in self.inputs.bgp_control_ips:
cn_entry = self.cn_inspect[cn].get_cn_route_table_entry(
prefix=prefix, table="inet.0", ri_name=ri_name)
if cn_entry[0]['as_path'] != as_path or cn_entry[0]['as4_path'] != as4_path:
assert False, "AS_PATH or AS4_PATH is incorrect for prefix: %s" % prefix
def configure_external_routes(self, mx1_mx2_router_params, vrf_target):
mx_name = self.inputs.ext_routers[0][0]
mx_info = self.inputs.physical_routers_data[mx_name]
ext_mx_name = self.inputs.as4_ext_routers[0][0]
ext_mx_info = self.inputs.physical_routers_data[ext_mx_name]
mx1_config = {}
mx1_config['mx_4b_capability'] = mx1_mx2_router_params['mx1,4b_capability']
mx1_config['mx_asn'] = mx1_mx2_router_params['mx1,local_asn']
mx1_config['test_group_name'] = "as4_ext_group1"
mx1_config['test_bgp_proto_group_name'] = "as4_ext_group1_bgp"
mx1_config["vrf_interface"] = mx_info['as4_ext_interface']
mx1_config["vrf_interface_ip"] = mx_info['as4_ext_interface_ip']
mx1_config["control_ip"] = mx_info['as4_ext_interface_ip']
mx1_config["loopback_ip"] = mx_info['loopback_ip']
mx1_config['mgmt_ip'] = mx_info['mgmt_ip']
mx1_config['mx_name'] = mx_info['name']
mx1_config['remote_mx'] = False
mx2_config = {}
mx2_config['remote_mx'] = True
mx2_config['mx_4b_capability'] = mx1_mx2_router_params['mx2,4b_capability']
mx2_config['mx_asn'] = mx1_mx2_router_params['mx2,local_asn']
mx2_config['test_group_name'] = "as4_ext_group2"
mx2_config['test_bgp_proto_group_name'] = "as4_ext_group2_bgp"
mx2_config["vrf_interface"] = ext_mx_info['as4_ext_interface']
mx2_config['control_ip'] = ext_mx_info['as4_ext_interface_ip']
mx2_config["vrf_interface_ip"] = ext_mx_info['as4_ext_interface_ip']
mx2_config["loopback_ip"] = ext_mx_info['loopback_ip']
mx2_config['mgmt_ip'] = ext_mx_info['mgmt_ip']
mx2_config['mx_name'] = ext_mx_info['name']
mx2_config['addnl_static_routes'] = self.get_external_mx_configured_addnl_static_routes()
mx2_config['aggr_route'], mx2_config['static_routes'] = self.get_external_mx_configured_aggr_routes()
self.external_route_config(mx1_config, mx2_config, vrf_target)
return mx1_config, mx2_config
@preposttest_wrapper
def test_as4_two_mx(self):
mx_name = self.inputs.ext_routers[0][0]
mx_info = self.inputs.physical_routers_data[mx_name]
initial_neigh_info = self.get_neighbor_info()
topology_info = {}
topology_info['mx_control_ip_address'] = [mx_info['control_ip']]
topology_info['mx_tunnel_ip'] = mx_info['tunnel_ip']
topology_info['mx_vrf_interfaces'] = [mx_info['vrf_interface']]
topology_info['mx_bgp_protocol_group'] = mx_info.get(
'bgp_protocol_group', None)
topology_info['test_group_name'] = "testbed_%s_4byte" % mx_name
topology_info['test_bgp_proto_group_name'] = "testbed_%s_4byte_bgp" % mx_name
topology_info['test_ri_name'] = "ri_4byte_test"
topology_info['mx_cluster_group'] = mx_info.get('cluster_group', None)
topology_info['mx_name'] = mx_name
mx1_mx2_router_params = {}
mx1_mx2_router_params['mx1,local_asn'] = 1
mx1_mx2_router_params['mx1,4b_capability'] = True
mx1_mx2_router_params['mx2,local_asn'] = 970000
mx1_mx2_router_params['mx2,4b_capability'] = True
cluster_global_asn = 4600
control_node_asn = [4600, 4600, 4600]
mx_asn = [4500, 4500, 4500]
cluster_4b_capability = False
mx_4b_capability = True
mx_control_ip_address = topology_info['mx_control_ip_address']
mx_tunnel_ip = topology_info['mx_tunnel_ip']
        if control_node_asn == mx_asn:
bgp_type = "internal"
else:
bgp_type = "external"
mx_config = {}
mx_config['mx_4b_capability'] = mx_4b_capability
mx_config['mx_asn'] = mx_asn
mx_config['mx_vrf_interfaces'] = topology_info['mx_vrf_interfaces']
mx_config['bgp_protocol_group'] = topology_info['mx_bgp_protocol_group']
mx_config['cluster_group'] = topology_info['mx_cluster_group']
mx_config['test_group_name'] = topology_info['test_group_name']
mx_config['test_bgp_proto_group_name'] = topology_info['test_bgp_proto_group_name']
mx_config['ri_name'] = topology_info['test_ri_name']
mx_config["vrf_interface"] = mx_config['mx_vrf_interfaces'][0] + ".0"
mx_config["rd"] = mx_control_ip_address[0].split("/")[0] + ":100"
mx_config['control_ip'] = mx_tunnel_ip
mx_config['mx_control_ip_address'] = topology_info['mx_control_ip_address']
mx_config['mx_name'] = topology_info['mx_name']
control_node_config = {}
control_node_config['cluster_4b_capability'] = cluster_4b_capability
control_node_config['cluster_global_asn'] = cluster_global_asn
control_node_config['control_node_asn'] = control_node_asn
control_node_config['mx_config'] = mx_config
control_node_config['control,mx,bgp_type'] = bgp_type
existing_global_asn = self.get_global_asn()
self.addCleanup(self.connections.vnc_lib_fixture.set_global_asn, self.inputs.bgp_asn)
self.configure_control_nodes(control_node_config,mx_config)
self.deactivate_mx_cluster_configuration(mx_config)
self.configure_physical_devices(control_node_config,mx_config)
self.addCleanup(self.activate_mx_cluster_configuration,mx_config)
vm_config_list = []
vm_config = {}
vm_config['image_name'] = 'ubuntu-traffic'
vm_config['vm_name'] = 'test-vm'
vm_config_list.append(vm_config)
vm_config = {}
vm_config['image_name'] = 'vsrx'
vm_config['vm_name'] = 'bgpaas-vm1'
vm_config['asn'] = 9000
vm_config['static_routes'] = self.vsrx1_addnl_static_routes()
vm_config['aggr_route'], vm_config['static_routes_aggr'] = self.vsrx1_aggr_routes()
vm_config['peer_asn'] = control_node_asn[0]
vm_config['4b_capability'] = False
vm_config_list.append(vm_config)
vm_config = {}
vm_config['image_name'] = 'vsrx'
vm_config['vm_name'] = 'bgpaas-vm2'
vm_config['asn'] = 90000
vm_config['4b_capability'] = True
vm_config['peer_asn'] = control_node_asn[0]
vm_config['static_routes'] = self.vsrx2_addnl_static_routes()
vm_config['aggr_route'], vm_config['static_routes_aggr'] = self.vsrx2_aggr_routes()
vm_config_list.append(vm_config)
##### START OF STEP1: ####################
vn_fixture = self.create_vn(control_node_config=control_node_config)
vm_fixt_list = self.create_vms(
vn_fixture=vn_fixture, vm_config_list=vm_config_list)
self.verify_vms(vm_fixt_list)
bgpaas_fixture_list = self.configure_bgpaas_vms(control_node_config=control_node_config,vn_fixture=vn_fixture,vm_fixt_list=vm_fixt_list,vm_config_list=vm_config_list)
self.update_sg_group()
self.mx_create_vrf(control_node_config=control_node_config,
mx_config=mx_config, vn_fixture=vn_fixture)
self.mx_aggregated_routes_configuration(
control_node_config, mx_config, vn_fixture)
self.mx_static_routes_configuration(
control_node_config, mx_config, vn_fixture)
if mx1_mx2_router_params['mx2,local_asn'] < ASN4Base.AS_2BYTE_MAX or (cluster_4b_capability and mx1_mx2_router_params['mx2,local_asn'] > ASN4Base.AS_2BYTE_MAX):
mx2_vrf_target = "target:%d:12345" % (
mx1_mx2_router_params['mx2,local_asn'])
else:
mx2_vrf_target = "target:%d:12345" % (41000)
mx1_config, mx2_config = self.configure_external_routes(
mx1_mx2_router_params, vrf_target=mx2_vrf_target)
self.add_vrf_target(control_node_config=control_node_config,
vn_fixture=vn_fixture, vrf_target=mx2_vrf_target)
introspect_prefix_info, mx_routes, vsrx1_routes, vsrx2_routes = self.generate_expected_routes(
control_node_config, vn_fixture, mx_config, mx1_config, mx2_config, vm_fixt_list, vm_config_list)
self.verify_bgpaas_bgp_routes(control_node_config=control_node_config, vn_fixture=vn_fixture,
src_vm=vm_fixt_list[0], bgpaas_vm=vm_fixt_list[1], vm_config=vm_config_list[1], expected_routes=vsrx1_routes)
self.verify_bgpaas_bgp_routes(control_node_config=control_node_config, vn_fixture=vn_fixture,
                                      src_vm=vm_fixt_list[0], bgpaas_vm=vm_fixt_list[2], vm_config=vm_config_list[2], expected_routes=vsrx2_routes)
self.verify_received_routes_in_mx(
mx_config, peer_ips=self.inputs.bgp_control_ips, expected_routes=mx_routes, bgp_type=bgp_type)
self.verify_cn_instrospect(vn_fixture, introspect_prefix_info)
@preposttest_wrapper
@test.attr(type=['sanity'])
def test_basic_as4_ibgp_sanity(self):
if len(self.inputs.ext_routers) != 1:
raise self.skipTest(
"Skipping Test. At least 1 external router required to run the test")
test_info = {}
test_info['cluster_global_asn'] = 89000
test_info['control_node_asn'] = [89000, 89000, 89000]
test_info['mx_asn'] = [89000, 89000, 89000]
test_info['cluster_4b_capability'] = True
test_info['mx_4b_capability'] = True
mx_name = self.inputs.ext_routers[0][0]
mx_info = self.inputs.physical_routers_data[mx_name]
topology_info = {}
topology_info['mx_control_ip_address'] = [mx_info['control_ip']]
topology_info['mx_tunnel_ip'] = mx_info['tunnel_ip']
topology_info['mx_vrf_interfaces'] = [mx_info['vrf_interface']]
topology_info['mx_bgp_protocol_group'] = mx_info.get(
'bgp_protocol_group', None)
topology_info['test_group_name'] = "testbed_%s_4byte" % mx_name
topology_info['test_bgp_proto_group_name'] = "testbed_%s_4byte_bgp" % mx_name
topology_info['test_ri_name'] = "ri_4byte_test"
topology_info['mx_cluster_group'] = mx_info.get('cluster_group', None)
topology_info['mx_name'] = mx_name
initial_neigh_info = self.get_neighbor_info()
cluster_global_asn = test_info['cluster_global_asn']
control_node_asn = test_info['control_node_asn']
mx_asn = test_info['mx_asn']
cluster_4b_capability = test_info['cluster_4b_capability']
mx_4b_capability = test_info['mx_4b_capability']
mx_control_ip_address = topology_info['mx_control_ip_address']
mx_tunnel_ip = topology_info['mx_tunnel_ip']
if test_info['control_node_asn'] == test_info['mx_asn']:
bgp_type = "internal"
else:
bgp_type = "external"
mx_config = {}
mx_config['mx_4b_capability'] = test_info['mx_4b_capability']
mx_config['mx_asn'] = test_info['mx_asn']
mx_config['mx_vrf_interfaces'] = topology_info['mx_vrf_interfaces']
mx_config['bgp_protocol_group'] = topology_info['mx_bgp_protocol_group']
mx_config['cluster_group'] = topology_info['mx_cluster_group']
mx_config['test_group_name'] = topology_info['test_group_name']
mx_config['test_bgp_proto_group_name'] = topology_info['test_bgp_proto_group_name']
mx_config['ri_name'] = topology_info['test_ri_name']
mx_config["vrf_interface"] = mx_config['mx_vrf_interfaces'][0] + ".0"
mx_config["rd"] = mx_control_ip_address[0].split("/")[0] + ":100"
mx_config['control_ip'] = mx_tunnel_ip
mx_config['mx_control_ip_address'] = topology_info['mx_control_ip_address']
mx_config['mx_name'] = topology_info['mx_name']
control_node_config = {}
control_node_config['cluster_4b_capability'] = cluster_4b_capability
control_node_config['cluster_global_asn'] = cluster_global_asn
control_node_config['control_node_asn'] = control_node_asn
control_node_config['mx_config'] = mx_config
control_node_config['control,mx,bgp_type'] = bgp_type
existing_global_asn = self.get_global_asn()
self.addCleanup(
self.connections.vnc_lib_fixture.set_global_asn, existing_global_asn)
self.configure_control_nodes(control_node_config, mx_config)
self.deactivate_mx_cluster_configuration(mx_config=mx_config)
self.configure_physical_devices(control_node_config, mx_config)
self.addCleanup(self.activate_mx_cluster_configuration, mx_config)
vm_config_list = []
vm_config = {}
vm_config['image_name'] = 'ubuntu-traffic'
vm_config['vm_name'] = 'test-vm'
vm_config_list.append(vm_config)
vm_config = {}
vm_config['image_name'] = 'vsrx'
vm_config['vm_name'] = 'bgpaas-vm1'
vm_config['asn'] = 9000
vm_config['static_routes'] = self.vsrx1_addnl_static_routes()
vm_config['aggr_route'], vm_config['static_routes_aggr'] = self.vsrx1_aggr_routes()
vm_config['peer_asn'] = control_node_asn[0]
vm_config['4b_capability'] = False
vm_config_list.append(vm_config)
self.update_sg_group()
vn_fixture = self.create_vn(control_node_config=control_node_config)
vm_fixt_list = self.create_vms(
vn_fixture=vn_fixture, vm_config_list=vm_config_list)
self.mx_create_vrf(control_node_config=control_node_config,
mx_config=mx_config, vn_fixture=vn_fixture)
self.mx_aggregated_routes_configuration(
control_node_config, mx_config, vn_fixture)
self.mx_static_routes_configuration(
control_node_config, mx_config, vn_fixture)
self.verify_vms(vm_fixt_list)
bgpaas_fixture_list = self.configure_bgpaas_vms(
control_node_config=control_node_config, vn_fixture=vn_fixture, vm_fixt_list=vm_fixt_list, vm_config_list=vm_config_list)
mx1_config = mx2_config = None
introspect_prefix_info, mx_routes, vsrx1_routes, vsrx2_routes = self.generate_expected_routes(
control_node_config,vn_fixture, mx_config, mx1_config, mx2_config, vm_fixt_list, vm_config_list)
self.verify_bgpaas_bgp_routes(control_node_config=control_node_config, vn_fixture=vn_fixture,
src_vm=vm_fixt_list[0], bgpaas_vm=vm_fixt_list[1], vm_config=vm_config_list[1], expected_routes=vsrx1_routes)
self.verify_received_routes_in_mx(
mx_config, peer_ips=self.inputs.bgp_control_ips, expected_routes=mx_routes, bgp_type=bgp_type)
@preposttest_wrapper
# @skip_because(mx_gw=False,msg='Need to set MX_GW=True and atleast one Physical Router')
def test_basic_as4_ibgp(self):
if len(self.inputs.ext_routers) != 1:
raise self.skipTest(
"Skipping Test. At least 1 external router required to run the test")
mx_name = self.inputs.ext_routers[0][0]
mx_info = self.inputs.physical_routers_data[mx_name]
topology_info = {}
topology_info['mx_control_ip_address'] = [mx_info['control_ip']]
topology_info['mx_tunnel_ip'] = mx_info['tunnel_ip']
topology_info['mx_vrf_interfaces'] = [mx_info['vrf_interface']]
topology_info['mx_bgp_protocol_group'] = mx_info.get(
'bgp_protocol_group', None)
topology_info['test_group_name'] = "testbed_%s_4byte" % mx_name
topology_info['test_bgp_proto_group_name'] = "testbed_%s_4byte_bgp" % mx_name
topology_info['test_ri_name'] = "ri_4byte_test"
topology_info['mx_cluster_group'] = mx_info.get('cluster_group', None)
topology_info['mx_name'] = mx_name
test_info = {}
test_info['step1,cluster_global_asn'] = 8901
test_info['step1,control_node_asn'] = [8901, 8901, 8901]
test_info['step1,mx_asn'] = [8901, 8901, 8901]
test_info['step1,cluster_4b_capability'] = False
test_info['step1,mx_4b_capability'] = False
test_info['step2,cluster_global_asn'] = 89000
test_info['step2,control_node_asn'] = [89000, 89000, 89000]
test_info['step2,mx_asn'] = [89000, 89000, 89000]
test_info['step2,cluster_4b_capability'] = True
test_info['step2,mx_4b_capability'] = True
self.basic_as4(topology_info, test_info)
@preposttest_wrapper
# @skip_because(mx_gw=False,msg='Need to set MX_GW=True and atleast one Physical Router')
def test_basic_as4_ebgp(self):
if len(self.inputs.ext_routers) != 1:
raise self.skipTest(
"Skipping Test. At least 1 external router required to run the test")
mx_name = self.inputs.ext_routers[0][0]
mx_info = self.inputs.physical_routers_data[mx_name]
topology_info = {}
topology_info['mx_control_ip_address'] = [mx_info['control_ip']]
topology_info['mx_tunnel_ip'] = mx_info['tunnel_ip']
topology_info['mx_vrf_interfaces'] = [mx_info['vrf_interface']]
topology_info['mx_bgp_protocol_group'] = mx_info.get(
'bgp_protocol_group', None)
topology_info['test_group_name'] = "testbed_%s_4byte" % mx_name
topology_info['test_bgp_proto_group_name'] = "testbed_%s_4byte_bgp" % mx_name
topology_info['test_ri_name'] = "ri_4byte_test"
topology_info['mx_cluster_group'] = mx_info.get('cluster_group', None)
topology_info['mx_name'] = mx_name
test_info = {}
test_info['step1,cluster_global_asn'] = 8902
test_info['step1,control_node_asn'] = [8902, 8902, 8902]
test_info['step1,mx_asn'] = [8903, 8903, 8903]
test_info['step1,cluster_4b_capability'] = False
test_info['step1,mx_4b_capability'] = False
test_info['step2,cluster_global_asn'] = 89002
test_info['step2,control_node_asn'] = [89002, 89002, 89002]
test_info['step2,mx_asn'] = [89003, 89003, 89003]
test_info['step2,cluster_4b_capability'] = True
test_info['step2,mx_4b_capability'] = True
self.basic_as4(topology_info, test_info)
def basic_as4(self, topology_info, test_info):
initial_neigh_info = self.get_neighbor_info()
cluster_global_asn = test_info['step1,cluster_global_asn']
control_node_asn = test_info['step1,control_node_asn']
mx_asn = test_info['step1,mx_asn']
cluster_4b_capability = test_info['step1,cluster_4b_capability']
mx_4b_capability = test_info['step1,mx_4b_capability']
mx_control_ip_address = topology_info['mx_control_ip_address']
mx_tunnel_ip = topology_info['mx_tunnel_ip']
if test_info['step1,control_node_asn'] == test_info['step1,mx_asn']:
bgp_type = "internal"
else:
bgp_type = "external"
mx_config = {}
mx_config['mx_4b_capability'] = test_info['step1,mx_4b_capability']
mx_config['mx_asn'] = test_info['step1,mx_asn']
mx_config['mx_vrf_interfaces'] = topology_info['mx_vrf_interfaces']
mx_config['bgp_protocol_group'] = topology_info['mx_bgp_protocol_group']
mx_config['cluster_group'] = topology_info['mx_cluster_group']
mx_config['test_group_name'] = topology_info['test_group_name']
mx_config['test_bgp_proto_group_name'] = topology_info['test_bgp_proto_group_name']
mx_config['ri_name'] = topology_info['test_ri_name']
mx_config["vrf_interface"] = mx_config['mx_vrf_interfaces'][0] + ".0"
mx_config["rd"] = mx_control_ip_address[0].split("/")[0] + ":100"
mx_config['control_ip'] = mx_tunnel_ip
mx_config['mx_control_ip_address'] = topology_info['mx_control_ip_address']
mx_config['mx_name'] = topology_info['mx_name']
control_node_config = {}
control_node_config['cluster_4b_capability'] = cluster_4b_capability
control_node_config['cluster_global_asn'] = cluster_global_asn
control_node_config['control_node_asn'] = control_node_asn
control_node_config['mx_config'] = mx_config
control_node_config['control,mx,bgp_type'] = bgp_type
self.logger.info(
"STEP:1 a. Disable '4 Byte Enabled' in Global System Configuration")
self.logger.info(
" b. Configure 2 Byte AS in Global System Configuration")
self.logger.info(" c. Configure iBGP with MX")
self.logger.info(
" d. Create 1st BGPaaS with 2 Byte AS and 4Byte Capability Disabled")
self.logger.info(
" e. Create 2nd BGPaaS with 4 Byte AS and 4Byte Capability Enabled")
self.logger.info(
" f. Configure static routes and aggregate routes to be advertised by MX.Update routing-instance with VN rt and add community")
self.logger.info(
"Verification: a. Verify Routes from MX are received and seen in Both BGPaaS.Verify AS_PATH")
self.logger.info(
" b. Verify Routes from both BGPaaS are seen in MX")
self.logger.info(
"STEP:2 a. Enable '4 Byte Enabled' in Global System Configuration")
self.logger.info(
"Verification: a. Verify Routes from MX are received and seen in Both BGPaaS.Verify AS_PATH")
self.logger.info(
" b. Verify Routes from both BGPaaS are seen in MX")
self.logger.info(
"STEP:3 \n a. Update 4 Byte AS in Global System Configuration")
self.logger.info(
" b. Update Cluster 4 Byte AS,RT in MX and AS in BGPaaS")
self.logger.info(
"Verification: a. Verify Routes from MX are received and seen in Both BGPaaS.Verify AS_PATH")
self.logger.info(
" b. Verify Routes from both BGPaaS are seen in MX")
existing_global_asn = self.get_global_asn()
self.addCleanup(
self.connections.vnc_lib_fixture.set_global_asn, self.inputs.bgp_asn)
self.configure_control_nodes(control_node_config, mx_config)
self.deactivate_mx_cluster_configuration(mx_config)
self.configure_physical_devices(control_node_config, mx_config)
self.addCleanup(self.activate_mx_cluster_configuration, mx_config)
vm_config_list = []
vm_config = {}
vm_config['image_name'] = 'ubuntu-traffic'
vm_config['vm_name'] = 'test-vm'
vm_config_list.append(vm_config)
vm_config = {}
vm_config['image_name'] = 'vsrx'
vm_config['vm_name'] = 'bgpaas-vm1'
vm_config['asn'] = 9000
vm_config['static_routes'] = self.vsrx1_addnl_static_routes()
vm_config['aggr_route'], vm_config['static_routes_aggr'] = self.vsrx1_aggr_routes()
vm_config['peer_asn'] = control_node_asn[0]
vm_config['4b_capability'] = False
vm_config_list.append(vm_config)
vm_config = {}
vm_config['image_name'] = 'vsrx'
vm_config['vm_name'] = 'bgpaas-vm2'
vm_config['asn'] = 90000
vm_config['4b_capability'] = True
vm_config['peer_asn'] = control_node_asn[0]
vm_config['static_routes'] = self.vsrx2_addnl_static_routes()
vm_config['aggr_route'], vm_config['static_routes_aggr'] = self.vsrx2_aggr_routes()
vm_config_list.append(vm_config)
##### START OF STEP1: ####################
vn_fixture = self.create_vn(control_node_config=control_node_config)
vm_fixt_list = self.create_vms(
vn_fixture=vn_fixture, vm_config_list=vm_config_list)
self.verify_vms(vm_fixt_list)
bgpaas_fixture_list = self.configure_bgpaas_vms(
control_node_config=control_node_config, vn_fixture=vn_fixture, vm_fixt_list=vm_fixt_list, vm_config_list=vm_config_list)
self.update_sg_group()
self.mx_create_vrf(control_node_config=control_node_config,
mx_config=mx_config, vn_fixture=vn_fixture)
self.mx_aggregated_routes_configuration(
control_node_config, mx_config, vn_fixture)
self.mx_static_routes_configuration(
control_node_config, mx_config, vn_fixture)
mx1_config = mx2_config = None
introspect_prefix_info, mx_routes, vsrx1_routes, vsrx2_routes = self.generate_expected_routes(
control_node_config,vn_fixture, mx_config, mx1_config, mx2_config, vm_fixt_list, vm_config_list)
self.verify_bgpaas_bgp_routes(control_node_config=control_node_config, vn_fixture=vn_fixture,
src_vm=vm_fixt_list[0], bgpaas_vm=vm_fixt_list[1], vm_config=vm_config_list[1], expected_routes=vsrx1_routes)
self.verify_bgpaas_bgp_routes(control_node_config=control_node_config, vn_fixture=vn_fixture,
                                      src_vm=vm_fixt_list[0], bgpaas_vm=vm_fixt_list[2], vm_config=vm_config_list[2], expected_routes=vsrx2_routes)
self.verify_received_routes_in_mx(
mx_config, peer_ips=self.inputs.bgp_control_ips, expected_routes=mx_routes, bgp_type=bgp_type)
###### END OF STEP1: ####################
###### START OF STEP2: ####################
#self.logger.info("START of STEP2")
cluster_4b_capability = True
self.set_4byte_enable(cluster_4b_capability)
self.update_bgpaas_configuration(
control_node_config=control_node_config, vm_config=vm_config_list[1], bgpaas_fixt=bgpaas_fixture_list[1])
self.update_bgpaas_configuration(
control_node_config=control_node_config, vm_config=vm_config_list[2], bgpaas_fixt=bgpaas_fixture_list[2])
time.sleep(60) # To allow re-generation of RT , after a global ASN change
self.verify_bgpaas_bgp_routes(control_node_config=control_node_config, vn_fixture=vn_fixture,
src_vm=vm_fixt_list[0], bgpaas_vm=vm_fixt_list[1], vm_config=vm_config_list[1], expected_routes=vsrx1_routes)
self.verify_bgpaas_bgp_routes(control_node_config=control_node_config, vn_fixture=vn_fixture,
                                      src_vm=vm_fixt_list[0], bgpaas_vm=vm_fixt_list[2], vm_config=vm_config_list[2], expected_routes=vsrx2_routes)
self.verify_received_routes_in_mx(
mx_config, peer_ips=self.inputs.bgp_control_ips, expected_routes=mx_routes, bgp_type=bgp_type)
###### END OF STEP2: ####################
self.logger.info("END of STEP2")
self.logger.info("START of STEP3")
cluster_global_asn = test_info['step2,cluster_global_asn']
self.set_global_asn(cluster_global_asn)
control_node_asn = test_info['step2,control_node_asn']
mx_asn = test_info['step2,mx_asn']
mx_config['mx_4b_capability'] = test_info['step2,mx_4b_capability']
control_node_config['cluster_4b_capability'] = test_info['step2,cluster_4b_capability']
control_node_config['control_node_asn'] = control_node_asn
control_node_config['cluster_global_asn'] = cluster_global_asn
mx_config['mx_asn'] = mx_asn
self.configure_physical_devices(control_node_config, mx_config)
self.reconfigure_bgpaas_vms(control_node_config=control_node_config,
vm_fixt_list=vm_fixt_list, vm_config_list=vm_config_list)
self.update_bgpaas_configuration(
control_node_config=control_node_config, vm_config=vm_config_list[1], bgpaas_fixt=bgpaas_fixture_list[1])
self.update_bgpaas_configuration(
control_node_config=control_node_config, vm_config=vm_config_list[2], bgpaas_fixt=bgpaas_fixture_list[2])
self.mx_create_vrf(control_node_config=control_node_config,
mx_config=mx_config, vn_fixture=vn_fixture)
self.mx_aggregated_routes_configuration(
control_node_config, mx_config, vn_fixture)
self.mx_static_routes_configuration(
control_node_config, mx_config, vn_fixture)
introspect_prefix_info, mx_routes, vsrx1_routes, vsrx2_routes = self.generate_expected_routes(
control_node_config, vn_fixture, mx_config, mx1_config, mx2_config, vm_fixt_list, vm_config_list)
self.verify_bgpaas_bgp_routes(control_node_config=control_node_config, vn_fixture=vn_fixture,
src_vm=vm_fixt_list[0], bgpaas_vm=vm_fixt_list[1], vm_config=vm_config_list[1], expected_routes=vsrx1_routes)
self.verify_bgpaas_bgp_routes(control_node_config=control_node_config, vn_fixture=vn_fixture,
src_vm=vm_fixt_list[0], bgpaas_vm=vm_fixt_list[2], vm_config=vm_config_list[2], expected_routes=vsrx2_routes)
self.verify_received_routes_in_mx(
mx_config, peer_ips=self.inputs.bgp_control_ips, expected_routes=mx_routes, bgp_type=bgp_type)
##### END OF STEP3: ####################
self.logger.info("END of STEP3")
final_neigh_info = self.get_neighbor_info()
self.logger.info("Initial_flap_count", initial_neigh_info)
self.logger.info("Final_flap_count", initial_neigh_info)
def update_sg_group(self):
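        """Open up the project's 'default' security group.

        Deletes the existing rules and installs allow-all rules (any protocol,
        all ports, 0.0.0.0/0) in both directions, presumably so that BGP
        sessions and test traffic to the vSRX VMs are not blocked.
        """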
sg_obj = self.connections.vnc_lib.security_group_read(
fq_name=[u'default-domain', u'%s' % self.connections.project_name, u'default'])
self.connections.orch.delete_security_group_rules(
sg_id=sg_obj.uuid, project_id=self.connections.project_id)
uuid_1 = uuid.uuid1().urn.split(':')[2]
uuid_2 = uuid.uuid1().urn.split(':')[2]
secgrp_rules = [
{'direction': '>',
'protocol': 'any',
'dst_addresses': [{'security_group': 'local', 'subnet': None}],
'dst_ports': [{'start_port': 0, 'end_port': 65535}],
'src_ports': [{'start_port': 0, 'end_port': 65535}],
'src_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}],
'rule_uuid': uuid_1
}, {'direction': '<',
'protocol': 'any',
'src_addresses': [{'security_group': 'local', 'subnet': None}],
'dst_ports': [{'start_port': 0, 'end_port': 65535}],
'src_ports': [{'start_port': 0, 'end_port': 65535}],
'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], 'rule_uuid': uuid_2}, ]
self.connections.orch.set_security_group_rules(
sg_id=sg_obj.uuid, sg_entries=secgrp_rules)
| 49.658824 | 174 | 0.640873 |
4a1cac67098488a272d8eae4f788330c16fabc37
| 4,052 |
py
|
Python
|
alipay/aop/api/request/AlipayFincoreComplianceTemplateAnswerQueryRequest.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/request/AlipayFincoreComplianceTemplateAnswerQueryRequest.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/request/AlipayFincoreComplianceTemplateAnswerQueryRequest.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayFincoreComplianceTemplateAnswerQueryModel import AlipayFincoreComplianceTemplateAnswerQueryModel
class AlipayFincoreComplianceTemplateAnswerQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayFincoreComplianceTemplateAnswerQueryModel):
self._biz_content = value
else:
self._biz_content = AlipayFincoreComplianceTemplateAnswerQueryModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.fincore.compliance.template.answer.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
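# Illustrative usage sketch (added for clarity; not part of the original SDK file).
# The client/config class names below follow the usual layout of
# alipay-sdk-python-all but should be treated as assumptions, and 'template_id'
# is a made-up biz_content field:
#
#     from alipay.aop.api.AlipayClientConfig import AlipayClientConfig
#     from alipay.aop.api.DefaultAlipayClient import DefaultAlipayClient
#
#     config = AlipayClientConfig()
#     config.server_url = 'https://openapi.alipay.com/gateway.do'
#     config.app_id = '<app_id>'
#     config.app_private_key = '<private_key>'
#     config.alipay_public_key = '<alipay_public_key>'
#
#     client = DefaultAlipayClient(alipay_client_config=config)
#     request = AlipayFincoreComplianceTemplateAnswerQueryRequest()
#     request.biz_content = {'template_id': '...'}  # dict is converted by the setter
#     response = client.execute(request)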
| 27.944828 | 148 | 0.65153 |
4a1cad50e593690f8df1a821299042130ad8d7f9
| 1,047 |
py
|
Python
|
setup.py
|
mgorny/WSGIProxy2
|
0b3c034cb4e9ab24984ea73e102d8376c21a51c7
|
[
"MIT"
] | null | null | null |
setup.py
|
mgorny/WSGIProxy2
|
0b3c034cb4e9ab24984ea73e102d8376c21a51c7
|
[
"MIT"
] | null | null | null |
setup.py
|
mgorny/WSGIProxy2
|
0b3c034cb4e9ab24984ea73e102d8376c21a51c7
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
version = '0.5.1.dev0'
def read(name):
try:
with open(name) as fd:
return fd.read()
except Exception:
return ''
setup(name='WSGIProxy2',
version=version,
long_description=read('README.rst') + '\n' + read('CHANGES.rst'),
description='A WSGI Proxy with various http client backends',
classifiers=[
'License :: OSI Approved :: MIT License',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
keywords='wsgi proxy',
author='Gael Pasgrimaud',
author_email='gael@gawel.org',
url='https://github.com/gawel/WSGIProxy2/',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'README_fixt', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=['webob', 'six'],
entry_points="""
# -*- Entry points: -*-
""",
)
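# Note added for clarity (not part of the original file): with this setup.py the
# package is typically installed for development with `pip install -e .` and a
# source distribution is built with `python setup.py sdist`.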
| 28.297297 | 75 | 0.581662 |
4a1cad71fc931280b8bc053e85b9ecfa6f6936f7
| 293 |
py
|
Python
|
userinterface/migrations/0002_delete_timetable.py
|
Anand911/E-LEARNING-SCLMAXO-
|
a16c317ae482c91f4f91c967ddc3e498a43ac7e9
|
[
"MIT"
] | 1 |
2021-02-14T10:43:21.000Z
|
2021-02-14T10:43:21.000Z
|
userinterface/migrations/0002_delete_timetable.py
|
Anand911/E-LEARNING-SCLMAXO-
|
a16c317ae482c91f4f91c967ddc3e498a43ac7e9
|
[
"MIT"
] | 1 |
2021-01-12T07:22:08.000Z
|
2021-01-13T19:07:02.000Z
|
userinterface/migrations/0002_delete_timetable.py
|
Anand911/E-LEARNING-SCLMAXO-
|
a16c317ae482c91f4f91c967ddc3e498a43ac7e9
|
[
"MIT"
] | 6 |
2020-12-13T17:46:37.000Z
|
2021-02-10T13:47:25.000Z
|
# Generated by Django 3.1.3 on 2020-12-18 13:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('userinterface', '0001_initial'),
]
operations = [
migrations.DeleteModel(
name='timetable',
),
]
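# Note added for clarity (not part of the original migration): this is applied with
# the standard management command, e.g. `python manage.py migrate userinterface`,
# and drops the table backing the removed 'timetable' model.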
| 17.235294 | 47 | 0.600683 |
4a1cadd687f1a5856c9c5fb11c49ddac48029fb3
| 8,544 |
py
|
Python
|
pgcli/packages/sqlcompletion.py
|
czchen/debian-pgcli
|
67498d4e8f6d153de7f2f73380d2b749c550c247
|
[
"BSD-3-Clause"
] | null | null | null |
pgcli/packages/sqlcompletion.py
|
czchen/debian-pgcli
|
67498d4e8f6d153de7f2f73380d2b749c550c247
|
[
"BSD-3-Clause"
] | null | null | null |
pgcli/packages/sqlcompletion.py
|
czchen/debian-pgcli
|
67498d4e8f6d153de7f2f73380d2b749c550c247
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import print_function
import sys
import sqlparse
from sqlparse.sql import Comparison, Identifier
from .parseutils import last_word, extract_tables, find_prev_keyword
from .pgspecial import parse_special_command
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str
else:
string_types = basestring
def suggest_type(full_text, text_before_cursor):
"""Takes the full_text that is typed so far and also the text before the
cursor to suggest completion type and scope.
    Returns a list of suggestion dicts, each with a 'type' key ('table',
    'column', 'keyword', etc.); a 'column' suggestion also carries a 'tables'
    scope listing the tables in play.
"""
word_before_cursor = last_word(text_before_cursor,
include='many_punctuations')
identifier = None
# If we've partially typed a word then word_before_cursor won't be an empty
# string. In that case we want to remove the partially typed string before
# sending it to the sqlparser. Otherwise the last token will always be the
# partially typed string which renders the smart completion useless because
# it will always return the list of keywords as completion.
if word_before_cursor:
if word_before_cursor[-1] == '(' or word_before_cursor[0] == '\\':
parsed = sqlparse.parse(text_before_cursor)
else:
parsed = sqlparse.parse(
text_before_cursor[:-len(word_before_cursor)])
# word_before_cursor may include a schema qualification, like
# "schema_name.partial_name" or "schema_name.", so parse it
# separately
p = sqlparse.parse(word_before_cursor)[0]
if p.tokens and isinstance(p.tokens[0], Identifier):
identifier = p.tokens[0]
else:
parsed = sqlparse.parse(text_before_cursor)
if len(parsed) > 1:
# Multiple statements being edited -- isolate the current one by
# cumulatively summing statement lengths to find the one that bounds the
# current position
current_pos = len(text_before_cursor)
stmt_start, stmt_end = 0, 0
for statement in parsed:
stmt_len = len(statement.to_unicode())
stmt_start, stmt_end = stmt_end, stmt_end + stmt_len
if stmt_end >= current_pos:
text_before_cursor = full_text[stmt_start:current_pos]
full_text = full_text[stmt_start:]
break
elif parsed:
# A single statement
statement = parsed[0]
else:
# The empty string
statement = None
# Check for special commands and handle those separately
if statement:
# Be careful here because trivial whitespace is parsed as a statement,
# but the statement won't have a first token
tok1 = statement.token_first()
if tok1 and tok1.value == '\\':
return suggest_special(text_before_cursor)
last_token = statement and statement.token_prev(len(statement.tokens)) or ''
return suggest_based_on_last_token(last_token, text_before_cursor,
full_text, identifier)
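# Illustrative expectations (added for clarity; not part of the original module),
# derived from the branches of suggest_based_on_last_token below -- the exact
# 'tables' contents depend on what extract_tables parses from full_text:
#
#     suggest_type('SELECT * FROM ', 'SELECT * FROM ')
#         -> [{'type': 'schema'}, {'type': 'table', 'schema': []}]
#     suggest_type('SELECT  FROM abc', 'SELECT ')
#         -> [{'type': 'column', 'tables': <tables parsed from full_text>},
#             {'type': 'function', 'schema': []}]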
def suggest_special(text):
text = text.lstrip()
cmd, _, arg = parse_special_command(text)
if cmd == text:
# Trying to complete the special command itself
return [{'type': 'special'}]
if cmd == '\\c':
return [{'type': 'database'}]
if cmd == '\\dn':
return [{'type': 'schema'}]
if arg:
# Try to distinguish "\d name" from "\d schema.name"
# Note that this will fail to obtain a schema name if wildcards are
# used, e.g. "\d schema???.name"
parsed = sqlparse.parse(arg)[0].tokens[0]
try:
schema = parsed.get_parent_name()
except AttributeError:
schema = None
else:
schema = None
if cmd[1:] in ('d', 'dt', 'dv'):
if schema:
return [{'type': 'table', 'schema': schema}]
else:
return [{'type': 'schema'}, {'type': 'table', 'schema': []}]
elif cmd[1:] == 'df':
if schema:
return [{'type': 'function', 'schema': schema}]
else:
return [{'type': 'schema'}, {'type': 'function', 'schema': []}]
return [{'type': 'keyword'}, {'type': 'special'}]
def suggest_based_on_last_token(token, text_before_cursor, full_text, identifier):
if isinstance(token, string_types):
token_v = token
else:
# If 'token' is a Comparison type such as
# 'select * FROM abc a JOIN def d ON a.id = d.'. Then calling
# token.value on the comparison type will only return the lhs of the
# comparison. In this case a.id. So we need to do token.tokens to get
# both sides of the comparison and pick the last token out of that
# list.
if isinstance(token, Comparison):
token_v = token.tokens[-1].value
else:
token_v = token.value
if not token:
return [{'type': 'keyword'}, {'type': 'special'}]
elif token_v.lower().endswith('('):
p = sqlparse.parse(text_before_cursor)[0]
if p.token_first().value.lower() == 'select':
# If the lparen is preceeded by a space chances are we're about to
# do a sub-select.
if last_word(text_before_cursor,
'all_punctuations').startswith('('):
return [{'type': 'keyword'}]
return [{'type': 'column', 'tables': extract_tables(full_text)}]
elif token_v.lower() in ('set', 'by', 'distinct'):
return [{'type': 'column', 'tables': extract_tables(full_text)}]
elif token_v.lower() in ('select', 'where', 'having'):
# Check for a table alias or schema qualification
parent = (identifier and identifier.get_parent_name()) or []
if parent:
tables = extract_tables(full_text)
tables = [t for t in tables if identifies(parent, *t)]
return [{'type': 'column', 'tables': tables},
{'type': 'table', 'schema': parent},
{'type': 'function', 'schema': parent}]
else:
return [{'type': 'column', 'tables': extract_tables(full_text)},
{'type': 'function', 'schema': []}]
elif token_v.lower() in ('copy', 'from', 'update', 'into', 'describe',
'join', 'table'):
schema = (identifier and identifier.get_parent_name()) or []
if schema:
# If already schema-qualified, suggest only tables
return [{'type': 'table', 'schema': schema}]
else:
# Suggest schemas OR public tables
return [{'type': 'schema'}, {'type': 'table', 'schema': []}]
elif token_v.lower() == 'function':
# E.g. 'DROP FUNCTION <funcname>'
schema = (identifier and identifier.get_parent_name()) or []
if schema:
return [{'type': 'function', 'schema': schema}]
else:
return [{'type': 'schema'}, {'type': 'function', 'schema': []}]
elif token_v.lower() == 'on':
tables = extract_tables(full_text) # [(schema, table, alias), ...]
parent = (identifier and identifier.get_parent_name()) or []
if parent:
# "ON parent.<suggestion>"
# parent can be either a schema name or table alias
tables = [t for t in tables if identifies(parent, *t)]
return [{'type': 'column', 'tables': tables},
{'type': 'table', 'schema': parent},
{'type': 'function', 'schema': parent}]
else:
# ON <suggestion>
# Use table alias if there is one, otherwise the table name
aliases = [t[2] or t[1] for t in tables]
return [{'type': 'alias', 'aliases': aliases}]
elif token_v.lower() in ('c', 'use', 'database', 'template'):
# "\c <db", "use <db>", "DROP DATABASE <db>",
# "CREATE DATABASE <newdb> WITH TEMPLATE <db>"
return [{'type': 'database'}]
elif token_v.endswith(',') or token_v == '=':
prev_keyword = find_prev_keyword(text_before_cursor)
if prev_keyword:
return suggest_based_on_last_token(
prev_keyword, text_before_cursor, full_text, identifier)
else:
return [{'type': 'keyword'}]
def identifies(id, schema, table, alias):
return id == alias or id == table or (
schema and (id == schema + '.' + table))
| 39.192661 | 82 | 0.588834 |
4a1caef93d89a2f95c20b2eb02fc921c46f0a438
| 10,861 |
py
|
Python
|
archive/sra_experiment/nbconverted/DEG_validation.py
|
ajlee21/core-accessory-interactome
|
e2d8344e8c8abb1d0bda845ce2292b08ae590c51
|
[
"BSD-3-Clause"
] | null | null | null |
archive/sra_experiment/nbconverted/DEG_validation.py
|
ajlee21/core-accessory-interactome
|
e2d8344e8c8abb1d0bda845ce2292b08ae590c51
|
[
"BSD-3-Clause"
] | 33 |
2020-04-24T23:07:49.000Z
|
2022-03-10T22:53:09.000Z
|
archive/sra_experiment/nbconverted/DEG_validation.py
|
ajlee21/core-accessory-interactome
|
e2d8344e8c8abb1d0bda845ce2292b08ae590c51
|
[
"BSD-3-Clause"
] | 1 |
2020-04-01T17:09:27.000Z
|
2020-04-01T17:09:27.000Z
|
# coding: utf-8
# # Differential expression validation
# This notebook performs a differential expression (DE) analysis comparing PAO1 samples vs PA14 samples. We can compare our results with those published in the literature as an additional step to validate that our RNA-seq processing pipeline is reasonable.
# In[1]:
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('load_ext', 'rpy2.ipython')
get_ipython().run_line_magic('autoreload', '2')
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from core_acc_modules import utils, paths
from rpy2.robjects import pandas2ri
pandas2ri.activate()
# ### Download data for validation
# Data from [Sana et. al](https://jb.asm.org/content/201/21/e00362-19) found ~ 2K DEGs between 2 strains where Quorum Sensing (QS) genes were DEGs. QS or cell-to-cell signaling controls expression of genes involved in virulence and biofilm formation. QS systems make use of a transcriptional activator protein that acts in concert with a small signaling molecule to stimulate expression of target genes.
# In[2]:
# Download sra data files
get_ipython().system(' prefetch --option-file $paths.SRA_ACC_TEST')
# In[3]:
get_ipython().run_cell_magic('bash', '-s $paths.FASTQ_TEST_DIR $paths.SRA_DIR $paths.FASTQ_TEST_DIR/', 'mkdir -p $1\nfastq-dump $2/SRR8486287.sra --outdir $3\nfastq-dump $2/SRR8486288.sra --outdir $3\nfastq-dump $2/SRR8486289.sra --outdir $3\nfastq-dump $2/SRR8486290.sra --outdir $3')
# In[4]:
get_ipython().run_cell_magic('bash', '-s $paths.PAO1_QUANT_TEST $paths.FASTQ_TEST_DIR $paths.PAO1_INDEX', 'mkdir -p $1\n\nfor FILE_PATH in $2/*;\ndo\n\n# get file name\nsample_name=`basename ${FILE_PATH}`\n\n# remove extension from file name\nsample_name="${sample_name%_*}"\n\n# get base path\nbase_name=${FILE_PATH%/*}\n\necho "Processing sample ${sample_name}"\n\nsalmon quant -i $3 -l A \\\n -r ${base_name}/${sample_name} \\\n -p 8 --validateMappings -o $1/${sample_name}_quant\ndone')
# In[5]:
# Get raw read counts using PAO1 reference
# Read through all sample subdirectories in quant/
# Within each sample subdirectory, get quant.sf file
data_dir = paths.PAO1_QUANT_TEST
expression_data = pd.DataFrame(
pd.read_csv(file, sep="\t", index_col=0)["NumReads"].
rename(file.parent.name.split("_")[0])
for file in data_dir.rglob("*/quant.sf")
)
# Map gene ids to gene names
pao1_fasta_file = paths.PAO1_REF
seq_id_to_gene_id_pao1 = utils.dict_gene_num_to_ids(pao1_fasta_file)
expression_data.rename(mapper=seq_id_to_gene_id_pao1, axis="columns", inplace=True)
expression_data.head()
# In[6]:
# Format sample ids
# There was probably a way to get around doing this step in bash but to get a quick validation I have done this
new_index = [name.split(".")[0] for name in list(expression_data.index)]
expression_data.index = new_index
expression_data.head()
# ### Process data
# 1. Get core genes
# 2. Round read counts to integer value
# In[7]:
# Get mapping between PAO1 and PA14 genes using PAO1 reference
gene_annot_file = paths.GENE_PAO1_ANNOT
gene_mapping_pao1 = utils.get_pao1_pa14_gene_map(gene_annot_file, 'pao1')
gene_annot_file = paths.GENE_PA14_ANNOT
gene_mapping_pa14 = utils.get_pao1_pa14_gene_map(gene_annot_file, 'pa14')
core_pao1_genes, core_pa14_genes = utils.get_core_genes(gene_mapping_pao1,
gene_mapping_pa14,
False)
print(f"Number of PAO1 core genes: {len(core_pao1_genes)}")
print(f"Number of PA14 core genes: {len(core_pa14_genes)}")
core_pao1_genes = set(core_pao1_genes) - set(["PA4215", "PA4214","PA4213"])
expression_data = expression_data.reindex(columns=core_pao1_genes)
print(expression_data.shape)
expression_data.head()
# In[8]:
# Convert values to integers for DE analysis
# Salmon returns estimate read counts.
# Usually scientists would use Salmon to get estimated transcript abundances
# and then use tximport to aggregate the transcript abundances to gene abundances
# TO DO: Need to look into tximport and DESeq processing to make sure this rounding step is ok
expression_data = expression_data.astype(int)
# In[9]:
# Save file
expression_data.to_csv(paths.PAO1_GE_DE, sep='\t')
# ### Examine gene expression
# 1. Look at consistency of gene expression within PAO1 samples, within PA14 samples
# 2. What does average PAO1 vs average PA14 look like? To give us an expectation for our DESeq analysis.
# In[10]:
expression_data = pd.read_csv(paths.PAO1_GE_DE, sep="\t", index_col=0)
expression_data.head()
# In[11]:
# Group samples as PAO1 or PA14 based on experiment metadata
sample_annot_file = paths.SAMPLE_ANNOT_TEST
pao1_ids, pa14_ids = utils.get_sample_grps(sample_annot_file)
# In[12]:
# Split expression by genotype
pao1_expression = expression_data.loc[pao1_ids]
pa14_expression = expression_data.loc[pa14_ids]
# In[13]:
# Get within sample correlation
pao1_corr = pao1_expression.T.corr()
pa14_corr = pa14_expression.T.corr()
# In[14]:
ax = sns.heatmap(
pao1_corr,
vmin=-1, vmax=1, center=0,
cmap=sns.diverging_palette(20, 220, n=200),
square=True
)
ax.set_title("PAO1 sample correlation")
# In[15]:
ax = sns.heatmap(
pa14_corr,
vmin=-1, vmax=1, center=0,
cmap=sns.diverging_palette(20, 220, n=200),
square=True
)
ax.set_title("PA14 sample correlation")
# In[16]:
# Get mean expression for pao1 and pa14
mean_pao1 = pao1_expression.mean()
mean_pa14 = pa14_expression.mean()
pao1_v_pa14_df = pd.DataFrame(data={'pao1_mean': mean_pao1.values,
'pa14_mean': mean_pa14.values},
index=pao1_expression.columns)
pao1_v_pa14_df.head()
# In[17]:
sns.scatterplot(data=pao1_v_pa14_df, x='pao1_mean', y='pa14_mean')
plt.plot([0,140000],[0,140000])
# In[18]:
# Rough calculation of the number of genes that differ
# Here we are counting the genes with an expression difference between PAO1 and PA14
# that is more than one standard deviation away from the overall mean of expression differences across genes
std_x_eq_y = np.std(abs(pao1_v_pa14_df['pao1_mean']-pao1_v_pa14_df['pa14_mean']))
gene_differences = pao1_v_pa14_df[abs(pao1_v_pa14_df['pao1_mean']-pao1_v_pa14_df['pa14_mean']) > std_x_eq_y]
print(gene_differences.shape)
genes_found_from_GE = list(gene_differences.index)
gene_differences.head()
# **Observations:**
# * Looks like there are consistent gene expression patterns within sample-type (i.e. both PAO1 samples have a similar gene expression profile, and likewise for both PA14 samples), as expected
# * Comparing the mean expression of PAO1 and PA14 we see that ~200 genes differ. This gives us some indication about what to expect for our DESeq analysis. However, we shouldn't expect the numbers to align because we are using different methods -- above we are comparing the raw gene expression values and looking for a threshold difference; below DESeq fits the negative binomial model to the data and performs hypothesis testing to determine if there is a difference between the groups of samples.
# ### Differential expression analysis
# In[19]:
get_ipython().run_cell_magic('R', '', '# Select 59\n# Run one time\n#if (!requireNamespace("BiocManager", quietly = TRUE))\n# install.packages("BiocManager")\n#BiocManager::install("DESeq2")')
# In[20]:
get_ipython().run_cell_magic('R', '', '# Load the DESeq2 library\nsuppressPackageStartupMessages(library("DESeq2"))')
# In[21]:
# Files to load into DE analysis (R)
metadata_file = str(paths.SAMPLE_ANNOT_TEST)
expression_data_file = str(paths.PAO1_GE_DE)
out_file = str(paths.DE_STATS)
# In[22]:
# Check ordering of sample ids
utils.check_sample_ordering(expression_data_file, metadata_file)
# In[23]:
get_ipython().run_cell_magic('R', '-i metadata_file -i expression_data_file -i out_file', "\nsource('../core_acc_modules/DE_analysis.R')\n\nget_DE_stats_DESeq(metadata_file,\n expression_data_file,\n out_file)")
# In[24]:
# Read in DE stats file
DE_stats = pd.read_csv(paths.DE_STATS, sep='\t', header=0, index_col=0)
print(DE_stats.shape)
DE_stats.head()
# In[25]:
# Volcano plot
DE_stats["-log10(padj)"] = -np.log10(DE_stats["padj"])
sns.scatterplot(data=DE_stats, x="log2FoldChange", y="-log10(padj)")
# ### Compare our DE results with publication
# In[26]:
# Get number of DEGs
selected_DE_stats = DE_stats[(DE_stats['padj']<0.01)]
print(selected_DE_stats.shape)
selected_DE_stats.head()
# In[27]:
# Compare our findings against Sana et. al.
degs_sana = ["PA3431", "PA3432", "PA1244", "PA4685"]
for gene in degs_sana:
if gene in list(selected_DE_stats.index):
print(gene)
# In[28]:
# Compare genes whose mean gene expression differed between PAO1 and PA14
# with those genes found using DESeq
top_degs_by_padj = selected_DE_stats.index
len(set(top_degs_by_padj).intersection(genes_found_from_GE))/len(genes_found_from_GE)
# **Conclusions:**
#
# Our DE analysis found ~1.1K significantly differentially expressed genes
#
# (Check 1): The DEGs identified using DESeq (\~1.1K genes) are fairly consistent with the genes that were 1 standard deviation outside the expression-difference threshold (\~200 genes) -- there was a 75% overlap in these gene sets. This very roughly validates that DESeq is working as expected. I wouldn't expect the numbers to be this different though.
#
# (Check 2) The number of DEGs identified (\~1.1K genes) using DESeq is fairly consistent with the number of differentially expressed genes found in [Sana et al.](https://jb.asm.org/content/201/21/e00362-19) (\~2K genes). We also spot checked specific genes that were found. We found the 4 genes highlighted in the Sana et al. publication, including the main qsIA gene (PA1244) that the paper found to be more highly expressed in PAO1 vs PA14. Differences are likely due to differences in the package used.
#
# Approach used in [Sana et. al](https://jb.asm.org/content/201/21/e00362-19) found ~ 2K DEGs between 2 strains where QS genes were DEGs:
# ```
# Illumina reads were mapped to the P. aeruginosa genome PAO1 (GenBank accession number AE004091.2 [61]) and PA14 (GenBank accession number NC_008463.1 [33]) by Bowtie (version Bowtie1 v0.12.9 [62]). Data were normalized by reads per kilobase per million (RPKM) and filtered to the 5,263 orthologous genes conserved between P. aeruginosa strains PA14 and PAO1. Two biological replicates were performed per condition. Differential
# expression analysis was analyzed using the Bioconductor package NOISeq version 2.22.1 (64), a nonparametric approach suitable for lowly replicated data, and using a q value of 0.99 for strong control of
# false positives
# ```
| 34.047022 | 516 | 0.740908 |
4a1caf537b2a00ca223591d4d86fef915ea110cb
| 91,604 |
py
|
Python
|
weldx/tests/test_geometry.py
|
marscher/weldx
|
a5debd8af957009b12fd366589fed1aa41f78176
|
[
"BSD-3-Clause"
] | null | null | null |
weldx/tests/test_geometry.py
|
marscher/weldx
|
a5debd8af957009b12fd366589fed1aa41f78176
|
[
"BSD-3-Clause"
] | null | null | null |
weldx/tests/test_geometry.py
|
marscher/weldx
|
a5debd8af957009b12fd366589fed1aa41f78176
|
[
"BSD-3-Clause"
] | null | null | null |
"""Tests the geometry package."""
import copy
import math
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Union
import numpy as np
import pint
import pytest
from xarray import DataArray
import weldx.geometry as geo
import weldx.tests._helpers as helpers
import weldx.transformations as tf
import weldx.util as ut
from weldx import Q_
from weldx.geometry import SpatialData
from weldx.transformations import WXRotation
# helpers ---------------------------------------------------------------------
def check_segments_identical(seg_a, seg_b):
"""Check if 2 segments are identical within floating point tolerance.
Parameters
----------
seg_a :
First segment
seg_b :
Second segment
"""
assert isinstance(seg_a, type(seg_b))
assert ut.matrix_is_close(seg_a.points, seg_b.points)
if isinstance(seg_a, geo.ArcSegment):
assert seg_a.arc_winding_ccw == seg_b.arc_winding_ccw
assert ut.vector_is_close(seg_a.point_center, seg_b.point_center)
def check_shapes_identical(shp_a, shp_b):
"""Check if 2 shapes are identical within floating point tolerance.
Parameters
----------
    shp_a :
        First shape
    shp_b :
        Second shape
"""
assert shp_a.num_segments == shp_b.num_segments
for i in range(shp_a.num_segments):
check_segments_identical(shp_a.segments[i], shp_b.segments[i])
def check_profiles_identical(pro_a, pro_b):
"""Check if 2 profiles are identical within floating point tolerance.
Parameters
----------
pro_a :
First profile
pro_b :
Second profile
"""
assert pro_a.num_shapes == pro_b.num_shapes
for i in range(pro_a.num_shapes):
check_shapes_identical(pro_a.shapes[i], pro_b.shapes[i])
def check_variable_profiles_identical(vp_a, vp_b):
"""Check if 2 variable profiles are identical within floating point tolerance.
Parameters
----------
vp_a :
First variable profile
vp_b :
Second variable profile
"""
assert vp_a.num_profiles == vp_b.num_profiles
assert vp_a.num_locations == vp_b.num_locations
assert vp_a.num_interpolation_schemes == vp_b.num_interpolation_schemes
for i in range(vp_a.num_profiles):
check_profiles_identical(vp_a.profiles[i], vp_b.profiles[i])
for i in range(vp_a.num_locations):
assert math.isclose(vp_a.locations[i], vp_b.locations[i])
for i in range(vp_a.num_interpolation_schemes):
assert isinstance(
vp_a.interpolation_schemes[i], type(vp_b.interpolation_schemes[i])
)
def check_trace_segments_identical(seg_a, seg_b):
"""Check if 2 trace segments are identical within floating point tolerance.
Parameters
----------
seg_a :
First segment
seg_b :
Second segment
"""
assert isinstance(seg_a, type(seg_b))
if isinstance(seg_a, geo.LinearHorizontalTraceSegment):
assert seg_a.length == seg_b.length
else:
assert seg_a.is_clockwise == seg_b.is_clockwise
assert math.isclose(seg_a.angle, seg_b.angle)
assert math.isclose(seg_a.length, seg_b.length)
assert math.isclose(seg_a.radius, seg_b.radius)
def check_traces_identical(trc_a, trc_b):
"""Check if 2 traces are identical within floating point tolerance.
Parameters
----------
trc_a :
First trace
trc_b :
Second trace
"""
assert trc_a.num_segments == trc_b.num_segments
for i in range(trc_a.num_segments):
check_trace_segments_identical(trc_a.segments[i], trc_b.segments[i])
def check_coordinate_systems_identical(lcs_a, lcs_b, abs_tol=1e-9):
"""Check if 2 local coordinate systems are identical within a tolerance.
Parameters
----------
lcs_a :
First local coordinate system
lcs_b :
Second local coordinate system
abs_tol :
Absolute tolerance (Default value = 1e-9)
"""
assert ut.matrix_is_close(lcs_a.orientation, lcs_b.orientation, abs_tol)
assert ut.vector_is_close(lcs_a.coordinates, lcs_b.coordinates, abs_tol)
def get_default_profiles() -> List:
"""Get 2 profiles.
Returns
-------
list
List containing 2 profiles
"""
a_0 = [0, 0]
a_1 = [8, 16]
a_2 = [16, 0]
shape_a01 = geo.Shape().add_line_segments([a_0, a_1])
shape_a12 = geo.Shape().add_line_segments([a_1, a_2])
profile_a = geo.Profile([shape_a01, shape_a12])
b_0 = [-4, 8]
b_1 = [0, 8]
b_2 = [16, -16]
shape_b01 = geo.Shape().add_line_segments([b_0, b_1])
shape_b12 = geo.Shape().add_line_segments([b_1, b_2])
profile_b = geo.Profile([shape_b01, shape_b12])
return [profile_a, profile_b]
# helper for segment tests ----------------------------------------------------
def default_segment_rasterization_tests(
segment: Union[geo.ArcSegment, geo.LineSegment], raster_width
):
"""Perform some default checks for a passed segment's rasterization method.
The segment is rasterized and tested afterwards. The purpose of every
test is explained by a comment in the code.
Parameters
----------
segment :
Instance of a segment class
raster_width :
Raster width
"""
data = segment.rasterize(raster_width)
# check dimensions are correct
assert len(data.shape) == 2
point_dimension = data.shape[0]
num_points = data.shape[1]
assert point_dimension == 2
# Check if first and last point of the data are identical to the segment
# start and end
assert ut.vector_is_close(data[:, 0], segment.point_start)
assert ut.vector_is_close(data[:, -1], segment.point_end)
for i in range(num_points - 1):
point = data[:, i]
next_point = data[:, i + 1]
raster_width_eff = np.linalg.norm(next_point - point)
# effective raster width is close to specified one
assert np.abs(raster_width_eff - raster_width) < 0.1 * raster_width
# effective raster width is constant (equidistant points)
assert math.isclose(raster_width_eff, np.linalg.norm(data[:, 1] - data[:, 0]))
# check that there are no duplicate points
assert helpers.are_all_columns_unique(data)
# check that rasterization with too large raster width still works
data_200 = segment.rasterize(200)
num_points_200 = data_200.shape[1]
assert num_points_200 == 2
# only 2 points must be segment start and end
assert ut.vector_is_close(segment.point_start, data_200[:, 0])
assert ut.vector_is_close(segment.point_end, data_200[:, 1])
# exceptions ------------------------------------------
# raster width <= 0
with pytest.raises(ValueError):
segment.rasterize(0)
with pytest.raises(ValueError):
segment.rasterize(-3)
# test LineSegment ------------------------------------------------------------
def test_line_segment_construction():
"""Test constructor and factories."""
# class constructor -----------------------------------
segment = geo.LineSegment([[3, 5], [3, 4]])
assert math.isclose(segment.length, np.sqrt(5))
# exceptions ------------------------------------------
# length = 0
with pytest.raises(ValueError):
geo.LineSegment([[0, 0], [1, 1]])
# not 2x2
with pytest.raises(ValueError):
geo.LineSegment([[3, 5], [3, 4], [3, 2]])
# not a 2d array
with pytest.raises(ValueError):
geo.LineSegment([[[3, 5], [3, 4]]])
# factories -------------------------------------------
segment = geo.LineSegment.construct_with_points([3, 3], [4, 5])
assert math.isclose(segment.length, np.sqrt(5))
def test_line_segment_rasterization():
"""Test line segment rasterization.
    This test checks that every rasterized point lies on the line that
    connects the start and the end of the segment. It also checks that those
    points lie between the segment's start and end point.
"""
raster_width = 0.1
point_start = np.array([3, 3])
point_end = np.array([4, 5])
segment = geo.LineSegment.construct_with_points(point_start, point_end)
# perform default tests
default_segment_rasterization_tests(segment, raster_width)
# rasterize data
raster_data = segment.rasterize(raster_width)
num_points = raster_data.shape[1]
# check that points lie between start and end
vec_start_end = point_end - point_start
unit_vec_start_end = tf.normalize(vec_start_end)
length_start_end = np.linalg.norm(vec_start_end)
for i in np.arange(1, num_points - 1, 1):
vec_start_point = raster_data[:, i] - point_start
unit_vec_start_point = tf.normalize(vec_start_point)
length_start_point = np.linalg.norm(vec_start_point)
assert ut.vector_is_close(unit_vec_start_point, unit_vec_start_end)
assert length_start_point < length_start_end
def line_segment_transformation_test_case(
point_start,
point_end,
exp_start,
exp_end,
exp_length,
translation=None,
transformation=None,
):
"""Perform a single transformation test on a line segment.
The test applies a transformation and compares the result to the
expected values.
Parameters
----------
point_start :
Start point of the line segment
point_end :
End point of the line segment
exp_start :
Expected start point of the transformed line segment
exp_end :
Expected end point of the transformed line segment
exp_length :
Expected length of the transformed line segment
translation :
Translation that should be applied (optional) (Default value = None)
transformation :
Transformation that should be applied (optional) (Default value = None)
"""
if translation is not None:
assert transformation is None, "No mixed test cases supported"
segment = geo.LineSegment.construct_with_points(point_start, point_end)
if translation is not None:
segment_trans = segment.translate(translation)
else:
segment_trans = segment.transform(transformation)
# original segment not modified
assert ut.vector_is_close(segment.point_start, point_start)
assert ut.vector_is_close(segment.point_end, point_end)
# check new segment
assert ut.vector_is_close(segment_trans.point_start, exp_start)
assert ut.vector_is_close(segment_trans.point_end, exp_end)
assert math.isclose(segment_trans.length, exp_length)
# apply same transformation in place
if translation is not None:
segment.apply_translation(translation)
else:
segment.apply_transformation(transformation)
check_segments_identical(segment, segment_trans)
def test_line_segment_transformations():
"""Test line segment transformations.
    This test covers all relevant transformations and exceptions.
"""
# translation -----------------------------------------
line_segment_transformation_test_case(
point_start=[3, 3],
point_end=[4, 5],
translation=[-1, 4],
exp_start=[2, 7],
exp_end=[3, 9],
exp_length=np.sqrt(5),
)
# 45 degree rotation ----------------------------------
s = np.sin(np.pi / 4.0)
c = np.cos(np.pi / 4.0)
rotation_matrix = [[c, -s], [s, c]]
line_segment_transformation_test_case(
point_start=[2, 2],
point_end=[3, 6],
transformation=rotation_matrix,
exp_start=[0, np.sqrt(8)],
exp_end=np.matmul(rotation_matrix, [3, 6]),
exp_length=np.sqrt(17),
)
# reflection at 45 degree line ------------------------
v = np.array([-1, 1], dtype=float)
reflection_matrix = np.identity(2) - 2 / np.dot(v, v) * np.outer(v, v)
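    # Note: I - 2 * v v^T / (v . v) is the Householder reflection across the
    # line orthogonal to v; with v = [-1, 1] this is the 45 degree line y = x,
    # so the matrix simply swaps the x and y coordinates of a point.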
line_segment_transformation_test_case(
point_start=[-1, 3],
point_end=[6, 1],
transformation=reflection_matrix,
exp_start=[3, -1],
exp_end=[1, 6],
exp_length=np.sqrt(53),
)
# scaling ---------------------------------------------
scale_matrix = [[4, 0], [0, 0.5]]
line_segment_transformation_test_case(
point_start=[-2, 2],
point_end=[1, 4],
transformation=scale_matrix,
exp_start=[-8, 1],
exp_end=[4, 2],
exp_length=np.sqrt(145),
)
# exceptions ------------------------------------------
# transformation results in length = 0
zero_matrix = np.zeros((2, 2))
segment = geo.LineSegment.construct_with_points([0, 0], [1, 2])
with pytest.raises(Exception):
segment.apply_transformation(zero_matrix)
with pytest.raises(Exception):
segment.transform(zero_matrix)
def test_line_segment_interpolation():
"""Test the line segments linear interpolation function.
Two segments are created and interpolated using different weights. The
result is compared to the expected values.
"""
segment_a = geo.LineSegment.construct_with_points([1, 3], [7, -3])
segment_b = geo.LineSegment.construct_with_points([5, -5], [-1, 13])
for i in range(5):
weight = i / 4
segment_c = geo.LineSegment.linear_interpolation(segment_a, segment_b, weight)
exp_point_start = [1 + i, 3 - 2 * i]
exp_point_end = [7 - 2 * i, -3 + 4 * i]
assert ut.vector_is_close(segment_c.point_start, exp_point_start)
assert ut.vector_is_close(segment_c.point_end, exp_point_end)
# check weight clipped to valid range -----------------
segment_c = geo.LineSegment.linear_interpolation(segment_a, segment_b, -3)
assert ut.vector_is_close(segment_c.point_start, segment_a.point_start)
assert ut.vector_is_close(segment_c.point_end, segment_a.point_end)
segment_c = geo.LineSegment.linear_interpolation(segment_a, segment_b, 6)
assert ut.vector_is_close(segment_c.point_start, segment_b.point_start)
assert ut.vector_is_close(segment_c.point_end, segment_b.point_end)
# exceptions ------------------------------------------
# wrong types
arc_segment = geo.ArcSegment.construct_with_points([0, 0], [1, 1], [1, 0])
with pytest.raises(TypeError):
geo.LineSegment.linear_interpolation(segment_a, arc_segment, 0.5)
with pytest.raises(TypeError):
geo.LineSegment.linear_interpolation(arc_segment, segment_a, 0.5)
with pytest.raises(TypeError):
geo.LineSegment.linear_interpolation(arc_segment, arc_segment, 0.5)
# test ArcSegment ------------------------------------------------------------
def check_arc_segment_values(
segment,
point_start,
point_end,
point_center,
winding_ccw,
radius,
arc_angle,
arc_length,
):
"""Check if the internal values are identical with the expected values.
Parameters
----------
segment :
Arc segment that should be checked
point_start :
Expected start point of the segment
point_end :
Expected end point of the segment
point_center :
Expected center point of the segment
winding_ccw :
Expected winding bool (see ArcSegment documentation)
radius :
Expected radius
arc_angle :
Expected arc angle
arc_length :
Expected arc length
"""
assert ut.vector_is_close(segment.point_start, point_start)
assert ut.vector_is_close(segment.point_end, point_end)
assert ut.vector_is_close(segment.point_center, point_center)
assert segment.arc_winding_ccw is winding_ccw
assert math.isclose(segment.radius, radius)
assert math.isclose(segment.arc_angle, arc_angle)
assert math.isclose(segment.arc_length, arc_length)
def arc_segment_rasterization_test(
point_center,
point_start,
point_end,
raster_width,
arc_winding_ccw,
is_point_location_valid_func,
):
"""Test the arc segment's rasterize function.
Performs the default segment rasterization test and some additional ones
specific to the arc segment.
Parameters
----------
point_center :
Center point of the segment
point_start :
Start point of the segment
point_end :
End point of the segment
raster_width :
Raster width
arc_winding_ccw :
Bool that determines the winding order
is_point_location_valid_func :
Function that returns a bool which
specifies whether a point is valid or not. Interface: (point,
point_center_arc) -> bool
"""
point_center = np.array(point_center)
point_start = np.array(point_start)
point_end = np.array(point_end)
radius_arc = np.linalg.norm(point_start - point_center)
arc_segment = geo.ArcSegment.construct_with_points(
point_start, point_end, point_center, arc_winding_ccw
)
# Perform standard segment rasterization tests
default_segment_rasterization_tests(arc_segment, raster_width)
# rasterize segment
data = arc_segment.rasterize(raster_width)
num_points = data.shape[1]
for i in range(num_points):
point = data[:, i]
# Check that winding is correct
assert is_point_location_valid_func(point, point_center)
# Check that points have the correct distance to the arcs center
distance_center_point = np.linalg.norm(point - point_center)
assert math.isclose(distance_center_point, radius_arc, abs_tol=1e-6)
def test_arc_segment_constructor():
"""Test the arc segment constructor."""
points = [[3, 6, 6], [3, 6, 3]]
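    # The columns of the 2x3 points matrix hold the start, end and center
    # point, as the expected values checked below indicate: start [3, 3],
    # end [6, 6] and center [6, 3].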
segment_cw = geo.ArcSegment(points, False)
segment_ccw = geo.ArcSegment(points, True)
check_arc_segment_values(
segment=segment_cw,
point_start=[3, 3],
point_end=[6, 6],
point_center=[6, 3],
winding_ccw=False,
radius=3,
arc_angle=1 / 2 * np.pi,
arc_length=3 / 2 * np.pi,
)
check_arc_segment_values(
segment=segment_ccw,
point_start=[3, 3],
point_end=[6, 6],
point_center=[6, 3],
winding_ccw=True,
radius=3,
arc_angle=3 / 2 * np.pi,
arc_length=9 / 2 * np.pi,
)
# check exceptions ------------------------------------
# radius differs
points = [[3, 6, 6], [3, 10, 3]]
with pytest.raises(Exception):
geo.ArcSegment(points, False)
# radius is zero
points = [[3, 3, 3], [3, 3, 3]]
with pytest.raises(Exception):
geo.ArcSegment(points, False)
# arc length zero
points = [[3, 3, 6], [3, 3, 3]]
with pytest.raises(Exception):
geo.ArcSegment(points, False)
with pytest.raises(Exception):
geo.ArcSegment(points, True)
# not 2x3
points = [[3, 3], [3, 3]]
with pytest.raises(ValueError):
geo.ArcSegment(points)
# not a 2d array
with pytest.raises(ValueError):
geo.ArcSegment([[[3, 5], [3, 4]]])
def test_arc_segment_factories():
"""Test the arc segment's factory functions.
Creates arc segments using the factory functions and checks if they are
constructed as expected.
"""
# construction with center point ----------------------
point_start = [3, 3]
point_end = [6, 6]
point_center_left = [3, 6]
point_center_right = [6, 3]
# expected results
radius = 3
angle_small = np.pi * 0.5
angle_large = np.pi * 1.5
arc_length_small = np.pi * 1.5
arc_length_large = np.pi * 4.5
segment_cw = geo.ArcSegment.construct_with_points(
point_start, point_end, point_center_right, False
)
segment_ccw = geo.ArcSegment.construct_with_points(
point_start, point_end, point_center_right, True
)
check_arc_segment_values(
segment_cw,
point_start,
point_end,
point_center_right,
False,
radius,
angle_small,
arc_length_small,
)
check_arc_segment_values(
segment_ccw,
point_start,
point_end,
point_center_right,
True,
radius,
angle_large,
arc_length_large,
)
# construction with radius ----------------------
# center left of line
segment_cw = geo.ArcSegment.construct_with_radius(
point_start, point_end, radius, True, False
)
segment_ccw = geo.ArcSegment.construct_with_radius(
point_start, point_end, radius, True, True
)
check_arc_segment_values(
segment_cw,
point_start,
point_end,
point_center_left,
False,
radius,
angle_large,
arc_length_large,
)
check_arc_segment_values(
segment_ccw,
point_start,
point_end,
point_center_left,
True,
radius,
angle_small,
arc_length_small,
)
# center right of line
segment_cw = geo.ArcSegment.construct_with_radius(
point_start, point_end, radius, False, False
)
segment_ccw = geo.ArcSegment.construct_with_radius(
point_start, point_end, radius, False, True
)
check_arc_segment_values(
segment_cw,
point_start,
point_end,
point_center_right,
False,
radius,
angle_small,
arc_length_small,
)
check_arc_segment_values(
segment_ccw,
point_start,
point_end,
point_center_right,
True,
radius,
angle_large,
arc_length_large,
)
# check that too small radii will be clipped to minimal radius
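    # The smallest possible radius is half the distance between start [3, 3]
    # and end [6, 6], i.e. sqrt(18) / 2. The center then lies on the chord's
    # midpoint [4.5, 4.5] and both windings describe a semicircle (arc angle
    # pi, arc length pi * sqrt(18) / 2), which is what the checks below expect.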
segment_cw = geo.ArcSegment.construct_with_radius(
point_start, point_end, 0.1, False, False
)
segment_ccw = geo.ArcSegment.construct_with_radius(
point_start, point_end, 0.1, False, True
)
check_arc_segment_values(
segment_cw,
point_start,
point_end,
[4.5, 4.5],
False,
np.sqrt(18) / 2,
np.pi,
np.pi * np.sqrt(18) / 2,
)
check_arc_segment_values(
segment_ccw,
point_start,
point_end,
[4.5, 4.5],
True,
np.sqrt(18) / 2,
np.pi,
np.pi * np.sqrt(18) / 2,
)
def point_in_second_quadrant(p, c):
"""Return True if a point is inside a circle's second quadrant.
A point that lies directly on the boundary is considered as being inside.
Parameters
----------
p :
Point that should be checked
c :
Center point of the circle
Returns
-------
bool
True or False
"""
return p[0] - 1e-9 <= c[0] and p[1] >= c[1] - 1e-9
def point_not_in_second_quadrant(p, c):
"""Return True if a point is not inside a circle's second quadrant.
A point that lies directly on the boundary is considered as being outside.
Parameters
----------
p :
Point that should be checked
c :
Center point of the circle
Returns
-------
bool
True or False
"""
return not (p[0] + 1e-9 < c[0] and p[1] > c[1] + 1e-9)
def point_not_below_center(p, c):
"""Return True if a point lies not below (y-value) a circle's center point.
Parameters
----------
p :
Point that should be checked
c :
Center point of the circle
Returns
-------
bool
True or False
"""
return p[1] >= c[1] - 1e-9
def point_not_above_center(p, c):
"""Return True if a point lies not above (y-value) a circle's center point.
Parameters
----------
p :
Point that should be checked
c :
Center point of the circle
Returns
-------
bool
True or False
"""
return p[1] - 1e-9 <= c[1]
def test_arc_segment_rasterization():
"""Test the arc segment's rasterize function.
    Creates some simple arc segments (semicircle and quadrant) and tests the
    rasterization results.
"""
# center right of line point_start -> point_end
# ---------------------------------------------
point_center = [3, 2]
point_start = [1, 2]
point_end = [3, 4]
raster_width = 0.2
arc_segment_rasterization_test(
point_center,
point_start,
point_end,
raster_width,
False,
point_in_second_quadrant,
)
arc_segment_rasterization_test(
point_center,
point_start,
point_end,
raster_width,
True,
point_not_in_second_quadrant,
)
# center left of line point_start -> point_end
# --------------------------------------------
point_center = [-4, -7]
point_start = [-4, -2]
point_end = [-9, -7]
raster_width = 0.1
arc_segment_rasterization_test(
point_center,
point_start,
point_end,
raster_width,
False,
point_not_in_second_quadrant,
)
arc_segment_rasterization_test(
point_center,
point_start,
point_end,
raster_width,
True,
point_in_second_quadrant,
)
# center on line point_start -> point_end
# ---------------------------------------
point_center = [3, 2]
point_start = [2, 2]
point_end = [4, 2]
raster_width = 0.1
arc_segment_rasterization_test(
point_center,
point_start,
point_end,
raster_width,
False,
point_not_below_center,
)
arc_segment_rasterization_test(
point_center, point_start, point_end, raster_width, True, point_not_above_center
)
def arc_segment_transformation_test_case(
point_start,
point_end,
point_center,
exp_start,
exp_end,
exp_center,
exp_is_winding_changed,
exp_radius,
exp_angle_ccw,
translation=None,
transformation=None,
):
"""Perform a single transformation test on an arc segment.
The test applies a transformation and compares the result to the
expected values.
Parameters
----------
point_start :
Start point of the arc segment
point_end :
End point of the arc segment
point_center :
        Center point of the arc segment
exp_start :
Expected start point of the transformed arc segment
exp_end :
Expected end point of the transformed arc segment
exp_center :
Expected center point of the transformed arc segment
exp_is_winding_changed :
Bool that specifies if the transformation
should change the winding order
exp_radius :
Expected radius of the transformed arc segment
exp_angle_ccw :
Expected angle of the transformed counter
clockwise winding arc segment (refers to the winding before transformation)
translation :
Translation that should be applied (optional) (Default value = None)
transformation :
Transformation that should be applied (optional) (Default value = None)
"""
if translation is not None:
assert transformation is None, "No mixed test cases supported"
segment_cw = geo.ArcSegment.construct_with_points(
point_start, point_end, point_center, False
)
segment_ccw = geo.ArcSegment.construct_with_points(
point_start, point_end, point_center, True
)
# store some values
radius_original = segment_cw.radius
arc_angle_cw_original = segment_cw.arc_angle
arc_angle_ccw_original = segment_ccw.arc_angle
arc_length_cw_original = segment_cw.arc_length
arc_length_ccw_original = segment_ccw.arc_length
if translation is not None:
segment_cw_trans = segment_cw.translate(translation)
segment_ccw_trans = segment_ccw.translate(translation)
else:
segment_cw_trans = segment_cw.transform(transformation)
segment_ccw_trans = segment_ccw.transform(transformation)
# original segments not modified
check_arc_segment_values(
segment_cw,
point_start,
point_end,
point_center,
False,
radius_original,
arc_angle_cw_original,
arc_length_cw_original,
)
check_arc_segment_values(
segment_ccw,
point_start,
point_end,
point_center,
True,
radius_original,
arc_angle_ccw_original,
arc_length_ccw_original,
)
# check new segment
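    # The CW and CCW arcs through the same three points are complementary, so
    # their angles add up to 2 * pi; the arc length is always angle * radius.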
exp_angle_cw = 2 * np.pi - exp_angle_ccw
exp_arc_length_cw = exp_angle_cw * exp_radius
exp_arc_length_ccw = exp_angle_ccw * exp_radius
check_arc_segment_values(
segment_cw_trans,
exp_start,
exp_end,
exp_center,
exp_is_winding_changed,
exp_radius,
exp_angle_cw,
exp_arc_length_cw,
)
check_arc_segment_values(
segment_ccw_trans,
exp_start,
exp_end,
exp_center,
not exp_is_winding_changed,
exp_radius,
exp_angle_ccw,
exp_arc_length_ccw,
)
# apply same transformation in place
if translation is not None:
segment_cw.apply_translation(translation)
segment_ccw.apply_translation(translation)
else:
segment_cw.apply_transformation(transformation)
segment_ccw.apply_transformation(transformation)
check_segments_identical(segment_cw, segment_cw_trans)
check_segments_identical(segment_ccw, segment_ccw_trans)
def test_arc_segment_transformations():
"""Test the arc segments transformation functions."""
# translation -----------------------------------------
arc_segment_transformation_test_case(
point_start=[3, 3],
point_end=[5, 5],
point_center=[5, 3],
exp_start=[2, 7],
exp_end=[4, 9],
exp_center=[4, 7],
exp_is_winding_changed=False,
exp_radius=2,
exp_angle_ccw=1.5 * np.pi,
translation=[-1, 4],
)
# 45 degree rotation ----------------------------------
s = np.sin(np.pi / 4.0)
c = np.cos(np.pi / 4.0)
rotation_matrix = [[c, -s], [s, c]]
arc_segment_transformation_test_case(
point_start=[3, 3],
point_end=[5, 5],
point_center=[5, 3],
exp_start=[0, np.sqrt(18)],
exp_end=[0, np.sqrt(50)],
exp_center=np.matmul(rotation_matrix, [5, 3]),
exp_is_winding_changed=False,
exp_radius=2,
exp_angle_ccw=1.5 * np.pi,
transformation=rotation_matrix,
)
# reflection at 45 degree line ------------------------
v = np.array([-1, 1], dtype=float)
reflection_matrix = np.identity(2) - 2 / np.dot(v, v) * np.outer(v, v)
arc_segment_transformation_test_case(
point_start=[3, 2],
point_end=[5, 4],
point_center=[5, 2],
exp_start=[2, 3],
exp_end=[4, 5],
exp_center=[2, 5],
exp_is_winding_changed=True,
exp_radius=2,
exp_angle_ccw=1.5 * np.pi,
transformation=reflection_matrix,
)
# scaling both coordinates equally --------------------
scaling_matrix = [[4, 0], [0, 4]]
arc_segment_transformation_test_case(
point_start=[3, 2],
point_end=[5, 4],
point_center=[5, 2],
exp_start=[12, 8],
exp_end=[20, 16],
exp_center=[20, 8],
exp_is_winding_changed=False,
exp_radius=8,
exp_angle_ccw=1.5 * np.pi,
transformation=scaling_matrix,
)
# non-uniform scaling which results in a valid arc ----
scaling_matrix = [[0.25, 0], [0, 2]]
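    # After scaling, start, end and center become [2, 8], [8, 8] and [5, 4],
    # so the radius is sqrt(3**2 + 4**2) = 5. The chord from start to end has
    # half-length 3, giving a clockwise arc angle of 2 * arcsin(3 / 5) and a
    # counterclockwise angle of 2 * pi minus that value.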
exp_angle_ccw = 2 * np.pi - 2 * np.arcsin(3 / 5)
arc_segment_transformation_test_case(
point_start=[8, 4],
point_end=[32, 4],
point_center=[20, 2],
exp_start=[2, 8],
exp_end=[8, 8],
exp_center=[5, 4],
exp_is_winding_changed=False,
exp_radius=5,
exp_angle_ccw=exp_angle_ccw,
transformation=scaling_matrix,
)
# exceptions ------------------------------------------
# transformation distorts arc
segment = geo.ArcSegment.construct_with_points([3, 2], [5, 4], [5, 2], False)
with pytest.raises(Exception):
segment.transform(scaling_matrix)
with pytest.raises(Exception):
segment.apply_transformation(scaling_matrix)
# transformation results in length = 0
segment = geo.ArcSegment.construct_with_points([3, 2], [5, 4], [5, 2], False)
zero_matrix = np.zeros((2, 2))
with pytest.raises(Exception):
segment.transform(zero_matrix)
with pytest.raises(Exception):
segment.apply_transformation(zero_matrix)
def test_arc_segment_interpolation():
"""Test the arc segment interpolation.
Since it is not implemented, check if an exception is raised.
"""
segment_a = geo.ArcSegment.construct_with_points([0, 0], [1, 1], [1, 0])
segment_b = geo.ArcSegment.construct_with_points([0, 0], [2, 2], [0, 2])
# not implemented yet
with pytest.raises(Exception):
geo.ArcSegment.linear_interpolation(segment_a, segment_b, 1)
# test Shape ------------------------------------------------------------------
def test_shape_construction():
"""Test the constructor of the shape.
Constructs some shapes in various ways and checks the results.
"""
line_segment = geo.LineSegment.construct_with_points([1, 1], [1, 2])
arc_segment = geo.ArcSegment.construct_with_points([0, 0], [1, 1], [0, 1])
# Empty construction
shape = geo.Shape()
assert shape.num_segments == 0
# Single element construction shape
shape = geo.Shape(line_segment)
assert shape.num_segments == 1
# Multi segment construction
shape = geo.Shape([arc_segment, line_segment])
assert shape.num_segments == 2
assert isinstance(shape.segments[0], geo.ArcSegment)
assert isinstance(shape.segments[1], geo.LineSegment)
# exceptions ------------------------------------------
# segments not connected
with pytest.raises(Exception):
shape = geo.Shape([line_segment, arc_segment])
def test_shape_segment_addition():
"""Test the add_segments function of the shape.
Test should be self explanatory.
"""
# Create shape and add segments
line_segment = geo.LineSegment.construct_with_points([1, 1], [0, 0])
arc_segment = geo.ArcSegment.construct_with_points([0, 0], [1, 1], [0, 1])
arc_segment2 = geo.ArcSegment.construct_with_points([1, 1], [0, 0], [0, 1])
shape = geo.Shape()
shape.add_segments(line_segment)
assert shape.num_segments == 1
shape.add_segments([arc_segment, arc_segment2])
assert shape.num_segments == 3
assert isinstance(shape.segments[0], geo.LineSegment)
assert isinstance(shape.segments[1], geo.ArcSegment)
assert isinstance(shape.segments[2], geo.ArcSegment)
# exceptions ------------------------------------------
    # new segments are not connected to already included segments
with pytest.raises(Exception):
shape.add_segments(arc_segment2)
assert shape.num_segments == 3 # ensure shape is unmodified
with pytest.raises(Exception):
shape.add_segments([arc_segment2, arc_segment])
assert shape.num_segments == 3 # ensure shape is unmodified
with pytest.raises(Exception):
shape.add_segments([arc_segment, arc_segment])
assert shape.num_segments == 3 # ensure shape is unmodified
def test_shape_line_segment_addition():
"""Test the shape's add_line_segments function.
    The test should be self-explanatory.
"""
shape_0 = geo.Shape()
shape_0.add_line_segments([[0, 0], [1, 0]])
assert shape_0.num_segments == 1
shape_1 = geo.Shape()
shape_1.add_line_segments([[0, 0], [1, 0], [2, 0]])
assert shape_1.num_segments == 2
# test possible formats to add single line segment ----
shape_0.add_line_segments([2, 0])
assert shape_0.num_segments == 2
shape_0.add_line_segments([[3, 0]])
assert shape_0.num_segments == 3
shape_0.add_line_segments(np.array([4, 0]))
assert shape_0.num_segments == 4
shape_0.add_line_segments(np.array([[5, 0]]))
assert shape_0.num_segments == 5
# add multiple segments -------------------------------
shape_0.add_line_segments([[6, 0], [7, 0], [8, 0]])
assert shape_0.num_segments == 8
shape_0.add_line_segments(np.array([[9, 0], [10, 0], [11, 0]]))
assert shape_0.num_segments == 11
for i in range(11):
expected_segment = geo.LineSegment.construct_with_points([i, 0], [i + 1, 0])
check_segments_identical(shape_0.segments[i], expected_segment)
if i < 2:
check_segments_identical(shape_1.segments[i], expected_segment)
# exceptions ------------------------------------------
shape_2 = geo.Shape()
# invalid inputs
with pytest.raises(Exception):
shape_2.add_line_segments([])
assert shape_2.num_segments == 0
with pytest.raises(Exception):
shape_2.add_line_segments(None)
assert shape_2.num_segments == 0
# single point with empty shape
with pytest.raises(Exception):
shape_2.add_line_segments([0, 1])
assert shape_2.num_segments == 0
# invalid point format
with pytest.raises(Exception):
shape_2.add_line_segments([[0, 1, 2], [1, 2, 3]])
assert shape_2.num_segments == 0
def test_shape_rasterization():
"""Test rasterization function of the shape.
The test uses three line segment of equal length, making it easy to
check the rasterized points. Every step of the test is documented
with comments.
"""
points = np.array([[0, 0], [0, 1], [1, 1], [1, 0]])
shape = geo.Shape().add_line_segments(points)
# rasterize shape
raster_width = 0.2
data = shape.rasterize(raster_width)
# no duplications
assert helpers.are_all_columns_unique(data)
# check each data point
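    # The three unit-length segments rasterized with width 0.2 yield 16 unique
    # points: indices 0-5 run up the left edge, 6-10 across the top and 11-15
    # down the right edge, which is what the branches below check.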
num_data_points = data.shape[1]
for i in range(num_data_points):
if i < 6:
assert ut.vector_is_close([0, i * 0.2], data[:, i])
elif i < 11:
assert ut.vector_is_close([(i - 5) * 0.2, 1], data[:, i])
else:
assert ut.vector_is_close([1, 1 - (i - 10) * 0.2], data[:, i])
# Test with too large raster width --------------------
# The shape does not clip large values to the valid range itself. The
# added segments do the clipping. If a custom segment does not do that,
# there is currently no mechanism to correct it.
    # However, this test somewhat ensures that each segment is rasterized
# individually.
data = shape.rasterize(10)
for point in points:
assert ut.is_column_in_matrix(point, data)
assert data.shape[1] == 4
# no duplication if shape is closed -------------------
shape.add_line_segments(points[0])
data = shape.rasterize(10)
assert data.shape[1] == 4
assert helpers.are_all_columns_unique(data)
# exceptions ------------------------------------------
with pytest.raises(Exception):
shape.rasterize(0)
with pytest.raises(Exception):
shape.rasterize(-3)
# empty shape
shape_empty = geo.Shape()
with pytest.raises(Exception):
shape_empty.rasterize(0.2)
def default_test_shape():
"""Get a default shape for tests.
Returns
-------
weldx.geometry.Shape
Default shape for tests
"""
# create shape
arc_segment = geo.ArcSegment.construct_with_points([3, 4], [5, 0], [6, 3])
line_segment = geo.LineSegment.construct_with_points([5, 0], [11, 3])
return geo.Shape([arc_segment, line_segment])
def default_translation_vector():
"""Get a default translation for transformation tests.
Returns
-------
numpy.ndarray
Translation vector
"""
return ut.to_float_array([3, 4])
def check_point_translation(point_trans, point_original):
"""Check if a point is translated by the default translation test vector.
Parameters
----------
point_trans :
Translated point
point_original :
Original point
"""
assert ut.vector_is_close(
point_trans - default_translation_vector(), point_original
)
def check_point_rotation_90_degree(point_trans, point_original):
"""Check if a point is rotated by 90 degrees.
Parameters
----------
point_trans :
Transformed point
point_original :
Original point
"""
assert point_trans[0] == point_original[1]
assert point_trans[1] == -point_original[0]
def check_point_reflection_at_line_with_slope_1(point_trans, point_original):
"""Check if a point is reflected at a line through the origin with slope 1.
Parameters
----------
point_trans :
Transformed point
point_original :
Original point
"""
assert point_trans[0] == point_original[1]
assert point_trans[1] == point_original[0]
def shape_transformation_test_case(
check_point_func, exp_winding_change, translation=None, transformation=None
):
"""Test a shape transformation.
Parameters
----------
check_point_func :
Function that checks if a point is transformed
correctly. Interface: (point_transformed, point_original) -> None
exp_winding_change :
Bool that specifies if the transformation
should change the winding order of arc segments.
translation :
Translation vector (optional) (Default value = None)
transformation :
Transformation matrix (optional) (Default value = None)
"""
if translation is not None:
assert transformation is None, "No mixed test cases supported"
shape = default_test_shape()
if translation is not None:
shape_trans = shape.translate(translation)
else:
shape_trans = shape.transform(transformation)
# original shape unchanged
check_shapes_identical(shape, default_test_shape())
# extract segments
arc_segment = shape.segments[0]
line_segment = shape.segments[1]
arc_segment_trans = shape_trans.segments[0]
line_segment_trans = shape_trans.segments[1]
# check transformed arc segment's winding order
assert arc_segment_trans.arc_winding_ccw is not exp_winding_change
# check segment points
check_point_func(arc_segment_trans.point_start, arc_segment.point_start)
check_point_func(arc_segment_trans.point_end, arc_segment.point_end)
check_point_func(arc_segment_trans.point_center, arc_segment.point_center)
check_point_func(line_segment_trans.point_start, line_segment.point_start)
check_point_func(line_segment_trans.point_end, line_segment.point_end)
# apply same transformation in place
if translation is not None:
shape.apply_translation(translation)
else:
shape.apply_transformation(transformation)
check_shapes_identical(shape_trans, shape)
def test_shape_transformation():
"""Test the shapes transformation functions.
Dedicated reflection functions are tested separately.
"""
# translation -----------------------------------------
shape_transformation_test_case(
check_point_func=check_point_translation,
exp_winding_change=False,
translation=default_translation_vector(),
)
# transformation without reflection -------------------
rotation_matrix = np.array([[0, 1], [-1, 0]])
shape_transformation_test_case(
check_point_func=check_point_rotation_90_degree,
exp_winding_change=False,
transformation=rotation_matrix,
)
# transformation with reflection ----------------------
reflection_matrix = np.array([[0, 1], [1, 0]])
shape_transformation_test_case(
check_point_func=check_point_reflection_at_line_with_slope_1,
exp_winding_change=True,
transformation=reflection_matrix,
)
def check_reflected_point(
point_original, point_reflected, reflection_axis_offset, reflection_axis_direction
):
"""Check if a point is reflected correctly.
The function determines if the midpoint of the line
point->reflected_point lies on the reflection axis. The reflection axis
is specified by a normal and an offset.
Parameters
----------
point_original :
Original point
point_reflected :
Reflected point
reflection_axis_offset :
        Offset vector of the reflection axis
        relative to the origin.
reflection_axis_direction :
Direction vector of the reflection axis.
"""
vec_original_reflected = point_reflected - point_original
midpoint = point_original + 0.5 * vec_original_reflected
shifted_mid_point = midpoint - reflection_axis_offset
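    # A vanishing 2x2 determinant means the shifted midpoint is parallel to
    # the axis direction, i.e. the midpoint lies on the reflection axis.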
determinant = np.linalg.det([shifted_mid_point, reflection_axis_direction])
assert math.isclose(determinant, 0, abs_tol=1e-9)
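# The following helper is a minimal, self-contained sketch (not part of the
# test suite; its name and signature are hypothetical) of the reflection
# geometry the checks above rely on: a point p is reflected at the line
# {x : dot(x, n_hat) = d} via p' = p - 2 * (dot(p, n_hat) - d) * n_hat. The
# midpoint of p and p' then lies on that line, which is exactly the property
# check_reflected_point verifies with its determinant test.
def _reflect_point_across_line_sketch(point, normal, distance_to_origin):
    """Reflect a 2d point across a line given by its normal and offset."""
    unit_normal = np.array(normal, dtype=float)
    unit_normal = unit_normal / np.linalg.norm(unit_normal)
    signed_distance = np.dot(point, unit_normal) - distance_to_origin
    return np.asarray(point, dtype=float) - 2 * signed_distance * unit_normal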
def shape_reflection_test_case(normal, distance_to_origin):
"""Test the shape's reflection functions.
Only the functions that use a normal and a distance to the origin to
specify the reflection axis are tested by this test.
Parameters
----------
normal :
Normal of the reflection axis
distance_to_origin :
Distance to the origin of the reflection axis.
"""
direction_reflection_axis = np.array([normal[1], -normal[0]])
normal_length = np.linalg.norm(normal)
unit_normal = np.array(normal) / normal_length
offset = distance_to_origin * unit_normal
shape = default_test_shape()
# create reflected shape
shape_reflected = shape.reflect(normal, distance_to_origin)
# original shape is not modified
check_shapes_identical(shape, default_test_shape())
arc_segment = shape.segments[0]
arc_segment_ref = shape_reflected.segments[0]
line_segment = shape.segments[1]
line_segment_ref = shape_reflected.segments[1]
# check reflected points
check_reflected_point(
arc_segment.point_start,
arc_segment_ref.point_start,
offset,
direction_reflection_axis,
)
check_reflected_point(
arc_segment.point_end,
arc_segment_ref.point_end,
offset,
direction_reflection_axis,
)
check_reflected_point(
arc_segment.point_center,
arc_segment_ref.point_center,
offset,
direction_reflection_axis,
)
check_reflected_point(
line_segment.point_start,
line_segment_ref.point_start,
offset,
direction_reflection_axis,
)
check_reflected_point(
line_segment.point_end,
line_segment_ref.point_end,
offset,
direction_reflection_axis,
)
# apply same reflection in place
shape.apply_reflection(normal, distance_to_origin)
check_shapes_identical(shape, shape_reflected)
def test_shape_reflection():
"""Test multiple reflections."""
shape_reflection_test_case([2, 1], np.linalg.norm([2, 1]))
shape_reflection_test_case([0, 1], 5)
shape_reflection_test_case([1, 0], 3)
shape_reflection_test_case([1, 0], -3)
shape_reflection_test_case([-7, 2], 4.12)
shape_reflection_test_case([-7, -2], 4.12)
shape_reflection_test_case([7, -2], 4.12)
# exceptions ------------------------------------------
shape = default_test_shape()
with pytest.raises(Exception):
shape.reflect([0, 0], 2)
with pytest.raises(Exception):
shape.apply_reflection([0, 0])
def check_point_reflected_across_line(
point_original, point_reflected, point_start, point_end
):
"""Check if a point is reflected correctly.
The function determines if the midpoint of the line
point->reflected_point lies on the reflection axis. The reflection axis
is specified by 2 points.
Parameters
----------
point_original :
Original point
point_reflected :
Reflected point
point_start :
First point of the reflection axis
point_end :
Second point of the reflection axis
"""
vec_original_reflected = point_reflected - point_original
mid_point = point_original + 0.5 * vec_original_reflected
vec_start_mid = mid_point - point_start
vec_start_end = point_end - point_start
determinant = np.linalg.det([vec_start_end, vec_start_mid])
assert math.isclose(determinant, 0, abs_tol=1e-9)
def shape_reflection_across_line_test_case(point_start, point_end):
"""Test the shape's reflection functions.
Only the functions that use 2 points to specify the reflection axis are
tested by this test.
Parameters
----------
point_start :
First point of the reflection axis
point_end :
Second point of the reflection axis
"""
point_start = np.array(point_start, float)
point_end = np.array(point_end, float)
shape = default_test_shape()
# create reflected shape
shape_reflected = shape.reflect_across_line(point_start, point_end)
# original shape is not modified
check_shapes_identical(shape, default_test_shape())
arc_segment = shape.segments[0]
arc_segment_ref = shape_reflected.segments[0]
line_segment = shape.segments[1]
line_segment_ref = shape_reflected.segments[1]
# check reflected points
check_point_reflected_across_line(
arc_segment.point_start, arc_segment_ref.point_start, point_start, point_end
)
check_point_reflected_across_line(
arc_segment.point_end, arc_segment_ref.point_end, point_start, point_end
)
check_point_reflected_across_line(
arc_segment.point_center, arc_segment_ref.point_center, point_start, point_end
)
check_point_reflected_across_line(
line_segment.point_start, line_segment_ref.point_start, point_start, point_end
)
check_point_reflected_across_line(
line_segment.point_end, line_segment_ref.point_end, point_start, point_end
)
# apply same reflection in place
shape.apply_reflection_across_line(point_start, point_end)
check_shapes_identical(shape, shape_reflected)
def test_shape_reflection_across_line():
"""Test multiple reflections."""
shape_reflection_across_line_test_case([0, 0], [0, 1])
shape_reflection_across_line_test_case([0, 0], [1, 0])
shape_reflection_across_line_test_case([-3, 2.5], [31.53, -23.44])
shape_reflection_across_line_test_case([7, 8], [9, 10])
shape_reflection_across_line_test_case([-4.26, -23.1], [-8, -0.12])
shape_reflection_across_line_test_case([-2, 1], [2, -4.5])
# exceptions ------------------------------------------
shape = default_test_shape()
with pytest.raises(Exception):
shape.reflect_across_line([2, 5], [2, 5])
with pytest.raises(Exception):
shape.apply_reflection_across_line([-3, 2], [-3, 2])
def segment_interpolation_nearest(segment_a, segment_b, weight):
"""Interpolate 2 segments by taking the nearest one.
Parameters
----------
segment_a :
First segment
segment_b :
Second segment
weight :
Interpolation weight
Returns
-------
weldx.geometry.LineSegment
Nearest segment
"""
if weight > 0.5:
return segment_b
return segment_a
def test_shape_interpolation_general():
"""Test the shapes interpolation function.
Creates 2 shapes, each containing 2 segments. Different segment
interpolations are used. Afterwards, the shapes are interpolated using
different weights and the results are compared to the expected values.
"""
# create shapes
shape_a = geo.Shape().add_line_segments([[-1, -1], [1, 1], [3, -1]])
shape_b = geo.Shape().add_line_segments([[-1, 4], [1, 1], [3, 4]])
# define interpolation schemes
interpolations = [
geo.LineSegment.linear_interpolation,
segment_interpolation_nearest,
]
for i in range(6):
# interpolate shapes
weight = i / 5.0
shape_c = geo.Shape.interpolate(shape_a, shape_b, weight, interpolations)
# check result
if weight > 0.5:
last_point_exp = [3, 4]
else:
last_point_exp = [3, -1]
points_exp = [[-1, -1 + 5 * weight], [1, 1], last_point_exp]
shape_c_exp = geo.Shape().add_line_segments(points_exp)
check_shapes_identical(shape_c, shape_c_exp)
# check weight clipped to valid range -----------------
shape_d = geo.Shape.linear_interpolation(shape_a, shape_b, -3)
check_shapes_identical(shape_d, shape_a)
shape_e = geo.Shape.linear_interpolation(shape_a, shape_b, 100)
check_shapes_identical(shape_e, shape_b)
# exceptions ------------------------------------------
# interpolation destroys shape continuity
shape_f = geo.Shape().add_line_segments([[-1, 4], [2, 2], [3, 4]])
with pytest.raises(Exception):
geo.Shape.interpolate(shape_a, shape_f, 0.5, interpolations)
# number of segments differ
shape_a.add_line_segments([2, 2])
with pytest.raises(Exception):
geo.Shape.linear_interpolation(shape_a, shape_b, 0.25)
def test_shape_linear_interpolation():
"""Test the shapes linear interpolation function.
Creates 2 shapes, each containing 2 segments. Afterwards, the shapes are
interpolated using different weights and the results are compared to the
expected values.
"""
# create shapes
shape_a = geo.Shape().add_line_segments([[0, 0], [1, 1], [2, 0]])
shape_b = geo.Shape().add_line_segments([[1, 1], [2, -1], [3, 5]])
for i in range(5):
# interpolate shapes
weight = i / 4.0
shape_c = geo.Shape.linear_interpolation(shape_a, shape_b, weight)
# check result
points_exp = [
[weight, weight],
[1 + weight, 1 - 2 * weight],
[2 + weight, 5 * weight],
]
shape_c_exp = geo.Shape().add_line_segments(points_exp)
check_shapes_identical(shape_c, shape_c_exp)
# check weight clipped to valid range -----------------
shape_d = geo.Shape.linear_interpolation(shape_a, shape_b, -3)
check_shapes_identical(shape_d, shape_a)
shape_e = geo.Shape.linear_interpolation(shape_a, shape_b, 100)
check_shapes_identical(shape_e, shape_b)
# exceptions ------------------------------------------
# number of segments differ
shape_a.add_line_segments([2, 2])
with pytest.raises(Exception):
geo.Shape.linear_interpolation(shape_a, shape_b, 0.25)
# Test profile class ----------------------------------------------------------
def test_profile_construction_and_shape_addition():
"""Test profile construction and addition of shapes.
Test details are explained by comments.
"""
arc_segment = geo.ArcSegment.construct_with_radius([-2, -2], [-1, -1], 1)
shape = geo.Shape(arc_segment)
shape.add_line_segments([[0, 0], [1, 0], [2, -1], [0, -1]])
# Check invalid types
with pytest.raises(TypeError):
geo.Profile(3)
with pytest.raises(TypeError):
geo.Profile("This is not right")
with pytest.raises(TypeError):
geo.Profile([2, 8, 1])
# Check valid types
profile = geo.Profile(shape)
assert profile.num_shapes == 1
profile = geo.Profile([shape, shape])
assert profile.num_shapes == 2
# Check invalid addition
with pytest.raises(TypeError):
profile.add_shapes([shape, 0.1])
with pytest.raises(TypeError):
profile.add_shapes(["shape"])
with pytest.raises(TypeError):
profile.add_shapes(0.1)
# Check that invalid calls only raise an exception and do not invalidate
# the internal data
assert profile.num_shapes == 2
# Check valid addition
profile.add_shapes(shape)
assert profile.num_shapes == 3
profile.add_shapes([shape, shape])
assert profile.num_shapes == 5
# Check shapes
shapes_profile = profile.shapes
for shape_profile in shapes_profile:
check_shapes_identical(shape, shape_profile)
def test_profile_rasterization():
"""Test the profile's rasterize function.
The test creates a profile where all its shapes lie on the y axis. The
gaps between each shape are identical to the raster width and they are
added in ascending order to the profile. Therefore, all raster points
are equidistant and can be checked easily.
"""
raster_width = 0.1
# create shapes
shape0 = geo.Shape().add_line_segments([[-1, 0], [-raster_width, 0]])
shape1 = geo.Shape().add_line_segments([[0, 0], [1, 0]])
shape2 = geo.Shape().add_line_segments([[1 + raster_width, 0], [2, 0]])
# create profile
profile = geo.Profile([shape0, shape1, shape2])
# rasterize
data = profile.rasterize(raster_width)
# no duplications
assert helpers.are_all_columns_unique(data)
# check raster data size
expected_number_raster_points = int(round(3 / raster_width)) + 1
assert data.shape[1] == expected_number_raster_points
# Check that all shapes are rasterized correct
for i in range(int(round(3 / raster_width)) + 1):
assert ut.vector_is_close(data[:, i], [i * raster_width - 1, 0])
# exceptions
with pytest.raises(Exception):
profile.rasterize(0)
with pytest.raises(Exception):
profile.rasterize(-3)
# Test trace segment classes --------------------------------------------------
def check_trace_segment_length(segment, tolerance=1e-9):
"""Check if a trace segment returns the correct length.
The check calculates the segment length numerically and compares it to
the length returned by the segment.
The numerical algorithm calculates the distances between several points
    on the trace and sums them up. The number of points is increased until
    the relative change of the sum between two iterations falls well below
    the specified tolerance.
Parameters
----------
segment :
Trace segment (any type)
tolerance :
Numerical tolerance (Default value = 1e-9)
"""
lcs = segment.local_coordinate_system(1)
length_numeric_prev = np.linalg.norm(lcs.coordinates)
# calculate numerical length by linearization
num_segments = 2.0
num_iterations = 20
# calculate numerical length with increasing number of segments until
# the rate of change between 2 calculations is small enough
for i in range(num_iterations):
length_numeric = 0
increment = 1.0 / num_segments
cs_0 = segment.local_coordinate_system(0)
for rel_pos in np.arange(increment, 1.0 + increment / 2, increment):
cs_1 = segment.local_coordinate_system(rel_pos)
length_numeric += np.linalg.norm(cs_1.coordinates - cs_0.coordinates)
cs_0 = copy.deepcopy(cs_1)
relative_change = length_numeric / length_numeric_prev
length_numeric_prev = copy.deepcopy(length_numeric)
num_segments *= 2
if math.isclose(relative_change, 1, abs_tol=tolerance / 10):
break
assert i < num_iterations - 1, (
"Segment length could not be " "determined numerically"
)
assert math.isclose(length_numeric, segment.length, abs_tol=tolerance)
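# Minimal, self-contained sketch of the refinement idea used above (purely
# illustrative, not collected by pytest; the name and defaults are
# hypothetical): the curve is sampled with an increasing number of linear
# pieces and the chord lengths are summed until the estimate stops changing.
# The example curve is a unit quarter circle, whose exact length is pi / 2.
def _approximate_quarter_circle_length(tolerance=1e-9, max_iterations=20):
    """Approximate the length of a unit quarter circle by linearization."""
    def _point(rel_pos):
        # parametrization of the quarter circle from (1, 0) to (0, 1)
        return np.array([np.cos(rel_pos * np.pi / 2), np.sin(rel_pos * np.pi / 2)])
    length_prev = np.linalg.norm(_point(1.0) - _point(0.0))
    length = length_prev
    num_segments = 2
    for _ in range(max_iterations):
        positions = np.linspace(0.0, 1.0, num_segments + 1)
        points = np.array([_point(pos) for pos in positions])
        length = np.sum(np.linalg.norm(np.diff(points, axis=0), axis=1))
        if math.isclose(length / length_prev, 1, abs_tol=tolerance / 10):
            break
        length_prev = length
        num_segments *= 2
    return length  # converges towards math.pi / 2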
def check_trace_segment_orientation(segment):
"""Test if the segment's local coordinate system is always oriented correctly.
The orientation of the trace is determined numerically. A small delta is
applied to the tested location to approximate the local direction of the
    trace. The result is compared to the local coordinate system's x-axis,
which should always point into the trace's direction.
Parameters
----------
segment :
Trace segment (any type)
"""
# The initial orientation of a segment must be [1, 0, 0]
lcs = segment.local_coordinate_system(0)
assert ut.vector_is_close(lcs.orientation[:, 0], np.array([1, 0, 0]))
delta = 1e-9
for rel_pos in np.arange(0.1, 1.01, 0.1):
lcs = segment.local_coordinate_system(rel_pos)
lcs_d = segment.local_coordinate_system(rel_pos - delta)
trace_direction_approx = tf.normalize(lcs.coordinates - lcs_d.coordinates)
# Check if the x-axis is aligned with the approximate trace direction
assert ut.vector_is_close(lcs.orientation[:, 0], trace_direction_approx, 1e-6)
def default_trace_segment_tests(segment, tolerance_length=1e-9):
"""Perform some default tests on trace segment.
Parameters
----------
segment :
Trace segment (any type)
tolerance_length :
Tolerance for the length test (Default value = 1e-9)
"""
lcs = segment.local_coordinate_system(0)
# test that function actually returns a coordinate system class
assert isinstance(lcs, tf.LocalCoordinateSystem)
# check that coordinates for weight 0 are at [0, 0, 0]
assert ut.vector_is_close(lcs.coordinates, [0, 0, 0])
# length and orientation tests
check_trace_segment_length(segment, tolerance_length)
check_trace_segment_orientation(segment)
def test_linear_horizontal_trace_segment():
"""Test the linear horizontal trace segment.
Each sub test is documented by comments.
"""
length = 7.13
segment = geo.LinearHorizontalTraceSegment(length)
# default tests
default_trace_segment_tests(segment)
# getter tests
assert math.isclose(segment.length, length)
# invalid inputs
with pytest.raises(ValueError):
geo.LinearHorizontalTraceSegment(0)
with pytest.raises(ValueError):
geo.LinearHorizontalTraceSegment(-4.61)
@pytest.mark.slow
def test_radial_horizontal_trace_segment():
"""Test the radial horizontal trace segment.
Each sub test is documented by comments.
"""
radius = 4.74
angle = np.pi / 1.23
segment_cw = geo.RadialHorizontalTraceSegment(radius, angle, True)
segment_ccw = geo.RadialHorizontalTraceSegment(radius, angle, False)
# default tests
default_trace_segment_tests(segment_cw, 1e-4)
default_trace_segment_tests(segment_ccw, 1e-4)
# getter tests
assert math.isclose(segment_cw.angle, angle)
assert math.isclose(segment_ccw.angle, angle)
assert math.isclose(segment_cw.radius, radius)
assert math.isclose(segment_ccw.radius, radius)
assert segment_cw.is_clockwise
assert not segment_ccw.is_clockwise
# check positions
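    # The segment starts at the origin heading along +x and bends with the
    # given radius, so a point at arc angle phi lies at
    # (radius * sin(phi), radius * (1 - cos(phi))) for the counterclockwise
    # segment and at the y-mirrored position for the clockwise one.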
for weight in np.arange(0.1, 1, 0.1):
current_angle = angle * weight
x_exp = np.sin(current_angle) * radius
y_exp = (1 - np.cos(current_angle)) * radius
lcs_cw = segment_cw.local_coordinate_system(weight)
lcs_ccw = segment_ccw.local_coordinate_system(weight)
assert ut.vector_is_close(lcs_cw.coordinates, [x_exp, -y_exp, 0])
assert ut.vector_is_close(lcs_ccw.coordinates, [x_exp, y_exp, 0])
# invalid inputs
with pytest.raises(ValueError):
geo.RadialHorizontalTraceSegment(0, np.pi)
with pytest.raises(ValueError):
geo.RadialHorizontalTraceSegment(-0.53, np.pi)
with pytest.raises(ValueError):
geo.RadialHorizontalTraceSegment(1, 0)
with pytest.raises(ValueError):
geo.RadialHorizontalTraceSegment(1, -np.pi)
# Test trace class ------------------------------------------------------------
class CustomSegment:
"""Custom trace segment for tests."""
def __init__(self):
"""Construct a custom segment."""
self.length = None
@staticmethod
def local_coordinate_system(*_args):
"""Get the local coordinate system.
Parameters
----------
_args :
Unused parameters
Returns
-------
weldx.transformations.LocalCoordinateSystem
Local coordinate system
"""
return tf.LocalCoordinateSystem()
def test_trace_construction():
"""Test the trace's construction."""
linear_segment = geo.LinearHorizontalTraceSegment(1)
radial_segment = geo.RadialHorizontalTraceSegment(1, np.pi)
cs_coordinates = np.array([2, 3, -2])
cs_initial = helpers.rotated_coordinate_system(coordinates=cs_coordinates)
# test single segment construction --------------------
trace = geo.Trace(linear_segment, cs_initial)
assert math.isclose(trace.length, linear_segment.length)
assert trace.num_segments == 1
segments = trace.segments
assert len(segments) == 1
check_trace_segments_identical(trace.segments[0], linear_segment)
check_coordinate_systems_identical(trace.coordinate_system, cs_initial)
# test multi segment construction ---------------------
trace = geo.Trace([radial_segment, linear_segment])
assert math.isclose(trace.length, linear_segment.length + radial_segment.length)
assert trace.num_segments == 2
check_trace_segments_identical(trace.segments[0], radial_segment)
check_trace_segments_identical(trace.segments[1], linear_segment)
check_coordinate_systems_identical(
trace.coordinate_system, tf.LocalCoordinateSystem()
)
# check invalid inputs --------------------------------
with pytest.raises(TypeError):
geo.Trace(radial_segment, linear_segment)
with pytest.raises(TypeError):
geo.Trace(radial_segment, 2)
with pytest.raises(Exception):
geo.Trace(None)
# check construction with custom segment --------------
custom_segment = CustomSegment()
custom_segment.length = 3
geo.Trace(custom_segment)
# trace length <= 0
with pytest.raises(Exception):
custom_segment.length = -12
geo.Trace(custom_segment)
with pytest.raises(Exception):
custom_segment.length = 0
geo.Trace(custom_segment)
@pytest.mark.slow
def test_trace_local_coordinate_system():
"""Test the trace's local coordinate system function.
The tested trace starts with a semicircle of radius 1 turning to the left
and continues with a straight line of length 1.
"""
radial_segment = geo.RadialHorizontalTraceSegment(1, np.pi)
linear_segment = geo.LinearHorizontalTraceSegment(1)
# check with default coordinate system ----------------
trace = geo.Trace([radial_segment, linear_segment])
# check first segment (radial)
for i in range(11):
weight = i / 10
position = radial_segment.length * weight
cs_trace = trace.local_coordinate_system(position)
cs_segment = radial_segment.local_coordinate_system(weight)
check_coordinate_systems_identical(cs_trace, cs_segment)
# check second segment (linear)
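    # After the semicircle (radius 1, turning left), the trace sits at
    # [0, 2, 0] and points into the -x direction, so the straight part is
    # expected at [-position_on_segment, 2, 0] with the semicircle's final
    # orientation.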
expected_orientation = radial_segment.local_coordinate_system(1).orientation
for i in range(11):
weight = i / 10
position_on_segment = linear_segment.length * weight
position = radial_segment.length + position_on_segment
expected_coordinates = np.array([-position_on_segment, 2, 0])
cs_expected = tf.LocalCoordinateSystem(
orientation=expected_orientation, coordinates=expected_coordinates
)
cs_trace = trace.local_coordinate_system(position)
check_coordinate_systems_identical(cs_trace, cs_expected)
# check with arbitrary coordinate system --------------
orientation = WXRotation.from_euler("x", np.pi / 2).as_matrix()
coordinates = np.array([-3, 2.5, 5])
cs_base = tf.LocalCoordinateSystem(orientation, coordinates)
trace = geo.Trace([radial_segment, linear_segment], cs_base)
# check first segment
for i in range(11):
weight = i / 10
position = radial_segment.length * weight
cs_trace = trace.local_coordinate_system(position)
cs_segment = radial_segment.local_coordinate_system(weight)
cs_expected = cs_segment + cs_base
check_coordinate_systems_identical(cs_trace, cs_expected)
# check second segment
cs_start_seg2 = radial_segment.local_coordinate_system(1) + cs_base
for i in range(11):
weight = i / 10
position_on_segment = linear_segment.length * weight
position = radial_segment.length + position_on_segment
lcs_coordinates = [position_on_segment, 0, 0]
cs_exp = tf.LocalCoordinateSystem(coordinates=lcs_coordinates) + cs_start_seg2
cs_trace = trace.local_coordinate_system(position)
check_coordinate_systems_identical(cs_trace, cs_exp)
@pytest.mark.slow
def test_trace_rasterization():
"""Test the trace's rasterize function.
The tested trace starts with a line segment of length 1 and continues
with a radial segment of radius 1 and counter clockwise winding.
"""
radial_segment = geo.RadialHorizontalTraceSegment(1, np.pi)
linear_segment = geo.LinearHorizontalTraceSegment(1)
# check with default coordinate system ----------------
trace = geo.Trace([linear_segment, radial_segment])
data = trace.rasterize(0.1)
# no duplications
assert helpers.are_all_columns_unique(data)
raster_width_eff = trace.length / (data.shape[1] - 1)
for i in range(data.shape[1]):
trace_location = i * raster_width_eff
if trace_location <= 1:
assert ut.vector_is_close([trace_location, 0, 0], data[:, i])
else:
arc_length = trace_location - 1
angle = arc_length # radius 1 -> arc_length = arc_angle * radius
x = np.sin(angle) + 1 # radius 1 -> sin(arc_angle) = x / radius
y = 1 - np.cos(angle)
assert ut.vector_is_close([x, y, 0], data[:, i])
# check with arbitrary coordinate system --------------
orientation = WXRotation.from_euler("y", np.pi / 2).as_matrix()
coordinates = np.array([-3, 2.5, 5])
cs_base = tf.LocalCoordinateSystem(orientation, coordinates)
trace = geo.Trace([linear_segment, radial_segment], cs_base)
data = trace.rasterize(0.1)
raster_width_eff = trace.length / (data.shape[1] - 1)
for i in range(data.shape[1]):
trace_location = i * raster_width_eff
if trace_location <= 1:
x = coordinates[0]
y = coordinates[1]
z = coordinates[2] - trace_location
else:
arc_length = trace_location - 1
angle = arc_length # radius 1 -> arc_length = arc_angle * radius
x = coordinates[0]
y = coordinates[1] + 1 - np.cos(angle)
z = coordinates[2] - 1 - np.sin(angle)
assert ut.vector_is_close([x, y, z], data[:, i])
# check if raster width is clipped to valid range -----
data = trace.rasterize(1000)
assert data.shape[1] == 2
assert ut.vector_is_close([-3, 2.5, 5], data[:, 0])
assert ut.vector_is_close([-3, 4.5, 4], data[:, 1])
# exceptions ------------------------------------------
with pytest.raises(Exception):
trace.rasterize(0)
with pytest.raises(Exception):
trace.rasterize(-23.1)
# Profile interpolation classes -----------------------------------------------
def check_interpolated_profile_points(profile, c_0, c_1, c_2):
"""Check the points of an interpolated profile from the interpolation test.
Parameters
----------
profile :
Interpolated profile.
c_0 :
First expected point
c_1 :
Second expected point
c_2 :
Third expected point
"""
assert ut.vector_is_close(profile.shapes[0].segments[0].point_start, c_0)
assert ut.vector_is_close(profile.shapes[0].segments[0].point_end, c_1)
assert ut.vector_is_close(profile.shapes[1].segments[0].point_start, c_1)
assert ut.vector_is_close(profile.shapes[1].segments[0].point_end, c_2)
def test_linear_profile_interpolation_sbs():
"""Test linear profile interpolation.
Uses the default profiles which consist of two shapes. Each shape
contains just a single line segment.
"""
[profile_a, profile_b] = get_default_profiles()
for i in range(5):
weight = i / 4.0
profile_c = geo.linear_profile_interpolation_sbs(profile_a, profile_b, weight)
check_interpolated_profile_points(
profile_c, [-i, 2 * i], [8 - 2 * i, 16 - 2 * i], [16, -4 * i]
)
# check weight clipped to valid range -----------------
a_0 = profile_a.shapes[0].segments[0].point_start
a_1 = profile_a.shapes[1].segments[0].point_start
a_2 = profile_a.shapes[1].segments[0].point_end
profile_c = geo.linear_profile_interpolation_sbs(profile_a, profile_b, -3)
check_interpolated_profile_points(profile_c, a_0, a_1, a_2)
profile_c = geo.linear_profile_interpolation_sbs(profile_a, profile_b, 42)
b_0 = profile_b.shapes[0].segments[0].point_start
b_1 = profile_b.shapes[1].segments[0].point_start
b_2 = profile_b.shapes[1].segments[0].point_end
check_interpolated_profile_points(profile_c, b_0, b_1, b_2)
# exceptions ------------------------------------------
shape_a12 = profile_a.shapes[1]
shape_b01 = profile_b.shapes[0]
shape_b12 = profile_b.shapes[1]
# number of shapes differ
profile_d = geo.Profile([shape_b01, shape_b12, shape_a12])
with pytest.raises(Exception):
geo.linear_profile_interpolation_sbs(profile_d, profile_b, 0.5)
# number of segments differ
shape_b012 = geo.Shape(
[
geo.LineSegment.construct_with_points(b_0, b_1),
geo.LineSegment.construct_with_points(b_1, b_2),
]
)
profile_b2 = geo.Profile([shape_b01, shape_b012])
with pytest.raises(Exception):
geo.linear_profile_interpolation_sbs(profile_a, profile_b2, 0.2)
# test variable profile -------------------------------------------------------
def check_variable_profile_state(variable_profile, profiles_exp, locations_exp):
"""Check the state of a variable profile.
Parameters
----------
variable_profile :
Variable profile that should be checked.
profiles_exp :
Expected stored profiles
locations_exp :
Expected stored locations
"""
num_profiles = len(locations_exp)
assert variable_profile.num_interpolation_schemes == num_profiles - 1
assert variable_profile.num_locations == num_profiles
assert variable_profile.num_profiles == num_profiles
for i in range(num_profiles):
assert math.isclose(variable_profile.locations[i], locations_exp[i])
check_profiles_identical(variable_profile.profiles[i], profiles_exp[i])
def test_variable_profile_construction():
"""Test construction of variable profiles."""
interpol = geo.linear_profile_interpolation_sbs
profile_a, profile_b = get_default_profiles()
# construction with single location and interpolation
variable_profile = geo.VariableProfile([profile_a, profile_b], 1, interpol)
check_variable_profile_state(variable_profile, [profile_a, profile_b], [0, 1])
variable_profile = geo.VariableProfile([profile_a, profile_b], [1], [interpol])
check_variable_profile_state(variable_profile, [profile_a, profile_b], [0, 1])
# construction with location list
variable_profile = geo.VariableProfile([profile_a, profile_b], [0, 1], interpol)
check_variable_profile_state(variable_profile, [profile_a, profile_b], [0, 1])
variable_profile = geo.VariableProfile(
[profile_a, profile_b, profile_a], [1, 2], [interpol, interpol]
)
check_variable_profile_state(
variable_profile, [profile_a, profile_b, profile_a], [0, 1, 2]
)
variable_profile = geo.VariableProfile(
[profile_a, profile_b, profile_a], [0, 1, 2], [interpol, interpol]
)
check_variable_profile_state(
variable_profile, [profile_a, profile_b, profile_a], [0, 1, 2]
)
# exceptions ------------------------------------------
# first location is not 0
with pytest.raises(Exception):
geo.VariableProfile([profile_a, profile_b], [1, 2], interpol)
# number of locations is not correct
with pytest.raises(Exception):
geo.VariableProfile(
[profile_a, profile_b, profile_a], [1], [interpol, interpol]
)
with pytest.raises(Exception):
geo.VariableProfile([profile_a, profile_b], [0, 1, 2], interpol)
# number of interpolations is not correct
with pytest.raises(Exception):
geo.VariableProfile([profile_a, profile_b, profile_a], [0, 1, 2], [interpol])
with pytest.raises(Exception):
geo.VariableProfile(
[profile_a, profile_b, profile_a], [0, 1, 2], [interpol, interpol, interpol]
)
# locations not ordered
with pytest.raises(Exception):
geo.VariableProfile(
[profile_a, profile_b, profile_a], [0, 2, 1], [interpol, interpol]
)
def test_variable_profile_local_profile():
"""Test if the local profiles of a variable profile are calculated correctly."""
interpol = geo.linear_profile_interpolation_sbs
profile_a, profile_b = get_default_profiles()
variable_profile = geo.VariableProfile(
[profile_a, profile_b, profile_a], [0, 1, 2], [interpol, interpol]
)
for i in range(5):
# first segment
location = i / 4.0
profile = variable_profile.local_profile(location)
check_interpolated_profile_points(
profile, [-i, 2 * i], [8 - 2 * i, 16 - 2 * i], [16, -4 * i]
)
# second segment
location += 1
profile = variable_profile.local_profile(location)
check_interpolated_profile_points(
profile, [-4 + i, 8 - 2 * i], [2 * i, 8 + 2 * i], [16, -16 + 4 * i]
)
# check if values are clipped to valid range ----------
profile = variable_profile.local_profile(177)
check_interpolated_profile_points(profile, [0, 0], [8, 16], [16, 0])
profile = variable_profile.local_profile(-2)
check_interpolated_profile_points(profile, [0, 0], [8, 16], [16, 0])
# test geometry class ---------------------------------------------------------
def test_geometry_construction():
"""Test construction of the geometry class."""
profile_a, profile_b = get_default_profiles()
variable_profile = geo.VariableProfile(
[profile_a, profile_b], [0, 1], geo.linear_profile_interpolation_sbs
)
radial_segment = geo.RadialHorizontalTraceSegment(1, np.pi)
linear_segment = geo.LinearHorizontalTraceSegment(1)
trace = geo.Trace([radial_segment, linear_segment])
# single profile construction
geometry = geo.Geometry(profile_a, trace)
check_profiles_identical(geometry.profile, profile_a)
check_traces_identical(geometry.trace, trace)
# variable profile construction
geometry = geo.Geometry(variable_profile, trace)
check_variable_profiles_identical(geometry.profile, variable_profile)
check_traces_identical(geometry.trace, trace)
# exceptions ------------------------------------------
# wrong types
with pytest.raises(TypeError):
geo.Geometry(variable_profile, profile_b)
with pytest.raises(TypeError):
geo.Geometry(trace, trace)
with pytest.raises(TypeError):
geo.Geometry(trace, profile_b)
with pytest.raises(TypeError):
geo.Geometry(variable_profile, "a")
with pytest.raises(TypeError):
geo.Geometry("42", trace)
@pytest.mark.slow
def test_geometry_rasterization_trace():
"""Test if the rasterized geometry data follows the trace.
The utilized trace starts with a line segment of length 1 and continues
with a radial segment of radius 1 and counter clockwise winding. Each
individual step is documented by comments.
"""
a0 = [1, 0]
a1 = [1, 1]
a2 = [0, 1]
a3 = [-1, 1]
a4 = [-1, 0]
profile_points = ut.to_float_array([a0, a1, a2, a2, a3, a4]).transpose()
# create profile
shape_a012 = geo.Shape().add_line_segments([a0, a1, a2])
shape_a234 = geo.Shape().add_line_segments([a2, a3, a4])
profile_a = geo.Profile([shape_a012, shape_a234])
# create trace
radial_segment = geo.RadialHorizontalTraceSegment(1, np.pi / 2, False)
linear_segment = geo.LinearHorizontalTraceSegment(1)
trace = geo.Trace([linear_segment, radial_segment])
# create geometry
geometry = geo.Geometry(profile_a, trace)
# rasterize
# Note, if the raster width is larger than the segment, it is automatically
# adjusted to the segment width. Hence, each rasterized profile has 6
# points, which were defined at the beginning of the test (a2 is
# included twice)
data = geometry.rasterize(7, 0.1)
# calculate the number of rasterized profiles
num_raster_profiles = int(np.round(data.shape[1] / 6))
# calculate effective raster width
eff_raster_width = trace.length / (data.shape[1] / 6 - 1)
arc_point_distance_on_trace = 2 * np.sin(eff_raster_width / 2)
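    # chord length between consecutive raster points on the unit-radius arc (2 * R * sin(theta / 2) with R = 1)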
for i in range(num_raster_profiles):
# get index of the current profiles first point
idx_0 = i * 6
# check first segment (line)
if data[0, idx_0 + 2] <= 1:
for j in range(6):
point_exp = [
eff_raster_width * i,
profile_points[0, j],
profile_points[1, j],
]
assert ut.vector_is_close(data[:, idx_0 + j], point_exp)
# check second segment (arc)
else:
            # first 2 profile points lie on the arc's center point
assert ut.vector_is_close(data[:, idx_0], [1, a0[0], a0[1]])
assert ut.vector_is_close(data[:, idx_0 + 1], [1, a1[0], a1[1]])
# z-values are constant
for j in np.arange(2, 6, 1):
assert math.isclose(data[2, idx_0 + j], profile_points[1, j])
# all profile points in a common x-y plane
exp_radius = np.array([1, 1, 2, 2])
vec_02 = data[0:2, idx_0 + 2] - data[0:2, idx_0]
assert math.isclose(np.linalg.norm(vec_02), exp_radius[0])
for j in np.arange(3, 6, 1):
vec_0j = data[0:2, idx_0 + j] - data[0:2, idx_0]
assert math.isclose(np.linalg.norm(vec_0j), exp_radius[j - 2])
unit_vec_0j = tf.normalize(vec_0j)
assert math.isclose(np.dot(unit_vec_0j, vec_02), 1)
# check point distance between profiles
if data[1, idx_0 - 4] > 1:
exp_point_distance = arc_point_distance_on_trace * exp_radius
for j in np.arange(2, 6, 1):
point_distance = np.linalg.norm(
data[:, idx_0 + j] - data[:, idx_0 + j - 6]
)
assert math.isclose(exp_point_distance[j - 2], point_distance)
# check if raster width is clipped to valid range -----
data = geometry.rasterize(7, 1000)
assert data.shape[1] == 12
for i in range(12):
if i < 6:
            assert math.isclose(data[0, i], 0, abs_tol=1e-9)
else:
assert math.isclose(data[1, i], 1)
# exceptions ------------------------------------------
with pytest.raises(Exception):
geometry.rasterize(0, 1)
with pytest.raises(Exception):
geometry.rasterize(1, 0)
with pytest.raises(Exception):
geometry.rasterize(0, 0)
with pytest.raises(Exception):
geometry.rasterize(-2.3, 1)
with pytest.raises(Exception):
geometry.rasterize(1, -4.6)
with pytest.raises(Exception):
geometry.rasterize(-2.3, -4.6)
@pytest.mark.slow
def test_geometry_rasterization_profile_interpolation():
"""Check if the rasterized geometry interpolates profiles correctly."""
interpol = geo.linear_profile_interpolation_sbs
a0 = [1, 0]
a1 = [1, 1]
a2 = [0, 1]
a3 = [-1, 1]
a4 = [-1, 0]
# create shapes
shape_a012 = geo.Shape().add_line_segments([a0, a1, a2])
shape_a234 = geo.Shape().add_line_segments([a2, a3, a4])
shape_b012 = copy.deepcopy(shape_a012)
shape_b234 = copy.deepcopy(shape_a234)
shape_b012.apply_transformation([[2, 0], [0, 2]])
shape_b234.apply_transformation([[2, 0], [0, 2]])
# create variable profile
profile_a = geo.Profile([shape_a012, shape_a234])
profile_b = geo.Profile([shape_b012, shape_b234])
variable_profile = geo.VariableProfile(
[profile_a, profile_b, profile_a], [0, 2, 6], [interpol, interpol]
)
linear_segment_l1 = geo.LinearHorizontalTraceSegment(1)
linear_segment_l2 = geo.LinearHorizontalTraceSegment(2)
# Note: The profile in the middle of the variable profile is not located
# at the start of the second trace segment
trace = geo.Trace([linear_segment_l2, linear_segment_l1])
geometry = geo.Geometry(variable_profile, trace)
# Note: If the raster width is larger than the segment, it is automatically
# adjusted to the segment width. Hence each rasterized profile has 6
# points, which were defined at the beginning of the test (a2 is
# included twice)
data = geometry.rasterize(7, 0.1)
assert data.shape[1] == 186
profile_points = np.array([a0, a1, a2, a2, a3, a4]).transpose()
# check first profile interpolation
for i in range(11):
idx_0 = i * 6
for j in range(6):
point_exp = np.array(
[
i * 0.1,
profile_points[0, j] * (1 + i * 0.1),
profile_points[1, j] * (1 + i * 0.1),
]
)
assert ut.vector_is_close(data[:, idx_0 + j], point_exp)
# check second profile interpolation
for i in range(20):
idx_0 = (30 - i) * 6
for j in range(6):
point_exp = np.array(
[
3 - i * 0.1,
profile_points[0, j] * (1 + i * 0.05),
profile_points[1, j] * (1 + i * 0.05),
]
)
assert ut.vector_is_close(data[:, idx_0 + j], point_exp)
def get_test_profile() -> geo.Profile:
"""Create a `weldx.geometry.Profile` for tests.
Returns
-------
weldx.geometry.Profile :
`weldx.geometry.Profile` for tests.
"""
shape_0 = geo.Shape().add_line_segments(Q_([[1, 0], [1, 1], [3, 1]], "cm"))
shape_1 = geo.Shape().add_line_segments(Q_([[-1, 0], [-1, 1]], "cm"))
return geo.Profile([shape_0, shape_1])
def get_test_geometry_constant_profile() -> geo.Geometry:
"""Create a `weldx.geometry.Geometry` with constant profile for tests.
Returns
-------
weldx.geometry.Geometry :
`weldx.geometry.Geometry` with constant profile for tests.
"""
profile = get_test_profile()
trace = geo.Trace([geo.LinearHorizontalTraceSegment(Q_(1, "cm"))])
return geo.Geometry(profile=profile, trace=trace)
def get_test_geometry_variable_profile():
"""Create a `weldx.geometry.Geometry` with variable profile for tests.
Returns
-------
weldx.geometry.Geometry :
        `weldx.geometry.Geometry` with variable profile for tests.
"""
profile = get_test_profile()
variable_profile = geo.VariableProfile(
[profile, profile], [0, 1], [geo.linear_profile_interpolation_sbs]
)
trace = geo.Trace([geo.LinearHorizontalTraceSegment(Q_(1, "cm"))])
return geo.Geometry(profile=variable_profile, trace=trace)
class TestGeometry:
"""Test the geometry class."""
@staticmethod
@pytest.mark.parametrize(
"geometry, p_rw, t_rw, exp_num_points, exp_num_triangles",
[
(get_test_geometry_constant_profile(), Q_(1, "cm"), Q_(1, "cm"), 12, 8),
(get_test_geometry_variable_profile(), Q_(1, "cm"), Q_(1, "cm"), 12, 0),
],
)
def test_spatial_data(
geometry: geo.Geometry,
p_rw: pint.Quantity,
t_rw: pint.Quantity,
exp_num_points: int,
exp_num_triangles: int,
):
"""Test the `spatial_data` function.
Parameters
----------
geometry : weldx.geometry.Geometry
Geometry that should be tested
p_rw : pint.Quantity
Profile raster width that is passed to the function
t_rw : pint.Quantity
Trace raster width that is passed to the function
exp_num_points : int
Expected number of points of the returned `weldx.geometry.SpatialData`
instance
exp_num_triangles : int
Expected number of triangles of the returned `weldx.geometry.SpatialData`
instance
"""
spatial_data = geometry.spatial_data(p_rw, t_rw)
assert len(spatial_data.coordinates.data) == exp_num_points
num_triangles = 0
if spatial_data.triangles is not None:
num_triangles = len(spatial_data.triangles)
assert num_triangles == exp_num_triangles
# --------------------------------------------------------------------------------------
# SpatialData
# --------------------------------------------------------------------------------------
class TestSpatialData:
"""Test the functionality of the `SpatialData` class."""
@staticmethod
@pytest.mark.parametrize(
"arguments",
[
(np.ones((5, 3)),),
(np.ones((5, 3)), [[0, 1, 2], [0, 2, 3]]),
(np.ones((5, 3)), [[0, 1, 2], [0, 2, 3]], {}),
(np.ones((5, 3)), None, {}),
],
)
def test_class_creation(arguments):
"""Test creation of a `SpatialData` instance.
Parameters
----------
arguments :
Tuple of arguments that are passed to the `__init__` method
"""
pc = SpatialData(*arguments)
assert isinstance(pc.coordinates, DataArray)
assert np.allclose(pc.coordinates.data, arguments[0])
if len(arguments) > 1 and arguments[1] is not None:
            assert np.all(arguments[1] == pc.triangles)
@staticmethod
@pytest.mark.parametrize(
"arguments, exception_type, test_name",
[
((np.ones((5, 3)), [[0, 1], [2, 3]]), ValueError, "# inv. triangulation 1"),
((np.ones((5, 3)), [[0, 1, 2, 3]]), ValueError, "# inv. triangulation 2"),
((np.ones((5, 3)), [0, 1, 2]), ValueError, "# inv. triangulation 3"),
],
)
def test_class_creation_exceptions(arguments, exception_type, test_name):
"""Test exceptions during creation of a `SpatialData` instance.
Parameters
----------
arguments :
Tuple of arguments that are passed to the `__init__` method
exception_type :
Expected exception type
test_name : str
A string starting with an `#` that describes the test.
"""
with pytest.raises(exception_type):
SpatialData(*arguments)
@staticmethod
@pytest.mark.parametrize(
"filename",
["test.ply", "test.stl", "test.vtk", Path("test.stl")],
)
def test_read_write_file(filename: Union[str, Path]):
"""Test the `from_file` and `write_to_file` functions.
The test simply creates a `SpatialData` instance, writes it to a file and reads
it back. The result is compared to the original object.
Parameters
----------
filename :
Name of the file
"""
points = [[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]]
triangles = [[0, 1, 2], [2, 3, 0]]
data = SpatialData(points, triangles)
with TemporaryDirectory(dir=Path(__file__).parent) as tmpdirname:
filepath = f"{tmpdirname}/{filename}"
if isinstance(filename, Path):
filepath = Path(filepath)
data.write_to_file(filepath)
data_read = SpatialData.from_file(filepath)
assert np.allclose(data.coordinates, data_read.coordinates)
assert np.allclose(data.triangles, data_read.triangles)
| 30.905533 | 88 | 0.641839 |
4a1cb003d6072e92fa6f7de10ff0a5c1006156e7
| 322 |
py
|
Python
|
python/Lib/lib2to3/ot.py
|
mrnbd/ZCHelper_full
|
e40560522b3eca417b5fad97efe8b1627eca2a22
|
[
"0BSD"
] | null | null | null |
python/Lib/lib2to3/ot.py
|
mrnbd/ZCHelper_full
|
e40560522b3eca417b5fad97efe8b1627eca2a22
|
[
"0BSD"
] | null | null | null |
python/Lib/lib2to3/ot.py
|
mrnbd/ZCHelper_full
|
e40560522b3eca417b5fad97efe8b1627eca2a22
|
[
"0BSD"
] | null | null | null |
import os
import sys
import time
# Restart ZCHelper.exe (elevated) from the directory above the interpreter's folder.
imf = '\\ZCHelper.exe'
exe = sys.executable
dname = os.path.dirname(exe)
utp = os.path.split(dname)
os.chdir(utp[0])
puti = str(utp[0]) + imf
w = 1
try:
    # kill any running instance first; taskkill returns 0 when a process was terminated
    w = os.system('taskkill /f /im ZCHelper.exe')
except Exception:
    pass
if w == 0:
    time.sleep(2)  # give the old process a moment to exit before relaunching
os.startfile(puti,'runas')
sys.exit()
| 12.88 | 51 | 0.670807 |
4a1cb06e7ce93a3c8ec16ade5b44341e00a17c91
| 778 |
py
|
Python
|
data/rename_image_names.py
|
LONG-9621/IQA_02
|
22ca65cd0156b5b428cecd55ed939366fb64d2e5
|
[
"MIT"
] | 406 |
2017-07-27T02:26:41.000Z
|
2022-03-31T12:55:48.000Z
|
data/rename_image_names.py
|
LONG-9621/IQA_02
|
22ca65cd0156b5b428cecd55ed939366fb64d2e5
|
[
"MIT"
] | 66 |
2017-11-07T07:30:21.000Z
|
2021-07-28T15:30:46.000Z
|
data/rename_image_names.py
|
LONG-9621/IQA_02
|
22ca65cd0156b5b428cecd55ed939366fb64d2e5
|
[
"MIT"
] | 135 |
2017-08-02T01:02:43.000Z
|
2022-03-30T10:29:19.000Z
|
import numpy as np
import os
import os.path as osp
import cv2
import pdb
shape = ['gblur','wn','jpeg','jp2k','fastfading']
data = './live/'
for tp in shape:
file_root = data + tp + '/'
list_file = 'info' + '.txt'
filename = [line.rstrip('\n') for line in open(
osp.join(file_root, list_file))]
N_name = []
for i in filename:
N_name.append(i.split()[1])
    # pdb.set_trace()  # debugging breakpoint disabled so the script can run unattended
for j in range(len(N_name)):
folder = data +tp + '/' + N_name[j]
tmp = cv2.imread(folder)
cv2.imwrite(data+ tp + '/' + 'img' + str(int(N_name[j][3:-4])).zfill(3)+'.bmp',tmp)
if int(N_name[j][3:-4])<100:
os.remove(folder)
os.remove(data+tp+'/'+'Thumbs.db')
| 18.97561 | 91 | 0.517995 |
4a1cb1533fafdc008428804aae5a28014c7c9e96
| 3,777 |
py
|
Python
|
ch7-blog-app-with-users/django_project/settings.py
|
wsvincent/djangoforbeginners_32
|
aba7c99aa6050cfe8fb9d588af58c9f67411ae8a
|
[
"MIT"
] | 5 |
2021-12-14T03:33:39.000Z
|
2022-01-11T14:13:21.000Z
|
ch7-blog-app-with-users/django_project/settings.py
|
wsvincent/djangoforbeginners_32
|
aba7c99aa6050cfe8fb9d588af58c9f67411ae8a
|
[
"MIT"
] | null | null | null |
ch7-blog-app-with-users/django_project/settings.py
|
wsvincent/djangoforbeginners_32
|
aba7c99aa6050cfe8fb9d588af58c9f67411ae8a
|
[
"MIT"
] | null | null | null |
"""
Django settings for django_project project.
Generated by 'django-admin startproject' using Django 3.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "django-insecure-5&4s6$765*&)a1l3u%^ikcfd)@pza+d9j4_u03wezevf)128$+"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [".herokuapp.com", "localhost", "127.0.0.1"]
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"whitenoise.runserver_nostatic", # new
"django.contrib.staticfiles",
"blog.apps.BlogConfig",
"accounts.apps.AccountsConfig",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware", # new
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "django_project.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [str(BASE_DIR.joinpath("templates"))], # new
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "django_project.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = "/static/"
STATICFILES_DIRS = [str(BASE_DIR.joinpath("static"))]
STATIC_ROOT = str(BASE_DIR.joinpath("staticfiles")) # new
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage" # new
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
LOGIN_REDIRECT_URL = "home"
LOGOUT_REDIRECT_URL = "home" # new
| 27.772059 | 91 | 0.709293 |
4a1cb1b6149919f9a4be0be288bd8255e221392b
| 3,226 |
py
|
Python
|
utils/bills.py
|
jdubansky/openstates.org
|
6fd5592aae554c4bb201f0a76ed3605bff5204c2
|
[
"MIT"
] | null | null | null |
utils/bills.py
|
jdubansky/openstates.org
|
6fd5592aae554c4bb201f0a76ed3605bff5204c2
|
[
"MIT"
] | null | null | null |
utils/bills.py
|
jdubansky/openstates.org
|
6fd5592aae554c4bb201f0a76ed3605bff5204c2
|
[
"MIT"
] | null | null | null |
import re
from django.db.models import F
from openstates.data.models import Bill
from .websearchquery import WebSearchQuery as SearchQuery
from .common import abbr_to_jid
# decision was made in openstates/issues#193 to exclude these by default to not confuse users
EXCLUDED_CLASSIFICATIONS = ["proposed bill"]
# This function has to match openstates.transformers
_bill_id_re = re.compile(r"([A-Z]*)\s*0*([-\d]+)")
_mi_bill_id_re = re.compile(r"(SJR|HJR)\s*([A-Z]+)")
def fix_bill_id(bill_id):
# special case for MI Joint Resolutions
if _mi_bill_id_re.match(bill_id):
return _mi_bill_id_re.sub(r"\1 \2", bill_id, 1).strip()
return _bill_id_re.sub(r"\1 \2", bill_id, 1).strip()
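# Illustrative examples (not from the original source): fix_bill_id("HB0025") -> "HB 25",
# and for a Michigan joint resolution, fix_bill_id("SJR  X") -> "SJR X".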
def search_bills(
*,
sort,
bills=None,
query=None,
state=None,
chamber=None,
session=None,
sponsor=None,
classification=None,
exclude_classifications=None,
subjects=None,
status=None,
):
if bills is None:
bills = Bill.objects.all().select_related(
"legislative_session", "legislative_session__jurisdiction",
)
if state:
jid = abbr_to_jid(state.lower())
bills = bills.filter(legislative_session__jurisdiction_id=jid)
if query:
if re.match(r"\w{1,3}\s*\d{1,5}", query):
bills = bills.filter(identifier__iexact=fix_bill_id(query))
else:
bills = bills.filter(
searchable__search_vector=SearchQuery(
query, search_type="web", config="english"
)
)
if chamber:
bills = bills.filter(from_organization__classification=chamber)
if session:
bills = bills.filter(legislative_session__identifier=session)
if sponsor:
bills = bills.filter(sponsorships__person_id=sponsor)
if classification:
bills = bills.filter(classification__contains=[classification])
elif exclude_classifications:
bills = bills.exclude(classification__contains=exclude_classifications)
if subjects:
bills = bills.filter(subject__overlap=subjects)
if not status:
status = []
if "passed-lower-chamber" in status:
bills = bills.filter(
actions__classification__contains=["passage"],
actions__organization__classification="lower",
)
elif "passed-upper-chamber" in status:
bills = bills.filter(
actions__classification__contains=["passage"],
actions__organization__classification="upper",
)
elif "signed" in status:
bills = bills.filter(actions__classification__contains=["executive-signature"])
if sort is None:
pass
elif sort == "-updated":
bills = bills.order_by("-updated_at")
elif sort == "first_action":
bills = bills.order_by(F("first_action_date").asc(nulls_last=True))
elif sort == "-first_action":
bills = bills.order_by(F("first_action_date").desc(nulls_last=True))
elif sort == "latest_action":
bills = bills.order_by(F("latest_action_date").asc(nulls_last=True))
else: # -latest_action, or not specified
bills = bills.order_by(F("latest_action_date").desc(nulls_last=True))
return bills
| 34.319149 | 93 | 0.66367 |
4a1cb2542073ce40e6faaa6b0cb423c922089f16
| 9,687 |
py
|
Python
|
hydro/core.py
|
capruitt/hydro
|
bb128b3c1381eff735bc8e89ef84273f3ee1f550
|
[
"MIT"
] | 3 |
2016-12-21T16:31:51.000Z
|
2017-01-22T12:50:26.000Z
|
hydro/core.py
|
capruitt/hydro
|
bb128b3c1381eff735bc8e89ef84273f3ee1f550
|
[
"MIT"
] | null | null | null |
hydro/core.py
|
capruitt/hydro
|
bb128b3c1381eff735bc8e89ef84273f3ee1f550
|
[
"MIT"
] | 5 |
2016-08-19T23:23:55.000Z
|
2020-10-22T18:13:01.000Z
|
import numpy as np, pandas as pd, matplotlib.pyplot as plt
from datetime import datetime
from matplotlib.ticker import FuncFormatter
from scipy.optimize import curve_fit
plt.style.use("seaborn-ticks")
def exp_curve(x, a, b):
"""Exponential curve used for rating curves"""
return (a * x**b)
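# Q = a * H**b is the conventional power-law form of a stage-discharge rating;
# the RC class below fits a and b to measured stage/discharge pairs via curve_fit.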
def r_squ(x, y, pred):
"""
Coefficient of determination
x: independent variable\n
y: dependent variable\n
pred: predicted values
"""
a = 0.0
b = 0.0
ybar = np.mean(y)
for i, yhat in zip(y, pred):
a += (i - yhat)**2
b += (i - ybar)**2
return 1 - a / b
class RC(object):
def __init__(self, stage, discharge):
"""
Stage-Discharge rating curve
        stage: pandas series containing stage values corresponding to discharges\n
discharge: pandas series containing discharge measurements
"""
self.stage = stage
self.discharge = discharge
# curve_fit
self.popt, self.pcov = curve_fit(exp_curve, self.stage, self.discharge)
# r-squared
self.pred = [exp_curve(j, self.popt[0], self.popt[1]) for j in self.stage]
self.r = r_squ(self.stage, self.discharge, self.pred)
def Q(self, allstages):
""" Compute discharges for entire series of stages"""
return list(map(lambda x: round(exp_curve(x, self.popt[0], self.popt[1]), 3), allstages))
def plot(self, title='Rating Curve', log=True):
""" plot the rating curve """
fig = plt.figure()
ax1 = fig.add_subplot(111, facecolor=[.95,.95,.95])
plt.grid(True, which='both', color='w', ls='-', zorder=0)
ax1.scatter(self.stage, self.discharge, color='k', s=10)
ax1.set_ylabel(r'Discharge, cfs')
ax1.set_xlabel(r'Stage, ft')
if log:
ax1.set_ylim(0.01, 100)
ax1.set_yscale('log'); ax1.set_xscale('log') # log scale x and y
ax1.yaxis.set_major_formatter(FuncFormatter(lambda y,pos: ('{{:.{:1d}f}}'.format(int(np.maximum(-np.log10(y),0)))).format(y)))
ax1.xaxis.set_major_formatter(FuncFormatter(lambda y,pos: ('{{:.{:1d}f}}'.format(int(np.maximum(-np.log10(y),0)))).format(y)))
plt.title(title)
ax1.set_axisbelow(True) # puts grid below plot
# write the equation in the plot
ax1.text(0.05, 0.7, f'y = {self.popt[0]:.3f}x^{self.popt[1]:.3f}',
fontsize=15, transform=ax1.transAxes)
# draw the model line
line = np.linspace(min(self.stage), max(self.stage), 100)
ax1.plot(line, exp_curve(line, self.popt[0], self.popt[1]), color='k')
plt.show()
class Discharge(object):
def __init__(self, time, Q, rain=[]):
"""
time: timeseries
Q: discharge values
"""
self.time = time
self.Q = Q
self.rain = rain
self.bflow = []
def dailyQ(self, method='mean'):
"""
        Calculates the daily flow of a set of discharge data.
Method specifies the method of aggregating each day -- either by 'mean'
or by 'sum'. Default is mean.\n
Returns daily flow and day in a dataframe.
"""
daily = pd.DataFrame({'Q':self.Q, 'time':self.time})
daily['day'] = daily.time.apply(lambda x: datetime(x.year, x.month, x.day))
if method == 'mean':
daily = pd.DataFrame(daily.groupby(['day'])['Q'].mean())
daily['meanQ'] = daily['Q']
del daily['Q']
elif method == 'sum':
daily = pd.DataFrame(daily.groupby(['day'])['Q'].sum())
daily['sumQ'] = daily['Q']
del daily['Q']
daily.reset_index(inplace=True)
return daily
def RB_Flashiness(self):
"""Richards-Baker Flashiness Index for a series of daily mean discharges."""
Q = self.dailyQ().meanQ
Qsum = np.sum(Q) # sum of daily mean discharges
Qpath = 0.0
for i in range(len(Q)):
if i == 0:
Qpath = Q.iloc[i] # first entry only
else:
Qpath += np.abs(Q.iloc[i] - Q.iloc[i-1]) # sum the absolute differences of the mean discharges
return Qpath/Qsum
def flow_duration(self, plot=False):
"""
        Creates the flow duration curve for a discharge dataset. Returns a pandas
        DataFrame with the discharge values ('discharge_cfs') and their exceedance
        probabilities ('exeedance_prob').
"""
fd = pd.Series(self.Q).value_counts() # frequency of unique values
fd.sort_index(inplace=True) # sort in order of increasing discharges
fd = fd.cumsum() # cumulative sum of frequencies
fd = fd.apply(lambda x: 100 - x/fd.max() * 100) # normalize
        # name the columns explicitly so the plotting code below can find them
        fd = pd.DataFrame({'discharge_cfs': fd.index, 'exeedance_prob': fd.values})
if plot:
import probscale # flow duration curves use a probability scale for the x axis
fig = plt.figure(figsize=[8, 10])
ax1 = fig.add_subplot(111, facecolor=[.95,.95,.95])
plt.grid(True, which='both', color='w', ls='-', zorder=0)
ax1.plot(fd['discharge_cfs'], fd['exeedance_prob'], 'x', ls='',
color='k', label='Total Flow', ms=5)
# set y axis to log scale and x axis to probability scale
ax1.set_yscale('log')
ax1.set_xscale('prob') # from import probscale
plt.xticks([.01,.1,.5,1,2,5,10,20,30,40,50,60,70,80,90,95,98,99,99.5,99.9,99.99],
rotation='vertical')
plt.legend()
plt.title('Flow Duration Curve')
plt.ylabel('Flow (cfs)')
plt.xlabel('Percentage of time flow was equaled or exceeded')
plt.show()
return fd
def Lyne_Hollick(self, alpha=.925, direction='f'):
"""
Recursive digital filter for baseflow separation. Based on Lyne and Hollick, 1979.
series : array of discharge measurements\n
alpha : filter parameter\n
direction : (f)orward or (r)everse calculation
"""
        # first look to see if there has already been a run
if len(self.bflow) > 0:
Q = np.array(self.bflow)
else:
Q = np.array(self.Q)
f = np.zeros(len(Q))
if direction[0] == 'f':
for t in np.arange(1,len(Q)):
# algorithm
f[t] = alpha * f[t-1] + (1 + alpha)/2 * (Q[t] - Q[t-1])
# to prevent negative values
if Q[t] - f[t] > Q[t]:
f[t] = 0
elif direction[0] == 'r':
for t in np.arange(len(Q)-2, 1, -1):
f[t] = alpha * f[t+1] + (1 + alpha)/2 * (Q[t] - Q[t+1])
if Q[t] - f[t] > Q[t]:
f[t] = 0
# adds the baseflow to self variables so it can be called recursively
self.bflow = np.array(Q - f)
# calls method again if multiple passes are specified
if len(direction) > 1:
self.Lyne_Hollick(alpha, direction=direction[1:])
return self.bflow
def Eckhardt(self, alpha=.98, BFI=.80, re=1):
"""
Recursive digital filter for baseflow separation. Based on Eckhardt, 2004.\n
series : array of discharge measurements\n
alpha : filter parameter\n
BFI : BFI_max (maximum baseflow index)\n
re : number of times to run filter
"""
print('round ' + str(re))
print(self.bflow[:5])
        # first look to see if there has already been a run
if len(self.bflow) > 0:
Q = np.array(self.bflow)
else:
Q = np.array(self.Q)
f = np.zeros(len(Q))
f[0] = Q[0]
for t in np.arange(1,len(Q)):
# algorithm
f[t] = ((1 - BFI) * alpha * f[t-1] + (1 - alpha) * BFI * Q[t]) / (1 - alpha * BFI)
if f[t] > Q[t]:
f[t] = Q[t]
# adds the baseflow to self variables so it can be called recursively
self.bflow = f
print(max(self.bflow))
# calls method again if multiple passes are specified
if re > 1:
self.Eckhardt(alpha=alpha, BFI=BFI, re=re-1)
return self.bflow
def plot(self, addseries=[], log=True, title='Discharge'):
"""
Quick plot with or without rain data.\n
If you wish to plot more than one series to compare them, use addseries
to list in order of [time, Q, ...] for each additional series.
"""
fig = plt.figure()
ax1 = fig.add_subplot(111, facecolor=[.95,.95,.95])
plt.grid(True, which='both', color='w', ls='-', zorder=0)
ax1.plot(self.time, self.Q, label='Series1')
if len(self.rain) != 0:
ax2 = ax1.twinx()
ax2.plot(self.time, self.rain, alpha=.5, c='b', lw=1, label='Rain')
ax2.set_ylim(1, 0)
ax2.set_ylabel(r'Rain, in')
ax1.set_ylabel('Discharge, cfs')
        ax1.set_xlabel('Time')
# log scale for y axis
if log:
ax1.set_yscale('log')
ax1.yaxis.set_major_formatter(FuncFormatter(lambda y,pos: ('{{:.{:1d}f}}'.format(int(np.maximum(-np.log10(y),0)))).format(y)))
# add ablity to plot multiple time series
more = len(addseries)
while more > 0:
ax1.plot(addseries[more-2], addseries[more-1],
label=f'Series{int(len(addseries)/2-more/2 +2)}')
more -= 2
ax1.legend(loc='best')
plt.title(title)
plt.show()
| 39.864198 | 138 | 0.546299 |
4a1cb33b3106f263678f634ea3a84f198f77504b
| 1,897 |
py
|
Python
|
examples/tf/trpo_swimmer_ray_sampler.py
|
michahu/garage
|
c045a1e5e5088a18828ec48bfee0addb1943bfd4
|
[
"MIT"
] | 1 |
2021-01-11T18:40:52.000Z
|
2021-01-11T18:40:52.000Z
|
examples/tf/trpo_swimmer_ray_sampler.py
|
michahu/garage
|
c045a1e5e5088a18828ec48bfee0addb1943bfd4
|
[
"MIT"
] | null | null | null |
examples/tf/trpo_swimmer_ray_sampler.py
|
michahu/garage
|
c045a1e5e5088a18828ec48bfee0addb1943bfd4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""This is an example to train a task with TRPO algorithm.
Uses Ray sampler instead of on_policy vectorized
sampler.
Here it runs Swimmer-v2 environment with 40 iterations.
"""
import ray
from garage import wrap_experiment
from garage.envs import GymEnv
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearFeatureBaseline
from garage.sampler import RaySampler
from garage.tf.algos import TRPO
from garage.tf.policies import GaussianMLPPolicy
from garage.trainer import TFTrainer
@wrap_experiment
def trpo_swimmer_ray_sampler(ctxt=None, seed=1):
"""tf_trpo_swimmer.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
# Since this is an example, we are running ray in a reduced state.
# One can comment this line out in order to run ray at full capacity
ray.init(_memory=52428800,
object_store_memory=78643200,
ignore_reinit_error=True,
log_to_driver=False,
include_dashboard=False)
with TFTrainer(snapshot_config=ctxt) as trainer:
set_seed(seed)
env = GymEnv('Swimmer-v2')
policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
discount=0.99,
max_kl_step=0.01)
trainer.setup(algo,
env,
sampler_cls=RaySampler,
sampler_args={'seed': seed})
trainer.train(n_epochs=40, batch_size=4000)
trpo_swimmer_ray_sampler(seed=100)
| 31.098361 | 76 | 0.670005 |
4a1cb33b8e8d3a46d82dcd35c10709d1f7c4da7b
| 250 |
py
|
Python
|
cart/models.py
|
weiliang1103/onlinelibrary
|
993f7969cb6c8242631c13c7714652e73484e313
|
[
"MIT"
] | null | null | null |
cart/models.py
|
weiliang1103/onlinelibrary
|
993f7969cb6c8242631c13c7714652e73484e313
|
[
"MIT"
] | null | null | null |
cart/models.py
|
weiliang1103/onlinelibrary
|
993f7969cb6c8242631c13c7714652e73484e313
|
[
"MIT"
] | null | null | null |
from django.db import models
from catalog.models import Book
# Create your models here
class History(models.Model):
books = models.ManyToManyField(Book)
def __str__(self):
return ', '.join([book.title for book in self.books.all()])
| 25 | 67 | 0.712 |
4a1cb429df088d41b0c371ae8ed34c4d2c810576
| 1,800 |
py
|
Python
|
createEquitiesDatabase.py
|
rkadiya/ProjectTrinity
|
b8cc5d2ca7318aa338741c215714a1f58b07b89a
|
[
"MIT"
] | 1 |
2018-06-28T13:31:29.000Z
|
2018-06-28T13:31:29.000Z
|
createEquitiesDatabase.py
|
rkadiya/ProjectTrinity
|
b8cc5d2ca7318aa338741c215714a1f58b07b89a
|
[
"MIT"
] | null | null | null |
createEquitiesDatabase.py
|
rkadiya/ProjectTrinity
|
b8cc5d2ca7318aa338741c215714a1f58b07b89a
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from pymongo import MongoClient
import datetime
import json
import time
import urllib2
import os
from pymongo.errors import DuplicateKeyError
count = 0
log = open("./error_tickers", "w+")
client = MongoClient()
db = client.equitiesdb
stocks = db.stocks
stocks.ensure_index("dataset.dataset_code", unique=True)
exchanges = ["AMEX","NYSE","Nasdaq"]
#exchanges = ["RAVI"]
for exchange in exchanges:
with open("./Tickers/" + exchange + "_tickerlist.txt") as f1:
for line in f1:
if count > 1800:
time.sleep(60) # Quandl has a limit of 2000 queries per 10 mins.
count = 0
ticker = line.rstrip()
try:
count += 1
response = urllib2.urlopen("https://www.quandl.com/api/v3/datasets/YAHOO/" + ticker + ".json?auth_token=sBEu_6B6oDDp2uBRdpNL")
data = json.load(response)
stocks.insert_one(data)
print(ticker)
except urllib2.HTTPError, err:
if err.code == 404:
print("Page not found!+" + ticker,file=log)
elif err.code == 403:
print ("Access denied!+" + ticker,file=log)
else:
print ("Something happened! Error code!+" + ticker,file=log), err.code
except urllib2.URLError, err:
print ("Some other error happened!+" + ticker, file.log), err.reason
except ValueError as e:
print (e.message + "+" + ticker,file=log)
except DuplicateKeyError as dke:
print (dke.message + "+" + ticker,file=log)
log.close()
| 34.615385 | 146 | 0.542222 |
4a1cb4fd1f4d9daf3cc1fed470f3bc9f7b2476ab
| 606 |
py
|
Python
|
neutron_plugin_contrail/plugins/opencontrail/loadbalancer/utils.py
|
hamzazafar/contrail-neutron-plugin
|
fb8dbcabc8240e5c47753ae6e2af5556d0a38421
|
[
"Apache-2.0"
] | null | null | null |
neutron_plugin_contrail/plugins/opencontrail/loadbalancer/utils.py
|
hamzazafar/contrail-neutron-plugin
|
fb8dbcabc8240e5c47753ae6e2af5556d0a38421
|
[
"Apache-2.0"
] | null | null | null |
neutron_plugin_contrail/plugins/opencontrail/loadbalancer/utils.py
|
hamzazafar/contrail-neutron-plugin
|
fb8dbcabc8240e5c47753ae6e2af5556d0a38421
|
[
"Apache-2.0"
] | null | null | null |
try:
from neutron.common.exceptions import SubnetNotFound
except ImportError:
from neutron_lib.exceptions import SubnetNotFound
from cfgm_common import exceptions as vnc_exc
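# The subnet KV entry appears to be stored as "<network_id> <cidr>" (inferred from the
# split() indexing below); these helpers return the first and second token respectively.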
def get_subnet_network_id(client, subnet_id):
try:
kv_pair = client.kv_retrieve(subnet_id)
except vnc_exc.NoIdError:
raise SubnetNotFound(subnet_id=subnet_id)
return kv_pair.split()[0]
def get_subnet_cidr(client, subnet_id):
try:
kv_pair = client.kv_retrieve(subnet_id)
except vnc_exc.NoIdError:
raise SubnetNotFound(subnet_id=subnet_id)
return kv_pair.split()[1]
| 27.545455 | 56 | 0.745875 |
4a1cb506c9add40706e2d722c6e0e448105cc5c2
| 3,727 |
py
|
Python
|
tests/apis/test_sandbox_api.py
|
usbo/tinvest
|
e6b7348af76c9db4d8430878efe69820a4079c21
|
[
"MIT"
] | null | null | null |
tests/apis/test_sandbox_api.py
|
usbo/tinvest
|
e6b7348af76c9db4d8430878efe69820a4079c21
|
[
"MIT"
] | null | null | null |
tests/apis/test_sandbox_api.py
|
usbo/tinvest
|
e6b7348af76c9db4d8430878efe69820a4079c21
|
[
"MIT"
] | null | null | null |
# pylint:disable=redefined-outer-name
import pytest
from tinvest import (
BrokerAccountType,
Empty,
SandboxApi,
SandboxRegisterRequest,
SandboxRegisterResponse,
SandboxSetCurrencyBalanceRequest,
SandboxSetPositionBalanceRequest,
)
@pytest.fixture()
def api_client(http_client):
return SandboxApi(http_client)
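# `http_client` is assumed to be a mock fixture defined elsewhere in the test suite, so these
# tests only verify the request that SandboxApi constructs rather than performing real HTTP calls.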
def test_sandbox_register(api_client, http_client):
body = SandboxRegisterRequest(broker_account_type=BrokerAccountType.tinkoff)
api_client.sandbox_register_post(body)
http_client.request.assert_called_once_with(
'POST',
'/sandbox/register',
response_model=SandboxRegisterResponse,
data=body.json(by_alias=True),
)
def test_sandbox_currencies_balance(api_client, http_client, broker_account_id):
body = SandboxSetCurrencyBalanceRequest.parse_obj(
{'balance': 1000.0, 'currency': 'USD'}
)
api_client.sandbox_currencies_balance_post(body, broker_account_id)
http_client.request.assert_called_once_with(
'POST',
'/sandbox/currencies/balance',
response_model=Empty,
params={'brokerAccountId': broker_account_id},
data=body.json(by_alias=True),
)
def test_sandbox_currencies_balance_without_broker_account_id(api_client, http_client):
body = SandboxSetCurrencyBalanceRequest.parse_obj(
{'balance': 1000.0, 'currency': 'USD'}
)
api_client.sandbox_currencies_balance_post(body)
http_client.request.assert_called_once_with(
'POST',
'/sandbox/currencies/balance',
response_model=Empty,
params={},
data=body.json(by_alias=True),
)
def test_sandbox_positions_balance(api_client, http_client, broker_account_id):
body = SandboxSetPositionBalanceRequest.parse_obj(
{'balance': 1000.0, 'figi': '<FIGI>'}
)
api_client.sandbox_positions_balance_post(body, broker_account_id)
http_client.request.assert_called_once_with(
'POST',
'/sandbox/positions/balance',
response_model=Empty,
params={'brokerAccountId': broker_account_id},
data=body.json(by_alias=True),
)
def test_sandbox_positions_balance_without_broker_account_id(api_client, http_client):
body = SandboxSetPositionBalanceRequest.parse_obj(
{'balance': 1000.0, 'figi': '<FIGI>'}
)
api_client.sandbox_positions_balance_post(body)
http_client.request.assert_called_once_with(
'POST',
'/sandbox/positions/balance',
response_model=Empty,
params={},
data=body.json(by_alias=True),
)
def test_sandbox_remove(api_client, http_client, broker_account_id):
api_client.sandbox_remove_post(broker_account_id)
http_client.request.assert_called_once_with(
'POST',
'/sandbox/remove',
response_model=Empty,
params={'brokerAccountId': broker_account_id},
)
def test_sandbox_remove_without_broker_account_id(api_client, http_client):
api_client.sandbox_remove_post()
http_client.request.assert_called_once_with(
'POST',
'/sandbox/remove',
response_model=Empty,
params={},
)
def test_sandbox_clear(api_client, http_client, broker_account_id):
api_client.sandbox_clear_post(broker_account_id)
http_client.request.assert_called_once_with(
'POST',
'/sandbox/clear',
response_model=Empty,
params={'brokerAccountId': broker_account_id},
)
def test_sandbox_clear_without_broker_account_id(api_client, http_client):
api_client.sandbox_clear_post()
http_client.request.assert_called_once_with(
'POST',
'/sandbox/clear',
response_model=Empty,
params={},
)
| 29.816 | 87 | 0.710759 |
4a1cb5d2fc2e5a4b25873caea06376805c0025bc
| 26,175 |
py
|
Python
|
tools/ldgen/generation.py
|
pabloVecchio/framework-espidf_v4.0.1
|
fabed4c56ccc6b4e1a985aebdcf31bac22819ffc
|
[
"Apache-2.0"
] | null | null | null |
tools/ldgen/generation.py
|
pabloVecchio/framework-espidf_v4.0.1
|
fabed4c56ccc6b4e1a985aebdcf31bac22819ffc
|
[
"Apache-2.0"
] | null | null | null |
tools/ldgen/generation.py
|
pabloVecchio/framework-espidf_v4.0.1
|
fabed4c56ccc6b4e1a985aebdcf31bac22819ffc
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2018-2019 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import collections
import itertools
import os
import fnmatch
from fragments import Sections, Scheme, Mapping, Fragment
from pyparsing import Suppress, White, ParseException, Literal, Group, ZeroOrMore
from pyparsing import Word, OneOrMore, nums, alphanums, alphas, Optional, LineEnd, printables
from ldgen_common import LdGenFailure
class PlacementRule():
"""
Encapsulates a generated placement rule placed under a target
"""
DEFAULT_SPECIFICITY = 0
ARCHIVE_SPECIFICITY = 1
OBJECT_SPECIFICITY = 2
SYMBOL_SPECIFICITY = 3
class __container():
def __init__(self, content):
self.content = content
__metadata = collections.namedtuple("__metadata", "excludes expansions expanded")
def __init__(self, archive, obj, symbol, sections, target):
if archive == "*":
archive = None
if obj == "*":
obj = None
self.archive = archive
self.obj = obj
self.symbol = symbol
self.target = target
self.sections = dict()
self.specificity = 0
self.specificity += 1 if self.archive else 0
self.specificity += 1 if (self.obj and not self.obj == '*') else 0
self.specificity += 1 if self.symbol else 0
for section in sections:
section_data = Sections.get_section_data_from_entry(section, self.symbol)
if not self.symbol:
for s in section_data:
metadata = self.__metadata(self.__container([]), self.__container([]), self.__container(False))
self.sections[s] = metadata
else:
(section, expansion) = section_data
if expansion:
metadata = self.__metadata(self.__container([]), self.__container([expansion]), self.__container(True))
self.sections[section] = metadata
def get_section_names(self):
return self.sections.keys()
def add_exclusion(self, other, sections_infos=None):
# Utility functions for this method
def do_section_expansion(rule, section):
if section in rule.get_section_names():
sections_in_obj = sections_infos.get_obj_sections(rule.archive, rule.obj)
expansions = fnmatch.filter(sections_in_obj, section)
return expansions
def remove_section_expansions(rule, section, expansions):
existing_expansions = self.sections[section].expansions.content
self.sections[section].expansions.content = [e for e in existing_expansions if e not in expansions]
# Exit immediately if the exclusion to be added is more general than this rule.
if not other.is_more_specific_rule_of(self):
return
for section in self.get_sections_intersection(other):
if(other.specificity == PlacementRule.SYMBOL_SPECIFICITY):
# If this sections has not been expanded previously, expand now and keep track.
previously_expanded = self.sections[section].expanded.content
if not previously_expanded:
expansions = do_section_expansion(self, section)
if expansions:
self.sections[section].expansions.content = expansions
self.sections[section].expanded.content = True
previously_expanded = True
# Remove the sections corresponding to the symbol name
remove_section_expansions(self, section, other.sections[section].expansions.content)
# If it has been expanded previously but now the expansions list is empty,
# it means adding exclusions has exhausted the list. Remove the section entirely.
if previously_expanded and not self.sections[section].expanded.content:
del self.sections[section]
else:
# A rule section can have multiple rule sections excluded from it. Get the
# most specific rule from the list, and if an even more specific rule is found,
# replace it entirely. Otherwise, keep appending.
exclusions = self.sections[section].excludes
exclusions_list = exclusions.content if exclusions.content is not None else []
                exclusions_to_remove = list(filter(lambda r: r.is_more_specific_rule_of(other), exclusions_list))
remaining_exclusions = [e for e in exclusions_list if e not in exclusions_to_remove]
remaining_exclusions.append(other)
self.sections[section].excludes.content = remaining_exclusions
def get_sections_intersection(self, other):
return set(self.sections.keys()).intersection(set(other.sections.keys()))
def is_more_specific_rule_of(self, other):
if (self.specificity <= other.specificity):
return False
# Compare archive, obj and target
for entity_index in range(1, other.specificity + 1):
if self[entity_index] != other[entity_index] and other[entity_index] is not None:
return False
return True
def maps_same_entities_as(self, other):
if self.specificity != other.specificity:
return False
# Compare archive, obj and target
for entity_index in range(1, other.specificity + 1):
if self[entity_index] != other[entity_index] and other[entity_index] is not None:
return False
return True
def __getitem__(self, key):
if key == PlacementRule.ARCHIVE_SPECIFICITY:
return self.archive
elif key == PlacementRule.OBJECT_SPECIFICITY:
return self.obj
elif key == PlacementRule.SYMBOL_SPECIFICITY:
return self.symbol
else:
return None
def __str__(self):
sorted_sections = sorted(self.get_section_names())
sections_string = list()
for section in sorted_sections:
exclusions = self.sections[section].excludes.content
exclusion_string = None
if exclusions:
exclusion_string = " ".join(map(lambda e: "*" + e.archive + (":" + e.obj + ".*" if e.obj else ""), exclusions))
exclusion_string = "EXCLUDE_FILE(" + exclusion_string + ")"
else:
exclusion_string = ""
section_string = None
exclusion_section_string = None
section_expansions = self.sections[section].expansions.content
section_expanded = self.sections[section].expanded.content
if section_expansions and section_expanded:
section_string = " ".join(section_expansions)
exclusion_section_string = section_string
else:
section_string = section
exclusion_section_string = exclusion_string + " " + section_string
sections_string.append(exclusion_section_string)
sections_string = " ".join(sections_string)
archive = str(self.archive) if self.archive else ""
obj = (str(self.obj) + (".*" if self.obj else "")) if self.obj else ""
# Handle output string generation based on information available
if self.specificity == PlacementRule.DEFAULT_SPECIFICITY:
rule_string = "*(%s)" % (sections_string)
elif self.specificity == PlacementRule.ARCHIVE_SPECIFICITY:
rule_string = "*%s:(%s)" % (archive, sections_string)
else:
rule_string = "*%s:%s(%s)" % (archive, obj, sections_string)
return rule_string
def __eq__(self, other):
if id(self) == id(other):
return True
def exclusions_set(exclusions):
exclusions_set = {(e.archive, e.obj, e.symbol, e.target) for e in exclusions}
return exclusions_set
if self.archive != other.archive:
return False
if self.obj != other.obj:
return False
if self.symbol != other.symbol:
return False
if set(self.sections.keys()) != set(other.sections.keys()):
return False
for (section, metadata) in self.sections.items():
self_meta = metadata
other_meta = other.sections[section]
if exclusions_set(self_meta.excludes.content) != exclusions_set(other_meta.excludes.content):
return False
if set(self_meta.expansions.content) != set(other_meta.expansions.content):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __iter__(self):
yield self.archive
yield self.obj
yield self.symbol
class GenerationModel:
"""
Implements generation of placement rules based on collected sections, scheme and mapping fragment.
"""
DEFAULT_SCHEME = "default"
def __init__(self):
self.schemes = {}
self.sections = {}
self.mappings = {}
def _add_mapping_rules(self, archive, obj, symbol, scheme_name, scheme_dict, rules):
# Use an ordinary dictionary to raise exception on non-existing keys
temp_dict = dict(scheme_dict)
sections_bucket = temp_dict[scheme_name]
for (target, sections) in sections_bucket.items():
section_entries = []
for section in sections:
section_entries.extend(section.entries)
rule = PlacementRule(archive, obj, symbol, section_entries, target)
if rule not in rules:
rules.append(rule)
def _build_scheme_dictionary(self):
scheme_dictionary = collections.defaultdict(dict)
# Collect sections into buckets based on target name
for scheme in self.schemes.values():
sections_bucket = collections.defaultdict(list)
for (sections_name, target_name) in scheme.entries:
# Get the sections under the bucket 'target_name'. If this bucket does not exist
                # it is created automatically
sections_in_bucket = sections_bucket[target_name]
try:
sections = self.sections[sections_name]
except KeyError:
                    message = GenerationException.UNDEFINED_REFERENCE + " to sections '" + sections_name + "'."
raise GenerationException(message, scheme)
sections_in_bucket.append(sections)
scheme_dictionary[scheme.name] = sections_bucket
# Search for and raise exception on first instance of sections mapped to multiple targets
for (scheme_name, sections_bucket) in scheme_dictionary.items():
for sections_a, sections_b in itertools.combinations(sections_bucket.values(), 2):
set_a = set()
set_b = set()
for sections in sections_a:
set_a.update(sections.entries)
for sections in sections_b:
set_b.update(sections.entries)
intersection = set_a.intersection(set_b)
# If the intersection is a non-empty set, it means sections are mapped to multiple
# targets. Raise exception.
if intersection:
scheme = self.schemes[scheme_name]
message = "Sections " + str(intersection) + " mapped to multiple targets."
raise GenerationException(message, scheme)
return scheme_dictionary
def generate_rules(self, sections_infos):
placement_rules = collections.defaultdict(list)
scheme_dictionary = self._build_scheme_dictionary()
# Generate default rules
default_rules = list()
self._add_mapping_rules(None, None, None, GenerationModel.DEFAULT_SCHEME, scheme_dictionary, default_rules)
all_mapping_rules = collections.defaultdict(list)
# Generate rules based on mapping fragments
for mapping in self.mappings.values():
archive = mapping.archive
mapping_rules = all_mapping_rules[archive]
for (obj, symbol, scheme_name) in mapping.entries:
try:
if not (obj == Mapping.MAPPING_ALL_OBJECTS and symbol is None and
scheme_name == GenerationModel.DEFAULT_SCHEME):
self._add_mapping_rules(archive, obj, symbol, scheme_name, scheme_dictionary, mapping_rules)
except KeyError:
message = GenerationException.UNDEFINED_REFERENCE + " to scheme '" + scheme_name + "'."
raise GenerationException(message, mapping)
# Detect rule conflicts
for mapping_rules in all_mapping_rules.items():
self._detect_conflicts(mapping_rules)
# Add exclusions
for mapping_rules in all_mapping_rules.values():
self._create_exclusions(mapping_rules, default_rules, sections_infos)
# Add the default rules grouped by target
for default_rule in default_rules:
existing_rules = placement_rules[default_rule.target]
if default_rule.get_section_names():
existing_rules.append(default_rule)
for mapping_rules in all_mapping_rules.values():
# Add the mapping rules grouped by target
for mapping_rule in mapping_rules:
existing_rules = placement_rules[mapping_rule.target]
if mapping_rule.get_section_names():
existing_rules.append(mapping_rule)
return placement_rules
def _detect_conflicts(self, rules):
(archive, rules_list) = rules
for specificity in range(0, PlacementRule.OBJECT_SPECIFICITY + 1):
rules_with_specificity = filter(lambda r: r.specificity == specificity, rules_list)
for rule_a, rule_b in itertools.combinations(rules_with_specificity, 2):
intersections = rule_a.get_sections_intersection(rule_b)
if intersections and rule_a.maps_same_entities_as(rule_b):
rules_string = str([str(rule_a), str(rule_b)])
message = "Rules " + rules_string + " map sections " + str(list(intersections)) + " into multiple targets."
raise GenerationException(message)
def _create_extra_rules(self, rules):
# This function generates extra rules for symbol specific rules. The reason for generating extra rules is to isolate,
# as much as possible, rules that require expansion. Particularly, object specific extra rules are generated.
rules_to_process = sorted(rules, key=lambda r: r.specificity)
symbol_specific_rules = list(filter(lambda r: r.specificity == PlacementRule.SYMBOL_SPECIFICITY, rules_to_process))
extra_rules = dict()
for symbol_specific_rule in symbol_specific_rules:
extra_rule_candidate = {s: None for s in symbol_specific_rule.get_section_names()}
super_rules = filter(lambda r: symbol_specific_rule.is_more_specific_rule_of(r), rules_to_process)
# Take a look at the existing rules that are more general than the current symbol-specific rule.
# Only generate an extra rule if there is no existing object specific rule for that section
for super_rule in super_rules:
intersections = symbol_specific_rule.get_sections_intersection(super_rule)
for intersection in intersections:
if super_rule.specificity != PlacementRule.OBJECT_SPECIFICITY:
extra_rule_candidate[intersection] = super_rule
else:
extra_rule_candidate[intersection] = None
# Generate the extra rules for the symbol specific rule section, keeping track of the generated extra rules
for (section, section_rule) in extra_rule_candidate.items():
if section_rule:
extra_rule = None
extra_rules_key = (symbol_specific_rule.archive, symbol_specific_rule.obj, section_rule.target)
try:
extra_rule = extra_rules[extra_rules_key]
if section not in extra_rule.get_section_names():
new_rule = PlacementRule(extra_rule.archive, extra_rule.obj, extra_rule.symbol,
list(extra_rule.get_section_names()) + [section], extra_rule.target)
extra_rules[extra_rules_key] = new_rule
except KeyError:
extra_rule = PlacementRule(symbol_specific_rule.archive, symbol_specific_rule.obj, None, [section], section_rule.target)
extra_rules[extra_rules_key] = extra_rule
return extra_rules.values()
def _create_exclusions(self, mapping_rules, default_rules, sections_info):
rules = list(default_rules)
rules.extend(mapping_rules)
extra_rules = self._create_extra_rules(rules)
mapping_rules.extend(extra_rules)
rules.extend(extra_rules)
        # Sort the rules by how specific they are, from lowest to highest specificity
# * -> lib:* -> lib:obj -> lib:obj:symbol
sorted_rules = sorted(rules, key=lambda r: r.specificity)
# Now that the rules have been sorted, loop through each rule, and then loop
        # through rules below it (higher indices), adding exclusions whenever appropriate.
for general_rule in sorted_rules:
for specific_rule in reversed(sorted_rules):
if (specific_rule.specificity > general_rule.specificity and
specific_rule.specificity != PlacementRule.SYMBOL_SPECIFICITY) or \
(specific_rule.specificity == PlacementRule.SYMBOL_SPECIFICITY and
general_rule.specificity == PlacementRule.OBJECT_SPECIFICITY):
general_rule.add_exclusion(specific_rule, sections_info)
def add_fragments_from_file(self, fragment_file):
for fragment in fragment_file.fragments:
dict_to_append_to = None
if isinstance(fragment, Mapping) and fragment.deprecated and fragment.name in self.mappings.keys():
self.mappings[fragment.name].entries |= fragment.entries
else:
if isinstance(fragment, Scheme):
dict_to_append_to = self.schemes
elif isinstance(fragment, Sections):
dict_to_append_to = self.sections
else:
dict_to_append_to = self.mappings
# Raise exception when the fragment of the same type is already in the stored fragments
if fragment.name in dict_to_append_to.keys():
stored = dict_to_append_to[fragment.name].path
new = fragment.path
message = "Duplicate definition of fragment '%s' found in %s and %s." % (fragment.name, stored, new)
raise GenerationException(message)
dict_to_append_to[fragment.name] = fragment
class TemplateModel:
"""
Encapsulates a linker script template file. Finds marker syntax and handles replacement to generate the
final output.
"""
Marker = collections.namedtuple("Marker", "target indent rules")
def __init__(self, template_file):
self.members = []
self.file = os.path.realpath(template_file.name)
self._generate_members(template_file)
def _generate_members(self, template_file):
lines = template_file.readlines()
target = Fragment.IDENTIFIER
reference = Suppress("mapping") + Suppress("[") + target.setResultsName("target") + Suppress("]")
pattern = White(" \t").setResultsName("indent") + reference
# Find the markers in the template file line by line. If line does not match marker grammar,
# set it as a literal to be copied as is to the output file.
for line in lines:
try:
parsed = pattern.parseString(line)
indent = parsed.indent
target = parsed.target
marker = TemplateModel.Marker(target, indent, [])
self.members.append(marker)
except ParseException:
# Does not match marker syntax
self.members.append(line)
def fill(self, mapping_rules):
for member in self.members:
target = None
try:
target = member.target
rules = member.rules
del rules[:]
rules.extend(mapping_rules[target])
except KeyError:
message = GenerationException.UNDEFINED_REFERENCE + " to target '" + target + "'."
raise GenerationException(message)
except AttributeError:
pass
def write(self, output_file):
# Add information that this is a generated file.
output_file.write("/* Automatically generated file; DO NOT EDIT */\n")
output_file.write("/* Espressif IoT Development Framework Linker Script */\n")
output_file.write("/* Generated from: %s */\n" % self.file)
output_file.write("\n")
# Do the text replacement
for member in self.members:
try:
indent = member.indent
rules = member.rules
for rule in rules:
generated_line = "".join([indent, str(rule), '\n'])
output_file.write(generated_line)
except AttributeError:
output_file.write(member)
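# A minimal end-to-end sketch (hypothetical helper, for illustration only):
# collect parsed fragment files into a GenerationModel, generate the placement
# rules, and fill a linker script template with them. `fragment_files` is
# assumed to be an iterable of already-parsed objects exposing a `.fragments`
# list, as expected by GenerationModel.add_fragments_from_file.
def _generate_linker_script_sketch(fragment_files, sections_info, template_path, output_path):
    model = GenerationModel()
    for fragment_file in fragment_files:
        model.add_fragments_from_file(fragment_file)
    mapping_rules = model.generate_rules(sections_info)
    with open(template_path) as template_file:
        template = TemplateModel(template_file)
    template.fill(mapping_rules)
    with open(output_path, "w") as output_file:
        template.write(output_file)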
class GenerationException(LdGenFailure):
"""
    Exception for linker script generation failures such as undefined references, failure to
evaluate conditions, duplicate mappings, etc.
"""
UNDEFINED_REFERENCE = "Undefined reference"
def __init__(self, message, fragment=None):
self.fragment = fragment
self.message = message
def __str__(self):
if self.fragment:
return "%s\nIn fragment '%s' defined in '%s'." % (self.message, self.fragment.name, self.fragment.path)
else:
return self.message
class SectionsInfo(dict):
"""
    Encapsulates the output of objdump for a static library: the object files it contains
    and the names of the sections in each of them.
"""
__info = collections.namedtuple("__info", "filename content")
def __init__(self):
self.sections = dict()
def add_sections_info(self, sections_info_dump):
first_line = sections_info_dump.readline()
archive_path = (Literal("In archive").suppress() +
# trim the last character from archive_path, :
Word(printables + " ").setResultsName("archive_path").setParseAction(lambda t: t[0][:-1]) +
LineEnd())
parser = archive_path
results = None
try:
results = parser.parseString(first_line)
except ParseException as p:
raise ParseException("Parsing sections info for library " + sections_info_dump.name + " failed. " + p.message)
archive = os.path.basename(results.archive_path)
self.sections[archive] = SectionsInfo.__info(sections_info_dump.name, sections_info_dump.read())
def _get_infos_from_file(self, info):
# Object file line: '{object}: file format elf32-xtensa-le'
object = Fragment.ENTITY.setResultsName("object") + Literal(":").suppress() + Literal("file format elf32-xtensa-le").suppress()
# Sections table
header = Suppress(Literal("Sections:") + Literal("Idx") + Literal("Name") + Literal("Size") + Literal("VMA") +
Literal("LMA") + Literal("File off") + Literal("Algn"))
entry = Word(nums).suppress() + Fragment.ENTITY + Suppress(OneOrMore(Word(alphanums, exact=8)) +
Word(nums + "*") + ZeroOrMore(Word(alphas.upper()) +
Optional(Literal(","))))
# Content is object file line + sections table
content = Group(object + header + Group(ZeroOrMore(entry)).setResultsName("sections"))
parser = Group(ZeroOrMore(content)).setResultsName("contents")
sections_info_text = info.content
results = None
try:
results = parser.parseString(sections_info_text)
except ParseException as p:
raise ParseException("Unable to parse section info file " + info.filename + ". " + p.message)
return results
def get_obj_sections(self, archive, obj):
stored = self.sections[archive]
# Parse the contents of the sections file
if not isinstance(stored, dict):
parsed = self._get_infos_from_file(stored)
stored = dict()
for content in parsed.contents:
sections = list(map(lambda s: s, content.sections))
stored[content.object] = sections
self.sections[archive] = stored
for obj_key in stored.keys():
if any(obj_key == obj + ext for ext in (".o", ".c.obj", ".c.o")):
return stored[obj_key]
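# Usage sketch (hypothetical names, for illustration only): feed SectionsInfo
# the objdump output captured for an archive, then look up the sections of one
# of its object files.
def _sections_info_sketch(objdump_dump_path):
    sections_info = SectionsInfo()
    with open(objdump_dump_path) as dump_file:
        sections_info.add_sections_info(dump_file)
    # "libfoo.a" and "bar" are placeholder names; get_obj_sections matches
    # "bar.o", "bar.c.obj" or "bar.c.o" inside the archive.
    return sections_info.get_obj_sections("libfoo.a", "bar")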
| 41.091052 | 144 | 0.623114 |
4a1cb5fab0002e7da848df84de590a0ac6ef5042
| 767 |
py
|
Python
|
examples/Graph_Neural_Networks/PyTorch/PPNP.py
|
TobiasSchmidtDE/GraphGallery
|
e627e4f454e0ce3813171305a524f5190a6e6f45
|
[
"MIT"
] | null | null | null |
examples/Graph_Neural_Networks/PyTorch/PPNP.py
|
TobiasSchmidtDE/GraphGallery
|
e627e4f454e0ce3813171305a524f5190a6e6f45
|
[
"MIT"
] | null | null | null |
examples/Graph_Neural_Networks/PyTorch/PPNP.py
|
TobiasSchmidtDE/GraphGallery
|
e627e4f454e0ce3813171305a524f5190a6e6f45
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
import torch
import graphgallery
print("GraphGallery version: ", graphgallery.__version__)
print("Torch version: ", torch.__version__)
'''
Load Datasets
- cora/citeseer/pubmed
'''
from graphgallery.datasets import Planetoid
data = Planetoid('cora', root="~/GraphData/datasets/", verbose=False)
graph = data.graph
splits = data.split_nodes()
graphgallery.set_backend("pytorch")
from graphgallery.gallery.nodeclas import PPNP
trainer = PPNP(device="gpu", seed=123).setup_graph(graph, attr_transform="normalize_attr").build()
his = trainer.fit(splits.train_nodes, splits.val_nodes, verbose=1, epochs=100)
results = trainer.evaluate(splits.test_nodes)
print(f'Test loss {results.loss:.5}, Test accuracy {results.accuracy:.2%}')
| 29.5 | 98 | 0.767927 |
4a1cb6a7f7f443e19fd5711f41b0f347c3378578
| 505 |
py
|
Python
|
test_day12.py
|
Yolgie/AdventOfCode2017
|
bf823b049659c6c528b354f0895cf311ad69ad5d
|
[
"MIT"
] | null | null | null |
test_day12.py
|
Yolgie/AdventOfCode2017
|
bf823b049659c6c528b354f0895cf311ad69ad5d
|
[
"MIT"
] | null | null | null |
test_day12.py
|
Yolgie/AdventOfCode2017
|
bf823b049659c6c528b354f0895cf311ad69ad5d
|
[
"MIT"
] | null | null | null |
import unittest
from day12 import Pipes
class Tests(unittest.TestCase):
def test_part_1(self):
testObject = Pipes()
testObject.test = 1
sample_input = ["0 <-> 2",
"1 <-> 1",
"2 <-> 0, 3, 4",
"3 <-> 2, 4",
"4 <-> 2, 3, 6",
"5 <-> 6",
"6 <-> 4, 5"]
self.assertEqual(6, testObject.process(sample_input))
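# A reference sketch of the part-1 computation the test above exercises (an
# assumption about what Pipes.process computes; the real implementation lives
# in day12.py): build the pipe graph and count the programs reachable from "0".
def reference_group_size(lines, start="0"):
    graph = {}
    for line in lines:
        node, neighbours = line.split(" <-> ")
        graph[node] = [n.strip() for n in neighbours.split(",")]
    seen = set()
    stack = [start]
    while stack:
        node = stack.pop()
        if node in seen:
            continue
        seen.add(node)
        stack.extend(graph.get(node, []))
    return len(seen)  # 6 for the sample input above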
| 25.25 | 62 | 0.366337 |
4a1cb76d99b20b7a5ca339c513ce463e3f09dfa2
| 3,805 |
py
|
Python
|
get_disaster_data.py
|
apmechev/Hackathon_for_peace_EAAA
|
e45f8394c86d094c4a3e421bab7c16fe8ad6100d
|
[
"Apache-2.0"
] | null | null | null |
get_disaster_data.py
|
apmechev/Hackathon_for_peace_EAAA
|
e45f8394c86d094c4a3e421bab7c16fe8ad6100d
|
[
"Apache-2.0"
] | null | null | null |
get_disaster_data.py
|
apmechev/Hackathon_for_peace_EAAA
|
e45f8394c86d094c4a3e421bab7c16fe8ad6100d
|
[
"Apache-2.0"
] | null | null | null |
import requests
import json
APP_NAME= 'EA4'
LATEST_EVENTS = "https://api.reliefweb.int/v1/disasters?appname={}&preset=latest".format(APP_NAME)
def get_n_latest(n=20):
r = requests.get("{}&limit={}".format(LATEST_EVENTS,n))
if not r.ok:
raise Exception(r.json())
event_links=[i.get('href') for i in r.json().get('data')]
return event_links
def get_country(event_json):
countries=[]
for data in event_json.get('data'):
for country in data.get('fields').get('country'):
countries.append(country.get('name'))
return countries
def get_country_codes(event_json):
codes=[]
for data in event_json.get('data'):
for country in data.get('fields').get('country'):
codes.append(country.get('iso3'))
return codes
def get_name(event_json):
return event_json.get('data')[0].get('fields').get('name')
def get_description(event_json):
return event_json.get('data')[0].get('fields').get('description')
def get_status(event_json):
return event_json.get('data')[0].get('fields').get('status')
def get_date(event_json):
dates = list(event_json.get('data')[0].get('fields').get('date').keys())
return event_json.get('data')[0].get('fields').get('date').get(dates[0])
def get_locations(event_json):
locations=[]
for data in event_json.get('data'):
for country in data.get('fields').get('country'):
locations.append(country.get('location'))
return locations
def get_event_type(event_json):
return event_json.get('data')[0].get('fields').get('type')[0]
def get_event_current(event_json):
    return event_json.get('data')[0].get('fields').get('current')
def get_event_id(event_json):
return event_json.get('data')[0].get('fields').get('id')
def get_event_data(event_href):
r = requests.get(event_href)
if not r.ok:
raise Exception(r.json())
event_json = r.json()
event_data={}
event_data['countries']=get_country(event_json)
event_data['iso3_codes']=get_country_codes(event_json)
event_data['name']=get_name(event_json)
event_data['description']=get_description(event_json)
event_data['status']=get_status(event_json)
event_data['date']=get_date(event_json)
event_data['location']=get_locations(event_json)
event_data['type']= get_event_type(event_json)
event_data['current']=get_event_current(event_json)
return event_data
def get_n_latest_events_data(n=10):
events=[]
for event in get_n_latest(n):
events.append(get_event_data(event))
return events
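if __name__ == "__main__":
    # Example invocation (network access to the ReliefWeb API required): print
    # a one-line summary of the five most recent disasters. Added for
    # illustration; the original script only defines the helper functions.
    for event in get_n_latest_events_data(5):
        print(event['name'], '-', ', '.join(event['countries']))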
| 36.238095 | 109 | 0.512746 |
4a1cba18e21a431fe025087b500a7288681f3bf4
| 197 |
py
|
Python
|
src/test/backtrace_syscall.py
|
staktrace/rr
|
90615c6276b7289dafb2c2787ff6eae64843dea8
|
[
"MIT"
] | 3 |
2015-03-01T23:26:27.000Z
|
2021-01-14T03:31:36.000Z
|
src/test/backtrace_syscall.py
|
rogerwang/rr
|
619b8951d392b46c6f62b2bad7c0861a05d9f32e
|
[
"MIT"
] | null | null | null |
src/test/backtrace_syscall.py
|
rogerwang/rr
|
619b8951d392b46c6f62b2bad7c0861a05d9f32e
|
[
"MIT"
] | null | null | null |
from rrutil import *
send_gdb('b __kernel_vsyscall\n')
expect_gdb('Breakpoint 1')
send_gdb('c\n')
expect_gdb('Breakpoint 1')
send_gdb('bt\n')
expect_gdb(r'#0 [^_]*__kernel_vsyscall \(\)')
ok()
| 15.153846 | 45 | 0.700508 |
4a1cba5b86ca263f2fd0463830a3dc1e6101df77
| 3,309 |
py
|
Python
|
Photo-Gallery/gallery/settings.py
|
martinmandina/Photo-Gallery
|
a9c113a08d71f18b415e083cdeb8dc3f3fa60e80
|
[
"MIT"
] | null | null | null |
Photo-Gallery/gallery/settings.py
|
martinmandina/Photo-Gallery
|
a9c113a08d71f18b415e083cdeb8dc3f3fa60e80
|
[
"MIT"
] | null | null | null |
Photo-Gallery/gallery/settings.py
|
martinmandina/Photo-Gallery
|
a9c113a08d71f18b415e083cdeb8dc3f3fa60e80
|
[
"MIT"
] | null | null | null |
"""
Django settings for gallery project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#tgj-$*s$x+w7w=0teeryvn3el8c9$cb5kh11%-)0y9nv8-z9j'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'photos',
'bootstrap3',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'gallery.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'gallery.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'gallery',
'USER': 'martinmandina',
'PASSWORD':'alicewambui',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
| 24.879699 | 91 | 0.689634 |
4a1cbaceb81edca0df60b96669369ae19052856b
| 1,355 |
py
|
Python
|
analysis/processNplotOutput_onlyvar.py
|
jkikstra/PAGE-VAR
|
3edfe7301e394d879252b5afb01d29990fa091e2
|
[
"MIT"
] | null | null | null |
analysis/processNplotOutput_onlyvar.py
|
jkikstra/PAGE-VAR
|
3edfe7301e394d879252b5afb01d29990fa091e2
|
[
"MIT"
] | null | null | null |
analysis/processNplotOutput_onlyvar.py
|
jkikstra/PAGE-VAR
|
3edfe7301e394d879252b5afb01d29990fa091e2
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from numpy import genfromtxt
import seaborn as sns
#### ONLY VAR ####
######################
# TAKES A FEW HOURS! #
######################
mcruns = 100000
df_VAR_onlyvar = pd.DataFrame() # x scenarios, y mc runs per scenario
i = 0
# scenarios = ["1_5C", "2_0C", "2_5C", "NDC", "BAU", "RCP2_6_SSP1", "RCP4_5_SSP2", "RCP8_5_SSP5"]
# scenarioslabels = ["1.5C", "2.0C", "2.5C", "NDC", "BAU", "RCP2.6 & \n SSP1", "RCP4.5 & \n SSP2", "RCP8.5 & \n SSP5"]
scenarios = ["2_0C", "NDC", "RCP4_5_SSP2"]
scenarioslabels = ["2.0C", "NDC", "RCP4.5 & SSP2"]
for sc in scenarios:
path_VAR_onlyvar = "..\\PAGEoutput\\mcPAGEVAR\\finalscc\\onlyvarMC\\%s\\scc.csv" % (sc)
data_VAR_onlyvar = genfromtxt(path_VAR_onlyvar, delimiter=',')
for ii in range(mcruns):
df_VAR_onlyvar = df_VAR_onlyvar.append({'Scenario': sc, 'USD': data_VAR_onlyvar[ii]}, ignore_index=True)
df_VAR_onlyvar.to_csv('df_VAR_onlyvar.csv', sep=',')
# plotPoints=2500
# sns.set_palette("muted")
#
#
# fig, ax = plt.subplots()
# fig.set_size_inches(14, 7)
# ax = sns.violinplot(x="Scenario", y="USD", data=df_VAR_onlyvar, gridsize=plotPoints, cut=0)
# ax.set_ylim(-100,2000)
# ax.set_xticks(range(len(scenarios)))
# ax.set_xticklabels(scenarioslabels)
#
# plt.show()
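# Optional textual summary (an addition for convenience, not part of the
# original analysis): mean and 5th/50th/95th percentile SCC per scenario.
summary = df_VAR_onlyvar.groupby('Scenario')['USD'].describe(percentiles=[0.05, 0.5, 0.95])
print(summary)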
| 33.04878 | 119 | 0.631734 |
4a1cbca022bc1e0c95b08d29cc07366b582fc22b
| 4,230 |
py
|
Python
|
Ansible-Custom-Module-Basic/housekeeping/validate_dns.py
|
ginigangadharan/ansible-real-life
|
897c2fc0d05babbb540768b336b6ad399dad5bfa
|
[
"MIT"
] | 22 |
2021-07-16T08:11:22.000Z
|
2022-03-31T07:15:34.000Z
|
Ansible-Custom-Module-Basic/housekeeping/validate_dns.py
|
premsagar0228/ansible-real-life
|
1a51193b833ab6ad320100472333b9ffb0da39d4
|
[
"MIT"
] | null | null | null |
Ansible-Custom-Module-Basic/housekeeping/validate_dns.py
|
premsagar0228/ansible-real-life
|
1a51193b833ab6ad320100472333b9ffb0da39d4
|
[
"MIT"
] | 39 |
2021-07-05T02:31:42.000Z
|
2022-03-31T02:46:03.000Z
|
#!/usr/bin/python3
# Deprecated due to the introduction of Ansible collections; kept here for reference
ANSIBLE_METADATA = {
'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: validate_dns
short_description: DNS Validation Module
version_added: "2.10"
description:
- "DNS Validation Module"
options:
dns_server_ip:
description:
- The DNS Server to validate the DNS entries.
required: true
dns_address:
description:
- The DNS Address to be validated.
required: true
type: list
target_ip_address:
description:
- The Target IP Address to be matched.
required: true
author:
- Gineesh Madapparambath (@ginigangadharan)
'''
EXAMPLES = '''
# Validate single URL
- name: validate_dns
validate_dns:
target_ip_address: 10.1.10.10
dns_server_ip: 1.1.1.1
dns_address:
- example.com
# Validate Multiple URLs
- name: validate_dns
validate_dns:
target_ip_address: 10.1.10.10
dns_server_ip: 1.1.1.1
dns_address:
- example.com
- abc.com
- xyz.com
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
#import os
#import socket
#import subprocess, shlex
#
#class bcolors:
# HEADER = '\033[95m'
# OKBLUE = '\033[94m'
# OKGREEN = '\033[92m'
# WARNING = '\033[93m'
# FAIL = '\033[91m'
# ENDC = '\033[0m' #no color
# BOLD = '\033[1m'
# UNDERLINE = '\033[4m'
# INFO='\033[0;36m'
#myfilename = 'ping.txt'
#
#def pinghost(hostname):
# command_line = "/bin/ping -c1 " + hostname
# args = shlex.split(command_line)
# p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# pingStatus = 'ok';
# for line in p.stdout:
# output = line.rstrip().decode('UTF-8')
# if (output.endswith('unreachable.')) :
# #No route from the local system. Packets sent were never put on the wire.
# pingStatus = 'unreacheable'
# break
# elif (output.startswith('Ping request could not find host')) :
# pingStatus = 'host_not_found'
# break
# elif ('unknown' in output ) :
# pingStatus = 'host_not_found'
# break
# elif (output.startswith('1 packets transmitted, 0 received')) :
# pingStatus = 'no'
# break
# if (output.startswith('Request timed out.')) :
# #No Echo Reply messages were received within the default time of 1 second.
# pingStatus = 'timed_out'
# break
# #end if
# #endFor
# return pingStatus
##endDef
#
#print (bcolors.INFO + 'DNS Test - ver 2.2.0.\n' + bcolors.ENDC)
#timestart = "$(date)"
#counter = 0
#pingcount = 0
#dnscount = 0
#nodnscount = 0
#print (bcolors.OKBLUE + bcolors.UNDERLINE +'%-4s |%-18s |%-6s |%s' % ('No.',"Hostname","Ping","STATUS") + bcolors.ENDC)
#with open(myfilename,mode='r') as varfile:
# for line in varfile:
# counter = counter + 1
# line = line.replace('\n','')
# try:
# startcolor = bcolors.OKGREEN
# statusText2 = ''
# addr = socket.gethostbyname(line)
# pingresp = pinghost(addr)
# if addr:
# fqdn = socket.getfqdn(line)
# dnscount = dnscount + 1
# if pingresp == 'ok':
# pingcount = pingcount + 1
# else:
# startcolor = bcolors.WARNING
# statusText2 = bcolors.FAIL + '[host not reachable]'
# pingResponse = pingresp
# statusText = fqdn + ',' + addr + statusText2
# except IOError:
# nodnscount = nodnscount + 1
# statusText = 'NO DNS Entry Found'
# pingResponse = 'na'
# startcolor = bcolors.FAIL
# #else:
# #print 'Done'
# finally:
# print (startcolor + '%-4s |%-18s |%-6s |%s' % ( counter ,line,pingResponse,statusText) + bcolors.ENDC)
#
#varfile.close() #close the file
#
#timeend = "$(date)"
#print (bcolors.OKBLUE + "\n======================== Summary ======================================" + bcolors.ENDC)
#print (bcolors.OKGREEN , dnscount , "with DNS |" + bcolors.WARNING , nodnscount , "without DNS |" + bcolors.OKGREEN , pingcount , " reachable" + bcolors.ENDC)
#
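# A minimal module body sketch implied by DOCUMENTATION above (an assumption,
# since this file otherwise only carries the commented-out legacy script):
# resolve each dns_address and compare the result with target_ip_address.
# Note: dns_server_ip is accepted but not used here, because
# socket.gethostbyname always goes through the system resolver; querying a
# specific DNS server would need an extra dependency such as dnspython.
def main():
    import socket

    module_args = dict(
        dns_server_ip=dict(type='str', required=True),
        dns_address=dict(type='list', required=True),
        target_ip_address=dict(type='str', required=True),
    )
    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)

    mismatched = []
    for address in module.params['dns_address']:
        try:
            resolved = socket.gethostbyname(address)
        except socket.gaierror:
            resolved = None
        if resolved != module.params['target_ip_address']:
            mismatched.append(address)

    if mismatched:
        module.fail_json(msg="DNS validation failed", failed_addresses=mismatched)
    module.exit_json(changed=False, msg="All DNS entries resolve to the target IP")


if __name__ == '__main__':
    main()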
| 28.581081 | 159 | 0.591017 |
4a1cbd99f755a88819d8a4fda1d3455a5527cbc6
| 1,642 |
py
|
Python
|
example.py
|
sgraton/python-emploi-store
|
a9def8d40e7583b907eef7c486cd7170d108b02f
|
[
"MIT"
] | 9 |
2016-11-16T17:40:52.000Z
|
2021-06-21T10:48:31.000Z
|
example.py
|
sgraton/python-emploi-store
|
a9def8d40e7583b907eef7c486cd7170d108b02f
|
[
"MIT"
] | 44 |
2016-04-07T09:37:46.000Z
|
2021-02-24T23:18:48.000Z
|
example.py
|
sgraton/python-emploi-store
|
a9def8d40e7583b907eef7c486cd7170d108b02f
|
[
"MIT"
] | 10 |
2016-07-21T14:07:31.000Z
|
2020-09-17T08:19:01.000Z
|
"""An example server to use emploi_store library.
This runs a HTTP server locally with a simple page that helps retrieve the job
names for a given ROME ID.
To run it, you will need to set your environment variable:
EMPLOI_STORE_CLIENT_ID and EMPLOI_STORE_CLIENT_SECRET.
See documentation about accessing the REST API at
https://www.emploi-store-dev.fr/portail-developpeur/donneesdoctechnique
There are few environment variables that allow you to specify how to run the
server:
- DEBUG: set it to 1 to turn on debug mode.
- PORT: set it to the port you want the server to listen on.
- BIND_HOST: set it to 0.0.0.0 to listen on all interfaces.
"""
import os
import re
import emploi_store
import flask
app = flask.Flask(__name__) # pylint: disable=invalid-name
# Access to the ROME appellations resource on Emploi Store Dev.
_ROME_APPELLATIONS = (
emploi_store.Client()
.get_package('rome')
.get_resource(name_re=re.compile(r'.*appellations.*')))
@app.route("/")
def main():
"""Homepage."""
page = (
'<form action=".">'
'<input name="rome" placeholder="ROME code, e.g. F1402"/>'
'</form>')
rome = flask.request.args.get('rome', '')
if rome:
page += '<ul>'
filters = {'ROME_PROFESSION_CARD_CODE': rome}
for appellation in _ROME_APPELLATIONS.records(filters=filters):
page += '<li>%s</li>' % appellation['ROME_PROFESSION_NAME']
page += '</ul>'
return page
if __name__ == '__main__':
app.run(
debug=bool(os.getenv('DEBUG')),
host=os.getenv('BIND_HOST', 'localhost'),
port=int(os.getenv('PORT', '80')))
| 30.407407 | 78 | 0.67296 |
4a1cbf5e62794989671d98bcaf3bf9515e7d5ce8
| 922 |
py
|
Python
|
setup.py
|
IAmGadget/Pythactyl
|
b0cca838d434e8ceba1a90328708b9cde3d9fbcc
|
[
"MIT"
] | 2 |
2021-07-13T21:43:44.000Z
|
2021-09-06T08:05:35.000Z
|
setup.py
|
IAmGadget/Pythactyl
|
b0cca838d434e8ceba1a90328708b9cde3d9fbcc
|
[
"MIT"
] | 1 |
2021-09-11T16:08:43.000Z
|
2021-10-30T16:10:56.000Z
|
setup.py
|
IAmGadget/Pythactyl
|
b0cca838d434e8ceba1a90328708b9cde3d9fbcc
|
[
"MIT"
] | null | null | null |
import pathlib
from setuptools import setup
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
# This call to setup() does all the work
setup(
name="Pythactyl",
version="1.01",
description="Pterodactyl panel API wrapper",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/iamgadget/Pythactyl",
author="IAmGadget",
author_email="info@iamgadget.tk",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
],
packages=["Pythactyl"],
include_package_data=True,
install_requires=["requests"],
)
| 26.342857 | 50 | 0.652928 |
4a1cc01c98f6bc7bd9ea13b07c75d957083a7bbd
| 1,554 |
py
|
Python
|
mhvdb2/models.py
|
jamiereid/mhvdb2
|
e3e9dcb149c4e0b4dc4eb8e28f082aa03e07383c
|
[
"MIT"
] | 2 |
2015-04-11T09:45:41.000Z
|
2015-06-16T03:59:48.000Z
|
mhvdb2/models.py
|
jamiereid/mhvdb2
|
e3e9dcb149c4e0b4dc4eb8e28f082aa03e07383c
|
[
"MIT"
] | 32 |
2015-01-27T05:39:41.000Z
|
2019-11-10T09:57:27.000Z
|
mhvdb2/models.py
|
jamiereid/mhvdb2
|
e3e9dcb149c4e0b4dc4eb8e28f082aa03e07383c
|
[
"MIT"
] | 2 |
2015-04-11T09:38:46.000Z
|
2015-04-14T08:01:49.000Z
|
from mhvdb2 import database
from peewee import * # noqa
from peewee import CharField, BooleanField, DateField, DateTimeField, Model
from dateutil.relativedelta import relativedelta
from datetime import datetime
class BaseModel(Model):
class Meta:
database = database
class User(BaseModel):
"""
A User is someone who has special access to the system that requires
a login (only administrators, in this case)
"""
name = CharField()
email = CharField()
password = CharField()
class Entity(BaseModel):
"""
    An Entity sends money to the organisation or receives money from the
    organisation. Members are a special type of entity.
"""
is_member = BooleanField() # Is the entity a member (past or present)
name = CharField()
email = CharField(null=True) # Email is required for members
phone = CharField(null=True)
reminder_date = DateField(null=True) # When reminder has been sent to member
joined_date = DateField(null=True) # date the person first joined
agreement_date = DateField(null=True) # date the person agreed to rules
is_keyholder = BooleanField(null=True) # Does the member have a key?
token = CharField(null=True) # to authenticate members via email
token_expiry = DateTimeField(null=True) # expiry for the token
def active_member(self):
one_year_ago = (datetime.now() - relativedelta(years=1)).date()
        return self.agreement_date > one_year_ago
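# A minimal usage sketch (illustrative values only, not part of the original
# module): create the tables and check whether a freshly-registered member
# counts as active.
def _membership_sketch():
    database.create_tables([User, Entity], safe=True)
    member = Entity.create(
        is_member=True,
        name="Example Member",
        email="member@example.com",
        joined_date=datetime.now().date(),
        agreement_date=datetime.now().date(),
    )
    return member.active_member()  # True: the agreement is less than a year old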
| 34.533333 | 81 | 0.69112 |
4a1cc03400296b17da0b54af30afa59cbfcfb6bb
| 388 |
py
|
Python
|
wmtmetadata/utils.py
|
csdms/wmt-metadata
|
39207acc376f1cd21b2ae1d5581a1e2c317a6441
|
[
"MIT"
] | null | null | null |
wmtmetadata/utils.py
|
csdms/wmt-metadata
|
39207acc376f1cd21b2ae1d5581a1e2c317a6441
|
[
"MIT"
] | 10 |
2016-09-27T21:13:22.000Z
|
2018-10-31T19:42:32.000Z
|
wmtmetadata/utils.py
|
csdms/wmt-metadata
|
39207acc376f1cd21b2ae1d5581a1e2c317a6441
|
[
"MIT"
] | null | null | null |
"""Utility routines for working with WMT components."""
import os
# See https://stackoverflow.com/a/21499676/1563298
def commonpath(l):
cp = []
ls = [p.split(os.path.sep) for p in l]
ml = min(len(p) for p in ls)
for i in range(ml):
s = set(p[i] for p in ls)
if len(s) != 1:
break
cp.append(s.pop())
return os.path.sep.join(cp)
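# Usage sketch (illustrative paths, added for documentation): the common
# prefix of two install paths is their shared directory.
def _commonpath_example():
    # Returns '/usr/local' on a POSIX system.
    return commonpath(["/usr/local/bin", "/usr/local/lib/python3"])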
| 20.421053 | 55 | 0.564433 |
4a1cc092e22482b446eb2fdc8988e60a8e82e3cd
| 3,025 |
py
|
Python
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_09_30/models/__init__.py
|
pjquirk/azure-sdk-for-python
|
cbf02ec4f177b96eae1dbbba87c34c2c93880150
|
[
"MIT"
] | 1 |
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_09_30/models/__init__.py
|
pjquirk/azure-sdk-for-python
|
cbf02ec4f177b96eae1dbbba87c34c2c93880150
|
[
"MIT"
] | 2 |
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
azure-mgmt-compute/azure/mgmt/compute/v2018_09_30/models/__init__.py
|
xiafu-msft/azure-sdk-for-python
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
[
"MIT"
] | 1 |
2019-06-17T22:18:23.000Z
|
2019-06-17T22:18:23.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
try:
from .resource_py3 import Resource
from .disk_sku_py3 import DiskSku
from .image_disk_reference_py3 import ImageDiskReference
from .creation_data_py3 import CreationData
from .source_vault_py3 import SourceVault
from .key_vault_and_secret_reference_py3 import KeyVaultAndSecretReference
from .key_vault_and_key_reference_py3 import KeyVaultAndKeyReference
from .encryption_settings_element_py3 import EncryptionSettingsElement
from .encryption_settings_collection_py3 import EncryptionSettingsCollection
from .disk_py3 import Disk
from .disk_update_py3 import DiskUpdate
from .snapshot_sku_py3 import SnapshotSku
from .grant_access_data_py3 import GrantAccessData
from .access_uri_py3 import AccessUri
from .snapshot_py3 import Snapshot
from .snapshot_update_py3 import SnapshotUpdate
except (SyntaxError, ImportError):
from .resource import Resource
from .disk_sku import DiskSku
from .image_disk_reference import ImageDiskReference
from .creation_data import CreationData
from .source_vault import SourceVault
from .key_vault_and_secret_reference import KeyVaultAndSecretReference
from .key_vault_and_key_reference import KeyVaultAndKeyReference
from .encryption_settings_element import EncryptionSettingsElement
from .encryption_settings_collection import EncryptionSettingsCollection
from .disk import Disk
from .disk_update import DiskUpdate
from .snapshot_sku import SnapshotSku
from .grant_access_data import GrantAccessData
from .access_uri import AccessUri
from .snapshot import Snapshot
from .snapshot_update import SnapshotUpdate
from .disk_paged import DiskPaged
from .snapshot_paged import SnapshotPaged
from .compute_management_client_enums import (
DiskStorageAccountTypes,
OperatingSystemTypes,
HyperVGeneration,
DiskCreateOption,
DiskState,
SnapshotStorageAccountTypes,
AccessLevel,
)
__all__ = [
'Resource',
'DiskSku',
'ImageDiskReference',
'CreationData',
'SourceVault',
'KeyVaultAndSecretReference',
'KeyVaultAndKeyReference',
'EncryptionSettingsElement',
'EncryptionSettingsCollection',
'Disk',
'DiskUpdate',
'SnapshotSku',
'GrantAccessData',
'AccessUri',
'Snapshot',
'SnapshotUpdate',
'DiskPaged',
'SnapshotPaged',
'DiskStorageAccountTypes',
'OperatingSystemTypes',
'HyperVGeneration',
'DiskCreateOption',
'DiskState',
'SnapshotStorageAccountTypes',
'AccessLevel',
]
| 35.588235 | 80 | 0.737851 |
4a1cc099efed88b43379ed0fcd85957d6858d5f0
| 9,109 |
py
|
Python
|
strawberryfields/backends/gaussianbackend/backend.py
|
trbromley/strawberryfields
|
b0792f6cad19cea1e60df89f22776f6e02191a1c
|
[
"Apache-2.0"
] | null | null | null |
strawberryfields/backends/gaussianbackend/backend.py
|
trbromley/strawberryfields
|
b0792f6cad19cea1e60df89f22776f6e02191a1c
|
[
"Apache-2.0"
] | null | null | null |
strawberryfields/backends/gaussianbackend/backend.py
|
trbromley/strawberryfields
|
b0792f6cad19cea1e60df89f22776f6e02191a1c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=too-many-public-methods
"""Gaussian backend"""
import warnings
from numpy import (
empty,
concatenate,
array,
identity,
arctan2,
angle,
sqrt,
dot,
vstack,
zeros_like,
allclose,
ix_,
)
from numpy.linalg import inv
from hafnian.samples import hafnian_sample_state
from strawberryfields.backends import BaseGaussian
from strawberryfields.backends.shared_ops import changebasis
from .ops import xmat
from .gaussiancircuit import GaussianModes
from .states import GaussianState
class GaussianBackend(BaseGaussian):
"""Gaussian backend implementation"""
def __init__(self):
"""Initialize the backend.
"""
super().__init__()
self._supported["mixed_states"] = True
self._short_name = "gaussian"
self._init_modes = None
self.circuit = None
def begin_circuit(self, num_subsystems, **kwargs):
self._init_modes = num_subsystems
self.circuit = GaussianModes(num_subsystems)
def add_mode(self, n=1):
self.circuit.add_mode(n)
def del_mode(self, modes):
self.circuit.del_mode(modes)
def get_modes(self):
return self.circuit.get_modes()
def reset(self, pure=True, **kwargs):
self.circuit.reset(self._init_modes)
def prepare_thermal_state(self, nbar, mode):
self.circuit.init_thermal(nbar, mode)
def prepare_vacuum_state(self, mode):
self.circuit.loss(0.0, mode)
def prepare_coherent_state(self, alpha, mode):
self.circuit.loss(0.0, mode)
self.circuit.displace(alpha, mode)
def prepare_squeezed_state(self, r, phi, mode):
self.circuit.loss(0.0, mode)
self.circuit.squeeze(r, phi, mode)
def prepare_displaced_squeezed_state(self, alpha, r, phi, mode):
self.circuit.loss(0.0, mode)
self.circuit.squeeze(r, phi, mode)
self.circuit.displace(alpha, mode)
def rotation(self, phi, mode):
self.circuit.phase_shift(phi, mode)
def displacement(self, alpha, mode):
self.circuit.displace(alpha, mode)
def squeeze(self, z, mode):
phi = angle(z)
r = abs(z)
self.circuit.squeeze(r, phi, mode)
def beamsplitter(self, t, r, mode1, mode2):
if isinstance(t, complex):
raise ValueError("Beamsplitter transmittivity t must be a float.")
theta = arctan2(abs(r), t)
phi = angle(r)
self.circuit.beamsplitter(-theta, -phi, mode1, mode2)
def measure_homodyne(self, phi, mode, shots=1, select=None, **kwargs):
r"""Measure a :ref:`phase space quadrature <homodyne>` of the given mode.
See :meth:`.BaseBackend.measure_homodyne`.
Keyword Args:
eps (float): Homodyne amounts to projection onto a quadrature eigenstate.
This eigenstate is approximated by a squeezed state whose variance has been
squeezed to the amount ``eps``, :math:`V_\text{meas} = \texttt{eps}^2`.
Perfect homodyning is obtained when ``eps`` :math:`\to 0`.
Returns:
float: measured value
"""
if shots != 1:
if select is not None:
raise NotImplementedError("Gaussian backend currently does not support "
"postselection if shots != 1 for homodyne measurement")
raise NotImplementedError("Gaussian backend currently does not support "
"shots != 1 for homodyne measurement")
# phi is the rotation of the measurement operator, hence the minus
self.circuit.phase_shift(-phi, mode)
if select is None:
qs = self.circuit.homodyne(mode, **kwargs)[0, 0]
else:
val = select * 2 / sqrt(2 * self.circuit.hbar)
qs = self.circuit.post_select_homodyne(mode, val, **kwargs)
return qs * sqrt(2 * self.circuit.hbar) / 2
def measure_heterodyne(self, mode, shots=1, select=None):
if shots != 1:
if select is not None:
raise NotImplementedError("Gaussian backend currently does not support "
"postselection if shots != 1 for heterodyne measurement")
raise NotImplementedError("Gaussian backend currently does not support "
"shots != 1 for heterodyne measurement")
if select is None:
m = identity(2)
res = 0.5 * self.circuit.measure_dyne(m, [mode], shots=shots)
return res[0, 0] + 1j * res[0, 1]
res = select
self.circuit.post_select_heterodyne(mode, select)
return res
def prepare_gaussian_state(self, r, V, modes):
if isinstance(modes, int):
modes = [modes]
# make sure number of modes matches shape of r and V
N = len(modes)
if len(r) != 2 * N:
raise ValueError(
"Length of means vector must be twice the number of modes."
)
if V.shape != (2 * N, 2 * N):
raise ValueError(
"Shape of covariance matrix must be [2N, 2N], where N is the number of modes."
)
# convert xp-ordering to symmetric ordering
means = vstack([r[:N], r[N:]]).reshape(-1, order="F")
C = changebasis(N)
cov = C @ V @ C.T
self.circuit.fromscovmat(cov, modes)
self.circuit.fromsmean(means, modes)
def is_vacuum(self, tol=0.0, **kwargs):
return self.circuit.is_vacuum(tol)
def loss(self, T, mode):
self.circuit.loss(T, mode)
def thermal_loss(self, T, nbar, mode):
self.circuit.thermal_loss(T, nbar, mode)
def measure_fock(self, modes, shots=1, select=None):
if shots != 1:
if select is not None:
raise NotImplementedError("Gaussian backend currently does not support "
"postselection if shots != 1 for Fock measurement")
warnings.warn("Cannot simulate non-Gaussian states. "
"Conditional state after Fock measurement has not been updated.")
mu = self.circuit.mean
cov = self.circuit.scovmatxp()
# check we are sampling from a gaussian state with zero mean
if not allclose(mu, zeros_like(mu)):
raise NotImplementedError("PNR measurement is only supported for "
"Gaussian states with zero mean")
x_idxs = array(modes)
p_idxs = x_idxs + len(mu)
modes_idxs = concatenate([x_idxs, p_idxs])
reduced_cov = cov[ix_(modes_idxs, modes_idxs)]
samples = hafnian_sample_state(reduced_cov, shots)
# for backward compatibility with previous measurement behaviour,
# if only one shot, then we drop the shots axis
if shots == 1:
samples = samples.reshape((len(modes),))
return samples
def state(self, modes=None, **kwargs):
"""Returns the state of the quantum simulation.
See :meth:`.BaseBackend.state`.
Returns:
GaussianState: state description
"""
m = self.circuit.scovmat()
r = self.circuit.smean()
if modes is None:
modes = list(range(len(self.get_modes())))
listmodes = list(concatenate((2 * array(modes), 2 * array(modes) + 1)))
covmat = empty((2 * len(modes), 2 * len(modes)))
means = r[listmodes]
for i, ii in enumerate(listmodes):
for j, jj in enumerate(listmodes):
covmat[i, j] = m[ii, jj]
means *= sqrt(2 * self.circuit.hbar) / 2
covmat *= self.circuit.hbar / 2
mode_names = ["q[{}]".format(i) for i in array(self.get_modes())[modes]]
# qmat and amat
qmat = self.circuit.qmat()
N = qmat.shape[0] // 2
# work out if qmat and Amat need to be reduced
if 1 <= len(modes) < N:
# reduce qmat
ind = concatenate([array(modes), N + array(modes)])
rows = ind.reshape((-1, 1))
cols = ind.reshape((1, -1))
qmat = qmat[rows, cols]
# calculate reduced Amat
N = qmat.shape[0] // 2
Amat = dot(xmat(N), identity(2 * N) - inv(qmat))
else:
Amat = self.circuit.Amat()
return GaussianState(
(means, covmat), len(modes), qmat, Amat, mode_names=mode_names
)
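# A minimal driving sketch (illustrative only): the backend is normally used
# through the strawberryfields Engine front end, but the methods above can
# also be called directly, e.g. to inspect the Gaussian state it builds.
if __name__ == "__main__":
    backend = GaussianBackend()
    backend.begin_circuit(num_subsystems=2)
    backend.prepare_coherent_state(0.5 + 0.2j, 0)
    backend.squeeze(0.1, 1)
    backend.beamsplitter(0.7071, 0.7071, 0, 1)
    print(backend.state(modes=[0, 1]))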
| 34.244361 | 99 | 0.598968 |
4a1cc0d855b3cefc120709468dc5c97ef4e61c32
| 41,427 |
py
|
Python
|
show/main.py
|
fk410167/sonic-utilities
|
e32b5ac4b33235723b220d5c97981f22d0823f45
|
[
"Apache-2.0"
] | null | null | null |
show/main.py
|
fk410167/sonic-utilities
|
e32b5ac4b33235723b220d5c97981f22d0823f45
|
[
"Apache-2.0"
] | null | null | null |
show/main.py
|
fk410167/sonic-utilities
|
e32b5ac4b33235723b220d5c97981f22d0823f45
|
[
"Apache-2.0"
] | null | null | null |
import json
import os
import subprocess
import sys
import re
import click
import utilities_common.cli as clicommon
import utilities_common.multi_asic as multi_asic_util
from natsort import natsorted
from sonic_py_common import device_info, multi_asic
from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector
from tabulate import tabulate
from utilities_common.db import Db
from . import acl
from . import bgp_common
from . import chassis_modules
from . import dropcounters
from . import feature
from . import fgnhg
from . import gearbox
from . import interfaces
from . import kdump
from . import kube
from . import mlnx
from . import muxcable
from . import nat
from . import platform
from . import processes
from . import reboot_cause
from . import sflow
from . import vlan
from . import vnet
from . import vxlan
from . import system_health
from . import warm_restart
# Global Variables
PLATFORM_JSON = 'platform.json'
HWSKU_JSON = 'hwsku.json'
PORT_STR = "Ethernet"
VLAN_SUB_INTERFACE_SEPARATOR = '.'
GEARBOX_TABLE_PHY_PATTERN = r"_GEARBOX_TABLE:phy:*"
# To be enhanced. Routing-stack information should be collected from a global
# location (configdb?), so that we prevent the continuous execution of this
# bash oneliner. To be revisited once routing-stack info is tracked somewhere.
def get_routing_stack():
command = "sudo docker ps | grep bgp | awk '{print$2}' | cut -d'-' -f3 | cut -d':' -f1 | head -n 1"
try:
proc = subprocess.Popen(command,
stdout=subprocess.PIPE,
shell=True,
text=True)
stdout = proc.communicate()[0]
proc.wait()
result = stdout.rstrip('\n')
except OSError as e:
raise OSError("Cannot detect routing-stack")
return (result)
# Global Routing-Stack variable
routing_stack = get_routing_stack()
# Read given JSON file
def readJsonFile(fileName):
try:
with open(fileName) as f:
result = json.load(f)
except Exception as e:
click.echo(str(e))
raise click.Abort()
return result
def run_command(command, display_cmd=False, return_cmd=False):
if display_cmd:
click.echo(click.style("Command: ", fg='cyan') + click.style(command, fg='green'))
# No conversion needed for intfutil commands as it already displays
# both SONiC interface name and alias name for all interfaces.
if clicommon.get_interface_naming_mode() == "alias" and not command.startswith("intfutil"):
clicommon.run_command_in_alias_mode(command)
        sys.exit(0)
proc = subprocess.Popen(command, shell=True, text=True, stdout=subprocess.PIPE)
while True:
if return_cmd:
output = proc.communicate()[0]
return output
output = proc.stdout.readline()
if output == "" and proc.poll() is not None:
break
if output:
click.echo(output.rstrip('\n'))
rc = proc.poll()
if rc != 0:
sys.exit(rc)
# Global class instance for SONiC interface name to alias conversion
iface_alias_converter = clicommon.InterfaceAliasConverter()
def connect_config_db():
"""
Connects to config_db
"""
config_db = ConfigDBConnector()
config_db.connect()
return config_db
def is_gearbox_configured():
"""
Checks whether Gearbox is configured or not
"""
app_db = SonicV2Connector()
app_db.connect(app_db.APPL_DB)
keys = app_db.keys(app_db.APPL_DB, '*')
    # If any _GEARBOX_TABLE:phy:* records are present in APPL_DB, then the gearbox is configured
if any(re.match(GEARBOX_TABLE_PHY_PATTERN, key) for key in keys):
return True
else:
return False
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help', '-?'])
#
# 'cli' group (root group)
#
# This is our entrypoint - the main "show" command
# TODO: Consider changing function name to 'show' for better understandability
@click.group(cls=clicommon.AliasedGroup, context_settings=CONTEXT_SETTINGS)
@click.pass_context
def cli(ctx):
"""SONiC command line - 'show' command"""
ctx.obj = Db()
# Add groups from other modules
cli.add_command(acl.acl)
cli.add_command(chassis_modules.chassis_modules)
cli.add_command(dropcounters.dropcounters)
cli.add_command(feature.feature)
cli.add_command(fgnhg.fgnhg)
cli.add_command(kdump.kdump)
cli.add_command(interfaces.interfaces)
cli.add_command(kube.kubernetes)
cli.add_command(muxcable.muxcable)
cli.add_command(nat.nat)
cli.add_command(platform.platform)
cli.add_command(processes.processes)
cli.add_command(reboot_cause.reboot_cause)
cli.add_command(sflow.sflow)
cli.add_command(vlan.vlan)
cli.add_command(vnet.vnet)
cli.add_command(vxlan.vxlan)
cli.add_command(system_health.system_health)
cli.add_command(warm_restart.warm_restart)
# Add gearbox commands only if GEARBOX is configured
if is_gearbox_configured():
cli.add_command(gearbox.gearbox)
#
# 'vrf' command ("show vrf")
#
def get_interface_bind_to_vrf(config_db, vrf_name):
"""Get interfaces belong to vrf
"""
tables = ['INTERFACE', 'PORTCHANNEL_INTERFACE', 'VLAN_INTERFACE', 'LOOPBACK_INTERFACE']
data = []
for table_name in tables:
interface_dict = config_db.get_table(table_name)
if interface_dict:
for interface in interface_dict:
if 'vrf_name' in interface_dict[interface] and vrf_name == interface_dict[interface]['vrf_name']:
data.append(interface)
return data
@cli.command()
@click.argument('vrf_name', required=False)
def vrf(vrf_name):
"""Show vrf config"""
config_db = ConfigDBConnector()
config_db.connect()
header = ['VRF', 'Interfaces']
body = []
vrf_dict = config_db.get_table('VRF')
if vrf_dict:
vrfs = []
if vrf_name is None:
vrfs = list(vrf_dict.keys())
elif vrf_name in vrf_dict:
vrfs = [vrf_name]
for vrf in vrfs:
intfs = get_interface_bind_to_vrf(config_db, vrf)
if len(intfs) == 0:
body.append([vrf, ""])
else:
body.append([vrf, intfs[0]])
for intf in intfs[1:]:
body.append(["", intf])
click.echo(tabulate(body, header))
#
# 'arp' command ("show arp")
#
@cli.command()
@click.argument('ipaddress', required=False)
@click.option('-if', '--iface')
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def arp(ipaddress, iface, verbose):
"""Show IP ARP table"""
cmd = "nbrshow -4"
if ipaddress is not None:
cmd += " -ip {}".format(ipaddress)
if iface is not None:
if clicommon.get_interface_naming_mode() == "alias":
if not ((iface.startswith("PortChannel")) or
(iface.startswith("eth"))):
iface = iface_alias_converter.alias_to_name(iface)
cmd += " -if {}".format(iface)
run_command(cmd, display_cmd=verbose)
#
# 'ndp' command ("show ndp")
#
@cli.command()
@click.argument('ip6address', required=False)
@click.option('-if', '--iface')
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def ndp(ip6address, iface, verbose):
"""Show IPv6 Neighbour table"""
cmd = "nbrshow -6"
if ip6address is not None:
cmd += " -ip {}".format(ip6address)
if iface is not None:
cmd += " -if {}".format(iface)
run_command(cmd, display_cmd=verbose)
def is_mgmt_vrf_enabled(ctx):
"""Check if management VRF is enabled"""
if ctx.invoked_subcommand is None:
cmd = 'sonic-cfggen -d --var-json "MGMT_VRF_CONFIG"'
p = subprocess.Popen(cmd, shell=True, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try :
mvrf_dict = json.loads(p.stdout.read())
except ValueError:
print("MGMT_VRF_CONFIG is not present.")
return False
# if the mgmtVrfEnabled attribute is configured, check the value
# and return True accordingly.
if 'mgmtVrfEnabled' in mvrf_dict['vrf_global']:
if (mvrf_dict['vrf_global']['mgmtVrfEnabled'] == "true"):
#ManagementVRF is enabled. Return True.
return True
return False
#
# 'mgmt-vrf' group ("show mgmt-vrf ...")
#
@cli.group('mgmt-vrf', invoke_without_command=True)
@click.argument('routes', required=False, type=click.Choice(["routes"]))
@click.pass_context
def mgmt_vrf(ctx,routes):
"""Show management VRF attributes"""
if is_mgmt_vrf_enabled(ctx) is False:
click.echo("\nManagementVRF : Disabled")
return
else:
if routes is None:
click.echo("\nManagementVRF : Enabled")
click.echo("\nManagement VRF interfaces in Linux:")
cmd = "ip -d link show mgmt"
run_command(cmd)
cmd = "ip link show vrf mgmt"
run_command(cmd)
else:
click.echo("\nRoutes in Management VRF Routing Table:")
cmd = "ip route show table 5000"
run_command(cmd)
#
# 'management_interface' group ("show management_interface ...")
#
@cli.group(name='management_interface', cls=clicommon.AliasedGroup)
def management_interface():
"""Show management interface parameters"""
pass
# 'address' subcommand ("show management_interface address")
@management_interface.command()
def address ():
"""Show IP address configured for management interface"""
config_db = ConfigDBConnector()
config_db.connect()
# Fetching data from config_db for MGMT_INTERFACE
mgmt_ip_data = config_db.get_table('MGMT_INTERFACE')
for key in natsorted(list(mgmt_ip_data.keys())):
click.echo("Management IP address = {0}".format(key[1]))
click.echo("Management Network Default Gateway = {0}".format(mgmt_ip_data[key]['gwaddr']))
#
# 'snmpagentaddress' group ("show snmpagentaddress ...")
#
@cli.group('snmpagentaddress', invoke_without_command=True)
@click.pass_context
def snmpagentaddress (ctx):
"""Show SNMP agent listening IP address configuration"""
config_db = ConfigDBConnector()
config_db.connect()
agenttable = config_db.get_table('SNMP_AGENT_ADDRESS_CONFIG')
header = ['ListenIP', 'ListenPort', 'ListenVrf']
body = []
for agent in agenttable:
body.append([agent[0], agent[1], agent[2]])
click.echo(tabulate(body, header))
#
# 'snmptrap' group ("show snmptrap ...")
#
@cli.group('snmptrap', invoke_without_command=True)
@click.pass_context
def snmptrap (ctx):
"""Show SNMP agent Trap server configuration"""
config_db = ConfigDBConnector()
config_db.connect()
traptable = config_db.get_table('SNMP_TRAP_CONFIG')
header = ['Version', 'TrapReceiverIP', 'Port', 'VRF', 'Community']
body = []
for row in traptable:
if row == "v1TrapDest":
ver=1
elif row == "v2TrapDest":
ver=2
else:
ver=3
body.append([ver, traptable[row]['DestIp'], traptable[row]['DestPort'], traptable[row]['vrf'], traptable[row]['Community']])
click.echo(tabulate(body, header))
#
# 'subinterfaces' group ("show subinterfaces ...")
#
@cli.group(cls=clicommon.AliasedGroup)
def subinterfaces():
"""Show details of the sub port interfaces"""
pass
# 'subinterfaces' subcommand ("show subinterfaces status")
@subinterfaces.command()
@click.argument('subinterfacename', type=str, required=False)
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def status(subinterfacename, verbose):
"""Show sub port interface status information"""
cmd = "intfutil -c status"
if subinterfacename is not None:
sub_intf_sep_idx = subinterfacename.find(VLAN_SUB_INTERFACE_SEPARATOR)
if sub_intf_sep_idx == -1:
print("Invalid sub port interface name")
return
if clicommon.get_interface_naming_mode() == "alias":
subinterfacename = iface_alias_converter.alias_to_name(subinterfacename)
cmd += " -i {}".format(subinterfacename)
else:
cmd += " -i subport"
run_command(cmd, display_cmd=verbose)
#
# 'pfc' group ("show pfc ...")
#
@cli.group(cls=clicommon.AliasedGroup)
def pfc():
"""Show details of the priority-flow-control (pfc) """
pass
# 'counters' subcommand ("show interfaces pfccounters")
@pfc.command()
@multi_asic_util.multi_asic_click_options
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def counters(namespace, display, verbose):
"""Show pfc counters"""
cmd = "pfcstat -s {}".format(display)
if namespace is not None:
cmd += " -n {}".format(namespace)
run_command(cmd, display_cmd=verbose)
@pfc.command()
@click.argument('interface', type=click.STRING, required=False)
def priority(interface):
"""Show pfc priority"""
cmd = 'pfc show priority'
if interface is not None and clicommon.get_interface_naming_mode() == "alias":
interface = iface_alias_converter.alias_to_name(interface)
if interface is not None:
cmd += ' {0}'.format(interface)
run_command(cmd)
@pfc.command()
@click.argument('interface', type=click.STRING, required=False)
def asymmetric(interface):
"""Show asymmetric pfc"""
cmd = 'pfc show asymmetric'
if interface is not None and clicommon.get_interface_naming_mode() == "alias":
interface = iface_alias_converter.alias_to_name(interface)
if interface is not None:
cmd += ' {0}'.format(interface)
run_command(cmd)
# 'pfcwd' subcommand ("show pfcwd...")
@cli.group(cls=clicommon.AliasedGroup)
def pfcwd():
"""Show details of the pfc watchdog """
pass
@pfcwd.command()
@multi_asic_util.multi_asic_click_options
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def config(namespace, display, verbose):
"""Show pfc watchdog config"""
cmd = "pfcwd show config -d {}".format(display)
if namespace is not None:
cmd += " -n {}".format(namespace)
run_command(cmd, display_cmd=verbose)
@pfcwd.command()
@multi_asic_util.multi_asic_click_options
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def stats(namespace, display, verbose):
"""Show pfc watchdog stats"""
cmd = "pfcwd show stats -d {}".format(display)
if namespace is not None:
cmd += " -n {}".format(namespace)
run_command(cmd, display_cmd=verbose)
#
# 'watermark' group ("show watermark telemetry interval")
#
@cli.group(cls=clicommon.AliasedGroup)
def watermark():
"""Show details of watermark """
pass
@watermark.group()
def telemetry():
"""Show watermark telemetry info"""
pass
@telemetry.command('interval')
def show_tm_interval():
"""Show telemetry interval"""
command = 'watermarkcfg --show-interval'
run_command(command)
#
# 'queue' group ("show queue ...")
#
@cli.group(cls=clicommon.AliasedGroup)
def queue():
"""Show details of the queues """
pass
# 'counters' subcommand ("show queue counters")
@queue.command()
@click.argument('interfacename', required=False)
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def counters(interfacename, verbose):
"""Show queue counters"""
cmd = "queuestat"
if interfacename is not None:
if clicommon.get_interface_naming_mode() == "alias":
interfacename = iface_alias_converter.alias_to_name(interfacename)
if interfacename is not None:
cmd += " -p {}".format(interfacename)
run_command(cmd, display_cmd=verbose)
#
# 'watermarks' subgroup ("show queue watermarks ...")
#
@queue.group()
def watermark():
"""Show user WM for queues"""
pass
# 'unicast' subcommand ("show queue watermarks unicast")
@watermark.command('unicast')
def wm_q_uni():
"""Show user WM for unicast queues"""
command = 'watermarkstat -t q_shared_uni'
run_command(command)
# 'multicast' subcommand ("show queue watermarks multicast")
@watermark.command('multicast')
def wm_q_multi():
"""Show user WM for multicast queues"""
command = 'watermarkstat -t q_shared_multi'
run_command(command)
#
# 'persistent-watermarks' subgroup ("show queue persistent-watermarks ...")
#
@queue.group(name='persistent-watermark')
def persistent_watermark():
"""Show persistent WM for queues"""
pass
# 'unicast' subcommand ("show queue persistent-watermarks unicast")
@persistent_watermark.command('unicast')
def pwm_q_uni():
"""Show persistent WM for unicast queues"""
command = 'watermarkstat -p -t q_shared_uni'
run_command(command)
# 'multicast' subcommand ("show queue persistent-watermarks multicast")
@persistent_watermark.command('multicast')
def pwm_q_multi():
"""Show persistent WM for multicast queues"""
command = 'watermarkstat -p -t q_shared_multi'
run_command(command)
#
# 'priority-group' group ("show priority-group ...")
#
@cli.group(name='priority-group', cls=clicommon.AliasedGroup)
def priority_group():
"""Show details of the PGs """
@priority_group.group()
def watermark():
"""Show priority-group user WM"""
pass
@watermark.command('headroom')
def wm_pg_headroom():
"""Show user headroom WM for pg"""
command = 'watermarkstat -t pg_headroom'
run_command(command)
@watermark.command('shared')
def wm_pg_shared():
"""Show user shared WM for pg"""
command = 'watermarkstat -t pg_shared'
run_command(command)
@priority_group.group(name='persistent-watermark')
def persistent_watermark():
"""Show priority-group persistent WM"""
pass
@persistent_watermark.command('headroom')
def pwm_pg_headroom():
"""Show persistent headroom WM for pg"""
command = 'watermarkstat -p -t pg_headroom'
run_command(command)
@persistent_watermark.command('shared')
def pwm_pg_shared():
"""Show persistent shared WM for pg"""
command = 'watermarkstat -p -t pg_shared'
run_command(command)
#
# 'buffer_pool' group ("show buffer_pool ...")
#
@cli.group(name='buffer_pool', cls=clicommon.AliasedGroup)
def buffer_pool():
"""Show details of the buffer pools"""
@buffer_pool.command('watermark')
def wm_buffer_pool():
"""Show user WM for buffer pools"""
command = 'watermarkstat -t buffer_pool'
run_command(command)
@buffer_pool.command('persistent-watermark')
def pwm_buffer_pool():
"""Show persistent WM for buffer pools"""
command = 'watermarkstat -p -t buffer_pool'
run_command(command)
#
# 'headroom-pool' group ("show headroom-pool ...")
#
@cli.group(name='headroom-pool', cls=clicommon.AliasedGroup)
def headroom_pool():
"""Show details of headroom pool"""
@headroom_pool.command('watermark')
def wm_headroom_pool():
"""Show user WM for headroom pool"""
command = 'watermarkstat -t headroom_pool'
run_command(command)
@headroom_pool.command('persistent-watermark')
def pwm_headroom_pool():
"""Show persistent WM for headroom pool"""
command = 'watermarkstat -p -t headroom_pool'
run_command(command)
#
# 'mac' command ("show mac ...")
#
@cli.command()
@click.option('-v', '--vlan')
@click.option('-p', '--port')
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def mac(vlan, port, verbose):
"""Show MAC (FDB) entries"""
cmd = "fdbshow"
if vlan is not None:
cmd += " -v {}".format(vlan)
if port is not None:
cmd += " -p {}".format(port)
run_command(cmd, display_cmd=verbose)
#
# 'show route-map' command ("show route-map")
#
@cli.command('route-map')
@click.argument('route_map_name', required=False)
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def route_map(route_map_name, verbose):
"""show route-map"""
cmd = 'sudo vtysh -c "show route-map'
if route_map_name is not None:
cmd += ' {}'.format(route_map_name)
cmd += '"'
run_command(cmd, display_cmd=verbose)
#
# 'ip' group ("show ip ...")
#
# This group houses IP (i.e., IPv4) commands and subgroups
@cli.group(cls=clicommon.AliasedGroup)
def ip():
"""Show IP (IPv4) commands"""
pass
#
# 'show ip interfaces' command
#
# Display all interfaces with their master, IPv4 address, admin/oper state, BGP neighbor name and peer IP.
# Addresses from all scopes are included. Interfaces with no addresses are
# excluded.
#
@ip.command()
@multi_asic_util.multi_asic_click_options
def interfaces(namespace, display):
cmd = "sudo ipintutil -a ipv4"
if namespace is not None:
cmd += " -n {}".format(namespace)
cmd += " -d {}".format(display)
clicommon.run_command(cmd)
#
# 'route' subcommand ("show ip route")
#
@ip.command()
@click.argument('args', metavar='[IPADDRESS] [vrf <vrf_name>] [...]', nargs=-1, required=False)
@click.option('--display', '-d', 'display', default=None, show_default=False, type=str, help='all|frontend')
@click.option('--namespace', '-n', 'namespace', default=None, type=str, show_default=False, help='Namespace name or all')
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def route(args, namespace, display, verbose):
"""Show IP (IPv4) routing table"""
# Call common handler to handle the show ip route cmd
bgp_common.show_routes(args, namespace, display, verbose, "ip")
#
# 'prefix-list' subcommand ("show ip prefix-list")
#
@ip.command('prefix-list')
@click.argument('prefix_list_name', required=False)
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def prefix_list(prefix_list_name, verbose):
"""show ip prefix-list"""
cmd = 'sudo vtysh -c "show ip prefix-list'
if prefix_list_name is not None:
cmd += ' {}'.format(prefix_list_name)
cmd += '"'
run_command(cmd, display_cmd=verbose)
# 'protocol' command
@ip.command()
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def protocol(verbose):
"""Show IPv4 protocol information"""
cmd = 'sudo vtysh -c "show ip protocol"'
run_command(cmd, display_cmd=verbose)
#
# 'ipv6' group ("show ipv6 ...")
#
# This group houses IPv6-related commands and subgroups
@cli.group(cls=clicommon.AliasedGroup)
def ipv6():
"""Show IPv6 commands"""
pass
#
# 'prefix-list' subcommand ("show ipv6 prefix-list")
#
@ipv6.command('prefix-list')
@click.argument('prefix_list_name', required=False)
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def prefix_list(prefix_list_name, verbose):
"""show ip prefix-list"""
cmd = 'sudo vtysh -c "show ipv6 prefix-list'
if prefix_list_name is not None:
cmd += ' {}'.format(prefix_list_name)
cmd += '"'
run_command(cmd, display_cmd=verbose)
#
# 'show ipv6 interfaces' command
#
# Display all interfaces with their master, IPv6 address, admin/oper state, BGP neighbor name and peer IP.
# Addresses from all scopes are included. Interfaces with no addresses are
# excluded.
#
@ipv6.command()
@multi_asic_util.multi_asic_click_options
def interfaces(namespace, display):
cmd = "sudo ipintutil -a ipv6"
if namespace is not None:
cmd += " -n {}".format(namespace)
cmd += " -d {}".format(display)
clicommon.run_command(cmd)
#
# 'route' subcommand ("show ipv6 route")
#
@ipv6.command()
@click.argument('args', metavar='[IPADDRESS] [vrf <vrf_name>] [...]', nargs=-1, required=False)
@click.option('--display', '-d', 'display', default=None, show_default=False, type=str, help='all|frontend')
@click.option('--namespace', '-n', 'namespace', default=None, type=str, show_default=False, help='Namespace name or all')
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def route(args, namespace, display, verbose):
"""Show IPv6 routing table"""
# Call common handler to handle the show ipv6 route cmd
bgp_common.show_routes(args, namespace, display, verbose, "ipv6")
# 'protocol' command
@ipv6.command()
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def protocol(verbose):
"""Show IPv6 protocol information"""
cmd = 'sudo vtysh -c "show ipv6 protocol"'
run_command(cmd, display_cmd=verbose)
#
# Inserting BGP functionality into cli's show parse-chain.
# BGP commands are determined by the routing-stack being elected.
#
if routing_stack == "quagga":
from .bgp_quagga_v4 import bgp
ip.add_command(bgp)
from .bgp_quagga_v6 import bgp
ipv6.add_command(bgp)
elif routing_stack == "frr":
from .bgp_frr_v4 import bgp
ip.add_command(bgp)
from .bgp_frr_v6 import bgp
ipv6.add_command(bgp)
#
# 'lldp' group ("show lldp ...")
#
@cli.group(cls=clicommon.AliasedGroup)
def lldp():
"""LLDP (Link Layer Discovery Protocol) information"""
pass
# Default 'lldp' command (called if no subcommands or their aliases were passed)
@lldp.command()
@click.argument('interfacename', required=False)
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def neighbors(interfacename, verbose):
"""Show LLDP neighbors"""
cmd = "sudo lldpshow -d"
if interfacename is not None:
if clicommon.get_interface_naming_mode() == "alias":
interfacename = iface_alias_converter.alias_to_name(interfacename)
cmd += " -p {}".format(interfacename)
run_command(cmd, display_cmd=verbose)
# 'table' subcommand ("show lldp table")
@lldp.command()
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def table(verbose):
"""Show LLDP neighbors in tabular format"""
cmd = "sudo lldpshow"
run_command(cmd, display_cmd=verbose)
#
# 'logging' command ("show logging")
#
@cli.command()
@click.argument('process', required=False)
@click.option('-l', '--lines')
@click.option('-f', '--follow', is_flag=True)
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def logging(process, lines, follow, verbose):
"""Show system log"""
if follow:
cmd = "sudo tail -F /var/log/syslog"
run_command(cmd, display_cmd=verbose)
else:
if os.path.isfile("/var/log/syslog.1"):
cmd = "sudo cat /var/log/syslog.1 /var/log/syslog"
else:
cmd = "sudo cat /var/log/syslog"
if process is not None:
cmd += " | grep '{}'".format(process)
if lines is not None:
cmd += " | tail -{}".format(lines)
run_command(cmd, display_cmd=verbose)
#
# 'version' command ("show version")
#
@cli.command()
@click.option("--verbose", is_flag=True, help="Enable verbose output")
def version(verbose):
"""Show version information"""
version_info = device_info.get_sonic_version_info()
platform = device_info.get_platform()
hwsku = device_info.get_hwsku()
asic_type = version_info['asic_type']
asic_count = multi_asic.get_num_asics()
serial_number_cmd = "sudo decode-syseeprom -s"
serial_number = subprocess.Popen(serial_number_cmd, shell=True, text=True, stdout=subprocess.PIPE)
sys_uptime_cmd = "uptime"
sys_uptime = subprocess.Popen(sys_uptime_cmd, shell=True, text=True, stdout=subprocess.PIPE)
click.echo("\nSONiC Software Version: SONiC.{}".format(version_info['build_version']))
click.echo("Distribution: Debian {}".format(version_info['debian_version']))
click.echo("Kernel: {}".format(version_info['kernel_version']))
click.echo("Build commit: {}".format(version_info['commit_id']))
click.echo("Build date: {}".format(version_info['build_date']))
click.echo("Built by: {}".format(version_info['built_by']))
click.echo("\nPlatform: {}".format(platform))
click.echo("HwSKU: {}".format(hwsku))
click.echo("ASIC: {}".format(asic_type))
click.echo("ASIC Count: {}".format(asic_count))
click.echo("Serial Number: {}".format(serial_number.stdout.read().strip()))
click.echo("Uptime: {}".format(sys_uptime.stdout.read().strip()))
click.echo("\nDocker images:")
cmd = 'sudo docker images --format "table {{.Repository}}\\t{{.Tag}}\\t{{.ID}}\\t{{.Size}}"'
p = subprocess.Popen(cmd, shell=True, text=True, stdout=subprocess.PIPE)
click.echo(p.stdout.read())
#
# 'environment' command ("show environment")
#
@cli.command()
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def environment(verbose):
"""Show environmentals (voltages, fans, temps)"""
cmd = "sudo sensors"
run_command(cmd, display_cmd=verbose)
#
# 'users' command ("show users")
#
@cli.command()
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def users(verbose):
"""Show users"""
cmd = "who"
run_command(cmd, display_cmd=verbose)
#
# 'techsupport' command ("show techsupport")
#
@cli.command()
@click.option('--since', required=False, help="Collect logs and core files since given date")
@click.option('-g', '--global-timeout', default=30, type=int, help="Global timeout in minutes. Default 30 mins")
@click.option('-c', '--cmd-timeout', default=5, type=int, help="Individual command timeout in minutes. Default 5 mins")
@click.option('--verbose', is_flag=True, help="Enable verbose output")
@click.option('--allow-process-stop', is_flag=True, help="Dump additional data which may require system interruption")
@click.option('--silent', is_flag=True, help="Run techsupport in silent mode")
def techsupport(since, global_timeout, cmd_timeout, verbose, allow_process_stop, silent):
"""Gather information for troubleshooting"""
cmd = "sudo timeout -s SIGTERM --foreground {}m".format(global_timeout)
if allow_process_stop:
cmd += " -a"
if silent:
cmd += " generate_dump"
click.echo("Techsupport is running with silent option. This command might take a long time.")
else:
cmd += " generate_dump -v"
if since:
cmd += " -s '{}'".format(since)
cmd += " -t {}".format(cmd_timeout)
run_command(cmd, display_cmd=verbose)
#
# 'runningconfiguration' group ("show runningconfiguration")
#
@cli.group(cls=clicommon.AliasedGroup)
def runningconfiguration():
"""Show current running configuration information"""
pass
# 'all' subcommand ("show runningconfiguration all")
@runningconfiguration.command()
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def all(verbose):
"""Show full running configuration"""
cmd = "sonic-cfggen -d --print-data"
run_command(cmd, display_cmd=verbose)
# 'acl' subcommand ("show runningconfiguration acl")
@runningconfiguration.command()
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def acl(verbose):
"""Show acl running configuration"""
cmd = "sonic-cfggen -d --var-json ACL_RULE"
run_command(cmd, display_cmd=verbose)
# 'ports' subcommand ("show runningconfiguration ports <portname>")
@runningconfiguration.command()
@click.argument('portname', required=False)
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def ports(portname, verbose):
"""Show ports running configuration"""
cmd = "sonic-cfggen -d --var-json PORT"
if portname is not None:
cmd += " {0} {1}".format("--key", portname)
run_command(cmd, display_cmd=verbose)
# 'bgp' subcommand ("show runningconfiguration bgp")
@runningconfiguration.command()
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def bgp(verbose):
"""Show BGP running configuration"""
cmd = 'sudo vtysh -c "show running-config"'
run_command(cmd, display_cmd=verbose)
# 'interfaces' subcommand ("show runningconfiguration interfaces")
@runningconfiguration.command()
@click.argument('interfacename', required=False)
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def interfaces(interfacename, verbose):
"""Show interfaces running configuration"""
cmd = "sonic-cfggen -d --var-json INTERFACE"
if interfacename is not None:
cmd += " {0} {1}".format("--key", interfacename)
run_command(cmd, display_cmd=verbose)
# 'snmp' subcommand ("show runningconfiguration snmp")
@runningconfiguration.command()
@click.argument('server', required=False)
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def snmp(server, verbose):
"""Show SNMP information"""
cmd = "sudo docker exec snmp cat /etc/snmp/snmpd.conf"
if server is not None:
cmd += " | grep -i agentAddress"
run_command(cmd, display_cmd=verbose)
# 'ntp' subcommand ("show runningconfiguration ntp")
@runningconfiguration.command()
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def ntp(verbose):
"""Show NTP running configuration"""
ntp_servers = []
ntp_dict = {}
with open("/etc/ntp.conf") as ntp_file:
data = ntp_file.readlines()
for line in data:
if line.startswith("server "):
ntp_server = line.split(" ")[1]
ntp_servers.append(ntp_server)
ntp_dict['NTP Servers'] = ntp_servers
print(tabulate(ntp_dict, headers=list(ntp_dict.keys()), tablefmt="simple", stralign='left', missingval=""))
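# Parsing example for the loop above: a line "server 10.1.1.1 iburst" contributes
# "10.1.1.1" to the 'NTP Servers' column (the second whitespace-separated token).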
# 'syslog' subcommand ("show runningconfiguration syslog")
@runningconfiguration.command()
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def syslog(verbose):
"""Show Syslog running configuration"""
syslog_servers = []
syslog_dict = {}
with open("/etc/rsyslog.conf") as syslog_file:
data = syslog_file.readlines()
for line in data:
if line.startswith("*.* @"):
line = line.split(":")
server = line[0][5:]
syslog_servers.append(server)
syslog_dict['Syslog Servers'] = syslog_servers
print(tabulate(syslog_dict, headers=list(syslog_dict.keys()), tablefmt="simple", stralign='left', missingval=""))
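# Parsing example for the loop above: a line "*.* @10.0.0.5:514" contributes "10.0.0.5"
# (the text after the 5-character "*.* @" prefix and before the ":port" suffix).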
#
# 'startupconfiguration' group ("show startupconfiguration ...")
#
@cli.group(cls=clicommon.AliasedGroup)
def startupconfiguration():
"""Show startup configuration information"""
pass
# 'bgp' subcommand ("show startupconfiguration bgp")
@startupconfiguration.command()
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def bgp(verbose):
"""Show BGP startup configuration"""
cmd = "sudo docker ps | grep bgp | awk '{print$2}' | cut -d'-' -f3 | cut -d':' -f1"
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True)
result = proc.stdout.read().rstrip()
click.echo("Routing-Stack is: {}".format(result))
if result == "quagga":
run_command('sudo docker exec bgp cat /etc/quagga/bgpd.conf', display_cmd=verbose)
elif result == "frr":
run_command('sudo docker exec bgp cat /etc/frr/bgpd.conf', display_cmd=verbose)
elif result == "gobgp":
        run_command('sudo docker exec bgp cat /etc/gobgp/bgpd.conf', display_cmd=verbose)
else:
click.echo("Unidentified routing-stack")
#
# 'ntp' command ("show ntp")
#
@cli.command()
@click.pass_context
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def ntp(ctx, verbose):
"""Show NTP information"""
from pkg_resources import parse_version
ntpstat_cmd = "ntpstat"
ntpcmd = "ntpq -p -n"
if is_mgmt_vrf_enabled(ctx) is True:
#ManagementVRF is enabled. Call ntpq using "ip vrf exec" or cgexec based on linux version
os_info = os.uname()
release = os_info[2].split('-')
if parse_version(release[0]) > parse_version("4.9.0"):
ntpstat_cmd = "sudo ip vrf exec mgmt ntpstat"
ntpcmd = "sudo ip vrf exec mgmt ntpq -p -n"
else:
ntpstat_cmd = "sudo cgexec -g l3mdev:mgmt ntpstat"
ntpcmd = "sudo cgexec -g l3mdev:mgmt ntpq -p -n"
run_command(ntpstat_cmd, display_cmd=verbose)
run_command(ntpcmd, display_cmd=verbose)
#
# 'uptime' command ("show uptime")
#
@cli.command()
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def uptime(verbose):
"""Show system uptime"""
cmd = "uptime -p"
run_command(cmd, display_cmd=verbose)
@cli.command()
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def clock(verbose):
"""Show date and time"""
cmd ="date"
run_command(cmd, display_cmd=verbose)
@cli.command('system-memory')
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def system_memory(verbose):
"""Show memory information"""
cmd = "free -m"
run_command(cmd, display_cmd=verbose)
@cli.command('services')
def services():
"""Show all daemon services"""
cmd = "sudo docker ps --format '{{.Names}}'"
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True)
while True:
line = proc.stdout.readline()
if line != '':
print(line.rstrip()+'\t'+"docker")
print("---------------------------")
cmd = "sudo docker exec {} ps aux | sed '$d'".format(line.rstrip())
proc1 = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True)
print(proc1.stdout.read())
else:
break
@cli.command()
def aaa():
"""Show AAA configuration"""
config_db = ConfigDBConnector()
config_db.connect()
data = config_db.get_table('AAA')
output = ''
aaa = {
'authentication': {
'login': 'local (default)',
'failthrough': 'False (default)'
}
}
if 'authentication' in data:
aaa['authentication'].update(data['authentication'])
for row in aaa:
entry = aaa[row]
for key in entry:
output += ('AAA %s %s %s\n' % (row, key, str(entry[key])))
click.echo(output)
@cli.command()
def tacacs():
"""Show TACACS+ configuration"""
config_db = ConfigDBConnector()
config_db.connect()
output = ''
data = config_db.get_table('TACPLUS')
tacplus = {
'global': {
'auth_type': 'pap (default)',
'timeout': '5 (default)',
'passkey': '<EMPTY_STRING> (default)'
}
}
if 'global' in data:
tacplus['global'].update(data['global'])
for key in tacplus['global']:
output += ('TACPLUS global %s %s\n' % (str(key), str(tacplus['global'][key])))
data = config_db.get_table('TACPLUS_SERVER')
if data != {}:
for row in data:
entry = data[row]
output += ('\nTACPLUS_SERVER address %s\n' % row)
for key in entry:
output += (' %s %s\n' % (key, str(entry[key])))
click.echo(output)
#
# 'mirror_session' command ("show mirror_session ...")
#
@cli.command('mirror_session')
@click.argument('session_name', required=False)
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def mirror_session(session_name, verbose):
"""Show existing everflow sessions"""
cmd = "acl-loader show session"
if session_name is not None:
cmd += " {}".format(session_name)
run_command(cmd, display_cmd=verbose)
#
# 'policer' command ("show policer ...")
#
@cli.command()
@click.argument('policer_name', required=False)
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def policer(policer_name, verbose):
"""Show existing policers"""
cmd = "acl-loader show policer"
if policer_name is not None:
cmd += " {}".format(policer_name)
run_command(cmd, display_cmd=verbose)
#
# 'ecn' command ("show ecn")
#
@cli.command('ecn')
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def ecn(verbose):
"""Show ECN configuration"""
cmd = "ecnconfig -l"
run_command(cmd, display_cmd=verbose)
#
# 'boot' command ("show boot")
#
@cli.command('boot')
def boot():
"""Show boot configuration"""
cmd = "sudo sonic-installer list"
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True)
click.echo(proc.stdout.read())
#
# 'mmu' command ("show mmu")
#
@cli.command('mmu')
def mmu():
"""Show mmu configuration"""
cmd = "mmuconfig -l"
run_command(cmd)
#
# 'buffer' command ("show buffer")
#
@cli.group(cls=clicommon.AliasedGroup)
def buffer():
"""Show buffer information"""
pass
#
# 'configuration' command ("show buffer configuration")
#
@buffer.command()
def configuration():
"""show buffer configuration"""
cmd = "mmuconfig -l"
run_command(cmd)
#
# 'information' command ("show buffer information")
#
@buffer.command()
def information():
"""show buffer information"""
cmd = "buffershow -l"
run_command(cmd)
#
# 'line' command ("show line")
#
@cli.command('line')
@click.option('--brief', '-b', metavar='<brief_mode>', required=False, is_flag=True)
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def line(brief, verbose):
"""Show all console lines and their info include available ttyUSB devices unless specified brief mode"""
cmd = "consutil show" + (" -b" if brief else "")
run_command(cmd, display_cmd=verbose)
return
#
# 'ztp status' command ("show ztp status")
#
@cli.command()
@click.argument('status', required=False, type=click.Choice(["status"]))
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def ztp(status, verbose):
"""Show Zero Touch Provisioning status"""
if os.path.isfile('/usr/bin/ztp') is False:
exit("ZTP feature unavailable in this image version")
cmd = "ztp status"
if verbose:
cmd = cmd + " --verbose"
run_command(cmd, display_cmd=verbose)
if __name__ == '__main__':
cli()
| 29.718077 | 132 | 0.668525 |
4a1cc1b2313dc8dcb0a5892d019f0a1e0d767619 | 32,421 | py | Python | twink/base.py | krsna1729/twink | 6c2f1546bc2a8b1574e37e27676ef4d7a853b4bf | ["Apache-2.0"] | 1 | 2016-06-01T21:25:01.000Z | 2016-06-01T21:25:01.000Z | twink/base.py | krsna1729/twink | 6c2f1546bc2a8b1574e37e27676ef4d7a853b4bf | ["Apache-2.0"] | null | null | null | twink/base.py | krsna1729/twink | 6c2f1546bc2a8b1574e37e27676ef4d7a853b4bf | ["Apache-2.0"] | null | null | null |
from __future__ import absolute_import
import binascii
import contextlib
import io
import logging
import os
import struct
import types
import weakref
import datetime
from collections import namedtuple
_use_gevent = False
def use_gevent():
	global _use_gevent
	_use_gevent = True
class _sched_proxy(object):
def __getattr__(self, name):
_sched = None
if _use_gevent:
_sched = __import__("sched_gevent", globals(), level=1)
else:
_sched = __import__("sched_basic", globals(), level=1)
if name in "subprocess socket Queue Lock Event spawn serve_forever".split():
return getattr(_sched, name)
raise AttributeError("No such attribute")
sched = _sched_proxy()
def default_wrapper(func):
def wrap(*args, **kwargs):
socket = sched.socket
try:
return func(*args, **kwargs)
except socket.timeout:
return None
except socket.error as e:
if e.errno in (os.errno.EAGAIN, os.errno.ECONNRESET, os.errno.EBADF):
return b""
elif e.errno in (os.errno.EINTR,):
return None
raise
except KeyboardInterrupt:
return b""
return wrap
class ReadWrapper(object):
def __init__(self, channel, read_wrap):
self.channel = channel
self.read_wrap = read_wrap
def __enter__(self):
self.installed_wrapper = self.channel.read_wrap
self.channel.read_wrap = self
return self.channel
def __exit__(self, *args, **kwargs):
self.channel.read_wrap = self.installed_wrapper
def __call__(self, func):
def wrap(*args, **kwargs):
if self.channel.closed:
return b""
return self.read_wrap(func)(*args, **kwargs)
return wrap
class Channel(object):
'''
Openflow abstract connection class
	It covers not only TCP but also UDP, which is why the class is named
	"Channel" rather than "Connection".
	Subclass it to add instance members whose lifecycle matches that of the
	channel.
'''
def __init__(self, *args, **kwargs):
self._socket = kwargs.pop("socket", None) # dedicated socket
self._sendto = kwargs.pop("sendto", None) # only if channel prefers sendto()
self.reader = kwargs.pop("reader", None)
self.read_wrap = kwargs.pop("read_wrap", default_wrapper)
self.remote_address = kwargs.pop("remote_address", None)
self.local_address = kwargs.pop("local_address", None)
if self._socket:
if self.remote_address is None:
self.remote_address = self._socket.getpeername()
if self.local_address is None:
self.local_address = self._socket.getsockname()
if hasattr(self._socket, "settimeout") and self._socket.gettimeout() == None:
self._socket.settimeout(0.5)
def attach(self, stream, **kwargs):
self._socket = stream
if hasattr(self._socket, "settimeout") and self._socket.gettimeout() == None:
self._socket.settimeout(0.5)
self.remote_address = stream.getpeername()
self.local_address = stream.getsockname()
@property
def closed(self):
# This is not self._socket.closed because in some use cases,
# self._socket is not available, for example with gevent.server.DatagramServer
return self.remote_address is None
def close(self):
if self._socket:
self._socket.close()
if self.remote_address is not None:
self.remote_address = None
def send(self, message, **kwargs):
if self._sendto:
self._sendto(message, self.remote_address)
elif self._socket:
self._socket.send(message)
else:
raise ValueError("socket or sendto is required")
def _recv(self, num):
if self.reader:
reader = self.reader
else:
reader = self._socket.recv
return ReadWrapper(self, self.read_wrap)(reader)(num)
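# Usage sketch (hypothetical subclass, for illustration only): per-connection state is
# kept as instance members and goes away together with the channel.
#
#	class CountingChannel(Channel):
#		def __init__(self, *args, **kwargs):
#			super(CountingChannel, self).__init__(*args, **kwargs)
#			self.messages_seen = 0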
class Error(Exception):
pass
class ChannelClose(Error):
pass
class OpenflowBaseChannel(Channel):
version = None # The negotiated version
accept_versions = [4,] # defaults to openflow 1.3
def __init__(self, *args, **kwargs):
super(OpenflowBaseChannel, self).__init__(*args, **kwargs)
self.buffer = b""
def __iter__(self):
while True:
ret = self.recv()
if ret:
yield ret
else:
break
def recv(self):
required_len = 8
while len(self.buffer) < required_len:
tmp = super(OpenflowBaseChannel, self)._recv(8192)
if tmp is None:
continue
elif len(tmp)==0:
return tmp
self.buffer += tmp
p = struct.unpack_from("!BBHI", self.buffer)
required_len = p[2]
while len(self.buffer) < required_len:
tmp = super(OpenflowBaseChannel, self)._recv(8192)
if tmp is None:
continue
elif len(tmp)==0:
return tmp
self.buffer += tmp
ret = self.buffer[0:required_len]
self.buffer = self.buffer[required_len:]
return ret
class LoggingChannel(OpenflowBaseChannel):
channel_log_name = "channel"
send_log_name = "send"
recv_log_name = "recv"
remote = ""
def __init__(self, *args, **kwargs):
super(LoggingChannel, self).__init__(*args, **kwargs)
if self.remote_address:
self.remote = " from %s" % self.remote_address[0]
logging.getLogger(self.channel_log_name).info("%s connect%s" % (self, self.remote))
def send(self, message, **kwargs):
logging.getLogger(self.send_log_name).debug("%s %s" % (self, binascii.b2a_hex(message)))
return super(LoggingChannel, self).send(message, **kwargs)
def recv(self):
message = super(LoggingChannel, self).recv()
if message: # ignore b"" and None
logging.getLogger(self.recv_log_name).debug("%s %s" % (self, binascii.b2a_hex(message)))
return message
def close(self):
if not self.closed:
super(LoggingChannel, self).close()
logging.getLogger(self.channel_log_name).info("%s close%s" % (self, self.remote))
class OpenflowChannel(OpenflowBaseChannel):
_start = None
def attach(self, stream, **kwargs):
super(OpenflowBaseChannel, self).attach(stream, **kwargs)
if kwargs.get("autostart", True):
self.start()
def start(self):
if self._start is None:
self.send(hello(self.accept_versions))
self._start = True
def recv(self):
message = super(OpenflowChannel, self).recv()
if message:
(version, oftype, length, xid) = parse_ofp_header(message)
if oftype==0: # HELLO
accept_versions = ofp_version_normalize(self.accept_versions)
if not accept_versions:
accept_versions = set([1,])
cross_versions = parse_hello(message) & accept_versions
if cross_versions:
self.version = max(cross_versions)
else:
ascii_txt = "Accept versions: %s" % ["- 1.0 1.1 1.2 1.3 1.4".split()[x] for x in list(accept_versions)]
self.send(struct.pack("!BBHIHH", max(accept_versions), 1,
struct.calcsize("!BBHIHH")+len(ascii_txt), hms_xid(),
0, 0) + ascii_txt.encode("ASCII"))
raise ChannelClose(ascii_txt)
return message
def parse_ofp_header(message):
'''
@return (version, oftype, message_len, xid)
'''
return struct.unpack_from("!BBHI", message)
def ofp_header_only(oftype, version=1, xid=None):
if xid is None:
xid = hms_xid()
return struct.pack("!BBHI", version, oftype, 8, xid)
def hms_xid():
	'''Generate an xid that reads like a datetime when logged as an int.'''
now = datetime.datetime.now()
candidate = int(("%02d"*3+"%04d") % (now.hour, now.minute, now.second, now.microsecond/100))
if hasattr(hms_xid, "dedup"):
if hms_xid.dedup >= candidate:
candidate = hms_xid.dedup+1
setattr(hms_xid, "dedup", candidate)
return candidate
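# Worked example: at 12:34:56.789000 the xid is int("12"+"34"+"56"+"7890") == 1234567890,
# so xids sort roughly by wall-clock time when logged as plain integers.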
def ofp_version_normalize(versions):
if isinstance(versions, list) or isinstance(versions, tuple) or isinstance(versions, set):
vset = set()
for version in versions:
if isinstance(version, float):
version = [1.0, 1.1, 1.2, 1.3, 1.4].index(version) + 1
assert isinstance(version, int), "unknown version %s" % version
vset.add(version)
return vset
elif versions is None:
return set()
assert False, "unknown versions %s" % versions
def hello(versions, **kwargs):
xid = kwargs.get("xid", hms_xid())
if versions:
vset = ofp_version_normalize(versions)
else:
vset = set((1,))
version = max(vset)
if version < 4:
return struct.pack("!BBHI", version, 0, 8, xid)
else:
units = [0,]*(1 + version//32)
for v in vset:
units[v//32] |= 1<<(v%32)
versionbitmap_length = 4 + len(units)*4
fmt = "!BBHIHH%dI%dx" % (len(units), 8*((len(units)-1)%2))
return struct.pack(fmt, version, 0, struct.calcsize(fmt), xid, # HELLO
1, versionbitmap_length, *units) # VERSIONBITMAP
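# Worked example (derived from the code above, shown for reference only): hello([1.3])
# negotiates wire version 4, so one 32-bit bitmap unit (1<<4 == 0x10) is emitted in a
# VERSIONBITMAP element of length 8, giving a 16-byte HELLO message in total.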
def parse_hello(message):
(version, oftype, length, xid) = parse_ofp_header(message)
assert oftype==0 # HELLO
versions = set()
if length == 8:
versions.add(version)
else:
(subtype, sublength) = struct.unpack_from("!HH", message, offset=8)
assert subtype == 1 # VERSIONBITMAP
units = struct.unpack_from("!%dI" % (sublength/4 - 1), message, offset=12)
for idx,unit in zip(range(len(units)),units):
for s in range(32):
if unit&(1<<s):
versions.add(idx*32 + s)
return versions
class OpenflowServerChannel(OpenflowChannel):
def loop(self):
try:
for message in self:
if not message:
break
self.handle_proxy(self.handle)(message, self)
except ChannelClose:
self.close()
def handle_proxy(self, handle):
return handle
def handle(self, message, channel):
logging.getLogger(__name__).warn("check MRO")
pass
class AutoEchoChannel(OpenflowServerChannel):
'''
	AutoEchoChannel intercepts ECHO_REQUEST and automatically sends the echo response.
'''
def handle_proxy(self, handle):
def intercept(message, channel):
if message:
(version, oftype, length, xid) = parse_ofp_header(message)
if oftype==2: # ECHO
self.send(struct.pack("!BBHI", self.version, 3, length, xid)+message[8:])
else:
super(AutoEchoChannel, self).handle_proxy(handle)(message, channel)
return intercept
class WeakCallbackCaller(object):
@property
def callback(self):
if self.cbref:
return self.cbref()
class Barrier(WeakCallbackCaller):
def __init__(self, xid, message_handler=None):
if message_handler:
self.cbref = weakref.ref(message_handler)
else:
self.cbref = None
self.xid = xid
class Chunk(WeakCallbackCaller):
def __init__(self, message_handler):
if message_handler:
self.cbref = weakref.ref(message_handler)
else:
self.cbref = None
class ControllerChannel(OpenflowServerChannel, WeakCallbackCaller):
datapath = None
auxiliary = None
cbref = None
def __init__(self, *args, **kwargs):
super(ControllerChannel, self).__init__(*args, **kwargs)
self.seq_lock = sched.Lock()
self.seq = []
def send(self, message, **kwargs):
with self.seq_lock:
return self.locked_send(message, **kwargs)
def locked_send(self, message, **kwargs):
message_handler = kwargs.get("callback") # callable object
if message_handler is None:
pass
else:
assert isinstance(message_handler, object)
assert callable(message_handler)
(version, oftype, length, xid) = parse_ofp_header(message)
if (oftype==18 and version==1) or (oftype==20 and version!=1): # OFPT_BARRIER_REQUEST
self.seq.append(Barrier(xid, message_handler))
elif self.seq:
seq_last = self.seq[-1]
if isinstance(seq_last, Chunk):
if seq_last.callback != message_handler:
bxid = hms_xid()
if self.version==1:
msg = ofp_header_only(18, version=1, xid=bxid) # OFPT_BARRIER_REQUEST=18 (v1.0)
else:
msg = ofp_header_only(20, version=self.version, xid=bxid) # OFPT_BARRIER_REQUEST=20 (v1.1--v1.4)
self.seq.append(Barrier(bxid))
self.seq.append(Chunk(message_handler))
super(ControllerChannel, self).send(msg)
elif isinstance(seq_last, Barrier):
self.seq.append(Chunk(message_handler))
else:
assert False, "seq element must be Chunk or Barrier"
else:
if self.callback != message_handler:
self.seq.append(Chunk(message_handler))
if message_handler:
self.cbfunc = weakref.ref(message_handler)
super(ControllerChannel, self).send(message)
def recv(self):
message = super(ControllerChannel, self).recv()
if message:
(version, oftype, length, xid) = parse_ofp_header(message)
if oftype==6: # FEATURES_REPLY
if self.version < 4:
(self.datapath,) = struct.unpack_from("!Q", message, offset=8) # v1.0--v1.2
else:
(self.datapath,_1,_2,self.auxiliary) = struct.unpack_from("!QIBB", message, offset=8) # v1.3--v1.4
return message
def handle_proxy(self, handle):
def intercept(message, channel):
(version, oftype, length, xid) = parse_ofp_header(message)
if hasattr(self, "handle_async") and oftype in (10,11,12):
# bypass method call for async message
return super(ControllerChannel, self).handle_proxy(self.handle_async)(message, channel)
with self.seq_lock:
if self.seq:
if (oftype==19 and version==1) or (oftype==21 and version!=1): # is barrier
chunk_drop = False
for e in self.seq:
if isinstance(e, Barrier):
if e.xid == xid:
self.seq = self.seq[self.seq.index(e)+1:]
if e.callback:
return e.callback(message, self)
return True
else:
assert False, "missing barrier(xid=%x) before barrier(xid=%x)" % (e.xid, xid)
elif isinstance(e, Chunk):
assert chunk_drop==False, "dropping multiple chunks at a time"
chunk_drop = True
assert False, "got unknown barrier xid=%x" % xid
else:
e = self.seq[0]
if isinstance(e, Chunk):
if e.callback:
return e.callback(message, self)
if self.callback:
return self.callback(message, self)
else:
return super(ControllerChannel, self).handle_proxy(handle)(message, channel)
logging.getLogger(__name__).warn("No callback found for handling message %s" % binascii.b2a_hex(message))
return intercept
class RateLimit(object):
def __init__(self, size):
self.size = size
self.cold_lock = sched.Lock()
self.cold = []
self.loop_lock = sched.Lock()
def spawn(self, func, *args, **kwargs):
with self.cold_lock:
self.cold.append((func, args, kwargs))
sched.spawn(self.loop)
def loop(self):
with self.loop_lock:
while len(self.cold) > 0:
hot_lock = sched.Lock()
hot = []
children = {}
while len(hot) < self.size and len(self.cold) > 0:
task = None
with self.cold_lock:
task = self.cold.pop(0)
if task:
(func, args, kwargs) = task
def proxy():
func(*args, **kwargs)
with hot_lock:
hot.remove(task)
hot.append(task)
children[id(task)] = sched.spawn(proxy)
for task_id,job in tuple(children.items()):
running = False
with hot_lock:
if task_id in [id(task) for task in hot]:
running = True
if running:
job.join(0.5)
else:
						children.pop(task_id)
break
class ParallelChannel(OpenflowServerChannel):
# mixin for parent channel
socket_dir = None
async_rate = 0
def __init__(self, *args, **kwargs):
super(ParallelChannel, self).__init__(*args, **kwargs)
self.close_lock = sched.Lock()
self.async_pool = RateLimit(self.async_rate)
def close(self):
with self.close_lock:
super(ParallelChannel, self).close()
def handle_proxy(self, handle):
def intercept(message, channel):
def proxy(message, channel):
try:
handle(message, channel)
except ChannelClose:
logging.getLogger(__name__).info("closing", exc_info=True)
channel.close()
except:
logging.getLogger(__name__).error("handle error", exc_info=True)
channel.close()
rated_call = False
if self.async_rate:
(version, oftype, length, xid) = parse_ofp_header(message)
if oftype in (10, 11, 12):
rated_call = True
if rated_call:
self.async_pool.spawn(proxy, message, channel)
else:
sched.spawn(proxy, message, channel)
return super(ParallelChannel, self).handle_proxy(intercept)
def socket_path(self, path):
if self.socket_dir:
path = os.path.join(self.socket_dir, path)
return os.path.abspath(path)
def helper_path(self, suffix):
old = self.socket_path("unknown-%x.%s" % (id(self), suffix))
if self.datapath:
new = self.socket_path("%x-%x.%s" % (self.datapath, id(self), suffix))
try:
os.rename(old, new)
except OSError:
pass
return new
return old
def override_required(self, *args, **kwargs):
raise Error("Concrete MixIn required")
def bound_socket(info, socktype):
socket = sched.socket
if isinstance(info, socket.socket):
return info
elif isinstance(info, tuple) or isinstance(info, list):
infos = [o for o in socket.getaddrinfo(*info) if o[1]==socktype or o[1]==0]
(family, socktype, proto, canonname, sockaddr) = infos[0]
s = socket.socket(family, socktype)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(sockaddr)
return s
elif isinstance(info, str):
s = socket.socket(socket.AF_UNIX, socktype)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(info)
return s
else:
raise ValueError("unexpected %s" % info)
def stream_socket(info):
return bound_socket(info, sched.socket.SOCK_STREAM)
def dgram_socket(info):
return bound_socket(info, sched.socket.SOCK_DGRAM)
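# Usage sketch (hypothetical endpoints, for illustration only):
#	stream_socket(("0.0.0.0", 6653))      # getaddrinfo-style (host, port) tuple
#	stream_socket("./switch.jackin")      # unix domain socket path
#	dgram_socket(("::", 6653))            # same helper, SOCK_DGRAM instead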
class StreamServer(object):
channel_cls = None
def __init__(self, bound_sock, **kwargs):
self.accepting = False
self.sock = stream_socket(bound_sock)
self.channels_lock = sched.Lock()
self.channels = set()
self.server_address = self.sock.getsockname()
def start(self):
self.accepting = True
sock = self.sock
sock.settimeout(0.5)
sock.listen(10)
sched.spawn(self.run)
def run(self):
try:
while self.accepting:
try:
s = self.sock.accept()
except sched.socket.timeout:
continue
ch = self.channel_cls(socket=s[0], remote_address=s[1], read_wrap=self.read_wrap)
ch.start()
sched.spawn(self._loop_runner, ch)
finally:
self.sock.close()
def _loop_runner(self, ch):
with self.channels_lock:
self.channels.add(ch)
ch.loop()
ch.close()
with self.channels_lock:
self.channels.remove(ch)
def read_wrap(self, func):
def wrap(*args, **kwargs):
if self.accepting==False:
return b""
return default_wrapper(func)(*args, **kwargs)
return wrap
def stop(self):
self.accepting = False
for ch in list(self.channels):
ch.close()
class DgramServer(object):
channel_cls = None
def __init__(self, bound_sock):
self.accepting = False
self.sock = dgram_socket(bound_sock)
self.remotes_lock = sched.Lock()
self.remotes = {}
self.remote_locks = {}
def start(self):
self.accepting = True
sched.spawn(self.run)
def run(self):
sock = self.sock
while self.accepting:
try:
data,remote_address = sock.recv()
except sched.socket.timeout:
continue
with self.remotes_lock:
if remote_address in self.remotes:
ch = self.remotes[remote_address]
lock = self.remote_locks[remote_address]
else:
ch = self.channel_cls(sendto=sock.sendto, remote_address=remote_address, local_address=sock.getsockname())
ch.start()
self.remotes[remote_address] = ch
lock = sched.Lock()
self.remote_locks[remote_address] = lock
sched.spawn(self.locked_loop, ch, lock, data)
sock.close()
def locked_loop(self, ch, lock, data):
with lock:
			ch.reader = io.BytesIO(data).read  # io.BytesIO: the StringIO name was never imported
ch.loop()
def stop(self):
self.accepting = False
class ParentChannel(ControllerChannel, ParallelChannel):
jackin = False
monitor = False
jackin_shutdown = None
monitor_shutdown = None
monitors = set()
def close(self):
if self.jackin_shutdown:
self.jackin_shutdown()
try:
os.remove(self.helper_path("jackin"))
except OSError:
pass
if self.monitor_shutdown:
self.monitor_shutdown()
try:
os.remove(self.helper_path("monitor"))
except OSError:
pass
super(ParentChannel, self).close()
def recv(self):
message = super(ParentChannel, self).recv()
if message:
(version, oftype, length, xid) = parse_ofp_header(message)
if oftype==0:
if self.jackin:
serv, addr = self.jackin_server()
self.jackin_shutdown = serv.stop
serv.start() # start after assignment especially for pthread
if self.monitor:
serv, addr = self.monitor_server()
self.monitor_shutdown = serv.stop
self.monitors = serv.channels
serv.start() # start after assignment especially for pthread
else:
if oftype==6: # FEATURES_REPLY
if self.jackin:
self.helper_path("jackin")
if self.monitor:
self.helper_path("monitor")
for ch in list(self.monitors):
ch.send(message)
return message
def jackin_server(self):
path = self.helper_path("jackin")
serv = type("JackinServer", (StreamServer,), dict(
channel_cls = type("JackinCChannel",(JackinChildChannel, AutoEchoChannel, LoggingChannel),{
"accept_versions":[self.version,],
"parent": self })))(path)
return serv, path
def monitor_server(self):
path = self.helper_path("monitor")
serv = type("MonitorServer", (StreamServer,), dict(
channel_cls = type("MonitorCChannel",(ChildChannel, AutoEchoChannel, LoggingChannel),{
"accept_versions":[self.version,],
"parent": self })))(path)
return serv, path
def temp_server(self):
s = sched.socket.socket(sched.socket.AF_INET, sched.socket.SOCK_STREAM)
s.setsockopt(sched.socket.SOL_SOCKET, sched.socket.SO_REUSEADDR, 1)
s.bind(("127.0.0.1", 0))
serv = type("TempServer", (StreamServer,), dict(
channel_cls = type("TempCChannel",(JackinChildChannel, AutoEchoChannel, LoggingChannel),{
"accept_versions":[self.version,],
"parent": self })))(s)
return serv.start, serv.stop, s.getsockname()
class JackinChannel(ParentChannel):
'''
	JackinChannel opens unix domain sockets for openflow operators (jackin programs),
such as ovs-ofctl.
'''
jackin = True
class MonitorChannel(ParentChannel):
'''
MonitorChannel opens unix domain sockets for openflow message listeners(monitors).
'''
monitor = True
class ChildChannel(OpenflowChannel):
parent = None # must be set
def send(self, message, **kwargs):
super(ChildChannel, self).send(message, **kwargs)
def handle(self, message, channel):
pass # ignore all messages
class WeakCallback(object):
def __init__(self, channel):
self.channel = channel
def __call__(self, message, upstream_channel):
self.channel.send(message)
class JackinChildChannel(ChildChannel):
def __init__(self, *args, **kwargs):
super(JackinChildChannel, self).__init__(*args, **kwargs)
self.cbfunc = WeakCallback(self)
def handle(self, message, channel):
(version, oftype, length, xid) = parse_ofp_header(message)
if oftype!=0:
self.parent.send(message, callback=self.cbfunc)
def close(self):
self.cbfunc = None # unref
super(JackinChildChannel, self).close()
class SyncTracker(object):
def __init__(self, xid, ev):
self.xid = xid
self.ev = ev
self.data = None
class SyncChannel(ParallelChannel):
'''
SyncChannel adds synchronous methods.
'''
def __init__(self, *args, **kwargs):
super(SyncChannel, self).__init__(*args, **kwargs)
self.syncs = {}
self.syncs_lock = sched.Lock()
def recv(self):
message = super(SyncChannel, self).recv()
if message:
(version, oftype, length, xid) = parse_ofp_header(message)
if xid in self.syncs:
x = self.syncs[xid]
if (version==1 and oftype==17) or (version!=1 and oftype==19): # multipart
with self.syncs_lock:
if x.data is None:
x.data = message
else:
x.data += message
if not struct.unpack_from("!H", message, offset=10)[0] & 1:
x.ev.set()
else:
x.data = message
x.ev.set()
return message
def send_sync(self, message, **kwargs):
(version, oftype, length, xid) = parse_ofp_header(message)
x = SyncTracker(xid, sched.Event())
with self.syncs_lock:
self.syncs[x.xid] = x
self.send(message, **kwargs)
x.ev.wait(timeout=kwargs.get("timeout", 10))
with self.syncs_lock:
self.syncs.pop(x.xid)
return x.data
def _sync_simple(self, req_oftype, res_oftype):
message = self.send_sync(ofp_header_only(req_oftype, version=self.version))
if message:
(version, oftype, length, xid) = parse_ofp_header(message)
if oftype != res_oftype:
raise OpenflowError(message)
else:
raise ChannelClose("no response")
return message
def close(self):
if self.syncs is not None:
for k,x in tuple(self.syncs.items()):
x.data = ""
x.ev.set()
super(SyncChannel, self).close()
def echo(self):
return self._sync_simple(2, 3)
def feature(self):
return self._sync_simple(5, 6)
def get_config(self):
return self._sync_simple(7, 8)
def barrier(self):
if self.version==1:
return self._sync_simple(18, 19) # OFPT_BARRIER_REQUEST=18 (v1.0)
else:
return self._sync_simple(20, 21) # OFPT_BARRIER_REQUEST=20 (v1.1, v1.2, v1.3)
def single(self, message, **kwargs):
return self.multi((message,), **kwargs).pop()
def multi(self, messages, **kwargs):
prepared = []
for message in messages:
(version, oftype, length, xid) = parse_ofp_header(message)
x = SyncTracker(xid, sched.Event())
with self.syncs_lock:
self.syncs[x.xid] = x
self.send(message, **kwargs)
prepared.append(xid)
self.barrier()
results = []
for xid in prepared:
if xid in self.syncs:
results.append(self.syncs[xid].data)
with self.syncs_lock:
self.syncs.pop(xid)
else:
results.append(None)
return results
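# Usage sketch (hypothetical request variables, assuming an established SyncChannel `ch`):
#	ch.feature()              # blocks until FEATURES_REPLY arrives or raises ChannelClose
#	ch.multi([req_a, req_b])  # sends both, adds a barrier, returns the replies matched by xid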
class PortMonitorChannel(ControllerChannel, ParallelChannel):
'''
PortMonitorChannel exposes `ports` property, which will be synced with the openflow switch.
'''
def __init__(self, *args, **kwargs):
super(PortMonitorChannel, self).__init__(*args, **kwargs)
self.timeout = kwargs.get("timeout", 2.0)
self._ports_lock = sched.Lock()
self._ports = []
self._ports_init = sched.Event()
self._port_monitor_multi = dict()
self._attach = weakref.WeakValueDictionary()
self._detach = weakref.WeakValueDictionary()
def recv(self):
message = super(PortMonitorChannel, self).recv()
if message:
ofp_port = "!H6s16sIIIIII" # ofp_port v1.0
ofp_port_names = '''port_no hw_addr name
config state
curr advertised supported peer'''
if self.version in (2,3,4):
ofp_port = "!I4x6s2x16sIIIIIIII"
ofp_port_names = '''port_no hw_addr name
config state
curr advertised supported peer
curr_speed max_speed'''
elif self.version == 5:
ofp_port = "!IH2x6s2x6sII"
ofp_port_names = '''port_no length hw_addr name
config state'''
(version, oftype, length, xid) = parse_ofp_header(message)
if xid in self._port_monitor_multi and oftype==19: # MULTIPART_REPLY
assert self.version in (4,5)
(mptype, flags) = struct.unpack_from("!HH4x", message, offset=8)
if mptype==13: # OFPMP_PORT_DESC
ports = self._port_monitor_multi[xid]
offset = 16
while offset < length:
port = list(struct.unpack_from(ofp_port, message, offset=offset))
port[2] = port[2].partition(b'\0')[0]
ports.append(namedtuple("ofp_port", ofp_port_names)(*port))
offset += struct.calcsize(ofp_port)
if not flags&1:
with self._ports_lock:
self._ports_replace(ports)
self._ports_init.set()
del(self._port_monitor_multi[xid])
elif oftype==6 and self.version != 4: # FEATURES_REPLY
fmt = "!BBHIQIB3x"
assert struct.calcsize(fmt) % 8 == 0
offset = struct.calcsize(fmt+"II")
ports = []
while offset < length:
port = list(struct.unpack_from(ofp_port, message, offset=offset))
port[2] = port[2].partition(b'\0')[0]
ports.append(namedtuple("ofp_port", ofp_port_names)(*port))
offset += struct.calcsize(ofp_port)
with self._ports_lock:
self._ports_replace(ports)
self._ports_init.set()
elif oftype==12: # PORT_STATUS
p = struct.unpack_from("!B7x"+ofp_port[1:], message, offset=8)
reason = p[0]
port = list(p[1:])
port[2] = port[2].partition(b'\0')[0]
self._update_port(reason, namedtuple("ofp_port", ofp_port_names)(*port))
return message
def _update_port(self, reason, port):
with self._ports_lock:
ports = list(self._ports)
hit = [x for x in ports if x[0]==port[0]] # check with port_no(0)
if reason==0: # ADD
if self._ports_init.is_set():
assert not hit
ports.append(port)
s = self._attach.get(port.port_no, self._attach.get(port.name))
if s:
s.set(port)
					self._attach.pop(port.port_no, None)  # pop by key (num_or_name), not by the Event value
					self._attach.pop(port.name, None)
elif reason==1: # DELETE
if self._ports_init.is_set():
assert hit
if hit:
assert len(hit) == 1
ports.remove(hit.pop())
s = self._detach.get(port.port_no, self._detach.get(port.name))
if s:
s.set(port)
						self._detach.pop(port.port_no, None)
						self._detach.pop(port.name, None)
elif reason==2: # MODIFY
if self._ports_init.is_set():
assert hit
if hit:
assert len(hit) == 1
old = hit.pop()
idx = ports.index(old)
ports.remove(old)
ports.insert(idx, port)
else:
ports.append(port)
else:
assert False, "unknown reason %d" % reason
self._ports = ports
@property
def ports(self):
if not self._ports_init.is_set():
if self.version in (4, 5):
xid = hms_xid()
with self._ports_lock:
self._port_monitor_multi[xid] = []
self.send(struct.pack("!BBHIHH4x", self.version,
18, # MULTIPART_REQUEST (v1.3, v1.4)
16, # struct.calcsize(fmt)==16
xid,
13, # PORT_DESC
0, # no REQ_MORE
))
else:
self.send(ofp_header_only(5, version=self.version)) # FEATURES_REQUEST
self._ports_init.wait(timeout=self.timeout)
return tuple(self._ports)
def _ports_replace(self, new_ports):
old_ports = self._ports
old_nums = set([p.port_no for p in old_ports])
old_names = set([p.name for p in old_ports])
new_nums = set([p.port_no for p in new_ports])
new_names = set([p.name for p in new_ports])
for port in old_ports:
if port.port_no in old_nums-new_nums:
s = self._detach.get(port.port_no)
if s:
s.set(port)
					self._detach.pop(port.port_no, None)
if port.name in old_names-new_names:
s = self._detach.get(port.name)
if s:
s.set(port)
					self._detach.pop(port.name, None)
for port in new_ports:
if port.port_no in new_nums-old_nums:
s = self._attach.get(port.port_no)
if s:
s.set(port)
					self._attach.pop(port.port_no, None)
if port.name in new_names-old_names:
s = self._attach.get(port.name)
if s:
s.set(port)
					self._attach.pop(port.name, None)
self._ports = new_ports
def close(self):
self._ports_init.set() # unlock the event
super(PortMonitorChannel, self).close()
def wait_attach(self, num_or_name, timeout=10):
for port in self._ports:
if port.port_no == num_or_name or port.name == num_or_name:
return port
with self._ports_lock:
if num_or_name not in self._attach:
result = self._attach[num_or_name] = sched.Event()
else:
result = self._attach[num_or_name]
if result.wait(timeout=timeout):
for port in self._ports:
if port.port_no == num_or_name or port.name == num_or_name:
return port
def wait_detach(self, num_or_name, timeout=10):
hit = False
for port in self._ports:
if port.port_no == num_or_name or port.name == num_or_name:
hit = True
if not hit:
return num_or_name # already detached
with self._ports_lock:
if num_or_name not in self._detach:
result = self._detach[num_or_name] = sched.Event()
else:
result = self._detach[num_or_name]
if result.wait(timeout=timeout):
return num_or_name
| 28.640459 | 112 | 0.659326 |
4a1cc1f43af9c45550215ed5d00d184bcb2425fb | 5,189 | py | Python | pebbles/utils.py | CSCfi/pebbles | 24b32e8fc538cc8095fda62c892a8221346c2bce | ["MIT"] | 4 | 2017-05-11T14:50:32.000Z | 2020-01-10T09:02:27.000Z | pebbles/utils.py | CSCfi/pebbles | 24b32e8fc538cc8095fda62c892a8221346c2bce | ["MIT"] | 145 | 2017-04-07T11:01:58.000Z | 2019-12-11T15:30:23.000Z | pebbles/utils.py | CSCfi/pebbles | 24b32e8fc538cc8095fda62c892a8221346c2bce | ["MIT"] | 3 | 2017-10-25T12:36:16.000Z | 2018-04-26T08:49:34.000Z |
from Crypto.PublicKey import RSA
import base64
import struct
import six
from functools import wraps
from flask import abort, g
import re
KEYPAIR_DEFAULT = {
'bits': 2048,
}
def generate_ssh_keypair(bits=KEYPAIR_DEFAULT['bits']):
new_key = RSA.generate(bits)
public_key = new_key.publickey().exportKey(format="OpenSSH")
private_key = new_key.exportKey(format="PEM")
return private_key, public_key
def validate_ssh_pubkey(pubkey):
"""
Check if the given string looks like a SSH public key.
Based on https://github.com/jirutka/ssh-ldap-pubkey
"""
if not pubkey:
return False
key_parts = pubkey.split()
if len(key_parts) < 2:
return False
key_type, key_data = key_parts[0:2]
if key_type not in ("ssh-rsa", "ssh-dss"):
return False
try:
        key_bytes = base64.b64decode(six.b(key_data))  # b64decode works on both Python 2 and 3
except base64.binascii.Error:
return False
int_len = 4
str_len = struct.unpack('>I', key_bytes[:int_len])[0]
if six.u(key_bytes[int_len:(int_len + str_len)]) != six.b(key_type):
return False
return True
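# Example (key material elided, shown only to illustrate the accepted format):
#   validate_ssh_pubkey("ssh-rsa AAAAB3NzaC1yc2E... user@host")  # True for a well-formed key
#   validate_ssh_pubkey("not a key")                             # False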
def requires_admin(f):
@wraps(f)
def decorated(*args, **kwargs):
if not g.user.is_admin:
abort(403)
return f(*args, **kwargs)
return decorated
def requires_group_owner_or_admin(f):
@wraps(f)
def decorated(*args, **kwargs):
if not g.user.is_admin and not g.user.is_group_owner:
abort(403)
return f(*args, **kwargs)
return decorated
def memoize(func):
"""
Generic memoization implementation suitable for decorator use
"""
cache = {}
def inner(x):
if x not in cache:
cache[x] = func(x)
return cache[x]
return inner
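# Usage sketch (hypothetical function, for illustration only):
#   @memoize
#   def square(x):
#       return x * x
#   square(4)  # computed once; repeated calls with 4 come from the cache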
def parse_maximum_lifetime(max_life_str):
m = re.match(r'^(\d+d\s?)?(\d{1,2}h\s?)?(\d{1,2}m\s?)??$', max_life_str)
if m:
days = hours = mins = 0
if m.group(1):
days = int(m.group(1).strip()[:-1])
if m.group(2):
hours = int(m.group(2).strip()[:-1])
if m.group(3):
mins = int(m.group(3).strip()[:-1])
maximum_lifetime = days * 86400 + hours * 3600 + mins * 60
return maximum_lifetime
else:
raise ValueError
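# Worked example: parse_maximum_lifetime("2d 3h 30m") == 2*86400 + 3*3600 + 30*60 == 185400.
# Strings that do not match the pattern (e.g. "2 days") raise ValueError.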
def parse_ports_string(ports_str):
ports_list = []
ports_str = ports_str.replace(',', ' ')
ports = ports_str.split(' ')
ports = filter(None, ports)
for port in ports:
if ':' in port:
(from_port, to_port) = parse_port_range(port)
else:
try:
from_port = int(port)
to_port = int(port)
            except ValueError:
raise ValueError('Port is not an integer')
if 0 < from_port < 65536 and 0 < to_port < 65536:
ports_list.append((from_port, to_port))
else:
raise ValueError('Error parsing the input port string')
return ports_list
def parse_port_range(port_range):
m = re.match(r'(\d+):(\d+)', port_range)
if m:
if int(m.group(1)) < int(m.group(2)):
return (int(m.group(1)), int(m.group(2)))
else:
raise ValueError('Port range invalid')
else:
raise ValueError('No port range found')
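# A minimal usage sketch (hypothetical helper): single ports and "from:to" ranges
# may be mixed, separated by commas or spaces.
def _example_port_list():
    """Illustrative only: "22, 80 8000:8100" -> [(22, 22), (80, 80), (8000, 8100)]."""
    return parse_ports_string("22, 80 8000:8100")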
def get_full_blueprint_config(blueprint):
"""Get the full config for blueprint from blueprint template for allowed attributes"""
template = blueprint.template
allowed_attrs = template.allowed_attrs
allowed_attrs = ['name', 'description'] + allowed_attrs
full_config = template.config
bp_config = blueprint.config
for attr in allowed_attrs:
if attr in bp_config:
full_config[attr] = bp_config[attr]
return full_config
def get_blueprint_fields_from_config(blueprint, field_name):
"""Hybrid fields for Blueprint model which need processing"""
full_config = get_full_blueprint_config(blueprint)
if field_name == 'preallocated_credits':
preallocated_credits = False # Default value
if 'preallocated_credits' in full_config:
try:
preallocated_credits = bool(full_config['preallocated_credits'])
            except (TypeError, ValueError):
pass
return preallocated_credits
if field_name == 'maximum_lifetime':
maximum_lifetime = 3600 # Default value of 1 hour
if 'maximum_lifetime' in full_config:
max_life_str = str(full_config['maximum_lifetime'])
if max_life_str:
maximum_lifetime = parse_maximum_lifetime(max_life_str)
return maximum_lifetime
if field_name == 'cost_multiplier':
cost_multiplier = 1.0 # Default value
if 'cost_multiplier' in full_config:
try:
cost_multiplier = float(full_config['cost_multiplier'])
            except (TypeError, ValueError):
pass
return cost_multiplier
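# A minimal usage sketch (hypothetical helper; `blueprint` stands in for a real
# model instance): derived fields are read through the helper above so the
# documented defaults apply when the merged config is missing or unparseable.
def _example_lifetime_for(blueprint):
    """Illustrative only: coerced lifetime in seconds for a blueprint."""
    return get_blueprint_fields_from_config(blueprint, 'maximum_lifetime')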
def b64encode_string(content):
"""python2 and python3 compatibility wrapper function. Can be removed when support for python2 is gone"""
if six.PY3:
return base64.b64encode(content.encode('utf-8')).decode('utf-8')
else:
return base64.b64encode(content).decode('utf-8')
| 28.201087 | 109 | 0.618809 |
4a1cc3d4eedbfbd53cc4529e8cc0d4145f45da91
| 3,234 |
py
|
Python
|
pyslowloris/uri_info.py
|
goasdsdkai/daas
|
78ef23b254893efca22748fe619ef22648b8c1e8
|
[
"MIT"
] | 75 |
2017-06-15T05:58:02.000Z
|
2022-03-31T22:59:25.000Z
|
pyslowloris/uri_info.py
|
goasdsdkai/daas
|
78ef23b254893efca22748fe619ef22648b8c1e8
|
[
"MIT"
] | 8 |
2017-08-25T04:14:19.000Z
|
2021-09-10T06:21:33.000Z
|
pyslowloris/uri_info.py
|
goasdsdkai/daas
|
78ef23b254893efca22748fe619ef22648b8c1e8
|
[
"MIT"
] | 32 |
2017-03-22T22:52:26.000Z
|
2022-03-07T15:53:01.000Z
|
"""
MIT License
Copyright (c) 2020 Maxim Krivich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import socket
from urllib.parse import urlparse
from pyslowloris import exceptions as exc
from pyslowloris import utils as u
class HostAddress:
__slots__ = ("host", "path", "port", "ssl", "scheme", "_ip", )
def __init__(
self, scheme: str, host: str,
path: str, port: int, ssl: bool = False
):
self.host = host
self.path = path
self.port = port
self.ssl = ssl
self.scheme = scheme
self._ip = None
if not self._validate_uri():
raise exc.InvalidURIError("The uri is not valid.")
def __str__(self) -> str:
return self._create_uri()
def __repr__(self) -> str:
internal_dict = {key: getattr(self, key) for key in self.__slots__}
args = ",".join([f"{k}={repr(v)}" for (k, v) in internal_dict.items()])
return f"{self.__class__.__name__}({args.rstrip(',')})"
@classmethod
def from_url(cls, url: str, ssl: bool = False):
"""Construct a request for the specified URL."""
port = None
try:
res = urlparse(url)
port = res.port
except Exception as ex:
raise exc.InvalidURIError("Invalid uri string") from ex
else:
# scheme will be validated in the constructor
if res.scheme:
ssl = res.scheme[-1] == "s"
if not port:
port = 443 if ssl else 80
return cls(
scheme=res.scheme or "http",
host=res.hostname,
port=port,
path=res.path or "/",
ssl=ssl,
)
def _create_uri(self) -> str:
return f"{self.scheme}://{self.host}:{self.port}{self.path}"
def _validate_uri(self) -> bool:
return u.validate_url(self._create_uri())
@property
def ip_address(self):
if not self._ip:
try:
self._ip = socket.gethostbyname(self.host)
except socket.error:
raise exc.HostnameNotFoundedError(
f"Error resolving DNS for {self.host}."
)
return self._ip
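# A minimal usage sketch (hypothetical helper; example.com is a placeholder host).
# No DNS lookup happens until the `ip_address` property is read.
def _example_target():
    """Illustrative only: build a HostAddress from a plain URL string."""
    target = HostAddress.from_url("https://example.com/")
    # the https scheme implies ssl=True and a default port of 443
    return str(target)  # "https://example.com:443/"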
| 33 | 79 | 0.624613 |
4a1cc4eb3edd06632db555c4c3cee99024fc6c29
| 20,663 |
py
|
Python
|
survos2/model/dataset.py
|
DiamondLightSource/SuRVoS2
|
42bacfb6a5cc267f38ca1337e51a443eae1a9d2b
|
[
"MIT"
] | 4 |
2017-10-10T14:47:16.000Z
|
2022-01-14T05:57:50.000Z
|
survos2/model/dataset.py
|
DiamondLightSource/SuRVoS2
|
42bacfb6a5cc267f38ca1337e51a443eae1a9d2b
|
[
"MIT"
] | 1 |
2022-01-11T21:11:12.000Z
|
2022-01-12T08:22:34.000Z
|
survos2/model/dataset.py
|
DiamondLightSource/SuRVoS2
|
42bacfb6a5cc267f38ca1337e51a443eae1a9d2b
|
[
"MIT"
] | 2 |
2018-03-06T06:31:29.000Z
|
2019-03-04T03:33:18.000Z
|
"""
Hdf5 datasets, with metadata and custom chunking
"""
import collections.abc
import copy
import itertools
import logging as log
import numbers
import os
import shutil
import dask.array as da
import h5py as h5
import numpy as np
from loguru import logger
from survos2.config import Config
from survos2.improc.utils import optimal_chunksize
from survos2.utils import AttributeDB
CHUNKS = Config["computing.chunk_size"] if Config["computing.chunks"] else None
CHUNKS_SPARSE = (
Config["computing.chunk_size_sparse"] if Config["computing.chunks"] else None
)
class DatasetException(Exception):
pass
class BaseDataset(object):
def close(self):
pass
def supports_metadata(self):
return False
def has_attr(self, key, default=None):
raise NotImplementedError()
def get_attr(self, key, value):
raise NotImplementedError()
def set_attr(self, key, value):
raise NotImplementedError()
def metadata(self):
raise NotImplementedError()
class DatasetWrapper(BaseDataset):
def __init__(self, fileobj, dataset):
self.fileobj = fileobj
self.dataset = dataset
@property
def id(self):
for prop in ["id", "name", "path"]:
if hasattr(self.dataset, prop):
return getattr(self.dataset, prop)
return "dataset"
def close(self):
if self.fileobj:
self.fileobj.close()
def __getattr__(self, attr):
"""
Attributes/Functions that do not exist in the extended class
are going to be passed to the instance being wrapped
"""
return self.dataset.__getattribute__(attr)
def __getitem__(self, slices):
return self.dataset.__getitem__(slices)
def __setitem__(self, slices, values):
return self.dataset.__setitem__(slices, values)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def tojson(self):
if hasattr(self.dataset, "tojson"):
return self.dataset.tojson()
return dict(shape=self.shape, dtype=np.dtype(self.dtype).name)
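# A minimal usage sketch (hypothetical helper; assumes the file holds a dataset
# named "data"): the wrapper keeps the open file handle alive for as long as the
# dataset is used and closes it when the context exits.
def _example_wrapped_read(path):
    """Illustrative only: read a block through a wrapper that owns the file."""
    f = h5.File(path, "r")
    with DatasetWrapper(f, f["data"]) as ds:
        return ds[:10]  # __getitem__ is delegated to the underlying h5py dataset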
class Dataset(BaseDataset):
__dbname__ = "dataset"
__dsname__ = "__data__"
def __init__(self, path, readonly=False):
if not os.path.isdir(path):
raise DatasetException("Dataset '%s' does not exist." % path)
self._load(path)
self._readonly = readonly
@property
def id(self):
return self._id
def _load(self, path):
self._id = os.path.basename(path)
self._path = path
dbpath = os.path.join(path, self.__dbname__)
if os.path.isfile(dbpath + ".yaml"):
self._db = db = AttributeDB(dbpath, dbtype="yaml")
elif os.path.isfile(dbpath + ".json"):
self._db = db = AttributeDB(dbpath, dbtype="json")
else:
raise DatasetException("DB not found: '%s' is not a valid dataset." % path)
try:
self._shape = tuple(db[self.__dsname__]["shape"])
self._dtype = db[self.__dsname__]["dtype"]
self._chunk_grid = tuple(db[self.__dsname__]["chunk_grid"])
self._chunk_size = tuple(db[self.__dsname__]["chunk_size"])
self._fillvalue = db[self.__dsname__]["fillvalue"]
        except Exception:
raise DatasetException("Unable to load dataset attributes: '%s'" % path)
self._total_chunks = np.prod(self._chunk_grid)
self._ndim = len(self._shape)
if not (len(self.shape) == len(self.chunk_grid) == len(self.chunk_size)):
raise DatasetException(
"Data shape and chunk layout do not match: {}, {}, {}".format(
self.shape, self.chunk_grid, self.chunk_size
)
)
def save_file(self, fullname):
fname = os.path.basename(fullname)
out_fullname = os.path.join(self._path, fname)
from shutil import copyfile, copy
try:
copy(fullname, out_fullname)
except shutil.SameFileError:
pass
return out_fullname
def tojson(self):
db = copy.deepcopy(self._db)
metadata = db.pop(self.__dsname__)
metadata["id"] = self._id
metadata["path"] = self._path
# metadata['metadata'] = db
metadata.setdefault("name", self._id)
return metadata
# Properties
@property
def shape(self):
return self._shape
@property
def dtype(self):
return self._dtype
@property
def chunk_grid(self):
return self._chunk_grid
@property
def chunk_size(self):
return self._chunk_size
@property
def fillvalue(self):
return self._fillvalue
@property
def total_chunks(self):
return self._total_chunks
@property
def ndim(self):
return self._ndim
@property
def readonly(self):
return self._readonly
# Access / Edit metadata
def supports_metadata(self):
return True
def metadata(self):
return self.get_metadata()
def has_attr(self, key):
return self.get_metadata(key) is not None
def get_attr(self, key, default=None):
value = self.get_metadata(key, default=default)
if value is None:
raise KeyError("Dataset has no `{}` metadata.".format(key))
return value
def set_attr(self, key, value):
self.set_metadata(key, value)
def get_metadata(self, key=None, default=None):
if key is None:
return copy.deepcopy(self._db)
elif key in self._db:
return self._db[key]
return default
def set_metadata(self, key, value):
if key == self.__dsname__:
raise DatasetException("Dataset metadata cannot be changed.")
elif not self._db.isserializable(value):
raise DatasetException("Metadata `{}` is not serializable".format(value))
self._db[key] = value
self._db.save()
def update_metadata(self, key, value):
if key == self.__dsname__:
raise DatasetException("Dataset metadata cannot be changed.")
elif not self._db.isserializable(value):
raise DatasetException("Metadata `{}` is not serializable".format(value))
elif key in self._db:
self._db.update(value)
self._db.save()
else:
raise DatasetException("Metadata '%s' does not exist." % key)
# Create
@staticmethod
def create(
path, shape=None, dtype=None, data=None, fillvalue=0, chunks=CHUNKS, **kwargs
):
logger.info(f"Creating dataset on {path} {shape} {dtype} {data} {chunks}")
database = kwargs.pop("database", "yaml")
readonly = kwargs.pop("readonly", False)
if Dataset.exists(path):
raise DatasetException("Dataset '%s' already exists." % path)
if os.path.isfile(path):
raise DatasetException("Path '%s' is not a valid directory path." % path)
        elif os.path.isdir(path) and os.listdir(path): # non-empty dir
raise DatasetException("Path '%s' already exists." % path)
if data is not None:
shape = data.shape
dtype = data.dtype
if shape is None or dtype is None:
            raise DatasetException("A valid `shape` and `dtype` must be provided.")
shape = list(shape)
dtype = np.dtype(dtype).name
isize = np.dtype(dtype).itemsize
if chunks is None:
chunk_size = list(shape)
        elif isinstance(chunks, collections.abc.Iterable) and len(chunks) == len(shape):
chunk_size = list(chunks)
elif isinstance(chunks, numbers.Number):
chunk_size = list(
optimal_chunksize(shape, chunks, item_size=isize, **kwargs)
)
chunk_grid = (
(np.ceil(np.asarray(shape, "f4") / chunk_size)).astype("i2").tolist()
)
metadata = {
Dataset.__dsname__: dict(
shape=shape,
dtype=dtype,
fillvalue=fillvalue,
chunk_grid=chunk_grid,
chunk_size=chunk_size,
)
}
# Filesystem
if not os.path.isdir(path):
os.makedirs(path)
dbpath = os.path.join(path, Dataset.__dbname__)
# Database
db = AttributeDB.create(dbpath, dbtype=database)
db.update(metadata)
db.save()
ds = Dataset(path, readonly=readonly)
if data is not None:
log.debug("Loading data into dataset: {}".format(shape))
ds.load(data)
return ds
@staticmethod
def exists(path):
try:
Dataset(path)
except Exception as e:
return False
return True
@staticmethod
def remove(path):
Dataset(path).delete()
def delete(self):
shutil.rmtree(self._path)
def _idx2name(self, idx):
if not all([type(i) == int for i in idx]) or len(idx) != self.ndim:
raise DatasetException("Invalid chunk idx: {}".format(idx))
return os.path.join(self._path, "chunk_%s.h5" % "x".join(map(str, idx)))
def create_chunk(self, idx, data=None, cslices=None):
logger.debug(f"Creating chunk {idx} {data} {cslices}")
if self.readonly:
raise DatasetException("Dataset is in readonly mode. Cannot create chunk.")
if self.has_chunk(idx):
raise DatasetException("DataChunk {} already exists".format(idx))
path = self._idx2name(idx)
subchunk_size = optimal_chunksize(self.chunk_size, 8)
with h5.File(path, "w") as f:
chunks = optimal_chunksize(self.chunk_size, 1)
f.create_dataset(
"data",
shape=self.chunk_size,
dtype=self.dtype,
fillvalue=self.fillvalue,
chunks=chunks,
)
if data is not None:
slices = cslices or slice(None)
f["data"][slices] = data
return DataChunk(idx, path, self.chunk_size, self.dtype, self.fillvalue)
def get_chunk(self, idx):
if self.has_chunk(idx):
path = self._idx2name(idx)
return DataChunk(idx, path, self.chunk_size, self.dtype, self.fillvalue)
return self.create_chunk(idx)
def has_chunk(self, idx):
return os.path.isfile(self._idx2name(idx))
def del_chunk(self, idx):
if self.readonly:
raise DatasetException("Dataset is in readonly mode. Cannot delete chunk.")
if self.has_chunk(idx):
os.remove(self._idx2name(idx))
def get_chunk_data(self, idx, slices=None):
if self.has_chunk(idx):
return self.get_chunk(idx)[slices]
return self._fillvalue
def set_chunk_data(self, idx, values, slices=None):
if self.readonly:
raise DatasetException(
"Dataset is in readonly mode. Cannot modify chunk data."
)
self.get_chunk(idx)[slices] = values
# Data setter/getters
def __getitem__(self, slices):
return self.get_data(slices=slices)
def __setitem__(self, slices, values):
return self.set_data(values, slices=slices)
def get_data(self, slices=None):
slices, squeeze_axis = self._process_slices(slices, squeeze=True)
tshape = tuple(x.stop - x.start for x in slices)
chunk_iterator = self._chunk_slice_iterator(slices, self.ndim)
output = np.empty(tshape, dtype=self.dtype)
for idx, cslice, gslice in chunk_iterator:
output[gslice] = self.get_chunk_data(idx, slices=cslice)
if len(squeeze_axis) > 0:
logger.debug(f"Squeeze axis {squeeze_axis}")
output = np.squeeze(
output, axis=squeeze_axis[0]
) # np.squeeze now wants an integer, rather than a list containing an int
return output
def set_data(self, values, slices=None):
if self.readonly:
raise DatasetException("Dataset is in readonly mode. Cannot modify data.")
if slices is None:
return self.load(values)
if np.dtype(self.dtype) != np.asarray(values).dtype:
log.warn(
"Performing automatic data casting from '{}' to '{}'".format(
np.asarray(values).dtype.name, self.dtype
)
)
isscalar = np.isscalar(values)
ndim = self.ndim if isscalar else values.ndim
slices, squeeze_axis = self._process_slices(slices, squeeze=True)
chunk_iterator = self._chunk_slice_iterator(slices, ndim)
for idx, cslice, gslice in chunk_iterator:
if isscalar:
self.set_chunk_data(idx, values, slices=cslice)
else:
self.set_chunk_data(idx, values[gslice], slices=cslice)
def load(self, data):
logger.debug(f"Loading dataset {data}")
if tuple(data.shape) != tuple(self.shape):
raise Exception(
"Data shape does not match: {} expected {}".format(
self.shape, data.shape
)
)
if isinstance(data, da.Array):
data.store(self)
else:
for idx in range(self.total_chunks):
idx = self.unravel_chunk_index(idx)
gslices = self.global_chunk_bounds(idx)
lslices = self.local_chunk_bounds(idx)
self.set_chunk_data(idx, data[gslices], slices=lslices)
def local_chunk_bounds(self, idx):
return tuple(
(
slice(0, min((i + 1) * s, self.shape[j]) - i * s)
for j, (i, s) in enumerate(zip(idx, self.chunk_size))
)
)
def global_chunk_bounds(self, idx):
return tuple(
(
slice(i * s, min((i + 1) * s, self.shape[j]))
for j, (i, s) in enumerate(zip(idx, self.chunk_size))
)
)
def unravel_chunk_index(self, flat_idx):
return tuple(map(int, np.unravel_index(flat_idx, self.chunk_grid)))
def ravel_chunk_index(self, idx):
return tuple(map(int, np.ravel_multi_index(idx, self.chunk_grid)))
def _process_slices(self, slices, squeeze=False):
# logger.debug(f"_process_slices {slices}")
if type(slices) in [slice, int]:
slices = [slices]
elif slices is Ellipsis:
slices = [slice(None)]
elif np.isscalar(slices):
slices = [int(slices)]
elif type(slices) not in [list, tuple]:
raise Exception(
"Invalid Slicing with index of type `{}`".format(type(slices))
)
else:
slices = list(slices)
if len(slices) <= self.ndim:
nmiss = self.ndim - len(slices)
while Ellipsis in slices:
idx = slices.index(Ellipsis)
slices = (
slices[:idx] + ([slice(None)] * (nmiss + 1)) + slices[idx + 1 :]
)
if len(slices) < self.ndim:
slices = list(slices) + ([slice(None)] * nmiss)
elif len(slices) > self.ndim:
raise Exception(
"Invalid slicing of dataset of dimension `{}`"
" with {}-dimensional slicing".format(self.ndim, len(slices))
)
final_slices = []
shape = self.shape
squeeze_axis = []
for i, s in enumerate(slices):
if type(s) == int:
final_slices.append(slice(s, s + 1))
squeeze_axis.append(i)
elif type(s) == slice:
start = s.start
stop = s.stop
if start is None:
start = 0
if stop is None:
stop = shape[i]
elif stop < 0:
stop = self.shape[i] + stop
if start < 0 or start >= self.shape[i]:
raise Exception(
"Only possitive and in-bounds slicing supported: `{}`".format(
slices
)
)
if stop < 0 or stop > self.shape[i] or stop < start:
raise Exception(
"Only possitive and in-bounds slicing supported: `{}`".format(
slices
)
)
if s.step is not None and s.step != 1:
raise Exception("Only slicing with step 1 supported")
final_slices.append(slice(start, stop))
else:
raise Exception(
"Invalid type `{}` in slicing, only integer or"
" slices are supported".format(type(s))
)
if squeeze:
return final_slices, squeeze_axis
return final_slices
def _ndindex(self, dims):
return itertools.product(*(range(d) for d in dims))
def _chunk_slice_iterator(self, slices, ndim):
indexes = []
nchunks = []
cslices = []
gslices = []
chunk_size = self.chunk_size
chunks = self.chunk_grid
for n, slc in enumerate(slices):
sstart = slc.start // chunk_size[n]
sstop = min((slc.stop - 1) // chunk_size[n], chunks[n] - 1)
if sstop < 0:
sstop = 0
pad_start = slc.start - sstart * chunk_size[n]
pad_stop = slc.stop - sstop * chunk_size[n]
_i = [] # index
_c = [] # chunk slices in current dimension
_g = [] # global slices in current dimension
for i in range(sstart, sstop + 1):
start = pad_start if i == sstart else 0
stop = pad_stop if i == sstop else chunk_size[n]
gchunk = i * chunk_size[n] - slc.start
_i += [i]
_c += [slice(start, stop)]
_g += [slice(gchunk + start, gchunk + stop)]
nchunks += [sstop - sstart + 1]
indexes += [_i]
cslices += [_c]
gslices += [_g]
return (
zip(
*(
(
indexes[n][i],
cslices[n][i],
(n < ndim or None) and gslices[n][i],
)
for n, i in enumerate(idx)
)
)
for idx in self._ndindex(nchunks)
)
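# A minimal end-to-end sketch (hypothetical helper; the path is a placeholder):
# create a chunked dataset on disk, write through the slicing interface and read
# the values back.
def _example_roundtrip(path="/tmp/survos_example_ds"):
    """Illustrative only: exercise Dataset.create and slice-based access."""
    ds = Dataset.create(path, shape=(64, 64, 64), dtype="float32", fillvalue=0)
    ds[0:4, :, :] = 1.0           # routed through set_data to per-chunk HDF5 files
    block = ds[0:4, 0:8, 0:8]     # stitched back together by get_data
    return block.mean()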
class DataChunk(object):
def __init__(self, idx, path, shape, dtype, fillvalue):
if not os.path.isfile(path):
raise Exception(
"Wrong initialization of a DataChunk({}): {}".format(idx, path)
)
self._idx = idx
self._path = path
self._shape = shape
self._size = np.prod(shape)
self._dtype = dtype
self._fillvalue = fillvalue
self._ndim = len(shape)
@property
def shape(self):
return self._shape
@property
def size(self):
return self._size
@property
def dtype(self):
return self._dtype
@property
def fillvalue(self):
return self._fillvalue
@property
def ndim(self):
return self._ndim
def get_data(self, slices=None):
if slices is None:
slices = slice(None)
with h5.File(self._path, "r") as f:
data = f["data"][slices]
return data
def set_data(self, values, slices=None):
if slices is None:
slices = slice(None)
with h5.File(self._path, "a") as f:
f["data"][slices] = values
def __getitem__(self, slices):
return self.get_data(slices=slices)
def __setitem__(self, slices, values):
self.set_data(values, slices=slices)
| 32.185358 | 88 | 0.541112 |
4a1cc4ebb81dac509432e579fc6467d015f635d6
| 14,838 |
py
|
Python
|
tests/test_schedulers.py
|
numberonewastefellow/apscheduler
|
092a999e0899c23f4ec3d77a082e196736a18ca0
|
[
"MIT"
] | null | null | null |
tests/test_schedulers.py
|
numberonewastefellow/apscheduler
|
092a999e0899c23f4ec3d77a082e196736a18ca0
|
[
"MIT"
] | null | null | null |
tests/test_schedulers.py
|
numberonewastefellow/apscheduler
|
092a999e0899c23f4ec3d77a082e196736a18ca0
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import sys
import threading
import time
from datetime import datetime, timedelta, timezone
from uuid import UUID
import anyio
import pytest
from anyio import fail_after
from pytest_mock import MockerFixture
from apscheduler.context import current_scheduler, current_worker, job_info
from apscheduler.enums import JobOutcome
from apscheduler.events import (
Event, JobAdded, ScheduleAdded, ScheduleRemoved, SchedulerStarted, SchedulerStopped, TaskAdded)
from apscheduler.exceptions import JobLookupError
from apscheduler.schedulers.async_ import AsyncScheduler
from apscheduler.schedulers.sync import Scheduler
from apscheduler.structures import Job, Task
from apscheduler.triggers.date import DateTrigger
from apscheduler.triggers.interval import IntervalTrigger
if sys.version_info >= (3, 9):
from zoneinfo import ZoneInfo
else:
from backports.zoneinfo import ZoneInfo
pytestmark = pytest.mark.anyio
async def dummy_async_job(delay: float = 0, fail: bool = False) -> str:
await anyio.sleep(delay)
if fail:
raise RuntimeError('failing as requested')
else:
return 'returnvalue'
def dummy_sync_job(delay: float = 0, fail: bool = False) -> str:
time.sleep(delay)
if fail:
raise RuntimeError('failing as requested')
else:
return 'returnvalue'
class TestAsyncScheduler:
async def test_schedule_job(self) -> None:
def listener(received_event: Event) -> None:
received_events.append(received_event)
if len(received_events) == 5:
event.set()
received_events: list[Event] = []
event = anyio.Event()
scheduler = AsyncScheduler(start_worker=False)
scheduler.events.subscribe(listener)
trigger = DateTrigger(datetime.now(timezone.utc))
async with scheduler:
await scheduler.add_schedule(dummy_async_job, trigger, id='foo')
with fail_after(3):
await event.wait()
# The scheduler was first started
received_event = received_events.pop(0)
assert isinstance(received_event, SchedulerStarted)
# Then the task was added
received_event = received_events.pop(0)
assert isinstance(received_event, TaskAdded)
assert received_event.task_id == 'test_schedulers:dummy_async_job'
# Then a schedule was added
received_event = received_events.pop(0)
assert isinstance(received_event, ScheduleAdded)
assert received_event.schedule_id == 'foo'
# assert received_event.task_id == 'task_id'
# Then that schedule was processed and a job was added for it
received_event = received_events.pop(0)
assert isinstance(received_event, JobAdded)
assert received_event.schedule_id == 'foo'
assert received_event.task_id == 'test_schedulers:dummy_async_job'
# Then the schedule was removed since the trigger had been exhausted
received_event = received_events.pop(0)
assert isinstance(received_event, ScheduleRemoved)
assert received_event.schedule_id == 'foo'
# Finally, the scheduler was stopped
received_event = received_events.pop(0)
assert isinstance(received_event, SchedulerStopped)
# There should be no more events on the list
assert not received_events
@pytest.mark.parametrize('max_jitter, expected_upper_bound', [
pytest.param(2, 2, id='within'),
pytest.param(4, 2.999999, id='exceed')
])
async def test_jitter(self, mocker: MockerFixture, timezone: ZoneInfo, max_jitter: float,
expected_upper_bound: float) -> None:
job_id: UUID | None = None
def job_added_listener(event: Event) -> None:
nonlocal job_id
assert isinstance(event, JobAdded)
job_id = event.job_id
job_added_event.set()
jitter = 1.569374
orig_start_time = datetime.now(timezone) - timedelta(seconds=1)
fake_uniform = mocker.patch('random.uniform')
fake_uniform.configure_mock(side_effect=lambda a, b: jitter)
async with AsyncScheduler(start_worker=False) as scheduler:
trigger = IntervalTrigger(seconds=3, start_time=orig_start_time)
job_added_event = anyio.Event()
scheduler.events.subscribe(job_added_listener, {JobAdded})
schedule_id = await scheduler.add_schedule(dummy_async_job, trigger,
max_jitter=max_jitter)
schedule = await scheduler.get_schedule(schedule_id)
assert schedule.max_jitter == timedelta(seconds=max_jitter)
# Wait for the job to be added
with fail_after(3):
await job_added_event.wait()
fake_uniform.assert_called_once_with(0, expected_upper_bound)
# Check that the job was created with the proper amount of jitter in its scheduled time
jobs = await scheduler.data_store.get_jobs({job_id})
assert jobs[0].jitter == timedelta(seconds=jitter)
assert jobs[0].scheduled_fire_time == orig_start_time + timedelta(seconds=jitter)
assert jobs[0].original_scheduled_time == orig_start_time
async def test_get_job_result_success(self) -> None:
async with AsyncScheduler() as scheduler:
job_id = await scheduler.add_job(dummy_async_job, kwargs={'delay': 0.2})
result = await scheduler.get_job_result(job_id)
assert result.job_id == job_id
assert result.outcome is JobOutcome.success
assert result.return_value == 'returnvalue'
async def test_get_job_result_error(self) -> None:
async with AsyncScheduler() as scheduler:
job_id = await scheduler.add_job(dummy_async_job, kwargs={'delay': 0.2, 'fail': True})
result = await scheduler.get_job_result(job_id)
assert result.job_id == job_id
assert result.outcome is JobOutcome.error
assert isinstance(result.exception, RuntimeError)
assert str(result.exception) == 'failing as requested'
async def test_get_job_result_nowait_not_yet_ready(self) -> None:
async with AsyncScheduler() as scheduler:
job_id = await scheduler.add_job(dummy_async_job, kwargs={'delay': 0.2})
with pytest.raises(JobLookupError):
await scheduler.get_job_result(job_id, wait=False)
async def test_run_job_success(self) -> None:
async with AsyncScheduler() as scheduler:
return_value = await scheduler.run_job(dummy_async_job)
assert return_value == 'returnvalue'
async def test_run_job_failure(self) -> None:
async with AsyncScheduler() as scheduler:
with pytest.raises(RuntimeError, match='failing as requested'):
await scheduler.run_job(dummy_async_job, kwargs={'fail': True})
async def test_contextvars(self) -> None:
def check_contextvars() -> None:
assert current_scheduler.get() is scheduler
assert current_worker.get() is scheduler.worker
info = job_info.get()
assert info.task_id == 'task_id'
assert info.schedule_id == 'foo'
assert info.scheduled_fire_time == scheduled_fire_time
assert info.jitter == timedelta(seconds=2.16)
assert info.start_deadline == start_deadline
assert info.tags == {'foo', 'bar'}
scheduled_fire_time = datetime.now(timezone.utc)
start_deadline = datetime.now(timezone.utc) + timedelta(seconds=10)
async with AsyncScheduler() as scheduler:
await scheduler.data_store.add_task(Task(id='task_id', func=check_contextvars))
job = Job(task_id='task_id', schedule_id='foo',
scheduled_fire_time=scheduled_fire_time, jitter=timedelta(seconds=2.16),
start_deadline=start_deadline, tags={'foo', 'bar'})
await scheduler.data_store.add_job(job)
result = await scheduler.get_job_result(job.id)
if result.outcome is JobOutcome.error:
raise result.exception
else:
assert result.outcome is JobOutcome.success
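# A minimal sketch (hypothetical helper; assumes an already running AsyncScheduler)
# of the pattern the tests above rely on: subscribe a listener, add a one-shot
# DateTrigger schedule and wait for the schedule to be removed once it fires.
async def _example_one_shot(scheduler: AsyncScheduler) -> None:
    """Illustrative only: mirror the event-driven setup used by test_schedule_job."""
    done = anyio.Event()
    scheduler.events.subscribe(lambda event: done.set(), {ScheduleRemoved})
    await scheduler.add_schedule(dummy_async_job, DateTrigger(datetime.now(timezone.utc)))
    with fail_after(3):
        await done.wait()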
class TestSyncScheduler:
def test_schedule_job(self):
def listener(received_event: Event) -> None:
received_events.append(received_event)
if len(received_events) == 5:
event.set()
received_events: list[Event] = []
event = threading.Event()
scheduler = Scheduler(start_worker=False)
scheduler.events.subscribe(listener)
trigger = DateTrigger(datetime.now(timezone.utc))
with scheduler:
scheduler.add_schedule(dummy_sync_job, trigger, id='foo')
event.wait(3)
# The scheduler was first started
received_event = received_events.pop(0)
assert isinstance(received_event, SchedulerStarted)
# Then the task was added
received_event = received_events.pop(0)
assert isinstance(received_event, TaskAdded)
assert received_event.task_id == 'test_schedulers:dummy_sync_job'
# Then a schedule was added
received_event = received_events.pop(0)
assert isinstance(received_event, ScheduleAdded)
assert received_event.schedule_id == 'foo'
# Then that schedule was processed and a job was added for it
received_event = received_events.pop(0)
assert isinstance(received_event, JobAdded)
assert received_event.schedule_id == 'foo'
assert received_event.task_id == 'test_schedulers:dummy_sync_job'
# Then the schedule was removed since the trigger had been exhausted
received_event = received_events.pop(0)
assert isinstance(received_event, ScheduleRemoved)
assert received_event.schedule_id == 'foo'
# Finally, the scheduler was stopped
received_event = received_events.pop(0)
assert isinstance(received_event, SchedulerStopped)
# There should be no more events on the list
assert not received_events
@pytest.mark.parametrize('max_jitter, expected_upper_bound', [
pytest.param(2, 2, id='within'),
pytest.param(4, 2.999999, id='exceed')
])
def test_jitter(self, mocker: MockerFixture, timezone: ZoneInfo, max_jitter: float,
expected_upper_bound: float) -> None:
job_id: UUID | None = None
def job_added_listener(event: Event) -> None:
nonlocal job_id
assert isinstance(event, JobAdded)
job_id = event.job_id
job_added_event.set()
jitter = 1.569374
orig_start_time = datetime.now(timezone) - timedelta(seconds=1)
fake_uniform = mocker.patch('random.uniform')
fake_uniform.configure_mock(side_effect=lambda a, b: jitter)
with Scheduler(start_worker=False) as scheduler:
trigger = IntervalTrigger(seconds=3, start_time=orig_start_time)
job_added_event = threading.Event()
scheduler.events.subscribe(job_added_listener, {JobAdded})
schedule_id = scheduler.add_schedule(dummy_async_job, trigger, max_jitter=max_jitter)
schedule = scheduler.get_schedule(schedule_id)
assert schedule.max_jitter == timedelta(seconds=max_jitter)
# Wait for the job to be added
job_added_event.wait(3)
fake_uniform.assert_called_once_with(0, expected_upper_bound)
# Check that the job was created with the proper amount of jitter in its scheduled time
jobs = scheduler.data_store.get_jobs({job_id})
assert jobs[0].jitter == timedelta(seconds=jitter)
assert jobs[0].scheduled_fire_time == orig_start_time + timedelta(seconds=jitter)
assert jobs[0].original_scheduled_time == orig_start_time
def test_get_job_result(self) -> None:
with Scheduler() as scheduler:
job_id = scheduler.add_job(dummy_sync_job)
result = scheduler.get_job_result(job_id)
assert result.outcome is JobOutcome.success
assert result.return_value == 'returnvalue'
def test_get_job_result_error(self) -> None:
with Scheduler() as scheduler:
job_id = scheduler.add_job(dummy_sync_job, kwargs={'delay': 0.2, 'fail': True})
result = scheduler.get_job_result(job_id)
assert result.job_id == job_id
assert result.outcome is JobOutcome.error
assert isinstance(result.exception, RuntimeError)
assert str(result.exception) == 'failing as requested'
def test_get_job_result_nowait_not_yet_ready(self) -> None:
with Scheduler() as scheduler:
job_id = scheduler.add_job(dummy_sync_job, kwargs={'delay': 0.2})
with pytest.raises(JobLookupError):
scheduler.get_job_result(job_id, wait=False)
def test_run_job_success(self) -> None:
with Scheduler() as scheduler:
return_value = scheduler.run_job(dummy_sync_job)
assert return_value == 'returnvalue'
def test_run_job_failure(self) -> None:
with Scheduler() as scheduler:
with pytest.raises(RuntimeError, match='failing as requested'):
scheduler.run_job(dummy_sync_job, kwargs={'fail': True})
def test_contextvars(self) -> None:
def check_contextvars() -> None:
assert current_scheduler.get() is scheduler
assert current_worker.get() is scheduler.worker
info = job_info.get()
assert info.task_id == 'task_id'
assert info.schedule_id == 'foo'
assert info.scheduled_fire_time == scheduled_fire_time
assert info.jitter == timedelta(seconds=2.16)
assert info.start_deadline == start_deadline
assert info.tags == {'foo', 'bar'}
scheduled_fire_time = datetime.now(timezone.utc)
start_deadline = datetime.now(timezone.utc) + timedelta(seconds=10)
with Scheduler() as scheduler:
scheduler.data_store.add_task(Task(id='task_id', func=check_contextvars))
job = Job(task_id='task_id', schedule_id='foo',
scheduled_fire_time=scheduled_fire_time, jitter=timedelta(seconds=2.16),
start_deadline=start_deadline, tags={'foo', 'bar'})
scheduler.data_store.add_job(job)
result = scheduler.get_job_result(job.id)
if result.outcome is JobOutcome.error:
raise result.exception
else:
assert result.outcome is JobOutcome.success
| 43.641176 | 99 | 0.663297 |
4a1cc543e2740a2a4a216c80b986b5664260f513
| 9,371 |
py
|
Python
|
pandapower/pypower/opf.py
|
Zamwell/pandapower
|
ce51946342109e969b87b60c8883d7eec02d3060
|
[
"BSD-3-Clause"
] | 1 |
2019-06-16T05:06:03.000Z
|
2019-06-16T05:06:03.000Z
|
pandapower/pypower/opf.py
|
Zamwell/pandapower
|
ce51946342109e969b87b60c8883d7eec02d3060
|
[
"BSD-3-Clause"
] | null | null | null |
pandapower/pypower/opf.py
|
Zamwell/pandapower
|
ce51946342109e969b87b60c8883d7eec02d3060
|
[
"BSD-3-Clause"
] | 1 |
2022-02-07T14:11:03.000Z
|
2022-02-07T14:11:03.000Z
|
# -*- coding: utf-8 -*-
# Copyright 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# Copyright (c) 2016-2019 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
"""Solves an optimal power flow.
"""
from time import time
from numpy import zeros, c_, shape
from pandapower.pypower.idx_brch import MU_ANGMAX
from pandapower.pypower.idx_bus import MU_VMIN
from pandapower.pypower.idx_gen import MU_QMIN
from pandapower.pypower.opf_args import opf_args2
from pandapower.pypower.opf_execute import opf_execute
from pandapower.pypower.opf_setup import opf_setup
def opf(ppc, ppopt):
"""Solves an optimal power flow.
Returns a C{results} dict.
The data for the problem can be specified in one of three ways:
1. a string (ppc) containing the file name of a PYPOWER case
which defines the data matrices baseMVA, bus, gen, branch, and
gencost (areas is not used at all, it is only included for
backward compatibility of the API).
2. a dict (ppc) containing the data matrices as fields.
3. the individual data matrices themselves.
The optional user parameters for user constraints (C{A, l, u}), user costs
(C{N, fparm, H, Cw}), user variable initializer (C{z0}), and user variable
limits (C{zl, zu}) can also be specified as fields in a case dict,
either passed in directly or defined in a case file referenced by name.
When specified, C{A, l, u} represent additional linear constraints on the
optimization variables, C{l <= A*[x z] <= u}. If the user specifies an C{A}
matrix that has more columns than the number of "C{x}" (OPF) variables,
then there are extra linearly constrained "C{z}" variables. For an
explanation of the formulation used and instructions for forming the
C{A} matrix, see the MATPOWER manual.
A generalized cost on all variables can be applied if input arguments
C{N}, C{fparm}, C{H} and C{Cw} are specified. First, a linear transformation
of the optimization variables is defined by means of C{r = N * [x z]}.
Then, to each element of C{r} a function is applied as encoded in the
C{fparm} matrix (see MATPOWER manual). If the resulting vector is named
C{w}, then C{H} and C{Cw} define a quadratic cost on w:
C{(1/2)*w'*H*w + Cw * w}. C{H} and C{N} should be sparse matrices and C{H}
should also be symmetric.
The optional C{ppopt} vector specifies PYPOWER options. If the OPF
algorithm is not explicitly set in the options PYPOWER will use the default
solver, based on a primal-dual interior point method. For the AC OPF this
is C{OPF_ALG = 560}. For the DC OPF, the default is C{OPF_ALG_DC = 200}.
See L{ppoption} for more details on the available OPF solvers and other OPF
options and their default values.
The solved case is returned in a single results dict (described
below). Also returned are the final objective function value (C{f}) and a
flag which is C{True} if the algorithm was successful in finding a solution
(success). Additional optional return values are an algorithm specific
return status (C{info}), elapsed time in seconds (C{et}), the constraint
vector (C{g}), the Jacobian matrix (C{jac}), and the vector of variables
(C{xr}) as well as the constraint multipliers (C{pimul}).
The single results dict is a PYPOWER case struct (ppc) with the
usual baseMVA, bus, branch, gen, gencost fields, along with the
following additional fields:
- C{order} see 'help ext2int' for details of this field
- C{et} elapsed time in seconds for solving OPF
- C{success} 1 if solver converged successfully, 0 otherwise
- C{om} OPF model object, see 'help opf_model'
- C{x} final value of optimization variables (internal order)
- C{f} final objective function value
- C{mu} shadow prices on ...
- C{var}
- C{l} lower bounds on variables
- C{u} upper bounds on variables
- C{nln}
- C{l} lower bounds on nonlinear constraints
- C{u} upper bounds on nonlinear constraints
- C{lin}
- C{l} lower bounds on linear constraints
- C{u} upper bounds on linear constraints
- C{g} (optional) constraint values
- C{dg} (optional) constraint 1st derivatives
- C{df} (optional) obj fun 1st derivatives (not yet implemented)
- C{d2f} (optional) obj fun 2nd derivatives (not yet implemented)
- C{raw} raw solver output in form returned by MINOS, and more
- C{xr} final value of optimization variables
- C{pimul} constraint multipliers
- C{info} solver specific termination code
- C{output} solver specific output information
- C{alg} algorithm code of solver used
- C{var}
- C{val} optimization variable values, by named block
- C{Va} voltage angles
- C{Vm} voltage magnitudes (AC only)
- C{Pg} real power injections
- C{Qg} reactive power injections (AC only)
- C{y} constrained cost variable (only if have pwl costs)
- (other) any user defined variable blocks
- C{mu} variable bound shadow prices, by named block
- C{l} lower bound shadow prices
- C{Va}, C{Vm}, C{Pg}, C{Qg}, C{y}, (other)
- C{u} upper bound shadow prices
- C{Va}, C{Vm}, C{Pg}, C{Qg}, C{y}, (other)
- C{nln} (AC only)
- C{mu} shadow prices on nonlinear constraints, by named block
- C{l} lower bounds
- C{Pmis} real power mismatch equations
- C{Qmis} reactive power mismatch equations
- C{Sf} flow limits at "from" end of branches
- C{St} flow limits at "to" end of branches
- C{u} upper bounds
- C{Pmis}, C{Qmis}, C{Sf}, C{St}
- C{lin}
- C{mu} shadow prices on linear constraints, by named block
- C{l} lower bounds
            - C{Pmis}     real power mismatch equations (DC only)
- C{Pf} flow limits at "from" end of branches (DC only)
- C{Pt} flow limits at "to" end of branches (DC only)
- C{PQh} upper portion of gen PQ-capability curve(AC only)
- C{PQl} lower portion of gen PQ-capability curve(AC only)
- C{vl} constant power factor constraint for loads
- C{ycon} basin constraints for CCV for pwl costs
- (other) any user defined constraint blocks
- C{u} upper bounds
- C{Pmis}, C{Pf}, C{Pf}, C{PQh}, C{PQl}, C{vl}, C{ycon},
- (other)
- C{cost} user defined cost values, by named block
@see: L{runopf}, L{dcopf}, L{uopf}, L{caseformat}
@author: Ray Zimmerman (PSERC Cornell)
@author: Carlos E. Murillo-Sanchez (PSERC Cornell & Universidad
Autonoma de Manizales)
@author: Richard Lincoln
"""
##----- initialization -----
t0 = time() ## start timer
## process input arguments
ppc, ppopt = opf_args2(ppc, ppopt)
## add zero columns to bus, gen, branch for multipliers, etc if needed
nb = shape(ppc['bus'])[0] ## number of buses
nl = shape(ppc['branch'])[0] ## number of branches
ng = shape(ppc['gen'])[0] ## number of dispatchable injections
if shape(ppc['bus'])[1] < MU_VMIN + 1:
ppc['bus'] = c_[ppc['bus'], zeros((nb, MU_VMIN + 1 - shape(ppc['bus'])[1]))]
if shape(ppc['gen'])[1] < MU_QMIN + 1:
ppc['gen'] = c_[ppc['gen'], zeros((ng, MU_QMIN + 1 - shape(ppc['gen'])[1]))]
if shape(ppc['branch'])[1] < MU_ANGMAX + 1:
ppc['branch'] = c_[ppc['branch'], zeros((nl, MU_ANGMAX + 1 - shape(ppc['branch'])[1]))]
##----- convert to internal numbering, remove out-of-service stuff -----
# ppc = ext2int(ppc)
##----- construct OPF model object -----
om = opf_setup(ppc, ppopt)
##----- execute the OPF -----
results, success, raw = opf_execute(om, ppopt)
##----- revert to original ordering, including out-of-service stuff -----
# results = int2ext(results)
## zero out result fields of out-of-service gens & branches
# if len(results['order']['gen']['status']['off']) > 0:
# results['gen'][ ix_(results['order']['gen']['status']['off'], [PG, QG, MU_PMAX, MU_PMIN]) ] = 0
#
# if len(results['order']['branch']['status']['off']) > 0:
# results['branch'][ ix_(results['order']['branch']['status']['off'], [PF, QF, PT, QT, MU_SF, MU_ST, MU_ANGMIN, MU_ANGMAX]) ] = 0
##----- finish preparing output -----
et = time() - t0 ## compute elapsed time
results['et'] = et
results['success'] = success
results['raw'] = raw
return results
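# A minimal usage sketch (hypothetical wrapper): `ppc` is assumed to be a
# PYPOWER-style case dict already in memory and `ppopt` an options vector as
# described in the docstring above; both are assumptions, not constructed here.
def _example_solve(ppc, ppopt):
    """Illustrative only: run the OPF and report the objective on success."""
    results = opf(ppc, ppopt)
    return results["f"] if results["success"] else None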
| 48.05641 | 137 | 0.602497 |
4a1cc54af9821ae3267f7691a7f2a03590bc0000
| 4,063 |
py
|
Python
|
Followup v0.80/fup/views/createbatch.py
|
iaiting/Flask-and-pywebview-followup-application-gui
|
b665334403b4a8471b5f28054ee2dc7adda7d9fc
|
[
"MIT"
] | null | null | null |
Followup v0.80/fup/views/createbatch.py
|
iaiting/Flask-and-pywebview-followup-application-gui
|
b665334403b4a8471b5f28054ee2dc7adda7d9fc
|
[
"MIT"
] | null | null | null |
Followup v0.80/fup/views/createbatch.py
|
iaiting/Flask-and-pywebview-followup-application-gui
|
b665334403b4a8471b5f28054ee2dc7adda7d9fc
|
[
"MIT"
] | 1 |
2019-12-25T11:57:45.000Z
|
2019-12-25T11:57:45.000Z
|
from flask import Blueprint
from flask import render_template, request, redirect, url_for
#Extra imports
import os
# pylint: disable=E0611
from werkzeug.utils import secure_filename
#App imports
from fup.utils.jsoninfo import configInfo, sessionInfo
from fup.utils.commun import generateID, current_date, movetobin
from fup.models.batch import addBatch
from fup.helpers.batch import importFollowup, extractFollowup, importFileshistory, extractFileshistory
from fup.helpers.files import autoNewDirs, updateDBforNewFiles, unassignedtoPrepfiles
createbatch = Blueprint('createbatch', __name__)
@createbatch.route("/import-followup")
def applyimportfollowup():
try:
importFollowup()
return redirect(url_for('comm.showSuccessPage'))
except Exception as e:
errormessage = str("Something went wrong when importing the excel into the database. Got: {}".format(e))
return redirect(url_for('comm.showFailedPage', errormessage=errormessage))
@createbatch.route("/extract-followup")
def applyextractFollowup():
try:
extractFollowup()
return redirect(url_for('comm.showSuccessPage'))
except Exception as e:
errormessage = str("Something went wrong when extracting the excel from database. Got: {}".format(e))
return redirect(url_for('comm.showFailedPage', errormessage=errormessage))
@createbatch.route("/import-fileshistory")
def applyimportfileshistory():
try:
importFileshistory()
return redirect(url_for('comm.showSuccessPage'))
except Exception as e:
errormessage = str("Something went wrong when importing excel in database. Got: {}".format(e))
return redirect(url_for('comm.showFailedPage', errormessage=errormessage))
@createbatch.route("/extract-fileshistory")
def applyextractfileshistory():
try:
extractFileshistory()
return redirect(url_for('comm.showSuccessPage'))
except Exception as e:
errormessage = str("Something went wrong when extracting the excel from database. Got: {}".format(e))
return redirect(url_for('comm.showFailedPage', errormessage=errormessage))
@createbatch.route("/auto-create-unassigned")
def autoCreateUnassignedfromNew():
infoDictli, auto, tobinli = autoNewDirs()
if isinstance(infoDictli, str):
errormessage = str(infoDictli)
context = {'failed': errormessage}
return render_template('failed.html', context=context)
elif isinstance(infoDictli, list):
if len(infoDictli) == 0:
errormessage = "No new files found in NEW folder!"
return redirect(url_for('comm.showFailedPage', errormessage=errormessage))
else:
for infoaddDict in infoDictli:
response = addBatch(infoaddDict, auto)
if isinstance(response, str):
movetobin(tobinli)
errormessage = str(response)
return redirect(url_for('comm.showFailedPage', errormessage=errormessage))
unassignedtoPrepfiles() #Copy files from UNASSIGNED to PREPARED FILES
responseMove = movetobin(tobinli)
if responseMove == True:
try:
                response_newFiles = updateDBforNewFiles()  # verify if new files were added to an existing batch; if so, update the db
if isinstance(response_newFiles, str):
print("response_newFiles error: ", response_newFiles)
#return response_newFiles
except Exception as e:
print("[ERROR] updateDBforNewFiles: ",e)
pass
return redirect(url_for('comm.showSuccessPage'))
else:
errormessage = "These where deleted from NEW folder --> " + str(responseMove)
return redirect(url_for('comm.showFailedPage', errormessage=errormessage))
#
| 31.992126 | 131 | 0.65272 |