repo_name
stringlengths 7
111
| __id__
int64 16.6k
19,705B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
151
| content_id
stringlengths 40
40
| detected_licenses
list | license_type
stringclasses 2
values | repo_url
stringlengths 26
130
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 14.6k
687M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 12
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
10.2M
⌀ | gha_stargazers_count
int32 0
178k
⌀ | gha_forks_count
int32 0
88.9k
⌀ | gha_open_issues_count
int32 0
2.72k
⌀ | gha_language
stringlengths 1
16
⌀ | gha_archived
bool 1
class | gha_disabled
bool 1
class | content
stringlengths 10
2.95M
| src_encoding
stringclasses 5
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 10
2.95M
| extension
stringclasses 19
values | num_repo_files
int64 1
202k
| filename
stringlengths 4
112
| num_lang_files
int64 1
202k
| alphanum_fraction
float64 0.26
0.89
| alpha_fraction
float64 0.2
0.89
| hex_fraction
float64 0
0.09
| num_lines
int32 1
93.6k
| avg_line_length
float64 4.57
103
| max_line_length
int64 7
931
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
TheKaushikGoswami/lyq-hangout-modmail | 18,932,215,860,652 | 80e915944e444f731f4be912444f44f69c864e59 | b7f6954cc86515eb28c8ac0788e4f3228822ae9c | /main.py | eb68957b0b5e6bde21397a8cc5e6b560f1f7da98 | [
"Apache-2.0"
]
| permissive | https://github.com/TheKaushikGoswami/lyq-hangout-modmail | 0aabaae31a06a19b6bb4630464e530f0ce169765 | 78b8bbb0e113123f4e074cf0bcc79729b74c27a3 | refs/heads/main | 2023-05-23T06:20:16.086489 | 2021-06-13T02:45:56 | 2021-06-13T02:45:56 | 376,301,531 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from discord.ext import commands
import discord
import os

# Request the default gateway intents plus the privileged members intent,
# which the modmail flow needs in order to resolve guild members from DMs.
intents = discord.Intents.default()
# we need members intent too
intents.members = True

bot = commands.Bot(command_prefix="-", intents=intents)


@bot.event
async def on_ready():
    """Set the bot's presence and announce readiness once connected."""
    # Note: the original used an f-string with no placeholders; a plain
    # string literal is equivalent and clearer.
    await bot.change_presence(
        status=discord.Status.online,
        activity=discord.Activity(
            type=discord.ActivityType.playing,
            name="DM Me to Open Modmail | Karuta Shop | Made with ❤️ by KAUSHIK"))
    print("The bot is online!")

bot.load_extension("cogs.onMessage")

# Read the token from the environment instead of committing a secret to
# source control; the original placeholder literal is kept as a fallback
# so existing behavior is unchanged when the variable is unset.
bot.run(os.environ.get("DISCORD_TOKEN", "your-token-here"))
| UTF-8 | Python | false | false | 548 | py | 3 | main.py | 2 | 0.729779 | 0.729779 | 0 | 17 | 30 | 189 |
itk-robotics/reading-buddy | 18,193,481,479,417 | 341321783c980a98af5dd15177450e7c130690c6 | a5bcb456a0510139772c8ddf8d9812187a018386 | /utilities/sendMail.py | fd52fe2ffc19e4331545568359f690af5e519b18 | []
| no_license | https://github.com/itk-robotics/reading-buddy | 9119602cee656f2c5b1d7d1c81e48dc3d730f9f5 | b960b5ec11af5a2cc26f30c127ff2917798dad9c | refs/heads/master | 2020-03-28T12:13:10.155807 | 2019-01-29T13:15:47 | 2019-01-29T13:15:47 | 148,279,513 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """code pasted from choregraphe box"""
import sys, os
import smtplib, email
class choregrapheMail(object):
    """Send an e-mail with an optional attachment over SMTP + STARTTLS.

    Python 2 code pasted from a Choregraphe box (NAO/Pepper runtime);
    it uses the legacy ``email.MIMEMultipart`` module spellings and
    Python 2 ``print`` statements.
    """

    def mail(self, email_user, to, subject, text, attach, email_pwd, smtp, port):
        # Build a multipart MIME message: plain-text body plus an optional
        # base64-encoded attachment.
        msg = email.MIMEMultipart.MIMEMultipart()
        msg['From'] = email_user
        msg['To'] = to
        msg['Subject'] = subject
        msg.attach(email.MIMEText.MIMEText(text))
        if attach:
            part = email.MIMEBase.MIMEBase('application', 'octet-stream')
            # NOTE(review): the file handle opened here is never closed.
            part.set_payload(open(attach, 'rb').read())
            email.Encoders.encode_base64(part)
            part.add_header('Content-Disposition',
                    'attachment; filename="%s"' % os.path.basename(attach))
            msg.attach(part)
        # An empty port string means "use smtplib's default SMTP port".
        if( port != "" ):
            mailServer = smtplib.SMTP(smtp, port)
        else:
            mailServer = smtplib.SMTP(smtp)
        # EHLO / STARTTLS / EHLO handshake before authenticating.
        mailServer.ehlo()
        mailServer.starttls()
        mailServer.ehlo()
        mailServer.login(email_user, email_pwd)
        mailServer.sendmail(email_user, to, msg.as_string())
        mailServer.close()

    def sendMessage(self,sText="text body"):
        """Send *sText* as the body of a pre-configured alert e-mail.

        NOTE(review): account, recipient and the password are hard-coded
        below -- these credentials are committed to source control and
        should be moved to configuration/environment before reuse.
        """
        print "preparing email..."
        sEmailUser = "itk.norma@outlook.dk"
        aTo = "andkr@aarhus.dk"
        sSubject = "Pepper service alert"
        sAttachedFilePath = ""
        sPwd = "kjhg3298f!(4"
        sSmtp = "smtp-mail.outlook.com"
        sPort = 587
        # Validate the port before attempting to send.
        try:
            sPort = int( sPort )
            bValidPort = ( sPort >= 0 and sPort <= 65535 )
        except:
            bValidPort = False
        if( not bValidPort ):
            raise Exception( str(sPort) + " is not a valid port number to use to send e-mail. It must be an integer between 0 and 65535. Please check that the port parameter of the box is correct." )
        try:
            self.mail(sEmailUser,
                    aTo,
                    sSubject,
                    sText,
                    sAttachedFilePath,
                    sPwd,
                    sSmtp,
                    sPort)
        except smtplib.SMTPAuthenticationError as e:
            # Re-raise with the server's SMTP code/message for diagnostics.
            raise(Exception("Authentication error, server answered : [%s] %s" % (e.smtp_code, e.smtp_error)))
        print "email sent"
| UTF-8 | Python | false | false | 2,261 | py | 29 | sendMail.py | 7 | 0.544449 | 0.534719 | 0 | 68 | 31.25 | 199 |
joy13975/elfin | 1,460,288,921,833 | 7e8619c2dff23110fd87448fd39b42e629dd9b64 | 8f7f7d98515b5c05367e5805a141e720bad2b1f0 | /tests/helper.py | 9db06d497aab7953557c2b19341950a046b4ced8 | [
"MIT"
]
| permissive | https://github.com/joy13975/elfin | 5b8d549344e0dc1f2976811ad65bd50d2c832b63 | affa9b5b5cf359409917acc44e3f4bb8dda5596c | refs/heads/master | 2020-07-03T19:54:35.994063 | 2020-05-09T09:27:03 | 2020-05-09T09:27:03 | 93,900,113 | 4 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
import pytest
import importlib
from functools import partial
def _test(module_name=None, error_type=None, module_test_callback=None, assert_callback=None, package_name=None):
    """Import ``package_name.module_name`` and verify it raises ``error_type``.

    Every argument is required; they default to ``None`` only so callers
    (and ``functools.partial`` bindings) can pass them by keyword in any
    order.  The imported module is handed to ``module_test_callback``
    inside a ``pytest.raises`` context, and the captured exception info is
    passed to ``assert_callback`` for the final check.
    """
    required = (module_name, error_type, module_test_callback,
                assert_callback, package_name)
    if any(argument is None for argument in required):
        raise ValueError('Insufficient arguments supplied to helper._test()')

    with pytest.raises(error_type) as excinfo:
        dotted_name = '.'.join([package_name, module_name])
        module_test_callback(importlib.import_module(dotted_name))
    assert assert_callback(excinfo)
def _test_error_str(module_name, error_type=None, error_str_search=None, package_name=None):
    """Run a module's ``main()`` and require ``error_str_search`` in the error.

    Thin adapter over :func:`_test` that executes the imported module's
    ``main()`` entry point and checks that the raised exception's string
    representation contains ``error_str_search``.
    """
    def run_main(module):
        module.main()

    def message_matches(excinfo):
        return error_str_search in str(excinfo.value)

    _test(
        module_name=module_name,
        error_type=error_type,
        module_test_callback=run_main,
        assert_callback=message_matches,
        package_name=package_name,
    )
# Pre-bound variant: importing a script module and calling main(['--help'])
# must terminate via SystemExit with exit code 0.
_test_script_main = partial(
    _test,
    error_type=SystemExit,
    module_test_callback=lambda mod: mod.main(['--help']),
    assert_callback=lambda se: se.value.code == 0  # --help should return 0
)

# Pre-bound variant: a module that must not be executed directly is expected
# to raise RuntimeError whose message mentions 'executed'.
_test_non_executable = partial(
    _test_error_str,
    error_type=RuntimeError,
    error_str_search='executed'
)
zimeon/sbrew | 893,353,214,388 | 59c4b2bf31a867e49fc784e894e9c5663db526e2 | 2353a9fb85b96306309dc1d8f35bfaf01edcd2ab | /brew149_orval.py | 168200a2dee128dddae29221266f13082a0160cf | []
| no_license | https://github.com/zimeon/sbrew | 063268d1ca6e5d41dda32e7dce04511926b2a479 | 7d88b4c2171e4183b4267da86abf1d140de78920 | refs/heads/main | 2022-01-26T02:06:51.219157 | 2022-01-23T21:52:52 | 2022-01-23T21:52:52 | 20,242,287 | 0 | 0 | null | false | 2021-07-22T03:06:15 | 2014-05-28T02:15:39 | 2020-08-05T21:41:25 | 2021-07-22T03:06:15 | 329 | 1 | 0 | 0 | Python | false | false | #!/usr/bin/env python
from sbrew import *
# Build the recipe end-to-end: infusion mash -> batch sparge -> boil -> ferment.
r = Recipe()
r.name = "Orval-ish Brett Beer"

# ithaca water
# Mash step: grains, strike water, and water chemistry.
m = InfusionMash()
m.ingredient('grain', 'pilsener (briess)', '10.0lb', color='1.2L')
m.ingredient('grain', 'caravienne (briess)', '1.25lb', color='20L')
m.ingredient('water', 'strike', '5.5gal')
m.ingredient('misc', 'gypsum', '14g')
m.property('temp', '152F')
m.property('t_mashtun', '60F')
m.solve()
r.add(m)

# Sparge step: collect the pre-boil wort volume.
s = BatchSparge(start=m)
s.property('wort_volume', '7.25gal')
s.solve()
r.add(s)

# Boil step: 90 min boil with hop additions and late sugar.
b = Boil(start=s)
b.time = Quantity('90min')
b.ingredient('hops', 'hallertau', '25IBU', time='60min', aa='4.0%AA')
b.ingredient('hops', 'styrian goldings', '1oz', time='15min', aa='3.5%AA')
b.ingredient('hops', 'styrian goldings', '1oz', time='0min', aa='3.5%AA')
b.ingredient('misc', 'irish moss', '1tsp', time='15min')
b.ingredient('sucrose', 'table sugar', '1.0lb', time='5min')
b.property('boil_end_volume', '6.5gal')
b.solve()
r.add(b)

# Fermentation: primary yeast, then Brettanomyces after racking; dry hop.
f = Ferment(start=b)
f.ingredient('yeast', 'WLP510 Belgian Bastogne Ale', '1pack')
f.ingredient('yeast', 'WY5112 Brett Brux (after racking)', '1pack')
f.ingredient('hops', 'styrian goldings', '1oz', time='0min', aa='3.5%AA')
f.property('atten', '80.0%atten')
r.add(f)

# Solve the whole recipe chain and print the summary.
r.solve()
print(r)
| UTF-8 | Python | false | false | 1,230 | py | 87 | brew149_orval.py | 85 | 0.650407 | 0.599187 | 0 | 44 | 26.954545 | 74 |
aleph2c/miros-xml | 2,018,634,658,089 | 912f9c07f3466607b9b246de0ecac2aa9f63e424 | cb641e9db2c8604caadc180d141369e7c56a151f | /experiment/4th_example.py | 54fc0f47155c6a275edeee36b91dc330b2ab60cb | []
| no_license | https://github.com/aleph2c/miros-xml | 7873f05c6cf3a49dfe55fe38e3b9e93bcaff2868 | 552f2ab5bdf9a70741cb3e306880e24231d8147e | refs/heads/master | 2023-07-06T14:16:02.706665 | 2021-08-13T12:50:35 | 2021-08-13T12:54:54 | 228,228,973 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import re
import pdb
import time
import inspect
import logging
import pprint as xprint
from functools import wraps
from functools import partial
from functools import lru_cache
from collections import namedtuple
from miros import Event
from miros import spy_on
from miros import signals
from miros import ActiveObject
from miros import return_status
from miros import HsmWithQueues
event_to_investigate = 'E2'
def pp(item):
  '''Debug helper: pretty-print *item* via the stdlib pprint module
  (imported in this file as ``xprint``).'''
  xprint.pprint(item)
# Payload carried by the INIT_META_SIGNAL/EXIT_META_SIGNAL "onion" events:
#   n: hop counter (payload_string renders n and n-1 per layer)
#   event: the next, inner meta event (None terminates the onion)
#   state / previous_state: target and origin state functions
#   previous_signal: signal that preceded this layer
#   springer: signal name that originally launched the meta event
META_SIGNAL_PAYLOAD = namedtuple("META_SIGNAL_PAYLOAD",
  ['n', 'event', 'state', 'previous_state', 'previous_signal', 'springer'])

# Named mirror of the tuple returned by inspect.getframeinfo, used for
# caller reflection in Region.function_name and the *_queues_string helpers.
FrameData = namedtuple('FrameData', [
  'filename',
  'line_number',
  'function_name',
  'lines',
  'index'])
@lru_cache(maxsize=128)
def f_to_s(fn):
  '''Return the printable name of a state function (memoized).'''
  function_name = fn.__name__
  return function_name
@lru_cache(maxsize=128)
def s_to_s(event_or_signal_number):
  '''signal to str

  Accept a raw signal number (int), a signal name (str), or an Event-like
  object carrying a ``signal_name`` attribute, and return the signal name.
  Results are memoized, so arguments must be hashable.
  '''
  # Idiom fix: isinstance() instead of the type(x) == T anti-pattern
  # (also accepts int/str subclasses, which the exact-type check rejected).
  if isinstance(event_or_signal_number, int):
    signal_name = signals.name_for_signal(event_or_signal_number)
  elif isinstance(event_or_signal_number, str):
    signal_name = event_or_signal_number
  else:
    signal_name = event_or_signal_number.signal_name
  return signal_name
def proto_investigate(r, e, _e=None, springer=None):
  '''Used for WTF investigations

  **Note**:
     Steps (takes about 10 minutes to set this up):
       1. print(ps(_e)) to see where the meta event is suppose to go
       2. place markers at each step, track in notes
       3. place an investigate call at each step

  **Args**:
     | ``r`` (Region): region
     | ``e`` (Event): initial event
     | ``springer=None`` (str): signal name of event that started the meta event
     | ``_e=None`` (Event): subsequent event

  **Example(s)**:

    .. code-block:: python

       # place this on every marker, the look at the logs
       investigate(r, e, 'I1', _e)

  '''
  if springer is not None:
    # Find the springer recorded on whichever event carries one; the
    # subsequent event ``_e`` wins if both have it.
    springer_in_event = None
    if (hasattr(e, 'payload') and hasattr(e.payload, 'springer')):
      springer_in_event = e.payload.springer
    if (hasattr(_e, 'payload') and hasattr(_e.payload, 'springer')):
      springer_in_event = _e.payload.springer

    # Only emit instrumentation when this event chain was started by the
    # signal under investigation.
    if springer_in_event == springer:
      # Use the onion hop counter n from e, else from _e, else 0.
      if hasattr(e, 'payload') and hasattr(e.payload, 'n'):
        n = e.payload.n
      else:
        if hasattr(_e, 'payload') and hasattr(_e.payload, 'n'):
          n = _e.payload.n
        else:
          n = 0
      # Dump active states, both events, and all region queues to the spy log.
      r.scribble("{}: {}".format(n, r.outmost.active_states()))
      r.scribble("{}: {}".format(n, ps(e)))
      if _e is not None:
        r.scribble("{}: {}".format(n, ps(_e)))
      r.scribble("{}: {}".format(n, r.outmost.rqs()))
investigate = partial(proto_investigate, springer=event_to_investigate)
def payload_string(e):
  '''Render an event as a string for debugging.

  A plain event renders as its signal name.  A meta event -- one whose
  payload nests further events -- renders as one line per onion layer,
  each inner layer indented one extra space:

     [n=..]::<signal>:<state> [n=..]::<previous_signal>:<previous_state> ->

  **Args**:
     | ``e`` (Event): The event to reflect upon

  **Returns**:
     (str): A string describing the event

  **Example(s)**:

  .. code-block:: python

     # ps has been aliased to payload_string
     print(ps(Event(signal=signals.hello_world)) # hello_world

  '''
  if e.payload is None:
    return "{}".format(e.signal_name)

  indent = ""
  layers = []
  while True:
    layers.append("{}[n={}]::{}:{} [n={}]::{}:{} ->\n".format(
      indent,
      e.payload.n,
      e.signal_name,
      f_to_s(e.payload.state),
      e.payload.n - 1,
      s_to_s(e.payload.previous_signal),
      f_to_s(e.payload.previous_state),
    ))
    if e.payload.event is None:
      break
    e = e.payload.event
    indent += " "
  return "".join(layers)
# This is a debug function, so we want the name short
# (alias used throughout the file and in scribble/log output).
ps = payload_string
def pprint(value):
  '''Plain print wrapper.  Redefining the name ``pprint`` is safe here
  because the stdlib pprint module is imported as ``xprint``.'''
  print(value)
def state(fn):
  '''Statechart state function wrapper, provides instrumentation and
  dynamically assigns the inner attribute.

  Before delegating to the wrapped state function (via miros ``spy_on``),
  it sets ``chart.inner`` to the Regions object registered under this
  state's name, or None when the state owns no regions, and records the
  state's name in ``chart.current_function_name``.
  '''
  @wraps(fn)
  def _state(chart, *args):
    fn_as_s = fn.__name__
    # An injector state has a Regions object registered under its own name.
    if fn_as_s not in chart.regions:
      chart.inner = None
    else:
      chart.inner = chart.regions[fn_as_s]
    chart.current_function_name = fn_as_s
    status = spy_on(fn)(chart, *args)
    return status
  return _state
def othogonal_state(fn):
  '''Orthogonal component state function wrapper, provides instrumentation
  and dynamically assigns the inner attribute.

  NOTE(review): the name is missing an "r" ("othogonal" sic); it is kept
  because every orthogonal state in the file is decorated with it.
  '''
  @wraps(fn)
  def _pspy_on(region, *args):
    # The outmost chart uses the plain `state` wrapper instead.
    if type(region) == XmlChart:
      return state(fn)(region, *args)

    # dynamically assign the inner attribute
    fn_as_s = f_to_s(fn)
    if fn_as_s not in region.inners:
      region.inners[fn_as_s] = None
    if fn_as_s in region.outmost.regions:
      region.inners[fn_as_s] = region.outmost.regions[fn_as_s]

    # these can be trampled as a side effect of a search (meta_init, meta_trans)
    # so make sure you salt their results away when you use these functions
    region.inner = region.inners[fn_as_s]
    region.current_function_name = fn_as_s

    # instrument the region
    if region.instrumented:
      status = spy_on(fn)(region, *args)  # call to state function here
      # Forward this run-to-completion step's spy lines to the outmost
      # chart's live spy, filtering out the search-noise entries.
      for line in list(region.rtc.spy):
        m = re.search(r'SEARCH_FOR_SUPER_SIGNAL', str(line))
        if not m:
          if hasattr(region, "outmost"):
            region.outmost.live_spy_callback(
              "[{}] {}".format(region.name, line))
          else:
            region.live_spy_callback(
              "[{}] {}".format(region.name, line))
      region.rtc.spy.clear()
    else:
      # Uninstrumented: call the raw state function with the last event.
      e = args[0] if len(args) == 1 else args[-1]
      status = fn(region, e)  # call to state function here

    return status
  return _pspy_on
Reflections = []
class Region(HsmWithQueues):
  '''One orthogonal component of a parallel region: an HSM driven by the
  queues of its sibling regions via a shared Regions container.'''

  def __init__(self,
    name, starting_state, outmost, outer, same,
    final_event, under_hidden_state_function,
    region_state_function, over_hidden_state_function,
    instrumented=True):
    '''Create an orthogonal component HSM

    **Args**:
       | ``name`` (str): The name of the region, naming follows a convention
       | ``starting_state`` (fn): The starting state of the region
       | ``outmost`` (InstrumentedActiveObject): The statechart that is
       |                                         connected to a driving thread
       | ``outer`` (Regions): A Regions object equivalent to an
       |                      outer parallel region
       | ``same`` (Regions): A Regions object equivalent to this object's
       |                     parallel region.
       | ``final_event`` (Event): The event that will be fired with all states
       |                          in this parallel part of the statechart are in
       |                          their final states.
       | ``under_hidden_state_function`` (fn): The inert state for this Region
       | ``region_state_function`` (fn): The state which contains the
       |                                 programmable init feature
       | ``over_hidden_state_function`` (fn): The state that can force a
       |                                      transition to the region_state_function
       | ``instrumented=True`` (bool): Do we want instrumentation?

    **Returns**:
       (Region): This HSM

    **Example(s)**:

      .. code-block:: python

        # from within the Regions class
        region =\\
          Region(
            name='bob',
            starting_state=under_hidden_state_function,
            outmost=self.outmost,
            outer=outer,
            same=self,
            final_event = Event(signal='bob_final'),
            under_hidden_state_function = under_hidden_state_function,
            region_state_function = region_state_function,
            over_hidden_state_function = over_hidden_state_function,
          )

    '''
    super().__init__()
    self.name = name
    self.starting_state = starting_state
    self.final_event = final_event
    # The three convention-named state functions are kept in a dict so that
    # has_state/get_region can compare against them by name.
    self.fns = {}
    self.fns['under_hidden_state_function'] = under_hidden_state_function
    self.fns['region_state_function'] = region_state_function
    self.fns['over_hidden_state_function'] = over_hidden_state_function
    self.instrumented = instrumented
    self.bottom = self.top

    self.outmost = outmost
    self.outer = outer
    self.same = same
    # The inners dict will be indexed by state function names as strings.
    # It will be populated as the state machine is run, by the orthogonal_state
    # decorator. This collection will be used to provide the 'inner' attribute
    # with its regions object if the function using this attribute is an
    # injector
    self.inners = {}
    self.current_function_name = None  # dynamically assigned

    assert callable(self.fns['under_hidden_state_function'])
    assert callable(self.fns['region_state_function'])
    assert callable(self.fns['over_hidden_state_function'])

    self.final = False

    # this will be populated by the 'link' command have each
    # region has been added to the regions object
    self.regions = []

  def scribble(self, string):
    '''Add some state context to the spy instrumentation'''
    # the current_function_name is set by the orthogonal_state decorator
    if self.outmost.live_spy and self.outmost.instrumented:
      self.outmost.live_spy_callback("[{}] {}".format(
        self.current_function_name, string))

  def post_p_final_to_outmost_if_ready(self):
    '''Post this region's final_event to the outmost chart once every
    sibling region has reached its final state.'''
    # NOTE(review): `self.regions is None and len(self.regions) < 1` would
    # raise a TypeError if regions were actually None -- presumably the
    # intent was `is None or` -- confirm before relying on the None case.
    ready = False if self.regions is None and len(self.regions) < 1 else True
    for region in self.regions:
      ready &= True if region.final else False
    if ready:
      self.outmost.post_fifo(self.final_event)

  @lru_cache(maxsize=32)
  def tockenize(self, signal_name):
    # Split a dotted signal name into its token set (memoized).
    return set(signal_name.split("."))

  @lru_cache(maxsize=32)
  def token_match(self, resident, other):
    # Two signal names "match" when their token sets share any member.
    alien_set = self.tockenize(other)
    resident_set = self.tockenize(resident)
    result = True if len(resident_set.intersection(alien_set)) >= 1 else False
    return result

  def meta_peel(self, e):
    '''If the head of this region's queue is a meta event, pop it and
    return its (inner event, state) pair; otherwise (None, None).'''
    result = (None, None)
    if len(self.queue) >= 1 and \
      (self.queue[0].signal == signals.INIT_META_SIGNAL or
       self.queue[0].signal == signals.EXIT_META_SIGNAL):
      _e = self.queue.popleft()
      result = (_e.payload.event, _e.payload.state)
    return result

  @lru_cache(maxsize=32)
  def within(self, bound, query):
    '''For a given bound state function determine if it has a query state
    function which is the same as it or which is a child of it in this HSM.

    **Note**:
       Since the state functions can be decorated, this method compares the
       names of the functions and note their addresses.

    **Args**:
       | ``bound`` (fn): the state function in which to search
       | ``query`` (fn): the state function to search for

    **Returns**:
       (bool): True | False
    '''
    # Save the HSM's search registers; the walk below tramples them.
    old_temp = self.temp.fun
    old_fun = self.state.fun
    state_name = self.state_name
    state_fn = self.state_fn

    # Compare undecorated functions by name.
    if hasattr(bound, '__wrapped__'):
      current_state = bound.__wrapped__
    else:
      current_state = bound
    if hasattr(query, '__wrapped__'):
      self.temp.fun = query.__wrapped__
    else:
      self.temp.fun = query

    result = False
    super_e = Event(signal=signals.SEARCH_FOR_SUPER_SIGNAL)
    # Walk up the superstate chain from query looking for bound.
    while(True):
      if(self.temp.fun.__name__ == current_state.__name__):
        result = True
        r = return_status.IGNORED
      else:
        r = self.temp.fun(self, super_e)
      if r == return_status.IGNORED:
        break
    # Restore the trampled registers.
    self.state_fn = state_fn
    self.state_name = state_name
    self.temp.fun = old_temp
    self.state.fun = old_fun
    return result

  @lru_cache(maxsize=32)
  def has_state(self, state):
    '''Determine if this region has a state.

    **Note**:
       Since the state functions can be decorated, this method compares the
       names of the functions and note their addresses.

    **Args**:
       | ``query`` (fn): a state function

    **Returns**:
       (bool): True | False
    '''
    result = False

    # Save the HSM's search registers; the walk below tramples them.
    old_temp = self.temp.fun
    old_fun = self.state.fun
    state_name = self.state_name
    state_fn = self.state_fn

    self.temp.fun = state
    super_e = Event(signal=signals.SEARCH_FOR_SUPER_SIGNAL)
    # Walk up from `state`; if we reach this region's region_state_function
    # the state belongs to this region.
    while(True):
      if(self.temp.fun.__name__ == self.fns['region_state_function'].__name__):
        result = True
        r = return_status.IGNORED
      else:
        r = self.temp.fun(self, super_e)
      if r == return_status.IGNORED:
        break
    # Restore the trampled registers.
    self.state_fn = state_fn
    self.state_name = state_name
    self.temp.fun = old_temp
    self.state.fun = old_fun
    return result

  @lru_cache(maxsize=32)
  def get_region(self, fun=None):
    '''Return the name of the "under hidden region" state enclosing ``fun``
    (or the current state when ``fun`` is None); '' when none is found.'''
    if fun is None:
      current_state = self.temp.fun
    else:
      current_state = fun

    # Save the HSM's search registers; the walk below tramples them.
    old_temp = self.temp.fun
    old_fun = self.state.fun

    self.temp.fun = current_state

    result = ''
    super_e = Event(signal=signals.SEARCH_FOR_SUPER_SIGNAL)
    # Walk up the superstate chain until an 'under' state or 'top' is hit.
    while(True):
      if 'under' in self.temp.fun.__name__:
        result = self.temp.fun.__name__
        r = return_status.IGNORED
      elif 'top' in self.temp.fun.__name__:
        r = return_status.IGNORED
      else:
        r = self.temp.fun(self, super_e)
      if r == return_status.IGNORED:
        break
    # Restore the trampled registers.
    self.temp.fun = old_temp
    self.state.fun = old_fun
    return result

  def function_name(self):
    '''Return the name of the calling function (one frame up).'''
    previous_frame = inspect.currentframe().f_back
    fdata = FrameData(*inspect.getframeinfo(previous_frame))
    function_name = fdata.function_name
    return function_name

  def pop_event(self):
    '''Pop and return the head of this region's queue, or None if empty.'''
    result = None
    if len(self.queue) >= 1:
      result = self.queue.popleft()
    return result

  def _post_fifo(self, e):
    # Enqueue without driving the HSM; Regions drives all siblings together.
    super().post_fifo(e)

  def _post_lifo(self, e):
    # Enqueue at the head without driving the HSM.
    super().post_lifo(e)

  def _complete_circuit(self):
    # Run-to-completion over whatever is queued.
    super().complete_circuit()

  #def post_fifo(self, e):
  #  self._post_fifo(e)
  #  self.complete_circuit()

  #def post_lifo(self, e):
  #  self._post_lifo(e)
  #  self.complete_circuit()
class InstrumentedActiveObject(ActiveObject):
  '''An ActiveObject that mirrors its live spy/trace output into a log
  file via the logging module.'''

  def __init__(self, name, log_file):
    super().__init__(name)

    self.log_file = log_file
    # Previous trace's active-states snapshot; None until the first trace.
    self.old_states = None

    # Truncate-and-write logging into the given file.
    logging.basicConfig(
      format='%(asctime)s %(levelname)s:%(message)s',
      filemode='w',
      filename=self.log_file,
      level=logging.DEBUG)

    self.register_live_spy_callback(partial(self.spy_callback))
    self.register_live_trace_callback(partial(self.trace_callback))

  def trace_callback(self, trace):
    '''trace without datetimestamp'''
    # trace_without_datetime = re.search(r'(\[.+\]) (\[.+\].+)', trace).group(2)
    # Pull the signal name out of miros's "...-><signal>(" trace format.
    signal_name = re.search(r'->(.+)?\(', trace).group(1)
    new_states = self.active_states()
    old_states = "['bottom']" if self.old_states is None else self.old_states
    trace = "{}<-{} == {}".format(old_states, signal_name, new_states)

    #self.print(trace_without_datetime)
    logging.debug("T: " + trace)
    self.old_states = new_states

  def spy_callback(self, spy):
    '''spy with machine name pre-pending'''
    #self.print(spy)
    logging.debug("S: [%s] %s" % (self.name, spy))

  def report(self, message):
    '''Write an ad-hoc "R:" entry to the log.'''
    logging.debug("R:%s" % message)

  def clear_log(self):
    '''Truncate the log file.'''
    with open(self.log_file, "w") as fp:
      fp.write("")
class Regions():
  '''Container that builds, links and drives a set of sibling Region HSMs.

  Replaces long-winded boiler plate code like this:

    self.p_regions.append(
      Region(
        name='s1_r',
        starting_state=p_r2_under_hidden_region,
        outmost=self,
        final_event=Event(signal=signals.p_final)
      )
    )
    self.p_regions.append(
      Region(
        name='s2_r',
        starting_state=p_r2_under_hidden_region,
        outmost=self,
        final_event=Event(signal=signals.p_final)
      )
    )

    # link all regions together
    for region in self.p_regions:
      for _region in self.p_regions:
        region.regions.append(_region)

  With this:

    self.p_regions = Regions(name='p', outmost=self).add('s1_r').add('s2_r').regions

  '''
  def __init__(self, name, outmost):
    '''Create an empty container named *name* for the *outmost* chart.'''
    self.name = name
    self.outmost = outmost
    self._regions = []
    self.final_signal_name = name + "_final"
    # Maps a region's region-state-function to its Region object (see add).
    self.lookup = {}

  def add(self, region_name, outer):
    '''Build and append a Region found by naming convention.

    self.p_regions.append(
      Region(
        name='s2_r',
        starting_state=p_r2_under_hidden_region,
        outmost=self,
        final_event=Event(signal=signals.p_final)
        outer=self,
      )
    )

    Where to 'p_r2_under_hidden_region', 'p_final' are inferred based on conventions
    and outmost was provided to the Regions __init__ method and 'outer' is needed
    for the EXIT_META_SIGNAL signal.
    '''
    # The three state functions are resolved by naming convention.  eval()
    # is tolerable only because region_name comes from hard-coded chart
    # construction calls -- never feed it untrusted input.
    under_hidden_state_function = eval(region_name + "_under_hidden_region")
    region_state_function = eval(region_name + "_region")
    over_hidden_state_function = eval(region_name + "_over_hidden_region")

    assert callable(under_hidden_state_function)
    assert callable(region_state_function)
    assert callable(over_hidden_state_function)

    region =\
      Region(
        name=region_name,
        starting_state=under_hidden_state_function,
        outmost=self.outmost,
        outer=outer,
        same=self,
        final_event = Event(signal=self.final_signal_name),
        under_hidden_state_function = under_hidden_state_function,
        region_state_function = region_state_function,
        over_hidden_state_function = over_hidden_state_function,
      )
    self._regions.append(region)
    self.lookup[region_state_function] = region
    return self

  def get_obj_for_fn(self, fn):
    '''Return the Region whose region-state-function is ``fn``, else None.'''
    # Bug fix: the mapping from state function to Region lives in the
    # ``lookup`` dict built by add(); the old code tested membership in the
    # ``_regions`` list (Region objects, never state functions), so it
    # always returned None.
    result = self.lookup[fn] if fn in self.lookup else None
    return result

  def link(self):
    '''Create the 'same' and 'regions' attributes for each region object in this
    regions object.

    The region objects will be placed into a list and any region will be able to
    access the other region objects at its level by accessing that list.  This
    list will be called regions, and it is an attribute of the region object.
    Linking a region to it's other region object's is required to provide the
    final_event feature and that kind of thing.

    The link method will also create the "same" attribute.  This is a reference
    to this regions object, or the thing that contains the post_fifo, post_lifo
    ... methods which place and drive events into region objects at the same
    level of the orthogonal component hierarchy.

    A call to 'link' should be made once all of the region objects have been
    added to this regions object.

    **Example(s)**:

    .. code-block:: python

      outer = self.regions['p']
      self.regions['p_p11'] = Regions(
        name='p_p11',
        outmost=self)\\
        .add('p_p11_r1', outer=outer)\\
        .add('p_p11_r2', outer=outer).link()

    '''
    for region in self._regions:
      for _region in self._regions:
        if _region not in region.regions:
          region.regions.append(_region)
      region.same = self
    return self

  def post_fifo(self, e):
    '''Queue *e* in every sibling region, then drive them to completion.'''
    self._post_fifo(e)
    [region.complete_circuit() for region in self._regions]

  def _post_fifo(self, e):
    # Queue only; completion is driven separately.
    regions = self._regions
    [region.post_fifo(e) for region in regions]

  def post_lifo(self, e):
    '''Queue *e* at the head of every sibling region, then drive them.'''
    self._post_lifo(e)
    [region.complete_circuit() for region in self._regions]

  def _post_lifo(self, e):
    [region.post_lifo(e) for region in self._regions]

  def _complete_circuit(self):
    [region.complete_circuit() for region in self._regions]

  def start(self):
    '''Start every region at its configured starting state.'''
    for region in self._regions:
      region.start_at(region.starting_state)

  @property
  def instrumented(self):
    '''True only when every contained region is instrumented.'''
    instrumented = True
    for region in self._regions:
      instrumented &= region.instrumented
    return instrumented

  @instrumented.setter
  def instrumented(self, _bool):
    for region in self._regions:
      region.instrumented = _bool

  def region(self, name):
    '''Return the contained Region called *name*, else None.'''
    result = None
    for region in self._regions:
      if name == region.name:
        result = region
        break
    return result
STXRef = namedtuple('STXRef', ['send_id', 'thread_id'])
class XmlChart(InstrumentedActiveObject):
def __init__(self, name, log_file, live_spy=None, live_trace=None):
super().__init__(name, log_file)
if live_spy is not None:
self.live_spy = live_spy
if live_trace is not None:
self.live_trace = live_trace
self.bottom = self.top
self.shot_lookup = {}
self.regions = {}
outer = self
self.regions['p'] = Regions(
name='p',
outmost=self)\
.add('p_r1', outer=outer)\
.add('p_r2', outer=outer).link()
outer = self.regions['p']
self.regions['p_p11'] = Regions(
name='p_p11',
outmost=self)\
.add('p_p11_r1', outer=outer)\
.add('p_p11_r2', outer=outer).link()
outer = self.regions['p']
self.regions['p_p12'] = Regions(
name='p_p12',
outmost=self)\
.add('p_p12_r1', outer=outer)\
.add('p_p12_r2', outer=outer).link()
outer = self.regions['p_p12']
self.regions['p_p12_p11'] = Regions(
name='p_p12_p11',
outmost=self)\
.add('p_p12_p11_r1', outer=outer)\
.add('p_p12_p11_r2', outer=outer).link()
outer = self.regions['p']
self.regions['p_p22'] = Regions(
name='p_p22',
outmost=self)\
.add('p_p22_r1', outer=outer)\
.add('p_p22_r2', outer=outer).link()
self.current_function_name = None # dynamically assigned
self.outmost = self
def regions_queues_string(self):
'''Reflect upon all queues for all region objects in statechart
**Returns**:
(str): A reflection upon the queue contents for all regions
**Example(s)**:
.. code-block:: python
# rqs is aliased to regions_queues_string
print(self.rqs())
'''
previous_frame = inspect.currentframe().f_back
fdata = FrameData(*inspect.getframeinfo(previous_frame))
function_name = fdata.function_name
line_number = fdata.line_number
if function_name == 'proto_investigate':
previous_frame = inspect.currentframe().f_back.f_back
fdata = FrameData(*inspect.getframeinfo(previous_frame))
function_name = fdata.function_name
line_number = fdata.line_number
width = 78
result = ""
loc_and_number_report = ">>>> {} {} <<<<".format(function_name, line_number)
additional_space = width - len(loc_and_number_report)
result += "{}{}\n".format(loc_and_number_report, "<" * additional_space)
result += "-" * int(width / 2) + "\n"
for name, regions in self.regions.items():
for region_index, region in enumerate(regions._regions):
region_summary = ""
_len = len(region.queue)
region_summary = "{}:{}, ql={}:".format(region.name, region.state_name, _len)
region_summary = region_summary + " {}" if _len == 0 else region_summary
result += "{}\n".format(region_summary)
for index, e in enumerate(region.queue):
_ps = ps(e)
_ps = re.sub(r'([ ]+)(\[n\].+)', r' \1\2', _ps)
queue_summary = str(index) + ": " + _ps
result += queue_summary + "\n"
result += "-" * int(width / 2) + "\n"
result += "<" * width + "\n"
result += "\n"
return result
# This is a debug method, so we want the name short
rqs = regions_queues_string
def start(self):
_instrumented = self.instrumented
if self.live_spy:
for key in self.regions.keys():
self.regions[key].instrumented = self.instrumented
else:
for key in self.regions.keys():
self.regions[key].instrumented = False
for key in self.regions.keys():
self.regions[key].start()
self.start_at(outer_state)
self.instrumented = _instrumented
@lru_cache(maxsize=32)
def tockenize(self, signal_name):
return set(signal_name.split("."))
@lru_cache(maxsize=32)
def token_match(self, resident, other):
alien_set = self.tockenize(other)
resident_set = self.tockenize(resident)
result = True if len(resident_set.intersection(alien_set)) >= 1 else False
return result
def post_fifo_with_sendid(self, sendid, e, period=None, times=None, deferred=None):
thread_id = self.post_fifo(e, period, times, deferred)
if thread_id is not None:
self.shot_lookup[e.signal_name] = \
STXRef(thread_id=thread_id, send_id=sendid)
def post_lifo_with_sendid(self, sendid, e, period=None, times=None, deferred=None):
thread_id = super().post_lifo(e, period, times, deferred)
if thread_id is not None:
self.shot_lookup[e.signal_name] = \
STXRef(thread_id=thread_id, send_id=sendid)
def cancel_with_sendid(self, sendid):
thread_id = None
for k, v in self.shot_lookup.items():
if v.send_id == sendid:
thread_id = v.thread_id
break
if thread_id is not None:
self.cancel_event(thread_id)
def cancel_all(self, e):
token = e.signal_name
for k, v in self.shot_lookup.items():
if self.token_match(token, k):
self.cancel_events(Event(signal=k))
break
def meta_peel(self, e):
result = (None, None)
if len(self.queue.deque) >= 1 and \
(self.queue.deque[0].signal == signals.INIT_META_SIGNAL or
self.queue.deque[0].signal == signals.EXIT_META_SIGNAL):
_e = self.queue.deque.popleft()
result = (_e.payload.event, _e.payload.state)
return result
def active_states(self):
# parallel state names
psn = self.regions.keys()
result = []
for n, regions in self.regions.items():
for _region in regions._regions:
result.append({_region.name: _region.state_name})
def recursive_get_states(name):
states = []
if name in psn:
for region in self.regions[name]._regions:
if region.state_name in psn:
_states = recursive_get_states(region.state_name)
states.append(_states)
else:
states.append(region.state_name)
else:
states.append(self.state_name)
return states
states = recursive_get_states(self.state_name)
return states
def _active_states(self):
'''Used to see all active states at once.
**Note**:
**Args**:
**Returns**:
(type):
**Example(s)**:
.. code-block:: python
# example code goes here
'''
result = []
for n, regions in self.regions.items():
for _region in regions._regions:
result.append({_region.name: _region.state_name})
return result
  def _post_lifo(self, e, outmost=None):
    # Region-interface shim: regions call _post_lifo(e, outmost=...); at the
    # outmost chart the outmost argument is accepted but ignored.
    self.post_lifo(e)
  def _post_fifo(self, e, outmost=None):
    # Region-interface shim: regions call _post_fifo(e, outmost=...); at the
    # outmost chart the outmost argument is accepted but ignored.
    self.post_fifo(e)
  @lru_cache(maxsize=64)
  def meta_init(self, r, s, t, sig):
    '''Build target and meta event for the state. The meta event will be a
    recursive INIT_META_SIGNAL event for a given WTF signal and return a target for
    it's first pass off.

    **Note**:
       see `e0-wtf-event
       <https://aleph2c.github.io/miros-xml/recipes.html#e0-wtf-event>`_ for
       details about and why a INIT_META_SIGNAL is constructed and why it is needed.

    **Args**:
       | ``r`` (Region): region whose scribble context must survive this call
       | ``s`` (state function): source state
       | ``t`` (state function): target state
       | ``sig`` (string): event signal name

    **Returns**:
       (Event): recursive Event

    **Example(s)**:

    .. code-block:: python

       target, onion = example.meta_init(p_p11_s21, "E0")
       assert onion.payload.state == p
       assert onion.payload.event.payload.state == p_r1_region
       assert onion.payload.event.payload.event.payload.state == p_p11
       assert onion.payload.event.payload.event.payload.event.payload.state == p_p11_r2_region
       assert onion.payload.event.payload.event.payload.event.payload.event.payload.state == p_p11_s21
       assert onion.payload.event.payload.event.payload.event.payload.event.payload.event == None

    '''
    # This method is lru_cached, yet the body mutates r's scribble context;
    # save it here and restore it before returning so repeated/cached calls
    # leave the region's bookkeeping untouched.
    inner = r.inner
    current_function_name = r.current_function_name
    region = None
    onion_states = []
    onion_states.append(t)
    @lru_cache(maxsize=32)
    def find_fns(state):
      '''For a given state find (region_state_function,
      outer_function_that_holds_the_region, region_object)

      **Args**:
         | ``state`` (state_function): the target of the WTF event given to
         | meta_init

      **Returns**:
         | (tuple): (region_state_function,
         |           outer_function_that_holds_the_region, region_object)

      **Example(s)**:

      .. code-block:: python

         a, b, c = find_fns(p_p11_s21)
         assert a == p_p11_r2_region
         assert b == p_p11
         assert c.name == 'p_p11_r2'

      '''
      outer_function_state_holds_the_region = None
      region_obj = None
      assert callable(state)
      # NOTE: the loop variable r shadows meta_init's r argument, but only
      # inside this nested helper.
      for k, rs in self.regions.items():
        for r in rs._regions:
          if r.has_state(state):
            # eval resolves the Regions collection's name to the module-level
            # injector state function of the same name — fragile but
            # intentional in this experimental design.
            outer_function_state_holds_the_region = eval(rs.name)
            region_obj = r
            break
      if region_obj:
        region_state_function = region_obj.fns['region_state_function']
        assert callable(outer_function_state_holds_the_region)
        return region_state_function, outer_function_state_holds_the_region, region_obj
      else:
        return None, None, None
    # Peel outward from the target, collecting (region, injector) pairs until
    # we run out of enclosing regions or reach the source.
    target_state, region_holding_state, region = find_fns(t)
    onion_states += [target_state, region_holding_state]
    while region and hasattr(region, 'outer'):
      target_state, region_holding_state, region = \
        find_fns(region_holding_state)
      if s is not None and region_holding_state == s:
        break
      if target_state:
        onion_states += [target_state, region_holding_state]
    # Wrap up the onion meta event from the inside out.
    # History items at the last layer of the outer part of the
    # INIT_META_SIGNAL need to reference an even more outer part
    # of the onion, the source of the meta_init call.
    event = None
    init_onion = onion_states[:]
    number = len(init_onion)
    for index, entry_target in enumerate(init_onion):
      previous_signal = signals.INIT_META_SIGNAL
      if index == len(init_onion) - 1:
        # outermost layer points back at the real source and springing signal
        previous_signal = sig
        previous_state = s
      else:
        previous_state = init_onion[index + 1]
      event = Event(
        signal=signals.INIT_META_SIGNAL,
        payload=META_SIGNAL_PAYLOAD(
          n=number,
          event=event,
          state=entry_target,
          previous_state=previous_state,
          previous_signal=previous_signal,
          springer=sig
        )
      )
      number -= 1
    # restore the scribble context saved at the top
    r.inner = inner
    r.current_function_name = current_function_name
    return event
  def build_onion(self, t, sig, s=None):
    '''Find an list of state functions which describe a single gradient in the
    HHSM, from the source 's', to the target, 't'.

    **Note**:
       If it is not possible to draw line matching a single gradient between the
       two input functions.  The 's' is replaced with the lowest common ancestor
       of 't' and the provided 's' and the build_onion returns that list instead.

       The resulting list is in reverse.  I can't remember why I did it this
       way, and I'm not going to touch this "feature" right now.

    **Args**:
       | ``t`` (fn): the target state
       | ``sig`` (str): Event signal_name
       | ``s=None`` (fn): the source state

    **Returns**:
       (list): a reverse list describing a single gradient of state functions,
       from t to s (this is why it is reversed, it would be expected to be s to
       to to)

    **Example(s)**:

       To see the graph this example is referencing go to
       `this link <https://github.com/aleph2c/miros-xml/blob/master/doc/_static/xml_chart_4.pdf>`_

    .. code-block:: python

      result1 = example.build_onion(s=p, t=p_p12_p11_s12, sig='TEST')
      assert(result1 == [
        p_p12_p11_s12,
        p_p12_p11_r1_region,
        p_p12_p11,
        p_p12_r1_region,
        p_p12,
        p_r1_region,
        p,
      ])

      result2 = example.build_onion(t=p, s=p_p12_p11_s12, sig='TEST')
      assert(result2 == [
        p,
        p_r1_region,
        p_p12,
        p_p12_r1_region,
        p_p12_p11,
        p_p12_p11_r1_region,
        p_p12_p11_s12,
      ])

    '''
    region = None
    onion_states = []
    onion_states.append(t)
    def find_fns(state):
      '''For a given state find (region_state_function,
      outer_function_that_holds_the_region, region_object)

      **Args**:
         | ``state`` (state_function): the target of the WTF event given to
         | meta_init

      **Returns**:
         | (tuple): (regions_state_function,
         |           injector,
         |           region_name)

      **Example(s)**:

      .. code-block:: python

         a, b, c = find_fns(p_p11_s21)
         assert a == p_p11_r2_region
         assert b == p_p11
         assert c.name == 'p_p11_r2'

      '''
      outer_function_state_holds_the_region = None
      region_obj = None
      assert callable(state)
      for k, rs in self.regions.items():
        for r in rs._regions:
          if r.has_state(state):
            # eval maps the Regions collection's name to the module-level
            # injector state function of the same name
            outer_function_state_holds_the_region = eval(rs.name)
            region_obj = r
            break
      if region_obj:
        region_state_function = region_obj.fns['region_state_function']
        assert callable(outer_function_state_holds_the_region)
        return region_state_function, outer_function_state_holds_the_region, region_obj
      else:
        return None, None, None
    # walk outward from the target, two layers (region, injector) at a time
    target_state, region_holding_state, region = find_fns(t)
    onion_states += [target_state, region_holding_state]
    #inner_region = False
    while region and hasattr(region, 'outer'):
      target_state, region_holding_state, region = \
        find_fns(region_holding_state)
      if s is not None and region_holding_state == s:
        #inner_region = True
        onion_states += [target_state, s]
        break
      if target_state:
        onion_states += [target_state, region_holding_state]
    # A None in the list means no single gradient exists between s and t;
    # rebuild with the roles swapped and reverse, so the caller still gets a
    # usable gradient (see Note in the docstring).
    if None in onion_states:
      new_target = s
      new_source = t
      onion_states = self.build_onion(s=new_source, t=new_target, sig=sig)
      onion_states.reverse()
    return onion_states
  @lru_cache(maxsize=64)
  def meta_trans(self, r, s, t, sig):
    '''Create an event onion which can be passed over zero or one orthogonal
    components.

    The orthogonal component pattern allows HSM objects within other HSM
    objects.  The event processor used by the miros library does not support
    transition over HSM boundaries.  This method creates recursive events which
    allow for transitions out of an into orthogonal components, assuming their
    states have been written to support these events.

    **Note**:
       The trans event will contain a payload of the META_SIGNAL_PAYLOAD type.
       See the top of the files for details.

    **Args**:
       | ``r`` (Region): region whose scribble context must survive this call
       | ``s`` (function): source state function (where we are coming from)
       | ``t`` (function): target state function (where we want to go)
       | ``sig`` (str): signal name of event that initiated the need for this
       |               event

    **Returns**:
       (tuple): (_state, Event) — _state is the state this region should
       immediately transition to (or None), Event is the recursive meta event
       which can contain other events within it.

    **Example(s)**:

    .. code-block:: python

      @othogonal_state
      def p_p11_s12(r, e):
        elif(e.signal == signals.G1):
          status = return_status.HANDLED

          # Reference the parallel_region_to_orthogonal_component_mapping_6.pdf in
          # this repository
          status = return_status.HANDLED
          _e = r.outmost.meta_trans(
            s=p_p11_s12,
            t=p_s22,
            sig=e.signal_name
          )
          r.outmost.regions['p_p11'].post_fifo(_e)

    '''
    # lru_cached method that mutates r's scribble context: save and restore
    # it so cached/repeated calls leave the region's bookkeeping untouched.
    inner = r.inner
    current_function_name = r.current_function_name
    # TODO: clean this up:
    source = s
    target = t
    lca = self.lca(source, target)

    # Phase 1: build the exit gradient, from the source up to the lca.
    exit_onion1 = self.build_onion(
      t=source,
      s=lca,
      sig=sig)
    exit_onion = exit_onion1[1:]

    # trim anything above the lca, then flip so index 0 is nearest the lca
    strippped_onion = []
    for fn in exit_onion:
      strippped_onion.append(fn)
      if fn == lca:
        break

    strippped_onion.reverse()
    exit_onion = strippped_onion[:]
    # exit_onion.reverse()

    # Phase 2: build the entry gradient, from the lca down to the target
    # (empty when the target is in the outer state or is the lca itself).
    if not (self.within(outer_state, target)) and lca != target:
      entry_onion1 = self.build_onion(
        s=lca,
        t=target,
        sig=sig)[0:-1]
      entry_onion = entry_onion1[:]
    else:
      entry_onion = []

    # Wrap up the onion meta event from the inside out.  History items at the
    # last layer of the outer part of the INIT_META_SIGNAL need to reference an
    # even more outer part of the onion, the meta exit details.
    event = None

    # Classify the transition so the outermost event gets the right signal.
    if entry_onion != [] and entry_onion[-1] == exit_onion[0]:
      bounce_type = signals.BOUNCE_SAME_META_SIGNAL
      if len(exit_onion) == 1:
        # this region can transition directly; see the unwrap at the bottom
        _state = entry_onion[-2]
      else:
        _state = None
    elif(entry_onion == []):
      bounce_type = signals.OUTER_TRANS_REQUIRED
      _state = None
    else:
      bounce_type = signals.BOUNCE_ACROSS_META_SIGNAL
      _state = None

    # n counts the layers remaining as the onion is wrapped inside-out
    if _state and len(exit_onion) == 1:
      number = len(entry_onion) - 1
    else:
      number = len(entry_onion) + len(exit_onion)

    if bounce_type == signals.OUTER_TRANS_REQUIRED:
      # no entry gradient: a single outer-trans layer wraps the target
      number += 1
      previous_state = exit_onion[0]
      previous_signal = signals.EXIT_META_SIGNAL
      event = Event(
        signal=bounce_type,
        payload=META_SIGNAL_PAYLOAD(
          n=number,
          event=event,
          state=t,
          previous_state=previous_state,
          previous_signal=previous_signal,
          springer=sig,
        )
      )
      number -= 1
    else:
      # wrap one INIT_META_SIGNAL layer per entry state, innermost first
      for index, entry_target in enumerate(entry_onion):
        #signal_name = signals.INIT_META_SIGNAL if entry_target != lca else bounce_type
        signal_name = signals.INIT_META_SIGNAL

        if index == len(entry_onion) - 1:
          # outermost entry layer: link back to the exit side (or straight to
          # the source when the exit side collapses to the shared region)
          if (len(exit_onion) == 1 and
            exit_onion[0] == entry_onion[-1]):
            previous_state = source
            previous_signal = sig
          else:
            previous_state = exit_onion[0]
            previous_signal = signals.EXIT_META_SIGNAL
          event = Event(
            signal=signal_name,
            payload=META_SIGNAL_PAYLOAD(
              n=number,
              event=event,
              state=entry_target,
              previous_state=previous_state,
              previous_signal=previous_signal,
              springer=sig,
            )
          )
        else:
          previous_state = entry_onion[index + 1]
          previous_signal = signals.INIT_META_SIGNAL
          event = Event(
            signal=signal_name,
            payload=META_SIGNAL_PAYLOAD(
              n=number,
              event=event,
              state=entry_target,
              previous_state=previous_state,
              previous_signal=previous_signal,
              springer=sig,
            )
          )
        number -= 1

    # Wrapping the EXIT_META_SIGNAL details around the META INIT part
    # on the onion meta event.  When we are at the outer layer
    # we need to write in the event that caused this meta event
    # and from what state it was created.
    if len(exit_onion) > 1:
      for index, exit_target in enumerate(exit_onion):
        # the layer at the lca carries the classified bounce signal instead
        signal_name = signals.EXIT_META_SIGNAL if exit_target != lca else bounce_type

        previous_signal = signals.EXIT_META_SIGNAL
        if index == len(exit_onion) - 1:
          # outermost exit layer: record the true source and springing signal
          previous_state = source
          previous_signal = sig
        else:
          previous_state = exit_onion[index + 1]
        event = Event(
          signal=signal_name,
          payload=META_SIGNAL_PAYLOAD(
            n=number,
            event=event,
            state=exit_target,
            previous_state=previous_state,
            previous_signal=previous_signal,
            springer=sig,
          )
        )
        number -= 1
    else:
      # single exit layer: one bounce-typed wrapper closes the onion
      previous_state = source
      previous_signal = sig
      event = Event(
        signal=bounce_type,
        payload=META_SIGNAL_PAYLOAD(
          n=number,
          event=event,
          state=exit_onion[0],
          previous_state=previous_state,
          previous_signal=previous_signal,
          springer=sig,
        )
      )
      number -= 1

    if _state and len(exit_onion) == 1:
      # the region transitions directly to _state, so strip the two layers
      # the direct transition makes redundant
      event = event.payload.event.payload.event

    # restore the scribble context saved at the top
    r.inner = inner
    r.current_function_name = current_function_name
    return (_state, event)
  @lru_cache(maxsize=32)
  def lca(self, _s, _t):
    # Least common ancestor of _s and _t in the HHSM's gradient structure.
    # Anything targeting the outer state short-circuits to outer_state.
    if (self.within(outer_state, _t)):
      return outer_state
    s_onion = self.build_onion(_s, sig=None)[::-1]
    t_onion = self.build_onion(_t, sig=None)[::-1]
    # NOTE(review): set.intersection does not preserve onion order, so [-1]
    # picks an arbitrary member when more than one state is shared — this
    # looks fragile; confirm the intended "deepest common state" semantics.
    _lca = list(set(s_onion).intersection(set(t_onion)))[-1]
    _lca = self.bottom if _lca is None else _lca
    return _lca
  @lru_cache(maxsize=32)
  def lci(self, s, t):
    '''return the least common injector'''
    s_onion = self.build_onion(s=s, t=t, sig=None)
    t_onion = self.build_onion(s=t, t=s, sig=None)
    # NOTE(review): set.intersection does not preserve onion order, so [0]
    # picks an arbitrary shared member when several exist — confirm intent.
    _lci = list(set(s_onion).intersection(set(t_onion)))[0]
    _lci = self.bottom if _lci is None else _lci
    return _lci
  @lru_cache(maxsize=32)
  def within(self, fn_region_handler, fn_state_handler):
    # True when fn_state_handler is fn_region_handler or one of its
    # descendants: walk the SEARCH_FOR_SUPER_SIGNAL chain upward from
    # fn_state_handler until fn_region_handler is seen or the chain ends.
    # The event processor's temp/state function pointers are clobbered by
    # the walk, so they are saved here and restored before returning.
    old_temp = self.temp.fun
    old_fun = self.state.fun
    current_state = fn_region_handler
    self.temp.fun = fn_state_handler
    result = False
    super_e = Event(signal=signals.SEARCH_FOR_SUPER_SIGNAL)
    while(True):
      if(self.temp.fun == current_state):
        result = True
        r = return_status.IGNORED
      else:
        # each call rewrites self.temp.fun to the handler's super state
        r = self.temp.fun(self, super_e)
      if r == return_status.IGNORED:
        break
    self.temp.fun = old_temp
    self.state.fun = old_fun
    return result
################################################################################
# p region #
################################################################################
@othogonal_state
def p_r1_under_hidden_region(r, e):
  """Hidden floor under p_r1: parks the region until 'enter_region' arrives."""
  if r.token_match(e.signal_name, "enter_region"):
    return r.trans(p_r1_region)
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_r1_under_hidden_region")
    return return_status.HANDLED
  # anything else falls through to the region's bottom state
  r.temp.fun = r.bottom
  return return_status.SUPER
@othogonal_state
def p_r1_region(r, e):
  """Region state for p's first orthogonal region.

  Dispatches the region's INIT handling (possibly driven by a meta event),
  forwards meta signals between inner and outer charts, and drops back to the
  hidden under-region on exit_region.
  """
  status = return_status.UNHANDLED
  if(e.signal == signals.ENTRY_SIGNAL):
    pprint("enter p_r1_region")
    status = return_status.HANDLED
  elif(e.signal == signals.INIT_SIGNAL):
    # search for INIT_META_SIGNAL
    (_e, _state) = r.meta_peel(e)
    # If the target state is this state, just strip this layer of
    # the meta event and use the next one (this was done to make the
    # meta events consistent and easy to read and usable by different
    # types of WTF events.
    investigate(r, e, _e)
    # We can't compare the function directly because they can be arbitrarily
    # decorated by the user, so their addresses may not be the same, but their
    # names will be the same
    if _state and _state.__name__ == r.state_name:
      (_e, _state) = _e.payload.event, _e.payload.state
    # if _state is None or is referencing another region then follow are default
    # init behavior
    if _state is None or not r.within(bound=r.state_fn, query=_state):
      status = r.trans(p_p11)
    else:
      # if _state is this state or a child of this state, transition to it
      status = r.trans(_state)
      # if there is more to our meta event, post it into the chart
      if _e is not None:
        r._post_fifo(_e)
  elif(e.signal == signals.INIT_META_SIGNAL):
    status = return_status.HANDLED
  elif e.signal == signals.BOUNCE_SAME_META_SIGNAL:
    _state, _e = e.payload.state, e.payload.event
    investigate(r, e, _e)
    # NOTE(review): this region additionally requires r == region before
    # re-entering, unlike sibling regions which only check has_state —
    # preserved as-is; confirm whether the extra guard is intentional.
    for region in r.same._regions:
      if r == region and r.has_state(e.payload.previous_state):
        region._post_fifo(_e)
        region._post_lifo(Event(signal=signals.enter_region))
    status = return_status.HANDLED
  elif(e.signal == signals.EXIT_META_SIGNAL):
    (_e, _state) = e.payload.event, e.payload.state
    investigate(r, e, _e)
    # hand the exit meta event to the outer chart if it targets us
    if r.within(p_r1_region, _state):
      r.outer._post_fifo(_e)
    status = return_status.HANDLED
  elif(e.signal == signals.exit_region):
    status = r.trans(p_r1_under_hidden_region)
  elif(e.signal == signals.EXIT_SIGNAL):
    pprint("exit p_r1_region")
    status = return_status.HANDLED
  else:
    r.temp.fun = p_r1_under_hidden_region
    status = return_status.SUPER
  # Fix: the original had a second, unreachable ENTRY_SIGNAL elif after the
  # exit_region branch — ENTRY_SIGNAL is already consumed by the first if,
  # so the duplicate was dead code and has been removed.
  return status
@othogonal_state
def p_r1_over_hidden_region(r, e):
  """Hidden ceiling over p_r1_region: re-initializes it on force_region_init."""
  if e.signal == signals.force_region_init:
    return r.trans(p_r1_region)
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_r1_over_hidden_region")
    return return_status.HANDLED
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_r1_over_hidden_region")
    return return_status.HANDLED
  r.temp.fun = p_r1_region
  return return_status.SUPER
# p_r1
@othogonal_state
def p_p11(r, e):
  '''
  Injector state for the p_p11 orthogonal component.

  r is either p_r1, p_r2 region
  r.outer = p
  '''
  status = return_status.UNHANDLED

  # enter all regions
  if(e.signal == signals.ENTRY_SIGNAL):
    pprint("enter p_p11")
    # search for INIT_META_SIGNAL
    (_e, _state) = r.meta_peel(e)
    investigate(r, e, _e)
    if _state:
      r.inner._post_fifo(_e)
    r.inner.post_lifo(Event(signal=signals.enter_region))
    status = return_status.HANDLED
  # any event handled within there regions must be pushed from here
  elif(r.token_match(e.signal_name, "e1") or
       r.token_match(e.signal_name, "e2") or
       r.token_match(e.signal_name, "e4") or
       r.token_match(e.signal_name, "D3") or
       r.token_match(e.signal_name, "G1") or
       r.token_match(e.signal_name, "G3")
       ):
    r.scribble(e.signal_name)
    r.inner.post_fifo(e)
    status = return_status.HANDLED
  # the inner regions have all reached their final states
  elif r.token_match(e.signal_name, r.outmost.regions['p_p11'].final_signal_name):
    r.scribble(e.signal_name)
    status = r.trans(p_p12)
  elif r.token_match(e.signal_name, "C0"):
    status = r.trans(p_p12)
  elif r.token_match(e.signal_name, "A0"):
    # self-transition: exits and re-enters all inner regions
    status = r.trans(p_p11)
  elif r.token_match(e.signal_name, "D0"):
    # WTF transition out of this orthogonal component to the outer state
    _state, _e = r.outmost.meta_trans(r, t=outer_state, s=p_p11, sig=e.signal_name)
    investigate(r, e, _e)
    r.same._post_fifo(_e)
    if _state:
      status = r.trans(_state)
    else:
      status = return_status.UNHANDLED
  elif r.token_match(e.signal_name, "G0"):
    status = return_status.HANDLED
  elif e.signal == signals.OUTER_TRANS_REQUIRED:
    status = return_status.HANDLED
    (_e, _state) = e.payload.event, e.payload.state
    investigate(r, e, _e)
    if _state.__name__ == r.state_fn.__name__:
      # target is this injector itself: bounce the inner regions
      r.inner.post_fifo(Event(signal=signals.exit_region))
      r.inner.post_fifo(Event(signal=signals.enter_region))
    else:
      if r.within(bound=r.state_fn, query=_state):
        status = r.trans(_state)
  elif e.signal == signals.EXIT_META_SIGNAL:
    (_e, _state) = e.payload.event, e.payload.state
    investigate(r, e, _e)
    # this appears backwards, but it needs to be this way.
    if r.within(bound=_state, query=r.state_fn):
      # The next state is going to be our region handler skip it and post this
      # region handler would have posted to the outer HSM
      if(_e.payload.event.signal == signals.EXIT_META_SIGNAL or
         _e.payload.event.signal == signals.BOUNCE_ACROSS_META_SIGNAL or
         _e.payload.event.signal == signals.OUTER_TRANS_REQUIRED
         ):
        (_e, _state) = _e.payload.event, _e.payload.state
        r.outer._post_lifo(_e)
      elif(_e.signal == signals.EXIT_META_SIGNAL):
        r.outer._post_lifo(_e)
      else:
        r.same._post_lifo(_e)
    status = return_status.HANDLED
  elif(r.token_match(e.signal_name, "F1")):
    # WTF transition into a deeper orthogonal component (p_p12_p11_s12)
    _state, _e = r.outmost.meta_trans(r, t=p_p12_p11_s12, s=p_p11, sig=e.signal_name)
    investigate(r, e, _e)
    r.same._post_fifo(_e)
    if _state:
      status = r.trans(_state)
    else:
      status = return_status.UNHANDLED
    #status = return_status.UNHANDLED
  elif e.signal == signals.exit_region:
    r.scribble(Event(e.signal_name))
    status = r.trans(p_r1_under_hidden_region)
  elif e.signal == signals.EXIT_SIGNAL:
    #scribble(Event(e.signal_name))
    r.inner.post_lifo(Event(signal=signals.exit_region))
    pprint("exit p_p11")
    status = return_status.HANDLED
  else:
    r.temp.fun = p_r1_over_hidden_region
    status = return_status.SUPER
  return status
@othogonal_state
def p_p11_r1_under_hidden_region(r, e):
  """Hidden floor under p_p11_r1: parks the region until 'enter_region'."""
  if r.token_match(e.signal_name, "enter_region"):
    return r.trans(p_p11_r1_region)
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_p11_r1_under_hidden_region")
    return return_status.HANDLED
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_p11_r1_under_hidden_region")
    return return_status.HANDLED
  r.temp.fun = r.bottom
  return return_status.SUPER
@othogonal_state
def p_p11_r1_region(r, e):
  # Region state for p_p11's first inner region.
  status = return_status.UNHANDLED
  if(e.signal == signals.ENTRY_SIGNAL):
    pprint("enter p_p11_r1_region")
    status = return_status.HANDLED
  elif(e.signal == signals.INIT_SIGNAL):
    # search for INIT_META_SIGNAL
    (_e, _state) = r.meta_peel(e)
    # If the target state is this state, just strip this layer of
    # the meta event and use the next one (this was done to make the
    # meta events consistent and easy to read and usable by different
    # types of WTF events.
    investigate(r, e, _e)
    # We can't compare the function directly because they can be arbitrarily
    # decorated by the user, so their addresses may not be the same, but their
    # names will be the same
    if _state and _state.__name__ == r.state_name:
      (_e, _state) = _e.payload.event, _e.payload.state
    # if _state is None or is referencing another region then follow are default
    # init behavior
    if _state is None or not r.within(bound=r.state_fn, query=_state):
      status = r.trans(p_p11_s11)
    else:
      # if _state is this state or a child of this state, transition to it
      status = r.trans(_state)
      # if there is more to our meta event, post it into the chart
      if _e is not None:
        r._post_fifo(_e)
  elif(e.signal == signals.INIT_META_SIGNAL):
    status = return_status.HANDLED
  elif e.signal == signals.BOUNCE_SAME_META_SIGNAL:
    _state, _e = e.payload.state, e.payload.event
    investigate(r, e, _e)
    # re-arm whichever sibling region held the previous state
    for region in r.same._regions:
      if region.has_state(e.payload.previous_state):
        region._post_fifo(_e)
        region._post_lifo(Event(signal=signals.enter_region))
    status = return_status.HANDLED
  elif e.signal == signals.EXIT_META_SIGNAL:
    (_e, _state) = e.payload.event, e.payload.state
    investigate(r, e, _e)
    if r.within(p_p11_r1_region, _state):
      # strip this region's layer before relaying outward
      (_e, _state) = _e.payload.event, _e.payload.state
      r.outer._post_lifo(_e)
    status = return_status.HANDLED
  elif(e.signal == signals.exit_region):
    status = r.trans(p_p11_r1_under_hidden_region)
  elif(e.signal == signals.EXIT_SIGNAL):
    pprint("exit p_p11_r1_region")
    #r.inner.post_lifo(Event(signal=signals.exit_region))
    status = return_status.HANDLED
  else:
    r.temp.fun = p_p11_r1_under_hidden_region
    status = return_status.SUPER
  return status
@othogonal_state
def p_p11_r1_over_hidden_region(r, e):
  """Hidden ceiling over p_p11_r1_region: re-inits on force_region_init."""
  if e.signal == signals.force_region_init:
    return r.trans(p_p11_r1_region)
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_p11_r1_over_hidden_region")
    return return_status.HANDLED
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_p11_r1_over_hidden_region")
    return return_status.HANDLED
  r.temp.fun = p_p11_r1_region
  return return_status.SUPER
@othogonal_state
def p_p11_s11(r, e):
  """Leaf state of p_p11_r1; moves to p_p11_s12 on 'e4'."""
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_p11_s11")
    return return_status.HANDLED
  if r.token_match(e.signal_name, "e4"):
    return r.trans(p_p11_s12)
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_p11_s11")
    return return_status.HANDLED
  r.temp.fun = p_p11_r1_over_hidden_region
  return return_status.SUPER
@othogonal_state
def p_p11_s12(r, e):
  """Leaf state of p_p11_r1; 'D3' springs a WTF transition to p, 'e1'
  advances the region to its final state."""
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_p11_s12")
    return return_status.HANDLED
  if r.token_match(e.signal_name, "D3"):
    # build the meta event for the cross-boundary transition to p
    _state, _e = r.outmost.meta_trans(r, t=p, s=p_p11_s12, sig=e.signal_name)
    investigate(r, e, _e)
    r.same._post_fifo(_e)
    return r.trans(_state) if _state else return_status.UNHANDLED
  if r.token_match(e.signal_name, "e1"):
    return r.trans(p_p11_r1_final)
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_p11_s12")
    return return_status.HANDLED
  r.temp.fun = p_p11_r1_over_hidden_region
  return return_status.SUPER
@othogonal_state
def p_p11_r1_final(r, e):
  """Final state of p_p11_r1: marks the region done and, if every sibling
  region is also done, lets the outmost chart know."""
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_p11_r1_final")
    r.final = True
    r.post_p_final_to_outmost_if_ready()
    return return_status.HANDLED
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_p11_r1_final")
    r.final = False
    return return_status.HANDLED
  r.temp.fun = p_p11_r1_over_hidden_region
  return return_status.SUPER
@othogonal_state
def p_p11_r2_under_hidden_region(r, e):
  """Hidden floor under p_p11_r2: parks the region until 'enter_region'."""
  if r.token_match(e.signal_name, "enter_region"):
    return r.trans(p_p11_r2_region)
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_p11_r2_under_hidden_region")
    return return_status.HANDLED
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_p11_r2_under_hidden_region")
    return return_status.HANDLED
  r.temp.fun = r.bottom
  return return_status.SUPER
@othogonal_state
def p_p11_r2_region(r, e):
  """Region state for p_p11's second inner region.

  Fix: in the BOUNCE_SAME_META_SIGNAL branch the original called
  investigate(r, e, _e) BEFORE assigning _e from the payload, raising
  UnboundLocalError the first time the branch ran; the unpack now happens
  first, matching the sibling p_p11_r1_region handler.
  """
  status = return_status.UNHANDLED
  if(e.signal == signals.ENTRY_SIGNAL):
    pprint("enter p_p11_r2_region")
    status = return_status.HANDLED
  # p_p11_s21
  elif(e.signal == signals.INIT_SIGNAL):
    # search for INIT_META_SIGNAL
    (_e, _state) = r.meta_peel(e)
    investigate(r, e, _e)
    # If the target state is this state, just strip this layer of
    # the meta event and use the next one (this was done to make the
    # meta events consistent and easy to read and usable by different
    # types of WTF events.
    if _state == p_p11_r2_region:
      (_e, _state) = _e.payload.event, _e.payload.state
    # if _state is a child of this state then transition to it
    if _state is None or not r.within(p_p11_r2_region, _state):
      status = r.trans(p_p11_s21)
    else:
      status = r.trans(_state)
      if _e is not None:
        r.post_fifo(_e)
  elif(e.signal == signals.INIT_META_SIGNAL):
    status = return_status.HANDLED
  elif e.signal == signals.BOUNCE_SAME_META_SIGNAL:
    r.scribble(e.signal_name)
    # unpack the payload BEFORE tracing it (see docstring fix note)
    _state, _e = e.payload.state, e.payload.event
    investigate(r, e, _e)
    for region in r.same._regions:
      if region.has_state(e.payload.previous_state):
        region._post_fifo(_e)
        region._post_lifo(Event(signal=signals.enter_region))
    status = return_status.HANDLED
  elif(e.signal == signals.EXIT_META_SIGNAL):
    (_e, _state) = e.payload.event, e.payload.state
    investigate(r, e, _e)
    # hand the exit meta event to the outer chart if it targets us
    if r.within(p_p11_r2_region, _state):
      r.outer._post_fifo(_e)
    status = return_status.HANDLED
  elif(e.signal == signals.exit_region):
    status = r.trans(p_p11_r2_under_hidden_region)
  elif(e.signal == signals.EXIT_SIGNAL):
    pprint("exit p_p11_r2_region")
    status = return_status.HANDLED
  else:
    r.temp.fun = p_p11_r2_under_hidden_region
    status = return_status.SUPER
  return status
@othogonal_state
def p_p11_r2_over_hidden_region(r, e):
  """Hidden ceiling over p_p11_r2_region: re-inits on force_region_init."""
  if e.signal == signals.force_region_init:
    return r.trans(p_p11_r2_region)
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_p11_r2_over_hidden_region")
    return return_status.HANDLED
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_p11_r2_over_hidden_region")
    return return_status.HANDLED
  r.temp.fun = p_p11_r2_region
  return return_status.SUPER
@othogonal_state
def p_p11_s21(r, e):
  """Leaf state of p_p11_r2; moves to p_p11_s22 on 'e1'."""
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_p11_s21")
    return return_status.HANDLED
  if r.token_match(e.signal_name, "e1"):
    return r.trans(p_p11_s22)
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_p11_s21")
    return return_status.HANDLED
  r.temp.fun = p_p11_r2_over_hidden_region
  return return_status.SUPER
@othogonal_state
def p_p11_s22(r, e):
  '''
  Leaf state of p_p11_r2: 'G3' springs a WTF transition to p_s21, 'F2' a
  WTF transition into p_p12_p11_s12, 'e2' finishes the region, 'e1' bounces
  back to p_p11_s21.
  '''
  status = return_status.UNHANDLED
  if(e.signal == signals.ENTRY_SIGNAL):
    pprint("enter p_p11_s22")
    status = return_status.HANDLED
  elif(e.signal == signals.G3):
    # WTF transition out of this orthogonal component to p_s21
    _state, _e = r.outmost.meta_trans(
      r,
      s=p_p11_s22,
      t=p_s21,
      sig=e.signal_name
    )
    investigate(r, e, _e)
    r.same._post_fifo(_e)
    if _state:
      status = r.trans(_state)
    else:
      status = return_status.UNHANDLED
  elif(r.token_match(e.signal_name, "F2")):
    # NOTE(review): this calls r.meta_trans(r, ...) while every other state
    # in view calls r.outmost.meta_trans(r, ...), and it posts to r.outer
    # instead of r.same — confirm this asymmetry is intentional.
    _state, _e = r.meta_trans(r, t=p_p12_p11_s12, s=p_p11_s22, sig=e.signal_name)
    investigate(r, e, _e)
    r.outer.post_lifo(_e)
    status = return_status.HANDLED
  elif(r.token_match(e.signal_name, "e2")):
    status = r.trans(p_p11_r2_final)
  elif(r.token_match(e.signal_name, "e1")):
    status = r.trans(p_p11_s21)
  elif(e.signal == signals.EXIT_SIGNAL):
    pprint("exit p_p11_s22")
    status = return_status.HANDLED
  else:
    r.temp.fun = p_p11_r2_over_hidden_region
    status = return_status.SUPER
  return status
@othogonal_state
def p_p11_r2_final(r, e):
  """Final state of p_p11_r2: marks the region done and, if every sibling
  region is also done, lets the outmost chart know."""
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_p11_r2_final")
    r.final = True
    r.post_p_final_to_outmost_if_ready()
    return return_status.HANDLED
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_p11_r2_final")
    r.final = False
    return return_status.HANDLED
  r.temp.fun = p_p11_r2_over_hidden_region
  return return_status.SUPER
@othogonal_state
def p_r1_final(r, e):
  """Final state of p_r1: marks the region done and, if every sibling region
  is also done, lets the outmost chart know."""
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_r1_final")
    r.final = True
    r.post_p_final_to_outmost_if_ready()
    return return_status.HANDLED
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_r1_final")
    r.final = False
    return return_status.HANDLED
  r.temp.fun = p_r1_over_hidden_region
  return return_status.SUPER
@othogonal_state
def p_p12(r, e):
  # Injector state for the p_p12 orthogonal component (sits in p_r1).
  status = return_status.UNHANDLED

  # Enter all regions
  if(e.signal == signals.ENTRY_SIGNAL):
    pprint("enter p_p12")
    # search for INIT_META_SIGNAL
    (_e, _state) = r.meta_peel(e)
    investigate(r, e, _e)
    if _state:
      r.inner._post_fifo(_e)
    r.inner.post_lifo(Event(signal=signals.enter_region))
    status = return_status.HANDLED
  # Any event handled within there regions must be pushed from here
  elif(r.token_match(e.signal_name, "e1") or
       r.token_match(e.signal_name, "e2") or
       r.token_match(e.signal_name, "e4") or
       r.token_match(e.signal_name, "G1") or
       r.token_match(e.signal_name, "G3") or
       r.token_match(e.signal_name, "D4") or
       r.token_match(e.signal_name, "I1")
       ):
    r.scribble(e.signal_name)
    r.inner.post_fifo(e)
    status = return_status.HANDLED
  # All internal injectors will have to have this structure
  elif e.signal == signals.EXIT_META_SIGNAL:
    (_e, _state) = e.payload.event, e.payload.state
    investigate(r, e, _e)
    # this appears backwards, but it needs to be this way.
    if r.within(bound=_state, query=r.state_fn):
      # The next state is going to be our region handler skip it and post this
      # region handler would have posted to the outer HSM
      if(_e.payload.event.signal == signals.EXIT_META_SIGNAL or
         _e.payload.event.signal == signals.BOUNCE_ACROSS_META_SIGNAL or
         _e.payload.event.signal == signals.OUTER_TRANS_REQUIRED
         ):
        (_e, _state) = _e.payload.event, _e.payload.state
        r.outer._post_lifo(_e)
      elif(_e.signal == signals.EXIT_META_SIGNAL):
        r.outer._post_lifo(_e)
      else:
        r.same._post_lifo(_e)
    status = return_status.HANDLED
  elif e.signal == signals.OUTER_TRANS_REQUIRED:
    status = return_status.HANDLED
    (_e, _state) = e.payload.event, e.payload.state
    investigate(r, e, _e)
    if _state.__name__ == r.state_fn.__name__:
      # target is this injector itself: bounce the inner regions
      r.inner.post_fifo(Event(signal=signals.exit_region))
      r.inner.post_fifo(Event(signal=signals.enter_region))
    else:
      if r.within(bound=r.state_fn, query=_state):
        status = r.trans(_state)
  # Final token match — all inner regions have reached their final states
  elif(r.token_match(e.signal_name, r.outmost.regions['p_p12'].final_signal_name)):
    r.scribble(e.signal_name)
    status = r.trans(p_r1_final)
  elif(r.token_match(e.signal_name, "e5")):
    status = r.trans(p_r1_final)
  elif(e.signal == signals.E2):
    # WTF init into a deeper component: rebuild the regions on the new target
    r.scribble(e.signal_name)
    _e = r.outmost.meta_init(r, t=p_p12_p11_s12, s=p_p12, sig=e.signal_name)
    investigate(r, e, _e)
    # this force_region_init might be a problem
    r.inner._post_lifo(Event(signal=signals.force_region_init))
    r.inner.post_fifo(_e)
    status = return_status.HANDLED
  elif r.token_match(e.signal_name, "H1"):
    # WTF transition out of this orthogonal component to the outer state
    _state, _e = r.outmost.meta_trans(r, t=outer_state, s=p_p12, sig=e.signal_name)
    investigate(r, e, _e)
    r.same._post_fifo(_e)
    if _state:
      status = r.trans(_state)
    else:
      status = return_status.UNHANDLED
  # Exit signals
  elif(e.signal == signals.exit_region):
    r.scribble(e.signal_name)
    status = r.trans(p_r1_under_hidden_region)
  elif(e.signal == signals.EXIT_SIGNAL):
    r.inner.post_lifo(Event(signal=signals.exit_region))
    pprint("exit p_p12")
    status = return_status.HANDLED
  else:
    r.temp.fun = p_r1_over_hidden_region
    status = return_status.SUPER
  return status
# inner parallel
@othogonal_state
def p_p12_r1_under_hidden_region(r, e):
  """Hidden floor under p_p12_r1: parks the region until 'enter_region'."""
  if r.token_match(e.signal_name, "enter_region"):
    return r.trans(p_p12_r1_region)
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_p12_r1_under_hidden_region")
    return return_status.HANDLED
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_p12_r1_under_hidden_region")
    return return_status.HANDLED
  r.temp.fun = r.bottom
  return return_status.SUPER
# inner parallel
@othogonal_state
def p_p12_r1_region(r, e):
  """Active container of p_p12's first orthogonal region.

  On INIT it peels any INIT_META_SIGNAL payload to decide whether to take
  the default transition (to p_p12_p11) or jump straight to a targeted
  substate.  It also relays EXIT_META_SIGNAL payloads outward and replays
  BOUNCE_SAME_META_SIGNAL payloads into whichever sibling region held the
  previous state.
  """
  status = return_status.UNHANDLED
  if(e.signal == signals.ENTRY_SIGNAL):
    pprint("enter p_p12_r1_region")
    status = return_status.HANDLED
  elif(e.signal == signals.INIT_SIGNAL):
    # search for INIT_META_SIGNAL
    (_e, _state) = r.meta_peel(e)
    # If the target state is this state, just strip this layer of
    # the meta event and use the next one (this was done to make the
    # meta events consistent and easy to read and usable by different
    # types of WTF events.
    investigate(r, e, _e)
    # We can't compare the function directly because they can be arbitrarily
    # decorated by the user, so their addresses may not be the same, but their
    # names will be the same
    if _state and _state.__name__ == r.state_name:
      (_e, _state) = _e.payload.event, _e.payload.state
    # if _state is None or is referencing another region then follow are default
    # init behavior
    if _state is None or not r.within(bound=r.state_fn, query=_state):
      status = r.trans(p_p12_p11)
    else:
      # if _state is this state or a child of this state, transition to it
      status = r.trans(_state)
      # if there is more to our meta event, post it into the chart
      if _e is not None:
        r.post_fifo(_e)
  elif e.signal == signals.BOUNCE_SAME_META_SIGNAL:
    # NOTE(review): sibling region handlers call r.scribble(e.signal_name)
    # here before unpacking — confirm the omission is intentional.
    _state, _e = e.payload.state, e.payload.event
    investigate(r, e, _e)
    for region in r.same._regions:
      if region.has_state(e.payload.previous_state):
        region._post_fifo(_e)
        region._post_lifo(Event(signal=signals.enter_region))
    status = return_status.HANDLED
  elif(e.signal == signals.INIT_META_SIGNAL):
    # swallow stray init-meta noise at this level
    status = return_status.HANDLED
  elif(e.signal == signals.EXIT_META_SIGNAL):
    (_e, _state) = e.payload.event, e.payload.state
    investigate(r, e, _e)
    # forward outward only if the target lives inside this region
    if r.within(p_p12_r1_region, _state):
      r.outer._post_fifo(_e)
    status = return_status.HANDLED
  elif(e.signal == signals.exit_region):
    status = r.trans(p_p12_r1_under_hidden_region)
  elif(e.signal == signals.EXIT_SIGNAL):
    pprint("exit p_p12_r1_region")
    status = return_status.HANDLED
  else:
    r.temp.fun = p_p12_r1_under_hidden_region
    status = return_status.SUPER
  return status
@othogonal_state
def p_p12_r1_over_hidden_region(r, e):
  """Injector layer above p_p12_r1_region.

  A ``force_region_init`` event re-seats the region by transitioning back
  into it; all other events fall through to the region state.
  """
  if e.signal == signals.force_region_init:
    return r.trans(p_p12_r1_region)
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_p12_r1_over_hidden_region")
    return return_status.HANDLED
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_p12_r1_over_hidden_region")
    return return_status.HANDLED
  r.temp.fun = p_p12_r1_region
  return return_status.SUPER
# inner parallel
@othogonal_state
def p_p12_p11(r, e):
  """Inner parallel (orthogonal) state nested inside p_p12.

  On entry it seeds its inner regions with any peeled INIT_META_SIGNAL and
  wakes them with ``enter_region``.  It forwards the tokens handled inside
  its regions (G1, D4, I1), transitions onward when its regions report
  final, and participates in the meta-event protocol (EXIT_META_SIGNAL /
  OUTER_TRANS_REQUIRED) that stitches WTF transitions across regions.
  """
  status = return_status.UNHANDLED
  # enter all regions
  if(e.signal == signals.ENTRY_SIGNAL):
    pprint("enter p_p12_p11")
    # search for INIT_META_SIGNAL
    (_e, _state) = r.meta_peel(e)
    investigate(r, e, _e)
    if _state:
      r.inner._post_fifo(_e)
    r.inner.post_lifo(Event(signal=signals.enter_region))
    status = return_status.HANDLED
  elif(r.token_match(e.signal_name, "G1") or
       r.token_match(e.signal_name, "D4") or
       r.token_match(e.signal_name, "I1")):
    # tokens consumed inside this state's regions: relay them inward
    r.scribble(e.signal_name)
    r.inner.post_fifo(e)
    status = return_status.HANDLED
  elif(r.token_match(e.signal_name, r.outmost.regions['p_p12_p11'].final_signal_name)):
    # every inner region finished: advance this region to p_p12_s12
    r.scribble(e.signal_name)
    status = r.trans(p_p12_s12)
  elif e.signal == signals.EXIT_META_SIGNAL:
    (_e, _state) = e.payload.event, e.payload.state
    investigate(r, e, _e)
    # this appears backwards, but it needs to be this way.
    if r.within(bound=_state, query=r.state_fn):
      # The next state is going to be our region handler skip it and post this
      # region handler would have posted to the outer HSM
      if(_e.payload.event.signal == signals.EXIT_META_SIGNAL or
         _e.payload.event.signal == signals.BOUNCE_ACROSS_META_SIGNAL or
         _e.payload.event.signal == signals.OUTER_TRANS_REQUIRED
      ):
        (_e, _state) = _e.payload.event, _e.payload.state
        r.outer._post_lifo(_e)
      elif(_e.signal == signals.EXIT_META_SIGNAL):
        r.outer._post_lifo(_e)
      else:
        r.same._post_lifo(_e)
    status = return_status.HANDLED
  elif e.signal == signals.OUTER_TRANS_REQUIRED:
    status = return_status.HANDLED
    (_e, _state) = e.payload.event, e.payload.state
    investigate(r, e, _e)
    if _state.__name__ == r.state_fn.__name__:
      # target is this very state: bounce the inner regions
      r.inner.post_fifo(Event(signal=signals.exit_region))
      r.inner.post_fifo(Event(signal=signals.enter_region))
    else:
      if r.within(bound=r.state_fn, query=_state):
        status = r.trans(_state)
  elif(r.token_match(e.signal_name, "e4")):
    status = r.trans(p_p12_s12)
  elif(e.signal == signals.exit_region):
    r.scribble(e.signal_name)
    status = r.trans(p_p12_r1_under_hidden_region)
  elif(e.signal == signals.EXIT_SIGNAL):
    # shut down the inner regions before leaving
    r.inner.post_lifo(Event(signal=signals.exit_region))
    pprint("exit p_p12_p11")
    status = return_status.HANDLED
  else:
    r.temp.fun = p_p12_r1_over_hidden_region
    status = return_status.SUPER
  return status
@othogonal_state
def p_p12_p11_r1_under_hidden_region(rrr, e):
  """Dormant floor state of p_p12_p11's first region; woken by enter_region."""
  if rrr.token_match(e.signal_name, "enter_region"):
    return rrr.trans(p_p12_p11_r1_region)
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_p12_p11_r1_under_hidden_region")
    return return_status.HANDLED
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_p12_p11_r1_under_hidden_region")
    return return_status.HANDLED
  # everything else sinks to the region's bottom state
  rrr.temp.fun = rrr.bottom
  return return_status.SUPER
@othogonal_state
def p_p12_p11_r1_region(r, e):
  """Active container of p_p12_p11's first region.

  INIT peels any INIT_META_SIGNAL payload and either takes the default
  transition to p_p12_p11_s11 or jumps to a targeted child; EXIT_META
  payloads targeting this region are forwarded outward; BOUNCE_SAME
  payloads are replayed into whichever sibling region held the previous
  state.
  """
  status = return_status.UNHANDLED
  if(e.signal == signals.ENTRY_SIGNAL):
    pprint("enter p_p12_p11_r1_region")
    status = return_status.HANDLED
  elif(e.signal == signals.INIT_SIGNAL):
    # search for INIT_META_SIGNAL
    (_e, _state) = r.meta_peel(e)
    # If the target state is this state, just strip this layer of
    # the meta event and use the next one (this was done to make the
    # meta events consistent and easy to read and usable by different
    # types of WTF events.
    investigate(r, e, _e)
    # We can't compare the function directly because they can be arbitrarily
    # decorated by the user, so their addresses may not be the same, but their
    # names will be the same
    if _state and _state.__name__ == r.state_name:
      (_e, _state) = _e.payload.event, _e.payload.state
    # if _state is None or is referencing another region then follow are default
    # init behavior
    if _state is None or not r.within(bound=r.state_fn, query=_state):
      status = r.trans(p_p12_p11_s11)
    else:
      # if _state is this state or a child of this state, transition to it
      status = r.trans(_state)
      # if there is more to our meta event, post it into the chart
      if _e is not None:
        r.post_fifo(_e)
  elif(e.signal == signals.INIT_META_SIGNAL):
    status = return_status.HANDLED
  elif e.signal == signals.BOUNCE_SAME_META_SIGNAL:
    r.scribble(e.signal_name)
    _state, _e = e.payload.state, e.payload.event
    investigate(r, e, _e)
    for region in r.same._regions:
      if region.has_state(e.payload.previous_state):
        region._post_fifo(_e)
        region._post_lifo(Event(signal=signals.enter_region))
    status = return_status.HANDLED
  elif(e.signal == signals.EXIT_META_SIGNAL):
    (_e, _state) = e.payload.event, e.payload.state
    investigate(r, e, _e)
    # only forward if the meta target lives in this region
    if r.within(p_p12_p11_r1_region, _state):
      r.outer._post_fifo(_e)
    status = return_status.HANDLED
  elif(e.signal == signals.exit_region):
    status = r.trans(p_p12_p11_r1_under_hidden_region)
  elif(e.signal == signals.EXIT_SIGNAL):
    pprint("exit p_p12_p11_r1_region")
    status = return_status.HANDLED
  else:
    r.temp.fun = p_p12_p11_r1_under_hidden_region
    status = return_status.SUPER
  return status
@othogonal_state
def p_p12_p11_r1_over_hidden_region(r, e):
  """Injector layer over p_p12_p11_r1_region; force_region_init re-seats it."""
  if e.signal == signals.force_region_init:
    return r.trans(p_p12_p11_r1_region)
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_p12_p11_r1_over_hidden_region")
    return return_status.HANDLED
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_p12_p11_r1_over_hidden_region")
    return return_status.HANDLED
  r.temp.fun = p_p12_p11_r1_region
  return return_status.SUPER
@othogonal_state
def p_p12_p11_s11(r, e):
  """Leaf state of p_p12_p11's first region; e1 advances to p_p12_p11_s12."""
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_p12_p11_s11")
    return return_status.HANDLED
  if e.signal == signals.e1:
    return r.trans(p_p12_p11_s12)
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_p12_p11_s11")
    return return_status.HANDLED
  r.temp.fun = p_p12_p11_r1_over_hidden_region
  return return_status.SUPER
@othogonal_state
def p_p12_p11_s12(r, e):
  """Leaf state of p_p12_p11's first region.

  A D4 token triggers a cross-region (WTF) transition toward p_p12 via the
  outmost chart's meta_trans machinery.
  """
  status = return_status.UNHANDLED
  if(e.signal == signals.ENTRY_SIGNAL):
    pprint("enter p_p12_p11_s12")
    status = return_status.HANDLED
  elif r.token_match(e.signal_name, "D4"):
    # build the meta event chain describing a transition out to p_p12
    _state, _e = r.outmost.meta_trans(r, t=p_p12, s=p_p12_p11_s12, sig=e.signal_name)
    investigate(r, e, _e)
    r.same._post_fifo(_e)
    if _state:
      status = r.trans(_state)
    else:
      status = return_status.UNHANDLED
  elif(e.signal == signals.EXIT_SIGNAL):
    pprint("exit p_p12_p11_s12")
    status = return_status.HANDLED
  else:
    r.temp.fun = p_p12_p11_r1_over_hidden_region
    status = return_status.SUPER
  return status
@othogonal_state
def p_p12_p11_r2_under_hidden_region(r, e):
  """Dormant floor state of p_p12_p11's second region.

  Unlike its sibling under-hidden handlers, this one also relays
  EXIT_META_SIGNAL payloads outward while dormant.
  """
  status = return_status.UNHANDLED
  if(r.token_match(e.signal_name, "enter_region")):
    status = r.trans(p_p12_p11_r2_region)
  elif(e.signal == signals.ENTRY_SIGNAL):
    pprint("enter p_p12_p11_r2_under_hidden_region")
    status = return_status.HANDLED
  elif(e.signal == signals.EXIT_SIGNAL):
    pprint("exit p_p12_p11_r2_under_hidden_region")
    status = return_status.HANDLED
  elif(e.signal == signals.EXIT_META_SIGNAL):
    # NOTE(review): no investigate() call here, unlike the region handlers —
    # confirm the asymmetry is intentional.
    (_e, _state) = e.payload.event, e.payload.state
    if r.within(p_p12_p11_r2_region, _state):
      r.outer._post_fifo(_e)
    status = return_status.HANDLED
  else:
    r.temp.fun = r.bottom
    status = return_status.SUPER
  return status
@othogonal_state
def p_p12_p11_r2_region(r, e):
  """Active container of p_p12_p11's second region.

  INIT peels any INIT_META_SIGNAL payload and either takes the default
  transition to p_p12_p11_s21 or jumps to a targeted child; EXIT_META
  payloads targeting this region are forwarded outward; BOUNCE_SAME
  payloads are replayed into the sibling region that held the previous
  state.
  """
  status = return_status.UNHANDLED
  if(e.signal == signals.ENTRY_SIGNAL):
    pprint("enter p_p12_p11_r2_region")
    status = return_status.HANDLED
  elif(e.signal == signals.INIT_SIGNAL):
    # search for INIT_META_SIGNAL
    (_e, _state) = r.meta_peel(e)
    # If the target state is this state, just strip this layer of
    # the meta event and use the next one (this was done to make the
    # meta events consistent and easy to read and usable by different
    # types of WTF events.
    investigate(r, e, _e)
    # We can't compare the function directly because they can be arbitrarily
    # decorated by the user, so their addresses may not be the same, but their
    # names will be the same
    if _state and _state.__name__ == r.state_name:
      (_e, _state) = _e.payload.event, _e.payload.state
    # if _state is None or is referencing another region then follow are default
    # init behavior
    if _state is None or not r.within(bound=r.state_fn, query=_state):
      status = r.trans(p_p12_p11_s21)
    else:
      # if _state is this state or a child of this state, transition to it
      status = r.trans(_state)
      # if there is more to our meta event, post it into the chart
      if _e is not None:
        r.post_fifo(_e)
  elif(e.signal == signals.INIT_META_SIGNAL):
    status = return_status.HANDLED
  elif e.signal == signals.BOUNCE_SAME_META_SIGNAL:
    r.scribble(e.signal_name)
    _state, _e = e.payload.state, e.payload.event
    investigate(r, e, _e)
    for region in r.same._regions:
      if region.has_state(e.payload.previous_state):
        region._post_fifo(_e)
        region._post_lifo(Event(signal=signals.enter_region))
    status = return_status.HANDLED
  elif(e.signal == signals.EXIT_META_SIGNAL):
    (_e, _state) = e.payload.event, e.payload.state
    investigate(r, e, _e)
    # only forward if the meta target lives in this region
    if r.within(p_p12_p11_r2_region, _state):
      r.outer._post_fifo(_e)
    status = return_status.HANDLED
  elif(e.signal == signals.exit_region):
    status = r.trans(p_p12_p11_r2_under_hidden_region)
  elif(e.signal == signals.EXIT_SIGNAL):
    pprint("exit p_p12_p11_r2_region")
    status = return_status.HANDLED
  else:
    r.temp.fun = p_p12_p11_r2_under_hidden_region
    status = return_status.SUPER
  return status
@othogonal_state
def p_p12_p11_r2_over_hidden_region(r, e):
  """Injector layer over p_p12_p11_r2_region; force_region_init re-seats it."""
  if e.signal == signals.force_region_init:
    return r.trans(p_p12_p11_r2_region)
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_p12_p11_r2_over_hidden_region")
    return return_status.HANDLED
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_p12_p11_r2_over_hidden_region")
    return return_status.HANDLED
  r.temp.fun = p_p12_p11_r2_region
  return return_status.SUPER
@othogonal_state
def p_p12_p11_s21(r, e):
  """Leaf state of p_p12_p11's second region.

  G1 starts a cross-region (WTF) transition toward p_p22_s11; I1 starts
  one toward p_p11_s12, both via the outmost chart's meta_trans machinery.
  """
  status = return_status.UNHANDLED
  if(e.signal == signals.ENTRY_SIGNAL):
    pprint("enter p_p12_p11_s21")
    status = return_status.HANDLED
  elif(e.signal == signals.G1):
    _state, _e = r.outmost.meta_trans(
      r,
      s=p_p12_p11_s21,
      t=p_p22_s11,
      sig=e.signal_name
    )
    investigate(r, e, _e)
    r.same._post_fifo(_e)
    if _state:
      status = r.trans(_state)
    else:
      status = return_status.UNHANDLED
  elif(e.signal == signals.I1):
    _state, _e = r.outmost.meta_trans(
      r,
      s=p_p12_p11_s21,
      t=p_p11_s12,
      sig=e.signal_name
    )
    investigate(r, e, _e)
    r.same._post_fifo(_e)
    if _state:
      status = r.trans(_state)
    else:
      # NOTE(review): the G1 branch falls back to UNHANDLED here while this
      # one returns HANDLED — confirm the asymmetry is intentional.
      status = return_status.HANDLED
  elif(e.signal == signals.EXIT_SIGNAL):
    pprint("exit p_p12_p11_s21")
    status = return_status.HANDLED
  else:
    r.temp.fun = p_p12_p11_r2_over_hidden_region
    status = return_status.SUPER
  return status
# inner parallel
@othogonal_state
def p_p12_s12(rr, e):
  """Leaf state of p_p12's first region; e1 completes the region."""
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_p12_s12")
    return return_status.HANDLED
  if rr.token_match(e.signal_name, "e1"):
    return rr.trans(p_p12_r1_final)
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_p12_s12")
    return return_status.HANDLED
  rr.temp.fun = p_p12_r1_over_hidden_region
  return return_status.SUPER
# inner parallel
@othogonal_state
def p_p12_r1_final(rr, e):
  """Terminal state of p_p12's first region.

  Marks the region final on entry and, if all sibling regions are final,
  lets the composite post its final event to the outmost chart.
  """
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_p12_r1_final")
    rr.final = True
    rr.post_p_final_to_outmost_if_ready()
    return return_status.HANDLED
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_p12_r1_final")
    rr.final = False
    return return_status.HANDLED
  rr.temp.fun = p_p12_r1_over_hidden_region
  return return_status.SUPER
@othogonal_state
def p_p12_r2_under_hidden_region(rr, e):
  """Dormant floor state of p_p12's second region; woken by enter_region."""
  if rr.token_match(e.signal_name, "enter_region"):
    return rr.trans(p_p12_r2_region)
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_p12_r2_under_hidden_region")
    return return_status.HANDLED
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_p12_r2_under_hidden_region")
    return return_status.HANDLED
  # everything else sinks to the region's bottom state
  rr.temp.fun = rr.bottom
  return return_status.SUPER
# inner parallel
@othogonal_state
def p_p12_r2_region(r, e):
  """Active container of p_p12's second orthogonal region.

  INIT peels any INIT_META_SIGNAL payload and either takes the default
  transition to p_p12_s21 or jumps to a targeted child; EXIT_META payloads
  targeting this region are forwarded outward; BOUNCE_SAME payloads are
  replayed into the sibling region that held the previous state.
  """
  status = return_status.UNHANDLED
  if(e.signal == signals.ENTRY_SIGNAL):
    pprint("enter p_p12_r2_region")
    status = return_status.HANDLED
  elif(e.signal == signals.INIT_SIGNAL):
    # search for INIT_META_SIGNAL
    (_e, _state) = r.meta_peel(e)
    # If the target state is this state, just strip this layer of
    # the meta event and use the next one (this was done to make the
    # meta events consistent and easy to read and usable by different
    # types of WTF events.
    investigate(r, e, _e)
    # We can't compare the function directly because they can be arbitrarily
    # decorated by the user, so their addresses may not be the same, but their
    # names will be the same
    if _state and _state.__name__ == r.state_name:
      (_e, _state) = _e.payload.event, _e.payload.state
    # if _state is None or is referencing another region then follow are default
    # init behavior
    if _state is None or not r.within(bound=r.state_fn, query=_state):
      status = r.trans(p_p12_s21)
    else:
      # if _state is this state or a child of this state, transition to it
      status = r.trans(_state)
      # if there is more to our meta event, post it into the chart
      if _e is not None:
        r.post_fifo(_e)
  elif(e.signal == signals.INIT_META_SIGNAL):
    status = return_status.HANDLED
  elif e.signal == signals.BOUNCE_SAME_META_SIGNAL:
    r.scribble(e.signal_name)
    _state, _e = e.payload.state, e.payload.event
    investigate(r, e, _e)
    for region in r.same._regions:
      if region.has_state(e.payload.previous_state):
        region._post_fifo(_e)
        region._post_lifo(Event(signal=signals.enter_region))
    status = return_status.HANDLED
  elif(e.signal == signals.EXIT_META_SIGNAL):
    (_e, _state) = e.payload.event, e.payload.state
    investigate(r, e, _e)
    # only forward if the meta target lives in this region
    if r.within(p_p12_r2_region, _state):
      r.outer._post_fifo(_e)
    status = return_status.HANDLED
  elif(e.signal == signals.exit_region):
    status = r.trans(p_p12_r2_under_hidden_region)
  elif(e.signal == signals.EXIT_SIGNAL):
    pprint("exit p_p12_r2_region")
    status = return_status.HANDLED
  else:
    r.temp.fun = p_p12_r2_under_hidden_region
    status = return_status.SUPER
  return status
@othogonal_state
def p_p12_r2_over_hidden_region(r, e):
  """Injector layer over p_p12's second region.

  A ``force_region_init`` event re-seats the region; all other events fall
  through to the region state.
  """
  status = return_status.UNHANDLED
  if(e.signal == signals.force_region_init):
    status = r.trans(p_p12_r2_region)
  elif(e.signal == signals.ENTRY_SIGNAL):
    pprint("enter p_p12_r2_over_hidden_region")
    status = return_status.HANDLED
  elif(e.signal == signals.EXIT_SIGNAL):
    # fix: exit trace previously logged "p_p11_r2_over_hidden_region"
    # (copy-paste typo) — every handler logs its own state name.
    pprint("exit p_p12_r2_over_hidden_region")
    status = return_status.HANDLED
  else:
    r.temp.fun = p_p12_r2_region
    status = return_status.SUPER
  return status
# inner parallel
@othogonal_state
def p_p12_s21(r, e):
  """Leaf state of p_p12's second region; e1 advances to p_p12_s22."""
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_p12_s21")
    return return_status.HANDLED
  if r.token_match(e.signal_name, "e1"):
    return r.trans(p_p12_s22)
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_p12_s21")
    return return_status.HANDLED
  r.temp.fun = p_p12_r2_over_hidden_region
  return return_status.SUPER
# inner parallel
@othogonal_state
def p_p12_s22(r, e):
  """Leaf state of p_p12's second region.

  e2 completes the region; e1 bounces back to p_p12_s21.
  """
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_p12_s22")
    return return_status.HANDLED
  if r.token_match(e.signal_name, "e2"):
    return r.trans(p_p12_r2_final)
  if r.token_match(e.signal_name, "e1"):
    return r.trans(p_p12_s21)
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_p12_s22")
    return return_status.HANDLED
  r.temp.fun = p_p12_r2_over_hidden_region
  return return_status.SUPER
# inner parallel
@othogonal_state
def p_p12_r2_final(r, e):
  """Terminal state of p_p12's second region.

  Marks the region final on entry and, if all sibling regions are final,
  lets the composite post its final event to the outmost chart.
  """
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_p12_r2_final")
    r.final = True
    r.post_p_final_to_outmost_if_ready()
    return return_status.HANDLED
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_p12_r2_final")
    r.final = False
    return return_status.HANDLED
  r.temp.fun = p_p12_r2_over_hidden_region
  return return_status.SUPER
@othogonal_state
def p_r2_under_hidden_region(r, e):
  """Dormant floor state of p's second region; woken by enter_region."""
  if r.token_match(e.signal_name, "enter_region"):
    return r.trans(p_r2_region)
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_r2_under_hidden_region")
    return return_status.HANDLED
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_r2_under_hidden_region")
    return return_status.HANDLED
  # everything else sinks to the region's bottom state
  r.temp.fun = r.bottom
  return return_status.SUPER
@othogonal_state
def p_r2_region(r, e):
  """Active container of p's second orthogonal region.

  INIT peels any INIT_META_SIGNAL payload and either takes the default
  transition to p_s21 or jumps to a targeted child; EXIT_META payloads
  targeting this region are forwarded outward.
  """
  status = return_status.UNHANDLED
  if(e.signal == signals.ENTRY_SIGNAL):
    pprint("enter p_r2_region")
    status = return_status.HANDLED
  elif(e.signal == signals.INIT_SIGNAL):
    # search for INIT_META_SIGNAL
    (_e, _state) = r.meta_peel(e)
    # If the target state is this state, just strip this layer of
    # the meta event and use the next one (this was done to make the
    # meta events consistent and easy to read and usable by different
    # types of WTF events.
    investigate(r, e, _e)
    # We can't compare the function directly because they can be arbitrarily
    # decorated by the user, so their addresses may not be the same, but their
    # names will be the same
    if _state and _state.__name__ == r.state_name:
      (_e, _state) = _e.payload.event, _e.payload.state
    # if _state is None or is referencing another region then follow are default
    # init behavior
    if _state is None or not r.within(bound=r.state_fn, query=_state):
      status = r.trans(p_s21)
    else:
      # if _state is this state or a child of this state, transition to it
      status = r.trans(_state)
      # if there is more to our meta event, post it into the chart
      if _e is not None:
        r.post_fifo(_e)
  elif(e.signal == signals.INIT_META_SIGNAL):
    status = return_status.HANDLED
  elif e.signal == signals.BOUNCE_SAME_META_SIGNAL:
    r.scribble(e.signal_name)
    _state, _e = e.payload.state, e.payload.event
    investigate(r, e, _e)
    # NOTE(review): unlike the deeper region handlers, this one only
    # replays the bounce into *itself* (r == region) — confirm intentional.
    for region in r.same._regions:
      if r == region and r.has_state(e.payload.previous_state):
        region._post_fifo(_e)
        region._post_lifo(Event(signal=signals.enter_region))
    status = return_status.HANDLED
  elif(e.signal == signals.EXIT_META_SIGNAL):
    (_e, _state) = e.payload.event, e.payload.state
    investigate(r, e, _e)
    # only forward if the meta target lives in this region
    if r.within(p_r2_region, _state):
      r.outer._post_fifo(_e)
    status = return_status.HANDLED
  elif(e.signal == signals.exit_region):
    status = r.trans(p_r2_under_hidden_region)
  elif(e.signal == signals.EXIT_SIGNAL):
    pprint("exit p_r2_region")
    status = return_status.HANDLED
  else:
    r.temp.fun = p_r2_under_hidden_region
    status = return_status.SUPER
  return status
@othogonal_state
def p_r2_over_hidden_region(r, e):
  """Injector layer over p_r2_region; force_region_init re-seats it."""
  if e.signal == signals.force_region_init:
    return r.trans(p_r2_region)
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_r2_over_hidden_region")
    return return_status.HANDLED
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_r2_over_hidden_region")
    return return_status.HANDLED
  r.temp.fun = p_r2_region
  return return_status.SUPER
@othogonal_state
def p_s21(r, e):
  """Leaf state of p's second region.

  C0 transitions into the nested parallel p_p22; F1, G0 and H0 each start
  cross-region (WTF) transitions through the outmost chart's meta_trans
  machinery (to p_p22_s12, p_p12_p11_s21 and outer_state respectively).
  """
  status = return_status.UNHANDLED
  if(e.signal == signals.ENTRY_SIGNAL):
    pprint("enter p_s21")
    status = return_status.HANDLED
  elif(r.token_match(e.signal_name, "C0")):
    status = r.trans(p_p22)
  elif(r.token_match(e.signal_name, "F1")):
    _state, _e = r.outmost.meta_trans(r, t=p_p22_s12, s=p_s21, sig=e.signal_name)
    # NOTE(review): this branch posts before calling investigate(); the other
    # branches do the reverse — confirm the ordering doesn't matter here.
    r.same.post_fifo(_e)
    investigate(r, e, _e)
    if _state:
      status = r.trans(_state)
    else:
      status = return_status.UNHANDLED
  elif(r.token_match(e.signal_name, "G0")):
    _state, _e = r.outmost.meta_trans(
      r,
      s=p_s21,
      t=p_p12_p11_s21,
      sig=e.signal_name
    )
    investigate(r, e, _e)
    r.same.post_lifo(_e)
    if _state:
      status = r.trans(_state)
    else:
      status = return_status.UNHANDLED
  elif r.token_match(e.signal_name, "H0"):
    _state, _e = r.outmost.meta_trans(r, t=outer_state, s=p_s21, sig=e.signal_name)
    investigate(r, e, _e)
    r.same._post_fifo(_e)
    if _state:
      status = r.trans(_state)
    else:
      status = return_status.UNHANDLED
  elif(e.signal == signals.EXIT_SIGNAL):
    pprint("exit p_s21")
    status = return_status.HANDLED
  else:
    r.temp.fun = p_r2_over_hidden_region
    status = return_status.SUPER
  return status
@othogonal_state
def p_p22(r, e):
  """Parallel (orthogonal) state nested inside p's second region.

  On entry it seeds its inner regions with any peeled INIT_META_SIGNAL and
  wakes them with ``enter_region``.  It relays the tokens handled inside
  its regions (e1, e2, e4, D2, E2), transitions to p_r2_final when its
  regions report final, and participates in the EXIT_META_SIGNAL /
  OUTER_TRANS_REQUIRED meta-event protocol.
  """
  status = return_status.UNHANDLED
  # enter all regions
  if(e.signal == signals.ENTRY_SIGNAL):
    # search for INIT_META_SIGNAL
    # fix: use the instrumented pprint like every other state handler;
    # a bare print() bypassed the trace formatting.
    pprint("enter p_p22")
    (_e, _state) = r.meta_peel(e)
    investigate(r, e, _e)
    if _state:
      r.inner._post_fifo(_e)
    r.inner.post_lifo(Event(signal=signals.enter_region))
    status = return_status.HANDLED
  # any event handled within there regions must be pushed from here
  elif(r.token_match(e.signal_name, "e1") or
       r.token_match(e.signal_name, "e2") or
       r.token_match(e.signal_name, "e4") or
       r.token_match(e.signal_name, "D2") or
       r.token_match(e.signal_name, "E2")
  ):
    r.scribble(e.signal_name)
    r.inner.post_fifo(e)
    status = return_status.HANDLED
  # final token match
  elif(r.token_match(e.signal_name, r.outmost.regions['p_p22'].final_signal_name)):
    r.scribble(e.signal_name)
    status = r.trans(p_r2_final)
  elif e.signal == signals.EXIT_META_SIGNAL:
    (_e, _state) = e.payload.event, e.payload.state
    investigate(r, e, _e)
    # this appears backwards, but it needs to be this way.
    if r.within(bound=_state, query=r.state_fn):
      # The next state is going to be our region handler skip it and post this
      # region handler would have posted to the outer HSM
      if(_e.payload.event.signal == signals.EXIT_META_SIGNAL or
         _e.payload.event.signal == signals.BOUNCE_ACROSS_META_SIGNAL or
         _e.payload.event.signal == signals.OUTER_TRANS_REQUIRED
      ):
        (_e, _state) = _e.payload.event, _e.payload.state
        r.outer._post_lifo(_e)
      elif(_e.signal == signals.EXIT_META_SIGNAL):
        r.outer._post_lifo(_e)
      else:
        r.same._post_lifo(_e)
    status = return_status.HANDLED
  elif e.signal == signals.OUTER_TRANS_REQUIRED:
    status = return_status.HANDLED
    (_e, _state) = e.payload.event, e.payload.state
    investigate(r, e, _e)
    if _state.__name__ == r.state_fn.__name__:
      # target is this very state: bounce the inner regions
      r.inner.post_fifo(Event(signal=signals.exit_region))
      r.inner.post_fifo(Event(signal=signals.enter_region))
    else:
      if r.within(bound=r.state_fn, query=_state):
        status = r.trans(_state)
  elif(e.signal == signals.exit_region):
    r.scribble(e.signal_name)
    status = r.trans(p_r2_under_hidden_region)
  elif(e.signal == signals.C1):
    status = r.trans(p_s21)
  elif(e.signal == signals.EXIT_SIGNAL):
    # shut down the inner regions before leaving
    r.inner.post_lifo(Event(signal=signals.exit_region))
    pprint("exit p_p22")
    status = return_status.HANDLED
  else:
    r.temp.fun = p_r2_over_hidden_region
    status = return_status.SUPER
  return status
# inner parallel
@othogonal_state
def p_p22_r1_under_hidden_region(r, e):
  """Dormant floor state of p_p22's first region; woken by enter_region."""
  if r.token_match(e.signal_name, "enter_region"):
    return r.trans(p_p22_r1_region)
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_p22_r1_under_hidden_region")
    return return_status.HANDLED
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_p22_r1_under_hidden_region")
    return return_status.HANDLED
  # everything else sinks to the region's bottom state
  r.temp.fun = r.bottom
  return return_status.SUPER
@othogonal_state
def p_p22_r1_region(r, e):
  """Active container of p_p22's first region.

  INIT peels any INIT_META_SIGNAL payload and either takes the default
  transition to p_p22_s11 or jumps to a targeted child; BOUNCE_SAME
  payloads are replayed into the sibling region that held the previous
  state.  NOTE(review): unlike the other region handlers, this one has no
  EXIT_META_SIGNAL branch — confirm intentional.
  """
  status = return_status.UNHANDLED
  if(e.signal == signals.ENTRY_SIGNAL):
    pprint("enter p_p22_r1_region")
    status = return_status.HANDLED
  elif(e.signal == signals.INIT_SIGNAL):
    # search for INIT_META_SIGNAL
    (_e, _state) = r.meta_peel(e)
    # If the target state is this state, just strip this layer of
    # the meta event and use the next one (this was done to make the
    # meta events consistent and easy to read and usable by different
    # types of WTF events.
    investigate(r, e, _e)
    # We can't compare the function directly because they can be arbitrarily
    # decorated by the user, so their addresses may not be the same, but their
    # names will be the same
    if _state and _state.__name__ == r.state_name:
      (_e, _state) = _e.payload.event, _e.payload.state
    # if _state is None or is referencing another region then follow are default
    # init behavior
    if _state is None or not r.within(bound=r.state_fn, query=_state):
      status = r.trans(p_p22_s11)
    else:
      # if _state is this state or a child of this state, transition to it
      status = r.trans(_state)
      # if there is more to our meta event, post it into the chart
      if _e is not None:
        r.post_fifo(_e)
  elif e.signal == signals.BOUNCE_SAME_META_SIGNAL:
    r.scribble(e.signal_name)
    _state, _e = e.payload.state, e.payload.event
    investigate(r, e, _e)
    for region in r.same._regions:
      if region.has_state(e.payload.previous_state):
        region._post_fifo(_e)
        region._post_lifo(Event(signal=signals.enter_region))
    status = return_status.HANDLED
  # can we get rid of exit_region?
  elif(e.signal == signals.exit_region):
    status = r.trans(p_p22_r1_under_hidden_region)
  elif(e.signal == signals.INIT_META_SIGNAL):
    status = return_status.HANDLED
  elif(e.signal == signals.EXIT_SIGNAL):
    pprint("exit p_p22_r1_region")
    status = return_status.HANDLED
  else:
    r.temp.fun = p_p22_r1_under_hidden_region
    status = return_status.SUPER
  return status
@othogonal_state
def p_p22_r1_over_hidden_region(r, e):
  """Injector layer over p_p22's first region.

  NOTE(review): ``force_region_init`` parks the region in its
  under-hidden state here, whereas the p_p12 over-hidden handlers
  re-enter their region state directly — confirm this difference is
  intentional and not a copy-paste slip.
  """
  status = return_status.UNHANDLED
  if(e.signal == signals.force_region_init):
    status = r.trans(p_p22_r1_under_hidden_region)
  elif(e.signal == signals.ENTRY_SIGNAL):
    pprint("enter p_p22_r1_over_hidden_region")
    status = return_status.HANDLED
  elif(e.signal == signals.EXIT_SIGNAL):
    pprint("exit p_p22_r1_over_hidden_region")
    status = return_status.HANDLED
  else:
    r.temp.fun = p_p22_r1_region
    status = return_status.SUPER
  return status
@othogonal_state
def p_p22_s11(r, e):
  """Leaf state of p_p22's first region; e4 advances to p_p22_s12."""
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_p22_s11")
    return return_status.HANDLED
  if r.token_match(e.signal_name, "e4"):
    return r.trans(p_p22_s12)
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_p22_s11")
    return return_status.HANDLED
  r.temp.fun = p_p22_r1_over_hidden_region
  return return_status.SUPER
@othogonal_state
def p_p22_s12(r, e):
  """Leaf state of p_p22's first region; e1 completes the region."""
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_p22_s12")
    return return_status.HANDLED
  if r.token_match(e.signal_name, "e1"):
    return r.trans(p_p22_r1_final)
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_p22_s12")
    return return_status.HANDLED
  r.temp.fun = p_p22_r1_over_hidden_region
  return return_status.SUPER
@othogonal_state
def p_p22_r1_final(r, e):
  """Terminal state of p_p22's first region.

  Marks the region final on entry and, if all sibling regions are final,
  lets the composite post its final event to the outmost chart.
  """
  status = return_status.UNHANDLED
  if(e.signal == signals.ENTRY_SIGNAL):
    pprint("enter p_p22_r1_final")
    # fix: removed a redundant duplicate `status = return_status.HANDLED`
    # that preceded the final-flag bookkeeping; siblings assign it once.
    r.final = True
    r.post_p_final_to_outmost_if_ready()
    status = return_status.HANDLED
  elif(e.signal == signals.EXIT_SIGNAL):
    pprint("exit p_p22_r1_final")
    r.final = False
    status = return_status.HANDLED
  else:
    r.temp.fun = p_p22_r1_over_hidden_region
    status = return_status.SUPER
  return status
# inner parallel
@othogonal_state
def p_p22_r2_under_hidden_region(r, e):
  """Dormant floor state of p_p22's second region; woken by enter_region."""
  if r.token_match(e.signal_name, "enter_region"):
    return r.trans(p_p22_r2_region)
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_p22_r2_under_hidden_region")
    return return_status.HANDLED
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_p22_r2_under_hidden_region")
    return return_status.HANDLED
  # everything else sinks to the region's bottom state
  r.temp.fun = r.bottom
  return return_status.SUPER
@othogonal_state
def p_p22_r2_region(r, e):
  """Active container of p_p22's second region.

  INIT peels any INIT_META_SIGNAL payload and either takes the default
  transition to p_p22_s21 or jumps to a targeted child; EXIT_META payloads
  targeting this region are forwarded outward; BOUNCE_SAME payloads are
  replayed into the sibling region that held the previous state.
  """
  status = return_status.UNHANDLED
  if(e.signal == signals.ENTRY_SIGNAL):
    pprint("enter p_p22_r2_region")
    status = return_status.HANDLED
  elif(e.signal == signals.INIT_SIGNAL):
    # search for INIT_META_SIGNAL
    (_e, _state) = r.meta_peel(e)
    # If the target state is this state, just strip this layer of
    # the meta event and use the next one (this was done to make the
    # meta events consistent and easy to read and usable by different
    # types of WTF events.
    investigate(r, e, _e)
    # We can't compare the function directly because they can be arbitrarily
    # decorated by the user, so their addresses may not be the same, but their
    # names will be the same
    if _state and _state.__name__ == r.state_name:
      (_e, _state) = _e.payload.event, _e.payload.state
    # if _state is None or is referencing another region then follow are default
    # init behavior
    if _state is None or not r.within(bound=r.state_fn, query=_state):
      status = r.trans(p_p22_s21)
    else:
      # if _state is this state or a child of this state, transition to it
      status = r.trans(_state)
      # if there is more to our meta event, post it into the chart
      if _e is not None:
        r.post_fifo(_e)
  elif(e.signal == signals.INIT_META_SIGNAL):
    status = return_status.HANDLED
  elif e.signal == signals.BOUNCE_SAME_META_SIGNAL:
    r.scribble(e.signal_name)
    _state, _e = e.payload.state, e.payload.event
    investigate(r, e, _e)
    for region in r.same._regions:
      if region.has_state(e.payload.previous_state):
        region._post_fifo(_e)
        region._post_lifo(Event(signal=signals.enter_region))
    status = return_status.HANDLED
  elif(e.signal == signals.EXIT_META_SIGNAL):
    (_e, _state) = e.payload.event, e.payload.state
    investigate(r, e, _e)
    # only forward if the meta target lives in this region
    if r.within(p_p22_r2_region, _state):
      r.outer._post_fifo(_e)
    status = return_status.HANDLED
  elif(e.signal == signals.exit_region):
    status = r.trans(p_p22_r2_under_hidden_region)
  elif(e.signal == signals.EXIT_SIGNAL):
    pprint("exit p_p22_r2_region")
    status = return_status.HANDLED
  else:
    r.temp.fun = p_p22_r2_under_hidden_region
    status = return_status.SUPER
  return status
@othogonal_state
def p_p22_r2_over_hidden_region(r, e):
  """Injector layer over p_p22's second region.

  NOTE(review): like its r1 sibling, ``force_region_init`` parks the
  region in its under-hidden state here, whereas the p_p12 over-hidden
  handlers re-enter their region state directly — confirm intentional.
  """
  status = return_status.UNHANDLED
  if(e.signal == signals.force_region_init):
    status = r.trans(p_p22_r2_under_hidden_region)
  elif(e.signal == signals.ENTRY_SIGNAL):
    pprint("enter p_p22_r2_over_hidden_region")
    status = return_status.HANDLED
  elif(e.signal == signals.EXIT_SIGNAL):
    pprint("exit p_p22_r2_over_hidden_region")
    status = return_status.HANDLED
  else:
    r.temp.fun = p_p22_r2_region
    status = return_status.SUPER
  return status
@othogonal_state
def p_p22_s21(r, e):
  """Leaf state of p_p22's second region.

  e1 advances to p_p22_s22; a D2 token starts a cross-region (WTF)
  transition toward some_other_state via the outmost chart's meta_trans
  machinery.
  """
  status = return_status.UNHANDLED
  if(e.signal == signals.ENTRY_SIGNAL):
    pprint("enter p_p22_s21")
    status = return_status.HANDLED
  elif(r.token_match(e.signal_name, "e1")):
    status = r.trans(p_p22_s22)
  elif r.token_match(e.signal_name, "D2"):
    _state, _e = r.outmost.meta_trans(r, t=some_other_state, s=p_p22_s21, sig=e.signal_name)
    investigate(r, e, _e)
    r.same._post_fifo(_e)
    if _state:
      status = r.trans(_state)
    else:
      status = return_status.UNHANDLED
  elif(e.signal == signals.EXIT_SIGNAL):
    pprint("exit p_p22_s21")
    status = return_status.HANDLED
  else:
    r.temp.fun = p_p22_r2_over_hidden_region
    status = return_status.SUPER
  return status
@othogonal_state
def p_p22_s22(r, e):
  """State p_p22_s22: leaf state in p_p22's second region.

  "e2" drives this region to its final state; entry/exit are logged and
  any other event is delegated to p_p22_r2_over_hidden_region.
  """
  if e.signal == signals.ENTRY_SIGNAL:
    pprint("enter p_p22_s22")
    return return_status.HANDLED
  if r.token_match(e.signal_name, "e2"):
    return r.trans(p_p22_r2_final)
  if e.signal == signals.EXIT_SIGNAL:
    pprint("exit p_p22_s22")
    return return_status.HANDLED
  r.temp.fun = p_p22_r2_over_hidden_region
  return return_status.SUPER
@othogonal_state
def p_p22_r2_final(r, e):
  """Final state for p_p22's second region.

  On entry it marks the region final and notifies the outmost chart if
  every sibling region is also final; on exit the final flag is cleared.
  """
  sig = e.signal
  if sig == signals.ENTRY_SIGNAL:
    pprint("enter p_p22_r2_final")
    r.final = True
    r.post_p_final_to_outmost_if_ready()
    return return_status.HANDLED
  if sig == signals.EXIT_SIGNAL:
    pprint("exit p_p22_r2_final")
    r.final = False
    return return_status.HANDLED
  r.temp.fun = p_p22_r2_over_hidden_region
  return return_status.SUPER
@othogonal_state
def p_r2_final(r, e):
  """Final state for p's second region.

  Entry sets the region's final flag and pings the outmost chart in case
  all regions are now done; exit clears the flag. Everything else is
  deferred to p_r2_over_hidden_region.
  """
  sig = e.signal
  if sig == signals.ENTRY_SIGNAL:
    pprint("enter p_r2_final")
    r.final = True
    r.post_p_final_to_outmost_if_ready()
    return return_status.HANDLED
  if sig == signals.EXIT_SIGNAL:
    pprint("exit p_r2_final")
    r.final = False
    return return_status.HANDLED
  r.temp.fun = p_r2_over_hidden_region
  return return_status.SUPER
@state
def outer_state(self, e):
  """Top application state of the chart (parent is self.bottom).

  Handles:
    * "to_p": plain transition into the orthogonal component p.
    * "E0":  meta-init targeting p_p11_s22 deep inside p.
    * "B0":  meta-init targeting p_p22 inside p.
  """
  status = return_status.UNHANDLED
  if(e.signal == signals.ENTRY_SIGNAL):
    pprint("enter outer_state")
    status = return_status.HANDLED
  elif(self.token_match(e.signal_name, "to_p")):
    if self.live_spy and self.instrumented:
      self.live_spy_callback("{}:outer_state".format(e.signal_name))
    status = self.trans(p)
  elif(self.token_match(e.signal_name, "E0")):
    if self.live_spy and self.instrumented:
      self.live_spy_callback("{}:outer_state".format(e.signal_name))
    # Build the meta-init chain from here (s=) to the deep target (t=),
    # queue its event payload, then transition to the chain's entry state.
    # NOTE(review): unlike the B0 branch below, this branch does not call
    # investigate(...) — confirm that is intentional.
    _e = self.meta_init(r=self, t=p_p11_s22, s=outer_state, sig=e.signal_name)
    self.post_fifo(_e.payload.event)
    #self.live_spy_callback(ps(_e))
    #self.complete_circuit()
    status = self.trans(_e.payload.state)
  elif(self.token_match(e.signal_name, "B0")):
    _e = self.meta_init(r=self, t=p_p22, s=outer_state, sig=e.signal_name)
    investigate(self, e, _e)
    self.post_fifo(_e.payload.event)
    #self.live_spy_callback(ps(_e))
    #self.complete_circuit()
    status = self.trans(_e.payload.state)
  elif(e.signal == signals.EXIT_SIGNAL):
    pprint("exit outer_state")
    status = return_status.HANDLED
  else:
    self.temp.fun = self.bottom
    status = return_status.SUPER
  return status
@state
def some_other_state(self, e):
  """Sibling state of p used as a target/source for meta transitions.

  "F0" triggers a meta-init deep into p (target p_p22_s21); all other
  events fall through to outer_state.
  """
  status = return_status.UNHANDLED
  if(e.signal == signals.ENTRY_SIGNAL):
    pprint("enter some_other_state")
    status = return_status.HANDLED
  elif(e.signal == signals.EXIT_SIGNAL):
    pprint("exit some_other_state")
    status = return_status.HANDLED
  elif(self.token_match(e.signal_name, "F0")):
    _e = self.meta_init(r=self, t=p_p22_s21, s=some_other_state, sig=e.signal_name)
    investigate(self, e, _e)
    self.post_fifo(_e.payload.event)
    #self.live_spy_callback(ps(_e))
    #self.complete_circuit()
    status = self.trans(_e.payload.state)
  elif(e.signal == signals.F0):
    # NOTE(review): this branch looks unreachable if token_match above
    # already matches the literal name "F0" — confirm token_match
    # semantics. It also differs from the branch above: meta_init is
    # called without s=, and the spy label says "outer_state" (appears
    # copy-pasted from outer_state's handler).
    if self.live_spy and self.instrumented:
      self.live_spy_callback("{}:outer_state".format(e.signal_name))
    _e = self.meta_init(r=self, t=p_p22_s21, sig=e.signal_name)
    self.post_fifo(_e.payload.event)
    status = self.trans(_e.payload.state)
  else:
    self.temp.fun = outer_state
    status = return_status.SUPER
  return status
@state
def p(self, e):
  """Injector state for the orthogonal component p.

  This state owns p's inner regions: on entry it starts them, on exit it
  tears them down, and while active it forwards the region-handled event
  tokens into self.inner. Meta signals (INIT/EXIT/BOUNCE/...) carry
  multi-step transition chains between region charts and the outer chart.
  """
  status = return_status.UNHANDLED
  # enter all regions
  if(e.signal == signals.ENTRY_SIGNAL):
    self.scribble("[p] {}".format(e.signal_name))
    (_e, _state) = self.meta_peel(e) # search for INIT_META_SIGNAL
    if _state:
      # a meta-init chain was attached: seed the regions with its payload
      self.inner._post_fifo(_e)
    pprint("enter p")
    self.inner.post_lifo(Event(signal=signals.enter_region))
    status = return_status.HANDLED
  # any event handled within their regions must be pushed from here
  elif(type(self.regions) == dict and (self.token_match(e.signal_name, "e1") or
      self.token_match(e.signal_name, "e2") or
      self.token_match(e.signal_name, "e3") or
      self.token_match(e.signal_name, "e4") or
      self.token_match(e.signal_name, "e5") or
      self.token_match(e.signal_name, "A0") or
      self.token_match(e.signal_name, "C0") or
      self.token_match(e.signal_name, "C1") or
      self.token_match(e.signal_name, "D0") or
      self.token_match(e.signal_name, "D2") or
      self.token_match(e.signal_name, "D3") or
      self.token_match(e.signal_name, "D4") or
      self.token_match(e.signal_name, "E2") or
      self.token_match(e.signal_name, "F1") or
      self.token_match(e.signal_name, "F2") or
      self.token_match(e.signal_name, "G0") or
      self.token_match(e.signal_name, "G1") or
      self.token_match(e.signal_name, "G3") or
      self.token_match(e.signal_name, "I1") or
      self.token_match(e.signal_name, "H0") or
      self.token_match(e.signal_name, "H1") or
      self.token_match(e.signal_name, self.regions['p_p11'].final_signal_name) or
      self.token_match(e.signal_name, self.regions['p_p12'].final_signal_name) or
      self.token_match(e.signal_name, self.regions['p_p22'].final_signal_name)
      )):
    self.scribble("[p] {}".format(e.signal_name))
    self.inner.post_fifo(e)
    status = return_status.HANDLED
  elif(e.signal == signals.INIT_META_SIGNAL):
    # hasn't been updated
    self.regions['p']._post_lifo(Event(signal=signals.force_region_init))
    self.regions['p'].post_fifo(e.payload.event.payload.event)
    status = return_status.HANDLED
  elif e.signal == signals.BOUNCE_ACROSS_META_SIGNAL:
    # A transition that crosses between sibling regions: re-seed the
    # region that held the previous state, exit it, re-enter the others.
    self.scribble("[p] {}".format(e.signal_name))
    _e, _state = e.payload.event, e.payload.state
    self.inner._post_fifo(_e)
    for region in self.inner._regions:
      if region.has_state(e.payload.previous_state):
        region.pop_event()
        region._post_lifo(Event(signal=signals.exit_region))
      else:
        region.post_lifo(Event(signal=signals.enter_region))
    # drive every region's queue to completion in order
    [region.complete_circuit() for region in self.inner._regions]
    status = return_status.HANDLED
  elif(self.token_match(e.signal_name, "E1")):
    # E1: re-init the regions, then inject a meta chain targeting p_p11_s12
    self.scribble("[p] {}".format(e.signal_name))
    _e = self.meta_init(r=self, s=p, t=p_p11_s12, sig=e.signal)
    self.scribble(payload_string(_e))
    self.inner._post_lifo(Event(signal=signals.force_region_init))
    self.inner.post_fifo(_e)
    status = return_status.HANDLED
  # final token match: all of p's regions reported final
  elif(type(self.regions) == dict and self.token_match(e.signal_name,
    self.regions['p'].final_signal_name)):
    self.scribble("[p] {}".format('exit_region'))
    self.regions['p'].post_fifo(Event(signal=signals.exit_region))
    status = self.trans(some_other_state)
  elif(self.token_match(e.signal_name, "to_o")):
    status = self.trans(outer_state)
  elif(self.token_match(e.signal_name, "to_s")):
    status = self.trans(some_other_state)
  elif(self.token_match(e.signal_name, "A1")):
    # self-transition: exits and re-enters p, resetting the regions
    status = self.trans(p)
  elif(self.token_match(e.signal_name, "B1")):
    _e = self.meta_init(r=self, s=p, t=p_p22_s11, sig=e.signal)
    self.scribble(payload_string(_e))
    self.inner._post_lifo(Event(signal=signals.force_region_init))
    self.inner.post_fifo(_e)
    status = return_status.HANDLED
  elif(self.token_match(e.signal_name, "D1")):
    status = self.trans(outer_state)
  elif(e.signal == signals.EXIT_META_SIGNAL):
    # pass the exit chain upward (LIFO so it is seen before queued work)
    (_e, _state) = e.payload.event, e.payload.state
    investigate(self, e, _e)
    self.post_lifo(_e)
    status = return_status.HANDLED
  elif(e.signal == signals.OUTER_TRANS_REQUIRED):
    status = return_status.HANDLED
    _state = e.payload.state
    investigate(self, e)
    if _state != p:
      status = self.trans(_state)
    else:
      # target is p itself: bounce the regions instead of re-entering p
      self.inner.post_fifo(Event(signal=signals.exit_region))
      self.inner.post_fifo(Event(signal=signals.enter_region))
  elif(e.signal == signals.EXIT_SIGNAL):
    self.scribble("[p] {}".format('exit_region'))
    self.inner.post_lifo(Event(signal=signals.exit_region))
    pprint("exit p")
    status = return_status.HANDLED
  elif(e.signal == signals.exit_region):
    self.scribble("[p] {}".format('exit_region'))
    status = return_status.HANDLED
  else:
    self.temp.fun = outer_state
    status = return_status.SUPER
  return status
if __name__ == '__main__':
regression = True
active_states = None
example = XmlChart(
name='x',
log_file="/mnt/c/github/miros-xml/experiment/4th_example.log",
live_trace=False,
live_spy=True,
)
#example.instrumented = False
example.instrumented = True
example.start()
time.sleep(0.20)
example.report("\nstarting regression\n")
if regression:
    def build_test(sig, expected_result, old_result, duration=0.2):
      '''Post *sig* to the chart, wait *duration* seconds, then compare the
      chart's active states against *expected_result*.

      Prints and logs a before/after line; on mismatch it reports the
      caller's line number and the state difference, then exits the
      process with status 1. Returns the observed active states so the
      caller can chain them into the next step as *old_result*.
      '''
      example.post_fifo(Event(signal=sig))
      #if sig == 'G1':
      #  active_states = example.active_states()[:]
      #  string1 = "{:>39}{:>5} <-> {:<80}".format(str(old_result), sig, str(active_states))
      #  print(string1)
      time.sleep(duration)
      active_states = example.active_states()[:]
      string1 = "{:>39}{:>5} <-> {:<80}".format(str(old_result), sig, str(active_states))
      string2 = "\n{} <- {} == {}\n".format(str(old_result), sig, str(active_states))
      print(string1)
      example.report(string2)
      # collect the states that were observed but not expected
      list_difference = []
      for item in active_states:
        if item not in expected_result:
          list_difference.append(item)
      if active_states != expected_result:
        # report the call site of the failing step rather than this helper
        previous_frame = inspect.currentframe().f_back
        fdata = FrameData(*inspect.getframeinfo(previous_frame))
        function_name = '__main__'
        line_number = fdata.line_number
        print("Assert error from {}:{}".format(function_name, line_number))
        print("From: {}->{}".format(sig, old_result))
        print("Expecting: {}".format(expected_result))
        print("Observed: {}".format(active_states))
        print("Difference: {}".format(list_difference))
        example.active_states()
        exit(1)
      #assert active_states == expected_result
      return active_states
def build_reflect(sig, expected_result, old_result, duration=0.2):
'''test function, so it can be slow'''
example.post_fifo(Event(signal=sig))
#if sig == 'G1':
# active_states = example.active_states()[:]
# string1 = "{:>39}{:>5} <-> {:<80}".format(str(old_result), sig, str(active_states))
# print(string1)
time.sleep(duration)
active_states = example.active_states()[:]
string1 = "{:>39}{:>5} <-> {:<80}".format(str(old_result), sig, str(active_states))
string2 = "\n{} <- {} == {}\n".format(str(old_result), sig, str(active_states))
print(string1)
example.report(string2)
#assert active_states == expected_result
return active_states
assert(example.lca(_s=p_p12, _t=outer_state) == outer_state)
assert(example.lca(_s=p_p12, _t=some_other_state) == outer_state)
result1 = example.build_onion(s=p, t=p_p11, sig='TEST')
assert(result1 == [p_p11, p_r1_region, p])
result2 = example.build_onion(s=p_p11, t=p, sig='TEST')
assert(result2 == [p, p_r1_region, p_p11])
result1 = example.build_onion(s=p, t=p_p11_s11, sig='TEST')
assert(result1 == [p_p11_s11, p_p11_r1_region, p_p11, p_r1_region, p])
result2 = example.build_onion(s=p_p11_s11, t=p, sig='TEST')
assert(result2 == [p, p_r1_region, p_p11, p_p11_r1_region, p_p11_s11])
result1 = example.build_onion(s=p, t=p_p12_p11_s12, sig='TEST')
assert(result1 == [
p_p12_p11_s12,
p_p12_p11_r1_region,
p_p12_p11,
p_p12_r1_region,
p_p12,
p_r1_region,
p,
])
result2 = example.build_onion(t=p, s=p_p12_p11_s12, sig='TEST')
assert(result2 == [
p,
p_r1_region,
p_p12,
p_p12_r1_region,
p_p12_p11,
p_p12_p11_r1_region,
p_p12_p11_s12,
])
result1 = example.build_onion(s=p_p11, t=p_p12, sig='TEST')
assert(result1 == [
p_p12,
p_r1_region,
p,
])
result2 = example.build_onion(t=p_p11, s=p_p12, sig='TEST')
active_states = example.active_states()
print("{:>39}{:>5} <-> {:<80}".format("start", "", str(active_states)))
old_results = example.active_states()[:]
old_results = build_test(
sig='to_p',
expected_result=[['p_p11_s11', 'p_p11_s21'], 'p_s21'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='to_p',
expected_result=[['p_p11_s11', 'p_p11_s21'], 'p_s21'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='e4',
expected_result=[['p_p11_s12', 'p_p11_s21'], 'p_s21'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='e1',
expected_result=[['p_p11_r1_final', 'p_p11_s22'], 'p_s21'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='e2',
expected_result=[[['p_p12_p11_s11', 'p_p12_p11_s21'], 'p_p12_s21'], 'p_s21'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='C0',
expected_result=[[['p_p12_p11_s11', 'p_p12_p11_s21'], 'p_p12_s21'], ['p_p22_s11', 'p_p22_s21']],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='e4',
expected_result=[['p_p12_s12', 'p_p12_s21'], ['p_p22_s12', 'p_p22_s21']],
old_result=old_results,
duration=0.2
)
old_results = build_test(
sig='e1',
expected_result=[['p_p12_r1_final', 'p_p12_s22'], ['p_p22_r1_final', 'p_p22_s22']],
old_result=old_results,
duration=0.2
)
old_results = build_test(
sig='e2',
expected_result=['some_other_state'],
old_result=old_results,
duration=0.2
)
old_results = build_test(
sig='to_p',
expected_result=[['p_p11_s11', 'p_p11_s21'], 'p_s21'],
old_result=old_results,
duration=0.2
)
old_results = build_test(
sig='e4',
expected_result=[['p_p11_s12', 'p_p11_s21'], 'p_s21'],
old_result=old_results,
duration=0.2
)
old_results = build_test(
sig='e1',
expected_result=[['p_p11_r1_final', 'p_p11_s22'], 'p_s21'],
old_result=old_results,
duration=0.2
)
old_results = build_test(
sig='to_o',
expected_result=['outer_state'],
old_result=old_results,
duration=0.2
)
old_results = build_test(
sig='E0',
expected_result=[['p_p11_s11', 'p_p11_s22'], 'p_s21'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='E1',
expected_result=[['p_p11_s12', 'p_p11_s21'], 'p_s21'],
old_result=old_results,
duration=0.2
)
old_results = build_reflect(
sig='E2',
expected_result=[['p_p11_s12', 'p_p11_s21'], 'p_s21'],
old_result=old_results,
duration=0.2
)
old_results = build_test(
sig='C0',
expected_result=[[['p_p12_p11_s11', 'p_p12_p11_s21'], 'p_p12_s21'], ['p_p22_s11', 'p_p22_s21']],
old_result=old_results,
duration=0.2
)
old_results = build_test(
sig='E2',
expected_result=[[['p_p12_p11_s12', 'p_p12_p11_s21'], 'p_p12_s21'], ['p_p22_s11', 'p_p22_s21']],
old_result=old_results,
duration=0.2
)
time.sleep(1000)
old_results = build_test(
sig='E0',
expected_result=[['p_p11_s11', 'p_p11_s22'], 'p_s21'],
old_result=old_results,
duration=0.2
)
old_results = build_test(
sig='C0',
expected_result=[[['p_p12_p11_s11', 'p_p12_p11_s21'], 'p_p12_s21'], ['p_p22_s11', 'p_p22_s21']],
old_result=old_results,
duration=0.2
)
old_results = build_test(
sig='G1',
expected_result=['p_r1_under_hidden_region', ['p_p22_s11', 'p_p22_s21']],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='A1',
expected_result=[['p_p11_s11', 'p_p11_s21'], 'p_s21'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='G0',
expected_result=[[['p_p12_p11_s11', 'p_p12_p11_s21'], 'p_p12_s21'], 'p_r2_under_hidden_region'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='A1',
expected_result=[['p_p11_s11', 'p_p11_s21'], 'p_s21'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='F1',
expected_result=[[['p_p12_p11_s12', 'p_p12_p11_s21'], 'p_p12_s21'], ['p_p22_s11', 'p_p22_s21']],
old_result=old_results,
duration=0.2
)
old_results = build_test(
sig='A1',
expected_result=[['p_p11_s11', 'p_p11_s21'], 'p_s21'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='G0',
expected_result=[[['p_p12_p11_s11', 'p_p12_p11_s21'], 'p_p12_s21'], 'p_r2_under_hidden_region'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='I1',
expected_result=[['p_p11_s12', 'p_p11_s21'], 'p_r2_under_hidden_region'],
old_result = old_results,
duration=0.2
)
old_results = build_test(
sig='e1',
expected_result=[['p_p11_r1_final', 'p_p11_s22'], 'p_r2_under_hidden_region'],
old_result = old_results,
duration=0.2
)
old_results = build_test(
sig='G3',
expected_result=['p_r1_under_hidden_region', 'p_s21'],
old_result = old_results,
duration=0.2
)
old_results = build_test(
sig='C0',
expected_result=['p_r1_under_hidden_region', ['p_p22_s11', 'p_p22_s21']],
old_result = old_results,
duration=0.2
)
old_results = build_test(
sig='C1',
expected_result=['p_r1_under_hidden_region', 'p_s21'],
old_result = old_results,
duration=0.2
)
old_results = build_test(
sig='E0',
expected_result=[['p_p11_s11', 'p_p11_s22'], 'p_s21'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='A0',
expected_result=[['p_p11_s11', 'p_p11_s21'], 'p_s21'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='e1',
expected_result=[['p_p11_s11', 'p_p11_s22'], 'p_s21'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='B1',
expected_result=[['p_p11_s11', 'p_p11_s21'], ['p_p22_s11', 'p_p22_s21']],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='D1',
expected_result=['outer_state'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='to_p',
expected_result=[['p_p11_s11', 'p_p11_s21'], 'p_s21'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='D0',
expected_result=['outer_state'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='to_p',
expected_result=[['p_p11_s11', 'p_p11_s21'], 'p_s21'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='C0',
expected_result=[[['p_p12_p11_s11', 'p_p12_p11_s21'], 'p_p12_s21'], ['p_p22_s11', 'p_p22_s21']],
old_result = old_results,
duration=0.2
)
old_results = build_test(
sig='H1',
expected_result=['outer_state'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='to_p',
expected_result=[['p_p11_s11', 'p_p11_s21'], 'p_s21'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='H0',
expected_result=['outer_state'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='to_p',
expected_result=[['p_p11_s11', 'p_p11_s21'], 'p_s21'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='C0',
expected_result=[[['p_p12_p11_s11', 'p_p12_p11_s21'], 'p_p12_s21'], ['p_p22_s11', 'p_p22_s21']],
old_result = old_results,
duration=0.2
)
old_results = build_test(
sig='D2',
expected_result=['some_other_state'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='to_p',
expected_result=[['p_p11_s11', 'p_p11_s21'], 'p_s21'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='to_p',
expected_result=[['p_p11_s11', 'p_p11_s21'], 'p_s21'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='e4',
expected_result=[['p_p11_s12', 'p_p11_s21'], 'p_s21'],
old_result=old_results,
duration=0.2
)
old_results = build_test(
sig='D3',
expected_result=[['p_p11_s11', 'p_p11_s21'], 'p_s21'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='to_p',
expected_result=[['p_p11_s11', 'p_p11_s21'], 'p_s21'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='F1',
expected_result=[[['p_p12_p11_s12', 'p_p12_p11_s21'], 'p_p12_s21'], ['p_p22_s11', 'p_p22_s21']],
old_result=old_results,
duration=0.2
)
old_results = build_test(
sig='D4',
expected_result=[[['p_p12_p11_s11', 'p_p12_p11_s21'], 'p_p12_s21'], ['p_p22_s11', 'p_p22_s21']],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='to_s',
expected_result=['some_other_state'],
old_result= old_results,
duration=0.2
)
old_results = build_test(
sig='F0',
expected_result=[['p_p11_s11', 'p_p11_s21'], ['p_p22_s11', 'p_p22_s21']],
old_result= old_results,
duration=0.2
)
| UTF-8 | Python | false | false | 121,642 | py | 157 | 4th_example.py | 32 | 0.627694 | 0.607981 | 0 | 3,813 | 30.90139 | 103 |
xingag/marks | 1,314,260,032,034 | 72b9c17a8372616d98667fc21022eef36808b47a | 13bf7271775795249946e5f677f68f1f66540fb6 | /Python/词云/使用 itchat 爬取微信朋友的签名并生成词云/itchatDemo/send_news.py | 869fb50ec4f75f7efb3815e47e7f31ecf2bd3a9c | []
| no_license | https://github.com/xingag/marks | 21780adeb09267738fc5c0f13c6c37c88fcba38c | 133d0e980bb1f3fb8f237fff6bd448bbabadc455 | refs/heads/master | 2018-09-19T15:28:29.495166 | 2018-09-19T13:43:11 | 2018-09-19T13:43:11 | 81,279,968 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# encoding: utf-8
"""
@version: v1.0
@author: xag
@license: Apache Licence
@contact: xinganguo@gmail.com
@site: http://www.xingag.top
@software: PyCharm
@file: send_news.py
@time: 2018/5/4 17:10
@description:itchat 自动发送信息给好友
"""
# https://mp.weixin.qq.com/s?__biz=MzIxMzgyOTg1MQ==&mid=2247484131&idx=1&sn=001de6693c2bf243abba531e9aa6e5cb&chksm=97b19357a0c61a41ec8f31faf5aa1a6eb5984f3e5cd4bb7a1e64a28ba1c411badef60187e8cd&mpshare=1&scene=1&srcid=05048vyPLZVqtIZ6suVGIHrM#rd
import requests
import itchat
import time
import re
# 利用线程去执行定时任务
import threading as thd
def get_news():
url = "http://open.iciba.com/dsapi"
# 具体的内容
request_result = requests.get(url).json()
content = request_result['content']
translation = request_result['translation']
img = request_result['picture2']
# print('content:%s\ntranslation:%s\nimg:%s' % (content, translation, img))
return content, translation, img
def send_news():
print('发送消息咯~')
try:
# 短时间关闭程序后重连
# 这样即使程序关闭,一定时间内重新开启也可以不用重新扫码。
itchat.auto_login(hotReload=True)
dear_friend = (itchat.search_friends(name=u'小敏子'))[0]
# 获取对应名称的一串数字
YouPingNian = dear_friend['UserName']
# 待发送的内容
message1 = str(get_news()[0])
content = str(get_news()[1])
img = str(get_news()[2])
# 用正则表达式截取中文引号 ‘ 之间的文字。
# str = "词霸小编:卓别林的一句话送你们“用特写镜头看生活,生活是一个悲剧,但用长镜头看生活,就是一部喜剧。”"
# str1 = result = re.findall(".*“(.*)”.*", str)
message2 = str(re.findall(".*“(.*)”.*", content)[0])
message3 = "我是机器人"
# 发送消息
print('开始发送消息')
itchat.send(message1, toUserName=YouPingNian)
itchat.send(message2, toUserName=YouPingNian)
itchat.send(message3, toUserName=YouPingNian)
print('发送消息完成')
# itchat.send_image()
# 每86400秒(1天)发送1次
# 不用Linux的定时任务是因为每次登陆都需要扫描二维码登陆
# 很麻烦的事,就让他一直挂着吧
# thd.Timer(10, send_news).start()
pass
except:
message4 = u'噗噗噗~出现bug了~'
itchat.send(message4, toUserName=YouPingNian)
if __name__ == "__main__":
send_news()
# ==============================================================
# Python 正则表达式匹配两个字符之间的字符
# https://zhidao.baidu.com/question/433631377743612444.html
# -*- coding: cp936 -*-
# import re
# string = "xxxxxxxxxxxxxxxxxxxxxxxx entry '某某内容' for aaaaaaaaaaaaaaaaaa"
# result = re.findall(".*entry(.*)for.*", string)
# for x in result:
# print x
# '某某内容'
# ==============================================================
| UTF-8 | Python | false | false | 2,892 | py | 948 | send_news.py | 497 | 0.662595 | 0.605307 | 0 | 96 | 23.729167 | 243 |
Valupiruiz/AutomationPHP | 7,825,430,459,306 | f47adff109369593e56eba483c51be9b75f8a6d0 | e585c3a61b830d3c24a8cec8343d262c84c724e7 | /BORA/config/data/exceptions.py | cf45f3d953754d210b986532a961e663328ad31f | []
| no_license | https://github.com/Valupiruiz/AutomationPHP | bb0728b2b6508b017c133a7d560a652033adeaf4 | 9a92634ac9f5b27e46723294f9a4cc83a1f99252 | refs/heads/master | 2023-01-18T17:27:57.819270 | 2020-11-27T15:04:49 | 2020-11-27T15:04:49 | 310,594,260 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Error(Exception):
pass
class DriverNoExistenteException(Error):
pass
class QuerySinResultadosException(Error):
pass
| UTF-8 | Python | false | false | 129 | py | 299 | exceptions.py | 269 | 0.806202 | 0.806202 | 0 | 10 | 11.9 | 41 |
softwerks/chateau | 7,181,185,340,477 | e7d9a66a15140db29dfd6329f7f92b21c067c802 | 89fe25a9ce87a4ac4ae3e4e8de0d6070da78f9b8 | /chateau/stats/__init__.py | 088a8f45ffa6c25ea943d7e9ad2b0ae42a1ff9a6 | [
"Apache-2.0"
]
| permissive | https://github.com/softwerks/chateau | fc32e32bfb89fd0223ca60246af21e512bae11df | 6218dd623b21298a29f255e9c0d88d69651796e0 | refs/heads/main | 2023-08-11T20:55:41.395021 | 2021-09-22T01:25:49 | 2021-09-22T01:25:49 | 289,591,074 | 0 | 0 | Apache-2.0 | false | 2021-06-01T19:52:51 | 2020-08-23T00:49:13 | 2020-11-09T19:40:37 | 2021-06-01T19:52:51 | 1,062 | 0 | 0 | 0 | Python | false | false | # Copyright 2021 Softwerks LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Optional
import time
import urllib.parse
import flask
import redis
blueprint = flask.Blueprint("stats", __name__)
import chateau.stats.routes
def init_app(app: flask.app.Flask) -> None:
"""Initialize the app."""
app.before_request(_before)
app.after_request(_after)
def _before() -> None:
if "start" not in flask.g:
flask.g.start = time.perf_counter()
def _after(response: flask.Response) -> flask.Response:
if response.status_code < HTTPStatus.BAD_REQUEST:
_store_stats()
return response
def _store_stats() -> None:
now: datetime = datetime.now(timezone.utc)
date: str = now.strftime("%Y-%m-%d")
expire: int = int(
datetime.timestamp(
now.replace(hour=0, minute=0, second=0, microsecond=0) + timedelta(days=30)
)
)
url: urllib.parse.ParseResult = urllib.parse.urlparse(flask.request.url)
pipeline: redis.client.Pipeline = flask.g.redis.pipeline()
_store_page(pipeline, date, expire, url)
_store_browser(pipeline, date, expire)
_store_os(pipeline, date, expire)
_store_referrer(pipeline, date, expire)
_store_response_time(pipeline, url)
_update_visitor_count(pipeline, date, expire)
_update_page_views(pipeline, date, expire)
pipeline.execute()
def _store_page(
pipeline: redis.client.Pipeline,
date: str,
expire: int,
url: urllib.parse.ParseResult,
) -> None:
key: str = f"stats:page:{date}"
pipeline.zincrby(key, 1, url.path)
pipeline.expireat(key, expire)
def _store_browser(pipeline: redis.client.Pipeline, date: str, expire: int) -> None:
browser: Optional[str] = flask.request.user_agent.browser
if browser is not None:
browser_key: str = f"stats:browser:{date}"
pipeline.zincrby(browser_key, 1, browser)
pipeline.expireat(browser_key, expire)
version: Optional[str] = flask.request.user_agent.version
if version is not None:
version_key: str = f"stats:{browser}:{date}"
pipeline.zincrby(version_key, 1, version)
pipeline.expireat(version_key, expire)
def _store_os(pipeline: redis.client.Pipeline, date: str, expire: int) -> None:
os: Optional[str] = flask.request.user_agent.platform
if os is not None:
key: str = f"stats:os:{date}"
pipeline.zincrby(key, 1, os)
pipeline.expireat(key, expire)
def _store_referrer(pipeline: redis.client.Pipeline, date: str, expire: int) -> None:
referrer: Optional[str] = flask.request.referrer
if referrer is not None:
try:
referrer_url: urllib.parse.ParseResult = urllib.parse.urlparse(
flask.request.url
)
except:
pass
else:
key: str = f"stats:referrers:{date}"
pipeline.zincrby(key, 1, referrer_url.netloc)
pipeline.expireat(key, expire)
def _store_response_time(
pipeline: redis.client.Pipeline, url: urllib.parse.ParseResult
) -> None:
response_time: float = round(time.perf_counter() - flask.g.start, 3)
key: str = f"stats:perf:{url.path}"
pipeline.lpush(key, response_time)
pipeline.ltrim(key, 0, 99)
def _update_visitor_count(
pipeline: redis.client.Pipeline, date: str, expire: int
) -> None:
key: str = f"stats:visitors:{date}"
pipeline.pfadd(key, flask.g.session.id_)
pipeline.expire(key, expire)
def _update_page_views(pipeline: redis.client.Pipeline, date: str, expire: int) -> None:
key: str = f"stats:views:{date}"
pipeline.incr(key)
pipeline.expire(key, expire)
| UTF-8 | Python | false | false | 4,266 | py | 57 | __init__.py | 23 | 0.666667 | 0.661275 | 0 | 141 | 29.255319 | 88 |
renato-lopes/CompNat-DCC | 8,289,286,889,195 | bfe57e89ffabbec138de610f88bfe1ba2940a14a | f6cd8d0ecae1b674e8b0396254e87a51c82851c0 | /TP1/src/function_tree.py | 54743e2328c461c3fc2609acb89b42b978739d90 | []
| no_license | https://github.com/renato-lopes/CompNat-DCC | 9b83706d49d68180f68a4658ae9ab7bd05a9a9d1 | 907f920639bd7192fe4697371253d9e029e51b70 | refs/heads/main | 2023-03-24T00:55:46.249100 | 2021-03-15T15:12:50 | 2021-03-15T15:12:50 | 331,150,828 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """ Tree used to represent functions """
FUNCTION_NODE=0 # Node with a function, like sum and mul
CONSTANT_NODE=1 # Node with a constant, like a real number
VAR_NODE=2 # Node with a reference to a variable (based on variables array index)
LEFT_CHILD=0
RIGHT_CHILD=1
class Tree:
def __init__(self, node_type, node_value, parent=None, lchild=None, rchild=None):
self.node_type = node_type
self.node_value = node_value
self.parent = parent
self.lchild = lchild
self.rchild = rchild
def set_left_child(self, node):
self.lchild = node
node.parent = (self, LEFT_CHILD)
def set_right_child(self, node):
self.rchild = node
node.parent = (self, RIGHT_CHILD)
def evaluate(self, variables):
if self.node_type == CONSTANT_NODE:
return self.node_value
elif self.node_type == VAR_NODE:
return variables[self.node_value]
elif self.node_type == FUNCTION_NODE:
lvalue = self.lchild.evaluate(variables)
rvalue = self.rchild.evaluate(variables)
return self.node_value(lvalue, rvalue)
else:
raise ValueError()
def height(self):
if self.node_type == CONSTANT_NODE or self.node_type == VAR_NODE:
return 0
else:
return max(1+self.lchild.height(), 1+self.rchild.height())
def tostring(self, level=0):
rep = '\t'*level
if self.node_type == FUNCTION_NODE:
rep += str(self.node_value)+"\n"
rep += self.lchild.tostring(level+1)
rep += self.rchild.tostring(level+1)
else:
if self.node_type == VAR_NODE:
rep += "var"
rep += str(self.node_value)+"\n"
return rep
def get_nodes_at_level(self, nodes, level):
if level == 0:
nodes.append(self)
elif self.node_type == FUNCTION_NODE:
self.lchild.get_nodes_at_level(nodes, level-1)
self.rchild.get_nodes_at_level(nodes, level-1)
def copy(self):
cp = Tree(self.node_type, self.node_value)
if self.node_type == FUNCTION_NODE:
cp.set_left_child(self.lchild.copy())
cp.set_right_child(self.rchild.copy())
return cp
def get_terminal_nodes(self, nodes, level=0):
if self.node_type == FUNCTION_NODE:
self.lchild.get_terminal_nodes(nodes, level + 1)
self.rchild.get_terminal_nodes(nodes, level + 1)
else:
nodes.append((self, level))
def get_function_nodes(self, nodes, level=0):
if self.node_type == FUNCTION_NODE:
nodes.append((self, level))
self.lchild.get_function_nodes(nodes, level + 1)
self.rchild.get_function_nodes(nodes, level + 1)
def __eq__(self, tree):
if self.node_type != tree.node_type:
return False
if self.node_type == FUNCTION_NODE:
if str(self.node_value) != str(tree.node_value):
return False
if str(self.node_value) in {'SUM', 'MUL'}: # Commutative operations
return ((self.lchild == tree.lchild) and (self.rchild == tree.rchild)) or ((self.lchild == tree.rchild) and (self.rchild == tree.lchild))
return (self.lchild == tree.lchild) and (self.rchild == tree.rchild)
else:
return self.node_value == tree.node_value
| UTF-8 | Python | false | false | 3,438 | py | 14 | function_tree.py | 12 | 0.579988 | 0.574171 | 0 | 94 | 35.574468 | 153 |
credibit/credibit-back | 11,665,131,198,399 | 7e8ad839b9786b9c8c45ce3a47434a6c0cddfb32 | 56b46182e1d5a22d04094eadcb1df67c1cea78b0 | /api/lamdaCalls.py | d37b41ad35b40fd0aadc87ce329eb1dadef7b05a | []
| no_license | https://github.com/credibit/credibit-back | 981e94faef57f9ee4628899b7c30c959c6b3be0c | e6542823010e82c578382d3cba3b42d82c21bff4 | refs/heads/master | 2022-12-22T23:02:46.893703 | 2019-06-13T16:26:00 | 2019-06-13T16:26:00 | 183,804,412 | 1 | 0 | null | false | 2022-12-08T05:01:52 | 2019-04-27T17:48:30 | 2019-07-22T21:24:22 | 2022-12-08T05:01:52 | 56,490 | 0 | 0 | 4 | Python | false | false | import requests
import json
def getCredit(params):
    """Call the payment-capacity lambda and map its risk category to a credit offer.

    params: dict posted verbatim to the lambda; must contain 'plazoDeseado'
        (the desired term in months).
    Returns a dict with 'is_valid'; for approved categories it also carries
    'months' and 'ammount' (key spelling preserved for API compatibility).
    """
    desired_months = params['plazoDeseado']
    # Offer per risk category; category 'B' caps the term at 36 months.
    results = {
        'A': {
            'months': [3, 6, 12, desired_months],
            'is_valid': True
        },
        'B': {
            'months': [12, min(desired_months, 36)],
            'is_valid': True
        },
        'C': {
            'months': [12, 24],
            'is_valid': True
        }
    }
    lambda_api = 'https://tlnlicdqk0.execute-api.us-east-2.amazonaws.com/prod/paymentCapacity'
    r = requests.post(lambda_api, data=json.dumps(params))
    # The lambda wraps its payload: the JSON body is itself a JSON string.
    body = json.loads(r.json()['body'])
    response = body['response']
    category = response[0]  # risk category letter, 'A'..'D'
    if category == 'D':
        # Category 'D' is rejected outright.
        return {
            'is_valid': False
        }
    result = results[category]
    result['ammount'] = response[1]  # approved amount reported by the lambda
    return result
def verifySite(url, company):
    """Return True when the site-availability lambda accepts the URL/company pair."""
    endpoint = 'https://tlnlicdqk0.execute-api.us-east-2.amazonaws.com/prod/checkSiteAvailability'
    payload = json.dumps({ 'url': url, 'company': company })
    reply = requests.post(endpoint, data=payload)
    return reply.status_code == 200
def verifyEmail(email):
    """Return True when the mail-validation lambda accepts *email*."""
    endpoint = 'https://tlnlicdqk0.execute-api.us-east-2.amazonaws.com/prod/validateMail'
    reply = requests.post(endpoint, data=json.dumps({ 'mail': email }))
    return reply.status_code == 200
def getFullContact(domain):
    """Fetch FullContact enrichment data for *domain* via the lambda endpoint."""
    endpoint = 'https://tlnlicdqk0.execute-api.us-east-2.amazonaws.com/prod/getFullContact'
    reply = requests.post(endpoint, data=json.dumps({ 'domain': domain }))
    return reply.json()['response']
LothairKizardjian/RocketLeagueBot-RL | 13,898,514,173,630 | 3dbed9c397013a19058e14dd58f263b97ceccdd0 | c535deb8ecd8aefe34bb30c534299d6e76765324 | /src/algorithms/base_agent.py | d91bc63e50f1fa1913528acc31e485c3d4a10b21 | []
| no_license | https://github.com/LothairKizardjian/RocketLeagueBot-RL | 42076617167344e49cd74421702bf834eba27cbc | 5d843d746fb9ebb5b0278575ec64b084b684e97d | refs/heads/main | 2023-08-16T00:02:34.904398 | 2021-10-17T22:36:14 | 2021-10-17T22:36:14 | 416,049,244 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ### ABSTRACT CLASS FOR A BASE ALGORITHM ###
from abc import ABC, abstractmethod
from typing import Any
class BaseAgent(ABC):
    """Abstract base class describing the interface every agent must implement."""

    def __init__(self):
        pass

    @abstractmethod
    def select_action(self, observations) -> Any:
        """Given observations of the game, return an action to apply to the environment."""
        raise NotImplementedError

    @abstractmethod
    def update_parameters(self):
        """Update the agent's internal parameters."""
        raise NotImplementedError

    @abstractmethod
    def train(self):
        """Run the agent's training procedure."""
        raise NotImplementedError
rosspeckomplekt/RParser | 3,607,772,576,450 | 7008ec894906e912348994218a760fa6f0c165fc | e8c715dd86ca964e76c284c315b83dde0cfa0264 | /scratch/scratch5.py | ba8c763274d24bdda160dcdbf9028df1080b7d3a | []
| no_license | https://github.com/rosspeckomplekt/RParser | 62b7227ec2270863a28a345cc97bbd8f1172468a | 75c1c0bf004ccd74b0eb72e3471a193c006701b8 | refs/heads/master | 2020-09-23T14:01:34.755941 | 2015-12-09T08:32:20 | 2015-12-09T08:32:20 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
from RParser import RParser
from PreProcessor import PreProcessor
__author__ = 'Shaun Rong'
__version__ = '0.1'
__maintainer__ = 'Shaun Rong'
__email__ = 'rongzq08@gmail.com'

# Scratch script: run the preprocessor + chunker over one sentence of a raw
# paper and print the noun phrases found.
cfuf = PreProcessor()
with open('../data/verb_method_arg/test/paper0105.raw.txt', 'r') as f:
    text = f.read().splitlines()
orig_text = text[4]  # the fifth line of the paper is the sentence of interest
process_text, sub_table = cfuf.process([orig_text.strip()])
sen = process_text[0]
rp = RParser()
NPs = rp.return_NPs(sen)
# print() call instead of the original Python 2 print statement; this form
# works under both Python 2 and 3.
print(NPs)
ravix339/ModularRobots | 15,290,083,576,317 | b5398b29d591be26d08c0cc8663146980d6d806f | e818f2a8f409bb0005f1cdbc9f6cefd3c873e400 | /PythonReconfig/main.py | cd5ba3f918208e27a0615908ab0349b16ac73d2f | []
| no_license | https://github.com/ravix339/ModularRobots | 9e69ab5f83dbfd72d27b323b766b291201a0f27c | d1d019e709143664f3f671b8165961601977326b | refs/heads/master | 2022-06-10T15:24:25.698734 | 2020-05-07T18:44:14 | 2020-05-07T18:44:14 | 247,126,369 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from logn import LogNReconfig
BLOCKSIZE = 8  # number of 4x4 modules in a block

analyzer = LogNReconfig(BLOCKSIZE)

# Earlier test structures, kept for reference:
# highLevelStruct = [
#     [1,1,1,1],
#     [1,1,1,1],
#     [0,0,0,0],
#     [0,0,0,0]]
# highLevelStruct = [
#     [1,1,1,1],
#     [1,0,0,1],
#     [1,0,0,1],
#     [0,0,0,0]]
# highLevelStruct = [
#     [0,0,0,0,0,0,0,0],
#     [0,0,0,1,0,0,0,0],
#     [0,0,1,1,1,0,0,0],
#     [0,0,0,0,1,0,0,0],
#     [0,0,0,0,0,0,0,0],
#     [0,0,0,0,0,0,0,0],
#     [0,0,0,0,0,0,0,0],
#     [0,0,0,0,0,0,0,0]]

# Target high-level (8x8) shape to reconfigure towards.
highLevelStruct = [
    [0,0,0,0,1,1,1,0],
    [0,0,0,0,1,0,0,0],
    [0,0,1,1,1,0,0,0],
    [0,0,0,1,0,0,0,0],
    [0,0,0,1,0,0,0,0],
    [0,0,0,1,1,0,0,0],
    [0,0,0,1,1,0,0,0],
    [0,0,0,0,0,0,0,0]]

fullForm = analyzer.block_32_to_basic(highLevelStruct)
desiredForm, progress = analyzer.change_to_desired_form(fullForm)

# Display each reconfiguration step and dump it to a numbered file.
for timestamp, step in enumerate(progress):
    analyzer.display(step)
    print('\n')
    out_formatted = analyzer.convert_to_out(step)
    # 'with' guarantees the handle is closed (the original leaked one
    # open file per timestep).
    with open(str(timestamp) + '.txt', mode='w') as f:
        f.writelines([row + '\n' for row in out_formatted])
| UTF-8 | Python | false | false | 1,162 | py | 13 | main.py | 9 | 0.505164 | 0.363167 | 0 | 48 | 22.208333 | 65 |
ssloat/pythonanywhere | 6,425,271,085,121 | 2667ea242906500632c3fd7faa7b67ca4d6af58d | 1cd104162ccb0949d98c423f758061983ec61472 | /finances/models/tax_rates.py | 0edc5ab8c0d9fac11c41aea5061364caa0573a85 | []
| no_license | https://github.com/ssloat/pythonanywhere | 720616f15b85f63b031827512f1fbd09fb60d70c | 4ec21ed7f05dd8003e72b566af3c59d227a02c39 | refs/heads/master | 2023-03-16T02:20:01.348104 | 2023-03-12T17:37:02 | 2023-03-12T17:37:02 | 50,216,788 | 0 | 0 | null | false | 2023-03-12T17:37:03 | 2016-01-23T00:53:00 | 2019-04-19T20:26:36 | 2023-03-12T17:37:02 | 259 | 0 | 0 | 0 | Python | false | false | from mysite import db
class TaxDeduction(db.Model):
    """A named deduction amount for a given tax year and filing status."""
    __tablename__ = 'tax_deductions'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64))
    year = db.Column(db.Integer)
    amount = db.Column(db.Integer)
    status = db.Column(db.String(64))  # filing status, e.g. 'Single', 'Joint'

    def __init__(self, name, year, amount, status=None):
        self.name = name
        self.year = year
        self.amount = amount
        # A falsy status (None or '') falls back to 'Single'.
        self.status = 'Single' if not status else status
    def __repr__(self):
        fields = (self.name, self.year, self.amount, self.status)
        return "<TaxDeduction('%s', %d, %d, '%s')>" % fields
class TaxRate(db.Model):
    """One marginal tax bracket; end == 0 marks the unbounded top bracket."""
    __tablename__ = 'tax_rates'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64))
    year = db.Column(db.Integer)
    start = db.Column(db.Integer)
    end = db.Column(db.Integer)
    rate = db.Column(db.Float)
    status = db.Column(db.String(64))  # filing status, e.g. 'Single', 'Head'

    def __init__(self, name, year, start, end, rate, status=None):
        self.name = name
        self.year = year
        self.start = start
        self.end = end
        self.rate = rate
        # A falsy status (None or '') falls back to 'Single'.
        self.status = 'Single' if not status else status
    def __repr__(self):
        fields = (self.name, self.status, self.year, self.start, self.end, self.rate)
        return "<TaxRate(%s %s %d: %d - %d => %f)>" % fields
def tax(tax_rates, amount):
return reduce(lambda a, b: a + _tax(b, amount), tax_rates, 0.0);
def _tax(tax_rate, amount):
if tax_rate.start >= amount:
return 0
if tax_rate.end == 0 or amount < tax_rate.end:
return tax_rate.rate * (amount - tax_rate.start)
return tax_rate.rate * (tax_rate.end - tax_rate.start)
def add_tax_rates(session):
    """Seed the session with 2013-2015 US deduction and tax-bracket data.

    Adds TaxDeduction and TaxRate rows for federal standard deductions,
    Illinois flat rates, federal brackets (Single and Head), Social Security,
    and Medicare, then commits. All amounts are whole dollars; a bracket
    `end` of 0 means "no upper bound".
    """
    # Federal standard deductions per filing status.
    session.add( TaxDeduction('Fed Standard', 2013, 6100, 'Single') )
    session.add( TaxDeduction('Fed Standard', 2014, 6200, 'Single') )
    session.add( TaxDeduction('Fed Standard', 2015, 6300, 'Single') )
    session.add( TaxDeduction('Fed Standard', 2013, 8950, 'Head') )
    session.add( TaxDeduction('Fed Standard', 2014, 9100, 'Head') )
    session.add( TaxDeduction('Fed Standard', 2015, 9250, 'Head') )
    session.add( TaxDeduction('Fed Standard', 2013, 12200, 'Joint') )
    session.add( TaxDeduction('Fed Standard', 2014, 12400, 'Joint') )
    session.add( TaxDeduction('Fed Standard', 2015, 12600, 'Joint') )
    session.add( TaxDeduction('Fed Standard', 2013, 6100, 'Separate') )
    session.add( TaxDeduction('Fed Standard', 2014, 6200, 'Separate') )
    session.add( TaxDeduction('Fed Standard', 2015, 6300, 'Separate') )
#    session.add( TaxDeduction('401k', 2013, 17000) )
#    session.add( TaxDeduction('401k', 2014, 17500) )
#    session.add( TaxDeduction('401k', 2015, 18000) )
    # Illinois flat income tax (single bracket, no upper bound).
    session.add( TaxRate('Illinois', 2015, 0, 0, 0.0375, 'Single') )
    session.add( TaxRate('Illinois', 2014, 0, 0, 0.05, 'Single') )
    session.add( TaxRate('Illinois', 2013, 0, 0, 0.05, 'Single') )
    # Federal brackets, filing status 'Single'.
    session.add( TaxRate('Federal', 2015, 0, 9225, .10, 'Single') )
    session.add( TaxRate('Federal', 2015, 9225, 37450, .15, 'Single') )
    session.add( TaxRate('Federal', 2015, 37450, 90750, .25, 'Single') )
    session.add( TaxRate('Federal', 2015, 90750, 189300, .28, 'Single') )
    session.add( TaxRate('Federal', 2015, 189300, 411500, .33, 'Single') )
    session.add( TaxRate('Federal', 2015, 411500, 413200, .35, 'Single') )
    session.add( TaxRate('Federal', 2015, 413200, 0, .396, 'Single') )
    session.add( TaxRate('Federal', 2014, 0, 9075, .10, 'Single') )
    session.add( TaxRate('Federal', 2014, 9075, 36900, .15, 'Single') )
    session.add( TaxRate('Federal', 2014, 36900, 89350, .25, 'Single') )
    session.add( TaxRate('Federal', 2014, 89350, 186350, .28, 'Single') )
    session.add( TaxRate('Federal', 2014, 186350, 405100, .33, 'Single') )
    session.add( TaxRate('Federal', 2014, 405100, 406750, .35, 'Single') )
    session.add( TaxRate('Federal', 2014, 406750, 0, .396, 'Single') )
    session.add( TaxRate('Federal', 2013, 0, 8925, .10, 'Single') )
    session.add( TaxRate('Federal', 2013, 8925, 36250, .15, 'Single') )
    session.add( TaxRate('Federal', 2013, 36250, 87850, .25, 'Single') )
    session.add( TaxRate('Federal', 2013, 87850, 183250, .28, 'Single') )
    session.add( TaxRate('Federal', 2013, 183250, 398350, .33, 'Single') )
    session.add( TaxRate('Federal', 2013, 398350, 400000, .35, 'Single') )
    session.add( TaxRate('Federal', 2013, 400000, 0, .396, 'Single') )
    # Federal brackets, filing status 'Head' (of household).
    session.add( TaxRate('Federal', 2015, 0, 13150, .10, 'Head') )
    session.add( TaxRate('Federal', 2015, 13150, 50200, .15, 'Head') )
    session.add( TaxRate('Federal', 2015, 50200, 129600, .25, 'Head') )
    session.add( TaxRate('Federal', 2015, 129600, 209850, .28, 'Head') )
    session.add( TaxRate('Federal', 2015, 209850, 411500, .33, 'Head') )
    session.add( TaxRate('Federal', 2015, 411500, 439000, .35, 'Head') )
    session.add( TaxRate('Federal', 2015, 439000, 0, .396, 'Head') )
    session.add( TaxRate('Federal', 2014, 0, 12950, .10, 'Head') )
    session.add( TaxRate('Federal', 2014, 12950, 49400, .15, 'Head') )
    session.add( TaxRate('Federal', 2014, 49400, 127550, .25, 'Head') )
    session.add( TaxRate('Federal', 2014, 127550, 206600, .28, 'Head') )
    session.add( TaxRate('Federal', 2014, 206600, 405100, .33, 'Head') )
    session.add( TaxRate('Federal', 2014, 405100, 432200, .35, 'Head') )
    session.add( TaxRate('Federal', 2014, 432200, 0, .396, 'Head') )
    session.add( TaxRate('Federal', 2013, 0, 12750, .10, 'Head') )
    session.add( TaxRate('Federal', 2013, 12750, 48600, .15, 'Head') )
    session.add( TaxRate('Federal', 2013, 48600, 125450, .25, 'Head') )
    session.add( TaxRate('Federal', 2013, 125450, 203150, .28, 'Head') )
    # NOTE(review): 398359 below looks like a typo for 398350 — confirm
    # against the 2013 IRS tables before relying on this bracket edge.
    session.add( TaxRate('Federal', 2013, 203150, 398359, .33, 'Head') )
    session.add( TaxRate('Federal', 2013, 398359, 425000, .35, 'Head') )
    session.add( TaxRate('Federal', 2013, 425000, 0, .396, 'Head') )
    # Social Security (capped wage base, status defaults to 'Single').
    session.add( TaxRate('SocialSecurity', 2014, 0, 117000, 0.062) )
    session.add( TaxRate('SocialSecurity', 2015, 0, 118500, 0.062) )
    # Medicare: base rate plus the additional 0.9% above the threshold.
    session.add( TaxRate('Medicare', 2014, 0, 200000, 0.0145, 'Single') )
    session.add( TaxRate('Medicare', 2014, 200000, 0, 0.0235, 'Single') )
    session.add( TaxRate('Medicare', 2014, 0, 250000, 0.0145, 'Joint') )
    session.add( TaxRate('Medicare', 2014, 250000, 0, 0.0235, 'Joint') )
    session.add( TaxRate('Medicare', 2014, 0, 125000, 0.0145, 'Separate') )
    session.add( TaxRate('Medicare', 2014, 125000, 0, 0.0235, 'Separate') )
    session.add( TaxRate('Medicare', 2015, 0, 200000, 0.0145, 'Single') )
    session.add( TaxRate('Medicare', 2015, 200000, 0, 0.0235, 'Single') )
    session.add( TaxRate('Medicare', 2015, 0, 250000, 0.0145, 'Joint') )
    session.add( TaxRate('Medicare', 2015, 250000, 0, 0.0235, 'Joint') )
    session.add( TaxRate('Medicare', 2015, 0, 125000, 0.0145, 'Separate') )
    session.add( TaxRate('Medicare', 2015, 125000, 0, 0.0235, 'Separate') )
    session.commit()
| UTF-8 | Python | false | false | 7,168 | py | 55 | tax_rates.py | 34 | 0.60519 | 0.456752 | 0 | 150 | 46.786667 | 78 |
graspr/embedded | 3,770,981,295,666 | c95943c52402420e9a7c3a780a55da9ea66f0b08 | 4ed99ee58451f7b23e689d4c09aa59467eaababe | /rpi/python/graspr.py | e14e1b8aff3f55f81192159ff3b70954cf6c5cd5 | []
| no_license | https://github.com/graspr/embedded | 15096a08c943738e483c3198f4c058ffadbc5d03 | 59c21aa303d713eb190048ac7ad6809b2be45517 | refs/heads/master | 2020-04-14T05:26:43.765915 | 2015-04-14T00:06:30 | 2015-04-14T00:06:30 | 22,432,533 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Entry point for Graspr Embedded App
"""
import signal
import sys
import time
from thread import *
from collections import deque
import web
import spi
import mux
PORT = 8080
DQ_MAX_LENGTH = 10000
def signal_handler(signum, frame):
    """SIGINT (Ctrl+C) handler: log the end-of-run time and exit cleanly.

    The first parameter was renamed from `signal` so it no longer shadows
    the `signal` module; the runtime passes both arguments positionally,
    so callers are unaffected.
    """
    print('End of run: {!s}'.format(time.time()))
    sys.exit(0)
def application_setup():
    """Register process-level handlers before the app starts serving."""
    # Install the Ctrl+C handler so the process logs its end time and exits.
    signal.signal(signal.SIGINT, signal_handler) #handler for keyboard interrupt
if __name__ == "__main__":
    # Boot sequence: signal handling, SPI bus setup, then the web server.
    print('Setting up')
    application_setup()
    spi.setup()
    print(('Start of run: {!s}'.format(time.time())))
    print('Channel 14,Channel 15,Channel 16')
    if len(sys.argv) > 1:
        # NOTE(review): argv values are strings, so PORT becomes a str here
        # while the module default is the int 8080 — confirm web.run()
        # accepts both forms.
        PORT = sys.argv[1]
    web.run(PORT)
| UTF-8 | Python | false | false | 719 | py | 21 | graspr.py | 13 | 0.642559 | 0.617524 | 0 | 35 | 19.542857 | 80 |
oalhinnawi/CTCI-Problems-Python | 9,921,374,481,998 | 1b98c6256eefb5ce54d56f79bb250331d9b0ba6e | 8ba2bc23d1b5777f762e0a3dca49e2c36bd1db39 | /sum_lists.py | 3d820e5f5c09c267870d394a24dfbc3d14b31e76 | []
| no_license | https://github.com/oalhinnawi/CTCI-Problems-Python | 170b8f5fc28e035ee1511845f97e7bc8e10abe99 | e08404c08af1e5d2081698daea6e050d9933ddcb | refs/heads/main | 2023-05-12T13:32:23.350463 | 2021-06-02T23:10:02 | 2021-06-02T23:10:02 | 370,898,787 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 2.5 - Sum Lists: You have two numbers represented by a linked list, where each node contains a single
# digit. The digits are stored in reverse order, such that the 1 's digit is at the head of the list. Write a
# function that adds the two numbers and returns the sum as a linked list.
# First thoughts: This isn't too bad since it's in reversed order, we can construct
# the strings as we iterate through the list, so nothing should be 2n's worth of work.
class Node:
    """A singly linked list node holding one digit/value."""
    def __init__(self, value):
        self.value = value
        self.next = None  # successor node, or None at the tail
class LinkedList:
    """Singly linked list that grows at the head."""
    def __init__(self):
        self.head = None
        self.numNodes = 0
    def printLinkedList(self):
        """Print one value per line, starting at the head."""
        node = self.head
        while node is not None:
            print(node.value)
            node = node.next
    def addNodeAtHead(self, value):
        """Prepend *value* in O(1) and bump the node count."""
        node = Node(value)
        node.next = self.head
        self.head = node
        self.numNodes += 1
    def deleteNode(self, key):
        """Unlink the first node whose value equals *key* (no-op when absent).

        numNodes is left untouched here, matching the list's existing
        behaviour; an empty list raises AttributeError.
        """
        current = self.head
        # The head holds the key: just advance the head pointer.
        if current.value == key:
            self.head = current.next
            return
        prev = current
        current = current.next
        while current is not None:
            if current.value == key:
                prev.next = current.next
                return
            prev = current
            current = current.next
    def returnKthElement(self, k):
        """Return the value of the k-th node, 1-indexed from the head."""
        node = self.head
        for _ in range(k - 1):
            node = node.next
        return node.value
def sumLists(firstList, secondList):
    """CtCI 2.5: each list stores a number's digits in reverse (1's digit at head).

    Returns a new LinkedList holding the sum, also in reverse digit order.
    """
    def to_number(linked):
        # The head is the least-significant digit, so prepend as we walk.
        digits = ""
        node = linked.head
        while node is not None:
            digits = str(node.value) + digits
            node = node.next
        return int(digits)

    firstSum = to_number(firstList)
    secondSum = to_number(secondList)
    print("First Sum:", firstSum)
    print("Second Sum:", secondSum)
    combinedSum = str(firstSum + secondSum)
    print("Combined Sum:", combinedSum)
    combinedList = LinkedList()
    # Pushing digits left-to-right at the head leaves the 1's digit at the head.
    for digit in combinedSum:
        combinedList.addNodeAtHead(int(digit))
    return combinedList
if __name__ == "__main__":
    # Demo: 3210 (stored reversed) + 987654 (stored reversed).
    firstList = LinkedList()
    for digit in range(4):
        firstList.addNodeAtHead(digit)
    secondList = LinkedList()
    for digit in range(4, 10):
        secondList.addNodeAtHead(digit)
    combinedList = sumLists(firstList, secondList)
    combinedList.printLinkedList()
julianferres/Competitive-Programming | 16,956,530,926,030 | 67f9d0e68db98069281e36dc249621d04f904c9f | 9f69c4c61ca2a2082643f9316354826f6144e1f5 | /TAP2014/I.py | 211317a6735605a73487d78c6e2170162357be1c | []
def is_prime(n):
    """Naive O(sqrt(n)) trial-division primality test.

    Returns False for n < 2 (the original reported 0 and 1 as prime).
    """
    if n < 2:
        return False
    d = 2
    while d * d <= n:
        if n % d == 0:
            return False
        d += 1
    return True
# Print every prime in [2184, 2200).
for candidate in range(2184, 2200):
    if is_prime(candidate):
        print(candidate)
| UTF-8 | Python | false | false | 215 | py | 330 | I.py | 324 | 0.47907 | 0.427907 | 0 | 12 | 16.916667 | 35 |
wangmou21/NSNet | 19,061,064,865,395 | f465c6c061a9904868bda919e28c191209e8386a | 28653b73242d0f2d384cf1c2998d0cb649922371 | /models.py | 03e406604aa7b7c4dcde8a0f7c7ab31577e1591b | []
| no_license | https://github.com/wangmou21/NSNet | aa6f98998b3063b717589f1c646ca80eb705e3a0 | 16dd4a671385675e6ea0b19afce4a0dca60ef5b6 | refs/heads/master | 2023-01-08T22:16:48.431289 | 2020-11-14T14:57:49 | 2020-11-14T14:57:49 | 307,575,100 | 6 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 10:40:51 2020
@author: Silence
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import config
def init_layer(layer, nonlinearity='leaky_relu'):
    """Kaiming-initialize a Linear/Conv layer's weight; zero its bias if present."""
    nn.init.kaiming_uniform_(layer.weight, nonlinearity=nonlinearity)
    bias = getattr(layer, 'bias', None)
    if bias is not None:
        bias.data.fill_(0.)
def init_bn(bn):
    """Reset a BatchNorm layer to the identity transform (zero mean/bias, unit var/weight)."""
    bn.weight.data.fill_(1.)
    bn.bias.data.fill_(0.)
    bn.running_var.data.fill_(1.)
    bn.running_mean.data.fill_(0.)
class ConvBlock_Down(nn.Module):
    """Encoder block: optional 3-D pooling, then two Conv3d+BN+ReLU stages."""
    def __init__(self, in_channels, out_channels, padding=1, stride=1):
        super(ConvBlock_Down, self).__init__()
        # Hidden width: half of the larger of the two channel endpoints.
        if in_channels > out_channels:
            mid_channels = in_channels // 2
        else:
            mid_channels = out_channels // 2
        self.conv1 = nn.Conv3d(in_channels=in_channels,
                               out_channels=mid_channels,
                               kernel_size=3, stride=stride,
                               padding=padding, bias=False)
        self.conv2 = nn.Conv3d(in_channels=mid_channels,
                               out_channels=out_channels,
                               kernel_size=3, stride=stride,
                               padding=padding, bias=False)
        self.bn1 = nn.BatchNorm3d(mid_channels)
        self.bn2 = nn.BatchNorm3d(out_channels)
        self.init_weights()
    def init_weights(self):
        for conv in (self.conv1, self.conv2):
            init_layer(conv)
        for bn in (self.bn1, self.bn2):
            init_bn(bn)
    def forward(self, x, is_pool=True, pool_size=(2, 2, 2), pool_type='avg'):
        if is_pool:
            if pool_type == 'avg':
                x = F.avg_pool3d(x, kernel_size=pool_size, stride=(2, 2, 2))
            elif pool_type == 'max':
                x = F.max_pool3d(x, kernel_size=pool_size, stride=(2, 2, 2))
            else:
                raise Exception('Incorrect argument!')
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        return x
class ConvBlock_Up(nn.Module):
    """Decoder block: upsample, concatenate the skip tensor, then two Conv3d+BN+ReLU."""
    def __init__(self, in_channels, out_channels, stride=1, sample=True):
        super(ConvBlock_Up, self).__init__()
        # Either parameter-free trilinear upsampling or a learned transposed conv.
        if sample:
            self.sample = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False)
        else:
            self.sample = nn.ConvTranspose3d(in_channels, in_channels, 2, stride=2)
        # After concatenation with the skip tensor (in_channels // 2 wide),
        # the channel count is 1.5x the input.
        cat_channels = in_channels // 2 + in_channels
        if cat_channels > out_channels:
            mid_channels = cat_channels // 2
        else:
            mid_channels = out_channels // 2
        mid_channels = max(mid_channels, out_channels)
        self.conv1 = nn.Conv3d(in_channels=cat_channels,
                               out_channels=mid_channels,
                               kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.conv2 = nn.Conv3d(in_channels=mid_channels,
                               out_channels=out_channels,
                               kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn1 = nn.BatchNorm3d(mid_channels)
        self.bn2 = nn.BatchNorm3d(out_channels)
        self.init_weights()
    def init_weights(self):
        for conv in (self.conv1, self.conv2):
            init_layer(conv)
        for bn in (self.bn1, self.bn2):
            init_bn(bn)
    def forward(self, input1, input2):
        # Upsample the deeper feature map, then fuse with the skip tensor.
        upsampled = self.sample(input1)
        fused = torch.cat((upsampled, input2), dim=1)
        fused = F.relu(self.bn1(self.conv1(fused)), inplace=False)
        return F.relu(self.bn2(self.conv2(fused)), inplace=False)
class ConvBlock_Map(nn.Module):
    """Output head: Conv3d+BN+ReLU followed by a Conv3d with an optional final BN."""
    def __init__(self, in_channels, out_channels):
        super(ConvBlock_Map, self).__init__()
        if in_channels > out_channels:
            mid_channels = in_channels // 2
        else:
            mid_channels = out_channels // 2
        self.conv1 = nn.Conv3d(in_channels=in_channels,
                               out_channels=mid_channels,
                               kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.conv2 = nn.Conv3d(in_channels=mid_channels,
                               out_channels=out_channels,
                               kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.bn1 = nn.BatchNorm3d(mid_channels)
        self.bn2 = nn.BatchNorm3d(out_channels)
        self.init_weights()
    def init_weights(self):
        for conv in (self.conv1, self.conv2):
            init_layer(conv)
        for bn in (self.bn1, self.bn2):
            init_bn(bn)
    def forward(self, x, is_activate=True):
        x = F.relu(self.bn1(self.conv1(x)))
        x = self.conv2(x)
        # Optionally skip the final BatchNorm (TempNET's head passes
        # is_activate=False to keep the raw output).
        if is_activate:
            x = self.bn2(x)
        return x
class MassNET(nn.Module):
    """3-D U-Net producing a per-voxel field plus a per-sample scalar from its mean."""
    def __init__(self):
        super(MassNET, self).__init__()
        self.conv_block1 = ConvBlock_Down(in_channels=1, out_channels=32)
        self.conv_block_down1 = ConvBlock_Down(in_channels=32, out_channels=64)
        self.conv_block_down2 = ConvBlock_Down(in_channels=64, out_channels=128)
        self.conv_block_down3 = ConvBlock_Down(in_channels=128, out_channels=256)
        self.conv_block_up3 = ConvBlock_Up(in_channels=256, out_channels=128)
        self.conv_block_up2 = ConvBlock_Up(in_channels=128, out_channels=64)
        self.conv_block_up1 = ConvBlock_Up(in_channels=64, out_channels=64)
        self.conv_block2 = ConvBlock_Map(in_channels=64, out_channels=1)
    def forward(self, x):
        x = x[:, None, :, :, :]  # insert a channel axis
        # Encoder path (skip tensors kept for the decoder).
        skip1 = self.conv_block1(x, is_pool=False)
        skip2 = self.conv_block_down1(skip1)
        skip3 = self.conv_block_down2(skip2)
        bottom = self.conv_block_down3(skip3)
        # Decoder path with skip connections.
        up = self.conv_block_up3(bottom, skip3)
        up = self.conv_block_up2(up, skip2)
        up = self.conv_block_up1(up, skip1)
        field = self.conv_block2(up)
        # Scale by (1 - input) — zeros the output wherever the input is 1 —
        # then squash into (-1, 1).
        field = torch.tanh(field.mul(1 - x))
        out1 = field.squeeze(dim=1)
        # Per-sample mean over the non-batch axes, rescaled by config constants.
        out2 = torch.mean(out1, (3, 2, 1)) * config.scale_Mass / config.dCdX / config.Db
        return out1, out2
class FlowNET(nn.Module):
    """Same U-Net backbone as MassNET, returning only the masked tanh field."""
    def __init__(self):
        super(FlowNET, self).__init__()
        self.conv_block1 = ConvBlock_Down(in_channels=1, out_channels=32)
        self.conv_block_down1 = ConvBlock_Down(in_channels=32, out_channels=64)
        self.conv_block_down2 = ConvBlock_Down(in_channels=64, out_channels=128)
        self.conv_block_down3 = ConvBlock_Down(in_channels=128, out_channels=256)
        self.conv_block_up3 = ConvBlock_Up(in_channels=256, out_channels=128)
        self.conv_block_up2 = ConvBlock_Up(in_channels=128, out_channels=64)
        self.conv_block_up1 = ConvBlock_Up(in_channels=64, out_channels=64)
        self.conv_block2 = ConvBlock_Map(in_channels=64, out_channels=1)
    def forward(self, x):
        x = x[:, None, :, :, :]  # insert a channel axis
        # Encoder path.
        skip1 = self.conv_block1(x, is_pool=False)
        skip2 = self.conv_block_down1(skip1)
        skip3 = self.conv_block_down2(skip2)
        bottom = self.conv_block_down3(skip3)
        # Decoder path with skip connections.
        up = self.conv_block_up3(bottom, skip3)
        up = self.conv_block_up2(up, skip2)
        up = self.conv_block_up1(up, skip1)
        field = self.conv_block2(up)
        # Scale by (1 - input), then bound the prediction with tanh.
        field = torch.tanh(field.mul(1 - x))
        return field.squeeze(dim=1)
class TempNET(nn.Module):
    """U-Net variant whose head skips the final BatchNorm and applies no mask/tanh."""
    def __init__(self):
        super(TempNET, self).__init__()
        self.conv_block1 = ConvBlock_Down(in_channels=1, out_channels=32)
        self.conv_block_down1 = ConvBlock_Down(in_channels=32, out_channels=64)
        self.conv_block_down2 = ConvBlock_Down(in_channels=64, out_channels=128)
        self.conv_block_down3 = ConvBlock_Down(in_channels=128, out_channels=256)
        self.conv_block_up3 = ConvBlock_Up(in_channels=256, out_channels=128)
        self.conv_block_up2 = ConvBlock_Up(in_channels=128, out_channels=64)
        self.conv_block_up1 = ConvBlock_Up(in_channels=64, out_channels=64)
        self.conv_block2 = ConvBlock_Map(in_channels=64, out_channels=1)
    def forward(self, x):
        x = x[:, None, :, :, :]  # insert a channel axis
        # Encoder path.
        skip1 = self.conv_block1(x, is_pool=False)
        skip2 = self.conv_block_down1(skip1)
        skip3 = self.conv_block_down2(skip2)
        bottom = self.conv_block_down3(skip3)
        # Decoder path with skip connections.
        up = self.conv_block_up3(bottom, skip3)
        up = self.conv_block_up2(up, skip2)
        up = self.conv_block_up1(up, skip1)
        # No final BatchNorm: the head output is left unnormalized.
        field = self.conv_block2(up, is_activate=False)
        return field.squeeze(dim=1)
trainto/Problem-Solving | 14,877,766,734,404 | 695187ec783c740cac30395d777600e5ee628078 | 0f8cb8d926301ca8b68288af5d1d81266af36048 | /algospot/uri.py | 37e0af2f79a8066c3f1ed0e976a2e859a8bb8889 | []
| no_license | https://github.com/trainto/Problem-Solving | f70f345f38eedba51688eb263129c54de21287fa | a08881d09a107650a3417d7beeb374f5362dda70 | refs/heads/main | 2023-05-05T18:46:32.913424 | 2021-05-31T09:40:38 | 2021-05-31T09:40:38 | 60,740,421 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | numberOfTestCases = int(input())
# Read each raw URI, one per line.
testCases = [input() for _ in range(numberOfTestCases)]
def decoder(rawUri):
    """Decode the URI escapes %20 %21 %24 %28 %29 %2a %25.

    %25 ('%') is decoded last so sequences such as '%2520' yield the
    literal '%20' instead of being decoded twice.
    """
    replacements = (
        ("%20", " "),
        ("%21", "!"),
        ("%24", "$"),
        ("%28", "("),
        ("%29", ")"),
        ("%2a", "*"),
        ("%25", "%"),
    )
    result = rawUri
    for escape, char in replacements:
        result = result.replace(escape, char)
    return result
# Emit the decoded form of every test case.
for raw_case in testCases:
    print(decoder(raw_case))
| UTF-8 | Python | false | false | 492 | py | 120 | uri.py | 118 | 0.591463 | 0.565041 | 0 | 18 | 26.333333 | 39 |
ja153903/codeforces | 14,456,859,967,060 | f35288bc26a11e08acfcd0a5062d1acbec0ac61d | 5487525fc3e3313d3a1eac8b98d1c610e3e75862 | /746A_compote.py | 1bafa7c9478246cb72d08d5579f33d79c9d6f17b | []
def int_lst_input():
    """Read one line of space-separated integers from stdin."""
    return list(map(int, input().split(' ')))
def int_input():
    """Read one line from stdin and return it as an int."""
    return int(input())
def print_lst(lst):
    """Print the values of *lst* space-separated on a single line."""
    pieces = [str(item) for item in lst]
    print(' '.join(pieces))
def solve():
    """Codeforces 746A (Compote): fruit ratio is 1 lemon : 2 apples : 4 pears.

    Reads a, b, c (one integer per line) and prints the maximum total number
    of fruits usable — 7 per complete compote set.
    """
    a = int_input()
    b = int_input()
    c = int_input()
    # Closed form of the original decrement loop: each set consumes
    # 1 lemon, 2 apples and 4 pears.
    comp = min(a, b // 2, c // 4)
    print(comp * 7)
if __name__ == '__main__':
    # Run the solver only when executed directly (not on import).
    solve()
| UTF-8 | Python | false | false | 434 | py | 90 | 746A_compote.py | 89 | 0.465438 | 0.4447 | 0 | 27 | 15.074074 | 51 |
snakecuriosity95/OOPv | 4,973,572,131,143 | 6b1167ea3eeebad7a8380cdcbcd20806876056ae | ff900ac0fa01a32a51eecead57024e79a1f53086 | /PYpr/main.py | 1ea5b6799fe95969659585ba1a21a61168a8f9c6 | []
| no_license | https://github.com/snakecuriosity95/OOPv | 49df7072162c87adbbae38b03f567ceb504fc52f | 077e55ca1ad36b8c2224d19f9eabfba5d81a2222 | refs/heads/master | 2020-05-09T18:29:48.513964 | 2019-05-02T20:21:11 | 2019-05-02T20:21:11 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from Warriors import Warrior
import random as r
import time
'''
def newhero(obj):
obj = Warrior()
obj.show()
h_mod_obj, m_mod_obj, s_mod_obj = obj.race()
obj.setHP(h_mod_obj * obj.getHP())
obj.setMP(m_mod_obj * obj.getMP())
obj.setSP(s_mod_obj * obj.getSP())
obj.show()
return obj
orc = None
elf = None
orc = newhero(orc)
elf = newhero(elf)
orc.show()
elf.show()
'''
#obj.race()
#print("HP =",obj.getHP())
#obj.charStat(h_mod_obj, m_mod_obj, s_mod_obj)
#
# Доработать уровни атаки и защиты
# Подклассы у каждого класса
# Орк : Разбойник 20hp -> 20sp || Шаман 20hp -> 20mp
# Эльф : Охотник 15sp -> 15hp || Жрец 10sp -> 10hp
# Человек : Рыцарь 10mp -> 15hp || Волшебник 10sp -> 15mp
# 5sp 5hp
# Гном : Палладин 5sp -> 10hp || Чернокнижник 10hp -> 15mp
# 5sp 5sp
# Спектр : Убийца 10mp -> 10sp || Элементалист 10sp -> 10hp
# 10mp -> 10hp 10mp -> 10hp
# Добавить очки за победу и занести их в таблицу лидеров.
# При достаточном кол-ве очков - подъем уровня.
#
'''
1 - Orc.
2 - Elf.
3 - Human.
4 - Dwarf.
5 - Spectre.
'''
# --- Character setup --------------------------------------------------
# Pattern per hero: construct the Warrior, fetch the race stat modifiers,
# scale the base HP/MP/SP by them, tag the race id, then print the sheet.
orc = Warrior('Eric',2,1) # name, luck
h_mod_orc, m_mod_orc, s_mod_orc, race_name_orc = orc.race(1)
orc.setHP(h_mod_orc * orc.getHP())
orc.setMP(m_mod_orc * orc.getMP())
orc.setSP(s_mod_orc * orc.getSP())
orc.setRace(1)
orc.charInfo()
elf = Warrior('Mistery',7,2)
h_mod_elf, m_mod_elf, s_mod_elf, race_name_elf = elf.race(2)
elf.setHP(h_mod_elf * elf.getHP())
elf.setMP(m_mod_elf * elf.getMP())
elf.setSP(s_mod_elf * elf.getSP())
elf.setRace(2)
elf.charInfo()
hum = Warrior('Newbee',3,3)
h_mod_hum, m_mod_hum, s_mod_hum, race_name_hum = hum.race(3)
hum.setHP(h_mod_hum * hum.getHP())
hum.setMP(m_mod_hum * hum.getMP())
hum.setSP(s_mod_hum * hum.getSP())
hum.setRace(3)
hum.charInfo()
dwf = Warrior('Oldman',3,4)
# NOTE(review): dwf.race(3) uses race id 3 (human) but setRace(4) below
# tags dwarf — looks like a copy-paste slip; confirm against Warriors.race().
h_mod_dwf, m_mod_dwf, s_mod_dwf, race_name_dwf = dwf.race(3)
dwf.setHP(h_mod_dwf * dwf.getHP())
dwf.setMP(m_mod_dwf * dwf.getMP())
dwf.setSP(s_mod_dwf * dwf.getSP())
# This assignment is overwritten by the 5-member list further below.
players = [orc,elf,hum,dwf]
dwf.setRace(4)
dwf.charInfo()
spr = Warrior('Cristall',4,5)
# NOTE(review): spr.race(3) also uses id 3 while setRace(5) tags spectre —
# same suspected copy-paste; confirm.
h_mod_spr, m_mod_spr, s_mod_spr, race_name_spr = spr.race(3)
spr.setHP(h_mod_spr * spr.getHP())
spr.setMP(m_mod_spr * spr.getMP())
spr.setSP(s_mod_spr * spr.getSP())
spr.setRace(5)
spr.charInfo()
#players = [hum,hum,hum,hum,dwf]
players = [orc,elf,hum,dwf,spr]
def battle():
    """Run one random duel between two distinct members of `players`.

    The winner and loser (or both, on a tie) are appended to the stats
    files, and both participants are always appended to total.txt.
    """
    # Pick two distinct combatants. The original retried via unbounded
    # recursion; a loop does the same without risking a stack overflow.
    fighters = [r.choice(players)]
    challenger = r.choice(players)
    while challenger in fighters:
        challenger = r.choice(players)
    fighters.append(challenger)
    A, B = fighters
    # Trade blows until either combatant drops below 1 health.
    while A.health >= 1 and B.health >= 1:
        A.attack(B)
        B.attack(A)
    # 'with' guarantees all four stats files are closed even on error
    # (the original closed them manually).
    with open("stat_win.txt", 'a') as win_file, \
         open("stat_lose.txt", 'a') as lose_file, \
         open("stat_tie.txt", 'a') as tie_file, \
         open("total.txt", 'a') as total_file:
        if A.health > B.health:
            win_file.writelines("\n" + A.name)
            lose_file.writelines("\n" + B.name)
            print(A.name, " is a winner! ")
        elif B.health > A.health:
            lose_file.writelines("\n" + A.name)
            win_file.writelines("\n" + B.name)
            print(B.name, " is a winner! ")
        else:
            tie_file.writelines("\n" + A.name)
            tie_file.writelines("\n" + B.name)
            print("Tie.")
        # Every battle records both participants in the running total.
        total_file.writelines("\n" + A.name)
        total_file.writelines("\n" + B.name)
# Run a handful of battles to populate the stats files (plain loop instead
# of the original side-effect list comprehension).
for _ in range(5):
    battle()

import re


def _print_board(title, path):
    """Print *title* followed by 'Name count' lines tallied from *path*."""
    frequency = {}
    # 'with' closes the handle; the original leaked two of the three files.
    with open(path, 'r') as stats_file:
        text_string = stats_file.read().capitalize()
    # str.capitalize() lower-cases everything after the first character,
    # so the name tokens all match [a-z]{3,15}.
    for word in re.findall(r'\b[a-z]{3,15}\b', text_string):
        frequency[word] = frequency.get(word, 0) + 1
    print(title)
    for word in frequency:
        print(word.capitalize(), frequency[word])


print("\n")
_print_board("___Winer boaard___", 'stat_win.txt')  # sic: title kept verbatim
print("\n")
_print_board("___Loser board___", 'stat_lose.txt')
print("\n")
_print_board("___Participation in battle___", 'total.txt')
def clear():
    """Reset the battle statistics by truncating all four stat files.

    Opening a file in 'w' mode creates it if missing and truncates it,
    so no explicit write is needed.  Context managers guarantee every
    handle is closed (the original left explicit close() calls that
    would be skipped on an error).
    """
    for stats_name in ("stat_win.txt", "stat_lose.txt", "stat_tie.txt", "total.txt"):
        with open(stats_name, 'w'):
            pass
Sarma38/know-your-planet | 7,035,156,479,774 | 1f9fa749b2f8245724d8a94e35421ddf6a3774c5 | 603a3ffca8cb2376dde71dfeedcbbc6738f9c7c0 | /api/migrations/0013_remove_question_category.py | f27e5721e46274e8f9af13752dd6e0f442e39f45 | []
| no_license | https://github.com/Sarma38/know-your-planet | e7029e53a03106d87609b1faaddac1ca31a15021 | 9d891a21554257e963a52cc5dc1b71f39eae7c35 | refs/heads/master | 2023-02-04T06:02:27.604631 | 2020-12-25T09:41:43 | 2020-12-25T09:41:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.0.4 on 2020-04-05 17:25
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("api", "0012_question_category_temp"),
]
operations = [
migrations.RemoveField(model_name="question", name="category",),
]
| UTF-8 | Python | false | false | 298 | py | 110 | 0013_remove_question_category.py | 83 | 0.651007 | 0.587248 | 0 | 14 | 20.285714 | 72 |
Akrabut/Mathemical-Calculator | 13,005,160,976,122 | d9b4454737b1c16f5cb0bbb943011b703cad0899 | 6e2727f537eb1f854cb3ebc02931b4714f2a84ea | /PythonCalculator.py | 09ba0658014292f4d8d2dec616040b170a694bc1 | []
| no_license | https://github.com/Akrabut/Mathemical-Calculator | db764abc2db34ce0abc6a77e8cc85629b4279952 | ae9d5ae7b938fa4ecbdd22b3353faa7e9b95fb51 | refs/heads/master | 2020-04-12T12:19:11.316390 | 2018-12-19T20:35:16 | 2018-12-19T20:35:16 | 162,487,840 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | ####EXERCISE 1####
class date:
    """A calendar date whose month is stored as its English name."""

    # Month names indexed 1-12 (index 0 unused); replaces the original
    # chain of 12 separate if-statements.
    _MONTH_NAMES = (None, "January", "February", "March", "April", "May",
                    "June", "July", "August", "September", "October",
                    "November", "December")

    def __init__(self, newyear, newmonth, newday):
        """Store year/day as given; translate the month number (1-12) to
        its name.  Like the original, an out-of-range month leaves
        self.month unset."""
        self.year = newyear
        if 1 <= newmonth <= 12:
            self.month = self._MONTH_NAMES[newmonth]
        self.day = newday

    def __repr__(self):
        return 'date()'

    def __str__(self):
        # e.g. "5 of March, 2020"
        return str(self.day) + " of " + str(self.month) + ", " + str(self.year)
class time:
    """A clock time (hour and minute); minutes are zero-padded when printed."""

    def __init__(self, newhour, newmin):
        self.hour = newhour
        self.min = newmin

    def __repr__(self):
        return 'time()'

    def __str__(self):
        # Pad single-digit minutes so 9:5 renders as "9:05".
        minutes = str(self.min) if self.min >= 10 else '0' + str(self.min)
        return str(self.hour) + ':' + minutes
class task:
    """A named task with a start (time1) and end (time2) time.

    Exists purely to make the calendarentry code more readable.
    """

    def __init__(self, name, time1, time2):
        self.name = name
        self.time1, self.time2 = time1, time2
class calendarentry:
    """One calendar day holding a list of tasks, kept sorted by start hour.

    addtask() rejects a task whose start and end times exactly match an
    existing task's (prints "Invalid entry" and returns).
    """

    def __init__(self, year, month, day):
        self.tasks = []
        self.index = 0  # number of tasks currently stored
        self.day = date(year, month, day)

    def addtask(self, name, time1, time2):
        """Add a task unless an identically-timed one already exists."""
        for i in range(0, self.index):
            existing = self.tasks[i]
            # Bug fix: the original compared the int fields with 'is',
            # which only works by accident for small interned integers;
            # value equality (==) is the correct test.
            if (time1.hour == existing.time1.hour and time2.hour == existing.time2.hour
                    and time1.min == existing.time1.min and time2.min == existing.time2.min):
                print("Invalid entry")
                return
        self.index += 1
        self.newtask = task(name, time1, time2)
        self.tasks.append(self.newtask)
        # Keep tasks ordered by starting hour (minutes ignored, as before).
        self.tasks = sorted(self.tasks, key=lambda task: task.time1.hour)

    def __repr__(self):
        return 'calendarentry()'

    def __str__(self):
        # Prints the numbered todo list as a side effect; returns "".
        print("Todo list for", self.day, ":")
        for i in range(0, self.index):
            print(i + 1, ".", self.tasks[i].time1, "-", self.tasks[i].time2, "-", self.tasks[i].name)
        return ""
####EXERCISE 2####
#Functions from the presentations
def make_class(attributes, base_class=None):
    """Functional stand-in for a class: a dict with 'get'/'set' accessors
    over *attributes* and a 'new' constructor.

    Lookups fall back to *base_class* (single inheritance) when a name is
    missing locally; a name absent everywhere yields None.
    """
    def get_value(name):
        # Local attributes shadow the base class.
        if name in attributes:
            return attributes[name]
        elif base_class is not None:
            return base_class['get'](name)
    def set_value(name, value):
        attributes[name] = value
    def new(*args):
        # Delegates to init_instance (defined below in this file).
        return init_instance(cls, *args)
    cls = {'get': get_value, 'set': set_value, 'new': new}
    return cls
def init_instance(cls, *args):
    """Create an instance of the functional class *cls* and run its
    __init__ (if one is defined) with *args*."""
    instance = make_instance(cls)
    init = cls['get']('__init__')
    if init:
        init(instance, *args)
    return instance
def make_instance(cls):
    """Build an instance dict with 'get'/'set' accessors.

    Instance attributes shadow class attributes; class attribute lookups
    are bound to this instance via bind_method so callables behave like
    methods.
    """
    attributes = {}
    def get_value(name):
        if name in attributes:
            return attributes[name]
        else:
            # Fall back to the class, binding callables to the instance.
            value = cls['get'](name)
            return bind_method(value,instance)
    def set_value(name, value):
        attributes[name] = value
    instance = {'get': get_value, 'set': set_value}
    return instance
def bind_method(value, instance):
    """Return *value* unchanged unless it is callable; callables are
    wrapped so *instance* is passed as the implicit first argument."""
    if not callable(value):
        return value
    def method(*args):
        return value(instance, *args)
    return method
def make_date_class():
    """Build the functional (dict-based) version of the date class.

    Instances store year/month/day via the 'set' accessor.
    """
    def __init__(self, year, month, day):
        self['set']('year', year)
        self['set']('month', month)
        self['set']('day', day)
    return make_class({'__init__':__init__})
def make_calentry_class():
    """Build the functional (dict-based) version of the calendar-entry class.

    Tasks are kept in a dict keyed by the (start, end) time strings; each
    addtask() call merges the new entry with the existing dict.
    """
    def __init__(self, year, month, day):
        self['set']('year',year)
        self['set']('month',month)
        self['set']('day',day)
    def addtask(self, name, time1, time2):
        # First task: create the dict; later tasks: merge (new entry first,
        # then update with the old ones, so an existing key wins).
        if(self['get']('tasks') == None):
            self['set']('tasks', {(time1['get']('__str__')(), time2['get']('__str__')()): name})
        else:
            tasks = {(time1['get']('__str__')(), time2['get']('__str__')()): name}
            tasks.update(self['get']('tasks'))
            self['set']('tasks', tasks)
    return make_class({'__init__':__init__, 'addtask':addtask})
def make_time_class():
    """Build the functional (dict-based) version of the time class.

    __str__ zero-pads single-digit minutes, matching the OO time class.
    """
    def __init__(self, hour, min):
        self['set']('hour', hour)
        self['set']('min', min)
    def __str__(self):
        if (self['get']('min') < 10):
            return str(self['get']('hour')) + ':' + '0' + str(self['get']('min'))
        return str(self['get']('hour')) + ':' + str(self['get']('min'))
    return make_class({'__init__':__init__, '__str__':__str__})
####EXERCISE 3####
# Conversion rates keyed by (from_currency, to_currency).
rates={('dollar','nis'):3.82,('euro','nis'):4.07, ('nis', 'dollar'): 0.26, ('euro', 'dollar'): 0.93, ('dollar', 'euro'):1.06, ('nis', 'euro'): 0.24}
class shekel:
    """An amount of Israeli shekels; amount() is its value in NIS."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return '{0}nis'.format(self.value)

    def __repr__(self):
        return 'shekel({0})'.format(self.value)

    def __add__(self, other):
        # Adding any two currencies yields the sum of their NIS amounts.
        return self.amount() + other.amount()

    def amount(self):
        # Shekels are the base currency: no conversion needed.
        return self.value
class dollar:
    """An amount of US dollars; amount() converts it to NIS via 'rates'."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return '{0}$'.format(self.value)

    def __repr__(self):
        return 'dollar({0})'.format(self.value)

    def __add__(self, other):
        # Sum expressed in NIS, the common base currency.
        return self.amount() + other.amount()

    def amount(self):
        # Convert dollars to NIS using the module-level rates table.
        return self.value * rates['dollar', 'nis']
class euro:
    """An amount of euros; amount() converts it to NIS via 'rates'."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return '{0}eu'.format(self.value)

    def __repr__(self):
        return 'euro({0})'.format(self.value)

    def __add__(self, other):
        # Sum expressed in NIS, the common base currency.
        return self.amount() + other.amount()

    def amount(self):
        # Convert euros to NIS using the module-level rates table.
        return self.value * rates['euro', 'nis']
def add(coin1, coin2):
    """Return the sum of two currency objects, expressed in NIS."""
    amounts = (coin1.amount(), coin2.amount())
    return sum(amounts)
####EXERCISE 4####
def add_shekel(c1, c2):
    """Add two currency objects; the result is rendered as a shekel literal."""
    return 'shekel({0})'.format(c1.amount() + c2.amount())
def sub_shekel(c1, c2):
    """Subtract c2 from c1; the result is rendered as a shekel literal."""
    return 'shekel({0})'.format(c1.amount() - c2.amount())
def add_dollar(c1, c2):
    """Add dollar c1 and currency c2; the result is rendered in dollars.

    Bug fix: the shekel branch previously multiplied by
    rates['dollar', 'nis'] (the dollar->NIS rate), converting in the
    wrong direction; a shekel amount must be converted to dollars with
    rates['nis', 'dollar'] (compare the euro branch, and add_euro below,
    which use the correct from->to keys).
    """
    if (type(c2) == shekel):
        return 'dollar(' + str(c1.value + c2.value * rates['nis', 'dollar']) + ')'
    if (type(c2) == dollar):
        return 'dollar(' + str(c1.value + c2.value) + ')'
    if (type(c2) == euro):
        return 'dollar(' + str(c1.value + c2.value * rates['euro', 'dollar']) + ')'
def sub_dollar(c1, c2):
    """Subtract currency c2 from dollar c1; the result is rendered in dollars.

    Bug fix: as in add_dollar, the shekel branch used the dollar->NIS
    rate; converting a shekel amount to dollars requires
    rates['nis', 'dollar'].
    """
    if (type(c2) == shekel):
        return 'dollar(' + str(c1.value - c2.value * rates['nis', 'dollar']) + ')'
    if (type(c2) == dollar):
        return 'dollar(' + str(c1.value - c2.value) + ')'
    if (type(c2) == euro):
        return 'dollar(' + str(c1.value - c2.value * rates['euro', 'dollar']) + ')'
def add_euro(c1, c2):
    """Add euro c1 and currency c2; the result is rendered in euros.

    An operand of an unknown type yields None, as before.
    """
    if type(c2) == shekel:
        return 'euro({0})'.format(c1.value + c2.value * rates['nis', 'euro'])
    if type(c2) == dollar:
        return 'euro({0})'.format(c1.value + c2.value * rates['dollar', 'euro'])
    if type(c2) == euro:
        return 'euro({0})'.format(c1.value + c2.value)
def sub_euro(c1, c2):
    """Subtract currency c2 from euro c1; the result is rendered in euros.

    An operand of an unknown type yields None, as before.
    """
    if type(c2) == shekel:
        return 'euro({0})'.format(c1.value - c2.value * rates['nis', 'euro'])
    if type(c2) == dollar:
        return 'euro({0})'.format(c1.value - c2.value * rates['dollar', 'euro'])
    if type(c2) == euro:
        return 'euro({0})'.format(c1.value - c2.value)
#dispatch on type method
# Table mapping (operation, (type(c1), type(c2))) -> handler function.
# The first operand's type determines the currency of the result.
dispatch = {('add', (shekel, shekel)): add_shekel, ('add', (shekel, dollar)): add_shekel, ('add', (shekel, euro)): add_shekel,
            ('add', (dollar, shekel)): add_dollar, ('add', (dollar, dollar)): add_dollar, ('add', (dollar, euro)): add_dollar,
            ('add', (euro, shekel)): add_euro, ('add', (euro, dollar)): add_euro, ('add', (euro, euro)): add_euro,
            ('sub', (shekel, shekel)): sub_shekel, ('sub', (shekel, dollar)): sub_shekel, ('sub', (shekel, euro)): sub_shekel,
            ('sub', (dollar, shekel)): sub_dollar, ('sub', (dollar, dollar)): sub_dollar, ('sub', (dollar, euro)): sub_dollar,
            ('sub', (euro, shekel)): sub_euro, ('sub', (euro, dollar)): sub_euro, ('sub', (euro, euro)): sub_euro}
def apply(op, c1, c2):
    """Dispatch op ('add'/'sub') to the handler registered for this pair
    of operand types.

    Raises KeyError for an unregistered (op, types) combination.  Note:
    shadows the Python 2 builtin name 'apply'; kept for existing callers.
    """
    return dispatch[op, (type(c1), type(c2))](c1, c2)
####EXERCISE 5####
# Coercion table: (currency, target) -> converter.  Both non-base
# currencies coerce to NIS via their amount() methods.
coercions = {('dollar', 'nis'): dollar.amount, ('euro', 'nis'): euro.amount}
#the coercion dictionary
def coerce_apply(op, c1, c2):
    """Add or subtract two currency objects after coercing both to NIS.

    Returns the result formatted as a Shekel literal; an unknown op
    yields None.
    """
    if op == 'add':
        return 'Shekel({0})'.format(c1.amount() + c2.amount())
    if op == 'sub':
        return 'Shekel({0})'.format(c1.amount() - c2.amount())
####EXERCISE 6####
def get_reverse_map_iterator(s, g = lambda x: x):
    """Return a reverse iterator over sequence *s* as a dict of closures.

    'next' PRINTS g(s[i]) walking from the last element backwards; when
    exhausted it prints "no more items".  Items on which g raises an
    arithmetic/value/type error are skipped by recursing to the next one.
    'has_more' reports whether unconsumed items remain.
    """
    i = len(s)
    def has_more():
        # i counts how many items are still unconsumed.
        nonlocal i
        if (i == 0):
            return False
        else: return True
    def next():
        #The edited Next function
        nonlocal i
        try:
            i -= 1
            if (i < 0):
                raise IndexError
            print(g(s[i]))
        except IndexError:
            # Walked past the front of the sequence.
            print("no more items")
            return
        except (ZeroDivisionError, ArithmeticError, ValueError, TypeError):
            # g failed on this element: skip it and try the next one.
            return next()
    return {'next': next, 'has_more': has_more}
####EXERCISE 7####
from functools import reduce
from operator import mul,add
class Exp(object):
    """A call expression: an operator name applied to a list of operands."""

    def __init__(self, operator, operands):
        self.operator = operator
        self.operands = operands

    def __repr__(self):
        return 'Exp({0!r}, {1!r})'.format(self.operator, self.operands)

    def __str__(self):
        # Render as a call, e.g. "add(1, 2)".
        joined = ', '.join(str(operand) for operand in self.operands)
        return '{0}({1})'.format(self.operator, joined)
def calc_eval(exp):
    #Evaluate a Calculator expression.
    # Numbers are self-evaluating; Exp nodes are evaluated recursively
    # (operands first) and then applied.  Any other type yields None.
    if type(exp) in (int, float):
        return exp
    if type(exp) == Exp:
        arguments = list(map(calc_eval, exp.operands))
        return calc_apply(exp.operator, arguments)
def calc_apply(operator, args):
    """Apply the named operator to a list of args.

    Supports add/+, sub/-, mul/*, div//, pow/^ and sqrt/V.  Raises
    TypeError for a wrong argument count and ValueError for the square
    root of a negative number.
    """
    if operator in ('add', '+'):
        return sum(args)
    if operator in ('sub', '-'):
        if len(args) == 0:
            # Bug fix: the original message lacked the separating space
            # ("subrequires at least 1 argument").
            raise TypeError(operator + ' requires at least 1 argument')
        if len(args) == 1:
            return -args[0]  # unary negation
        return sum(args[:1] + [-arg for arg in args[1:]])
    if operator in ('mul', '*'):
        return reduce(mul, args, 1)
    if operator in ('div', '/'):
        if len(args) != 2:
            raise TypeError(operator + ' requires exactly 2 arguments')
        numer, denom = args
        return numer/denom
    if operator in ('pow', '^'):
        if len(args) != 2:
            # Typo fix: "arguements" -> "arguments".
            raise TypeError(operator + ' requires exactly two arguments')
        return args[0]**args[1]
    if operator in ('sqrt', 'V'):
        if len(args) != 1:
            raise TypeError(operator + ' requires exactly one argument')
        if (args[0] < 0):
            raise ValueError(operator + ' math domain error')
        return args[0]**0.5
def read_eval_print_loop():
    """Run a read-eval-print loop for the calculator.

    Calculator errors are reported and the loop continues; Ctrl-C /
    Ctrl-D (EOF) exit cleanly.
    """
    while True:
        try:
            expression_tree = calc_parse(input('calc> '))
            print(calc_eval(expression_tree))
        except (SyntaxError, TypeError, ZeroDivisionError,ValueError,ArithmeticError) as err:
            # Recoverable calculator error: report and keep looping.
            print(type(err).__name__ + ':', err)
        except (KeyboardInterrupt, EOFError): # <Control>-D, etc. <ctrl-C>
            print('Calculation completed.')
            return
def calc_parse(line):
    #Parse a line of calculator input and return an expression tree.
    # analyze() consumes tokens from the front; anything left over means
    # trailing garbage, which is a syntax error.
    tokens = tokenize(line)
    expression_tree = analyze(tokens)
    if len(tokens) > 0:
        raise SyntaxError('Extra token(s): ' + ' '.join(tokens))
    return expression_tree
def tokenize(line):
    """Split a calculator line into tokens, with ( ) , as separate tokens."""
    for separator in ('(', ')', ','):
        line = line.replace(separator, ' {0} '.format(separator))
    return line.strip().split()
def analyze(tokens):
    #Create a tree of nested lists from a sequence of tokens.
    # Consumes tokens destructively from the front of the list.
    assert_non_empty(tokens)
    token = analyze_token(tokens.pop(0))
    if type(token) in (int, float):
        # Numbers are complete expressions by themselves.
        return token
    if token in known_operators:
        # An operator must be immediately followed by '('.
        if len(tokens) == 0 or tokens.pop(0) != '(':
            raise SyntaxError('expected ( after ' + token)
        return Exp(token, analyze_operands(tokens))
    else:
        raise SyntaxError('unexpected ' + token)
def analyze_operands(tokens):
    #Read a list of comma-separated operands.
    # Consumes tokens up to and including the closing ')'.
    assert_non_empty(tokens)
    operands = []
    while tokens[0] != ')':
        # Every operand after the first must be preceded by a comma.
        if operands and tokens.pop(0) != ',':
            raise SyntaxError('expected ,')
        operands.append(analyze(tokens))
        assert_non_empty(tokens)
    tokens.pop(0) # Remove )
    return operands
def analyze_token(token):
    """Interpret token as an int if possible, else a float, else return
    it unchanged.

    Fix: the original also had an 'except ArithmeticError' branch around
    int(), which int() never raises -- the unreachable handler is removed.
    """
    for converter in (int, float):
        try:
            return converter(token)
        except (TypeError, ValueError):
            continue
    return token
# Operator names accepted by analyze(): both word and symbol forms.
known_operators = ['add', 'sub', 'mul', 'div', 'pow','sqrt','+', '-', '*', '/','^','V']
def assert_non_empty(tokens):
    """Raise SyntaxError if the token list is empty."""
    if not tokens:
        raise SyntaxError('unexpected end of line')
read_eval_print_loop() | UTF-8 | Python | false | false | 15,594 | py | 1 | PythonCalculator.py | 1 | 0.545402 | 0.533923 | 0 | 415 | 35.580723 | 181 |
entrekid/daily_algorithm | 12,987,981,114,507 | d3bcf7a14fd40cb906473d4ee1cb1e6de479a755 | 35250c1ccc3a1e2ef160f1dab088c9abe0381f9f | /2020/0411/2445.py | c111503e866737ecc545dcb1b3119a034d8855a9 | []
| no_license | https://github.com/entrekid/daily_algorithm | 838ab50bd35c1bb5efd8848b9696c848473f17ad | a6df9784cec95148b6c91d804600c4ed75f33f3e | refs/heads/master | 2023-02-07T11:21:58.816085 | 2021-01-02T17:58:38 | 2021-01-02T17:58:38 | 252,633,404 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | n = int(input())
# increase
for iter in range(1, n):
print("*" * iter + " " * 2 * (n - iter) +"*" * iter)
# middle
print("*" * 2 * n)
# end
for iter in range(n - 1 , 0, -1):
print("*" * iter + " " * 2 * (n - iter) +"*" * iter) | UTF-8 | Python | false | false | 236 | py | 155 | 2445.py | 155 | 0.444915 | 0.415254 | 0 | 9 | 25.333333 | 57 |
Deepstatsanalysis/ML_algos_from_scratch | 15,564,961,486,168 | 5d44faa224ac48c5b361bcec036243886d113ad3 | 58ca7455374354cc7decac4ccb0af28dc2d42de2 | /regression_algorithms_from_scratch/Linear_regression_1/linear_regression_from_scratch.py | e896aae037dde3ce6c07c1545302948c4ba7c287 | []
| no_license | https://github.com/Deepstatsanalysis/ML_algos_from_scratch | b97f793e69327f88e43809fb17ce3c06827c5f13 | a506fd5a234b753fbd90c71d49aab7012d8d31fc | refs/heads/master | 2022-07-14T21:44:32.552029 | 2020-05-19T03:18:51 | 2020-05-19T03:18:51 | 277,743,838 | 1 | 0 | null | true | 2020-07-07T07:10:56 | 2020-07-07T07:10:56 | 2020-05-19T06:36:39 | 2020-05-19T03:18:52 | 11,332 | 0 | 0 | 0 | null | false | false | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import random
class Airfoil:
    """Multivariate linear regression on CSV data: random train/test
    splitting, per-column standardization, batch gradient-descent
    training, and prediction with the stored training statistics."""
    def train_test_split(self, dataframe,test_size):
        """Randomly split *dataframe* into (train, test).

        *test_size* may be an absolute row count or, if a float, a
        proportion of the whole frame.
        """
        dataframe_size=len(dataframe)
        if isinstance(test_size,float):#if test size is passed as a proportion
            test_size=round(test_size*dataframe_size)
        #pick random samples from the data for train test split
        indexes=dataframe.index.tolist()
        test_indices=random.sample(population=indexes,k=test_size)
        #now putting the values of train and test data into the respective df's
        test_dataframe=dataframe.loc[test_indices]
        cropped_dataframe=dataframe.drop(test_indices)
        train_dataframe=cropped_dataframe
        return train_dataframe,test_dataframe
    def normalize_test(self,test_values,train_mean,train_std):
        """Standardize test columns in place using the TRAINING mean/std
        (prevents information leaking from the test set)."""
        for i in range(test_values.shape[1]):
            test_values[:,i]=test_values[:,i]-train_mean[i]
            test_values[:,i]=test_values[:,i]/train_std[i]
        return test_values
    def normalize(self,dataset):
        """Standardize each column to zero mean / unit std in place.

        Returns (dataset, per-column means, per-column stds) so the same
        statistics can later be applied to test data.
        """
        train_mean=[]
        train_std=[]
        for i in range(dataset.shape[1]):
            mean=np.mean(dataset[:,i])
            train_mean.append(mean)
            std=np.std(dataset[:,i])
            train_std.append(std)
            dataset[:,i]=(dataset[:,i]-mean)/std
        return dataset,train_mean,train_std
    def gradient_descent(self,weights,train_values,train_labels,alpha,bias,num_iter):
        """Batch gradient descent on mean squared error.

        Returns the fitted (weights, bias, cost-history array).
        """
        num_samples=train_values.shape[0]
        dim=train_values.shape[1]
        cost=np.ones(num_iter)
        i=0
        #print(weights,bias)
        for i in range(num_iter):
            # Forward pass: linear prediction for every sample.
            predict=np.dot(train_values,weights)+bias
            cost[i]=(1/(2*num_samples)*sum(np.square(predict-train_labels)))
            #print(cost[i])
            #print(train_values.shape)
            # Gradients of the squared-error cost w.r.t. weights and bias.
            dw=1/(num_samples)*np.dot(train_values.T,(predict-train_labels))
            db=1/(num_samples)*np.sum(predict-train_labels)
            weights-=alpha*dw
            bias-=alpha*db
            i+=1
        return weights,bias,cost
    def multi_var_linear_regression(self,train_values,train_labels,alpha,num_iter):
        """Initialize zero weights and bias, then fit by gradient descent."""
        train_dimension=train_values.shape[1]
        num_samples=train_values.shape[0]
        ones=np.ones((train_values.shape[0],1))
        bias=0
        weights=np.zeros(train_dimension)
        weights,bias,cost=self.gradient_descent(weights,train_values,train_labels,alpha,bias,num_iter)
        return weights,bias
    def predict_test(self,test_value):
        """Linear-model output for one (already normalized) sample."""
        return np.dot(test_value,self.theta)+self.bias
    def predict(self,filename):
        """Load a CSV of samples (last column dropped), normalize with the
        stored training statistics and return a list of predictions.

        Requires train() to have been called first (uses self.theta,
        self.bias, self.train_mean, self.train_std).
        """
        test_data=pd.read_csv(filename,header=None)
        test_data=np.array(test_data)
        test_data=test_data[:,:-1]
        prediction=[]
        self.test_values=self.normalize_test(test_data,self.train_mean,self.train_std)
        for i in range(len(self.test_values)):
            pred=self.predict_test(self.test_values[i])
            prediction.append(pred)
        return prediction
    def train(self,filename):
        """Load the training CSV (last column is the label), normalize the
        features and fit self.theta / self.bias with 20000 iterations of
        gradient descent at learning rate 0.1."""
        dataset=pd.read_csv(filename,header=None)
        train_data=np.array(dataset)
        self.train_values=train_data[:,:-1]
        self.train_labels=train_data[:,-1]
        self.train_values,self.train_mean,self.train_std=self.normalize(self.train_values)
        self.theta,self.bias=self.multi_var_linear_regression(self.train_values,self.train_labels,alpha=0.1,num_iter=20000)
| UTF-8 | Python | false | false | 3,585 | py | 22 | linear_regression_from_scratch.py | 5 | 0.614784 | 0.60781 | 0 | 87 | 39.045977 | 123 |
onmyoji-xiao/ISIC_Classification | 4,604,204,983,192 | 13aa8394260ccdeb38d218bc3948b0f75bc4564e | 4c097da07e8a2264063b8ef4c145a47673190afc | /dataset_g.py | f3357b041c72e013bee401674c506c9baaa95522 | []
| no_license | https://github.com/onmyoji-xiao/ISIC_Classification | af81329b86bf4f649f9b939572fcc0ff781ed365 | 232fa8b2794706a0f1d18c62f5f2b19c2ea7cd7f | refs/heads/master | 2022-04-08T13:00:44.449947 | 2020-03-05T19:35:14 | 2020-03-05T19:35:14 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from torch.utils.data import Dataset
from PIL import Image
import csv
import torch
def default_loader(path):
    """Load an image from *path* with PIL, forced to 3-channel RGB mode."""
    return Image.open(path).convert('RGB')
class MyDataset_G(Dataset):
    """Dataset pairing images with per-row LBP features from a CSV.

    The label CSV (csv_path) lists one image per row: column 0 is the
    file stem, columns 1 and 2 are one-hot-ish '1.0' flags mapping to
    class labels 1 and 2 (otherwise 0).  __getitem__ returns
    (transformed image, LBP feature tensor, label).

    Fix: both CSV handles were previously opened without ever being
    closed (in __init__ and on every __getitem__ call); they are now
    managed with context managers.
    """

    def __init__(self, csv_path, image_path, lbp_path, dataset='', data_transforms=None, target_transform=None,
                 loader=default_loader):
        imgs = []
        with open(csv_path, "r") as csvFile:
            reader = csv.reader(csvFile)
            for item in reader:
                if reader.line_num == 1:
                    # Skip the header row.
                    continue
                # Label: 2 wins over 1 if both flag columns are '1.0'.
                temp = 0
                if item[1] == '1.0':
                    temp = 1
                if item[2] == '1.0':
                    temp = 2
                # (image path without extension, 1-based data row number, label)
                imgs.append((image_path + "/" + item[0], reader.line_num - 1, temp))
        self.imgs = imgs
        self.lbp_path = lbp_path
        self.data_transforms = data_transforms
        self.target_transform = target_transform
        self.loader = loader
        self.dataset = dataset

    def __getitem__(self, index):
        fn, num, label = self.imgs[index]
        orin_img = self.loader(fn + ".jpg")
        img = orin_img.resize((224, 224))
        if self.data_transforms is not None:
            try:
                img = self.data_transforms[self.dataset](img)
            except Exception:
                # Narrowed from a bare except; the original message is kept.
                print("Cannot transform image: {}".format(fn))
        # Linear scan for the matching LBP row; as before, a missing row
        # leaves feature as an empty list rather than a tensor.
        feature = []
        with open(self.lbp_path, "r") as csvFile:
            reader = csv.reader(csvFile)
            for row in reader:
                if reader.line_num == num:
                    feature = [float(i) for i in row]
                    feature = torch.Tensor(feature)
                    break
        return img, feature, label

    def __len__(self):
        return len(self.imgs)
| UTF-8 | Python | false | false | 1,706 | py | 5 | dataset_g.py | 4 | 0.528722 | 0.518171 | 0 | 61 | 26.967213 | 111 |
n1zmarck/ongeki-card-tools | 1,906,965,482,250 | 9aea5e2a9bce3c15e0eb71d574b5b6e157a76a83 | 2fb96389122cdc48948311ea05b58076eccd188a | /apps/card/models.py | 1753ecc00f0c896e11ba8b8c6823e509f1512177 | []
| no_license | https://github.com/n1zmarck/ongeki-card-tools | 411cd3e05378a2de5ead278f30df9bdc12292c0d | 7decf33dd1d806602018af664be48b3c25c1e100 | refs/heads/master | 2023-08-31T15:08:27.405086 | 2021-10-21T21:02:57 | 2021-10-22T11:25:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from typing import Type
from django.db import models
import uuid
from django.db.models.deletion import SET_NULL
from apps.user.models import User
from django.core.mail import send_mail
from django.core.validators import RegexValidator, FileExtensionValidator
from django.contrib.auth.models import PermissionsMixin, UserManager
from django.contrib.auth.base_user import AbstractBaseUser
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from re import T
class Attack(models.Model):
    """Attack-power pattern: per-star-level attack values shared by cards."""
    def __str__(self):
        return self.Name
    class Meta:
        verbose_name = _('攻撃力パターン')
        verbose_name_plural = _('攻撃力パターン')
    # Pattern name, also used as the string representation.
    Name = models.CharField(
        verbose_name=_('パターン名'),
        max_length=1024,
        blank=False,
    )
    # Attack value per star level (★1-★11); default 0 means "not set".
    Lv50Atk = models.IntegerField(
        verbose_name=_('★1攻撃力'),
        blank=True,
        default=0
    )
    Lv55Atk = models.IntegerField(
        verbose_name=_('★2攻撃力'),
        blank=True,
        default=0
    )
    Lv60Atk = models.IntegerField(
        verbose_name=_('★3攻撃力'),
        blank=True,
        default=0
    )
    Lv65Atk = models.IntegerField(
        verbose_name=_('★4攻撃力'),
        blank=True,
        default=0
    )
    Lv70Atk = models.IntegerField(
        verbose_name=_('★5攻撃力'),
        blank=True,
        default=0
    )
    Lv75Atk = models.IntegerField(
        verbose_name=_('★6攻撃力'),
        blank=True,
        default=0
    )
    Lv80Atk = models.IntegerField(
        verbose_name=_('★7攻撃力'),
        blank=True,
        default=0
    )
    Lv85Atk = models.IntegerField(
        verbose_name=_('★8攻撃力'),
        blank=True,
        default=0
    )
    Lv90Atk = models.IntegerField(
        verbose_name=_('★9攻撃力'),
        blank=True,
        default=0
    )
    Lv95Atk = models.IntegerField(
        verbose_name=_('★10攻撃力'),
        blank=True,
        default=0
    )
    Lv100Atk = models.IntegerField(
        verbose_name=_('★11攻撃力'),
        blank=True,
        default=0
    )
class SkillCondition(models.Model):
    """Trigger condition a skill effect can depend on (lookup table)."""
    def __str__(self):
        return self.Name
    class Meta:
        verbose_name = _('スキル条件')
        verbose_name_plural = _('スキル条件')
    Name = models.CharField(
        verbose_name=_('条件名'),
        max_length=1024,
        blank=False,
    )
class SkillEffect(models.Model):
    """Kind of effect a skill can apply (lookup table)."""
    def __str__(self):
        return self.Name
    class Meta:
        verbose_name = _('スキル効果')
        verbose_name_plural = _('スキル効果')
    Name = models.CharField(
        verbose_name=_('効果名'),
        max_length=1024,
        blank=False,
    )
class Skill(models.Model):
    """Card skill: display texts plus up to two condition/effect/value
    triples (A and B), each with a regular and a "super bloom" value."""
    def __str__(self):
        return self.Name
    class Meta:
        verbose_name = _('スキル')
        verbose_name_plural = _('スキル')
    # Internal name used to tell skills apart in the admin.
    Name = models.CharField(
        verbose_name=_('スキル名(判別用)'),
        max_length=1024,
    )
    # 0 = unselected; the rest follow the in-game skill categories.
    SKILLTYPE_CHOICES = (
        (0, "未選択"),
        (1, "ATTACK"),
        (2, "BOOST"),
        (3, "ASSIST"),
        (4, "GUARD"),
    )
    Type = models.IntegerField(
        verbose_name=_('スキル種別'),
        default=0,
        choices=SKILLTYPE_CHOICES,
    )
    SkillName = models.CharField(
        verbose_name=_('スキル名'),
        max_length=256,
        default="",
        blank=True,
    )
    SkillText = models.TextField(
        verbose_name=_('スキル効果テキスト'),
        default="",
        blank=True
    )
    # "Super bloom" (upgraded) variants of the display name/text.
    SkillName2 = models.CharField(
        verbose_name=_('超開花スキル名'),
        max_length=256,
        default="",
        blank=True
    )
    SkillText2 = models.TextField(
        verbose_name=_('超開花スキル効果テキスト'),
        default="",
        blank=True
    )
    # --- Effect slot A: condition, effect kind, normal and super values ---
    SkillConditionA = models.ForeignKey(
        SkillCondition,
        related_name='skillA_list',
        on_delete=models.PROTECT,
        verbose_name=_('スキル効果1条件'),
        blank=True,
        null=True
    )
    SkillEffectA = models.ForeignKey(
        SkillEffect,
        on_delete=models.PROTECT,
        related_name='skillA_list',
        verbose_name=_('スキル効果1'),
        blank=True,
        null=True
    )
    SkillParamA = models.IntegerField(
        verbose_name=_('スキル効果1効果値'),
        default=0,
    )
    SkillParamA2 = models.IntegerField(
        verbose_name=_('スキル効果1超開花効果値'),
        default=0,
    )
    # --- Effect slot B: same structure as slot A ---
    SkillConditionB = models.ForeignKey(
        SkillCondition,
        related_name='skillB_list',
        on_delete=models.PROTECT,
        verbose_name=_('スキル効果2条件'),
        blank=True,
        null=True
    )
    SkillEffectB = models.ForeignKey(
        SkillEffect,
        on_delete=models.PROTECT,
        related_name='skillB_list',
        verbose_name=_('スキル効果2'),
        blank=True,
        null=True
    )
    SkillParamB = models.IntegerField(
        verbose_name=_('スキル効果2効果値'),
        default=0,
    )
    SkillParamB2 = models.IntegerField(
        verbose_name=_('スキル効果2超開花効果値'),
        default=0,
    )
class Work(models.Model):
    """Source work (series/title) a character belongs to."""
    def __str__(self):
        return self.Name
    class Meta:
        verbose_name = _('作品')
        verbose_name_plural = _('作品')
    Name = models.CharField(
        verbose_name=_('作品名'),
        max_length=1024,
        blank=False,
    )
class Character(models.Model):
    """Character appearing on cards, linked to its source Work."""
    def __str__(self):
        return self.Name
    class Meta:
        verbose_name = _('キャラクター')
        verbose_name_plural = _('キャラクター')
    Name = models.CharField(
        verbose_name=_('キャラクター名'),
        max_length=1024,
        blank=False,
    )
    # PROTECT: a Work cannot be deleted while characters reference it.
    Work = models.ForeignKey(
        Work,
        on_delete=models.PROTECT,
        verbose_name=_('作品'),
        null=True
    )
class Card(models.Model):
    """A collectible card: identity, rarity, element, release date and
    links to its character, attack pattern and skill."""
    def __str__(self):
        return self.Name
    class Meta:
        verbose_name = _('カード')
        verbose_name_plural = _('カード')
    Name = models.CharField(
        verbose_name=_('カード名'),
        max_length=1024,
        blank=False,
    )
    Number = models.CharField(
        verbose_name=_('カード番号'),
        max_length=64,
        blank=True,
    )
    # 0 = unselected; ascending rarity tiers.
    RARE_CHOICES = (
        (0, "未選択"),
        (1, "N"),
        (2, "R"),
        (3, "SR"),
        (4, "SR+"),
        (5, "SSR"),
    )
    Rare = models.IntegerField(
        verbose_name=_('レアリティ'),
        default=0,
        choices=RARE_CHOICES,
    )
    # Elemental attribute of the card.
    TYPE_CHOICES = (
        (0, "未選択"),
        (1, "FIRE"),
        (2, "AQUA"),
        (3, "LEAF")
    )
    Type = models.IntegerField(
        verbose_name=_('属性'),
        default=0,
        choices=TYPE_CHOICES,
    )
    Date = models.DateField(
        verbose_name=_('追加日'),
        blank=True,
        null=True
    )
    # NOTE(review): verbose_name '作品' ("work") on this Character FK looks
    # copy-pasted from the Character model -- confirm before changing, as
    # a verbose_name edit generates a migration.
    Character = models.ForeignKey(
        Character,
        on_delete=models.PROTECT,
        verbose_name=_('作品'),
        null=True,
        blank=True
    )
    Atk = models.ForeignKey(
        Attack,
        on_delete=models.PROTECT,
        verbose_name=_('攻撃力パターン'),
        null=True,
        blank=True
    )
    Skill = models.ForeignKey(
        Skill,
        on_delete=models.PROTECT,
        verbose_name=_('スキル'),
        null=True,
        blank=True
    )
    HowToGet = models.CharField(
        verbose_name=_('入手方法'),
        max_length=256,
        blank=True
    )
    # NOTE(review): max_length on a TextField is not enforced at the DB
    # level by Django; it only affects form widgets.
    HowToGet2 = models.TextField(
        verbose_name=_('入手方法詳細'),
        max_length=256,
        blank=True
    )
| UTF-8 | Python | false | false | 7,916 | py | 13 | models.py | 11 | 0.534985 | 0.518105 | 0 | 360 | 19.405556 | 73 |
jingchaoluan/OCRService | 335,007,466,538 | c3fadafedd493610957c03f486c455d89d7a4157 | 86c5a40ade24f7d6aab5145192f72b601fdf0b14 | /api/ocr.py | f50a974755fdca4107e3e028acf45463e8289c47 | []
| no_license | https://github.com/jingchaoluan/OCRService | 93c32e3c9682ef22d08beeac295bfd385532dba5 | 734d45b3fe292b6214aba129587e7e693631c293 | refs/heads/master | 2021-10-18T18:46:06.557687 | 2019-01-17T22:12:57 | 2019-01-17T22:12:57 | 93,561,105 | 5 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from django.conf import settings
from PIL import Image
from resizeimage import resizeimage # Used for image resize
import sys, os, os.path, subprocess, shutil
# get the directory of ocropy script
ocropyDir = settings.BASE_DIR + "/ocropy"
# Get the directory which stores all input and output files
dataDir = settings.MEDIA_ROOT
# Resize the image size to meet the smallest size requirment of binarization: 600*600 pixels
# Resize by adding a white backgroud border, but not to strech the original image
def resize_image(imagepath):
    """Pad an image up to at least 600x600 pixels (binarization minimum).

    The aspect ratio is preserved: resize_contain adds a border instead
    of stretching.  Images already >= 600x600 are left untouched.  The
    file at *imagepath* is overwritten in place.

    Fixes: the file is now opened in binary mode ('rb' -- image data is
    binary), and the context manager closes the handle on every path;
    the original leaked the handle when no resize was needed.
    """
    with open(imagepath, 'rb') as fd_img:
        img = Image.open(fd_img)
        w, h = img.size
        if w < 600 or h < 600:
            new_size = [max(w, 600), max(h, 600)]
            new_image = resizeimage.resize_contain(img, new_size)
            new_image.save(imagepath, new_image.format)  # override the original image
# Execute ocr scripts: extract texts in a image
# Parameter: the original image
# Return: the .txt file of the image
def ocr_exec(imagename):
    """Run the ocropy OCR pipeline on one image under dataDir.

    Stages: binarization -> page layout analysis -> text recognition
    (English default model) -> concatenation of per-line .txt output.
    Returns the path of the combined .txt file.

    NOTE(review): each stage uses shell=True with the filename
    interpolated into the command string -- command injection is possible
    if imagename is attacker-controlled; confirm inputs are sanitized.
    Also, sys.exit() on failure terminates the whole process, which is
    drastic inside a web service worker.
    """
    # Prepare path for OCR service
    srcImagePath = dataDir +"/"+ imagename
    image_base, image_extension = os.path.splitext(imagename)
    outputDir = dataDir +"/"+ image_base
    # Call binarization script
    binarize_cmd = ocropyDir + "/ocropus-nlbin -n " + srcImagePath + " -o " + outputDir
    r_binarize = subprocess.call([binarize_cmd], shell=True)
    if r_binarize != 0:
        sys.exit("Error: Binarization process failed")
    # Call page layout analysis script
    la_inputPath = outputDir + "/????.bin.png"
    layoutAnalysis_cmd = ocropyDir + "/ocropus-gpageseg -n --minscale 1.0 " + la_inputPath
    r_layoutAnalysis = subprocess.call([layoutAnalysis_cmd], shell=True)
    if r_layoutAnalysis != 0:
        sys.exit("Error: Layout analysis process failed")
    # Call text recognition script
    recog_model = ocropyDir + "/models/en-default.pyrnn.gz"
    recog_inputPath = outputDir + "/????/??????.bin.png"
    textRecog_cmd = ocropyDir + "/ocropus-rpred -n -Q 2 -m " + recog_model + " " + recog_inputPath
    r_textRecognition = subprocess.call([textRecog_cmd], shell=True)
    if r_textRecognition != 0:
        sys.exit("Error: Text recognition process failed")
    # Generate output file by concatenating the per-line recognizer output.
    output_file = outputDir + "/" + image_base + ".txt"
    cat_cmd = "cat " + outputDir + "/0001/??????.txt >" + output_file
    r_genOutput = subprocess.call([cat_cmd], shell=True)
    if r_genOutput != 0:
        sys.exit("Error: Generate output process failed")
    '''
    # Generate HTML output
    output_file = outputDir + "/" + image_base + ".html"
    genOutput_cmd = ocropyDir + "/ocropus-hocr " + la_inputPath + " -o " + outputFile
    r_genOutput = subprocess.call([genOutput_cmd], shell=True)
    if r_genOutput != 0:
        sys.exit("Error: Generate output process failed")
    '''
    return output_file
# Delete all files related to this service time
def del_service_files(dataDir):
# Delete all original images
for the_file in os.listdir(dataDir):
file_path = os.path.join(dataDir, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print(e)
| UTF-8 | Python | false | false | 3,256 | py | 32 | ocr.py | 27 | 0.668919 | 0.657555 | 0 | 92 | 34.380435 | 98 |
adamghill/goingson | 2,121,713,883,156 | fa1a85fc5325856695536a64bfd175536f0992f3 | 557d653ae5299bd6ca9916a058b5bccb03f8d157 | /project/settings.py | fe393647ebe545b4c16b75b1264525fe5716de2d | []
| no_license | https://github.com/adamghill/goingson | 1de89ad886a0199c57cb793e211c142c829dc5e5 | 46fba7911c7361b782eec796c23a5a6baefa1e3f | refs/heads/master | 2018-03-10T07:24:42.548315 | 2014-02-18T01:06:35 | 2014-02-18T01:06:35 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from os import environ, path
from conf.logging import *
BASE_DIR = path.dirname(path.dirname(__file__))
SITE_ROOT = path.dirname(path.realpath(__file__))
ENVIRONMENT = environ.get('ENVIRONMENT', 'dev')
SECRET_KEY = environ.get('SECRET_KEY')
DEBUG = True
TEMPLATE_DEBUG = DEBUG
THIRD_PARTY_APPS = (
'compressor',
'debug_toolbar',
'south',
)
DJANGO_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
)
INTERNAL_APPS = (
'account',
'journal',
'www',
'project',
)
INSTALLED_APPS = THIRD_PARTY_APPS + DJANGO_APPS + INTERNAL_APPS
MIDDLEWARE_CLASSES = (
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.static',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
'django_utils.context_processors.settings.settings',
)
ROOT_URLCONF = 'project.urls'
WSGI_APPLICATION = 'project.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': path.join(BASE_DIR, 'db.sqlite3'),
}
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_ROOT = SITE_ROOT + '/assets'
STATIC_URL = '/assets/'
STATICFILES_DIRS = (
path.join(SITE_ROOT, '../', 'www', 'assets'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
COMPRESS_ENABLED = False
SETTINGS_IN_CONTEXT = ['DEBUG', ]
try:
from local_settings import *
except ImportError:
pass
if ENVIRONMENT == 'production':
DEBUG = False
TEMPLATE_DEBUG = DEBUG
COMPRESS_ENABLED = True
COMPRESS_OFFLINE = True
COMPRESS_CSS_HASHING_METHOD = 'content'
INSTALLED_APPS += (
'gunicorn',
)
# Parse database configuration from $DATABASE_URL
import dj_database_url
DATABASES['default'] = dj_database_url.config()
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
ALLOWED_HOSTS = ['.herokuapp.com']
else:
INSTALLED_APPS += (
'devserver',
)
DEVSERVER_MODULES = (
'devserver.modules.profile.ProfileSummaryModule',
)
| UTF-8 | Python | false | false | 2,897 | py | 14 | settings.py | 8 | 0.684501 | 0.68243 | 0 | 119 | 23.344538 | 66 |
datakind/Mar21-vaccine-uptake | 2,078,764,200,947 | 56bfe919cfae82bfda7dc26ba02f3d8b28932fd6 | ecc8e88da7374e34317cbbcd17e775955b3c8ba3 | /scrapers/reddit.py | 408506437c50ed84cc0637ea3245521e524a0282 | [
"MIT"
]
| permissive | https://github.com/datakind/Mar21-vaccine-uptake | 0ac9720e5affc0f8505364bc547d2fdc568b7680 | 97bcb8204b6cdef9cd1286a0cfecd20db79fb63b | refs/heads/main | 2023-03-17T04:14:10.473162 | 2021-03-07T19:31:58 | 2021-03-07T19:31:58 | 341,291,215 | 11 | 4 | MIT | false | 2021-03-07T19:31:58 | 2021-02-22T18:04:49 | 2021-03-07T18:50:58 | 2021-03-07T19:31:58 | 262,983 | 6 | 4 | 14 | Jupyter Notebook | false | false | from urllib.request import urlopen, HTTPError
import json
import pandas as pd
import random
import re
import time
from datetime import datetime
import logging
logging.basicConfig(filename='reddit_scraping.log', level=logging.INFO)
# collect posts from subreddits pertaining to the vaccine
# collect comments from those posts
# collect replies to those comments
sub_reddits = ['Coronavirus', 'vaxxhappened', 'antivax', 'VaccineMyths',
'science', 'news', 'COVID19', 'conspiracy', 'nyc', "Indiana", "Conservative", "illinois", "nashville", "LosAngeles"]
match_words =['covid-19 vaccine', 'vaccine', 'vaccination', 'coronavirus vaccine',
'covid vaccine', 'covid', 'coronavirus', 'virus', 'vax', 'doses', 'pfizer', 'moderna',
'johnson & johnson', 'J&J', 'vaccinators']
def parse_sub_reddits(sub_reddit: str,
match_words: list):
"""
Check all the posts in the subreddit for
Args:
sub_reddit (str): a subreddit to parse posts
match_words (list): a list of match words
Returns:
List of all posts in the subreddit mentioning vaccines
"""
url_to_open = f"https://www.reddit.com/r/{sub_reddit}.json"
success_status = 0
while success_status != 200:
try:
response = urlopen(url_to_open, timeout=10)
success_status = response.status
except HTTPError:
logging.info(f"HTTP Error for exceeding requests. Sleeping for 2 minutes at {datetime.today()}.")
time.sleep(120)
success_status = 400
entire_sub_reddit = json.loads(response.read())
posts = [post["data"] for post in entire_sub_reddit['data']['children'] if post["kind"] == "t3"]
_ids = []
post_dataframes = []
return_dict = {}
if len(posts) > 0:
for post in posts:
try:
title = post['title'].lower()
if re.findall(r"(?=("+'|'.join(match_words)+r"))", title):
_id = post['id']
norm_df = pd.json_normalize(post)
norm_df = norm_df[['id', 'subreddit', 'title', 'ups', 'downs', 'upvote_ratio', 'num_comments', 'author_fullname', 'created_utc', 'subreddit_subscribers']]
norm_df = norm_df.rename(columns = {'id': 'post_id', 'author_fullname': 'author'})
post_dataframes.append(norm_df)
if post['num_comments'] > 0:
_ids.append(_id)
except KeyError:
pass
if len(post_dataframes) > 0:
all_dfs = pd.concat(post_dataframes, ignore_index=True)
return_dict['data'] = all_dfs
return_dict['ids'] = _ids
else:
return_dict['data'] = None
return_dict['ids'] = None
else:
return_dict['data'] = None
return_dict['ids'] = None
return return_dict
def comment_data(post_id: str,
sub_reddit: str):
"""
Generates a pandas dataframe with scraped comments and replies data. Will concatenate replies with comments
post_id (str): post_id from valid posts that contain covid vaccine keywords
"""
url_to_open = f"https://www.reddit.com/r/{sub_reddit}/comments/{post_id}.json"
success_status = 0
while success_status != 200:
try:
response = urlopen(url_to_open, timeout=10)
success_status = response.status
except HTTPError:
logging.info(f"HTTP Error for exceeding requests. Sleeping for 2 minutes at {datetime.today()}.")
time.sleep(120)
success_status = 400
sub_reddit_page = json.loads(response.read())
comments_df = pd.json_normalize(sub_reddit_page[1]['data']['children'])
comments_df['post_id'] = post_id
comments_df = comments_df[['post_id', 'data.id', 'data.author_fullname', 'data.body', 'data.created',
'data.downs', 'data.ups']]
comments_df = comments_df.rename(columns = {'data.id': 'comment_id', 'data.author_fullname': 'author', 'data.body': 'comment',
'data.created': 'created_utc', 'data.downs': 'downs', 'data.ups': 'ups'})
comments_df['reply'] = 'N'
comments_df['comment_replied_id'] = ''
# get all replies
replies_list = []
for comment in sub_reddit_page[1]['data']['children']:
replies = comment.get('data').get('replies')
comment_id = comment.get('data').get('id')
if replies is None or replies == '':
pass
else:
replies_df = pd.json_normalize(replies['data']['children'])
try:
replies_df = replies_df[['data.id', 'data.author_fullname', 'data.body', 'data.created',
'data.downs', 'data.ups']]
except KeyError:
pass
replies_df = replies_df.rename(columns = {'data.id': 'comment_id', 'data.author_fullname': 'author', 'data.body': 'comment',
'data.created': 'created_utc', 'data.downs': 'downs', 'data.ups': 'ups'})
replies_df['reply'] = 'Y'
replies_df['comment_replied_id'] = comment_id
replies_df['post_id'] = post_id
replies_list.append(replies_df)
if len(replies_list) == 1:
all_replies = replies_list[0]
elif len(replies_list) > 1:
all_replies = pd.concat(replies_list, ignore_index = True)
else:
all_replies = None
column_order = [c for c in comments_df.columns]
comments_df = comments_df[column_order]
if all_replies is not None:
all_replies = all_replies[column_order]
all_comments_replies = pd.concat([comments_df, replies_df], ignore_index=True)
else:
all_comments_replies = comments_df
return all_comments_replies
def utc_to_date(x):
try:
new_value = datetime.strftime(datetime.fromtimestamp(x), '%Y-%m-%d %H:%M:%S')
except ValueError:
new_value = None
return new_value
def stream_to_db(subreddit: str,
df_dict: dict,
db_path: str) -> None:
"""
Appends to CSVs and removes any duplicated tweets or users before saving
Args:
df_dict (dict): return from scraping tweets
db_path (str): path to database files
"""
file_lkps = {'posts': f"reddit-{subreddit}-posts.csv",
'comments': f"reddit-{subreddit}-comments.csv"}
for _key in df_dict:
if df_dict.get(_key) is None:
pass
full_path = f"{db_path}/{file_lkps.get(_key)}"
df = df_dict.get(_key)
df.to_csv(full_path, index=False, encoding='utf-8')
logging.info(f"Saved {_key} data for subreddit {subreddit} at {datetime.today()}")
return None
if __name__ == "__main__":
for sr in sub_reddits:
logging.info(f'Starting scraping for subreddit {sr} at {datetime.today()}')
db_path = '/Users/philazar/Desktop/projects/covid-sentiment/data/reddit'
valid_posts = parse_sub_reddits(sub_reddit = sr, match_words= match_words)
posts_df = valid_posts.get('data')
if posts_df is not None:
posts_df['post_date'] = posts_df['created_utc'].apply(lambda x: utc_to_date(x))
stream_to_db(subreddit = sr,
df_dict = {'posts': posts_df},
db_path=db_path)
post_ids = valid_posts.get('ids')
if post_ids is not None:
comments_dataframes = []
for i in post_ids:
comments_dataframe = comment_data(post_id=i, sub_reddit= sr)
comments_dataframes.append(comments_dataframe)
all_comments = pd.concat(comments_dataframes, ignore_index =True)
all_comments['comment_date'] = all_comments['created_utc'].apply(lambda x: utc_to_date(x))
stream_to_db(subreddit = sr,
df_dict = {'comments': all_comments},
db_path=db_path)
logging.info(f'Finished scraping for subreddit {sr} at {datetime.today()}')
| UTF-8 | Python | false | false | 8,254 | py | 26 | reddit.py | 3 | 0.564696 | 0.55985 | 0 | 198 | 40.606061 | 174 |
Gabiiii/BaekJoon_python | 755,914,266,031 | 082fa8553166f16cf2547586b4487e1baf98b731 | 2da635c12f7585ba67bed7335066069c04a0bc0a | /21633.py | 4f70196e3627ba019d03f18259591b1ca4a3dfd4 | []
| no_license | https://github.com/Gabiiii/BaekJoon_python | cd25362df3171ffa71d0037ccb016bd9df8ad569 | 7117ffcf6711945e4aaca6f0366ac8b6a2d88f90 | refs/heads/master | 2021-08-07T16:57:43.788038 | 2021-07-06T02:35:43 | 2021-07-06T02:35:43 | 154,042,604 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | input=int(input())
res=(input*0.01)+25
if res<100:
res=100
elif res>2000:
res=2000
print(round(res,2)) | UTF-8 | Python | false | false | 110 | py | 84 | 21633.py | 83 | 0.654545 | 0.472727 | 0 | 7 | 14.857143 | 19 |
rudimk/construktdrops | 7,447,473,314,937 | ffb42a2f44a629c7697ce66fcb8caab6567b19d5 | 314e6227d27693b966f2236b392cabd7d433a4d6 | /app.py | 9b52511e0c9dc372d9e02ae5bdefab633399ebdd | [
"MIT"
]
| permissive | https://github.com/rudimk/construktdrops | a54ad1c069641744ab11976f9a48d084ae54432c | 9c5604050d16707aac9287ca8a32f4b415b9e0c4 | refs/heads/master | 2021-01-10T18:46:34.422042 | 2015-02-06T12:07:35 | 2015-02-06T12:07:35 | 30,347,324 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask, redirect, url_for, session, request, render_template
# flask-peewee bindings
from flask_peewee.db import Database
# flask-peewee auth
from flask_peewee.auth import Auth
from peewee import *
from flask_peewee.admin import Admin, ModelAdmin
from flask_oauth import OAuth
from settings import *
# configure our database
DATABASE = {
'name': 'data.db',
'engine': 'peewee.SqliteDatabase',
}
DEBUG = True
SECRET_KEY = 'ssshhhh'
app = Flask(__name__)
app.config.from_object(__name__)
#app.config.from_envvar('CONSTRUKT_DROPS_SETTINGS')
db = Database(app)
auth = Auth(app, db)
oauth = OAuth()
facebook = oauth.remote_app('facebook',
base_url='https://graph.facebook.com/',
request_token_url=None,
access_token_url='/oauth/access_token',
authorize_url='https://www.facebook.com/dialog/oauth',
consumer_key=FACEBOOK_APP_ID,
consumer_secret=FACEBOOK_APP_SECRET,
request_token_params={'scope': 'email'}
)
class Owner(db.Model):
name = CharField()
email = CharField(unique=True)
facebook_id = CharField(unique=True)
def __unicode__(self):
return self.name
'''def __init__(self, name, email, facebook_id):
self.name = name
self.email = email
self.facebook_id = facebook_id'''
class Drop(db.Model):
drop_owner = ForeignKeyField(Owner)
drop_address = TextField()
drop_lat = CharField()
drop_long = CharField()
drop_tags = CharField()
'''def __init__(self, drop_owner, drop_address, drop_tags):
self.drop_owner = drop_owner
self.drop_address = drop_address
self.drop_tags = drop_tags'''
admin = Admin(app, auth)
class OwnerAdmin(ModelAdmin):
columns = ('name', 'email', 'facebook_id',)
class DropAdmin(ModelAdmin):
columns = ('drop_owner', 'drop_address', 'drop_lat', 'drop_long', 'drop_tags',)
admin.register(auth.User)
admin.register(Owner, OwnerAdmin)
admin.register(Drop, DropAdmin)
admin.setup()
@app.route('/')
def index():
return render_template('index.html')
@app.route('/login')
def login():
return facebook.authorize(callback=url_for('facebook_authorized',
next=request.args.get('next') or request.referrer or None,
_external=True))
@app.route('/login/authorized')
@facebook.authorized_handler
def facebook_authorized(resp):
if resp is None:
return 'Access denied: reason=%s error=%s' % (
request.args['error_reason'],
request.args['error_description']
)
session['oauth_token'] = (resp['access_token'], '')
me = facebook.get('/me')
owner = Owner(name=me.data['name'], email=me.data['email'], facebook_id=me.data['id'])
owner.save()
session['logged_in_owner'] = me.data['id']
return 'Logged in as id=%s name=%s redirect=%s' % \
(me.data['id'], me.data['name'], request.args.get('next'))
@facebook.tokengetter
def get_facebook_oauth_token():
return session.get('oauth_token')
if __name__ == '__main__':
app.run(host='0.0.0.0')
| UTF-8 | Python | false | false | 2,933 | py | 3 | app.py | 1 | 0.673713 | 0.672349 | 0 | 109 | 25.908257 | 90 |
dr-dos-ok/Code_Jam_Webscraper | 9,405,978,395,603 | 4346d5a402bbd697b7ffdf6c6e425fdcf84f2abe | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_199/2256.py | fc39798aece4996b97ab72a18f91cc1de5703c0a | []
| no_license | https://github.com/dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
def reverse(S, K):
res = ''
for i in range(K):
res += '+' if S[i] == '-' else '-'
return res
def calc(S, K):
if not S:
return 0
if S[0] == '-':
if len(S) < K:
return -1
else:
count = calc(reverse(S[1:K], K-1) + S[K:], K)
if count < 0:
return -1
else:
return 1 + count
else:
return calc(S[1:], K)
def main():
sys.setrecursionlimit(1200)
#with open('A-small-attempt0.in', 'r') as infile:
with open('A-large.in', 'r') as infile:
with open('output.txt', 'w') as outfile:
T = int(infile.readline().strip())
for t in range(T):
line = infile.readline().split()
S = line[0]
K = int(line[1])
count = calc(S, K)
if count < 0:
outfile.write('Case #%d: IMPOSSIBLE\n' % (t+1))
else:
outfile.write('Case #%d: %d\n' % (t+1, count))
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 1,147 | py | 60,747 | 2256.py | 60,742 | 0.395815 | 0.37925 | 0 | 47 | 22.404255 | 67 |
csijun/nyu_vp | 9,603,546,878,151 | 7efec7a56582d0bc320872c91847620eecfd15aa | c17aed97808cd8048cab48138bc0afc4c7f72af8 | /nyu.py | 59d28a3887af8d4688dc47635a9caa9c9f553991 | []
| no_license | https://github.com/csijun/nyu_vp | 1cd878bf73cab8443f63f37ef209a63a6150f61e | 237a025b55d7655033db7a3e2adb998f0f00d8b5 | refs/heads/master | 2023-02-25T16:15:54.374968 | 2021-01-29T17:06:35 | 2021-01-29T17:06:35 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import glob
import os
import csv
import numpy as np
import scipy.io
from .lsd import lsd
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.2989, 0.5870, 0.1140])
class NYUVP:
def __init__(self, data_dir_path="./data", split='all', keep_data_in_memory=True, mat_file_path=None,
normalise_coordinates=False, remove_borders=True, extract_lines=False):
"""
NYU-VP dataset class
:param data_dir_path: Path where the CSV files containing VP labels etc. are stored
:param split: train, val, test, trainval or all
:param keep_data_in_memory: whether data shall be cached in memory
:param mat_file_path: path to the MAT file containing the original NYUv2 dataset
:param normalise_coordinates: normalise all point coordinates to a range of (-1,1)
:param remove_borders: ignore the white borders around the NYU images
:param extract_lines: do not use the pre-extracted line segments
"""
self.keep_in_mem = keep_data_in_memory
self.normalise_coords = normalise_coordinates
self.remove_borders = remove_borders
self.extract_lines = extract_lines
self.vps_files = glob.glob(os.path.join(data_dir_path, "vps*"))
self.lsd_line_files = glob.glob(os.path.join(data_dir_path, "lsd_lines*"))
self.labelled_line_files = glob.glob(os.path.join(data_dir_path, "labelled_lines*"))
self.vps_files.sort()
self.lsd_line_files.sort()
self.labelled_line_files.sort()
if split == "train":
self.set_ids = list(range(0, 1000))
elif split == "val":
self.set_ids = list(range(1000, 1224))
elif split == "trainval":
self.set_ids = list(range(0, 1224))
elif split == "test":
self.set_ids = list(range(1224, 1449))
elif split == "all":
self.set_ids = list(range(0, 1449))
else:
assert False, "invalid split: %s " % split
self.dataset = [None for _ in self.set_ids]
self.data_mat = None
if mat_file_path is not None:
self.data_mat = scipy.io.loadmat(mat_file_path, variable_names=["images"])
fx_rgb = 5.1885790117450188e+02
fy_rgb = 5.1946961112127485e+02
cx_rgb = 3.2558244941119034e+02
cy_rgb = 2.5373616633400465e+02
K = np.matrix([[fx_rgb, 0, cx_rgb], [0, fy_rgb, cy_rgb], [0, 0, 1]])
if normalise_coordinates:
S = np.matrix([[1. / 320., 0, -1.], [0, 1. / 320., -.75], [0, 0, 1]])
K = S * K
self.Kinv = K.I
def __len__(self):
return len(self.dataset)
def __getitem__(self, key):
"""
Returns a sample from the dataset.
:param key: image ID within the selected dataset split
:return: dictionary containing vanishing points, line segments, original image
"""
id = self.set_ids[key]
datum = self.dataset[key]
if datum is None:
lsd_line_segments = None
if self.data_mat is not None:
image_rgb = self.data_mat['images'][:,:,:,id]
image = rgb2gray(image_rgb)
if self.remove_borders:
image_ = image[6:473,7:631].copy()
else:
image_ = image
if self.extract_lines:
lsd_line_segments = lsd.detect_line_segments(image_)
if self.remove_borders:
lsd_line_segments[:,0] += 7
lsd_line_segments[:,2] += 7
lsd_line_segments[:,1] += 6
lsd_line_segments[:,3] += 6
else:
image_rgb = None
if lsd_line_segments is None:
lsd_line_segments = []
with open(self.lsd_line_files[id], 'r') as csv_file:
reader = csv.DictReader(csv_file, delimiter=' ')
for line in reader:
p1x = float(line['point1_x'])
p1y = float(line['point1_y'])
p2x = float(line['point2_x'])
p2y = float(line['point2_y'])
lsd_line_segments += [np.array([p1x, p1y, p2x, p2y])]
lsd_line_segments = np.vstack(lsd_line_segments)
labelled_line_segments = []
with open(self.labelled_line_files[id], 'r') as csv_file:
reader = csv.DictReader(csv_file, delimiter=' ')
for line in reader:
lines_per_vp = []
for i in range(1,5):
key_x1 = 'line%d_x1' % i
key_y1 = 'line%d_y1' % i
key_x2 = 'line%d_x2' % i
key_y2 = 'line%d_y2' % i
if line[key_x1] == '':
break
p1x = float(line[key_x1])
p1y = float(line[key_y1])
p2x = float(line[key_x2])
if line[key_y2] == '433q':
assert False, self.labelled_line_files[id]
p2y = float(line[key_y2])
ls = np.array([p1x, p1y, p2x, p2y])
lines_per_vp += []
if self.normalise_coords:
ls[0] -= 320
ls[2] -= 320
ls[1] -= 240
ls[3] -= 240
ls[0:4] /= 320.
lines_per_vp += [ls]
lines_per_vp = np.vstack(lines_per_vp)
labelled_line_segments += [lines_per_vp]
if self.normalise_coords:
lsd_line_segments[:,0] -= 320
lsd_line_segments[:,2] -= 320
lsd_line_segments[:,1] -= 240
lsd_line_segments[:,3] -= 240
lsd_line_segments[:,0:4] /= 320.
line_segments = np.zeros((lsd_line_segments.shape[0], 7+2+3+3))
for li in range(line_segments.shape[0]):
p1 = np.array([lsd_line_segments[li,0], lsd_line_segments[li,1], 1])
p2 = np.array([lsd_line_segments[li,2], lsd_line_segments[li,3], 1])
centroid = 0.5*(p1+p2)
line = np.cross(p1, p2)
line /= np.linalg.norm(line[0:2])
line_segments[li, 0:3] = p1
line_segments[li, 3:6] = p2
line_segments[li, 6:9] = line
line_segments[li, 9:12] = centroid
vp_list = []
vd_list = []
with open(self.vps_files[id]) as csv_file:
reader = csv.reader(csv_file, delimiter=' ')
for ri, row in enumerate(reader):
if ri == 0: continue
vp = np.array([float(row[1]), float(row[2]), 1])
if self.normalise_coords:
vp[0] -= 320
vp[1] -= 240
vp[0:2] /= 320.
vp_list += [vp]
vd = np.array(self.Kinv * np.matrix(vp).T)
vd /= np.linalg.norm(vd)
vd_list += [vd]
vps = np.vstack(vp_list)
vds = np.vstack(vd_list)
datum = {'line_segments': line_segments, 'VPs': vps, 'id': id, 'VDs': vds, 'image': image_rgb,
'labelled_lines': labelled_line_segments}
for vi in range(datum['VPs'].shape[0]):
datum['VPs'][vi,:] /= np.linalg.norm(datum['VPs'][vi,:])
if self.keep_in_mem:
self.dataset[key] = datum
return datum
if __name__ == '__main__':
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser(
description='NYU-VP dataset visualisation',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--mat_file', default=None,
help='Dataset directory')
opt = parser.parse_args()
mat_file_path = opt.mat_file
if mat_file_path is None:
print("Specify the path where your 'nyu_depth_v2_labeled.mat' " +
"is stored using the --mat_file option in order to load the original RGB images.")
dataset = NYUVP("./data", mat_file_path=mat_file_path, split='all', normalise_coordinates=False,
remove_borders=True)
show_plots = True
max_num_vp = 0
all_num_vps = []
for idx in range(len(dataset)):
vps = dataset[idx]['VPs']
num_vps = vps.shape[0]
print("image no. %04d -- vps: %d" % (idx, num_vps))
all_num_vps += [num_vps]
if num_vps > max_num_vp: max_num_vp = num_vps
ls = dataset[idx]['line_segments']
vp = dataset[idx]['VPs']
if show_plots:
image = dataset[idx]['image']
ls_per_vp = dataset[idx]['labelled_lines']
colours = ['#e6194b', '#4363d8', '#aaffc3', '#911eb4', '#46f0f0', '#f58231', '#3cb44b', '#f032e6',
'#008080', '#bcf60c', '#fabebe', '#e6beff', '#9a6324', '#fffac8', '#800000', '#aaffc3',
'#808000', '#ffd8b1', '#000075', '#808080', '#ffffff', '#000000']
fig = plt.figure(figsize=(16,5))
ax1 = plt.subplot2grid((1,3), (0,0))
ax2 = plt.subplot2grid((1,3), (0,1))
ax3 = plt.subplot2grid((1,3), (0,2))
ax1.set_aspect('equal', 'box')
ax2.set_aspect('equal', 'box')
ax3.set_aspect('equal', 'box')
ax1.axis('off')
ax2.axis('off')
ax3.axis('off')
ax1.set_title('original image')
ax2.set_title('labelled line segments per VP')
ax3.set_title('extracted line segments')
if image is not None:
ax1.imshow(image)
ax2.imshow(rgb2gray(image), cmap='Greys_r')
else:
ax1.text(0.5, 0.5, 'not loaded', horizontalalignment='center', verticalalignment='center',
transform=ax1.transAxes, fontsize=12, fontweight='bold')
for vpidx, lss in enumerate(ls_per_vp):
c = colours[vpidx]
for l in lss:
if image is None:
l[1] *= -1
l[3] *= -1
ax2.plot([l[0], l[2]], [l[1], l[3]], '-', c=c, lw=5)
for li in range(ls.shape[0]):
ax3.plot([ls[li,0], ls[li,3]], [-ls[li,1], -ls[li,4]], 'k-', lw=2)
fig.tight_layout()
plt.show()
print("num VPs: ", np.sum(all_num_vps), np.sum(all_num_vps)*1./len(dataset), np.max(all_num_vps))
plt.rcParams.update({'font.size': 18})
plt.figure(figsize=(9, 3))
values, bins, patches = plt.hist(all_num_vps, bins=[0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5])
print(values)
print(bins)
plt.show()
| UTF-8 | Python | false | false | 11,104 | py | 4,343 | nyu.py | 1 | 0.485591 | 0.444074 | 0 | 287 | 37.689895 | 110 |
seenureddy/fyers_notification | 12,103,217,861,346 | 7fc0047e5cbbf11219db23f34492df92ab6fd231 | 8ec191fee0449297cd2ee48f04a3010d93f8105a | /fys_notification/services/email.py | 79f4e0c2775a8c23ff3d117bd4347635505f075c | []
| no_license | https://github.com/seenureddy/fyers_notification | 92772f80d1369876835513fe5203933c4f70bf41 | 350195c8debc72070b3db2ebd30c451e57e85247 | refs/heads/main | 2023-03-29T16:45:47.345290 | 2021-03-22T08:25:56 | 2021-03-22T08:25:56 | 349,616,121 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Third party imports
from datetime import date, timedelta
import smtplib
import jinja2
import logging
import csv
from flask import abort
from base64 import b64encode
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# Local Imports
from fys_notification.config import (
FYS_SMPT_EMAIL, SMPT_SERVER, SMPT_PORT, FYS_SMPT_PASSWORD, FYS_ADMIN_EMAIL
)
from fys_notification.models.db_models import db
from fys_notification.models.email_data_models import EmailDataAnalytics
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') # noqa: W1202
email_logger = logging.getLogger("EmailLog")
def send_internal_email(total_sent_emails):
admin_req_parse = dict(
user_name="Fyres Team",
mail_to=FYS_ADMIN_EMAIL,
subject="Analytics: Number of emails sent.",
body=total_sent_emails,
message_title="Email Sent Analytics."
)
send_default_email(admin_req_parse, template='admin.html')
def send_client_email(req_parse):
"""
Send client email
:param req_parse:
:return:
"""
message_title = "Thanks you for contacting us."
req_parse['message_title'] = message_title
return send_default_email(req_parse)
def send_default_email(req_parse, template=None):
message_title = req_parse['message_title']
user_name = req_parse['user_name']
# Create the root message and fill in the from, to, and subject headers
tos = req_parse['mail_to'].split(',')
msg_base = MIMEMultipart('related')
msg_base['Subject'] = f"Subject: {req_parse['subject']}"
msg_base['From'] = FYS_SMPT_EMAIL
msg_base['To'] = ", ".join(tos)
if req_parse.get('cc_mail_to'):
cc_recipients = req_parse['cc_mail_to'].split(',')
msg_base['Cc'] = ", ".join(cc_recipients)
# Adding the CC Email Address.
tos.extend(cc_recipients)
if req_parse.get('bcc_mail_to'):
bcc_recipients = req_parse['bcc_mail_to'].split(',')
msg_base['Bcc'] = ", ".join(bcc_recipients)
# Adding the BCC Email Address.
tos.extend(bcc_recipients)
msg_base.preamble = 'This is a multi-part message in MIME format.'
# Encapsulate the plain and HTML versions of the message body in an
# 'alternative' part, so message agents can decide which they want to display.
msgAlternative = MIMEMultipart('alternative')
msg_base.attach(msgAlternative)
message = f"{req_parse['body']}"
# Plain Text
msgText = MIMEText('You are missing the email format and images.\n' + message)
msgAlternative.attach(msgText)
# Create HTML From Template
# If regular email use default template.
if template is None:
html = render_template(
'base_client_email.html', message=message, message_title=message_title, user_name=user_name)
else:
email_logger.info(f'template {template}')
html = render_template(template, message=message, message_title=message_title, user_name=user_name)
# HTML
# We reference the image in the IMG SRC attribute by the ID we give it below
msgText = MIMEText(html, 'html')
msgAlternative.attach(msgText)
return send_email(msg_base, tos)
def csv_file_read_send_email(csv_file_read):
data = {}
with open(csv_file_read) as csv_file:
csv_reader = csv.DictReader(csv_file)
line_count = 0
for row in csv_reader:
if line_count == 0:
email_logger.info(f'Column names are {", ".join(csv_reader.fieldnames)}')
line_count += 1
email_logger.info(f'{row}')
line_count += 1
row['mail_to'] = row['mail_to'].replace('[', '').replace(']', '')
row['cc_mail_to'] = row['cc_mail_to'].replace('[', '').replace(']', '')
row['bcc_mail_to'] = row['bcc_mail_to'].replace('[', '').replace(']', '')
# sending an email
result = send_client_email(row)
email_logger.info(f'Processed {line_count} lines.')
data[row['user_name']] = result
# data commit
email_data = EmailDataAnalytics(email_sent_number=line_count)
db.session.add(email_data)
db.session.commit()
return data
def send_email(msg_base, tos):
"""
:param msg_base:
:param tos:
:return:
"""
# Create a secure SSL context
# Send the email (this example assumes SMTP authentication is required)
smtp = smtplib.SMTP(SMPT_SERVER)
smtp.connect(SMPT_SERVER, port=SMPT_PORT)
# Secure the connection
smtp.starttls()
if not (FYS_SMPT_EMAIL and FYS_SMPT_PASSWORD):
raise abort(400, "No login credentials provided")
smtp.login(FYS_SMPT_EMAIL, FYS_SMPT_PASSWORD)
# tos = req_parse['mail_to'].split(',')
email_logger.info(tos)
try:
email_logger.info("Sending an email")
smtp.sendmail(FYS_SMPT_EMAIL, tos, msg_base.as_string())
smtp.quit()
except Exception as error:
return error
return "Successfully Sent Email"
def render_template_dict(template, render_dict):
""" renders a Jinja template into HTML """
templateLoader = jinja2.FileSystemLoader(searchpath="fys_notification/static/templates")
templateEnv = jinja2.Environment(loader=templateLoader)
templ = templateEnv.get_template(template)
return templ.render(render_dict)
def render_template(template, **kwargs):
""" renders a Jinja template into HTML """
templateLoader = jinja2.FileSystemLoader(searchpath="fys_notification/static/templates")
templateEnv = jinja2.Environment(loader=templateLoader)
templ = templateEnv.get_template(template)
return templ.render(**kwargs)
def get_sent_email_analytics_data():
    """Sum today's sent-email counters and report the total via internal email."""
    today = date.today()
    tomorrow = today + timedelta(days=1)
    # All analytics rows stamped some time today ([today, tomorrow)).
    rows = db.session.query(EmailDataAnalytics).filter(
        EmailDataAnalytics.lastest_date_time >= today,
        EmailDataAnalytics.lastest_date_time < tomorrow,
    ).all()
    email_logger.info(f"data: {rows}")
    sent_email_analytics = {'total_sent_emails': sum(row.email_sent_number for row in rows)}
    send_internal_email(sent_email_analytics['total_sent_emails'])
| UTF-8 | Python | false | false | 6,298 | py | 18 | email.py | 11 | 0.654811 | 0.651318 | 0 | 187 | 32.679144 | 122 |
venkatajagadeesh123/python_snippets | 10,823,317,619,390 | cc4ef255ceb26fe11216b3ea594d5c93eb066cf0 | 5a7737e401a1c4a0df1cd78b66c4c827320f1d14 | /dictionary.py | 201d40cab7569f753d7451ebffa76edac68896af | []
| no_license | https://github.com/venkatajagadeesh123/python_snippets | 41ada001730bda070d0984b6931b5a94995ac1d9 | c2c7394b80a86f1bc4ac3c051d5bc655414f8fbc | refs/heads/master | 2021-01-02T09:18:58.512713 | 2017-08-02T06:26:28 | 2017-08-02T06:26:28 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# A handful of small dictionary idioms.

student = {'name': 'John', 'age': 25, 'courses': ['Math', 'CompSci']}

# Walk every (key, value) pair of the dict.
for field, info in student.items():
    print(field, info)

citys = {'three': 3, 'one': 1, 'two': 2}

# method 1: list of keys ordered by the value they map to
k = sorted(citys, key=lambda name: citys[name])
# method 2: (key, value) pairs ordered by value
n = sorted(citys.items(), key=lambda pair: pair[1])

print(k)

mylist = [1, 2, 3, 4, 5, 1, 2, 1, 3]

# Count how many EXTRA times each value appears (first sighting counts as 0).
d = {}
for item in mylist:
    if item in d:
        d[item] += 1
    else:
        d[item] = 0

# Same idea with dict.get: this variant counts every occurrence.
d = {}
for item in mylist:
    d[item] = d.get(item, 0) + 1

# Group names by their length, e.g. {7: ['matthe0', 'jhonson'], ...}
names = ['matdeman', 'rachel', 'matthe0', 'jhonson']

d = {}
for person in names:
    bucket = len(person)
    if bucket not in d:
        d[bucket] = []
    d[bucket].append(person)

# setdefault collapses the "create bucket if missing" dance into one call.
d = {}
for person in names:
    d.setdefault(len(person), []).append(person)
| UTF-8 | Python | false | false | 904 | py | 11 | dictionary.py | 8 | 0.564159 | 0.537611 | 0 | 58 | 14.5 | 69 |
maayanbrodsky/questions-app | 2,001,454,788,926 | 68354e0024bfe87bf78d426cba0351cf667218b5 | c8e2e3afc35a8620a2d6c10767aa1123812ed2fb | /question_db.py | 31e38e91f6395847404a3900ae21ecf616dcdea0 | []
| no_license | https://github.com/maayanbrodsky/questions-app | 69ca5955bbaae89c831e26eb680f5566ed2dbcac | 3de174aaf82202f2dbe43bb9298ea70bfc66a403 | refs/heads/master | 2023-01-24T11:16:08.711673 | 2020-12-10T20:10:53 | 2020-12-10T20:10:53 | 319,647,369 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
from dotenv import load_dotenv
from flask import render_template, request
from Models import Questions, Topics, Users
from app import app
load_dotenv()
app.secret_key = os.environ.get('SECRET_KEY')
@app.route("/")
def home():
return render_template('home.j2')
@app.route("/about", methods=['GET', 'POST'])
def about():
return render_template('about.j2')
@app.route("/register", methods=['GET', 'POST'])
def register():
if request.method == 'GET':
return render_template('register.j2')
elif request.method == 'POST':
details = dict(request.form)
print(details)
Users.create(username=details['username'],
password=details['password'],
email=details['email'],
institution=details['institution'])
return render_template('about.j2')
@app.route("/read", methods=['GET', 'POST'])
def read():
if request.method == 'GET':
users = Users.select()
return render_template('read.j2', users=users)
@app.route("/delete", methods=['GET'])
def delete():
if request.method == 'GET':
id = request.args.get('id') #This gets the id from the "read" template "<td><a href="/delete?id={{ user.id }}">DELETE</a>"
delRows = Users.delete().where(Users.id == id).execute()
if delRows > 0:
return render_template('delsuccess.j2')
else:
return render_template('delfailed.j2')
@app.route("/update", methods=['GET', 'POST'])
def update():
if request.method == 'GET':
id = request.args.get('id') #This gets the id from the "read" template "<td><a href="/update?id={{ user.id }}">DELETE</a>"
user = Users.select().where(Users.id == id).get()
return render_template('update.j2', user=user)
elif request.method == 'POST':
details = dict(request.form)
Users.update(username=details['username'],
password=details['password'],
email=details['email'],
institution=details['institution']).where(Users.id == details['id']).execute()
return render_template('about.j2')
@app.route("/enter_question", methods=['GET', 'POST'])
def enter_question():
if request.method == 'GET':
return render_template('enter_question.j2')
elif request.method == 'POST':
details = dict(request.form)
Questions.create(textbook=details['textbook'],
chapter=details['chapter'],
section=details['section'],
submitted_by=details['submitted_by'],
question_text=details['question_text'],
topic=details['topic'],)
return render_template('about.j2')
@app.route("/topics", methods=['GET', 'POST'])
def enter_topic():
if request.method == 'GET':
topics = Topics.select()
return render_template('topics.j2', topics=topics)
elif request.method == 'POST':
details = dict(request.form)
print(details)
Topics.create(topic=details['topic'])
return render_template('home.j2')
if __name__ == '__main__':
    # Run the Flask development server when executed directly.
    app.run()
| UTF-8 | Python | false | false | 3,202 | py | 17 | question_db.py | 4 | 0.582761 | 0.578389 | 0 | 99 | 31.343434 | 131 |
harshhemani/fprint | 274,877,921,189 | 1356d45cea83f09e5e770fe769c819ebcdd50194 | c7ccfe8781fbe84a6b15a9e80d489c2a21f22ff9 | /orientation.py | c495d0e4991b36f9d36c4f133dfa1ca8a0df437c | [
"MIT"
]
| permissive | https://github.com/harshhemani/fprint | 4a964d774f228d5344dec4957f8d96653bfda440 | 102ff331505627f1708f781ca4519d1a910300fd | refs/heads/master | 2021-01-19T13:30:19.249467 | 2015-04-21T09:08:39 | 2015-04-21T09:08:39 | 34,307,488 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
@author: Harsh Hemani
@date: 21/04/2015
@SoP: Mainly here to provide `get_orientation_map` function
This function takes an image matrix as input (gray level)
and returns an orientation-map, i.e., a matrix that contains
the orientation (theta) value for each pixel in the image.
Orientation denotes the tangent of the ridges at the point.
"""
import sys
import Image
import numpy as np
from scipy import ndimage
from scipy.signal import convolve2d
from pylab import imshow, show
def binarize(img, threshold=150):
    """Threshold *img* into an inverted binary image.

    Pixels brighter than *threshold* (white background) become 0;
    everything else (dark ridges) becomes 255.
    """
    out = np.full(img.shape, 255.0)
    out[img > threshold] = 0
    return out
def get_orientation_map(image_matrix):
    """
    Estimate the local ridge orientation (tangent angle, radians) for each
    pixel of a grayscale fingerprint image.

    The image is binarized, Sobel gradients Gx/Gy are taken, block sums of
    (Gx^2 - Gy^2) and 2*Gx*Gy are accumulated over a (2W+1)x(2W+1) window,
    and theta = 0.5*arctan(Gyy/Gxx) is smoothed via sin/cos averaging with
    a 5x5 Gaussian kernel.
    """
    bin_img_mx = binarize(image_matrix)
    # Sobel along axis 0 / axis 1 of the binarized image.
    Gx = ndimage.sobel(bin_img_mx, 0)
    Gy = ndimage.sobel(bin_img_mx, 1)
    # grad_img = np.hypot(Gx, Gy)
    # grad_img *= 255.0 / np.max(grad_img)
    # imshow(grad_img)
    # show()
    Gxx = np.zeros(Gx.shape)
    Gyy = np.zeros(Gy.shape)
    theta = np.zeros(Gx.shape)
    # Half-width of the averaging window (3x3 neighborhood for W = 1).
    W = 1
    # NOTE(review): this per-pixel Python double loop is O(rows*cols*(2W+1)^2);
    # it could be replaced by a convolution with a ones((2W+1, 2W+1)) kernel.
    for i in range(Gxx.shape[0]):
        for j in range(Gxx.shape[1]):
            # Clip the window to the image borders; out-of-bounds pixels
            # simply contribute nothing to the sums.
            lower_k = i - W
            lower_l = j - W
            upper_k = i + W
            upper_l = j + W
            if lower_k < 0:
                lower_k = 0
            if lower_l < 0:
                lower_l = 0
            if upper_k >= Gxx.shape[0]:
                upper_k = Gxx.shape[0] - 1
            if upper_l >= Gxx.shape[1]:
                upper_l = Gxx.shape[1] - 1
            for k in range(lower_k, upper_k+1):
                for l in range(lower_l, upper_l+1):
                    Gxx[i][j] += (Gx[k][l] ** 2) - (Gy[k][l] ** 2)
                    Gyy[i][j] += 2.0 * Gx[k][l] * Gy[k][l]
            # Guard the division: near-zero Gxx is treated as a vertical angle.
            # NOTE(review): np.arctan folds angles into (-pi/2, pi/2);
            # arctan2(Gyy, Gxx) would keep the full quadrant — confirm intent.
            if abs(Gxx[i][j]) <= 1.0E-10:
                theta[i][j] = np.pi / 2.0
            else:
                theta[i][j] = 0.5 * np.arctan(Gyy[i][j]/Gxx[i][j])
    # now average the thetas, cuz image is noisy (eg: broken ridges)
    # 5x5 Gaussian built from the binomial row [1, 4, 6, 4, 1] (sums to 256).
    gauss_line = np.array([1, 4, 6, 4, 1])[:, np.newaxis]
    kernel = (1.0 / 256) * np.dot(gauss_line, gauss_line.T)
    # Average in the doubled-angle domain so 0 and pi wrap correctly.
    conv_numerator = convolve2d(np.sin(2*theta), kernel, mode='same')
    conv_denomenat = convolve2d(np.cos(2*theta), kernel, mode='same')
    # NOTE(review): conv_denomenat can be ~0 where cos(2*theta) averages out;
    # numpy then emits a divide warning and yields inf/nan — confirm acceptable.
    theta_prime = 0.5 * np.arctan(conv_numerator / conv_denomenat)
    return theta_prime
# CLI entry point: load a grayscale image, compute the orientation map,
# normalize it to [0, 255] and display it.  (Python 2 syntax.)
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print '\nSyntax:\n\tpython', sys.argv[0], '<path/to/image>\n'
        sys.exit()
    image_path = sys.argv[1]
    print 'Loading image..'
    # Convert to 8-bit grayscale ('L') before processing.
    image_mx = np.asarray(Image.open(image_path).convert('L'))
    print 'Image loaded.'
    theta_map = get_orientation_map(image_mx)
    theta_map_min = np.min(theta_map)
    theta_map_max = np.max(theta_map)
    # NOTE(review): this scales by max rather than (max - min), so the top of
    # the displayed range is not exactly 255 unless min == 0 — confirm intent.
    theta_im = 255 * (theta_map - theta_map_min*np.ones(theta_map.shape)) / theta_map_max
    imshow(theta_im, 'gray')
    show()
    print 'Done!'
| UTF-8 | Python | false | false | 3,118 | py | 2 | orientation.py | 2 | 0.560295 | 0.536241 | 0 | 90 | 33.633333 | 89 |
zulfadlizainal/5G-NR-Data-Rates | 9,929,964,417,618 | 823e8b31bd213bf9abfa1355cc1011136013b843 | 0659288c3f9c5022c5f7c45f65b2421184cbdd60 | /Throughput Simulation/5GNRTputSim.py | 83c716e7482e8e33f96c3a319174b61eebee6eb2 | []
| no_license | https://github.com/zulfadlizainal/5G-NR-Data-Rates | 00020643eacd97e68ab9955281e437505537754c | c03309684b0c4a360008aff40fba62d41871508e | refs/heads/master | 2021-10-25T23:04:28.782429 | 2021-10-19T06:38:59 | 2021-10-19T06:38:59 | 200,147,497 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Created by github.com/zulfadlizainal
import pandas as pd
import numpy as np
# Load link-adaptation reference tables (one sheet per radio metric) and the
# MCS-to-modulation table.
df_rsrp = pd.read_excel('Ref_LA.xlsx', 'RSRP')
df_rsrq = pd.read_excel('Ref_LA.xlsx', 'RSRQ')
df_sinr = pd.read_excel('Ref_LA.xlsx', 'SINR')
df_mcs = pd.read_excel('Ref_MCSxR.xlsx')
# Calculate Code Rate (R)
df_mcs['R'] = df_mcs['R x 1024'] / 1024
# Input
prb_num = int(input('Max PRB: '))
# Calculate RSRP
# NOTE(review): .map(df_mcs['R']) keys on df_mcs's positional index, so this
# assumes the MCS table rows are ordered 0..N with row index == MCS — confirm.
df_rsrp['R'] = df_rsrp['MCS'].map(df_mcs['R']) # MAP based on Index (Index = MCS in this case)
df_rsrp['QM'] = df_rsrp['MCS'].map(df_mcs['QM']) # Map based on Index (Index = MCS in this case)
# Resource elements per PRB after DMRS and RRC overhead.
df_rsrp['nre_prime'] = (12 * df_rsrp['SYM LENGTH AVG (BASED ON SLIV)']) - df_rsrp['DMRS PER PRB'] - df_rsrp['RRC OH']
df_rsrp['nre'] = (np.minimum(156,df_rsrp['nre_prime'])) * (df_rsrp['PRB AVG (%)'] * prb_num) * df_rsrp['SLOT (%)']
df_rsrp['ninfo'] = df_rsrp['nre'] * df_rsrp['R'] * df_rsrp['QM'] * df_rsrp['LAYER']
# FIXME: this line is unfinished — np.maximum(24, ) is missing its second
# argument and raises TypeError at runtime.  Presumably the 3GPP TS 38.306
# quantization step max(24, ...) was intended here — needs completing.
df_rsrp['ninfo_prime'] = np.maximum(24, )
# print(' ')
# print('ありがとうございました!!')
# print('Download this program: https://github.com/zulfadlizainal')
# print('Author: https://www.linkedin.com/in/zulfadlizainal')
# print(' ')
| UTF-8 | Python | false | false | 1,156 | py | 4 | 5GNRTputSim.py | 2 | 0.626549 | 0.613274 | 0 | 32 | 34.3125 | 117 |
nv-hiep/ISMDust | 738,734,422,869 | 48cc584d7b58c54bf95ac9b0a15d3c5d1e3bf4e2 | bb6bc9822d03ef0bc2180c925fcd65b7789822ed | /source/dust/xoh/xoh_simple/hist_xoh.py | e54be44a924a6ec81e95a0f785231d4f23acf8d8 | []
| no_license | https://github.com/nv-hiep/ISMDust | d5b2a971a48c2937e912b41c90806168caead028 | 319a413965f409bfec75ad3deb17eae3ad4defce | refs/heads/master | 2021-08-06T21:10:57.320102 | 2017-11-07T04:52:16 | 2017-11-07T04:52:16 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os, sys
sys.path.insert(0, os.getenv("HOME")+'/ISMDust/common') # add folder of Class
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
import healpy as hp
import pylab as pl
import module as md
import copy
from numpy import array
from restore import restore
from plotting import cplot
from mpfit import mpfit
## Read info of XOH from tau, Ebv, Radiance #
 #
 # params string fname Filename
 # return tuple of 17 arrays: src, l, b, xoh, xoher, nh2, nh2er, nhi, nhier,
 #        nh, nher, cnm, cnmer, noh, noher, av, aver
 #
 # version 4/2017
 # Author Van Hiep ##
def read_xoh(fname = 'xoh_from_tau.txt'):
	# Column layout of the data file ('s' = string, 'i' = int, 'f' = float);
	# the first 4 lines of the file are header and are skipped by restore().
	cols = ['idx', 'src', 'l', 'b', 'xoh', 'xoher', 'nh2', 'nh2er', 'nhi', 'nhier', 'nh', 'nher', 'cnm', 'cnmer', 'noh', 'noher', 'av', 'aver']
	fmt  = ['i', 's', 'f','f', 'f' , 'f' , 'f' , 'f' , 'f' , 'f' , 'f' , 'f' , 'f' , 'f' , 'f' , 'f' , 'f' , 'f' ]
	data = restore(fname, 4, cols, fmt)
	dat  = data.read(asarray=True)
	# (removed the unused local `xoh = dat['xoh']` — dead code)
	return dat['src'], dat['l'], dat['b'], dat['xoh'], dat['xoher'], dat['nh2'], dat['nh2er'], dat['nhi'], dat['nhier'], dat['nh'], dat['nher'], \
	dat['cnm'], dat['cnmer'], dat['noh'], dat['noher'], dat['av'], dat['aver']
#================= MAIN ========================#
# Load X(OH) estimates derived from three dust proxies:
# tau353 (t_*), E(B-V) (e_*) and radiance (r_*).
t_src, t_xl, t_xb, t_xoh, t_xoher, t_nh2, t_nh2er, t_nhi, t_nhier, t_nh, t_nher, t_cnm, t_cnmer, t_noh, t_noher, t_av, t_aver = read_xoh(fname = 'xoh_from_tau.txt')
e_src, e_xl, e_xb, e_xoh, e_xoher, e_nh2, e_nh2er, e_nhi, e_nhier, e_nh, e_nher, e_cnm, e_cnmer, e_noh, e_noher, e_av, e_aver = read_xoh(fname = 'xoh_from_ebv2011_plot.txt')
r_src, r_xl, r_xb, r_xoh, r_xoher, r_nh2, r_nh2er, r_nhi, r_nhier, r_nh, r_nher, r_cnm, r_cnmer, r_noh, r_noher, r_av, r_aver = read_xoh(fname = 'xoh_from_radiance_plot.txt')
print len(t_xoh)
print len(e_xoh)
print len(r_xoh)
print t_xoh
### X(OH) to e-7 ###
# Rescale the X(OH) values and their errors by 10 (to units of 1e-7).
r_xoh   = r_xoh*10.
t_xoh   = t_xoh*10.
e_xoh   = e_xoh*10.
r_xoher = r_xoher*10.
t_xoher = t_xoher*10.
e_xoher = e_xoher*10.
# Pull out the single sightline 3C132 (highlighted in the figures below).
filtr  = (t_src=='3C132')   ## filter
xt_av  = t_av[filtr]
xt_xoh = t_xoh[filtr]
xt_nh2 = t_nh2[filtr]
xe_xoh = e_xoh[filtr]
xe_nh2 = e_nh2[filtr]
xr_xoh = r_xoh[filtr]
xr_nh2 = r_nh2[filtr]
print '3C132'
print xt_av
print xt_xoh
## For Plotting ##
# Figure 1: X(OH) vs Av scatter (all three proxies) with a marginal
# histogram of X(OH) on the right; saved as xoh_vs_av.eps.
fts       = 42
labelsize = 28
majorlght = 9
minorlght = 5
lgsize    = 34
# NOTE: the computed y-range/bin count are immediately overridden by the
# hand-tuned values on the following lines.
min_y_data, max_y_data = np.min(r_xoh*100.), np.max(r_xoh*100.)
min_y_data, max_y_data = 0., 90.
binsize    = 6.25
num_y_bins = np.floor((max_y_data - min_y_data) / binsize)
num_y_bins = 13
# Axes definitions
nullfmt       = plt.NullFormatter()
left, width   = 0.1, 0.7
bottom, height = 0.1, 0.8
bottom_h = left_h = left + width + 0.01
rect_scatter = [left, bottom, width, height]
rect_histx   = [left, bottom_h, width, 0.4]
rect_histy   = [left_h, bottom, 0.2, height]
# Generate initial figure, scatter plot, and histogram quadrants
# start with a rectangular Figure
mpl.rcParams['axes.linewidth'] = 2.5
fig       = plt.figure(1, figsize=(18, 10))
axScatter = plt.axes(rect_scatter)
axHistY   = plt.axes(rect_histy)
# Remove labels from histogram edges touching scatter plot
axHistY.yaxis.set_major_formatter(nullfmt)
major_xticks = np.arange(0., 12., 1.)
minor_xticks = np.arange(0., 12., 0.25)
major_yticks = np.arange(0., 12., 1.)
minor_yticks = np.arange(0., 12., 0.5)
# Draw scatter plot
axScatter.errorbar(r_av, r_xoh, xerr=r_aver, yerr=r_xoher, color='r', marker='^', ls='None', markersize=8, markeredgecolor='r', markeredgewidth=1, label='$From$ $Radiance$')
xerb1, = axScatter.plot(r_av, r_xoh, color='r', marker='^', ls='None', markersize=10, markeredgecolor='r', markeredgewidth=1, label='$From$ $Radiance$')
axScatter.errorbar(t_av, t_xoh, xerr=t_aver, yerr=t_xoher, color='b', marker='o', ls='None', markersize=8, markeredgecolor='b', markeredgewidth=1, label=r'$From$ $\tau_{353}$')
xerb2, = axScatter.plot(t_av, t_xoh, color='b', marker='o', ls='None', markersize=10, markeredgecolor='b', markeredgewidth=1, label=r'$From$ $\tau_{353}$')
axScatter.errorbar(e_av, e_xoh, xerr=e_aver, yerr=e_xoher, color='k', marker='d', ls='None', markersize=8, markeredgecolor='k', markeredgewidth=1, label='$From$ $E(B-V)$')
xerb3, = axScatter.plot(e_av, e_xoh, color='k', marker='d', ls='None', markersize=10, markeredgecolor='k', markeredgewidth=1, label='$From$ $E(B-V)$')
### data from vanDishoeck1986 ###
# yd = np.array([1.9, 1.7, 1.3, 2.2, 1.9, 1.4, 3.1, 2.5, 1.7, 5.0, 3.6, 2.4, 2.2, 0.68, 12., 3.2, 2.2, 2.2, 1.7])
# yd = yd/4.2
# xd = np.array([1.01, 0.79, 0.66, 0.95, 0.75, 0.64, 0.92, 0.73, 0.63, 0.86, 0.71, 0.62, 0.93, 2.12, 0.96, 0.8, 0.94, 0.95, 0.94])
# axScatter.plot(xd, yd, color='k', marker='x', ls='None', markersize=10, markeredgecolor='k', markeredgewidth=1, label='$From$ $E(B-V)$')
### data from vanDishoeck1986 ###
axScatter.set_xticks(major_xticks)
axScatter.set_xticks(minor_xticks, minor=True)
axScatter.set_yticks(major_yticks)
axScatter.set_yticks(minor_yticks, minor=True)
axScatter.tick_params(axis='x', labelsize=25, pad=3)
axScatter.tick_params(axis='y', labelsize=25)
axScatter.tick_params(which='both', width=2.5)
axScatter.tick_params(which='major', length=12)
axScatter.tick_params(which='minor', length=6)
# Bracket + double arrow marking the low-latitude (|b| < 11 deg) sightlines.
axScatter.axvline(2.25, ymin=0.485, ymax=0.535, c='k', ls='-', linewidth=2)
axScatter.axvline(4.95, ymin=0.485, ymax=0.535, c='k', ls='-', linewidth=2)
axScatter.annotate(s='', xy=(4.97,4.5), xytext=(2.23,4.5), arrowprops=dict(arrowstyle='<->', linewidth=2))
axScatter.text(2.8, 4.59, r'$\mathrm{Sightlines\ with\ |b|<11^{o}}$', color='k', fontsize=32)
axScatter.text(0.8, 1., '(3C132)', color='k', fontsize=16, fontweight='bold')
print '3C132'
print xt_av
print xt_xoh
# Arrow pointing from the label to the 3C132 data point.
axScatter.annotate(s='', xy=(xt_av[0]-0.01, 0.+xt_xoh[0]), xytext=(1., 1.), arrowprops=dict(arrowstyle='->', linewidth=2))
axScatter.set_ylim(-0.2, 9.)
axScatter.set_xlim(0., 5.0)
axScatter.set_xlabel('$\mathrm{A_{V}}[mag]$', fontsize=36, fontweight='normal')
axScatter.set_ylabel('$\mathrm{X_{OH} [10^{-7}]}$', fontsize=36, fontweight='normal')
axbox = axScatter.get_position()
leg   = axScatter.legend([xerb1, xerb3, xerb2],\
	[r'$\mathrm{From\ \mathcal{R}}$',\
	r'$\mathrm{From}\ E(B-V)$',\
	r'$\mathrm{From\ \tau_{353}}$' ], \
	loc=(axbox.x0+0.5, axbox.y0+0.6), numpoints=1, fontsize=lgsize)
leg.get_frame().set_linewidth(0.0)
#### Draw y-axis histogram ####
### axHistY ###
major_xticks = np.arange(5., 20., 5.)
minor_xticks = np.arange(1., 20., 1.)
major_yticks = np.arange(0., 12., 1.)
minor_yticks = np.arange(0., 12., 0.5)
# Draw y-axis histogram
axHistY.hist(t_xoh, alpha=0.9, label='', color='b', ls='-', histtype='step', stacked=False, fill=False, range=(0.0,9.0), bins=13, lw=3, edgecolor='b', orientation='horizontal')
axHistY.hist(e_xoh, alpha=0.99, label='', color='k', ls='-', histtype='step', stacked=False, fill=False, range=(0.0,9.0), bins=13, lw=3, edgecolor='k', orientation='horizontal')
axHistY.hist(r_xoh, alpha=1.0, label='', color='r', ls='-', histtype='step', stacked=False, fill=False, range=(0.0,9.0), bins=13, lw=3, edgecolor='r', orientation='horizontal')
axHistY.set_xlabel(r'$\mathrm{\#\ of\ sightlines}$', fontsize=36, fontweight='normal')
axHistY.set_xticks(major_xticks)
axHistY.set_xticks(minor_xticks, minor=True)
axHistY.set_yticks(major_yticks)
axHistY.set_yticks(minor_yticks, minor=True)
axHistY.tick_params(axis='x', labelsize=25, pad=3)
axHistY.tick_params(axis='y', labelsize=22)
axHistY.tick_params(which='both', width=2)
axHistY.tick_params(which='major', length=12)
axHistY.tick_params(which='minor', length=6)
axHistY.set_xlim(0., 15.)
axHistY.set_ylim(-0.2, 9.)
# plt.tight_layout()
plt.savefig('xoh_vs_av.eps', bbox_inches='tight', pad_inches=0.03, format='eps', dpi=600)
plt.show()
## X(OH) vs Av ##
# Figure 2: X(OH) vs Av on log-log axes; saved as xoh_vs_av.png.
mpl.rcParams['axes.linewidth'] = 1.5
fig = plt.figure(figsize=(10,10))
ax  = fig.add_subplot(111); #ax.set_rasterized(True)
mks = 8
fts = 36
major_xticks = np.arange(0., 12., 1.)
minor_xticks = np.arange(0., 12., 0.25)
major_yticks = np.arange(0., 12., 1.)
minor_yticks = np.arange(0., 12., 0.5)
# Draw scatter plot
plt.errorbar(r_av, r_xoh, xerr=r_aver, yerr=r_xoher, color='r', marker='^', ls='None', markersize=8, markeredgecolor='r', markeredgewidth=1, label='$From$ $Radiance$')
xerb1, = plt.plot(r_av, r_xoh, color='r', marker='^', ls='None', markersize=10, markeredgecolor='r', markeredgewidth=1, label='$From$ $Radiance$')
plt.errorbar(t_av, t_xoh, xerr=t_aver, yerr=t_xoher, color='b', marker='o', ls='None', markersize=8, markeredgecolor='b', markeredgewidth=1, label=r'$From$ $\tau_{353}$')
xerb2, = plt.plot(t_av, t_xoh, color='b', marker='o', ls='None', markersize=10, markeredgecolor='b', markeredgewidth=1, label=r'$From$ $\tau_{353}$')
plt.errorbar(e_av, e_xoh, xerr=e_aver, yerr=e_xoher, color='k', marker='d', ls='None', markersize=8, markeredgecolor='k', markeredgewidth=1, label='$From$ $E(B-V)$')
xerb3, = plt.plot(e_av, e_xoh, color='k', marker='d', ls='None', markersize=10, markeredgecolor='k', markeredgewidth=1, label='$From$ $E(B-V)$')
plt.title('', fontsize=0)
plt.xlabel('$\mathrm{A_{V}}[mag]$', fontsize=36, fontweight='normal')
plt.ylabel('$\mathrm{X_{OH} [10^{-7}]}$', fontsize=36, fontweight='normal')
ax.set_yticks(major_yticks)
ax.set_yticks(minor_yticks, minor=True)
plt.tick_params(axis='x', labelsize=22, pad=7)
plt.tick_params(axis='y', labelsize=22)
plt.tick_params(which='both', width=2)
plt.tick_params(which='major', length=12)
plt.tick_params(which='minor', length=6)
plt.grid(False)
plt.yscale('log')
plt.xscale('log')
plt.xlim(0.2, 5.0)
plt.ylim(0.08,20.)
# for i in range(len(t_src)):
# 	# if(oh[i] > 0):
# 	plt.annotate('('+str(t_src[i])+')', xy=(t_nh2[i], t_xoh[i]), xycoords='data',
#          xytext=(-50.,30.), textcoords='offset points',
#          arrowprops=dict(arrowstyle="->"),fontsize=12,
#          )
axbox = ax.get_position()
leg   = plt.legend([xerb1, xerb2, xerb3], [r'$X_{OH}\ from\ \tau_{353}$', r'$X_{OH}\ from\ E(B-V)$', r'$X_{OH}\ from\ \mathcal{R}$'],\
	fontsize=14, loc=(axbox.x0+0.5, axbox.y0+0.7), numpoints=1)
leg.get_frame().set_linewidth(0.0)
plt.tight_layout()
# plt.savefig('xoh_vs_av.eps', bbox_inches='tight', pad_inches=0.03, format='eps', dpi=600)
plt.savefig('xoh_vs_av.png', bbox_inches='tight', pad_inches=0.03, format='png', dpi=100)
plt.show()
## END - PLOT ##
## X(OH) vs NH2 ##
# Figure 3: X(OH) vs N(H2) on log-log axes, with the 3C132 points circled;
# saved as xoh_vs_nh2.eps.
mpl.rcParams['axes.linewidth'] = 1.5
fig = plt.figure(figsize=(12,12))
ax  = fig.add_subplot(111); #ax.set_rasterized(True)
mks = 10
fts = 42
c3  = 'k'
c2  = 'b'
c1  = 'purple'
mk1 = '^'
mk2 = 'd'
mk3 = 'h'
major_xticks = np.arange(0., 500., 10.)
minor_xticks = np.arange(0., 500., 10.)
major_yticks = np.arange(0.1, 50., 2.)
minor_yticks = np.arange(0.1, 50., 1.)
# N(H2) columns are stored in units of 1e20 cm^-2, hence the *1e20 scaling.
xerb1, = plt.plot(t_nh2*1e20, t_xoh, color=c1, marker=mk1, ls='None', markersize=mks, markeredgecolor=c1, markeredgewidth=1, label=r'$X_{OH}\ from\ \tau_{353}$')
plt.errorbar(t_nh2*1e20, t_xoh, xerr=t_nh2er*1e20, yerr=t_xoher, color=c1, marker=mk1, ls='None', markersize=mks, markeredgecolor=c1, markeredgewidth=1, label='data')
xerb2, = plt.plot(e_nh2*1e20, e_xoh, color=c2, marker=mk2, ls='None', markersize=mks, markeredgecolor=c2, markeredgewidth=1, label=r'$X_{OH}\ from\ E(B-V)$')
plt.errorbar(e_nh2*1e20, e_xoh, xerr=e_nh2er*1e20, yerr=e_xoher, color=c2, marker=mk2, ls='None', markersize=mks, markeredgecolor=c2, markeredgewidth=1, label='data')
xerb3, = plt.plot(r_nh2*1e20, r_xoh, color=c3, marker=mk3, ls='None', markersize=mks, markeredgecolor=c3, markeredgewidth=1, label=r'$X_{OH}\ from\ \mathcal{R}$')
plt.errorbar(r_nh2*1e20, r_xoh, xerr=r_nh2er*1e20, yerr=r_xoher, color=c3, marker=mk3, ls='None', markersize=mks, markeredgecolor=c3, markeredgewidth=1, label='data')
# Red ellipses circling the 3C132 point for each proxy (note: the c1/c2/c3
# color names are rebound to the Ellipse artists here).
c1 = mpl.patches.Ellipse((xt_nh2*1e20, xt_xoh), 1e20, 0.04, edgecolor='r', facecolor='none', linewidth=2)
ax.add_artist(c1)
c2 = mpl.patches.Ellipse((xe_nh2*1e20, xe_xoh), 0.8e20, 0.06, edgecolor='r', facecolor='none', linewidth=2)
ax.add_artist(c2)
c3 = mpl.patches.Ellipse((xr_nh2*1e20, xr_xoh), 0.3e20, 0.7, edgecolor='r', facecolor='none', linewidth=2)
ax.add_artist(c3)
plt.title('', fontsize=0)
plt.xlabel(r'$\mathrm{N_{H_{2}}\ [cm^{-2}]} $', fontsize=fts, fontweight='normal')
plt.ylabel(r'$\mathrm{X_{OH} [10^{-7}]}$', fontsize=fts, fontweight='normal')
ax.set_yticks(major_yticks)
ax.set_yticks(minor_yticks, minor=True)
plt.tick_params(axis='x', labelsize=22, pad=7)
plt.tick_params(axis='y', labelsize=22)
plt.tick_params(which='both', width=2)
plt.tick_params(which='major', length=12)
plt.tick_params(which='minor', length=6)
plt.grid(False)
plt.yscale('log')
plt.xscale('log')
plt.xlim(0.25*1e20, 500.0*1e20)
plt.ylim(0.08,20.)
# for i in range(len(t_src)):
# 	# if(oh[i] > 0):
# 	plt.annotate('('+str(t_src[i])+')', xy=(t_nh2[i], t_xoh[i]), xycoords='data',
#          xytext=(-50.,30.), textcoords='offset points',
#          arrowprops=dict(arrowstyle="->"),fontsize=12,
#          )
axbox = ax.get_position()
leg   = plt.legend([xerb3, xerb2, xerb1], [r'$X_{OH}\ from\ \mathcal{R}$', r'$X_{OH}\ from\ E(B-V)$', r'$X_{OH}\ from\ \tau_{353}$'],\
	fontsize=22, loc=(axbox.x0+0.48, axbox.y0+0.73), numpoints=1)
leg.get_frame().set_linewidth(0.0)
plt.tight_layout()
plt.savefig('xoh_vs_nh2.eps', bbox_inches='tight', pad_inches=0.03, format='eps', dpi=600)
# plt.savefig('xoh_vs_nh2.png', bbox_inches='tight', pad_inches=0.03, format='png', dpi=100)
plt.show()
## END - PLOT ##
# NOTE(review): the script terminates here — everything below this
# sys.exit() is unreachable exploratory plotting code (kept as scratch;
# move/remove the exit to re-enable any of the sections below).
sys.exit()
## N(H2) vs Av ##
plt.errorbar(t_av, t_nh2, xerr=t_aver, yerr=t_nh2er, color='r', marker='o', ls='None', markersize=8, markeredgecolor='b', markeredgewidth=1, label='data')
plt.errorbar(e_av, e_nh2, xerr=e_aver, yerr=e_nh2er, color='b', marker='o', ls='None', markersize=8, markeredgecolor='b', markeredgewidth=1, label='data')
plt.errorbar(r_av, r_nh2, xerr=r_aver, yerr=r_nh2er, color='k', marker='o', ls='None', markersize=8, markeredgecolor='k', markeredgewidth=1, label='data')
plt.title('N$_{H2}$ (from Hiep) vs A$_{V}$', fontsize=30)
plt.xlabel('$A_{V}$ mag', fontsize=35)
plt.ylabel('$N_{H2}$', fontsize=35)
# plt.axhline(80., xmin=0, xmax=5)
# plt.axhline(10., xmin=0, xmax=5)
plt.grid(True)
# plt.ylim(-10., 80.)
plt.tick_params(axis='x', labelsize=18)
plt.tick_params(axis='y', labelsize=15)
for i in range(len(t_src)):
	# if(oh[i] > 0):
	plt.annotate('('+str(t_src[i])+')', xy=(t_av[i], t_nh2[i]), xycoords='data',
         xytext=(-50.,30.), textcoords='offset points',
         arrowprops=dict(arrowstyle="->"),fontsize=12,
         )
plt.show()
## X(OH) vs NHI ##
plt.errorbar(t_nhi, t_xoh, xerr=t_nhier, yerr=t_xoher, color='r', marker='o', ls='None', markersize=8, markeredgecolor='b', markeredgewidth=1, label='data')
plt.errorbar(e_nhi, e_xoh, xerr=e_nhier, yerr=e_xoher, color='b', marker='o', ls='None', markersize=8, markeredgecolor='b', markeredgewidth=1, label='data')
plt.errorbar(r_nhi, r_xoh, xerr=r_nhier, yerr=r_xoher, color='k', marker='o', ls='None', markersize=8, markeredgecolor='k', markeredgewidth=1, label='data')
plt.title('X$_{OH}$ (from Hiep) vs NHI', fontsize=30)
plt.xlabel('NHI', fontsize=35)
plt.ylabel('$X_{OH}$', fontsize=35)
plt.grid(True)
# plt.ylim(-10., 80.)
plt.tick_params(axis='x', labelsize=18)
plt.tick_params(axis='y', labelsize=15)
for i in range(len(t_src)):
	# if(oh[i] > 0):
	plt.annotate('('+str(t_src[i])+')', xy=(t_nhi[i], t_xoh[i]), xycoords='data',
         xytext=(-50.,30.), textcoords='offset points',
         arrowprops=dict(arrowstyle="->"),fontsize=12,
         )
plt.show()
## X(OH) vs CNM ##
plt.errorbar(t_cnm, t_xoh, xerr=t_cnmer, yerr=t_xoher, color='r', marker='o', ls='None', markersize=8, markeredgecolor='b', markeredgewidth=1, label='data')
plt.errorbar(e_cnm, e_xoh, xerr=e_cnmer, yerr=e_xoher, color='b', marker='o', ls='None', markersize=8, markeredgecolor='b', markeredgewidth=1, label='data')
plt.errorbar(r_cnm, r_xoh, xerr=r_cnmer, yerr=r_xoher, color='k', marker='o', ls='None', markersize=8, markeredgecolor='k', markeredgewidth=1, label='data')
plt.title('X$_{OH}$ (from Hiep) vs CNM', fontsize=30)
plt.xlabel('CNM', fontsize=35)
plt.ylabel('$X_{OH}$', fontsize=35)
plt.grid(True)
# plt.ylim(-10., 80.)
plt.tick_params(axis='x', labelsize=18)
plt.tick_params(axis='y', labelsize=15)
for i in range(len(t_src)):
	# if(oh[i] > 0):
	plt.annotate('('+str(t_src[i])+')', xy=(t_cnm[i], t_xoh[i]), xycoords='data',
         xytext=(-50.,30.), textcoords='offset points',
         arrowprops=dict(arrowstyle="->"),fontsize=12,
         )
plt.show()
## X(OH) vs NH ##
plt.errorbar(t_nh, t_xoh, xerr=t_nher, yerr=t_xoher, color='r', marker='o', ls='None', markersize=8, markeredgecolor='b', markeredgewidth=1, label='data')
plt.errorbar(e_nh, e_xoh, xerr=e_nher, yerr=e_xoher, color='b', marker='o', ls='None', markersize=8, markeredgecolor='b', markeredgewidth=1, label='data')
plt.errorbar(r_nh, r_xoh, xerr=r_nher, yerr=r_xoher, color='k', marker='o', ls='None', markersize=8, markeredgecolor='k', markeredgewidth=1, label='data')
plt.title('X$_{OH}$ (from Hiep) vs NH', fontsize=30)
plt.xlabel('NH', fontsize=35)
plt.ylabel('$X_{OH}$', fontsize=35)
plt.grid(True)
# plt.ylim(-10., 80.)
plt.tick_params(axis='x', labelsize=18)
plt.tick_params(axis='y', labelsize=15)
for i in range(len(t_src)):
	# if(oh[i] > 0):
	plt.annotate('('+str(t_src[i])+')', xy=(t_nh[i], t_xoh[i]), xycoords='data',
         xytext=(-50.,30.), textcoords='offset points',
         arrowprops=dict(arrowstyle="->"),fontsize=12,
         )
plt.show()
## X(OH) vs Av ##
plt.errorbar(t_av, t_xoh, xerr=t_aver, yerr=t_xoher, color='r', marker='o', ls='None', markersize=8, markeredgecolor='b', markeredgewidth=1, label='data')
plt.errorbar(e_av, e_xoh, xerr=e_aver, yerr=e_xoher, color='b', marker='o', ls='None', markersize=8, markeredgecolor='b', markeredgewidth=1, label='data')
plt.errorbar(r_av, r_xoh, xerr=r_aver, yerr=r_xoher, color='k', marker='o', ls='None', markersize=8, markeredgecolor='k', markeredgewidth=1, label='data')
plt.title('X$_{OH}$ (from Hiep) vs A$_{V}$', fontsize=30)
plt.xlabel('$A_{V}$ mag', fontsize=35)
plt.ylabel('$X_{OH}$', fontsize=35)
# plt.hline((0,5),(0.8, 10))
plt.grid(True)
plt.ylim(-10., 80.)
plt.tick_params(axis='x', labelsize=18)
plt.tick_params(axis='y', labelsize=15)
for i in range(len(t_src)):
	# if(oh[i] > 0):
	plt.annotate('('+str(t_src[i])+')', xy=(t_av[i], t_xoh[i]), xycoords='data',
         xytext=(-50.,30.), textcoords='offset points',
         arrowprops=dict(arrowstyle="->"),fontsize=12,
         )
plt.show()
dbxogns77/py_lab2 | 6,502,580,527,064 | 8c5862e5f060253d5906afad6b0c394a7e380c01 | 6c2682512e915786e4d5f905d60c46ea3db15f5c | /myprog_pkg.py | 0e14ae2d988c652a5e1cd0ffcec0811ac0bae5dd | []
| no_license | https://github.com/dbxogns77/py_lab2 | ecfe092ddd1bd140731ab15d7938110f7cdc7607 | 9805649a7a99f1b7866f48c392fba4266e30a5aa | refs/heads/master | 2022-04-23T10:20:48.458208 | 2020-04-25T10:36:02 | 2020-04-25T10:36:02 | 258,748,648 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
import my_pkg
if __name__ == "__main__":
while(1):
num = (int)(input("Select menu: 1) convserion 2) union/intersection 3) exit? "))
if(num not in range(1, 4)):
print("not in menu!")
continue
if(num == 1):
binnum = (int)(input("input binary number : "))
print("=> OCT> ",my_pkg.OCT(binnum))
print("=> DEC> ",my_pkg.DEC(binnum))
print("=> HEX> ",my_pkg.HEX(binnum))
elif(num == 2):
list1 = input("1st list: ")
list1 = list1[1:-1].split(',')
for i in list1:
list1[list1.index(i)] = i.strip()
list2 = input("2nd list: ")
list2 = list2[1:-1].split(',')
for i in list2:
list2[list2.index(i)] = i.strip()
print("=> union ", my_pkg.union(list1, list2))
print("=> intersection ", my_pkg.intersection(list1, list2))
else:
print("exit the program...")
break
| UTF-8 | Python | false | false | 837 | py | 4 | myprog_pkg.py | 4 | 0.573477 | 0.537634 | 0 | 31 | 26 | 82 |
jucimarjr/ipc20161 | 11,493,332,505,790 | dd9d24862fbe5415e45f597265534827af918bfe | 13e1b9ef93d71c146c7a207b8d7377a5df6c08bc | /lista2/ipc_lista2.02.py | 23ac1a4b65cb48f4ac5ed5a50c852dc35638eda7 | [
"Apache-2.0"
]
| permissive | https://github.com/jucimarjr/ipc20161 | d0f45bc6b64a3fcaa38595d72daafffb351f6e04 | 1cc62521d8165cd767b4cf29bba2f704860726fc | refs/heads/master | 2016-08-31T07:33:49.245856 | 2016-06-22T23:54:57 | 2016-06-22T23:54:57 | 55,017,497 | 9 | 6 | null | null | null | null | null | null | null | null | null | null | null | null | null | #
#introdução a programação de computadores
#Professor: Jucimar JR
#EQUIPE 1
#
#Any Mendes Carvalho - 1615310044
#Eduardo Maia Freire - 1615310003
#Kid Mendes de Oliveira Neto - 1615310011
#Luiz Gustavo de Rocha Melo - 1615310015
#Matheus Palheta Barbosa -1615310019
#Victor Rafael da Silva e Silva - 1615310025
#
valor = float(input("Informe um numero: "))
if (valor>0):
print ("O numero digitado e positivo")
else:
print ("O numero digitado e negativo")
| UTF-8 | Python | false | false | 470 | py | 384 | ipc_lista2.02.py | 384 | 0.742489 | 0.609442 | 0 | 19 | 23.526316 | 44 |
chaithra-yenikapati/python-code | 13,597,866,494,536 | cc36469451c13d6932ac196f7b102ac0dc2cc04e | 341c34c196f86d0dbfcdcec093b5e424da32f23b | /question_04.py | ea8a4e6ce781b8b631f5733c5fb825f2ef9617ea | []
| no_license | https://github.com/chaithra-yenikapati/python-code | db4f91491f55fc52e00e06ca429d046e3a59b701 | 8069d441a07fb6d8a03671694e89a86f1734dbee | refs/heads/master | 2021-01-10T13:32:33.107499 | 2016-01-02T00:58:15 | 2016-01-02T00:58:15 | 48,893,686 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = 'Chaithra'
notes = """
This is to introduce you to think on optimizing the solution by iterating down the code written
"""
#Given a list of numbers, modify the list so that each element has the product of all elements except the number
#ex: Input:[1,2,3,4,5]
#output:[120,60,40,30,24]
#Return the list of products
def product_of_list_elements(input):
    """Replace each element of *input* with the product of all the other
    elements and return the list.

    With no zeros the list is rewritten in place using prefix/suffix
    products plus division (unchanged float semantics from the original).
    With more than one zero every product is 0; with exactly one zero only
    the zero's position receives the product of the remaining elements.

    :param input: list of numbers (returned unchanged when len < 2)
    :return: list where position i holds the product of all elements != i
    """
    len_input = len(input)
    if len_input <= 1:
        return input
    if 0 not in input:
        # prefix[i] = product of input[0..i]
        prefix = [0] * len_input
        prefix[0] = input[0]
        for i in range(1, len_input):
            prefix[i] = prefix[i - 1] * input[i]
        # suffix[i] = product of input[i..n-1]; interior elements are
        # rewritten in place while the suffix is filled right-to-left.
        suffix = [0] * len_input
        suffix[len_input - 1] = input[len_input - 1]
        suffix[0] = prefix[len_input - 1]
        for i in range(len_input - 2, 0, -1):
            suffix[i] = suffix[i + 1] * input[i]
            input[i] = (prefix[i] * suffix[i]) / (input[i] * input[i])
        input[len_input - 1] = prefix[len_input - 2]
        input[0] = prefix[len_input - 1] / input[0]
        return input
    if input.count(0) > 1:
        # Two or more zeros: every "product of the others" contains a zero.
        return [0] * len_input
    # Exactly one zero: only that position gets the product of the non-zero
    # elements; every other product includes the zero and is therefore 0.
    # BUG FIX: the zero's index must be captured BEFORE building the result
    # list -- the old code ran input.index(0) on the freshly zeroed list,
    # so the product always landed at index 0.
    zero_index = input.index(0)
    p = 1
    for value in input:
        if value != 0:
            p *= value
    result = [0] * len_input
    result[zero_index] = p
    return result
def test_product_of_list_elements():
    # Covers the no-zero path (ints, negatives, floats), the multi-zero and
    # single-zero paths, and the degenerate empty / single-element inputs.
    # The float expectations pin the exact division-based arithmetic.
    assert [120, 60, 40, 30, 24] == product_of_list_elements([1, 2, 3, 4, 5])
    assert [0, 0, 0, 0, 0] == product_of_list_elements([0, 0, 0, 0, 0])
    assert [1] == product_of_list_elements([1])
    assert [72, 0, 0, 0, 0] == product_of_list_elements([0, 3, 1, 8, 3])
    assert [98, 42, 147, -294, -42] == product_of_list_elements([-3, -7, -2, 1, 7])
    assert [] == product_of_list_elements([])
    assert [0] == product_of_list_elements([0])
    assert [4.2, 2.4000000000000004, 2.52, 5.04] == product_of_list_elements([1.2, 2.1, 2, 1])
gylow/ml-recommendation | 12,008,728,573,095 | f56d42aefab4064a963993c2057f6fb818917aaa | 08502ca2b4477dd6aa9c612177395a4ae0ebf548 | /src/metrics.py | 56b3419413b77895385371ba36877d7b32aa9f11 | [
"MIT"
]
| permissive | https://github.com/gylow/ml-recommendation | 3e3299ed1c68267465b1ae4ad010b422c421cccf | 7ba19d7c1044f6ba2d6dccda188359a00a6cdf52 | refs/heads/master | 2022-12-05T22:53:31.066761 | 2020-09-01T14:57:27 | 2020-09-01T14:57:27 | 286,829,964 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
from sklearn.metrics import mean_squared_error, median_absolute_error, r2_score
class Metrics:
    """Stateless helper bundling regression evaluation metrics."""

    def __init__(self):
        # No configuration or state is required.
        pass

    def calculate_regression(self, y_true, y_pred):
        """
        Calculate the metrics from a regression problem
        :param y_true: Numpy.ndarray or Pandas.Series
        :param y_pred: Numpy.ndarray or Pandas.Series
        :return: Dict with metrics
        """
        return {
            'median_abs_err': median_absolute_error(y_true, y_pred),
            'mean_sqr_err': mean_squared_error(y_true, y_pred),
            'r_2_score': r2_score(y_true, y_pred),
        }
| UTF-8 | Python | false | false | 726 | py | 10 | metrics.py | 7 | 0.61157 | 0.604683 | 0 | 19 | 36 | 106 |
letterix/rasp-temp | 16,484,084,502,423 | 13fa063297db068bb0aa3544662bb366bece67de | 100f77348c720c2c7c7a31029f2c115b5cc586cb | /raspberry/backend/handlers/installationHandler.py | 23fcf3428e9eaa94f82c67b2f08634ce8b24a032 | []
| no_license | https://github.com/letterix/rasp-temp | 2b0644ff8e7a68739d48507ccbdbd0a1e2b63256 | add27b894ab8533702eb07219b86aea30f3d8aa5 | refs/heads/master | 2021-01-10T20:00:59.982757 | 2015-12-26T11:23:26 | 2015-12-26T11:23:26 | 39,295,970 | 0 | 0 | null | false | 2015-12-26T11:23:27 | 2015-07-18T11:20:32 | 2015-09-18T15:19:13 | 2015-12-26T11:23:27 | 2,079 | 0 | 0 | 0 | Python | null | null | from dao import InstallationDao
from uuid import getnode as get_mac
def get_installation():
    """Return the stored installation record, creating/refreshing it when its
    serial number no longer matches this machine's MAC address."""
    record = InstallationDao.get()
    if record and record['serial_number'] == str(get_py_mac()):
        return record
    # Record missing or stale: rewrite it with the real MAC address.
    print("Setting correct serial_number")
    mac_serial = get_py_mac()
    model_name = 'pi'
    print("real serial_number is: ", mac_serial)
    InstallationDao.create(mac_serial, model_name)
    record = InstallationDao.get()
    print("serial_number now set to: ", record.get('serial_number'))
    return record
def get_py_mac():
mac = get_mac()
if (mac >> 40)%2:
raise OSError("The system could not find the mac address of the pi")
return mac | UTF-8 | Python | false | false | 726 | py | 142 | installationHandler.py | 113 | 0.683196 | 0.679063 | 0 | 23 | 30.608696 | 76 |
hlfwm/lightcurve-chunksize | 4,166,118,283,443 | d09f7b63b5008f3034a2b92c8edc1e30617a6a93 | cb46c68da245fb33dbe4fd820c870b785229a650 | /lightcurve-assoc.py | 868d87f6ed757d45d56cbae70b061baeb4d39dd9 | []
| no_license | https://github.com/hlfwm/lightcurve-chunksize | 9d9ecc63cba40adc8dc665d2c350d4c954b6f18c | c0d9d314c95b692ccce4f8767832374703768fd2 | refs/heads/master | 2021-01-21T12:03:19.136241 | 2015-09-12T11:23:23 | 2015-09-12T11:23:23 | 42,354,367 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import scipy as S
import monetdb.sql #need sudo ipython
import os, socket, sys
# Map each cluster host name to the table-number suffix it owns: the MonetDB
# tables are sharded per machine (targets<N>, assoc<N>, image<N>).
machine_tableno = {
    'stones01.scilens.private' : 0,
    'stones02.scilens.private' : 1,
    'stones03.scilens.private' : 2,
    'stones04.scilens.private' : 3,
    'stones05.scilens.private' : 4,
    'stones06.scilens.private' : 5,
    'stones07.scilens.private' : 6,
    'stones08.scilens.private' : 7,
    'stones09.scilens.private' : 8,
    'stones10.scilens.private' : 9,
    'stones11.scilens.private' : 10,
    'stones12.scilens.private' : 11,
    'stones13.scilens.private' : 12,
    'stones14.scilens.private' : 13,
    'stones15.scilens.private' : 14,
    'stones16.scilens.private' : 15,
    'gwacdb' : 16
    }
# Table suffix for the host this script is running on (KeyError on unknown hosts).
tblno = machine_tableno[socket.gethostname()]
def draw_flux(uid, mitosis):
    """Fetch the (jd, flux) light curve of source *uid* from this host's
    table shard and return it as an array (empty when no associations).

    mitosis: falsy (0/False) disables MonetDB's mitosis optimizer pipe
    before running the queries.
    """
    connection = monetdb.sql.connect(username="monetdb", password="monetdb", hostname="localhost", database="gwacdb")
    cursor = connection.cursor()
    cursor.arraysize = 100
    if mitosis == False:
        print "disable mitosis optimizer pipe"
        cursor.execute("set optimizer='no_mitosis_pipe';")
    # Session-local temp table holding the flux samples of this source.
    # tblno and uid are ints, so the %d interpolation cannot inject SQL.
    cursor.execute("declare table tmp(flux double, imageid int)")
    cursor.execute('insert into tmp select flux,imageid from targets%d t, (select targetid from assoc%d where uniqueid=%d and type=3) t0 where t.id=t0.targetid;' %(tblno,tblno,uid))
    # Join against the image table to attach the Julian date of each sample.
    cursor.execute('select i.jd, tmp.flux from tmp, image%d i where tmp.imageid=i.imageid' %tblno)
    mylist=cursor.fetchall()
    dd = S.array(mylist) #convert list to array
    #S.savetxt("lightcurve_jd_flux_%d.txt" %uid,dd)
    if dd.size == 0:
        print "source "+str(uid)+" has no targetid in assoc%d." %tblno
    else:
        print "source "+str(uid)+" has %d targetid in assoc%d." %(len(dd),tblno)
    cursor.close()
    connection.close()
    return dd
# CLI: lightcurve-assoc.py <uniqueid> <mitosis-flag>
# A mitosis-flag of 0 disables the mitosis optimizer pipe (0 == False).
uniqueid=int(sys.argv[1])
mitosis=int(sys.argv[2])
draw_flux(uniqueid, mitosis)
| UTF-8 | Python | false | false | 1,943 | py | 4 | lightcurve-assoc.py | 4 | 0.650026 | 0.616572 | 0 | 51 | 37.098039 | 181 |
akashp11/EnggProject | 16,492,674,442,852 | 5c6c79d969cf6e10f16ade389ca069228de2faed | 402a1ad20fa4601507114d9387c9324c314bceaa | /handlers/register_phc.py | c6227c3222f2661f74a1294eb95fa21bbc2a1d55 | []
| no_license | https://github.com/akashp11/EnggProject | b78a233b328b1f74afb3e88c270296d45cac18d0 | aca853d6c1a01930232e84cacc31ae97b034d808 | refs/heads/master | 2022-11-27T18:45:39.895111 | 2020-08-11T19:14:26 | 2020-08-11T19:14:26 | 247,554,282 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask_restful import Resource
from flask import render_template, make_response, request, redirect, url_for
from wtforms import Form, StringField, IntegerField, TextAreaField, PasswordField, validators, SelectField
from passlib.hash import sha256_crypt
from models import PHCUser
from datastore import db
class RegisterForm(Form):
    # WTForms definition of the PHC-user registration form.
    name = StringField('Name', [validators.Length(min=1, max=50)])
    username = StringField('Username', [validators.Length(min=4, max = 25)])
    # Centre code is entered as text and converted with int() when persisted.
    location = StringField('Centre Code (provided to you)', [validators.DataRequired()])
    # EqualTo enforces that 'password' and 'confirm' match when validated.
    password = PasswordField('Password', [validators.DataRequired(),
                            validators.EqualTo('confirm',
                            message = 'Passwords do not match')])
    confirm = PasswordField('Confirm Password')
class Register(Resource):
    """Flask-RESTful resource handling PHC user registration."""

    def get(self):
        """Render the empty registration form."""
        headers = {'Content-Type': 'text/html'}
        form = RegisterForm(request.form)
        return make_response(render_template('register.html', form=form), 200, headers)

    def post(self):
        """Validate the submitted form and create the PHC user.

        Re-renders the form with HTTP 400 when validation fails (length
        limits, required fields, password/confirm mismatch) instead of
        persisting the invalid data.
        """
        form = RegisterForm(request.form)
        # BUG FIX: form.validate() was never called, so the declared
        # validators (including the password EqualTo check) never ran and
        # invalid submissions were stored.
        if not form.validate():
            headers = {'Content-Type': 'text/html'}
            return make_response(render_template('register.html', form=form), 400, headers)
        name = form.name.data
        location = form.location.data
        username = form.username.data
        # NOTE(review): passlib's .encrypt is a deprecated alias of .hash in
        # newer releases -- confirm the installed passlib version.
        password = sha256_crypt.encrypt(str(form.password.data))
        phc_user = PHCUser(name=name, username=username, location=int(location), password=password)
        db.session.add(phc_user)
        db.session.commit()
        return redirect(url_for('login'))
| UTF-8 | Python | false | false | 1,469 | py | 55 | register_phc.py | 28 | 0.681416 | 0.671205 | 0 | 31 | 46.032258 | 106 |
chenyuyi94/opencv-python | 9,577,777,114,028 | dffefe9e48ac3b246d765a3af90991e0a5978b88 | dc26cc64d005f50352de40c8c07e00a2a134a026 | /chapter13.py | 1e6acd2cd681ccf54845176f4ff738bc313a93bb | []
| no_license | https://github.com/chenyuyi94/opencv-python | cb0143eb3b4b9f1807149361323b32293d5fae02 | 85582c6c7a2c9e2ac2da7380ee49dbf0674337a2 | refs/heads/master | 2022-12-07T18:39:29.292956 | 2020-08-21T13:43:18 | 2020-08-21T13:43:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# @Time : 2020/7/28 10:10
# @Author : Hubery-Lee
# @Email : hrbeulh@126.com
## 答题卡识别
'''
0. 答题卡仿生变换
1. 图像预处理
2. 形态学处理
3. 找出涂黑的选项
4. 与答案进行匹配
'''
import cv2
import numpy as np
import matplotlib as plt
# test01_png的答案
ANSWER = {0:1, 1:4, 2:0, 3:2, 4:1}
def cv_imshow(winname, src):
"""
显示图片,按任意键关闭
:param winname: 窗口名字
:param src: 待显示图片源文件
:return: 无返回值
"""
cv2.imshow(winname, src)
cv2.waitKey(0)
def cnts_sorted(cnts):
"""
画边框,按X轴坐标位置排序
:param cnts: 输入等高线组
:return: 排序后的等高线组和boundingBoxes(外接矩形组)
"""
boudaryBoxes = [cv2.boundingRect(cnt) for cnt in cnts]
dat = zip(cnts, boudaryBoxes)
(Cnts, Boxes) = zip(*sorted(dat, key=lambda b: b[1][1],reverse=False))
return Cnts, Boxes
def sort_contours(cnts, method="left-to-right"):
reverse = False
i = 0
if method == "right-to-left" or method == "bottom-to-top":
reverse = True
if method == "top-to-bottom" or method == "bottom-to-top":
i = 1
boundingBoxes = [cv2.boundingRect(c) for c in cnts]
(cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),
key=lambda b: b[1][i], reverse=reverse))
return cnts, boundingBoxes
def order_points(pts):
"""
对变换对象的几何顶点进行排序
:param pts: 输入坐标点列表
:return: 返回排序后的坐标点
"""
# print(pts)
rect = np.zeros((4, 2), dtype = "float32")
s = pts.sum(axis = 1)
# print("s")
# print(s)
# print(np.argmin(s))
# print(np.argmax(s))
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
diff = np.diff(pts,axis = 1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
return rect
def four_points_transform(img,pts):
"""
仿生变换
:param img: 输入包含待变换对象的图像
:param pts: 输入待变换对象在图像中的坐标点
:return: 输出变换对象的图像
"""
# 变换前的坐标
org_pts = order_points(pts)
# 变换后的坐标
widthA = np.sqrt((org_pts[0][0]-org_pts[1][0])**2 + (org_pts[0][1]-org_pts[1][1])**2)
widthB = np.sqrt((org_pts[2][0] - org_pts[2][0]) ** 2 + (org_pts[3][1] - org_pts[3][1]) ** 2)
width = max(int(widthA),int(widthB))
heightA = np.sqrt((org_pts[0][0]-org_pts[1][0])**2 + (org_pts[3][1]-org_pts[3][1])**2)
heightB = np.sqrt((org_pts[1][0] - org_pts[2][0]) ** 2 + (org_pts[1][1] - org_pts[2][1]) ** 2)
height = max(int(heightA),int(heightB))
Points = np.array([[0, 0], [width-1,0],[width-1,height-1],[0,height-1]],dtype= "float32")
# 仿生变换
matrix2 = cv2.getPerspectiveTransform(org_pts,Points)
imgWarp = cv2.warpPerspective(img,matrix2,(width,height))
return imgWarp
def resize(img,height = None,width = None,inter = cv2.INTER_AREA):
"""
对图像大小进行插值缩小或放大
:param img: 待缩放图像
:param height: 图像高度
:param width: 图像宽度
:param inter: openCV中的缩放插值方法
:return: 缩放后的图像
"""
dim = None
(h,w) = img.shape[:2]
if height is None and width is None:
return img
elif width is None:
ratio = height/float(h)
dim = (int(w*ratio),height)
else:
ratio = width/float(w)
dim = (width,int(h*ratio))
# cv2.resize(img,(width,height),flag)
resized = cv2.resize(img,dim,inter)
return resized
# 读入图像
image = cv2.imread("test_01.png")
# image = resize(image,500)
img = image.copy()
# 预处理
img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
img = cv2.GaussianBlur(img,(5,5),0)
edges = cv2.Canny(img,100,255)
cv_imshow("Canny",edges)
# 找出答题卡区域
# 1. 检测轮廓
# 2. 提取坐标
# 3. 仿生变换变换
binary, contours, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
contours = sorted(contours,key = cv2.contourArea,reverse= True)[:5] #
for cnt in contours:
perimeter = cv2.arcLength(cnt,True)
epsilon = 0.02*perimeter
approx = cv2.approxPolyDP(cnt,epsilon,True)
if len(approx) == 4:
screenCnt = approx
break
cv2.drawContours(image,[screenCnt],0,(0,0,255),2)
cv_imshow("screen",image)
pts = screenCnt.reshape(4,2)
warped = four_points_transform(img,pts)
cv_imshow("warped",warped)
# 找出涂黑的选项位置
# 1. 二值化处理
# 2. 检测圆圈轮廓
# 3. 筛选涂黑选项
cv_imshow("warped_",warped)
ret, thresh = cv2.threshold(warped, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
cv_imshow("Ostu", thresh)
bin_c, cnts_c, hierarchy_c = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
th = thresh.copy()
# warped = cv2.cvtColor(warped,cv2.COLOR_GRAY2BGR)
cv2.drawContours(th, cnts_c, -1, (0, 0, 255), 2)
cv_imshow("Ostu_cnts",th)
# 选项轮廓
questionCnts = []
# 遍历
for c in cnts_c:
# 计算比例和大小
(x, y, w, h) = cv2.boundingRect(c)
ar = w / float(h)
# 根据实际情况指定标准
if w >= 20 and h >= 20 and ar >= 0.9 and ar <= 1.1:
questionCnts.append(c)
# 按照从上到下进行排序
# questionCnts, _ = cnts_sorted(questionCnts)
questionCnts,_ = sort_contours(questionCnts, method="top-to-bottom")
# print(questionCnts)
# warped = cv2.cvtColor(warped, cv2.COLOR_GRAY2BGR)
# for i,cnt in enumerate(questionCnts):
# (x,y,w,h) = cv2.boundingRect(cnt)
# cv2.drawContours(warped, cnt, 0, (0, 0, 255), 2)
# cv2.putText(warped,str(i),(x-10,y-10),cv2.FONT_ITALIC,0.5,(0,0,255),2)
# cv_imshow("order", warped)
correct = 0
warped = cv2.cvtColor(warped, cv2.COLOR_GRAY2BGR)
# 每排有5个选项
for (q, i) in enumerate(np.arange(0, len(questionCnts), 5)):
# 排序
cnts, _ = sort_contours(questionCnts[i:i + 5])
bubbled = None
# 遍历每一个结果
for (j, c) in enumerate(cnts):
# 使用mask来判断结果
mask = np.zeros(thresh.shape, dtype="uint8")
cv2.drawContours(mask, [c], -1, 255, -1) # -1表示填充
# cv_imshow('mask', mask)
# 通过计算非零点数量来算是否选择这个答案
mask = cv2.bitwise_and(thresh, thresh, mask=mask)
total = cv2.countNonZero(mask)
# cv_imshow("maskbit", mask)
# print("total")
# print(total)
# 通过阈值判断
if bubbled is None or total > bubbled[0]:
bubbled = (total, j)
print(bubbled)
# 对比正确答案
color = (0, 0, 255)
k = ANSWER[q]
# 判断正确
if k == bubbled[1]:
color = (0, 255, 0)
correct += 1
# 绘图
cv2.drawContours(warped, [cnts[k]], -1, color, 3)
score = (correct / 5.0) * 100
print("[INFO] score: {:.2f}%".format(score))
cv2.putText(warped, "{:.2f}%".format(score), (10, 30),
cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)
cv2.imshow("Original", image)
cv2.imshow("Exam", warped)
cv2.waitKey(0)
| UTF-8 | Python | false | false | 7,152 | py | 17 | chapter13.py | 16 | 0.602376 | 0.559706 | 0 | 243 | 25.316872 | 104 |
effigies/etelemetry-client | 13,159,779,809,541 | e6449a1e56b77be04c6f2337ba1cb814803c7c0a | b20b03bbfbb7417ea7285e647bced40fccb8da62 | /etelemetry/tests/test_client.py | 9bf1790bef151656217adf5d792b06ae62537ab3 | [
"Apache-2.0"
]
| permissive | https://github.com/effigies/etelemetry-client | 3c5301cc84f173eeb5e1310a8000dbd2cdfcabd0 | bf70c9b36c3f2c02e3c8e1a2998637afd5d87621 | refs/heads/master | 2020-08-01T10:46:09.419461 | 2019-09-16T15:43:42 | 2019-09-16T15:43:42 | 210,972,484 | 0 | 0 | NOASSERTION | true | 2019-09-26T01:25:41 | 2019-09-26T01:25:41 | 2019-09-16T15:43:45 | 2019-09-24T19:01:59 | 29 | 0 | 0 | 0 | null | false | false | import pytest
from ..config import ET_ROOT
from ..client import _etrequest, get_project
def test_etrequest():
    # An unresolvable host should surface as RuntimeError from _etrequest.
    endpoint = "http://fakeendpoint/"
    with pytest.raises(RuntimeError):
        _etrequest(endpoint, method="get")
    # The real etelemetry root must respond (requires live network access).
    assert _etrequest(ET_ROOT)
    # ensure timeout is working properly
    endpoint = "https://google.com"
    with pytest.raises(RuntimeError):
        _etrequest(endpoint, timeout=0.01)
    assert _etrequest(endpoint)
def test_get_project():
    # Repo strings without an "owner/name" shape are rejected up front.
    repo = "invalidrepo"
    with pytest.raises(ValueError):
        get_project(repo)
    # A known public repo returns a payload containing its version
    # (requires live network access).
    repo = "github/hub"
    res = get_project(repo)
    assert "version" in res
| UTF-8 | Python | false | false | 646 | py | 2 | test_client.py | 1 | 0.671827 | 0.667183 | 0 | 25 | 24.84 | 44 |
KarlOstradt/Master-2021-Hybrid-Human-Machine-interpretation-of-well-logs-using-deep-learning | 4,140,348,523,960 | e4d009b0d6021d7a5399ec696e44c1e9d580c085 | 1358d67fff5e634a1bb9ca8490c052076cf956b8 | /master/metrics.py | 426a26aadf54c07955b05830c241dc0789926ef8 | []
| no_license | https://github.com/KarlOstradt/Master-2021-Hybrid-Human-Machine-interpretation-of-well-logs-using-deep-learning | f0400c4f2cfd68dbbefcc342aa7bbe81e30a963a | 8e75b16b32c5601a7ee3b7bc12422ffdf6c626fa | refs/heads/master | 2023-06-07T03:00:08.025308 | 2021-06-15T01:08:04 | 2021-06-15T01:08:04 | 376,787,411 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import math
import matplotlib.pyplot as plt
from prettytable import PrettyTable
import master.util as util
def evaluation_metrics(truePos, trueNeg, falsePos, falseNeg):
    """Assemble every classification metric for one confusion matrix.

    :return: dict mapping metric name -> value (np.nan where undefined)
    """
    return {
        "accuracy": accuracy(truePos, trueNeg, falsePos, falseNeg),
        "error_rate": error_rate(truePos, trueNeg, falsePos, falseNeg),
        "prevalence": prevalence(truePos, trueNeg, falsePos, falseNeg),
        "null_error_rate": null_error_rate(truePos, trueNeg, falsePos, falseNeg),
        "precision": precision(truePos, falsePos),
        "recall": recall(truePos, falseNeg),
        "specificity": specificity(trueNeg, falsePos),
        "fallout": fallout(trueNeg, falsePos),
        "miss_rate": miss_rate(truePos, falseNeg),
        "f1_score": f_score(truePos, falsePos, falseNeg, beta=1),
        "f2_score": f_score(truePos, falsePos, falseNeg, beta=2),
        "false_discovery_rate": false_discovery_rate(truePos, falsePos),
        "false_omission_rate": false_omission_rate(trueNeg, falseNeg),
        "mcc": matthews_correlation_coefficient(truePos, trueNeg, falsePos, falseNeg),
    }
def accuracy(truePos, trueNeg, falsePos, falseNeg):
    """Fraction of all predictions that are correct; NaN for an empty matrix."""
    total = truePos + trueNeg + falsePos + falseNeg
    return np.nan if total == 0 else (truePos + trueNeg) / total
def error_rate(truePos, trueNeg, falsePos, falseNeg):
    """Fraction of predictions that are wrong (1 - accuracy); NaN if empty."""
    total = truePos + trueNeg + falsePos + falseNeg
    if total == 0:
        return np.nan
    return (falsePos + falseNeg) / total
def prevalence(truePos, trueNeg, falsePos, falseNeg):
    """Share of cases that are actually positive; NaN for an empty matrix."""
    total = truePos + trueNeg + falsePos + falseNeg
    actual_pos = truePos + falseNeg
    return np.nan if total == 0 else actual_pos / total
def null_error_rate(truePos, trueNeg, falsePos, falseNeg):
    """Share of cases that are actually negative; NaN for an empty matrix."""
    total = truePos + trueNeg + falsePos + falseNeg
    if total == 0:
        return np.nan
    return (trueNeg + falsePos) / total
def precision(truePos, falsePos):
    """Positive predictive value: TP / (TP + FP); NaN with no positive predictions."""
    predicted_pos = truePos + falsePos
    return np.nan if predicted_pos == 0 else truePos / predicted_pos
def recall(truePos, falseNeg):
    """Sensitivity / true positive rate: TP / (TP + FN); NaN with no actual positives."""
    actual_pos = truePos + falseNeg
    return np.nan if actual_pos == 0 else truePos / actual_pos
def specificity(trueNeg, falsePos):
    """Selectivity / true negative rate: TN / (TN + FP); NaN with no actual negatives."""
    actual_neg = trueNeg + falsePos
    if actual_neg == 0:
        return np.nan
    return trueNeg / actual_neg
def fallout(trueNeg, falsePos):
    """False positive rate: FP / (FP + TN); NaN with no actual negatives."""
    actual_neg = falsePos + trueNeg
    return np.nan if actual_neg == 0 else falsePos / actual_neg
def miss_rate(truePos, falseNeg):
    """False negative rate: FN / (FN + TP); NaN with no actual positives."""
    actual_pos = falseNeg + truePos
    if actual_pos == 0:
        return np.nan
    return falseNeg / actual_pos
def f_score(truePos, falsePos, falseNeg, beta=1):
    """F-beta score: weighted harmonic mean of precision and recall.

    beta > 1 weighs recall higher (e.g. beta=2); beta < 1 favours precision.
    Returns np.nan when precision/recall are undefined or when both are zero
    (the harmonic mean is undefined), matching this module's convention of
    returning NaN for undefined metrics.
    """
    prec = precision(truePos, falsePos)
    rec = recall(truePos, falseNeg)
    # BUG FIX: `x == np.nan` is always False (NaN never compares equal), so
    # the old guards were dead code; NaN must be detected with isnan().
    if math.isnan(prec) or math.isnan(rec):
        return np.nan
    denominator = (beta**2 * prec) + rec
    if denominator == 0:
        # BUG FIX: precision == recall == 0 used to raise ZeroDivisionError.
        return np.nan
    return (1 + beta**2) * ((prec * rec) / denominator)
def false_discovery_rate(truePos, falsePos):
    """FDR: FP / (FP + TP), the complement of precision; NaN when undefined."""
    predicted_pos = falsePos + truePos
    if predicted_pos == 0:
        return np.nan
    return falsePos / predicted_pos
def false_omission_rate(trueNeg, falseNeg):
    """FOR: FN / (FN + TN); NaN when there are no negative predictions."""
    predicted_neg = falseNeg + trueNeg
    return np.nan if predicted_neg == 0 else falseNeg / predicted_neg
def matthews_correlation_coefficient(truePos, trueNeg, falsePos, falseNeg):
    """Matthews correlation coefficient, with degenerate matrices handled
    explicitly: +1 / -1 when a single diagonal / off-diagonal cell holds all
    cases, 0 when any marginal (row or column sum) is zero."""
    total = truePos + trueNeg + falsePos + falseNeg
    if total in (truePos, trueNeg):
        return 1
    if total in (falsePos, falseNeg):
        return -1
    actual_pos = truePos + falseNeg
    actual_neg = falsePos + trueNeg
    pred_pos = truePos + falsePos
    pred_neg = falseNeg + trueNeg
    if 0 in (actual_pos, actual_neg, pred_pos, pred_neg):
        return 0
    numerator = truePos * trueNeg - falsePos * falseNeg
    # float64 product guards against integer overflow in other runtimes and
    # keeps the original floating-point semantics.
    denominator = math.sqrt(math.prod(np.array([actual_pos, actual_neg, pred_pos, pred_neg], dtype=np.float64)))
    return numerator / denominator
def stats(lists, percentiles=(5, 50, 95)):
    """Find min, max, avg and percentiles for test files.

    Args:
        lists : Tuple, list or numpy array containing lists (of numbers).
        percentiles : Percentile ranks (0-100) to report, both per file and
            as the median across files. (Fixed: was a mutable default list.)

    Returns:
        dict: Statistics for test files, or None if any lists are empty.
    """
    # No input, or any empty member list, yields no statistics.
    if len(lists) == 0:
        return None
    for values in lists:
        if len(values) == 0:
            return None
    combined = np.concatenate(lists)
    # Note: local renamed from `stats`, which shadowed this function's name.
    result = {
        'min': min(combined),                   # Global minimum
        'max': max(combined),                   # Global maximum
        'avg': sum(combined) / len(combined),   # Global average
        'files': list()  # Statistics of all individual test files
    }
    for p in percentiles:
        result[f'p{p}'] = 0
    # Per-file statistics.
    for values in lists:
        file_percentiles = np.percentile(values, percentiles)
        entry = {
            'min': min(values),
            'max': max(values),
            'avg': sum(values) / len(values),
        }
        for i, p in enumerate(percentiles):
            entry[f'p{p}'] = file_percentiles[i]
        result['files'].append(entry)
    # Each global percentile is the median of that percentile across files.
    for p in percentiles:
        key = f'p{p}'
        result[key] = np.median([f[key] for f in result['files']])
    return result
| UTF-8 | Python | false | false | 5,609 | py | 26 | metrics.py | 10 | 0.634516 | 0.627919 | 0 | 165 | 32.951515 | 106 |
ForexStuff/backtraderbd | 10,883,447,139,827 | f7d40f70901229bbba6ba8afc7dd47e3ce0f3d5b | a2ef29a89a0ca04294110ebc2e4fe7d4c0113e49 | /backtraderbd/__init__.py | 8c2192ed469d33ce45f909513ecc0443e69595e9 | [
"MIT"
]
| permissive | https://github.com/ForexStuff/backtraderbd | 477a5dff961aa2a7b0c12e7e5756055991757551 | dc211d279ac96fdda48cf60d695507425b8a72f1 | refs/heads/master | 2022-11-27T07:03:55.996842 | 2022-09-27T11:51:16 | 2022-09-27T11:51:16 | 278,747,881 | 0 | 0 | MIT | true | 2022-11-24T20:44:42 | 2020-07-10T22:51:02 | 2020-07-10T22:51:04 | 2022-11-24T20:44:41 | 205 | 0 | 0 | 0 | null | false | false |
__author__ = 'Raisul Islam'
__all__ = ['RSIStrategy', 'EMACStrategy', 'MACDStrategy', 'SMACStrategy']
| UTF-8 | Python | false | false | 108 | py | 18 | __init__.py | 16 | 0.62037 | 0.62037 | 0 | 4 | 25 | 73 |
mlauryn/EsatanOpt | 2,121,713,875,489 | 8173359044c90d54c6340adef504ee2948e10566 | 26c9aff4cdbd5c7ddc3c57bcb654b62b42bdb1a3 | /RU/TempComp.py | 158b5fb98e0976cd206aac2b16791437a8a3b756 | []
| no_license | https://github.com/mlauryn/EsatanOpt | d3f6014afe22e6953ede5af16ab46bb7c341b2e7 | 4cca1f61aa0a5b2b25886173f32d4728651254b1 | refs/heads/master | 2021-06-11T04:31:39.238479 | 2020-04-11T16:08:48 | 2020-04-11T16:08:48 | 128,372,853 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import openmdao.api as om
import numpy as np
class TempComp(om.ImplicitComponent):
    """Computes steady state node temperature residual across a model based on conductor definition and boundary conditions at single design point."""
    def initialize(self):
        # 'n' = number of diffusion nodes; one extra node is appended
        # everywhere below, hence the n + 1 sizing in setup().
        self.options.declare('n', default=1, types=int, desc='number of diffusion nodes')
    def setup(self):
        n = self.options['n'] + 1
        # Unknown: nodal temperatures T (K).
        self.add_output('T', val=np.zeros(n), units='K')
        # GL: linear (conduction) conductor matrix, W/K.
        self.add_input('GL', val=np.zeros((n,n)), units='W/K')
        # GR: radiative exchange matrix, multiplies T**4 (units omitted).
        self.add_input('GR', val=np.zeros((n,n)))
        # QS / QI: solar and internal heat loads per node, W.
        self.add_input('QS', val=np.zeros(n), units='W')
        self.add_input('QI', val=np.zeros(n), units='W')
        self.declare_partials(of='T', wrt='*')
    def apply_nonlinear(self, inputs, outputs, residuals):
        GL = inputs['GL']
        GR = inputs['GR']
        QS = inputs['QS']
        QI = inputs['QI']
        T = outputs['T']
        # Steady-state heat balance residual: GL*T + GR*T^4 + QS + QI = 0.
        residuals['T'] = GL.dot(T) + GR.dot(T**4) + QS + QI
    def linearize(self,
        inputs, outputs, partials):
        n = self.options['n'] + 1
        GL = inputs['GL']
        GR = inputs['GR']
        QS = inputs['QS']  # NOTE(review): unused in the partials below
        QI = inputs['QI']  # NOTE(review): unused in the partials below
        T = outputs['T']
        # dR_i/dGL_jk = delta_ij * T_k ; dR_i/dGR_jk = delta_ij * T_k**4.
        partials['T', 'GL'] = np.einsum('ij, k', np.eye(n, n), T)
        partials['T', 'GR'] = np.einsum('ij, k', np.eye(n, n), T**4)
        partials['T', 'QS'] = np.eye(n, n)
        partials['T', 'QI'] = np.eye(n, n)
        # dR_i/dT_j = GL_ij + 4 * GR_ij * T_j**3 (broadcast over columns).
        partials['T', 'T'] = (GL + (4 * (GR * (T ** 3)[np.newaxis, :])))
    def guess_nonlinear(self, inputs, outputs, residuals):
        n = self.options['n'] + 1
        # Initial guess: uniform 223 K (-50 degC) across all nodes.
        outputs['T'] = -np.ones(n)*50 + 273
if __name__ == "__main__":
    # Standalone check: build a model from CSV-defined nodes/conductors,
    # solve for the steady-state temperatures and verify analytic partials.
    from inits import inits
    problem = om.Problem()
    model = problem.model
    nodes = 'Nodal_data.csv'
    conductors = 'Cond_data.csv'
    n, GL_init, GR_init, QI_init, QS_init = inits(nodes, conductors)
    indeps = model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
    indeps.add_output('GL', val=GL_init, units='W/K')
    indeps.add_output('GR', val=GR_init)
    indeps.add_output('QS', val=QS_init, units='W')
    indeps.add_output('QI', val=QI_init, units='W')
    model.add_subsystem('tmm', TempComp(n=n), promotes=['*'])
    # Newton with an Armijo-Goldstein line search for the T**4 nonlinearity.
    model.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)
    model.nonlinear_solver.options['iprint'] = 2
    model.nonlinear_solver.options['maxiter'] = 50
    model.nonlinear_solver.linesearch = om.ArmijoGoldsteinLS()
    model.nonlinear_solver.linesearch.options['maxiter'] = 10
    model.nonlinear_solver.linesearch.options['iprint'] = 2
    model.linear_solver = om.DirectSolver()
    problem.setup(check=True)
    problem.run_model()
    # Report the solved temperatures in degrees Celsius.
    print(problem['T']-273.15)
    check_partials_data = problem.check_partials(compact_print=True, show_only_incorrect=True, form='central', step=1e-3)
    #problem.model.list_inputs(print_arrays=True, includes=['*G*'])
adominichini/turnodigital | 13,503,377,189,834 | a358c267f82690ed674ef6d514117fe747d6a749 | 5801ad76e11ad8496d79f86a247af1678d1fdc24 | /app/urls.py | 074c56cfbe56cb5bc51f6cc043c539180b2d96ba | []
| no_license | https://github.com/adominichini/turnodigital | db9418941df14a5e17d35375e0dd8998a9692578 | 4a61ef50f8f1aa8441779fa7c799f1c5c8cce9f7 | refs/heads/master | 2018-12-20T19:19:42.711243 | 2016-05-12T18:21:24 | 2016-05-12T18:21:24 | 40,140,869 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf.urls.static import static
from django.conf.urls import include, url
from django.contrib import admin
from rest_framework import routers
from api.views import *
from app import settings
# DRF router exposing the REST API viewsets under /api/.
router = routers.DefaultRouter()
router.register(r'companies', CompanyViewSet)
router.register(r'staffs', StaffViewSet)
router.register(r'appointments', AppointmentViewSet)
router.register(r'proficiencies', ProficiencyViewSet)
router.register(r'headquarters', HeadquartersViewSet)
router.register(r'clients', ClientViewSet)
router.register(r'users', UserViewSet)
router.register(r'medicalcares', MedicalcareViewSet)
router.register(r'referents', ReferentViewSet)
# URL routes; views are referenced by dotted string (old Django <=1.9 style).
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    # API
    url(r'^api/', include(router.urls)),
    url(r'^changePassword/$', 'app.views.change_password', name='change_password'),
    #url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    # App
    url(r'^$', 'app.views.index', name='index'),
    url(r'^build$', 'app.views.build', name='build'),
    url(r'^login/$', 'app.views.log_in', name='login'),
    url(r'^logout/$', 'app.views.log_out', name='logout'),
    # url(r'^search/$', 'app.views.search', name='search'),
    url(r'^register/$', 'app.views.register', name='setup'),
    url(r'^mycompany/$', 'app.views.my_company', name='my_company'),
    url(r'^getAppointmentCollections/$', 'app.views.get_appointment_collections', name='calendar_collections'),
    url(r'^getCalendarAppointments/$', 'app.views.get_calendar_appointments', name='calendar_appointments'),
    url(r'^getLoggedUser/$', 'app.views.get_logged_user', name='get_user'),
    url(r'^exportCSV/$', 'app.views.export_csv', name='export_csv'),
    url(r'^exportPDF/$', 'app.views.export_pdf', name='export_pdf'),
    # Wizard
    # NOTE(review): pattern below lacks the '^' anchor used everywhere else,
    # so any URL ENDING in "wizard/" matches -- confirm this is intended.
    url(r'wizard/$', 'app.views.wizard', name='wizard'),
    url(r'^wizard/createStaffMember/$', 'app.views.create_staff_member', name='create_staff_member'),
    url(r'^wizard/createProfAndAssign/$', 'app.views.create_prof_and_assign', name='create_prof_and_assign'),
    # Public
    url(r'^frontpage/$', 'app.views.frontpage', name='frontpage'),
    url(r'^public/getCalendarFilters/$', 'app.views.public_get_calendar_filters', name='public_calendar_filters'),
    url(r'^public/getCalendarAppointments/$', 'app.views.public_get_calendar_appointments', name='public_calendar_appointments'),
    url(r'^public/bookAppointment/$', 'app.views.public_book_appointment', name='public_book_appointment'),
    # Etc
    url(r'^testmail/$', 'app.views.test_mail', name='test_mail'),
]
# In development, serve static asset directories directly.
if settings.DEBUG:
    urlpatterns += static('assets/', document_root=settings.BASE_DIR + '/static/assets/')
    urlpatterns += static('pages/', document_root=settings.BASE_DIR + '/static/pages/')
    urlpatterns += static('tpl/', document_root=settings.BASE_DIR + '/static/tpl/')
hwang033/job_algorithm | 17,772,574,704,254 | e099f0e45e4292f757e063c51d814ae7ea6065e0 | 61d03b29778df041318bb43758d98186f70a5319 | /py/longest_palindromic_substring.py | 67138302a11600d5120f5ebdb5a539dc315ce478 | []
| no_license | https://github.com/hwang033/job_algorithm | 9829f33eb5c9d5f7bb0083676bad95550b79f06a | 199557a34c1b3820757e68bf031c2c44223fa9bb | refs/heads/master | 2016-09-05T19:32:32.903991 | 2015-08-14T01:23:47 | 2015-08-14T01:23:47 | 38,217,155 | 1 | 0 | null | false | 2016-02-21T23:45:37 | 2015-06-28T22:49:39 | 2015-08-07T19:23:30 | 2016-02-21T23:44:53 | 681 | 1 | 0 | 1 | Python | null | null | import pdb
class Solution:
    """LeetCode 5: longest palindromic substring."""

    def longestPalindrome(self, s):
        """Return the longest palindromic substring of *s*.

        Groups the indices of each character, then for every pair of equal
        characters checks whether the enclosed slice is a palindrome,
        keeping the longest one found (first found wins on ties).
        A superseded DP implementation that used to sit here as a dead
        string literal has been removed.

        :param s: input string (may be empty)
        :return: longest palindromic substring of s
        """
        if not s:
            return s
        # char_pos[c] = ascending list of indices where character c occurs.
        char_pos = {}
        for idx, char in enumerate(s):
            char_pos.setdefault(char, []).append(idx)
        best_len = 0
        best = s[0]
        for pos in char_pos.values():
            while pos:
                idx = pos.pop()
                for prev_idx in pos:
                    # Only equal end characters can bound a palindrome.
                    if idx - prev_idx + 1 > best_len and self.is_palindrome(s[prev_idx:idx + 1]):
                        best_len = idx - prev_idx + 1
                        best = s[prev_idx:idx + 1]
        return best

    def is_palindrome(self, s):
        """True when *s* reads the same forwards and backwards."""
        return s == s[::-1]
def main():
s = Solution()
print s.longestPalindrome("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")
print s.longestPalindrome("accbac")
print s.longestPalindrome("mwwfjysbkebpdjyabcfkgprtxpwvhglddhmvaprcvrnuxifcrjpdgnktvmggmguiiquibmtviwjsqwtchkqgxqwljouunurcdtoeygdqmijdympcamawnlzsxucbpqtuwkjfqnzvvvigifyvymfhtppqamlgjozvebygkxawcbwtouaankxsjrteeijpuzbsfsjwxejtfrancoekxgfyangvzjkdskhssdjvkvdskjtiybqgsmpxmghvvicmjxqtxdowkjhmlnfcpbtwvtmjhnzntxyfxyinmqzivxkwigkondghzmbioelmepgfttczskvqfejfiibxjcuyevvpawybcvvxtxycrfbcnpvkzryrqujqaqhoagdmofgdcbhvlwgwmsmhomknbanvntspvvhvccedzzngdywuccxrnzbtchisdwsrfdqpcwknwqvalczznilujdrlevncdsyuhnpmheukottewtkuzhookcsvctsqwwdvfjxifpfsqxpmpwospndozcdbfhselfdltmpujlnhfzjcgnbgprvopxklmlgrlbldzpnkhvhkybpgtzipzotrgzkdrqntnuaqyaplcybqyvidwcfcuxinchretgvfaepmgilbrtxgqoddzyjmmupkjqcypdpfhpkhitfegickfszermqhkwmffdizeoprmnlzbjcwfnqyvmhtdekmfhqwaftlyydirjnojbrieutjhymfpflsfemkqsoewbojwluqdckmzixwxufrdpqnwvwpbavosnvjqxqbosctttxvsbmqpnolfmapywtpfaotzmyjwnd")
print s.longestPalindrome("flsuqzhtcahnyickkgtfnlyzwjuiwqiexthpzvcweqzeqpmqwkydhsfipcdrsjkefehhesubkirhalgnevjugfohwnlhbjfewiunlgmomxkafuuokesvfmcnvseixkkzekuinmcbmttzgsqeqbrtlwyqgiquyylaswlgfflrezaxtjobltcnpjsaslyviviosxorjsfncqirsjpkgajkfpoxxmvsyynbbovieoothpjgncfwcvpkvjcmrcuoronrfjcppbisqbzkgpnycqljpjlgeciaqrnqyxzedzkqpqsszovkgtcgxqgkflpmrikksaupukdvkzbltvefitdegnlmzeirotrfeaueqpzppnsjpspgomyezrlxsqlfcjrkglyvzvqakhtvfmeootbtbwfhqucbnuwznigoyatvkocqmbtqghybwrhmyvvuchjpvjckiryvjfxabezchynfxnpqaeampvaapgmvoylyutymdhvhqfmrlmzkhuhupizqiujpwzarnszrexpvgdmtoxvjygjpmiadzdcxtggwamkbwrkeplesupagievwsaaletcuxtpsxmbmeztcylsjxvhzrqizdmgjfyftpzpgxateopwvynljzffszkzzqgofdlwyknqfruhdkvmvrrjpijcjomnrjjubfccaypkpfokohvkqndptciqqiscvmpozlyyrwobeuazsawtimnawquogrohcrnmexiwvjxgwhmtpykqlcfacuadyhaotmmxevqwarppknoxthsmrrknu")
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 3,705 | py | 147 | longest_palindromic_substring.py | 144 | 0.669096 | 0.660729 | 0 | 62 | 58.758065 | 872 |
Gjgarr/projEuler | 481,036,374,795 | 1ffb0af5e4ae593151d0b6b1f69d95b8ff615a8a | 76760f03437548e3e2fe81efa667ea4d2fb149f8 | /solved/und1sec/p5.py | fe96d22d9e289b18a7a9ffecfa3f9407c0ac21a9 | []
| no_license | https://github.com/Gjgarr/projEuler | 5e85957ae87a21e44444a301ae0eb79e8d02d220 | 618adfc20859cac39a7e9e3ddac50358a6cce9d0 | refs/heads/master | 2019-01-02T23:11:45.385669 | 2016-05-31T08:19:48 | 2016-05-31T08:19:48 | 40,143,870 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.
#What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?
import time
def div_by1_to20():
num = 2520
while any(num % i for i in [11, 13, 14, 16, 17, 18, 19, 20]):
num += 2520
return num
def ans(times):
l = []
for k in xrange(times):
start_t = time.time()
z = div_by1_to20()
end_t = time.time()
time_taken = end_t - start_t
l.append(time_taken)
return sorted(l)
| UTF-8 | Python | false | false | 585 | py | 59 | p5.py | 59 | 0.610256 | 0.54188 | 0 | 20 | 28.25 | 107 |
Abutalib44/django-ITI-proj | 13,305,808,700,163 | 15eb55d3a6014615c3cafe161e7d6973daa5107b | 51dec6e05cb877578b722f6e21e100cbec7e7031 | /AppProject/urls.py | 6870515fb8d899706d839954f760b4d7568db183 | []
| no_license | https://github.com/Abutalib44/django-ITI-proj | 6b90bfbd219ef7a18a07ae0744d25c3d383f4a95 | fc04df84ce1c1a9b7748a357fbf0a80962475c17 | refs/heads/master | 2023-04-04T08:57:47.516264 | 2021-03-30T21:49:04 | 2021-03-30T21:49:04 | 353,146,369 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.urls import path,include
from .views import createProject, create_tags
app_name="AppProject"
urlpatterns = [
path('create/', createProject, name='create'),
path("create_tag/", create_tags, name='create_tag')
]
| UTF-8 | Python | false | false | 233 | py | 40 | urls.py | 29 | 0.712446 | 0.712446 | 0 | 9 | 24.777778 | 55 |
Ptolemy-DL/Ptolemy | 16,097,537,428,928 | 8ada95c938b90407ffdca2782f193f2ec77c1256 | bc8509d57a162fb685da06a98c67dc8130d96316 | /src/nninst/plot/heatmap_alexnet_imagenet_inter_class_similarity_frequency.py | 2ce0692dbb9dbc53c64f62fdadf855b89afbf67f | [
"Apache-2.0"
]
| permissive | https://github.com/Ptolemy-DL/Ptolemy | 2065e2d157d641010567062410bee4608691d059 | f72a531286d17c69e0e2e84d0ad8a5b0587e2e08 | refs/heads/master | 2023-05-29T08:58:18.328258 | 2021-06-15T09:28:16 | 2021-06-15T09:28:16 | 284,590,756 | 115 | 5 | NOASSERTION | false | 2020-10-24T04:18:51 | 2020-08-03T03:06:35 | 2020-10-17T14:45:11 | 2020-10-24T04:18:50 | 1,734 | 5 | 1 | 0 | Python | false | false | import numpy as np
import pandas as pd
import seaborn as sns
from nninst.backend.tensorflow.model import AlexNet
from nninst.backend.tensorflow.trace.alexnet_imagenet_inter_class_similarity import (
alexnet_imagenet_inter_class_similarity_frequency,
)
from nninst.op import Conv2dOp, DenseOp
np.random.seed(0)
sns.set()
threshold = 0.5
frequency = int(500 * 0.1)
label = "import"
variant = None
base_name = f"alexnet_imagenet_inter_class_similarity_frequency_{frequency}"
cmap = "Greens"
same_class_similarity = []
diff_class_similarity = []
layer_names = []
layers = AlexNet.graph().load().ops_in_layers(Conv2dOp, DenseOp)
for layer_name in [
None,
*layers,
]:
similarity = alexnet_imagenet_inter_class_similarity_frequency(
threshold, frequency, label, variant=variant, layer_name=layer_name
).load()
same_class_similarity.append(
np.mean(similarity[np.eye(similarity.shape[0], dtype=bool)])
)
diff_class_similarity.append(
np.mean(
similarity[
np.tri(similarity.shape[0], similarity.shape[1], k=-1, dtype=bool)
]
)
)
if layer_name is None:
file_name = base_name
layer_names.append("All")
else:
file_name = base_name + "_" + layer_name[: layer_name.index("/")]
layer_names.append(layer_name[: layer_name.index("/")])
plot_array = np.around(similarity, decimals=2)
ax = sns.heatmap(plot_array, cmap=cmap, vmax=plot_array.max(), annot=True)
ax.set(xlabel="Class", ylabel="Class")
fig = ax.get_figure()
# fig.savefig(f"{file_name}.pdf", bbox_inches="tight")
fig.savefig(f"{file_name}.png", bbox_inches="tight")
# np.savetxt(f"{file_name}.csv", similarity, delimiter=",")
fig.clf()
for layer_name, similarity in zip(
["avg", "first_half", "second_half"],
[
np.mean(
[
alexnet_imagenet_inter_class_similarity_frequency(
threshold, frequency, label, variant=variant, layer_name=layer
).load()
for layer in layers
],
axis=0,
),
# np.mean([alexnet_imagenet_inter_class_similarity_frequency(
# threshold, frequency, label, variant=variant, layer_name=layer
# ).load()
# for layer in layers[:len(layers) // 2]], axis=0),
# np.mean([alexnet_imagenet_inter_class_similarity_frequency(
# threshold, frequency, label, variant=variant, layer_name=layer
# ).load()
# for layer in layers[len(layers) // 2:]], axis=0),
],
):
file_name = base_name + "_" + layer_name
plot_array = np.around(similarity, decimals=2)
ax = sns.heatmap(plot_array, cmap=cmap, vmax=plot_array.max(), annot=True)
ax.set(xlabel="Class", ylabel="Class")
fig = ax.get_figure()
# fig.savefig(f"{file_name}.pdf", bbox_inches="tight")
fig.savefig(f"{file_name}.png", bbox_inches="tight")
# np.savetxt(f"{file_name}.csv", similarity, delimiter=",")
fig.clf()
summary_df = pd.DataFrame(
{
"Same Class": same_class_similarity,
"Diff Class": diff_class_similarity,
"Layer": layer_names,
}
)
summary_df.to_csv(f"{base_name}_summary.csv", index=False)
| UTF-8 | Python | false | false | 3,277 | py | 143 | heatmap_alexnet_imagenet_inter_class_similarity_frequency.py | 130 | 0.616417 | 0.610009 | 0 | 98 | 32.438776 | 85 |
bagreve/Nivelacion | 7,069,516,178,840 | 9630aa5321205638e6ea0ed7ca6edb0364e2dd50 | c329622b727d24e6305f4c6868b86e8708713871 | /22082019/000900.py | 65aa6ed9c953ad5fa8b8e50cb99baaf12f03694a | []
| no_license | https://github.com/bagreve/Nivelacion | ee9c09a4dd7d83a7b5b4c2cbdd0bf908c779e8de | 32ff794e828ed9745d808d3597c5ba37446f1539 | refs/heads/master | 2020-07-06T16:42:07.294339 | 2019-08-31T23:49:58 | 2019-08-31T23:49:58 | 203,082,083 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # La actividad final trata sobre encontrar la suma de todos los numeros multiplos de 3 y 5
# y que la suma sea menos que 100
lista3 = [] # Se crean dos listas vacias para llenarlas con los multiplos
lista5 = [] # de 3 y 5
sumatotal = 0 # Se crea un contador para revisar que la restriccion de bajo 100 se cumpla,
for i in range(1,1000): # con un for range se crea una lista de tamano x, con el fin de recorrer
if sumatotal + i < 100: # todos los numeros naturales. Luego se pone la restriccion que la sumatotal
if i % 3 == 0: # mas el elemento que se esta corriendo de la lista sea menor que 100 para
lista3.append(i) # no tener que pasar por todos los numeros de la lista.
sumatotal += i # Se chequea que el elemento i sea multiplo de 3 o 5 y se agrega al final de la
elif i % 5 == 0: # lista mientras se suma a la suma total
lista5.append(i)
sumatotal += i
elif sumatotal > 100: # Si sumatotal es mayor que 100, el for sufre un quiebre y se detiene.
break
print lista3 # Se imprime la lista 3
print lista5 # Se imprime la lista 4
print sumatotal # Se imprime la sumatotal
| UTF-8 | Python | false | false | 1,154 | py | 22 | 000900.py | 22 | 0.686308 | 0.649913 | 0 | 21 | 53.809524 | 103 |
ZionDeng/LeetPythonCode | 2,405,181,734,391 | b38a7479f896003d43d4b2eb0307ead4bf6dde7b | ebfcfb7d9ff07d71bff481cc4c1eb09db8b4a6ed | /List_problems/T922_sortArrayByParity.py | ed7cbbb3ea6c895ddb47a7a794653d61653b7c1a | []
| no_license | https://github.com/ZionDeng/LeetPythonCode | 3d23f796135b21fc7e5233c2e4dbfa953f1680c7 | c507f684dc98986dfc2d5f80d324964874f0f221 | refs/heads/master | 2023-02-18T08:31:15.971844 | 2021-01-15T08:46:16 | 2021-01-15T08:46:16 | 317,254,598 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from typing import List
class Solution:
def sortArrayByParityII(self, A: List[int]) -> List[int]:
# p_odd = 1
# p_even = 0
# n = len(A)
# while p_odd < n and p_even < n:
# try:
# while A[p_odd] % 2 == 1:
# p_odd += 2
# while A[p_even] % 2 == 0 :
# p_even += 2
# A[p_odd], A[p_even] = A[p_even], A[p_odd]
# except:
# break
# return A
ou = [i for i in A if i % 2]
ji = [i for i in A if not i % 2]
return [i for n in zip(ji, ou) for i in n]
if __name__ == "__main__":
s = Solution()
print(s.sortArrayByParityII([4,6,5,7,4,8,7,3,0,5]))
| UTF-8 | Python | false | false | 740 | py | 127 | T922_sortArrayByParity.py | 123 | 0.405405 | 0.378378 | 0 | 26 | 27.461538 | 61 |
JuanBrugera/UNIR-DevOps-CP1B | 15,375,982,967,009 | 1f9e8d25e399a46f21617abf8d89dedc92e4bffb | afcfbfeabe015b577c4420eff2aca0d9ecb691bc | /src/todoTable.py | b88c6f1c8de6553306476e2dbb86007c7e6e1da5 | []
| no_license | https://github.com/JuanBrugera/UNIR-DevOps-CP1B | ed8371d47337a9037aeea468e8172e414e360654 | 293af9dbf83db110a31dec4c4e288fd53387e9f0 | refs/heads/master | 2023-02-26T16:45:42.761602 | 2021-02-07T18:01:35 | 2021-02-07T18:01:35 | 336,852,760 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
import uuid
from typing import List
import boto3
class TodoTable(object):
def __init__(self, table, dynamodb=None):
self.tableName = table
if dynamodb:
self.dynamodb = dynamodb
else:
self.dynamodb = boto3.resource('dynamodb',
endpoint_url='http://localhost:8000'
)
self.table = self.dynamodb.Table(self.tableName)
def create_todo_table(self):
table = self.dynamodb.create_table(
TableName=self.tableName,
KeySchema=[
{
'AttributeName': 'id',
'KeyType': 'HASH'
}
],
AttributeDefinitions=[
{
'AttributeName': 'id',
'AttributeType': 'S'
}
],
ProvisionedThroughput={
'ReadCapacityUnits': 1,
'WriteCapacityUnits': 1
}
)
# Wait until the table exists.
table.meta.client.get_waiter(
'table_exists').wait(TableName=self.tableName)
if table.table_status != 'ACTIVE':
raise AssertionError()
return table
def delete_todo_table(self):
self.table.delete()
return True
def get_todo(self, id: str) -> dict:
result = self.table.get_item(
Key={
'id': id
}
)
return result['Item']
def put_todo(self, text: str, id: str = None) -> dict:
timestamp = str(time.time())
item = {
'id': id if id else str(uuid.uuid1()),
'text': text,
'checked': False,
'createdAt': timestamp,
'updatedAt': timestamp,
}
self.table.put_item(Item=item)
return item
def update_todo(self, id: str, text: str, checked: bool) -> dict:
timestamp = int(time.time() * 1000)
result = self.table.update_item(
Key={
'id': id
},
ExpressionAttributeNames={
'#todo_text': 'text',
},
ExpressionAttributeValues={
':text': text,
':checked': checked,
':updatedAt': timestamp,
},
UpdateExpression='SET #todo_text = :text, '
'checked = :checked, '
'updatedAt = :updatedAt',
ReturnValues='ALL_NEW',
)
return result['Attributes']
def delete_todo(self, id: str) -> bool:
self.table.delete_item(
Key={
'id': id
}
)
return True
def scan_todo(self) -> List[dict]:
scan = self.table.scan()
return scan['Items']
| UTF-8 | Python | false | false | 2,898 | py | 14 | todoTable.py | 8 | 0.452381 | 0.447895 | 0 | 105 | 26.6 | 79 |
pipistar2017/python-django2.0.0-trainningProject | 12,893,491,826,258 | 290c166e358f56b5664fc4102bf9433f0ea2f4b0 | 94c502cb1019002d414be9a1d1374ac582c951ed | /lesson/models.py | 26f273c9272b1b6c77d6ce449b6a49f49dfdd908 | []
| no_license | https://github.com/pipistar2017/python-django2.0.0-trainningProject | c0833150d599aff03edb3ae6b5f28db15c4fe47a | 00f75428ca0a5638fd815918d99fd9e6ea1ec046 | refs/heads/master | 2020-04-14T19:43:53.145992 | 2019-01-04T07:13:48 | 2019-01-04T07:13:48 | 164,069,204 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
class Lesson(models.Model):
lesson_id = models.AutoField(primary_key=True)
lesson_title = models.CharField(max_length=255)
content = models.CharField(max_length=255)
link_info = models.CharField(max_length=255)
attachment = models.CharField(max_length=255)
lesson_code = models.CharField(max_length=30)
lesson_status = models.CharField(max_length=2)
lesson_type = models.CharField(max_length=255)
realm_account = models.CharField(max_length=30)
start_time = models.DateTimeField(default=None)
end_time = models.DateTimeField(default=None)
address = models.CharField(default=None, max_length=255)
signIn_code = models.CharField(max_length=10)
publish_range = models.CharField(max_length=255)
score = models.CharField(max_length=10)
marks = models.CharField(max_length=10)
avgScore = models.CharField(max_length=10)
def __str__(self):
return '%d %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s' % (self.lesson_id, self.lesson_title, self.content, self.link_info,
self.attachment,self.lesson_code,self.lesson_status,self.lesson_type,
self.realm_account,self.start_time,self.end_time,self.address,
self.signIn_code, self.publish_range, self.score, self.marks, self.avgScore)
class Signup(models.Model):
id = models.AutoField(primary_key=True)
lesson_id = models.IntegerField()
realm_account = models.CharField(max_length=30)
sign_status= models.CharField(max_length=1)
score= models.CharField(max_length=10)
evaluate_desc= models.CharField(max_length=1000)
def __str__(self):
return '%d %d %s %s %s %s' % (self.id,self.lesson_id,self.realm_account,self.sign_status
, self.score, self.evaluate_desc)
class Course(models.Model):
id = models.AutoField(primary_key=True)
lesson_id = models.IntegerField()
realm_account = models.CharField(max_length=30)
path = models.CharField(max_length=255)
def __str__(self):
return '%d %d %s %s' % (self.id, self.lesson_id, self.realm_account, self.path)
| UTF-8 | Python | false | false | 2,197 | py | 13 | models.py | 13 | 0.658625 | 0.636777 | 0 | 47 | 45.744681 | 135 |
ksenia-krasheninnikova/genome_comp | 8,993,661,555,769 | 4d100aea34a3fc0abaf4b27cc07c274f120f0dab | ba4d302ed6a51601c4def05100563483f3af93e8 | /bin/draw_exons_distribution | 37b71cddc7b6ae77473484b89fd69ec66b449fe6 | []
| no_license | https://github.com/ksenia-krasheninnikova/genome_comp | c4b5be6e06b719930678ba9d59dc3e09799f7287 | 02a6d4b7334d1e72b0b4e07f0499f6b00af21093 | refs/heads/master | 2021-06-08T19:07:20.357701 | 2016-11-18T13:59:11 | 2016-11-18T13:59:11 | 43,004,304 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/hive/groups/recon/local/bin/python
import bisect
import sys
import argparse
import numpy
from collections import Counter
import matplotlib
matplotlib.use('Agg')
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import os
def draw_distr(path, size2quantity_1, size2quantity_2, size2quantity_3):
pp=PdfPages(os.path.join(path,'plot.pdf'))
plt.xlabel('Exon Length, bp')
plt.ylabel('Quantity')
plt.plot(size2quantity_1.keys(),size2quantity_1.values(),'ro', label='domestic cat', color='crimson', markersize=3)
plt.plot(size2quantity_2.keys(),size2quantity_2.values(),'ro', label='human', color='darkkhaki', markersize=3)
plt.plot(size2quantity_3.keys(),size2quantity_3.values(),'ro', label='mouse', color='plum', markersize=3)
plt.legend()
#plt.legend(loc=1, borderaxespad=0., fontsize=5)
plt.savefig(pp, format='pdf')
pp.close()
def get_exons(path):
exons = []
with open(path) as f:
for line in f:
line = line.strip()
if line[0] == '#':
continue
line=line.split()
if line[2] == 'exon':
length = int(line[4]) - int(line[3])
if length < 1000:
exons.append(length)
return Counter(exons)
if __name__ == '__main__' :
parser = argparse.ArgumentParser()
parser.add_argument('path_to_gtf_1')
parser.add_argument('path_to_gtf_2')
parser.add_argument('path_to_gtf_3')
args = parser.parse_args()
size2quantity_1 = get_exons(args.path_to_gtf_1)
size2quantity_2 = get_exons(args.path_to_gtf_2)
size2quantity_3 = get_exons(args.path_to_gtf_3)
draw_distr(os.path.dirname(args.path_to_gtf_1), size2quantity_1, size2quantity_2, size2quantity_3)
| UTF-8 | Python | false | false | 1,783 | 26 | draw_exons_distribution | 24 | 0.645541 | 0.616938 | 0 | 49 | 35.387755 | 119 |
|
kareemsuhail/nws_project_1 | 15,942,918,641,658 | 9603a6d6748e6e660fa6629a730b7524ba346ba2 | 1140c0fd00d208721fc0e9ee58458399f40a4b94 | /ClientThread.py | 3da8fc676cf511ef109f95d7f711089b8da31207 | []
| no_license | https://github.com/kareemsuhail/nws_project_1 | 0c3badc1381941af1f7d8c5084edb4dc34d5eb43 | 1d89ae9334d2bc76c6d503d6480eca7621dd3837 | refs/heads/master | 2021-04-26T22:16:57.193016 | 2018-03-09T19:35:06 | 2018-03-09T19:35:06 | 124,060,115 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from threading import Thread
from textblob import TextBlob
import json
class ClientThread(Thread):
def __init__(self, ip, port,conn,connected_users,threads,groups):
Thread.__init__(self)
self.ip = ip
self.port = port
self.conn = conn
self.connected_users = connected_users
self.response = {}
self.responseBytes = b''
self.server_threads = threads
self.groups = groups
print("[+] new client has connected " + ip + ":" + str(port))
def run(self):
while True:
data = self.conn.recv(2048)
data_dictionary = json.loads(data.decode('utf8').replace("'", '"'))
if (data_dictionary['type'] == 'command'):
self.execute_command(data_dictionary)
continue
print(data_dictionary)
if data_dictionary['username'] == 'unknown':
self.prepare_msg("sorry you have to specify your username", 'server', 'info', 'failed',
data_dictionary['username'])
self.conn.send(self.responseBytes)
continue
if data_dictionary['msgTo'] != 'unknown' and data_dictionary['rec'] == 'person':
temp = TextBlob((data_dictionary['msg']))
if temp.sentiment.polarity != 0 :
print("{} is {}".format(data_dictionary['username'],"happy" if temp.sentiment.polarity > 0 else "sad"))
temp = temp.correct()
data_dictionary['msg'] = temp.string
msg = bytes(json.dumps(data_dictionary), 'utf-8')
self.server_threads[self.connected_users[data_dictionary['msgTo']]
].conn.send(msg)
elif data_dictionary['msgTo'] != 'unknown' and data_dictionary['rec'] == 'group':
msg = bytes(json.dumps(data_dictionary), 'utf-8')
data_dictionary['msg'] = TextBlob(data_dictionary['msg']).correct().string
for clientThread in self.groups[data_dictionary['msgTo']]:
if clientThread != self:
clientThread.conn.send(msg)
else:
self.prepare_msg("sorry you have to specify msg receiver", 'server', 'info', 'failed', data_dictionary['username'])
self.conn.send(self.responseBytes)
def execute_command(self,data_dictionary):
command = data_dictionary['msg']
if command.startswith("**set_username"):
username = command[len('**set_username'):].strip()
if username not in self.connected_users :
print("{} has been connected".format(username))
user_data = str(data_dictionary['s_ip']+":"+str(self.port))
self.connected_users[username] = user_data
self.prepare_msg("**set_username",'server','command','success',username)
self.conn.send(self.responseBytes)
else:
self.prepare_msg("sorry {} is already taken".format(username),'server','info','failed',username)
self.conn.send(self.responseBytes)
elif command.startswith("**create_group"):
group = command[len('**create_group'):].strip()
if group not in self.groups:
print("{} group has been created".format(group))
self.groups[group] = []
self.prepare_msg("group {} has been created".format(group), 'server', 'info', 'success', group)
self.conn.send(self.responseBytes)
else:
self.prepare_msg("sorry group {} is already exists".format(group), 'server', 'info', 'failed', group)
self.conn.send(self.responseBytes)
elif command.startswith("**connect_to_group"):
group = command[len('**connect_to_group'):].strip()
if group in self.groups:
print("{} has connected to group {}".format(data_dictionary['username'],group))
self.groups[group].append(self.server_threads[self.connected_users[data_dictionary['username']]])
self.prepare_msg("you are now connected to group {}".format(group), 'server', 'info', 'success', group)
self.conn.send(self.responseBytes)
else:
self.prepare_msg("sorry group {} is not exists".format(group), 'server', 'info', 'failed', group)
self.conn.send(self.responseBytes)
else:
self.prepare_msg("sorry this is unknown command", 'server', 'info', 'failed', ' ')
self.conn.send(self.responseBytes)
def prepare_msg(self,msg,username,type,status,data):
self.response['msg'] = msg
self.response['username'] = username
self.response['type'] = type
self.response['status'] = status
self.response['data'] = data
self.responseBytes = bytes(json.dumps(self.response),"utf-8")
| UTF-8 | Python | false | false | 4,955 | py | 5 | ClientThread.py | 5 | 0.565288 | 0.563269 | 0 | 98 | 49.55102 | 131 |
ZacharyKarry/RegEx | 9,165,460,251,140 | 4ecff1b1f91a1370e3b21869d08ba39885e186be | 55d0fe4d6f45e83e2ce0d37d951e89047798e9e5 | /test_regex_functions.py | 4f8beb92d3d52c0baa0a640a18a07f5bf25461bd | []
| no_license | https://github.com/ZacharyKarry/RegEx | 7ef71f0e8f6c0eb31b864ca97505989e7e4c7bc9 | d9c3c9814e54719a34573dd60ff184e2777dcda5 | refs/heads/master | 2020-05-14T12:02:51.605397 | 2019-04-17T00:31:42 | 2019-04-17T00:31:42 | 181,787,849 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # test solutions for A2, Part 2
from unittest import TestCase, main, TestLoader, TextTestRunner, TestResult
from regex_functions import is_regex, all_regex_permutations
from regex_functions import build_regex_tree, regex_match
from regextree import Leaf, DotTree, BarTree, StarTree, RegexTree
# brief aliases for regular expression tree classes
# and leaf instances
L, B, D, S = Leaf, BarTree, DotTree, StarTree
LE, L0, L1, L2 = L('e'), L('0'), L('1'), L('2')
class TestRegexMatch(TestCase):
# TODO: separate 7 test methods for examples involving only the symbols from
# one of the nonempty subsets of {*,|,.}
# OK --- add fail test method for each ---> 14 methods
# then add test match/non match for leaves --> 16 methods total
# Perhaps methods for a single {*, |, .} should be separate from multiple ones?
def setUp(self: 'TestRegexMatch') -> None:
pass
def tearDown(self: 'TestRegexMatch') -> None:
"""Clean up test case"""
pass
def test_leaf_ok(self: 'TestRegexMatch') -> None:
"""Correctly matches leaf regexes?"""
leaf_list = [(LE, ''), (L0, '0'), (L1, '1'), (L2, '2')]
for t in leaf_list:
self.assertTrue(regex_match(t[0], t[1]),
"Rejects valid match: {}".format(t))
def test_leaf_fail(self: 'TestRegexMatch') -> None:
"""Correct rejects near-leaves?"""
nearly_leaf_list = [(LE, 'e'), (L0, '(0)'), (LE, '()'), (L1, '11'),
(L1, '3')]
for t in nearly_leaf_list:
self.assertFalse(regex_match(t[0], t[1]),
"Accepts invalid match: {}".format(t))
def test_edge_empty_ok(self: 'TestRegexMatch') -> None:
"""Correctly matches various matches of empty string?"""
empty_string_list = [(D(LE, LE), ''), (B(L0, LE), ''), (S(L2), ''),
(S(LE), '')]
for t in empty_string_list:
self.assertTrue(regex_match(t[0], t[1]),
"Rejects valid match: {}".format(t))
def test_dot_okay(self: 'TestRegexMatch') -> None:
"""Correctly matches dotted regexes?"""
dot_list = [(D(L1, LE), '1'), (D(LE, L2), '2'), (D(L1, L1), '11'),
(D(L0, L2), '02')]
for t in dot_list:
self.assertTrue(regex_match(t[0], t[1]),
"Rejects valid match: {}".format(t))
def test_dot_fail(self: 'TestRegexMatch') -> None:
"""Correctly rejects near-dots?"""
nearly_dot_list = [(D(L1, L0), '1'), (D(L1, L0), '102'),
(D(L1, L0), '1.0'), (D(L1, L2), '(12)'),
(D(L1, L2), '(1.2)')]
for t in nearly_dot_list:
self.assertFalse(regex_match(t[0], t[1]),
"Accepts invalid match: {}".format(t))
def test_bar_okay(self: 'TestRegexMatch') -> None:
"""Correctly matches barred regexes?"""
bar_list = [(B(L1, LE), '1'), (B(LE, L2), '2'), (B(L1, L1), '1')]
for t in bar_list:
self.assertTrue(regex_match(t[0], t[1]),
"Rejects valid match: {}".format(t))
def test_bar_fail(self: 'TestRegexMatch') -> None:
"""Correctly rejects near-bars?"""
nearly_bar_list = [(B(L1, L2), '12'), (B(L1, L2), '0'),
(B(L1, L2), '')]
for t in nearly_bar_list:
self.assertFalse(regex_match(t[0], t[1]),
"Accepts invalid match: {}".format(t))
def test_star_okay(self: 'TestRegexMatch') -> None:
"""Correctly matches starred regexes?"""
star_list = [(S(L1), '1'), (S(L2), '222222')]
for t in star_list:
self.assertTrue(regex_match(t[0], t[1]),
"Rejects valid match: {}".format(t))
def test_star_fail(self: 'TestRegexMatch') -> None:
"""Correctly rejects near-stars?"""
nearly_star_list = [(S(L1), '1 1'), (S(L2), '22212'),
(S(L0), '0000 0'), (S(L0), '3')]
for t in nearly_star_list:
self.assertFalse(regex_match(t[0], t[1]),
"Accepts invalid match: {}".format(t))
def test_dot_bar_ok(self: 'TestRegexMatch') -> None:
"""Correctly matches dot-bar regexes?"""
dot_bar_list = [(D(B(L0, L1), B(L2, L0)),'12'),
(B(D(L0, L1), D(L2, L0)),'20'),
(D(B(L0, L1), D(L2, L1)), '121'),
(B(D(L0, L1), B(L2, L1)), '01')]
for t in dot_bar_list:
self.assertTrue(regex_match(t[0], t[1]),
"Rejects valid match: {}".format(t))
def test_dot_bar_fail(self: 'TestRegexMatch') -> None:
"""Correctly rejects near dot-bars?"""
nearly_dot_bar_list = [(D(B(L0, L1), B(L2, L0)), '012'),
(B(D(L0, L1), D(L2, L0)), '02'),
(D(B(L0, L1), D(L2, L1)), '0121'),
(B(D(L0, L1), B(L2, L1)), '0121')]
for t in nearly_dot_bar_list:
self.assertFalse(regex_match(t[0], t[1]),
"Accepts invalid match: {}".format(t))
def test_dot_star_ok(self: 'TestRegexMatch') -> None:
"""Correctly matches dot-star regexes?"""
dot_star_list = [(D(S(L1), S(L2)), '112'), (D(S(L1), S(L2)), '122'),
(D(S(L1), S(L2)), '2222'), (D(S(L1), S(L2)), '111'),
(S(D(L1, L0)), '101010'), (D(L1, S(D(L2, L0))), '1202020'),
(D(L1, S(D(L2, L0))), '1'), (S(D(L1, S(L0))), '100110')]
for t in dot_star_list:
self.assertTrue(regex_match(t[0], t[1]),
"Rejects valid match: {}".format(t))
def test_dot_star_fail(self: 'TestRegexMatch') -> None:
"""Correctly rejects near dot-stars?"""
near_dot_star_list = [(D(L0, S(L1)), '(0.1*)'), (D(L0, S(L1)), '0101'),
(S(D(L1, L1)), '111'), (S(D(L1, L0)), '1100')]
for t in near_dot_star_list:
self.assertFalse(regex_match(t[0], t[1]),
"Accepts invalid match: {}".format(t))
def test_bar_star_ok(self: 'TestRegexMatch') -> None:
"""Correctly matches bar-star regexes?"""
bar_star_list = [(B(S(L1), S(L0)), '000'), (S(B(L2, L1)), '11212212212'),
(S(B(L1, B(L0, L2))), '1002221102201')]
for t in bar_star_list:
self.assertTrue(regex_match(t[0], t[1]),
"Rejects valid match: {}".format(t))
def test_bar_star_fail(self: 'TestRegexMatch') -> None:
"""Correctly rejects near bar-stars?"""
near_bar_star_list = [(B(L0, S(L1)), '(0|1*)'), (B(L0, S(L1)), '01'),
(S(B(L0, L1)), '(0|1)*'), (S(B(L0, L1)), '00 1')]
for t in near_bar_star_list:
self.assertFalse(regex_match(t[0], t[1]),
"Accepts invalid match: {}".format(t))
def test_bar_star_dot_ok(self: 'TestRegexMatch') -> None:
"""Correctly matches bar-star-dot regexes?"""
bar_star_dot_list = [(B(S(L2), D(L1, L0)), '10'), (B(S(L2), D(L1, L0)), '222'),
(D(B(L0, L2), S(L1)), '0111'),
(S(D(B(L0, L2), S(L1))), '0121210121'),
(S(S(L2)), '22222')]
for t in bar_star_dot_list:
self.assertTrue(regex_match(t[0], t[1]),
"Rejects valid match: {}".format(t))
def test_bar_star_dot_fail(self: 'TestRegexMatch') -> None:
"""Correctly rejects near bar-star-dots?"""
near_bar_star_dot_list = [(B(S(L2), D(L1, L0)), '210'), (B(S(L2), D(L1, L0)), '1'),
(D(B(L0, L2), S(L1)), '02111'),
(S(D(B(L0, L2), S(L1))), '102102'),
(S(S(L2)), '2 2222')]
for t in near_bar_star_dot_list:
self.assertFalse(regex_match(t[0], t[1]),
"Accepts invalid match: {}".format(t))
def test_difficult_star(self: 'TestRegexMatch') -> None:
"""Correct on difficult case for star (1|(1.2))*?
Almost-correct implementation of * will fail this test, in particular
will not get accept enough strings."""
r = S(B(L1,D(L1,L2)))
yes = ["11212","12121","112112"]
no = ["1221","11221"]
for s in yes:
self.assertTrue(regex_match(r,s),
"Rejects valid match: {}".format((r,s)))
for s in no:
self.assertFalse(regex_match(r, s),
"Accepts invalid match: {}".format((r,s)))
class TestBuildRegexTree(TestCase):
    """Tests for build_regex_tree: the parser must return the exact tree."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def _check_pairs(self, pairs):
        # pairs: iterable of (expected tree, regex string).  The failure
        # message text is byte-identical to the original suite's message.
        for expected, regex in pairs:
            self.assertEqual(expected, build_regex_tree(regex),
                             "Regex tree {} doesn't match {}.".format(
                                 expected, regex))

    def test_leaf(self):
        """Correctly builds leaves?"""
        self._check_pairs([(LE, 'e'), (L0, '0'), (L1, '1'), (L2, '2')])

    def test_dot(self):
        """Correctly builds dot trees?"""
        self._check_pairs([(D(L0, L1), '(0.1)'), (D(LE, L1), '(e.1)'),
                           (D(L1, LE), '(1.e)'), (D(L2, L2), '(2.2)')])

    def test_bar(self):
        """Correctly builds bar trees?"""
        self._check_pairs([(B(L0, L1), '(0|1)'), (B(LE, L1), '(e|1)'),
                           (B(L1, LE), '(1|e)'), (B(L2, L2), '(2|2)')])

    def test_star(self):
        """Correctly builds star trees?"""
        self._check_pairs([(S(L1), '1*'), (S(LE), 'e*'),
                           (S(L0), '0*'), (S(L2), '2*')])

    def test_bar_dot(self):
        """Correctly builds bar-dot trees?"""
        self._check_pairs([(B(D(L0, L1), D(L2, LE)), '((0.1)|(2.e))'),
                           (B(B(L0, L1), D(L2, LE)), '((0|1)|(2.e))'),
                           (B(D(L0, L1), B(L2, LE)), '((0.1)|(2|e))'),
                           (B(D(L1, L2), L0), '((1.2)|0)'),
                           (B(L1, D(L2, L0)), '(1|(2.0))'),
                           (D(B(L0, L1), B(L2, LE)), '((0|1).(2|e))'),
                           (D(D(L0, L1), B(L2, LE)), '((0.1).(2|e))'),
                           (D(B(L0, L1), D(L2, LE)), '((0|1).(2.e))')])

    def test_bar_star(self):
        """Correctly builds bar-star trees?"""
        self._check_pairs([(B(L0, S(L1)), '(0|1*)'), (B(S(L0), L1), '(0*|1)'),
                           (B(S(L1), S(L2)), '(1*|2*)'),
                           (S(B(L0, L1)), '(0|1)*'),
                           (S(B(S(L0), L1)), '(0*|1)*'),
                           (S(B(S(L0), B(L1, S(L2)))), '(0*|(1|2*))*')])

    def test_dot_star(self):
        """Correctly builds dot-star trees?"""
        self._check_pairs([(D(L0, S(L1)), '(0.1*)'), (D(S(L0), L1), '(0*.1)'),
                           (D(S(L1), S(L2)), '(1*.2*)'),
                           (S(D(L0, L1)), '(0.1)*'),
                           (S(D(S(L0), L1)), '(0*.1)*'),
                           (S(D(S(L0), D(L1, S(L2)))), '(0*.(1.2*))*')])

    def test_bar_dot_star(self):
        """Correctly builds bar-dot-star trees?"""
        self._check_pairs([(B(D(L0, L1), D(S(L2), LE)), '((0.1)|(2*.e))'),
                           (D(B(L0, L1), B(S(L2), LE)), '((0|1).(2*|e))'),
                           (S(D(B(L0, L1), D(S(L2), LE))), '((0|1).(2*.e))*'),
                           (S(B(D(L0, L1), B(S(L2), LE))), '((0.1)|(2*|e))*'),
                           (S(D(L0, B(S(L2), LE))), '(0.(2*|e))*')])
class TestIsRegex(TestCase):
    """Tests for is_regex: acceptance and rejection of candidate strings."""

    def setUp(self):
        pass

    def tearDown(self):
        """Clean up test case"""
        pass

    def _all_valid(self, candidates):
        # Every candidate must be accepted; message text matches the original.
        for candidate in candidates:
            self.assertTrue(is_regex(candidate),
                            "Rejects valid regex: {}".format(candidate))

    def _all_invalid(self, candidates):
        # Every candidate must be rejected; message text matches the original.
        for candidate in candidates:
            self.assertFalse(is_regex(candidate),
                             "Accepts invalid regex: {}".format(candidate))

    def test_leaf_ok(self):
        """Leaf regular expressions accepted?"""
        self._all_valid(['0', '1', '2', 'e'])

    def test_leaf_fail(self):
        """Leaf non-regexes rejected?"""
        self._all_invalid(['3', '00', '', '(1)'])

    def test_bar(self):
        """Bar regexes accepted?"""
        self._all_valid(['(0|1)', '(0|e)', '(1|1)', '(1|e)', '(2|1)'])

    def test_bar_fail(self):
        """Bar-like non-regexes rejected?"""
        self._all_invalid(['0|1', '(0|1|2)', '|', '(|)', '(00|1)'])

    def test_dot(self):
        """Dot regexes accepted?"""
        self._all_valid(['(0.1)', '(0.e)', '(1.1)', '(1.e)', '(2.1)'])

    def test_dot_fail(self):
        """Dot-like non-regexes rejected?"""
        self._all_invalid(['0.1', '(0.1.2)', '.', '(.)', '(00.1)'])

    def test_star_ok(self):
        """Star regexes accepted?"""
        self._all_valid(['e*', '0*', '1*', '2*'])

    def test_star_fail(self):
        """Star-like non-regexes rejected?"""
        self._all_invalid(['*1', '*', '(2*)', '0*1', '(1)*'])

    def test_bar_dot_ok(self):
        """Bar-dot regexes accepted?"""
        self._all_valid(['((0.1)|(2.e))', '((0|1).(2|e))', '((0|1).e)',
                         '(0.(2|e))', '((0.1)|e)', '(1|(2.e))',
                         '((0.1)|(2|e))', '((0|1).(2.e))'])

    def test_bar_dot_fail(self):
        """Bar-dot-line non-regexes rejected?"""
        self._all_invalid(['(0.1|2.e)', '(.2|e)', '((0.1)|(1.2.0))', '(.|)'])

    def test_bar_star_ok(self):
        """Bar-star regexes accepted?"""
        self._all_valid(['(0|1)*', '(0*|1)', '(1|0*)', '(0|(1|2*))*',
                         '((0|1)*|2)*'])

    def test_bar_star_fail(self):
        """Bar-star-like non-regexes rejected?"""
        self._all_invalid(['0|1*', '*(0|1)', '(*0|1)', '(1|*0)', '(|0(1|2*)'])

    def test_dot_star_ok(self):
        """Dot-star regexes accepted?"""
        self._all_valid(['(0.1)*', '(0*.1)', '(1.0*)', '(0.(1.2*))*',
                         '((0.1)*.2)*'])

    def test_dot_star_fail(self):
        """Dot-star-like non-regexes rejected?"""
        self._all_invalid(['0.1*', '*(0.1)', '(*0.1)', '(1.*0)', '(.0(1.2*)'])

    def test_bar_star_dot_ok(self):
        """Bar-star-dot regexes accepted?"""
        self._all_valid(['((0.e)|(1*.2))*', '((0|e).(1*|2))*',
                         '(0|(1*.2))*', '((0|1)|(2*.(e|0)))*',
                         '((0.1).(2*|(e.0)))*'])

    def test_bar_star_dot_fail(self):
        """Bar-star-dot non-regexes rejected?"""
        self._all_invalid(['0*|1.2', '((0*)|(1).(2))', '(0.1|2*)',
                           '((0*|(1.2)))', '((0.1*)|(2*)',
                           '((0|1).(2|((1.0))))*', '((0.1)|(2.((1|0))))*'])
# Keep a handle on the student's implementation, then shadow it with a
# wrapper that normalises the result and rejects duplicate entries.
students_arp = all_regex_permutations
def all_regex_permutations(s: str):
    """Run the student's function and return its results as a set."""
    produced = students_arp(s)
    unique = set(produced)
    if len(unique) != len(produced):
        raise Exception("Student returned a list with duplicates")
    return unique
class TestAllRegexPermutations(TestCase):
    """Tests for all_regex_permutations (wrapped above to reject duplicates)."""
    # Some students will return lists; len() works on both lists and sets,
    # otherwise we just convert to a set before comparing.

    def test_empty_results(self) -> None:
        for source in ['0221', '||(011)', '0*.1*', '0*1']:
            self.assertTrue(len(set(all_regex_permutations(source))) == 0,
                            ("No regular expressions can be formed from {}" +
                             " but returned a non-empty collection").format(source))

    def test_leaf(self) -> None:
        """Correctly produces unique permutation of leaf?"""
        for leaf in ['e', '0', '1', '2']:
            self.assertEqual(set(leaf), all_regex_permutations(leaf),
                             "Different permutation set: {}, {}".format(
                                 set(leaf), all_regex_permutations(leaf)))

    def test_binary(self) -> None:
        """Correctly produces permutations of binary regexes?"""
        cases = [('(0.1)', {'(0.1)', '(1.0)'}),
                 ('(1|2)', {'(1|2)', '(2|1)'}),
                 ('(1|2*)', {'(1|2*)', '(2*|1)', '(1*|2)', '(2|1*)',
                             '(1|2)*', '(2|1)*'})]
        for source, expected in cases:
            self.assertEqual(all_regex_permutations(source), expected,
                             "Different permutation sets: {}, {}".format(
                                 set(all_regex_permutations(source)), expected))

    def test_long(self) -> None:
        """Correctly produces permutations of long regex?"""
        # naive generation of permutations not practical for much longer strings
        source = '(0*.1)*'
        expected = {'(0*.1)*', '(0*.1*)', '(0.1*)*', '(1*.0)*', '(1*.0*)',
                    '(1.0*)*', '(0.1)**', '(1.0)**', '(1**.0)', '(1.0**)',
                    '(0**.1)', '(0.1**)'}
        self.assertEqual(set(all_regex_permutations(source)), expected)
# One shared loader builds a suite per tested function.
_loader = TestLoader()
is_regex_suite = _loader.loadTestsFromTestCase(TestIsRegex)
all_regex_permutations_suite = _loader.loadTestsFromTestCase(TestAllRegexPermutations)
match_regex_suite = _loader.loadTestsFromTestCase(TestRegexMatch)
build_regex_tree_suite = _loader.loadTestsFromTestCase(TestBuildRegexTree)
def show_failures_and_errors() -> dict:
    """Run all four suites, print one line per failure/error, and return
    the TestResult objects keyed by tested-function name.

    BUG FIX: the original fell off the end without returning, so the
    __main__ caller (``results = show_failures_and_errors()``) always got
    None despite the annotation promising a result.
    """
    results = {}
    results['is_regex'] = TestResult()
    results['all_regex_permutations'] = TestResult()
    results['match_regex'] = TestResult()
    results['build_regex_tree'] = TestResult()
    is_regex_suite.run(results['is_regex'])
    all_regex_permutations_suite.run(results['all_regex_permutations'])
    match_regex_suite.run(results['match_regex'])
    build_regex_tree_suite.run(results['build_regex_tree'])
    # Each entry of TestResult.failures / .errors is a (test, traceback) pair;
    # keep only the test method names.
    failures = {}
    for case in results.keys():
        failures[case] = [e[0]._testMethodName for
                          e in results[case].failures]
    errors = {}
    for case in results.keys():
        errors[case] = [e[0]._testMethodName for
                        e in results[case].errors]
    for (case, methods) in failures.items():
        for m in methods:
            print("failure:{}.{}".format(case, m))
    for (case, methods) in errors.items():
        for m in methods:
            print("error: {}.{}".format(case, m))
    return results
if __name__ == '__main__':
    # When collecting statistics we only want the terse failure/error lines;
    # otherwise run each suite through the normal text runner.
    OUTPUT_FOR_COMPUTING_STATS = False
    if OUTPUT_FOR_COMPUTING_STATS:
        results = show_failures_and_errors()
    else:
        for suite in (is_regex_suite, all_regex_permutations_suite,
                      match_regex_suite, build_regex_tree_suite):
            TextTestRunner().run(suite)
    #main(exit=False, verbosity=2)
| UTF-8 | Python | false | false | 23,354 | py | 3 | test_regex_functions.py | 3 | 0.452342 | 0.416032 | 0 | 505 | 44.245545 | 91 |
GoogleCloudPlatform/professional-services | 3,908,420,291,408 | b5eafdd67f733f3c94ebc150f941da9423467b75 | 61004e474b7b2ad0071c16766f0f7874f04f9466 | /tools/bqms-config-generator/config_generator/util/constants.py | 48430a3301b27bd52d1828534e1c49598210977c | [
"Apache-2.0"
]
| permissive | https://github.com/GoogleCloudPlatform/professional-services | eb79751efae765a8c691a745e520f44f51bd715c | 0f51121b945bd74c7f667e74e8861fceda87565c | refs/heads/main | 2023-09-05T02:57:33.328973 | 2023-08-30T14:40:30 | 2023-08-30T14:40:30 | 91,730,359 | 2,626 | 1,381 | Apache-2.0 | false | 2023-09-14T20:13:42 | 2017-05-18T19:29:27 | 2023-09-13T21:27:22 | 2023-09-14T20:13:42 | 368,532 | 2,570 | 1,264 | 72 | Python | false | false | # Copyright 2023 Google. This software is provided as-is, without warranty or
# representation for any use or purpose. Your use of it is subject to your
# agreement with Google.
"""String constants"""
from dataclasses import dataclass
@dataclass(frozen=True)
class ObjectTypes:
    """ The type of source object used for renaming in a name mapping rule """
    # One string tag per renamable source-object kind; values mirror the names.
    DATABASE = 'DATABASE'
    SCHEMA = 'SCHEMA'
    RELATION = 'RELATION'
    RELATION_ALIAS = 'RELATION_ALIAS'
    ATTRIBUTE = 'ATTRIBUTE'
    ATTRIBUTE_ALIAS = 'ATTRIBUTE_ALIAS'
    FUNCTION = 'FUNCTION'
@dataclass(frozen=True)
class YamlConfigConstants:
    """Constants for yaml config generation"""
    # Every column expected in the input sheet (mandatory + optional patterns).
    INPUT_FIELDS = ['bq_project', 'bq_dataset', 'table_name', 'column_name', 'source_datatype',
                    'target_datatype', 'source_pattern', 'target_pattern']
    # Columns that must be present for a row to be usable.
    MANDATORY_INPUT_FIELDS = ['bq_project', 'bq_dataset', 'table_name', 'column_name', 'source_datatype',
                              'target_datatype']
    # Data-type names accepted in the target_datatype column.
    SUPPORTED_TARGET_TYPES = ['BOOLEAN', 'TINYINT', 'SMALLINT', 'INTEGER', 'BIGINT', 'FLOAT', 'DOUBLE', 'NUMERIC',
                              'TIME', 'TIMETZ', 'DATE', 'DATETIME', 'TIMESTAMP', 'TIMESTAMPTZ', 'CHAR', 'VARCHAR']
@dataclass(frozen=True)
class JsonConfigConstants:
    """Constants for json config generation"""
    # Sentinel database name; presumably substituted when the source row has
    # no explicit database — TODO confirm against the callers.
    DEFAULT_DATABASE = "__DEFAULT_DATABASE__"
    # Every column expected in the name-mapping input sheet.
    INPUT_FIELDS = ['type', 'src_db', 'src_schema', 'src_relation', 'src_attribute',
                    'bq_project', 'bq_dataset', 'bq_table', 'bq_column']
# Shared, ready-made instances used by the config generators.
YAML_CONSTANTS = YamlConfigConstants()
JSON_CONSTANTS = JsonConfigConstants()
OBJECT_TYPE = ObjectTypes()
| UTF-8 | Python | false | false | 1,640 | py | 2,659 | constants.py | 1,136 | 0.65122 | 0.64878 | 0 | 48 | 33.166667 | 114 |
Tinkerforge/flash-test | 17,282,948,413,638 | 70a7813635cb393d18bf5af887f89819f0443e83 | 5e27682b74c317b8abfad02ae9256e8f394b45c4 | /src/flash-test/plugin_system/plugins/bricklet_voltage_current.py | b5d4dbb1cd20d38d17ecfab5cd74437d41a7e379 | []
| no_license | https://github.com/Tinkerforge/flash-test | 88b18fcb8ef988f3a68e4ce3c5f7d60d34c8cbda | 845756dec9f890dd487ec1a48942205e0ce81c7b | refs/heads/master | 2023-09-04T10:57:16.621811 | 2023-08-30T08:56:18 | 2023-08-30T08:56:18 | 40,542,028 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
flash-test (Brick/Bricklet/Extension Flash and Test tool)
Copyright (C) 2016 Olaf Lüke <olaf@tinkerforge.com>
bricklet_voltage_current.py: Voltage/Current plugin
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
from PyQt5 import Qt, QtWidgets, QtCore
from ..tinkerforge.bricklet_voltage_current import BrickletVoltageCurrent
from ..bricklet_base import BrickletBase, get_bricklet_firmware_filename
from ..callback_emulator import CallbackEmulator
import time
import math
class Plugin(BrickletBase):
    """Flash-and-test plugin for the Voltage/Current Bricklet.

    Flashes the firmware, polls live voltage/current readings into the UI
    and lets the operator store a current calibration measured with a
    multimeter.  Fixes over the original: ``!= None`` comparisons replaced
    with the idiomatic ``is not None``, local ``l`` renamed.
    """

    # Operator instructions shown verbatim in the tool UI (German on purpose).
    TODO_TEXT = u"""\
1. Verbinde Voltage/Current Bricklet mit Port C
2. Drücke "Flashen"
3. Warte bis Master Brick neugestartet hat (Tool Status ändert sich auf "Plugin gefunden")
4. Kalibriere Strom:
   * Schließe Testaufbau (24V/1A) an
   * Überprüfe Anzeige = ~24V/1A
   * Trage mit Multimeter gemessenen Strom ein und drücke 'Kalibrieren'
5. Das Bricklet ist fertig, in ESD-Tüte stecken, zuschweißen, Aufkleber aufkleben
6. Gehe zu 1
"""

    def __init__(self, *args):
        BrickletBase.__init__(self, *args)
        self.cbe_voltage = None    # CallbackEmulator polling get_voltage()
        self.cbe_current = None    # CallbackEmulator polling get_current()
        self.last_values = [0, 0]  # last shown [voltage in V, current in A]

    def start(self):
        BrickletBase.start(self)
        self.mw.button_save_vc.clicked.connect(self.save_clicked)

    def stop(self):
        super().stop()
        self.mw.button_save_vc.clicked.disconnect(self.save_clicked)
        # Stop polling before hiding the plugin's widgets.
        if self.cbe_voltage is not None:
            self.cbe_voltage.set_period(0)
        if self.cbe_current is not None:
            self.cbe_current.set_period(0)
        layout = self.mw.voltage_current_layout
        for i in range(layout.count()):
            layout.itemAt(i).widget().setVisible(False)

    def get_device_identifier(self):
        return BrickletVoltageCurrent.DEVICE_IDENTIFIER

    def flash_clicked(self):
        self.flash_bricklet(get_bricklet_firmware_filename(BrickletVoltageCurrent.DEVICE_URL_PART))

    def new_enum(self, device_information):
        """Called on (re-)enumeration: show the widgets and restart polling."""
        if self.cbe_voltage is not None:
            self.cbe_voltage.set_period(0)
        if self.cbe_current is not None:
            self.cbe_current.set_period(0)
        layout = self.mw.voltage_current_layout
        for i in range(layout.count()):
            layout.itemAt(i).widget().setVisible(True)
        self.voltage_current = BrickletVoltageCurrent(device_information.uid, self.get_ipcon())
        # Poll both values every 100 ms.
        self.cbe_voltage = CallbackEmulator(lambda: self.voltage_current.get_voltage(), self.cb_voltage)
        self.cbe_voltage.set_period(100)
        self.cbe_current = CallbackEmulator(lambda: self.voltage_current.get_current(), self.cb_current)
        self.cbe_current.set_period(100)
        self.show_device_information(device_information)

    def cb_voltage(self, voltage):
        # Device value is divided by 1000 and labelled "V" — i.e. reported in mV.
        self.last_values[0] = voltage/1000.0
        self.mw.set_value_normal('Spannung: ' + str(self.last_values[0]) + ' V, Strom: ' + str(self.last_values[1]) + ' A')

    def cb_current(self, current):
        # Device value is divided by 1000 and labelled "A" — i.e. reported in mA.
        self.last_values[1] = current/1000.0
        self.mw.set_value_normal('Spannung: ' + str(self.last_values[0]) + ' V, Strom: ' + str(self.last_values[1]) + ' A')

    def save_clicked(self):
        """Calibrate the current reading against the operator's multimeter value."""
        self.mw.set_tool_status_action('Kalibriere... ')
        QtWidgets.QApplication.processEvents()
        # Reset to a 1:1 calibration, wait for a fresh sample, then store the
        # ratio between the real (multimeter) and device-reported current.
        self.voltage_current.set_calibration(1, 1)
        time.sleep(0.5)
        current_device = self.voltage_current.get_current()
        current_real = self.mw.spinbox_current_vc.value()
        self.voltage_current.set_calibration(current_real, current_device)
        self.mw.set_tool_status_okay('Kalibrierung OK: ' + str(current_device) + '/' + str(current_real))
| UTF-8 | Python | false | false | 4,248 | py | 131 | bricklet_voltage_current.py | 121 | 0.685303 | 0.669733 | 0 | 117 | 35.230769 | 124 |
ibrg/coloring-page.ru | 16,381,005,298,545 | 0a0d274faa682ff9601c594f51f83b881152ca35 | 79f3c9f5556b57e6f233d2779951986488834395 | /colloring/core/admin.py | 9b3fa4cfab1742b1c768ac4a4b62a7b16d740f63 | []
| no_license | https://github.com/ibrg/coloring-page.ru | 33c5cb0023d03edb3672be6f13f9387cc99a4ac3 | a7c0412255b4b308bba897aa4effd575b9e38fac | refs/heads/master | 2018-09-29T12:48:59.324881 | 2017-02-19T08:22:03 | 2017-02-19T08:22:03 | 82,320,226 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from core.models import Category, SubCategory, ColoringPage
class CategoryAdmin(admin.ModelAdmin):
    # Auto-fill the slug from the title as it is typed in the admin form.
    prepopulated_fields = {'slug':('title',)}
class SubCategoryAdmin(admin.ModelAdmin):
    # Auto-fill the slug from the title as it is typed in the admin form.
    prepopulated_fields = {'slug':('title',)}
# Register the models with the admin site; ColoringPage keeps the default
# ModelAdmin.
for model, options in ((Category, CategoryAdmin), (SubCategory, SubCategoryAdmin)):
    admin.site.register(model, options)
admin.site.register(ColoringPage)
| UTF-8 | Python | false | false | 399 | py | 15 | admin.py | 8 | 0.784461 | 0.784461 | 0 | 12 | 32.25 | 59 |
c-bata/wsgicli | 10,041,633,566,883 | 180efdca356ac441a9e8f32d78eaf06dab118db6 | c964b1a03a3c980857272c2ba11bc89f354d72ea | /wsgicli.py | 0beb1fceefb6d06450c6b2e55e5530dd25f9a0a3 | [
"MIT"
]
| permissive | https://github.com/c-bata/wsgicli | 49f222fb6f8350185d2f89a053c922bf0ef30eba | 30fee1550a263c0821a0b79283f293f393c8dc27 | refs/heads/master | 2020-06-17T17:04:10.564152 | 2016-12-25T12:07:17 | 2016-12-25T12:07:17 | 74,986,811 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import click
from importlib.machinery import SourceFileLoader
import os
import site
import sys
import time
import threading
import _thread
from wsgiref.simple_server import make_server
#####################################################################################
# Command Line Interface
#####################################################################################
@click.group()
def cli():
    # Root click group; the `run` and `shell` commands register onto it below.
    pass
#####################################################################################
# For run server
#####################################################################################
def run_server(app, host, port):
    """Serve the WSGI *app* on host:port with wsgiref, blocking forever."""
    click.echo('Start: {host}:{port}'.format(host=host, port=port))
    server = make_server(host, port, app)
    server.serve_forever()
# Exit status a child process uses to ask the supervising parent for a
# restart after a watched Python file changed.
EXIT_STATUS_RELOAD = 3
class FileCheckerThread(threading.Thread):
    # This class is copied and pasted from following source code of Bottle.
    # https://github.com/bottlepy/bottle/blob/master/bottle.py#L3647-L3686
    """ Interrupt main-thread as soon as a changed module file is detected,
    the lockfile gets deleted or gets too old. """
    def __init__(self, lockfile, interval):
        threading.Thread.__init__(self)
        self.daemon = True
        self.lockfile, self.interval = lockfile, interval
        #: Is one of 'reload', 'error' or 'exit'
        self.status = None
    def run(self):
        # Snapshot the mtime of every module file loaded so far.
        files = dict()
        for module in list(sys.modules.values()):
            path = getattr(module, '__file__', '')
            if path[-4:] in ('.pyo', '.pyc'):
                # Map compiled files back to their .py source.
                path = path[:-1]
            if path and os.path.exists(path):
                files[path] = os.stat(path).st_mtime
        while not self.status:
            # The parent keeps touching the lockfile while alive; a missing
            # or stale lockfile means the parent died.
            if not os.path.exists(self.lockfile) or \
               os.stat(self.lockfile).st_mtime < time.time() - self.interval - 5:
                self.status = 'error'
                _thread.interrupt_main()
            for path, last_mtime in files.items():
                if not os.path.exists(path) or os.stat(path).st_mtime > last_mtime:
                    self.status = 'reload'
                    _thread.interrupt_main()
                    break
            time.sleep(self.interval)
    def __enter__(self):
        self.start()
    def __exit__(self, exc_type, *_):
        if not self.status:
            self.status = 'exit' # silent exit
        self.join()
        # Swallow only the KeyboardInterrupt raised via interrupt_main() above.
        return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
def run_live_reloading_server(interval, app, host, port):
    """Serve *app* under a supervising parent process that restarts the
    child whenever a watched Python file changes (Bottle-style reloader).

    The parent/child role is signalled through the WSGICLI_CHILD and
    WSGICLI_LOCKFILE environment variables.
    """
    if not os.environ.get('WSGICLI_CHILD'):
        # ---- parent: spawn children in a loop and keep the lockfile fresh ----
        import subprocess
        import tempfile
        lockfile = None
        try:
            fd, lockfile = tempfile.mkstemp(prefix='wsgicli.', suffix='.lock')
            os.close(fd) # We only need this file to exist. We never write to it
            while os.path.exists(lockfile):
                args = [sys.executable] + sys.argv
                environ = os.environ.copy()
                environ['WSGICLI_CHILD'] = 'true'
                environ['WSGICLI_LOCKFILE'] = lockfile
                p = subprocess.Popen(args, env=environ)
                while p.poll() is None: # Busy wait...
                    os.utime(lockfile, None) # Alive! If lockfile is unlinked, it raises FileNotFoundError.
                    time.sleep(interval)
                # Any exit status other than EXIT_STATUS_RELOAD ends the loop.
                if p.poll() != EXIT_STATUS_RELOAD:
                    if os.path.exists(lockfile):
                        os.unlink(lockfile)
                    sys.exit(p.poll())
        except KeyboardInterrupt:
            pass
        finally:
            if os.path.exists(lockfile):
                os.unlink(lockfile)
        return
    # ---- child: serve, and exit with EXIT_STATUS_RELOAD on file change ----
    try:
        lockfile = os.environ.get('WSGICLI_LOCKFILE')
        bgcheck = FileCheckerThread(lockfile, interval)
        with bgcheck:
            run_server(app=app, host=host, port=port)
        if bgcheck.status == 'reload':
            sys.exit(EXIT_STATUS_RELOAD)
    except KeyboardInterrupt:
        pass
    except (SystemExit, MemoryError):
        raise
    except:
        # Unexpected crash: pause briefly so the parent does not spin, then
        # ask for a restart.
        time.sleep(interval)
        sys.exit(3)
@cli.command()
@click.argument('filepath', nargs=1, envvar='WSGICLI_FILE', type=click.Path(exists=True))
@click.argument('wsgiapp', nargs=1, envvar='WSGICLI_WSGI_APP')
@click.option('--host', '-h', type=click.STRING, default='127.0.0.1', envvar='WSGICLI_HOST',
              help='The interface to bind to.')
@click.option('--port', '-p', type=click.INT, default=8000, envvar='WSGICLI_PORT',
              help='The port to bind to.')
@click.option('--reload/--no-reload', default=None, envvar='WSGICLI_RELOAD',
              help='Enable live reloading')
@click.option('--interval', type=click.INT, default=1, envvar='WSGICLI_INTERVAL',
              help='Interval time to check file changed for reloading')
@click.option('--static/--no-static', default=None, envvar='WSGICLI_STATIC',
              help='Enable static file serving')
@click.option('--static-root', default='static', envvar='WSGICLI_STATIC_ROOT',
              help='URL path to static files')
@click.option('--static-dirs', default=['./static/'], multiple=True, envvar='WSGICLI_STATIC_DIRS',
              help='Directories for static files')
# NOTE(review): the envvar below has a double underscore (WSGICLI__LINEPROF),
# unlike every other option — possibly a typo; confirm before changing.
@click.option('--lineprof/--no-lineprof', envvar='WSGICLI__LINEPROF',
              help='Enable line profiler')
@click.option('--lineprof-file', multiple=True, envvar='WSGICLI_LINEPROF_FILE',
              help='The filename profiled by line-profiler')
@click.option('--validate/--no-validate', default=False, envvar='WSGICLI_VALIDATE',
              help='Validating your WSGI application complying with PEP3333 compliance.')
def run(filepath, wsgiapp, host, port, reload, interval,
        static, static_root, static_dirs, lineprof, lineprof_file, validate):
    """
    Runs a development server for WSGI Application.
    Usage:
    $ wsgicli run hello.py app -h 0.0.0.0 -p 5000 --reload
    $ wsgicli run hello.py app --static --static-root /static/ --static-dirs ./static/
    """
    # Load the target module from its path and pull the WSGI callable out of it.
    insert_import_path_to_sys_modules(filepath)
    module = SourceFileLoader('module', filepath).load_module()
    app = getattr(module, wsgiapp)
    # Optional middlewares are stacked around the app before serving.
    if static:
        from wsgi_static_middleware import StaticMiddleware
        app = StaticMiddleware(app, static_root=static_root, static_dirs=static_dirs)
    if validate:
        from wsgiref.validate import validator
        app = validator(app)
    if lineprof:
        # Caution: wsgi-lineprof is still pre-alpha. Expect breaking API changes.
        from wsgi_lineprof.middleware import LineProfilerMiddleware
        from wsgi_lineprof.filters import FilenameFilter, TotalTimeSorter
        if lineprof_file:
            # wsgi-lineprof currently supports checking only one file.
            lineprof_file = lineprof_file[0]
        else:
            lineprof_file = os.path.basename(filepath)
        filters = [FilenameFilter(lineprof_file), TotalTimeSorter()]
        app = LineProfilerMiddleware(app, filters=filters)
    if reload:
        run_live_reloading_server(interval, app=app, host=host, port=port)
    else:
        run_server(app=app, host=host, port=port)
#####################################################################################
# For run shell
#####################################################################################
# Find Models
def import_from_path(import_path):
    """Import and return a package directory or a single ``.py`` file."""
    abspath = os.path.abspath(import_path)
    if not os.path.exists(abspath):
        raise ValueError('{path} does not exists.'.format(path=import_path))
    basename = os.path.basename(abspath)
    init_py = os.path.join(abspath, '__init__.py')
    if os.path.isdir(abspath) and os.path.exists(init_py):
        # A package: load it through its __init__.py under the directory name.
        return SourceFileLoader(basename, init_py).load_module()
    if basename.endswith('.py'):
        # A plain module: its name is the file name without the extension.
        module_name = os.path.splitext(basename)[0]
        return SourceFileLoader(module_name, abspath).load_module()
    raise ValueError('{path} is not a python package.'.format(path=import_path))
def find_modules_from_path(import_path):
    """Import *import_path*, then yield loaded modules whose files live
    outside the interpreter's site-packages directories."""
    import_from_path(import_path)
    lib_dirs = [os.path.dirname(site_dir) for site_dir in site.getsitepackages()]
    for module in sys.modules.values():
        path = getattr(module, '__file__', '')
        if path[-4:] in ('.pyo', '.pyc'):
            # Map a compiled file back to its .py source.
            path = path[:-1]
        if not path or not os.path.exists(path):
            continue
        if all(not path.startswith(lib_dir) for lib_dir in lib_dirs):
            yield module
def insert_import_path_to_sys_modules(import_path):
    """
    When importing a module, Python references the directories in sys.path.
    The default value of sys.path varies depending on the system, But:
    When you start Python with a script, the directory of the script is inserted into sys.path[0].
    So we have to replace sys.path to import object in specified scripts.
    """
    abspath = os.path.abspath(import_path)
    target_dir = abspath if os.path.isdir(abspath) else os.path.dirname(abspath)
    sys.path.insert(0, target_dir)
# Get model base classes
def _sqlalchemy_model():
    # Import lazily so wsgicli works without SQLAlchemy installed; the caller
    # treats ImportError as "this ORM is not available".
    from sqlalchemy.ext.declarative import DeclarativeMeta
    from sqlalchemy.orm import sessionmaker
    return [sessionmaker, DeclarativeMeta]
def _peewee_model():
    # Import lazily so wsgicli works without peewee installed; the caller
    # treats ImportError as "this ORM is not available".
    from peewee import BaseModel, Database
    return [BaseModel, Database]
def get_model_base_classes():
    """Collect ORM base classes from whichever supported ORMs are installed."""
    bases = []
    for probe in (_sqlalchemy_model, _peewee_model):
        try:
            found = probe()
        except ImportError:
            # That ORM is not installed; skip it.
            continue
        bases.extend(found)
    return tuple(bases)
# Run shell
def run_plain(imported_objects):
    # Plain stdlib REPL with the collected objects as the local namespace.
    import code
    code.interact(local=imported_objects)
def run_ipython(imported_objects):
    # Start IPython >= 1.0
    from IPython import start_ipython
    start_ipython(argv=[], user_ns=imported_objects)
def run_bpython(imported_objects):
    # bpython REPL with the collected objects as its namespace.
    from bpython import embed
    embed(imported_objects)
def run_ptpython(imported_objects, vi_mode=False):
    # ptpython REPL; history is shared across invocations via the home dir file.
    from ptpython.repl import embed, run_config
    history_filename = os.path.expanduser('~/.ptpython_history')
    embed(globals=imported_objects, history_filename=history_filename,
          vi_mode=vi_mode, configure=run_config)
def run_ptipython(imported_objects, vi_mode=False):
    # ptpython's IPython flavour; same history file as run_ptpython.
    from ptpython.repl import run_config
    from ptpython.ipython import embed
    history_filename = os.path.expanduser('~/.ptpython_history')
    embed(user_ns=imported_objects, history_filename=history_filename,
          vi_mode=vi_mode, configure=run_config)
# Maps the --interpreter option value to the launcher that starts it.
interpreters = {
    'python': run_plain,
    'ipython': run_ipython,
    'bpython': run_bpython,
    'ptpython': run_ptpython,
    'ptipython': run_ptipython,
}
def run_python(interpreter, imported_objects):
    """Start the REPL registered for *interpreter*.

    Raises click.BadParameter when the interpreter name is unknown.
    """
    for name, runner in interpreters.items():
        if interpreter == name:
            runner(imported_objects)
            break
    else:
        # BUG FIX: the original had a for-else without a break (so this branch
        # always ran) and merely constructed click.BadParameter without
        # raising it — an unknown interpreter name was silently ignored.
        raise click.BadParameter('Please select from ' + ', '.join(interpreters.keys()))
@cli.command()
@click.argument('filepath', nargs=1, envvar='WSGICLI_FILE_PATH', type=click.Path(exists=True))
@click.argument('wsgiapp', nargs=1, envvar='WSGICLI_WSGI_APP')
@click.option('-i', '--interpreter', default='python', envvar='WSGICLI_INTERPRETER',
              help="Select python interpreters (default: plain)"
              "Supported interpreters are ipython, bpython, ptpython and ptipython.")
@click.option('--models/--no-models', default=True, envvar='WSGICLI_SHELL_MODELS',
              help="Automatically recursively search and import ORM table definition"
              " from specified package. Now wsgicli supports SQLAlchemy and peewee."
              " (default: ``--models`` )")
def shell(filepath, wsgiapp, interpreter, models):
    """
    Runs a python shell.
    Usage:
    $ wsgicli shell app.py app -i ipython
    """
    model_base_classes = get_model_base_classes()
    imported_objects = {}
    if models and model_base_classes:
        # Walk the project's loaded modules and collect every public object
        # that is an instance of a known ORM base class.
        insert_import_path_to_sys_modules(filepath)
        for module in find_modules_from_path(filepath):
            for name in dir(module):
                if name.startswith('_'):
                    continue
                obj = getattr(module, name)
                if isinstance(obj, model_base_classes):
                    # First occurrence of a name wins.
                    key = name.split('.')[-1] if '.' in name else name
                    if key in imported_objects:
                        continue
                    imported_objects[key] = obj
    # The WSGI callable itself is always exposed as `app`.
    module = SourceFileLoader('module', filepath).load_module()
    imported_objects['app'] = getattr(module, wsgiapp)
    for key in imported_objects.keys():
        click.secho("import {}".format(key), fg='green')
    run_python(interpreter, imported_objects)
if __name__ == '__main__':
    # Entry point when executed directly: dispatch to the click group.
    cli()
| UTF-8 | Python | false | false | 12,960 | py | 12 | wsgicli.py | 5 | 0.602623 | 0.598765 | 0 | 354 | 35.610169 | 108 |
ripsj/true-home-test-api | 12,661,563,600,251 | d2363cdcca8ae51fe35e38e8121d75c46d0099f2 | d75b1e0b3c7ac734cb77820351b772ca56f8f8df | /project/apps/posts/tests.py | b3feb1dffd8ba49a6f08448803afe4efbb8e7cdc | []
| no_license | https://github.com/ripsj/true-home-test-api | e750090865b5731ec24f36269f7c6f90096ec0c2 | 6378a050ed4b1684350d4113eb3b6c0329018b82 | refs/heads/master | 2022-11-29T19:38:05.660840 | 2020-08-07T03:34:23 | 2020-08-07T03:34:23 | 285,876,985 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.test import TestCase, Client
from django.db import IntegrityError
from project.apps.categories.models import Category, Subcategory
from project.apps.posts.models import Posts
import json
class PostsTestViewCase(TestCase):
    def setUp(self):
        # Fresh test client plus one category/subcategory pair used as the
        # foreign-key targets of the post payloads below.
        self.client = Client()
        self.category = Category.objects.create(name='Category 1')
        self.subcategory = Subcategory.objects.create(name='Category 1', category=self.category)
def test_post_create_ok(self):
response = self.client.post ('/posts/', {
"title":"Post 1",
"prev":"Previously...",
"body":"This is a story...",
"tags":json.dumps({'tag': 'sometag'}),
"category":self.category.id,
"subcategory":self.subcategory.id
})
self.assertEquals(response.status_code, 201)
def test_post_create_bad_request(self):
response = self.client.post ('/posts/', {
"badtitle":"Bad Post",
})
self.assertEquals(response.status_code, 400)
    def test_post_create_repeated_title(self):
        # Creating a second post with the same title must raise IntegrityError
        # — presumably a unique constraint on the title; see the Posts model.
        # NOTE(review): an IntegrityError inside a plain TestCase leaves the
        # surrounding transaction broken; wrapping the second POST in
        # transaction.atomic() would be safer — confirm before changing.
        self.client.post ('/posts/', {
            "title":"Post 1",
            "prev":"Previously...",
            "body":"This is a story...",
            "tags":json.dumps({'tag': 'sometag'}),
            "category":self.category.id,
            "subcategory":self.subcategory.id
        })
        with self.assertRaises(IntegrityError):
            self.client.post ('/posts/', {
                "title":"Post 1",
                "prev":"Previously...",
                "body":"This is a story...",
                "tags":json.dumps({'tag': 'sometag'}),
                "category":self.category.id,
                "subcategory":self.subcategory.id
            })
def test_post_list_empty_ok(self):
response = self.client.get ('/posts/')
self.assertEquals(response.status_code, 200)
self.assertEquals(len(response.content),2)
def test_post_list_with_content_ok(self):
self.client.post ('/posts/', {
"title":"Post 1",
"prev":"Previously...",
"body":"This is a story...",
"tags":json.dumps({'tag': 'sometag'}),
"category":self.category.id,
"subcategory":self.subcategory.id
})
self.client.post ('/posts/', {
"title":"Post 2",
"prev":"Previously...",
"body":"This is a second story...",
"tags":json.dumps({'tag': 'sometag'}),
"category":self.category.id,
"subcategory":self.subcategory.id
})
response = self.client.get ('/posts/')
self.assertEquals(response.status_code, 200)
self.assertGreater(len(response.content),2) | UTF-8 | Python | false | false | 2,769 | py | 18 | tests.py | 15 | 0.546046 | 0.538462 | 0 | 78 | 34.512821 | 96 |
Jaieu/NLP_related_projects | 2,654,289,821,291 | 1d4f4a80b92f7ebf89b2f65c33784171ab158664 | 8d5f4c18fc8bfcc5ab4c827d1005f024b8027915 | /GNN/RGCN/data_preprocess.py | 44bd6c1813778bc7644a62e053c565fd4a1a18ee | []
| no_license | https://github.com/Jaieu/NLP_related_projects | 2f9a5596cd71cc9840e816dcee27dd5bc6ba5795 | 635661d7a4ba14ffd1d7bb955f26351b8b59d291 | refs/heads/master | 2023-07-18T22:14:50.254091 | 2021-09-15T10:02:38 | 2021-09-15T10:02:38 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Name: data_preprocess.py
Purpose: transform raw RDF data into vertex and edge data
Data: 2021.3.18
Author: lixh
"""
import pandas as pd
import random
import numpy as np
import pickle as pkl
from utils import *
import time
import scipy.sparse as sp
"""
先拿旧数据测一下,需要确定所有的edges数据加起来,共有多少种edge类型,构造关系-id、实体-id字典
step1、准备数据集数据,然后将数据集划分为训练数据和测试数据
补充:
1、
"""
# Paths to the CSV files holding every edge (relation) type of the graph.
compose_path = './data/compose_rdf_1.csv'
belong_path = './data/belong_rdf_2.csv'
relevent_path = './data/relevent_rdf_3.csv'
benefit_path = './data/benefit_rdf_4.csv'
not_benefit_path = './data/not_benefit_rdf_5.csv'
fullof_path = './data/fullof_rdf_6.csv'
not_fullof_path = './data/not_fullof_rdf_7.csv'
cure_path = './data/cure_rdf_8_update.csv'
not_cure_path = './data/not_cure_rdf_9_update.csv'
# Pair each RDF file with its '<source type>-<relation>-<target type>' label.
raw_rdf = [(compose_path, 'raw_food-compose-food'), (belong_path, 'food-belong-food_cate'),
           (relevent_path, 'food-relevent-food'),
           (benefit_path, 'food-benefit-crowd'), (not_benefit_path, 'food-not_benefit-crowd'),
           (fullof_path, 'food-fullof-nutrient'), (not_fullof_path, 'food-not_fullof-nutrient'),
           (cure_path, 'food-cure-disease'), (not_cure_path, 'food-not_cure-disease')]
# Chinese -> English relation-name lookup (keys are the labels stored in the CSVs).
rel_zh_en = {'组成': 'compose', '属于': 'belong', '相关': 'relevent', '适合':'benefit', '适量': 'not_benefit',
             '富含': 'fullof', '较少': 'not_fullof', '适宜': 'cure', '不适宜': 'not_cure'}
# English -> Chinese inverse of rel_zh_en.
rel_en_zh = {'compose': '组成', 'belong': '属于', 'relevent': '相关', 'benefit':'适合', 'not_benefit': '适量',
             'fullof': '富含', 'not_fullof': '较少', 'cure': '适宜', 'not_cure': '不适宜'}
def readData(raw_data):
    """Read every relation CSV and merge them into a single triple table.

    :param raw_data: list of (csv_path, rdf_type) pairs, where rdf_type looks
        like 'source_type-relation-target_type'.
    :return: tuple of
        RDFs -- DataFrame of all triples with columns
            source_name / relation / target_name / rdf_type,
        node_type_dict -- dict mapping entity name -> entity type,
        freq_rel -- dict mapping relation name (middle part of rdf_type)
            -> number of triples of that relation.
    """
    # Seed with an empty frame so the column order is fixed even for no input.
    frames = [pd.DataFrame(columns=['source_name', 'relation', 'target_name'])]
    node_type_dict = dict()  # entity name -> entity type, accumulated per file
    freq_rel = dict()
    for csv_path, rdf_type in raw_data:
        loaded_df = pd.read_csv(csv_path)
        # Record the type of every entity that appears in this file.
        node_type_dict = generateNodeTypeDict(node_type_dict, loaded_df, rdf_type)
        loaded_df_len = len(loaded_df)
        # Frequency of this relation = number of rows in its CSV.
        freq_rel[rdf_type.split('-')[1]] = loaded_df_len
        # Tag each row with the full 'source-relation-target' rdf type.
        loaded_df['rdf_type'] = [rdf_type for _ in range(loaded_df_len)]
        frames.append(loaded_df)
    # Concatenate once at the end instead of once per file: repeated
    # pd.concat inside the loop copies the growing frame each time
    # (quadratic); a single concat is linear in the total row count.
    RDFs = pd.concat(frames, ignore_index=True)
    return RDFs, node_type_dict, freq_rel
def generateNodeTypeDict(node_type_dict, loaded_df, rdf_type_name):
    """Map every entity in *loaded_df* to its type and merge into the dict.

    *rdf_type_name* looks like 'raw_food-compose-food'; its first and last
    parts name the types of the source and target columns.  An entity that
    appears in both columns keeps the source type, and entries derived from
    this frame overwrite any earlier entries in *node_type_dict*.

    :param node_type_dict: dict to merge into (also returned)
    :param loaded_df: DataFrame with 'source_name' and 'target_name' columns
    :param rdf_type_name: 'source_type-relation-target_type' label
    :return: the updated node_type_dict
    """
    source_type, _, target_type = rdf_type_name.split('-')
    frame_types = {}
    # Source column first so its type wins for entities present in both.
    for column, node_type in (('source_name', source_type),
                              ('target_name', target_type)):
        for node in set(loaded_df[column].tolist()):
            frame_types.setdefault(node, node_type)
    node_type_dict.update(frame_types)
    return node_type_dict
def generateNodeIDDict(data_list, id_lookup=True):
    """Build an integer-id lookup for a list of names.

    Ids are assigned by enumeration order of *data_list*.

    :param data_list: iterable of entity or relation names
    :param id_lookup: if True return {name: id}; otherwise the inverse
        mapping {id: name}
    :return: the lookup dict
    """
    # Enumerate directly instead of materialising an intermediate list of
    # (index, name) tuples and branching inside the loop.
    if id_lookup:
        return {name: idx for idx, name in enumerate(data_list)}
    return {idx: name for idx, name in enumerate(data_list)}
def readTripletsAsList(RDFs, node_type_dict, freq_rel):
    """
    Map every relation/entity name in the RDF table to an integer id, build
    the per-relation adjacency matrices, assemble a labelled node dataset and
    split it into train/test, persisting all artefacts to disk on the way.

    :param RDFs: DataFrame with source_name / relation / target_name / rdf_type
    :param node_type_dict: entity name -> entity type
    :param freq_rel: relation name -> number of triples of that relation
    :return: adjacencies, features, labels, labeled_nodes_idx, train_idx,
             test_idx, relation_dict, train_names, test_names
    """
    """生成关系,实体字典
    step 1
    """
    # Collect the distinct relation names, entity names and rdf-type labels.
    relation_list = list(set(RDFs['relation'].tolist()))  # relation names
    entity_list = list(set(RDFs['source_name'].tolist() + RDFs['target_name'].tolist()))  # entity names
    rdf_list = list(set(RDFs['rdf_type'].tolist()))  # rdf type labels
    # Build name -> id lookups for relations, entities and rdf types.
    relation_dict = generateNodeIDDict(relation_list, id_lookup=True)
    entity_dict = generateNodeIDDict(entity_list, id_lookup=True)
    rdf_type_dict = generateNodeIDDict(rdf_list, id_lookup=True)
    # print('关系类型数量:' + str(len(relation_dict)))  # 9
    # print('实体数量:' + str(len(entity_dict)))  # 15851
    # print('RDF类型数量:' + str(len(rdf_type_dict)))  # 9
    # Dump the entity -> id mapping as a tab-separated text file.
    with open('./datasets/entity_index_dict.txt', 'w', encoding='utf-8') as f:
        for ent_name, ent_idx in entity_dict.items():
            f.write(ent_name + '\t' + str(ent_idx) + '\n')
    label_header = 'type'  # column of labels_df holding the entity type
    nodes_header = 'nodes'  # column of labels_df holding the entity name
    """创建邻接矩阵
    step 2
    """
    # Adjacency matrices are square over the full entity set.
    adj_shape = (len(entity_list), len(entity_list))
    adjacencies = adjGeneration(relation_list, RDFs, freq_rel, entity_dict, adj_shape)
    # Re-key the entity dict with normalised unicode strings.
    # NOTE(review): np.unicode was removed in NumPy >= 1.20 — this code
    # requires an older NumPy; confirm the pinned version.
    entity_u_dict = {np.unicode(to_unicode(key)): val for key, val in entity_dict.items()}
    """构建数据集
    step 3. 使用构建好的实体字典,选取300个实体,并查询其id,和实体类型
    """
    nodes_dataset = []  # node names that make up the labelled dataset
    # count_r, count_f, count_d = 0, 0, 0 # 食物,原始食材,疾病需要定量收集,其他类型不需要
    for k, v in node_type_dict.items():
        # if v == 'raw_food':
        #     if count_r < 2000:
        #         nodes_dataset.append(k)
        #         count_r += 1
        # elif v == 'food':
        #     if count_f < 3000:
        #         nodes_dataset.append(k)
        #         count_f += 1
        # elif v == 'disease':
        #     if count_d < 1000:
        #         nodes_dataset.append(k)
        #         count_d += 1
        # else:
        #     nodes_dataset.append(k)
        if k not in nodes_dataset:
            nodes_dataset.append(k)
    random.shuffle(nodes_dataset)
    # 几个常见疾病需要添加进去
    # nodes_dataset += ['糖尿病', '高血压', '血脂异常', '痛风']
    # nodes_dataset = list(set(nodes_dataset))
    # Turn the collected node names into a (name, id, type) DataFrame.
    labels_df = pd.DataFrame(columns=('nodes', 'id', 'type'))
    for name in nodes_dataset:
        new = pd.DataFrame({'nodes': name,
                            'id': entity_dict[name],
                            'type': node_type_dict[name]}, index=[1])
        # NOTE(review): DataFrame.append was removed in pandas 2.0 —
        # this requires pandas < 2; confirm the pinned version.
        labels_df = labels_df.append(new, ignore_index=True)
    # print('数据集的长度为:' + str(len(labels_df)))  # 326, or 327
    # print(labels_df)
    # Split: first fifth of the shuffled nodes is the test set.
    cut = int(len(labels_df) // 5)
    labels_train_df = labels_df[cut:]  # training split
    labels_test_df = labels_df[:cut]  # test split
    """构造数据集
    step 4. 使用labels_df, labels_train_df, labels_test_df
    """
    # Recomputed identically to the dict built in step 2 above.
    entity_u_dict = {np.unicode(to_unicode(key)): val for key, val in entity_dict.items()}
    # Distinct node types present in the labelled dataset.
    labels_set = set(labels_df[label_header].values.tolist())
    # Node-type name -> label id, e.g. {'raw_food': 0, 'food': 1, ...}.
    labels_dict = {lab: i for i, lab in enumerate(list(labels_set))}
    # print('{} classes: {}'.format(len(labels_set), labels_set))  # 6 node types
    # One-hot label matrix: rows = all entities, cols = node types.
    labels = sp.lil_matrix((adj_shape[0], len(labels_set)))  # shape=(total_node_nums, total_node_type_nums)
    labeled_nodes_idx = []
    print('Loading training set')
    train_idx = []  # entity ids of the training nodes
    train_names = []  # entity names of the training nodes
    for nod, lab in zip(labels_train_df[nodes_header].values, labels_train_df[label_header].values):
        # Normalise the node name before looking it up.
        nod = np.unicode(to_unicode(nod))  # convert to unicode
        if nod in entity_u_dict:
            labeled_nodes_idx.append(entity_u_dict[nod])  # record the labelled node id
            label_idx = labels_dict[lab]  # label (node type) id
            # Set the one-hot cell (entity id, type id) for this node.
            labels[labeled_nodes_idx[-1], label_idx] = 1
            train_idx.append(entity_u_dict[nod])  # node id
            train_names.append(nod)  # node name
        else:
            print(u'Node not in dictionary, skipped: ', nod.encode('utf-8', errors='replace'))
    print('Loading test set')  # same processing as the training split
    test_idx = []
    test_names = []
    for nod, lab in zip(labels_test_df[nodes_header].values, labels_test_df[label_header].values):
        nod = np.unicode(to_unicode(nod))
        if nod in entity_u_dict:
            labeled_nodes_idx.append(entity_u_dict[nod])
            label_idx = labels_dict[lab]
            labels[labeled_nodes_idx[-1], label_idx] = 1
            test_idx.append(entity_u_dict[nod])
            test_names.append(nod)
        else:
            print(u'Node not in dictionary, skipped: ', nod.encode('utf-8', errors='replace'))
    # Sort the labelled node ids.
    labeled_nodes_idx = sorted(labeled_nodes_idx)
    # Persist the label matrix as CSR.
    labels = labels.tocsr()
    save_sparse_csr('./labels/labels.npz', labels)
    # Persist the train/test id and name arrays.
    np.save('./datasets/train_idx.npy', train_idx)
    np.save('./datasets/train_names.npy', train_names)
    np.save('./datasets/test_idx.npy', test_idx)
    np.save('./datasets/test_names.npy', test_names)
    # Persist the relation dict and the entity list.
    pkl.dump(relation_dict, open('./datasets/rel_dict.pkl', 'wb'))
    pkl.dump(entity_list, open('./datasets/nodes.pkl', 'wb'))
    # Identity feature matrix (one-hot feature per node).
    features = sp.identity(adj_shape[0], format='csr')  # identity matrix
    # 将字符转化为id
    # datasets = []
    # for i in range(len(RDFs)):
    #     # 将训练数据取出,每次取一行,分别将每行的实体和关系都转化为id
    #     entity_1 = entity_dict[RDFs.iloc[i]['source_name']]
    #     relation = relation_dict[RDFs.iloc[i]['relation']]
    #     entity_2 = entity_dict[RDFs.iloc[i]['target_name']]
    #     datasets.append([entity_1, relation, entity_2])  # 将每行数据处理后的结果添加到列表
    # # 打乱数据顺序,然后切分train,valid,test数据
    # random.shuffle(datasets)
    # cut_1 = int(len(datasets) / 5 * 3)
    # cut_2 = int(len(datasets) / 5) + cut_1
    # train_triplets = np.array(datasets[: cut_1])
    # valid_triplets = np.array(datasets[cut_1: cut_2])
    # test_triplets = np.array(datasets[cut_2 :])
    return adjacencies, features, labels, labeled_nodes_idx, train_idx, test_idx, relation_dict, train_names, test_names
def adjGeneration(relation_list, RDFs, freq_rel, entity_dict, adj_shape):
    """Build, per relation, a sparse adjacency matrix and its transpose.

    Each relation yields two entries in the returned list (forward and
    inverse edges); both are also written to ./adjacencies/<2i>.npz and
    ./adjacencies/<2i+1>.npz.  Relies on the module-level rel_zh_en mapping
    to translate the Chinese relation names into the keys used by freq_rel.

    :param relation_list: distinct (Chinese) relation names
    :param RDFs: full triple table (source_name / relation / target_name)
    :param freq_rel: English relation name -> triple count
    :param entity_dict: entity name -> integer id
    :param adj_shape: (n_entities, n_entities) target shape
    :return: list of scipy CSR matrices, two per relation (A, A^T)
    """
    # Collected adjacency matrices, two per relation.
    adjacencies = []
    for i, rel in enumerate(relation_list):
        # Log index, relation name and how many triples it has.
        print(u'Creating adjacency matrix for relation {}: {}, frequency {}'.format(i, rel, freq_rel[rel_zh_en[rel]]))
        # One (source_id, target_id) row per triple of this relation.
        edges = np.empty((freq_rel[rel_zh_en[rel]], 2), dtype=np.int32)
        # Number of edge rows actually filled in.
        size = 0
        # All triples of the current relation.
        chosen_df = RDFs[RDFs['relation'] == rel]
        for j in range(len(chosen_df)):
            s = chosen_df.iloc[j]['source_name']
            o = chosen_df.iloc[j]['target_name']
            # Store the id pair for this (s, rel, o) edge.
            edges[j] = np.array([entity_dict[s], entity_dict[o]])
            size += 1
        print('{} edges added'.format(size))
        row, col = np.transpose(edges)  # row: source ids, col: target ids
        data = np.ones(len(row), dtype=np.int32)  # every edge has weight 1
        # Build the adjacency matrix and its transpose from the coordinates.
        adj = sp.csr_matrix((data, (row, col)), shape=adj_shape, dtype=np.int8)
        # adj.data / adj.indices / adj.indptr / adj.shape are what gets saved.
        adjacencies.append(adj)
        adj_transp = sp.csr_matrix((data, (col, row)), shape=adj_shape, dtype=np.int8)
        adjacencies.append(adj_transp)
        # Persist both: even file index = forward, odd = transposed.
        save_sparse_csr('./adjacencies/' + '%d.npz' % (i * 2), adj)
        save_sparse_csr('./adjacencies/' + '%d.npz' % (i * 2 + 1), adj_transp)
    return adjacencies
def to_unicode(input):
    """Return *input* as a str, decoding bytes as UTF-8 (errors replaced).

    The previous Python-2 era implementation encoded the value first and then
    returned the resulting ``bytes`` object under Python 3 (the ``isinstance
    (input, str)`` check was always False after ``encode``), with an
    unreachable ``str(...).decode`` fallback that would raise on Python 3.
    Callers always need text, so bytes are decoded and anything else is
    passed through ``str()``.
    """
    if isinstance(input, bytes):
        return input.decode('utf-8', errors='replace')
    return str(input)
def save_sparse_csr(filename, array):
    """Persist a scipy CSR matrix by saving its raw parts to an .npz file."""
    parts = {'data': array.data, 'indices': array.indices,
             'indptr': array.indptr, 'shape': array.shape}
    np.savez(filename, **parts)
def adj_generation(train_triplets, entity_dict):
"""
根据数据创建邻接矩阵和度矩阵(记录节点位置)
一个训练集对应者一个邻接矩阵和度矩阵
:param train_triplets:
:param entity_dict:
:return:
"""
# 有多少个实体,邻接矩阵的行就有多少个
adj_list = [[] for _ in entity_dict]
for i, triplet in enumerate(train_triplets):
# adj_list[triplet[]]为【中心节点】,将如果存在相关实体,则将这个实体的位置记录在adj_list对应实体中
# 对于同一条关系边,要记两条,主次顺序相反,最终生成的adj_list是有个三层嵌套的数组
adj_list[triplet[0]].append([i, triplet[2]])
adj_list[triplet[2]].append([i, triplet[0]])
# 生成度矩阵,adj_list的一个元素长度为多少,与【中心顶点】相关的顶点就有多少
degrees = np.array([len(a) for a in adj_list])
adj_list = [np.array(a) for a in adj_list] # 将邻接矩阵的每行进行数组化
return degrees, adj_list
def dataProcess(raw_data):
    """
    Main preprocessing driver: read the raw RDF CSVs, build adjacency
    matrices / labels / train-test splits, prune rows outside the BFS level
    sets, and pickle everything to ./datasets/food.pickle.

    :param raw_data: list of (csv_path, rdf_type) pairs (see raw_rdf)
    :return: a 1-tuple (relation_dict,) — note the trailing comma makes
        this a tuple, not the bare dict
    """
    RDFs, node_type_dict, freq_rel = readData(raw_data)  # all RDF triples
    adjacencies, features, labels, labeled_nodes_idx, train_idx, test_idx, relation_dict, train_names, test_names = readTripletsAsList(RDFs,
                                                                                                                                      node_type_dict,
                                                                                                                                      freq_rel)
    # rel_list = range(len(adjacencies))
    # # 每个关系产生两个邻接矩阵,因此【adjacencies】的维度是9*2=18;rel_list 为 range(18)
    # for key, value in relation_dict.items():
    #     rel_list[value * 2] = key
    #     rel_list[value * 2 + 1] = key + '_INV'
    num_nodes = adjacencies[0].shape[0]
    identity_matrix = sp.identity(num_nodes, format='csr')  # identity matrix
    adjacencies.append(identity_matrix)  # add identity matrix
    support = len(adjacencies)  # number of adjacency matrices incl. the identity (support = 19)
    # a.sum() counts the 1-entries of each adjacency matrix (edge counts).
    print("Relations used and their frequencies" + str([a.sum() for a in adjacencies]))
    print("Calculating level sets...")
    t = time.time()
    # Get level sets (used for memory optimization)
    bfs_generator = bfs_relational(adjacencies, labeled_nodes_idx)
    lvls = list()
    lvls.append(set(labeled_nodes_idx))
    lvls.append(set.union(*bfs_generator.__next__()))
    print("Done! Elapsed time " + str(time.time() - t))
    # Delete unnecessary rows in adjacencies for memory efficiency
    todel = list(set(range(num_nodes)) - set.union(lvls[0], lvls[1]))
    for i in range(len(adjacencies)):
        csr_zero_rows(adjacencies[i], todel)
    # Bundle adjacencies, labels and the splits into one pickle.
    data = {'A': adjacencies,
            'y': labels,
            'train_idx': train_idx,
            'test_idx': test_idx}
    with open('./datasets/food' + '.pickle', 'wb') as f:
        pkl.dump(data, f, pkl.HIGHEST_PROTOCOL)
    return relation_dict,
if __name__ == '__main__':
    # Script entry point: build adjacency matrices, labels and the pickled
    # dataset from the raw RDF CSVs listed in raw_rdf.
    dataProcess(raw_rdf)
| UTF-8 | Python | false | false | 17,578 | py | 94 | data_preprocess.py | 30 | 0.596464 | 0.586524 | 0 | 394 | 37.045685 | 150 |
ffsjp/ffsjp | 10,024,453,713,694 | bd681a8e524d0f63c3000fb1dae02ef404ab1fd8 | 1388d1c5fb2c0486663e741b28dab9f7d9fbe834 | /ffsjp/models.py | bd37d505177602dd4876ff4e280151d17ed5a2a3 | [
"MIT"
]
| permissive | https://github.com/ffsjp/ffsjp | 24401ee923f283bc5e7068507e2e33b78ec9ff88 | 439092752c74959f37361cdb73fa242b4529fa49 | refs/heads/master | 2021-01-01T20:23:20.009320 | 2015-01-17T19:12:10 | 2015-01-17T19:12:10 | 29,202,803 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from ffsjp import db
class Pages(db.Model):
    # A static CMS page: body content plus SEO/menu metadata.
    id = db.Column(db.Integer, primary_key = True)
    model = db.Column(db.Text)  # NOTE(review): purpose unclear from here — presumably a template/model name; confirm against callers
    url = db.Column(db.String(20), index = True)  # slug used to look the page up
    title = db.Column(db.Text)  # page title
    content = db.Column(db.Text)  # main page body
    anons = db.Column(db.Text)  # short teaser / announcement text
    keywords = db.Column(db.Text)  # SEO keywords
    level = db.Column(db.Integer)  # assumes menu nesting level — TODO confirm
    activeMenu = db.Column(db.Text)  # presumably which menu entry to highlight; verify
class Cat(db.Model):
    # A material category; `mat` exposes the materials assigned to it.
    id = db.Column(db.Integer, primary_key = True)
    name = db.Column(db.String(40), index = True, unique = True)  # referenced by Materials.category (FK on name, not id)
    title = db.Column(db.String(80))  # human-readable category title
    ord = db.Column(db.Integer)  # sort order
    mat = db.relationship('Materials', backref = 'cat', lazy = 'dynamic')  # lazy query of the category's materials
class Materials(db.Model):
    # A downloadable/linked material belonging to one Cat (by category name).
    id = db.Column(db.Integer, primary_key = True)
    title = db.Column(db.Text)  # material title
    url = db.Column(db.Text)  # link to the material
    category = db.Column(db.String(40), db.ForeignKey('cat.name'))  # FK to Cat.name, not Cat.id
    anons = db.Column(db.Text)  # short teaser text
    ord = db.Column(db.Integer)  # sort order
| UTF-8 | Python | false | false | 950 | py | 8 | models.py | 4 | 0.637895 | 0.628421 | 0 | 28 | 32.892857 | 73 |
billsix/Craft | 2,061,584,302,117 | 2ec13414c889db418b875b205fba59c5b5b80df3 | 02bd298b75deac972a54b44101d575d7e13cd157 | /deps/curl/tests/http/test_12_reuse.py | 83bfadfe4ab339c937c027872dc02c9819b97f35 | [
"curl",
"MIT"
]
| permissive | https://github.com/billsix/Craft | 184c9c432c17d79cea5e077dc2ce536bdebfdffa | 5165ed913d1295095170747ec277e54d8bbeee91 | refs/heads/master | 2023-08-18T12:31:08.302524 | 2023-08-08T21:43:17 | 2023-08-08T21:43:17 | 241,174,423 | 1 | 1 | MIT | true | 2020-03-28T04:18:12 | 2020-02-17T18:04:08 | 2020-03-28T03:46:45 | 2020-03-28T04:18:11 | 14,783 | 0 | 1 | 0 | C | false | false | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
# SPDX-License-Identifier: curl
#
###########################################################################
#
import difflib
import filecmp
import logging
import os
import pytest
from testenv import Env, CurlClient
log = logging.getLogger(__name__)  # module-level logger for this test module
@pytest.mark.skipif(condition=Env.curl_uses_lib('bearssl'), reason='BearSSL too slow')
@pytest.mark.skipif(condition=not Env.have_ssl_curl(), reason=f"curl without SSL")
class TestReuse:

    # Verify that HTTP/1.1 honours a server-sent `Connection: close`.
    @pytest.mark.parametrize("proto", ['http/1.1'])
    def test_12_01_h1_conn_close(self, env: Env,
                                 httpd, nghttpx, repeat, proto):
        httpd.clear_extra_configs()
        # Make the server close every connection after a single request.
        httpd.set_extra_config('base', [
            f'MaxKeepAliveRequests 1',
        ])
        httpd.reload()
        count = 100
        client = CurlClient(env=env)
        url_pattern = f'https://{env.authority_for(env.domain1, proto)}/data.json?[0-{count-1}]'
        r = client.http_download(urls=[url_pattern], alpn_proto=proto)
        r.check_response(count=count, http_status=200)
        # With `Connection: close` arriving on every 2nd request, roughly
        # count/2 fresh connections are expected; allow a small tolerance.
        tolerance = 5
        lower, upper = count / 2 - tolerance, count / 2 + tolerance
        assert lower < r.total_connects < upper

    @pytest.mark.parametrize("proto", ['http/1.1'])
    def test_12_02_h1_conn_timeout(self, env: Env,
                                   httpd, nghttpx, repeat, proto):
        httpd.clear_extra_configs()
        # Make the server drop idle connections after one second.
        httpd.set_extra_config('base', [
            f'KeepAliveTimeout 1',
        ])
        httpd.reload()
        count = 5
        client = CurlClient(env=env)
        url_pattern = f'https://{env.authority_for(env.domain1, proto)}/data.json?[0-{count-1}]'
        r = client.http_download(urls=[url_pattern], alpn_proto=proto, extra_args=[
            '--rate', '30/m',
        ])
        r.check_response(count=count, http_status=200)
        # At 30 requests/minute every connection idles past the timeout,
        # so each transfer must open a brand-new connection.
        assert r.total_connects == count
| UTF-8 | Python | false | false | 2,979 | py | 126 | test_12_reuse.py | 54 | 0.549849 | 0.53575 | 0 | 78 | 37.192308 | 89 |
Adripdv/Dictionary-Data-Structure- | 6,682,969,133,807 | 8905f118cf2d635de1185873ed4565ae66e6903f | 0c828fcc5a26a2eb30485aee456da192e5773116 | /main.py | ff55d2bb39144e4cb11f63f08c06d4cf08146941 | []
| no_license | https://github.com/Adripdv/Dictionary-Data-Structure- | 6f01cd84037ced91cb164f0117bbd7b6de0d4fcd | 5d13107b44bb073ab8e5c4586bbcc2fcd0823e8a | refs/heads/master | 2023-03-07T23:46:04.545256 | 2021-02-18T12:15:45 | 2021-02-18T12:15:45 | 340,040,080 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Dictionary items are key value pairs enclosed in curly brackets
# Dictionary is ordered of python 3.7
# Dictionary is mutuable
# Dictionary keys are unique, cannot be duplicated
# Dictionary accepts different data types
''' Dict Attributes '''
# print(dir(dict))
# print(help(dict.pop))
''' Creating Python Dictionary '''
# dict_example = {}
# dict_example = {'name': 'Adriana', 'age': 40}
# dict_example = ([(1, 'car'), (2, 'bicycle')]) # list of tuples
# dict_example = dict([(1, 'car'), (2, 'bicycle')]) # convert list of tuples into key value pair dictionary
# print(dict_example)
''' Access Dictionary Values '''
# student = {'name': 'Adriana', 'age': 40}
# print(student['name'])
# print(student.get('age'))
# print(student.keys())
# print(student.values())
# students = [{'name': 'Adriana', 'age': 40}, {'name': 'Jonh', 'age': 45}]
# print(students[1]['name'])
# print(students[0])
# for i in range(len(students)):
# print(students[i]['name'])
''' Changing Dictionary Elements '''
# student = {'name': 'Adriana', 'age': 40}
# student['age'] = 37
# student.update({'name': 'Laura', 'age': 34})
# print(student)
# ====================================================
# student = {'name': 'Adriana', 'age': 40}
# student.setdefault('name', 'Laura')  # Checks if the key exists; if it does, do nothing
# student.setdefault('subject', 'python')  # If it doesn't exist, it is created
# student.setdefault('subject', 'math')  # does nothing, as 'subject' has already been populated
# print(student)
''' Remove Elements from Dictionary '''
# student = {'name': 'Adriana', 'age': 40}
# student.pop('age')
# print(student)
# ====================================================
# student = {'name': 'Adriana', 'age': 40}
# student.popitem()
# print(student)
# print(help(dict.popitem))
# ====================================================
# student = {'name': 'Adriana', 'age': 40}
# student.clear()
# print(student)
# ====================================================
# student = {'name': 'Adriana', 'age': 40}
# del student
# print(student)
''' Dictionary Membership Test '''
# student = {'name': 'Adriana', 'age': 40}
# print('name' in student) # return a boolean
# print('age' not in student) | UTF-8 | Python | false | false | 2,217 | py | 1 | main.py | 1 | 0.57465 | 0.559314 | 0 | 86 | 24.790698 | 108 |
XA1903LastTeam/OneGouAPI | 1,812,476,212,517 | 0e63419472e9e0c19679911b099867219173ac64 | 5bcba1aae1dca51bc5eb42a47c72ebf51d61e1b3 | /UserApp/views.py | 3649a4607624d608193677c885829104a80e79ac | []
| no_license | https://github.com/XA1903LastTeam/OneGouAPI | acdb6e1e1c96b37f1867e50113fbd51623efcbf2 | 2a92757c1ecbacfcd4d7d478310c1d9ab1f55b07 | refs/heads/master | 2022-12-01T04:46:22.878640 | 2019-09-16T13:20:32 | 2019-09-16T13:20:32 | 206,899,167 | 1 | 1 | null | false | 2022-11-22T04:14:29 | 2019-09-07T01:37:07 | 2019-09-16T13:20:55 | 2022-11-22T04:14:26 | 5,097 | 0 | 1 | 3 | Python | false | false | from django.core.cache import cache
from django.core.files.storage import default_storage
from django.core.files.images import ImageFile
from django.views import View
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from UserApp.models import UserModel
from Address.models import AddressModel
from CartList.models import Order_listModel, OrderGoods
from CartList.api import Order_listSeraLizer
from Goods.api import GoodsModelSerializers
from CartList.models import CartModel
from .api import UserSeraLizer
from .api import AdderssSeraLizer
# Create your views here.
# 用户登陆接口,接收用户手机号和模拟短信验证码,登陆成功后将成功登陆的用户ID写入session中,时间设置位关闭连接时清除session
class UserAPIView(View):
    """User CRUD endpoints dispatched on the POST field ``menu``.

    menu == '0'  login with phone + (mocked) SMS verification code
    menu == '1'  register a new user (code checked against the cache)
    menu == '2'  logout (flush the session)
    menu == '3'  update the logged-in user's profile
    DELETE       soft-delete (deactivate) the logged-in user
    """

    def get(self, request):
        # List every user, serialized.
        datas = UserModel.objects.all()
        serializer = UserSeraLizer(datas, many=True)
        return JsonResponse({'data': serializer.data })

    @csrf_exempt
    def dispatch(self, request, *args, **kwargs):
        # CSRF is disabled for the whole view.
        return super().dispatch(request, *args, **kwargs)

    def post(self, request):
        menu = request.POST.get('menu', None)
        # menu '0': login.  Compares the submitted code against the cached
        # one ('yanzhengma'); on success the serialized user is stored in
        # the session, which expires when the browser closes (set_expiry(0)).
        if menu == '0':
            phone = request.POST.get('phone', None)
            yan = request.POST.get('yan', None)
            print(yan)
            print(cache.get('yanzhengma'))
            if phone:
                if UserModel.objects.filter(phone=phone).first():
                    if yan == cache.get('yanzhengma'):
                        user = UserModel.objects.filter(phone=phone).first()
                        request.session['user'] = UserSeraLizer(user).data
                        print(request.session['user'])
                        request.session.set_expiry(0)
                        return JsonResponse({'msg': '登陆成功', 'code': 200, })
                    else:
                        return JsonResponse({'msg': '验证码错误', 'code': 400})
                else:
                    return JsonResponse({'msg': '该用户未注册', 'code': 400})
            else:
                return JsonResponse({'msg': '手机号错误!', 'code': 400})
        # menu '1': register a new user after checking the (image-based,
        # SMS-mocking) verification code; optional avatar upload is stored
        # under photo/<user id>.jpg.
        elif menu == '1':
            u = UserModel()
            yan = request.POST.get('yan')
            if yan == cache.get('yanzhengma'):
                u.name = request.POST.get('name')
                u.phone = request.POST.get('phone')
                u.sex = int(request.POST.get('sex'))
                u.bool = request.POST.get('bool')
                try:
                    u.save()
                except:
                    return JsonResponse({ 'msg': '数据异常创建失败'})
                else:
                    if request.FILES.get('image'):
                        file_content = ImageFile(request.FILES['image'])
                        # Replace any previous avatar for this id.
                        default_storage.delete('photo/%s.jpg' % u.id)
                        default_storage.save('photo/%s.jpg' % u.id, file_content)
                        u.image = 'photo/%s.jpg' % u.id
                        try:
                            u.save()
                        except:
                            return JsonResponse({'msg': '图片数据异使用默认头像, 用户创建成功!', 'code': 200})
                    return JsonResponse({ 'msg': '用户创建成功' })
            else:
                return JsonResponse({'msg': '验证码错误'})
        # menu '2': logout — flush the whole session.
        elif menu == '2':
            u = request.session.get('user')
            print(u)
            if u:
                request.session.flush()
                return JsonResponse({ 'code': 200, 'msg': '退出登陆成功'})
            else:
                return JsonResponse({'msg': '用户未登陆'})
        # menu '3': update the logged-in user's profile fields; absent
        # fields keep their current value.
        elif menu == '3':
            user = request.session.get('user', None)
            print(request.POST.get('name'))
            if not user:
                return JsonResponse({'msg': '登陆已经失效'})
            u = UserModel.objects.filter(id=user['id']).first()
            if u:
                u.name = request.POST.get('name') if request.POST.get('name') else u.name
                u.phone = request.POST.get('phone') if request.POST.get('phone') else u.phone
                u.sex = int(request.POST.get('sex')) if request.POST.get('sex') else u.sex
                # NOTE(review): this second assignment overwrites `sex` with
                # the 'bool' field — probably meant `u.bool = ...`; confirm.
                u.sex = request.POST.get('bool') if request.POST.get('bool') else u.bool
                if request.FILES.get('image'):
                    file_content = ImageFile(request.FILES['image'])
                    # NOTE(review): `user` here is the session dict, so the
                    # path becomes 'photo/{...dict...}.jpg' — likely meant
                    # user['id']; verify against the stored files.
                    default_storage.delete('photo/%s.jpg' % user)
                    default_storage.save('photo/%s.jpg' % user, file_content)
                    u.image = 'photo/%s.jpg' % user
                try:
                    u.save()
                except:
                    return JsonResponse({'msg': '数据异常更新失败'})
                else:
                    return JsonResponse({'msg': '数据更新成功'})
            else:
                return JsonResponse({'msg': '用户不存在或'})
        else:
            return JsonResponse({'msg': '无效的操作'})

    # DELETE: soft-delete the logged-in user by flipping its `bool` flag.
    def delete(self, request):
        print('执行删除')
        user = request.session.get('user')
        print(request.body.decode('utf-8'))
        if user:
            u = UserModel.objects.filter(id=user['id']).first()
            if u:
                u.bool = False
                try:
                    u.save()
                except:
                    return JsonResponse({ 'msg': '数据异常用户注销失败'})
                else:
                    # NOTE(review): SessionBase has no per-key delete();
                    # presumably `del request.session['user']` was intended —
                    # confirm this actually clears the login.
                    request.session.delete('user')
                    print(request.session.get('user'))
                    return JsonResponse({ 'msg': '用户注销成功'})
            else:
                return JsonResponse({ 'msg': '该用户不存在'})
        else:
            return JsonResponse({ 'msg': '用户未登陆'})
# 地址相关接口
class AddressAPIView(View):
    """Address endpoints dispatched on the POST field ``menu``.

    menu == '0'  list all addresses of the logged-in user
    menu == '1'  create an address for the logged-in user
    menu == '2'  modify an existing address by its id
    """

    @csrf_exempt
    def dispatch(self, request, *args, **kwargs):
        # CSRF is disabled for the whole view.
        return super().dispatch(request, *args, **kwargs)

    def post(self, request):
        # menu '0': return every address of the logged-in user.
        menu = request.POST.get('menu')
        if menu == '0':
            user = request.session.get('user')
            if not user:
                return JsonResponse({ 'code': 200, 'msg': '未检测到登陆用户'})
            datas = AddressModel.objects.filter(user=user['id'])
            address = AdderssSeraLizer(datas, many=True)
            return JsonResponse({'code': 200, 'data': address.data})
        # menu '1': create a new address for the logged-in user.
        elif menu == '1':
            user = request.session.get('user')
            address = AddressModel()
            ress = request.POST.get('address')
            if not user:
                # NOTE(review): `user` is None here, so this query is a
                # no-op before the early return — dead code; confirm intent.
                user = UserModel.objects.filter(id=user).first()
                return JsonResponse({'code': 400, 'msg':'用户未登陆'})
            if ress:
                address.address = ress
                address.state = request.POST.get('state') if request.POST.get('state') else True
                # NOTE(review): `user` is the serialized session dict, not a
                # UserModel instance — assigning it to the FK likely makes
                # save() raise (caught below as '数据异常保存失败'); verify.
                address.user = user
                try:
                    address.save()
                except:
                    return JsonResponse({'code': 400, 'msg': '数据异常保存失败'})
                else:
                    return JsonResponse({'code': 200, 'msg': '添加地址成功'})
            else:
                return JsonResponse({'code': 400, 'msg': '无效的地址'})
        # menu '2': update an address (looked up by address_id); absent
        # fields keep their current value.
        elif menu == '2':
            address_id = request.POST.get('address_id')
            address = AddressModel.objects.filter(id=address_id).first()
            if address:
                address.address = request.POST.get('address') if request.POST.get('address') else address.address
                address.state = request.POST.get('state') if request.POST.get('state') else address.state
                try:
                    address.save()
                except:
                    return JsonResponse({ 'code': 400, 'msg': '数据异常保存修改失败'})
                else:
                    return JsonResponse({ 'code': 200, 'msg': '修改成功'})
            else:
                return JsonResponse({ 'msg': '无效的用户或地址', 'code': 400})
        else:
            return JsonResponse({ 'code': 400, 'msg': '无效的操作'})
# 用户订单接口
class UserOrder(View):
    # GET: return every order (with its goods) of the logged-in user.
    def get(self, request):
        user = request.session.get('user')
        if not user:
            # NOTE(review): 'code' is the string '200' here but an int
            # everywhere else in this file — confirm which clients expect.
            return JsonResponse({ 'code': '200', 'msg': '未检测到登陆的用户'})
        data = {}
        if UserModel.objects.filter(id=user['id']):
            user = UserModel.objects.filter(id=user['id']).first()
            order = Order_listModel.objects.filter(user=user)
            if not order:
                return JsonResponse({ 'code': 200, 'msg': '该用户没有任何订单'})
            for o in order:
                # Serialized order fields, with one extra key per line item.
                data[o.id] = Order_listSeraLizer(o).data
                goods = OrderGoods.objects.filter(order=o)
                for g in goods:
                    data[o.id][g.id] = GoodsModelSerializers(g.goods).data
                    # NOTE(review): 'count' is overwritten on each iteration,
                    # so only the last line item's count survives — verify.
                    data[o.id]['count'] = g.count
            return JsonResponse({ 'code': 200, 'msg': '查询成功', 'data':data})
        else:
            return JsonResponse({ 'code': 400, 'msg': '查找的用户不存在' })

    @csrf_exempt
    def dispatch(self, request, *args, **kwargs):
        # CSRF is disabled for the whole view.
        return super().dispatch(request, *args, **kwargs)

    # POST: given an order id, return every item of that order.
    def post(self, request):
        order = request.POST.get('order')
        print(order)
        data = {}
        if Order_listModel.objects.filter(id=order):
            goods = OrderGoods.objects.filter(order=order)
            for g in goods:
                data[g.id] = GoodsModelSerializers(g.goods).data
                data[g.id]['count'] = g.count
            return JsonResponse({ 'code': 200, 'msg': '查询成功', 'data': data})
        else:
            return JsonResponse( { 'code': 400, 'msg': '该订单不存在'})
# 用户购物车接口
class UserCart(View):
    # GET: return every goods entry in the logged-in user's cart.
    def get(self, request):
        user = request.session.get('user')
        if not user:
            return JsonResponse({ 'code': 200, 'msg': '没有检测到登陆的用户'})
        goods = CartModel.objects.filter(user=user['id'])
        data = {}
        if goods:
            # Keyed by the cart row id, value is the serialized goods item.
            for g in goods:
                data[g.id] = GoodsModelSerializers(g.goods).data
            return JsonResponse({ 'code': 200, 'msg': '查询成功', 'data': data})
        else:
            return JsonResponse({ 'code': 400, 'msg': '该用户购物车为空'})
0-ng/ice-wine | 13,572,096,700,137 | b119d9dc4bcc7dbd6ae57323d8f2a2d76e65c0f9 | bbb17bc20e7e504a85f164b895dcf4dbefe9ba40 | /static/migrations/新建文件夹/0006_auto_20200616_2120.py | 08c4372743f2d68e149e550d71ea4074afaf45df | []
| no_license | https://github.com/0-ng/ice-wine | f19f3ba9a8a145d8e6eabb88344f6f434bf1a3fb | f10010630fb86e3aebe81974a13421240d162fb0 | refs/heads/master | 2023-02-12T22:17:17.689332 | 2021-01-07T04:52:18 | 2021-01-07T04:52:18 | 326,909,926 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.2.6 on 2020-06-16 13:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('luogu', '0005_auto_20200616_2103'),
]
operations = [
migrations.AlterField(
model_name='tag',
name='name',
field=models.CharField(blank=True, choices=[(1, '函数与极限'), (2, '导数与微分'), (3, '微分中值定理与导数的应用'), (4, '不定积分'), (5, '定积分'), (6, '微分方程'), (7, '向量代数与空间解析几何'), (8, '多元函数微分法及其应用'), (9, '重积分'), (10, '曲线积分与曲面积分'), (11, '无穷级数')], default='', max_length=100, null=True),
),
]
| UTF-8 | Python | false | false | 739 | py | 302 | 0006_auto_20200616_2120.py | 93 | 0.561139 | 0.482412 | 0 | 18 | 32.166667 | 268 |
ikamensh/pydolons | 18,511,309,068,467 | aebb36293c867e34c20406697dfefb893f2188f4 | bdc948500f60da3e24e4be7a508cded9c6a65a4d | /ui/core/gameconfig/UserConfig.py | 714440129b85b09841b095c9e54eb573b1fcd600 | [
"Unlicense"
]
| permissive | https://github.com/ikamensh/pydolons | 098fed13f2d11be61d7b74922817654f27f64b99 | 5af1e6461530a5d4548588c2f9804a91c28f33ea | refs/heads/develop | 2021-12-14T14:11:17.808044 | 2021-12-01T15:23:33 | 2021-12-01T15:23:33 | 138,201,231 | 5 | 0 | null | false | 2021-12-01T15:22:08 | 2018-06-21T17:21:21 | 2021-12-01T15:19:48 | 2021-12-01T15:22:07 | 107,343 | 3 | 0 | 15 | Python | false | false | from os import path, mkdir, environ, name as osname
from datetime import datetime
from json import dumps, loads
from copy import copy
DEFAULT_CONFIG = {
'window': {
'resolution': {
'width': 1366,
'height': 768},
'fullscreen': True,
},
'sounds': {
'muted': False,
'volume': 1.0
},
'musics': {
'muted': False,
'volume': 1.0
}
}
DEFAULT_SIZE_CONFIG = {
'window': {
'resolution': {
'width': 1024,
'height': 768},
'fullscreen': False
}
}
class UserConfig(object):
    """Load, save and (re)create the per-user Pydolons configuration.

    The config file lives at ``<home>/Pydolons-dev/config.json``.  After
    ``readSetting()`` the parsed dict is available as ``self.read_config``.
    """
    def __init__(self):
        # Resolve the user's home directory in an OS-dependent way
        # ('nt' == Windows; everything else is assumed to define $HOME).
        if osname == 'nt':
            self.home = path.join('c:\\', environ['HOMEPATH'])
        else:
            self.home = environ['HOME']
        self.config_dir = path.join(self.home, 'Pydolons-dev')
        # NOTE(review): dev_size is never read or written elsewhere in this
        # class -- confirm against callers before removing.
        self.dev_size = None
    @property
    def default_config(self):
        """Return the default configuration serialized as a JSON string."""
        global DEFAULT_CONFIG  # read-only access; 'global' is not strictly required here
        return dumps(DEFAULT_CONFIG)
    def readSetting(self):
        """Populate ``self.read_config`` from disk, creating defaults if needed.

        If the directory or file is missing (or the path exists but is not a
        directory), a fresh default config is written and loaded instead.
        """
        config = path.join(self.config_dir, 'config.json')
        if path.exists(self.config_dir):
            if path.isdir(self.config_dir):
                if path.exists(config):
                    # May silently regenerate the file if it predates the
                    # hard-coded cutoff (see updateOldConfig); the read below
                    # then picks up the freshly written defaults.
                    self.updateOldConfig(config)
                    with open(config, 'r') as f:
                        raw_config = f.read()
                    self.read_config = loads(raw_config)
                else:
                    self.create_default()
            else:
                self.create_default()
        else:
            self.create_default()
    def saveSetting(self):
        """Serialize ``self.read_config`` back to config.json."""
        config = path.join(self.config_dir, 'config.json')
        raw_config = dumps(self.read_config)
        with open(config, 'w') as f:
            f.write(raw_config)
    def create_default(self):
        """Write the default config to disk and load it into memory."""
        global DEFAULT_CONFIG
        if not path.exists(self.config_dir):
            mkdir(self.config_dir)
        config = path.join(self.config_dir, 'config.json')
        raw_config = self.default_config
        # Copy so later in-memory edits don't mutate the module-level default.
        self.read_config = copy(DEFAULT_CONFIG)
        with open(config, 'w') as f:
            f.write(raw_config)
    def setSize(self, size):
        """Store a (width, height) pair into the in-memory config."""
        self.read_config['window']['resolution']['width'] = size[0]
        self.read_config['window']['resolution']['height'] = size[1]
    def updateOldConfig(self, config):
        """Reset the config to defaults if the file predates the cutoff date.

        The magic timestamp below marks a config-format change: any file
        last modified before it is considered stale and regenerated.
        """
        new_dt = datetime.fromtimestamp(1566853815.7816741)
        config_dt = datetime.fromtimestamp(path.getmtime(config))
        if new_dt >= config_dt:
            self.create_default()
# if __name__ == '__main__':
# cfg = Settings()
# cfg.readSetting()
# print(cfg.read_config)
# cfg.saveSetting(DEFAULT_SIZE_CONFIG)
# print(cfg.read_config)
| UTF-8 | Python | false | false | 2,745 | py | 400 | UserConfig.py | 391 | 0.530055 | 0.516576 | 0 | 94 | 28.191489 | 68 |
hyxz/script | 10,591,389,380,932 | 480e8a7317b813b69afb9642717af9426be292fb | c44b8c4e08818c1e368c0b813c807aa1e23fc971 | /bigram_emcoUN.py | 8e785e2bff895472890b682974ecb9e5ab89def8 | []
| no_license | https://github.com/hyxz/script | 6e77099fb54edfb4485fe41a78e5e2a1f8166d31 | a23d4faea361e4800cdda33d0615473de7170867 | refs/heads/master | 2020-04-07T22:56:36.872279 | 2018-11-23T07:30:42 | 2018-11-23T07:30:42 | 158,791,244 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding=utf-8
import json
from tqdm import tqdm
#process emnlp data
def gener_bigramNUA(inputfile):
    """Build a bigram successor table for the EMNLP news corpus.

    Each line of *inputfile* is a whitespace-separated list of integer token
    ids.  Every line is padded with sentinel tokens (5255 = start, 5256 is
    inserted just before the first 5254 end/pad token, or at position 52 for
    full-length lines), adjacent token pairs are counted, and the result is
    written as JSON mapping ``first_token -> [[successor, count], ...]``
    sorted by count descending.

    Fixes over the original:
    - ``line[:-1]`` could chop a real digit off a final line that lacks a
      trailing newline; ``strip()`` alone is sufficient and safe.
    - Dead variables (relation, word_count_list, word_count_all) removed.
    - tqdm progress wrappers removed (display-only third-party dependency).
    """
    # Collect adjacent token pairs from every padded line.
    bi_list = []
    with open(inputfile, 'r') as rf:
        for line in rf:
            line_item = [int(i) for i in line.strip().split(' ')]
            if len(line_item) < 51:
                line_item.insert(0, 5255)  # start-of-sentence sentinel
                # Insert the end sentinel just before the first pad token.
                # (Raises ValueError if 5254 is absent, as the original did.)
                line_item.insert(line_item.index(5254), 5256)
            else:
                line_item.insert(0, 5255)
                line_item.insert(52, 5256)
            for it in range(len(line_item) - 1):
                bi_list.append((line_item[it], line_item[it + 1]))
    # Group successors by their predecessor token (insertion order preserved).
    successors = {}
    for first, second in bi_list:
        successors.setdefault(first, []).append(second)
    all_dict = {}
    for k, v in successors.items():
        counts = {}
        for item in v:
            counts[item] = counts.get(item, 0) + 1
        all_dict[k] = sorted(counts.items(), key=lambda x: x[1], reverse=True)
    # NOTE(review): this filename looks copy-pasted from the COCO variant
    # below (the NU variant writes "bigram_emnlpNU.json") -- confirm which
    # file downstream consumers expect before renaming.
    with open("image_cocoNUA.json", "w") as wf:
        json.dump(all_dict, wf)
#process coco
def gener_bigramCONUA(inputfile):
    """Build a bigram successor table for the COCO caption corpus.

    Each line of *inputfile* is a whitespace-separated list of integer token
    ids.  Lines are padded with sentinels (4682 = start; 4683 is inserted
    just before the first 4681 end/pad token, or at position 38 for
    full-length lines), adjacent pairs are counted, and the result is
    written to ``image_cocoNUA.json`` as a mapping
    ``first_token -> [[successor, count], ...]`` sorted by count descending.

    Fixes over the original:
    - ``line[:-1]`` could chop a real digit off a final line that lacks a
      trailing newline; ``strip()`` alone is correct.
    - The bare ``except:`` is narrowed to ``ValueError`` (what list.index
      raises when token 4681 is absent); diagnostic prints are kept.
    - Dead variables and the redundant re-scan loop removed.
    - tqdm progress wrappers removed (display-only third-party dependency).
    """
    bi_list = []
    with open(inputfile, 'r') as rf:
        for line in rf:
            line_item = [int(i) for i in line.strip().split(' ')]
            if len(line_item) < 37:
                line_item.insert(0, 4682)  # start-of-sentence sentinel
                try:
                    line_item.insert(line_item.index(4681), 4683)
                except ValueError:
                    # No end token 4681 on this line -- report and continue.
                    print("###line_item:", line_item)
                    print("###line_item len:", len(line_item))
            else:
                line_item.insert(0, 4682)
                line_item.insert(38, 4683)
            for it in range(len(line_item) - 1):
                bi_list.append((line_item[it], line_item[it + 1]))
    # Group successors by their predecessor token (insertion order preserved).
    successors = {}
    for first, second in bi_list:
        successors.setdefault(first, []).append(second)
    all_dict = {}
    for k, v in successors.items():
        counts = {}
        for item in v:
            counts[item] = counts.get(item, 0) + 1
        all_dict[k] = sorted(counts.items(), key=lambda x: x[1], reverse=True)
    with open("image_cocoNUA.json", "w") as wf:
        json.dump(all_dict, wf)
def gener_bigramNU(inputfile):
    """Build a full-vocabulary bigram table for the EMNLP news corpus.

    Pads each line with sentinels (5255 = start; 5256 before the first 5254
    end/pad token, or at position 52 for full-length lines), counts adjacent
    token pairs, and writes ``bigram_emnlpNU.json`` mapping EVERY vocabulary
    id 0..5256 to ``[[successor, count], ...]`` sorted by count descending
    (an empty list for ids that never appear as a predecessor -- same shape
    as the original output).

    Fixes over the original:
    - One pass over the bigrams instead of re-scanning the whole pair list
      once per vocabulary id (was O(5257 * N); now O(N)).
    - No crash on an empty input file (the original indexed an empty list).
    - ``line[:-1]`` could chop a digit off a final line with no trailing
      newline; ``strip()`` alone is correct.
    - tqdm progress wrappers removed (display-only third-party dependency).
    """
    bi_list = []
    with open(inputfile, 'r') as rf:
        for line in rf:
            line_item = [int(i) for i in line.strip().split(' ')]
            if len(line_item) < 51:
                line_item.insert(0, 5255)
                line_item.insert(line_item.index(5254), 5256)
            else:
                line_item.insert(0, 5255)
                line_item.insert(52, 5256)
            for it in range(len(line_item) - 1):
                bi_list.append((line_item[it], line_item[it + 1]))
    # Single-pass count: predecessor -> {successor: count}.
    counts = {}
    for first, second in bi_list:
        per_key = counts.setdefault(first, {})
        per_key[second] = per_key.get(second, 0) + 1
    word_count_all = {}
    for word_key in range(5257):
        per_key = counts.get(word_key, {})
        word_count_all[word_key] = sorted(
            per_key.items(), key=lambda x: x[1], reverse=True)
    with open("bigram_emnlpNU.json", "w") as wf:
        json.dump(word_count_all, wf)
def load_word_to_dict(file):
    """Deserialize the JSON document at *file* and return the result."""
    with open(file, "r") as handle:
        data = json.load(handle)
    return data
if __name__ == "__main__":
    # Earlier experiments, kept for reference:
    # gener_dict("emnlp10.txt")
    # te([2358 ,5080 ,3343, 1868, 4785, 2789, 4773,2789,4773,2358,5080,3343,2789,1000,2358,4773,2789,4773,2358,5080,3343,2789,1000,2358])
    #gener_bigramNU("jiak.txt")
    #gener_bigramNUA("emnlp_news.txt")
    # Build the COCO bigram table from the caption token file.
    gener_bigramCONUA("image_coco.txt")
| UTF-8 | Python | false | false | 5,466 | py | 4 | bigram_emcoUN.py | 3 | 0.57135 | 0.52854 | 0 | 174 | 30.413793 | 135 |
ppcecho/mysite | 4,956,392,261,326 | 07539bbb4033ed272cd0baea1ea977ecd1460336 | 2a364db2c964a8f5f2b2f49b3be582ca9ecd0a72 | /apache/django.wsgi | 5d4ec2a69c89af8fdbb045b3f2356aef31dbd353 | []
| no_license | https://github.com/ppcecho/mysite | d2d4f773d1db47209693d140f13fe78f31f7c2e5 | f8981d6bef512716a951b493f9d60da250a6f823 | refs/heads/master | 2021-01-10T18:46:35.577432 | 2012-12-14T12:07:06 | 2012-12-14T12:07:06 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
mport os
import sys
import django.core.handlers.wsgi
os.environ['DJANGO_SETTINGS_MODULE'] = 'mysite.settings'
app_apth = "/root/mysite/"
sys.path.append(app_apth)
application = django.core.handlers.wsgi.WSGIHandler()
| UTF-8 | Python | false | false | 240 | wsgi | 4 | django.wsgi | 3 | 0.7625 | 0.7625 | 0 | 9 | 25.666667 | 56 |
kiruthihan10/smile-Recognition-with-Tensorflow | 3,934,190,081,618 | 95191c345fae2ba5b88fa3714a883c348fdd3b16 | 190c3445affa95a5854f0eba2bc24433b78362c6 | /classify.py | 351e6dbd63fa31c5185f43bca202dd0f009e22e6 | [
"MIT"
]
| permissive | https://github.com/kiruthihan10/smile-Recognition-with-Tensorflow | e5ec7d03ad0df14a28d8ae6690882ac7de18b74b | 173cda52310cc9e47fc9b19a6eecc88a56d39e84 | refs/heads/master | 2021-12-29T00:07:52.268891 | 2021-12-11T12:11:47 | 2021-12-11T12:11:47 | 178,582,494 | 1 | 0 | MIT | false | 2020-07-16T09:09:15 | 2019-03-30T16:20:07 | 2020-07-16T09:08:56 | 2020-07-16T09:09:14 | 13 | 0 | 0 | 0 | Jupyter Notebook | false | false | import shutil
smile = open("SMILE_list.txt")
for line in smile:
line = line.strip()
try:
shutil.move("C:\\Users\\kirut\\Documents\\smile_rec\\lfwcrop_color\\faces\\"+line[:-3]+"ppm","C:\\Users\\kirut\\Documents\\smile_rec\\SMILE_list\\"+line[:-3]+"ppm")
except :
None
non_smile = open("NON-SMILE_list.txt")
for line in non_smile:
line = line.strip()
#print(line)
try:
shutil.move("C:\\Users\\kirut\\Documents\\smile_rec\\lfwcrop_color\\faces\\"+line[:-3]+"ppm","C:\\Users\\kirut\\Documents\\smile_rec\\NON-SMILE_list\\"+line[:-3]+"ppm")
except :
None | UTF-8 | Python | false | false | 625 | py | 7 | classify.py | 4 | 0.5936 | 0.5872 | 0 | 16 | 37.1875 | 176 |
internetimagery/animCopy | 16,346,645,551,675 | 6ba30e2821f3e4755957da747ae57b164342f377 | 632fc29f9a1b9b12756d7c8ee2b0e85813d9d4f6 | /view/maya/clipEdit_OLD WITH ADVANCED CLIP PREVIEW.py | b67261fc62d1db3964aa30f925f8fae45bb6987f | []
| no_license | https://github.com/internetimagery/animCopy | 4713a7a351674d3ddb3ae4246497778b704458f5 | 40552689bbdb84be8d8da3534458c62add844b82 | refs/heads/master | 2021-01-10T10:21:49.899565 | 2016-01-06T11:41:15 | 2016-01-06T11:41:15 | 43,822,420 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Create a new/edit clip
import maya.cmds as cmds
import os.path
import time
import warn
import os
class ClipEdit(object):
    """
    Create or edit an Animation

    Maya UI dialog for capturing/editing an animation clip: shows either a
    live temporary camera view or a captured preview image, plus fields for
    the clip name and frame range.  Callbacks supplied by the caller fetch
    thumbnails, character data and clip captures.
    """
    def __init__(s, i18n, char, clip, previewImage, requestThumb, requestCharData, requestClipCapture):
        # NOTE: 's' is used in place of the conventional 'self' throughout.
        s.i18n = i18n
        s.char = char
        s.clip = clip
        s.previewImage = previewImage # Initial preview image
        s.requestThumb = requestThumb # asking for new thumbnail
        s.requestCharData = requestCharData # Grab the character data
        s.requestClipCapture = requestClipCapture # Grab capture information
        s.thumbs = {} # Captured thumbs
        s.winWidth = 500 # Window width
        # VALIDATE BEFORE DOING ANYTHING
        with warn:
            s.validateObjs()
        # Unique-per-second name for the throwaway preview camera.
        s.camName = "TempCam_%s" % int(time.time())
        s.createCam()
        # INIT DATA:
        # Seed clip metadata with defaults (current playback range, "CLIP").
        clip.metadata["name"] = clip.metadata.get("name", "CLIP")
        clip.metadata["range"] = clip.metadata.get("range", [
            cmds.playbackOptions(q=True, min=True),
            cmds.playbackOptions(q=True, max=True)
        ])
        r = clip.metadata["range"]
        s.pose = True if r[0] == r[1] else False # Is the range a single frame?
        s.range = r
        s.name = clip.metadata["name"]
        s.winName = "ClipNewWin"
        # Replace any previous instance of this window.
        if cmds.window(s.winName, ex=True):
            cmds.deleteUI(s.winName)
        s.window = cmds.window(s.winName, rtf=True, t=s.i18n["title"])
        mainLayout = cmds.columnLayout()
        ## CAMERA CONTROLS
        # Start in preview mode if an image was supplied, else live camera.
        s.live = False if s.previewImage else True # Live cam?
        s.camLayout = cmds.paneLayout(h=s.winWidth, w=s.winWidth, p=mainLayout)
        viewer = cmds.modelPanel(
            menuBarVisible=False,
            camera=s.camera,
        )
        cmds.modelEditor( # Tweak nice default visuals
            viewer,
            e=True,
            grid=False,
            da="smoothShaded",
            allObjects=False,
            nurbsSurfaces=True,
            polymeshes=True,
            subdivSurfaces=True,
            displayTextures=True
        )
        # Static preview image shown when not in live-camera mode.
        s.previewLayout = cmds.columnLayout(
            h=s.winWidth,
            w=s.winWidth,
            p=mainLayout,
            m=False
        )
        s.preview = cmds.iconTextStaticLabel(
            style="iconOnly",
            h=s.winWidth,
            w=s.winWidth,
            bgc=[0.2,0.2,0.2],
            image="out_snapshot.png"
        )
        cmds.columnLayout(w=s.winWidth, p=mainLayout)
        cmds.separator()
        ## DATA CONTROLS
        cmds.rowLayout(nc=2, adj=1)
        cmds.columnLayout(adj=True)
        s.clipname = cmds.textFieldGrp(
            l=s.i18n["clipname"],
            text=clip.metadata["name"],
            h=30,
            tcc=s.nameChange
        )
        r = clip.metadata["range"]
        s.clippose = cmds.checkBoxGrp(
            l=s.i18n["clippose"],
            h=30,
            v1=r[0] == r[1],
            cc=s.poseChange
        )
        # Range fields are disabled while "pose" (single frame) is checked.
        s.cliprange = cmds.intFieldGrp(
            l=s.i18n["cliprange"],
            nf=2,
            v1=r[0],
            v2=r[1],
            en=False if cmds.checkBoxGrp(s.clippose, q=True, v1=True) else True,
            h=30,
            cc=s.rangeChange
        )
        cmds.setParent("..")
        s.thumb = cmds.iconTextButton(
            l=s.i18n["captureBtn"],
            ann=s.i18n["thumbDesc"],
            style="iconAndTextVertical",
            h=90,
            w=90,
            bgc=[0.2,0.2,0.2],
            image="out_snapshot.png",
            c=s.captureThumb
        )
        cmds.columnLayout(w=s.winWidth, p=mainLayout)
        cmds.button(
            l="CAPTURE CLIP",
            h=40,
            w=s.winWidth
        )
        if s.live:
            s.captureMode()
        else:
            s.previewMode()
        cmds.showWindow(s.window)
        # Save on window close; clean up camera on application exit.
        cmds.scriptJob(uid=[s.window, s.save], ro=True)
        cmds.scriptJob(e=["quitApplication", s.cleanup], ro=True)
    def validateObjs(s):
        """Raise RuntimeError if any character object is missing from the scene."""
        for obj in s.requestCharData(s.char):
            if not cmds.objExists(obj):
                raise RuntimeError, "%s could not be found." % obj
    def captureMode(s):
        """Switch the UI to the live camera view."""
        s.live = True
        cmds.layout(s.previewLayout, e=True, m=False)
        cmds.layout(s.camLayout, e=True, m=True)
        cmds.iconTextButton(s.thumb, e=True, l=s.i18n["captureBtn"])
    def previewMode(s):
        """Switch the UI to the captured preview image."""
        s.live = False
        cmds.layout(s.camLayout, e=True, m=False)
        cmds.layout(s.previewLayout, e=True, m=True)
        cmds.iconTextButton(s.thumb, e=True, l=s.i18n["recaptureBtn"])
        cmds.iconTextStaticLabel(s.preview, e=True, image=s.previewImage.name if s.previewImage else "out_snapshot.png")
    def createCam(s):
        """Create (or reuse) the hidden temporary preview camera."""
        if not cmds.objExists(s.camName):
            s.camera = cmds.camera(n=s.camName)[0]
        else:
            s.camera = cmds.ls(s.camName)[0]
        cmds.viewSet(s.camera, p=True) # Move camera to perspective position
        cmds.setAttr("%s.focalLength" % s.camera, 500)
        cmds.setAttr("%s.horizontalFilmAperture" % s.camera, 5)
        cmds.setAttr("%s.verticalFilmAperture" % s.camera, 5)
        cmds.setAttr("%s.visibility" % s.camera, 0)
    def captureThumb(s):
        """Toggle capture: grab a thumbnail when live, else return to live mode."""
        if s.live:
            with warn:
                s.thumbs = s.requestThumb(s.camera)
                s.previewImage = s.thumbs["thumbLarge"]
                s.previewMode()
        else:
            s.captureMode()
    def nameChange(s, text):
        """UI callback: track edits to the clip name field."""
        s.name = text
    def poseChange(s, val):
        """UI callback: toggle single-frame (pose) mode."""
        cmds.intFieldGrp(s.cliprange, e=True, en=False if val else True)
        s.pose = val
    def rangeChange(s):
        """UI callback: read and normalize the frame range fields."""
        min_ = cmds.intFieldGrp(s.cliprange, q=True, v1=True)
        max_ = cmds.intFieldGrp(s.cliprange, q=True, v2=True)
        s.range = sorted([min_, max_])
    def cleanup(s):
        # Remove temporary camera
        if cmds.objExists(s.camera):
            cmds.delete(s.camera)
    def save(s):
        """Window-close callback: persist the character data."""
        print "is window here?", cmds.window(s.window, ex=True)
        # NOTE(review): s.name / s.pose / s.range are evaluated but never
        # written back into clip.metadata before saving -- confirm whether
        # that was intended.
        s.name
        s.pose
        s.range
        s.char.save()
# from animCopy.i18n.en import En as i18n
#
# def test(*arg):
# print arg
#
# ClipNew(i18n["clipNew"], test, test)
| UTF-8 | Python | false | false | 6,280 | py | 24 | clipEdit_OLD WITH ADVANCED CLIP PREVIEW.py | 23 | 0.546019 | 0.533599 | 0 | 196 | 31.040816 | 120 |
kevin-leptons/clink | 11,416,023,072,863 | 0e4a2eda5d57f3275a657ba2815f7b92acd9f62e | db7cc9e850171993e9804289d22b97632d259df2 | /clink/service/mongo/error.py | bdfda3c2d37cda08947e74948bf0ae2272063304 | [
"CC-BY-4.0"
]
| permissive | https://github.com/kevin-leptons/clink | 6047f470fb4caf342322981d7122c3862c613416 | 5257691381eceeb4ae6218e50bcecb4a70f7a637 | refs/heads/master | 2021-01-20T16:25:01.723700 | 2017-09-17T08:43:26 | 2017-09-17T08:43:26 | 90,838,416 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class DocSpecExit(Exception):
def __init__(self, name):
self._msg = name
def __str__(self):
return self._msg
class DocumentNotExist(Exception):
    """Raised when a requested document (collection) does not exist.

    str(err) yields the missing document's name.
    """
    def __init__(self, doc_name):
        self._doc_name = doc_name
    def __str__(self):
        return self._doc_name
class DocumentIndexError(Exception):
    """Raised when a document (collection) is missing a required index.

    The message embeds the expected index's name, unique/min/max attributes
    (None when unspecified) and key specification.
    """
    def __init__(self, doc_name, req_index):
        spec = req_index.document
        # dict.get() yields None for absent attributes, matching the
        # original's explicit 'in' checks.
        index_str = 'name={}; unique={}; min={}; max={}; key={};'.format(
            spec['name'],
            spec.get('unique'),
            spec.get('min'),
            spec.get('max'),
            spec['key']
        )
        self._msg = 'document \'{}\' must be specify indexes: {}'.format(
            doc_name, index_str
        )
    def __str__(self):
        return self._msg
| UTF-8 | Python | false | false | 1,055 | py | 179 | error.py | 105 | 0.523223 | 0.523223 | 0 | 38 | 26.763158 | 73 |
carlbarcenas/Temp-PDF-Generator | 7,249,904,844,203 | cb6c60409c41794c46e3ccb6fb05f5ab447657b8 | 270947e7cf90b861a52ef26e0abcb60f368d7e09 | /reportlabtest.py | e06e1d1570ea673d6d2a3c99b9b76d0fdaca2503 | []
| no_license | https://github.com/carlbarcenas/Temp-PDF-Generator | 0248da9ac6bc4ea40d6034229bf89f3d6bdfb079 | 560e0f264c8a44fd1dff5d71457f4dfcc2606ac9 | refs/heads/main | 2023-08-21T16:14:09.916509 | 2021-10-20T20:34:19 | 2021-10-20T20:34:19 | 415,154,176 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from reportlab.pdfgen import canvas
from reportlab.lib.units import inch, cm, mm
from reportlab.lib.pagesizes import landscape, A4
from reportlab.lib import colors
from reportlab.platypus import Table, TableStyle
from reportlab.pdfbase.ttfonts import TTFont
from reportlab import graphics
#-------------------------------INITIALIZE----------------------------------
# Create and Initialize Report Page
pdf = canvas.Canvas("test.pdf") # CHANGEME
canvas.Canvas.setPageSize(pdf, (landscape(A4))) #8.27 x 11.69 inches
#--------------------------------PAGE 1: OFFENSE----------------------------
# Add Title and Subtitle
visname = "Butler" # TODO: Automate this
homename = "Marquette"
pdf.drawCentredString(5.845*inch, 8*inch, "POST GAME SHORT REPORT")
pdf.drawCentredString(5.845*inch, 7.75*inch, homename + " vs. " + visname)
#*****Offensive Tempo Analysis (OTA)*****
# Section Title String
pdf.drawString(0.2*inch, 6.75*inch, "OFFENSIVE TEMPO ANALYSIS")
# Variable Declaration for OTA
labels = None
poss = None
col_PPP = None
deadball_PPP = 0.00
madeBasket_PPP = 0.00
defRebound_PPP = 0.00
steal_PPP = 0.00
offRebound_PPP = 0.00
headers = [labels, poss, "1-6\nseconds", "7-12\nseconds", "13-18\nseconds", "19-24\nseconds", "25-30\nseconds", col_PPP]
data = [headers,
["Deadball", 100, 0, 0, 0, 0, 0, str(deadball_PPP) + " PPP"],
["Made Basket", 100, 0, 0, 0, 0, 0, str(madeBasket_PPP) + " PPP"],
["Defensive Rebound", 100, 0, 0, 0, 0, 0, str(defRebound_PPP) + " PPP"],
["Steal", 100, 0, 0, 0, 0, 0, str(steal_PPP) + " PPP"],
["Offensive Rebound", 100, 0, 0, 0, 0, 0, str(offRebound_PPP) + " PPP"],
["Overall", 100, 0, 0, 0, 0, 0, None],
[None, None, str(0.0)+" PPP", str(0.0)+" PPP", str(0.0)+" PPP", str(0.0)+" PPP", str(0.0)+" PPP", None]]
t=Table(data)#, colWidths=9*mm, rowHeights=5*mm) # Create Table
t.setStyle(TableStyle([
('ALIGN',(0,0),(-1,-1),'CENTER'), # Alignment for main table
('ALIGN', (0,0),(0,8),'RIGHT'), # Row Label Alignment
('VALIGN',(0,0),(-1,-1),'MIDDLE'), # Vertical Alignment
('INNERGRID', (2,1), (-2, -2), 0.45, colors.gold), # Add Grid
('BOX', (2,1), (-2,-2), 0.25, colors.gold), # Add Box
('BACKGROUND', (2,1), (-2, -2), colors.navy),
('TEXTCOLOR', (2,1), (-2,-2), colors.wheat),
('FONTSIZE', (0,0), (-1,-1), 7)
]))
t.wrapOn(pdf, 0.5*inch, 0.5*inch) # Determine table size
t.drawOn(pdf, 0*inch, 4.5*inch) # Determine table coords and draw
#*****Offensive Efficiency Analysis*****
# Section Title String
pdf.drawString(5.75*inch, 6.75*inch, "OFFENSIVE EFFICIENCY ANALYSIS")
# Variable Declaration
halfcourt_PPP = 0.0
transition_PPP = 0.0
putbacks_PPP = 0.0
inbounds_PPP = 0.0
half1_PPP = 0.0
half2_PPP = 0.0
# Table formation
headers = [None, None, "TOR", "EFG", "ORR", "TS%", "FTAR", "RSS", "RS", "3JS", "3J", "2JS", "2J", "Overall", None]
data = [headers,
["Halfcourt", 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, halfcourt_PPP],
["Transition", 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, transition_PPP],
["Putbacks", 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, putbacks_PPP],
["Inbounds", 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, inbounds_PPP],
["1st Half", 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, half1_PPP],
["2nd Half", 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, half2_PPP]
]
t = Table(data, colWidths=9*mm, rowHeights=5*mm)
t.setStyle(TableStyle([
('ALIGN',(0,0),(-1,-1),'CENTER'), # Alignment for main table
('ALIGN', (0,0),(0,8),'RIGHT'), # Row Label Alignment
('VALIGN',(0,0),(-1,-1),'MIDDLE'), # Vertical Alignment
('INNERGRID', (2,1), (-2, -1), 0.45, colors.gold), # Add Grid
('BOX', (2,1), (-2,-1), 0.25, colors.gold), # Add Box
('BACKGROUND', (2,1), (-2, -1), colors.navy),
('TEXTCOLOR', (2,1), (-2,-1), colors.wheat),
('FONTSIZE', (0,0), (-1,-1), 6)
]))
t.wrapOn(pdf, 2*inch, 2.81*inch) # Determine table size
t.drawOn(pdf, 5.75*inch, 5*inch) # Determine table coords and draw
#*****OFFENSIVE PLAYER ANALYSIS*****
roster = []
#--------------------------------PAGE 2: DEFENSE----------------------------
pdf.showPage() # End previous page, begin new page
pdf.drawString(4*inch,2*inch, "TEST")
#-------------------------------SAVE CHANGES AND CLOSE----------------------
pdf.save() | UTF-8 | Python | false | false | 4,461 | py | 1 | reportlabtest.py | 1 | 0.549428 | 0.474333 | 0 | 111 | 39.198198 | 123 |
fhaynes/slithermud | 6,528,350,311,394 | a8964f0bb614e4ba2a342d042cbda3379081955a | 2a44b0ac89c536f43bc8e878ad4e7af5e9077037 | /MudCharacter.py | 014feb5ede7f76c141c38b16ec0d68d9938d9970 | [
"Artistic-1.0"
]
| permissive | https://github.com/fhaynes/slithermud | 25d5924b23a66f8f060a381e924588afb52b93bc | f14da3d65cdc5187678e8e7cc05e56643a663c56 | refs/heads/master | 2021-01-10T11:27:38.825008 | 2007-10-22T02:48:32 | 2007-10-22T02:48:32 | 44,458,589 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import MudObject
import MudProtocol
class MudCharacter(MudObject.MudObject):
    """A player character: a MudObject with a network socket, location
    references, an inventory, and colorized output helpers.

    The live socket is held in the instance attribute ``self.sockRef`` and
    deliberately kept OUT of the persisted ``self.info`` dict.
    """
    def __init__(self, sockRef=''):
        MudObject.MudObject.__init__(self)
        self.sockRef = sockRef
        self.info['roomRef'] = None
        self.info['zoneRef'] = None
        self.info['password'] = ''
        self.info['login_state'] = None
        self.info['admin_level'] = 1
        self.info['items'] = {}
        # ID of the template the character was created from
        self.info['templateId'] = None
        # The rank of the player. Player, Builder, Admin, etc
        self.info['rank'] = 1
        # Should they see color? By default, no
        self.info['color'] = False
    # ------------------- #
    #  Get/Set Functions  #
    # ------------------- #
    def setRoomRef(self, room):
        """Sets the room reference the char is in."""
        self.info['roomRef'] = room
    def getRoomRef(self):
        """Returns the room reference the char is in."""
        return self.info['roomRef']
    def setZoneRef(self, zone):
        """Sets the zone reference the char is in."""
        self.info['zoneRef'] = zone
    def getZoneRef(self):
        """Returns the zone reference that char is in."""
        return self.info['zoneRef']
    def getSockRef(self):
        """Returns a reference to the transport layer of the character."""
        return self.sockRef
    def setSockRef(self, sockRef):
        """Sets the socket reference of the character."""
        # BUG FIX: this previously assigned self.info['sockRef'], but
        # getSockRef() and all output methods read self.sockRef, so the
        # setter had no effect (and risked persisting a live socket object
        # inside the serializable info dict).
        self.sockRef = sockRef
    def setLoginState(self, newState):
        """Sets the login state of the character."""
        self.info['login_state'] = newState
    def getLoginState(self):
        """Returns the login state of the character."""
        return self.info['login_state']
    def setPassword(self, password):
        """Sets the char's password."""
        self.info['password'] = password
    def getPassword(self):
        """Returns the char's password."""
        return self.info['password']
    def setAdminLevel(self, newLevel):
        """Sets the admin level of the char."""
        self.info['admin_level'] = newLevel
    def getAdminLevel(self):
        """Returns the admin level of the char."""
        return self.info['admin_level']
    def addItem(self, item):
        """Adds an item to the dictionary. Indexed by ID Number."""
        self.info['items'][item.info['id_num']] = item
        item.setOwner(self)
    def removeItem(self, item):
        """Removes an item from the dictionary."""
        del self.info['items'][item.getId()]
    def getItems(self):
        """Returns the item dictionary."""
        return self.info['items']
    def clearItems(self):
        """Deletes all the items in the dictionary."""
        self.info['items'] = {}
    def setColor(self, flag):
        """Sets the color flag to true or false."""
        self.info['color'] = flag
    def getColor(self):
        """Returns true/false if the char wants color."""
        return self.info['color']
    def findItemByName(self, name):
        """Searches for an item in char's inventory by name (case-insensitive).

        Returns the first match, or None if nothing matches.
        """
        for eachItem in self.getItems().values():
            if eachItem.getName().lower() == name.lower():
                return eachItem
        return None
    def getTemplateId(self):
        """Returns the template ID."""
        return self.info['templateId']
    def setTemplateId(self, idNum):
        """Sets the template ID."""
        self.info['templateId'] = int(idNum)
    # ---------------------- #
    # Data Writing Functions #
    # ---------------------- #
    def prompt(self):
        """Returns a prompt to show to the user.

        Returns None if the protocol handler is unavailable
        (AttributeError is swallowed, preserving original behavior).
        """
        if self.getColor() == True:
            try:
                return MudProtocol.protocolHandler.processText('\r\n'+self.info['name']+'> ')
            except AttributeError:
                pass
        else:
            try:
                return MudProtocol.protocolHandler.processNoColor('\r\n'+self.info['name']+'> ')
            except AttributeError:
                pass
    def writeWithPrompt(self, data):
        """Writes a string to the socket with a prompt following."""
        # NOTE(review): the prompt text returned by self.prompt() is already
        # processed, then run through the protocol handler a second time
        # here -- confirm whether double-processing is intentional.
        if self.getColor() == True:
            try:
                self.sockRef.write(MudProtocol.protocolHandler.processText('\r\n'+data+'\r\n'+self.prompt()))
            except AttributeError:
                pass
        else:
            try:
                self.sockRef.write(MudProtocol.protocolHandler.processNoColor('\r\n'+data+'\r\n'+self.prompt()))
            except AttributeError:
                pass
    def writePlain(self, data):
        """Writes data to the socket without a prompt following."""
        if self.getColor():
            try:
                self.sockRef.write(MudProtocol.protocolHandler.processText(data))
            except AttributeError:
                pass
        else:
            try:
                self.sockRef.write(MudProtocol.protocolHandler.processNoColor(data))
            except AttributeError:
                pass
| UTF-8 | Python | false | false | 5,359 | py | 62 | MudCharacter.py | 58 | 0.529017 | 0.528643 | 0 | 168 | 30.666667 | 112 |
jamesremuscat/avx | 1,391,569,446,160 | c11ec89ed7e17d2b7442ac821b36a0769bae33e5 | 8c4432951318ef9ca5d2d183841ec7318a278c48 | /src/avx/controller/Controller.py | 6953f3a5d04985c8996ffff3f40c10d0f791fcc8 | []
| no_license | https://github.com/jamesremuscat/avx | b0fe466f3bbb7ad208c8e62d5ebb32e87ad0acad | 9cc5d2f2b6e7b86f1c843318c0a8e3e3cceaa60a | refs/heads/master | 2021-08-14T07:25:08.636508 | 2021-04-16T15:41:28 | 2021-04-16T15:41:28 | 8,010,587 | 4 | 2 | null | false | 2020-10-26T17:06:44 | 2013-02-04T16:00:24 | 2020-05-26T02:30:57 | 2020-10-26T17:06:43 | 3,303 | 4 | 2 | 12 | Python | false | false | from argparse import ArgumentParser, FileType
from avx import PyroUtils, _version
from avx.controller.ControllerHttp import ControllerHttp
from avx.controller.messagebus import make_messagebus, MessageBus, PYRO_MSGBUS_NAME
from avx.devices import Device
from avx.Sequencer import Sequencer
from avx.utils import loadState, saveState
from logging import Handler
from Pyro4.errors import PyroError, NamingError
from semantic_version import Version as SemVer
import atexit
import logging.config
import Pyro4
import json
Pyro4.config.SERIALIZER = 'pickle'
Pyro4.config.SERIALIZERS_ACCEPTED.add('pickle')
Pyro4.config.REQUIRE_EXPOSE = False
def versionsCompatible(remote, local):
    """Return True if a remote controller version can talk to this client.

    Major versions must match exactly.  For stable (>= 1.x) releases the
    remote minor version must be at least ours; pre-1.0 releases require an
    exact minor-version match.
    """
    remote_ver = SemVer(remote)
    local_ver = SemVer(local)
    if remote_ver.major != local_ver.major:
        return False
    if remote_ver.major == 0:
        # 0.x releases make no compatibility promises between minors.
        return remote_ver.minor == local_ver.minor
    return remote_ver.minor >= local_ver.minor
class Controller(object):
    '''
    A Controller is essentially a bucket of devices, each identified with a string deviceID.

    It exposes itself (and individual devices) over Pyro, relays broadcast
    messages to local devices, a shared message bus and any slave
    controllers, and keeps an in-memory log ring buffer.
    '''
    pyroName = "avx.controller"
    version = _version.__version__
    def __init__(self):
        self.devices = {}
        # deviceID -> Pyro URI of the registered device object.
        self.proxies = {}
        self.sequencer = Sequencer(self)
        self.sequencer.start()
        # Capture log records so remote clients can fetch them via getLog().
        self.logHandler = ControllerLogHandler()
        logging.getLogger().addHandler(self.logHandler)
        self.slaves = []
        self.daemon = Pyro4.Daemon(PyroUtils.getHostname())
        self.messagebus = None
    @staticmethod
    def fromPyro(controllerID=""):
        """Look up a (possibly named) controller in the Pyro name server and
        return a ControllerProxy, verifying version compatibility first."""
        controllerAddress = "PYRONAME:" + Controller.pyroName
        if controllerID != "":
            controllerAddress += "." + controllerID
        logging.info("Creating proxy to controller at " + controllerAddress)
        controller = ControllerProxy(Pyro4.Proxy(controllerAddress))
        remoteVersion = controller.getVersion()
        if not versionsCompatible(remoteVersion, Controller.version):
            raise VersionMismatchError(remoteVersion, Controller.version)
        return controller
    def loadConfig(self, configFile, overrideToDebug=False):
        """Parse a JSON config (path or open file object) and apply it:
        create devices, attach slaves, optionally start HTTP, set up logging."""
        try:
            # 'file' is the Python 2 built-in type; accepts an open handle.
            if isinstance(configFile, file):
                config = json.load(configFile)
                self.configFile = configFile.name
            else:
                config = json.load(open(configFile))
                self.configFile = configFile
            self.config = config
            for d in config["devices"]:
                device = Device.create(d, self)
                self.addDevice(device)
            if "options" in config:
                if "controllerID" in config["options"]:
                    self.controllerID = config["options"]["controllerID"]
                if "slaves" in config["options"]:
                    # Connect to each slave controller; incompatible or
                    # unreachable slaves are logged and skipped.
                    for slave in config["options"]["slaves"]:
                        try:
                            sc = Controller.fromPyro(slave)
                            if versionsCompatible(sc.getVersion(), self.getVersion()):
                                self.slaves.append(sc)
                            else:
                                logging.error("This Controller is version " + str(self.getVersion()) + " but tried to add slave " + slave + " of version " + str(sc.getVersion()))
                        except NamingError:
                            logging.error("Could not connect to slave with controller ID " + slave)
                if "http" in config["options"]:
                    if config["options"]["http"] is True:
                        ch = ControllerHttp(self)
                        ch.start()
            if "logging" in config:
                logging.config.dictConfig(config["logging"])
                if overrideToDebug:
                    logging.getLogger().setLevel(logging.DEBUG)
                    logging.info("-d specified, overriding any specified default logger level to DEBUG")
        except ValueError:
            logging.exception("Cannot parse config.json!")
    def registerClient(self, clientURI):
        # Deprecated no-op kept for wire compatibility with old clients.
        logging.warn('Client {} called deprecated and non-functional method registerClient'.format(clientURI))
    def unregisterClient(self, clientURI):
        # Deprecated no-op kept for wire compatibility with old clients.
        # (Typo "unegisterClient" is in the emitted log string; left as-is.)
        logging.warn('Client {} called deprecated and non-functional method unegisterClient'.format(clientURI))
    def broadcast(self, msgType, source, data=None):
        ''' Send a message to all clients '''
        # Fan out to the message bus, local devices that can receive
        # messages, and every slave controller.
        logging.debug("Broadcast: {}, {}, {}".format(msgType, source, data))
        if self.messagebus:
            self.messagebus.send_no_ack('avx', (msgType, source, data))
        for device in self.devices.values():
            if hasattr(device, 'receiveMessage'):
                device.receiveMessage(msgType, source, data)
        for slave in self.slaves:
            slave.broadcast(msgType, source, data)
    def getVersion(self):
        """Return this controller's avx version string."""
        return self.version
    def addDevice(self, device):
        """Register a device; raises DuplicateDeviceIDError on ID clash."""
        if self.hasDevice(device.deviceID):
            raise DuplicateDeviceIDError(device.deviceID)
        self.devices[device.deviceID] = device
        # Give the device a broadcast callable pre-bound to its own ID.
        device.broadcast = lambda t, b=None: self.broadcast(t, device.deviceID, b)
    def getDevice(self, deviceID):
        """Return the local device with the given ID (KeyError if absent)."""
        return self.devices[deviceID]
    def proxyDevice(self, deviceID):
        """Return (registering on first use) a Pyro URI for the device,
        searching local devices first, then slaves.

        Raises KeyError if the device is unknown everywhere.
        """
        if deviceID not in self.proxies.keys():
            if self.hasDevice(deviceID):
                self.proxies[deviceID] = self.daemon.register(self.getDevice(deviceID))
            else:
                for slave in self.slaves:
                    if slave.hasDevice(deviceID):
                        self.proxies[deviceID] = slave.proxyDevice(deviceID)
        return self.proxies[deviceID]
    def hasDevice(self, deviceID):
        """True if this controller (locally) owns the given deviceID."""
        return deviceID in self.devices
    def initialise(self):
        """Initialise every device and arrange for deinitialise at exit."""
        for device in self.devices.itervalues():
            device.initialise()
        atexit.register(self.deinitialise)
    def deinitialise(self):
        """Shut down every device."""
        for device in self.devices.itervalues():
            device.deinitialise()
    def startServing(self):
        """Register with the Pyro name server, start the message bus and
        block in the Pyro request loop."""
        PyroUtils.setHostname()
        ns = Pyro4.locateNS()
        uri = self.daemon.register(self)
        if hasattr(self, "controllerID"):
            name = self.pyroName + "." + self.controllerID
        else:
            name = self.pyroName
        logging.info("Registering controller as " + name)
        ns.register(name, uri)
        logging.info('Registering messagebus...')
        make_messagebus.storagetype = 'memory'
        messagebus_uri = self.daemon.register(MessageBus)
        ns.register(PYRO_MSGBUS_NAME, messagebus_uri)
        self.messagebus = Pyro4.Proxy('PYRONAME:' + PYRO_MSGBUS_NAME)
        atexit.register(lambda: self.daemon.shutdown())
        logging.info('Entering request loop')
        self.daemon.requestLoop()
    def sequence(self, *events):
        """Queue events on the sequencer."""
        self.sequencer.sequence(*events)
    def getLog(self):
        """Return the buffered log records (see ControllerLogHandler)."""
        return self.logHandler.entries
class DeviceProxy(object):
    """Pyro proxy to a single remote device with transparent reconnect.

    Attribute lookups are cached; callables are wrapped so that a
    communication failure (e.g. after a controller restart changes the
    device URI) triggers a cache flush, a re-proxy and one retry.
    """
    def __init__(self, controller, deviceID):
        self._proxy = Pyro4.Proxy(controller.proxyDevice(deviceID))
        self._controller = controller
        self._deviceID = deviceID
        # name -> raw attribute value or wrapped callable.
        self._attr_cache = {}
    def proxy_attribute(self, attr, name):
        """Return *attr* unchanged if plain data, else a retry-wrapping callable."""
        if not callable(attr):
            return attr
        else:
            def proxy(*args, **kwargs):
                try:
                    return attr(*args, **kwargs)
                except (Pyro4.errors.CommunicationError, Pyro4.errors.ConnectionClosedError):
                    # These tend to happen when the controller restarts, and all our device proxies get different URIs/ports
                    self._invalidate_cache()
                    self._reproxy()
                    # Re-resolve through __getattr__ so the fresh proxy is used.
                    return getattr(self, name)(*args, **kwargs)
            return proxy
    def _invalidate_cache(self):
        # Drop all cached attributes; they are bound to the stale proxy.
        self._attr_cache.clear()
    def _reproxy(self):
        # Ask the controller for the device's current URI and reconnect.
        self._proxy = Pyro4.Proxy(self._controller.proxyDevice(self._deviceID))
    def __getattr__(self, name):
        # Only called for names not found normally; fills the cache lazily.
        if name not in self._attr_cache:
            self._attr_cache[name] = self.proxy_attribute(getattr(self._proxy, name), name)
        return self._attr_cache[name]
class ControllerProxy(object):
    """Thin wrapper around a remote Controller Pyro proxy.

    Delegates every attribute to the underlying proxy and adds subscript
    access: ``controller["deviceID"]`` yields a DeviceProxy.
    """
    def __init__(self, controller):
        self.controller = controller
    def __getattr__(self, name):
        # Fall through to the remote controller for anything not defined here.
        return getattr(self.controller, name)
    def __getitem__(self, item):
        return DeviceProxy(self, item)
class ControllerLogHandler(Handler):
    """Logging handler keeping roughly the last 100 records in memory.

    Records carrying exception info have it stripped (tracebacks may not be
    picklable for Pyro transport) and are followed by a synthetic WARNING
    record pointing the reader at the full controller logs.
    """
    def __init__(self):
        Handler.__init__(self)
        self.entries = []
    def emit(self, record):
        self.entries.append(record)
        # Keep the buffer bounded at 100 entries.
        while len(self.entries) > 100:
            del self.entries[0]
        if record.exc_info is None:
            return
        # Strip the traceback and append a placeholder record instead.
        record.exc_info = None
        stripped = logging.LogRecord(
            "Controller", logging.WARNING, record.pathname, record.lineno,
            "", {}, None, None)
        stripped.created = record.created
        stripped.asctime = getattr(record, "asctime", "--")
        self.format(stripped)
        stripped.message = ("An exception was stripped from this log, "
                            "see controller logs for details")
        self.entries.append(stripped)
class VersionMismatchError(Exception):
    """Raised when the remote controller's version differs from this client's."""

    def __init__(self, remoteVersion, localVersion):
        message = (
            f"Controller is version {remoteVersion!s} but this client is "
            f"written for version {localVersion!s}. "
            "Check your installation and try again."
        )
        super().__init__(message)
class DuplicateDeviceIDError(Exception):
    """Raised when two devices are configured with the same device ID."""

    def __init__(self, duplicatedID):
        message = "Device already exists: " + duplicatedID
        super().__init__(message)
def main():
    """Command-line entry point: parse arguments, load the configuration
    and run the controller until it terminates.

    Exits with status 1 if no config file is given and ./config.json does
    not exist.
    """
    parser = ArgumentParser()
    parser.add_argument("-d", "--debug",
                        help="Show debugging output.",
                        action="store_true")
    parser.add_argument("-c", "--config",
                        help="Configuration file to use",
                        type=FileType("r"))
    args = parser.parse_args()
    logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', level=(logging.DEBUG if args.debug else logging.INFO))
    controller = Controller()
    logging.info("Starting avx controller v{}".format(controller.getVersion()))
    if args.config:
        controller.loadConfig(args.config, args.debug)
    else:
        # No config given on the command line: fall back to ./config.json.
        # NOTE(review): this fallback omits the debug flag that the branch
        # above passes to loadConfig -- confirm that is intended.
        try:
            configFile = open('config.json', 'r')
            controller.loadConfig(configFile)
        except IOError:
            logging.error("No config file specified and config.json not found! Exiting...")
            exit(1)
    controller.initialise()
    # Blocks until the Pyro request loop shuts down.
    controller.startServing()
    logging.info("avx controller terminated.")
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 10,708 | py | 62 | Controller.py | 49 | 0.612066 | 0.610291 | 0 | 297 | 35.053872 | 207 |
1337tester/pyfund | 1,924,145,366,596 | 3e0dd5b09dba35b52a4dd308550206549269cd09 | 73e77bd85e55f1580edad1b40c4d7dafacb1d640 | /Scripts/all_pairs.py | 3a82ba87d0bff93fad2d928040d346b94d69c86b | []
| no_license | https://github.com/1337tester/pyfund | 572e5183c6ac59f4e5f96d8ad0dc30953e5854a5 | b023b8945477ca5c8340c1ce8a9b65005a660f06 | refs/heads/master | 2022-08-30T19:09:34.041156 | 2022-08-27T09:56:11 | 2022-08-27T09:56:11 | 18,031,913 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import itertools
import pprint
SET1 = ('W10', 'Ubuntu', 'Debian')
SET2 = ('HP', 'Epson', 'Lexmark')
SET3 = ('16GB', '24GB', '64GB')
SET4 = ('6core', '8core', '12core')
SET5 = ('Slow', 'Medium', 'RTX3090')
SET6 = ('HDD', 'SDD', 'HDD+SDD')
SUPERSET = (SET1, SET2, SET3, SET4, SET5, SET6)
SUPERSET_MEMB = set(itertools.chain(*SUPERSET))
# print('superset: ', *superset, sep='\n')
MYLIST = SUPERSET[0:6]
SUPERSET_PRODUCT = set(itertools.product(*MYLIST))
print(SUPERSET_PRODUCT)
# print(*SUPERSET_PRODUCT, sep='\n')
# print(set(itertools.combinations(superset[0], superset[1], 2)))
# print('size of SUPERSET_PRODUCT is ', len(SUPERSET_PRODUCT))
def create_pairs(set_of_sets):
    """Return every cross-group pair over the members of *set_of_sets*.

    Builds all unordered pairs of distinct members drawn from the union of
    the groups, then discards pairs whose two members belong to a single
    group (those can never occur together in a pairwise combination).

    Fix: the original wrapped the union computation in a pointless
    ``except Exception as e: raise e`` and removed in-group pairs from a
    list one by one; the re-raise wrapper is gone and the removal is a
    single set difference.

    :param set_of_sets: iterable of groups (each an iterable of values)
    :return: list of 2-tuples; ordering is unspecified (set-based)
    """
    all_members = set(itertools.chain(*set_of_sets))
    all_pairs = set(itertools.combinations(all_members, 2))
    # Pairs drawn entirely from one group are not cross-parameter pairs.
    same_group = {pair for group in set_of_sets
                  for pair in all_pairs if set(pair).issubset(group)}
    return list(all_pairs - same_group)
def all_combo(set_of_sets):
    """Return the full cartesian product of the groups as a list of tuples."""
    combos = itertools.product(*set_of_sets)
    return list(combos)
def cover_all_pairs(set_of_sets):
    """Greedily pick a subset of the full cartesian product covering all pairs.

    Every cross-group pair produced by :func:`create_pairs` is covered by at
    least one of the returned combinations.

    Fix: the original appended a *new* covering combination for every pair
    even when the pair was already covered by a previously chosen
    combination (chosen combinations were explicitly skipped in the scan),
    inflating the result towards one combination per pair. Pairs already
    covered are now skipped.

    :param set_of_sets: iterable of groups (each an iterable of values)
    :return: list of combinations (tuples) covering every pair
    """
    set_of_pairs = create_pairs(set_of_sets)
    all_comb = all_combo(set_of_sets)
    efficient_comb = []
    for pair in set_of_pairs:
        wanted = set(pair)
        # Skip pairs that an already-chosen combination covers.
        if any(wanted.issubset(chosen) for chosen in efficient_comb):
            continue
        for group in all_comb:
            if wanted.issubset(group):
                efficient_comb.append(group)
                break
    return efficient_comb
# print('all combo ', all_combo(MYLIST))
A = create_pairs(MYLIST)
# A = create_pairs(3)
B = all_combo(SUPERSET)
# print(type(MYLIST))
print(MYLIST)
test_list = (('a', 'b', 'c'), ('d', 'e'), ('g', 'h'))
C = cover_all_pairs(SUPERSET)
# C = cover_all_pairs()
pp = pprint.PrettyPrinter(indent=4)
# print('A', len(A), A)
# print('B', len(B), B)
# print('C', len(C), C)
pp.pprint('A')
pp.pprint(len(A))
pp.pprint(A)
# pp.pprint('B')
# pp.pprint(len(B))
# pp.pprint(B)
pp.pprint('C')
pp.pprint(len(C))
pp.pprint(C)
# for item in A:
# print(item)
# print(ALL_MEMBERS)
| UTF-8 | Python | false | false | 2,613 | py | 118 | all_pairs.py | 84 | 0.637199 | 0.623421 | 0 | 89 | 28.359551 | 74 |
JinalShah2002/House-Prices-Challenge-Solution | 6,983,616,857,393 | 463fcf7f158a1ed0ff78af0d8690b0f76b414639 | 3f0728770a5d3b974bfe1da6199631f5c1ef2645 | /Code/Scripts/Data Preparation Scripts/UnitTests.py | 90529b0967825c1bb29138f0a177c8d8d247d30f | []
| no_license | https://github.com/JinalShah2002/House-Prices-Challenge-Solution | b3b083d75ec7f788636e7a6475c37b88db9dc552 | 8337d39f0d06d619cf3a90568a318bfaf196fdd3 | refs/heads/master | 2023-03-20T18:55:49.621381 | 2021-03-18T04:30:00 | 2021-03-18T04:30:00 | 289,570,958 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
@author Jinal Shah
This file will conduct all unit tests on the custom built transformers.
It is important to conduct these unit tests because I have built a lot of
custom transformers ,and I would like to make sure that I have implemented
them correctly. Running these tests will allow me to make sure that the
final transformed training data that I feed to my models is correct.
"""
# Importing all necessary libraries
import unittest
import pandas as pd
import numpy as np
from Tranformers import Remove, TransformNum
# Creating the Class
class TestTransformers(unittest.TestCase):
    """Unit tests for the custom Remove / TransformNum transformers.

    NOTE(review): several assertions compare a fit_transform result
    directly against an exception *class* (AttributeError / KeyError);
    this assumes the transformers return the exception class instead of
    raising on bad input -- confirm against the Tranformers module.
    NOTE(review): the ``.keys().all()`` / ``Series.all()`` comparisons
    check truthiness rather than element-wise equality, so they are weak
    assertions.
    """
    def test_remove(self):
        # Initializing Basic Information
        test_data = pd.DataFrame({
            'Name': ['Jinal', 'Juan', 'Joe', 'John', 'Sarah', 'Mike', 'Rachel'],
            'Age': [18, 21, 24, 25, 30, 19, 25],
            'Height': [6.5, 5.5, 5.25, 6, 5.9, 5, 6.8],
            'Weight': [120, 130, 140, 150, 125, 133, 145],
            'Grade': [11, 11, 11, 12, 10, 9, 11],
            'Math Level': [1, 2, 3, 4, 5, 6, 7],
            'Reading Level': [1, 2, 3, 4, 5, 6, 7],
            'Programming Ability': [1, 2, 3, 4, 5, 6, 7],
        })
        """
        Test One -> removing only 1 feature
        Status: Passed
        """
        features = ['Grade']
        temp = test_data.drop('Grade',axis=1)
        remove = Remove.Remove(features)
        test = remove.fit_transform(test_data)
        self.assertEqual(temp.keys().all(), test.keys().all())
        self.assertEqual(type(test), pd.DataFrame)
        """
        Test Two -> removing multiple features
        Status: Passed
        """
        features = ['Grade','Programming Ability']
        temp = test_data.drop(features,axis=1)
        remove = Remove.Remove(features)
        test = remove.fit_transform(test_data)
        self.assertEqual(temp.keys().all(), test.keys().all())
        self.assertEqual(type(test), pd.DataFrame)
        """
        Test Three -> removing a feature that doesn't exist
        Status: Passed
        """
        features = ['Job']
        remove = Remove.Remove(features)
        # NOTE(review): the transformer itself is passed as the data here,
        # presumably to force the AttributeError path -- confirm intended.
        test = remove.fit_transform(remove)
        self.assertEqual(test, AttributeError)
        """
        Test Four -> removing no features
        Status: Passed
        """
        features = []
        remove = Remove.Remove(features)
        test = remove.fit_transform(test_data)
        self.assertEqual(test_data.keys().all(), test.keys().all())
        self.assertEqual(type(test), pd.DataFrame)
    def test_transformNum(self):
        # Numeric-only frame: TransformNum maps column name -> transform spec.
        test_data = pd.DataFrame({
            'Age': [18, 21, 24, 25, 30, 19, 25],
            'Height': [6.5, 5.5, 5.25, 6, 5.9, 5, 6.8],
            'Weight': [120, 130, 140, 150, 125, 133, 145],
            'Grade': [11, 11, 11, 12, 10, 9, 11],
            'Math Level': [1, 2, 3, 4, 5, 6, 7],
            'Reading Level': [1, 2, 3, 4, 5, 6, 7],
            'Programming Ability': [1, 2, 3, 4, 5, 6, 7],
        })
        """
        Test One -> Transforming each numerical feature
        Test Status: Passed
        """
        transform = {
            'Age': 'log(x+1)',
            'Height': 'log(x)',
            'Weight': 'x ** .5'
        }
        temp2 = test_data
        temp = TransformNum.TransformNum(transform)
        result = temp.fit_transform(test_data)
        self.assertEqual(type(result), pd.DataFrame)
        self.assertEqual(result['Age'].all(), np.log(temp2['Age']+1).all())
        self.assertEqual(test_data['Height'].all(), np.log(temp2['Height']).all())
        self.assertEqual(result['Weight'].all(), (temp2['Weight'] ** .5).all())
        """
        Test Two -> trying to transform a key that doesn't exist
        Test Status: Passed
        """
        transform = {
            'Job': 'log(x+1)'
        }
        temp = TransformNum.TransformNum(transform)
        result = temp.fit_transform(test_data)
        self.assertEqual(result, KeyError)
        """
        Test Three -> trying to make a transformation that isn't log(x+1), log(x), or x ** .5
        Test Status: Passed
        """
        transform = {
            'Age': 'x ** 1/3',
        }
        temp = TransformNum.TransformNum(transform)
        result = temp.fit_transform(test_data)
        self.assertEqual(type(result), pd.DataFrame)
        # Unknown transform spec: column expected to pass through unchanged.
        self.assertEqual(result['Age'].all(), test_data['Age'].all())
# Runner
if __name__ == '__main__':
unittest.main()
| UTF-8 | Python | false | false | 4,438 | py | 33 | UnitTests.py | 6 | 0.549347 | 0.508788 | 0 | 130 | 33.130769 | 93 |
ezirmusitua/School-Work | 19,696,720,044,762 | 30cfee11ff66b9abf857d1071cb8b3b95c90cc90 | e9a470d29c4cf184ace57f0c907d8788cbe27cd2 | /入侵检测/JWinpcapy/PcapOperation/basic_operate.py | 1fa0750d23b5a3870390bbe978dce09f5a691720 | []
| no_license | https://github.com/ezirmusitua/School-Work | 800d0167db19532c871e762a523a1d4f3a41067b | 798f023ee61927f74f8bdf57d2a51e0d9560960c | refs/heads/master | 2016-06-06T21:34:10.622205 | 2016-03-07T07:27:04 | 2016-03-07T07:27:04 | 53,295,353 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import unittest
def GetMac(hex_in):
    """Format the first six entries of *hex_in* as a colon-separated MAC string."""
    octets = [str(hex_in[i]) for i in range(6)]
    return ":".join(octets)
def GetIP(dec_in):
    """Render the first four entries of *dec_in* in dotted-decimal IPv4 notation."""
    first, second, third, fourth = dec_in[0], dec_in[1], dec_in[2], dec_in[3]
    return "%d.%d.%d.%d" % (first, second, third, fourth)
def get_dotted_decimal_addr(adrTmp):
    """Extract the four IPv4 octet values from a sockaddr_in-style pointer.

    Returns them as a plain list ``[b1, b2, b3, b4]``.
    """
    octets = adrTmp.contents.sin_addr.S_un.S_un_b
    return [octets.s_b1, octets.s_b2, octets.s_b3, octets.s_b4]
def get_ipv6_hex_addr(adrTmp):
    """Return the 16-bit address groups of a sockaddr_in6-style pointer as a list.

    Fix: the original copied the ctypes array element-by-element with a
    comprehension; ``list()`` does the same in one idiomatic call.
    """
    return list(adrTmp.contents.sin6_addr._S6_un._S6_u16)
def is_flag_true(flag):
    """Return True when *flag* equals 1 (flag set), False otherwise.

    Fix: the original body referenced an undefined ``self.tcp_flag_urg``
    (NameError on every call), ignored the *flag* argument entirely, and
    ended with an unreachable ``pass``.
    """
    return flag == 1
"""
Unittest for GetIP and GetMac
"""
class basic_operate_unit_test(unittest.TestCase):
    """Sanity checks for the GetMac / GetIP formatting helpers."""

    def setUp(self):
        # Broadcast-style fixtures: all-ones MAC and limited-broadcast IPv4.
        self.test_data_mac = [255, 255, 255, 255, 255, 255]
        self.test_res_mac = 'ff:ff:ff:ff:ff:ff'
        self.test_data_ip = [255, 255, 255, 255]
        self.test_res_ip = '255.255.255.255'

    def tearDown(self):
        pass

    def test_get_mac(self):
        hex_octets = [hex(value)[2:] for value in self.test_data_mac]
        self.assertEqual(GetMac(hex_octets), self.test_res_mac, 'Error in Get Mac')

    def test_get_ip(self):
        self.assertEqual(GetIP(self.test_data_ip), self.test_res_ip, 'Error in Get IP')
if __name__ == '__main__' :
unittest.main() | UTF-8 | Python | false | false | 1,296 | py | 179 | basic_operate.py | 133 | 0.631944 | 0.582562 | 0 | 46 | 27.195652 | 107 |
drewbsmith19/shipwell_assessment | 5,480,378,305,713 | d68915c2ceaa170e14bc7ae4791bd1341c79c71a | b52b240768e05108e3f893aab9bf5217464d98a3 | /assessment/weather/urls.py | 381e94ff7ef02422aff1c4e57d572a5131ce438a | []
| no_license | https://github.com/drewbsmith19/shipwell_assessment | 7c7692159686d9b9252f89e430e2433c259ec653 | 287772d143a98c1705efe2824b055fa907d6e840 | refs/heads/master | 2022-12-11T19:58:32.088825 | 2019-12-04T23:13:33 | 2019-12-04T23:13:33 | 159,742,041 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.urls import path
from . import views
# Route /weather/<lat>/<lon>/ plus up to two optional trailing path segments
# (site_1, site_2), all dispatching to the same view function.
# NOTE(review): site_1/site_2 presumably select weather providers to query --
# confirm against views.weather.
urlpatterns = [
    path('weather/<lat>/<lon>/', views.weather),
    path('weather/<lat>/<lon>/<site_1>/', views.weather),
    path('weather/<lat>/<lon>/<site_1>/<site_2>/', views.weather),
]
| UTF-8 | Python | false | false | 242 | py | 4 | urls.py | 2 | 0.628099 | 0.615702 | 0 | 8 | 29.25 | 66 |
devzgabriel/python-calculator | 1,949,915,192,304 | 9a1124f76dcbc0ba41f793bbe8d6a83dd82862ff | 0a307d31883161edb7d3cfeb6b44863f6d3ae417 | /calc_defs/calc_part2.py | ade061bf6afde146d9d7d59c6b6b9cbbc82e9a2c | []
| no_license | https://github.com/devzgabriel/python-calculator | b5c16917754793782946fcb6b71c9b688fb97c49 | 24a7dae9445bdd0a30e60f32f7767721fa26184e | refs/heads/master | 2023-03-01T09:17:18.588680 | 2021-01-30T01:22:20 | 2021-01-30T01:22:20 | 277,417,250 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
def part(opcao):
    """Dispatch one of the calculator's extra operations by option code.

    Options: '21' multiplication table, '22' quadratic roots, '23' triangle
    classification, '24' sort user-entered values, '25' dBm/W conversion.

    Fixes relative to the original:
    * '21' looped over the literal tuple ``(1, 10)`` instead of ``range(1, 11)``
    * ``str(opcao)`` was computed and discarded; it is now bound so integer
      option codes also work (backward-compatible generalization)
    * '24' used the raw ``input`` string as a loop bound (TypeError) and
      printed the ``None`` return value of ``list.sort``; invalid order
      input was only re-prompted once (and '' was accepted, since
      ``'' in 'CD'`` is True)
    * '25' parsed the wattage as base-16 and used the natural log instead
      of ``log10`` for the dBm formula
    """
    opcao = str(opcao)
    if opcao == '21':
        n_tab = int(input('Qual o Número da tabuada:'))
        for t in range(1, 11):
            print(f'{t} x {n_tab} = {t*n_tab}')
    elif opcao == '22':
        print('Digite os valores de A,B e C')
        a = float(input('A:'))
        b = float(input('B:'))
        c = float(input('C:'))
        # Negative discriminants yield a complex delta (and complex roots),
        # matching the original behaviour.
        delta = ((b * b) - 4 * a * c) ** (1 / 2)
        resultado1 = (-b + delta) / (2 * a)
        resultado2 = (-b - delta) / (2 * a)
        print(f'As Raizes da Equaçâo são {resultado1} e {resultado2}')
    elif opcao == '23':
        entrada = input('Retas de triangulo:').split()
        v = [float(valor) for valor in entrada]
        v.sort(reverse=True)
        a, b, c = v[0], v[1], v[2]
        if a >= b + c:
            print('NAO FORMA TRIANGULO')
        elif a ** 2 == b ** 2 + c ** 2:
            print('TRIANGULO RETANGULO')
        elif a ** 2 > b ** 2 + c ** 2:
            print('TRIANGULO OBTUSANGULO')
        elif a ** 2 < b ** 2 + c ** 2:
            print('TRIANGULO ACUTANGULO')
        if a == b == c:
            print('TRIANGULO EQUILATERO')
        elif a == b or b == c or a == c:
            print('TRIANGULO ISOSCELES')
    elif opcao == '24':
        qtd = int(input('Quantos numeros há:'))
        numeros = []
        for n in range(qtd):
            numeros.append(input(f'Digite o {n+1}° valor: '))
        ordem = input('Qual a ordem[C/D]: ').upper()
        while ordem not in ('C', 'D'):
            ordem = input('Qual a ordem[C/D]: ').upper()
        numeros.sort(reverse=(ordem == 'D'))
        print(numeros)
    elif opcao == '25':
        print(''' [0][dBm] para [w]
                  [1][w] para [dBm]''')
        opc = int(input())
        if opc == 0:
            conv = float(input('Qual a Potência a ser convertida:'))
            print('Em [w]: ', 0.001 * 10 ** (conv / 10))
        elif opc == 1:
            conv = float(input('Qual a Potência a ser convertida:'))
            print('Em [dBm]: ', 10 * math.log10(conv / 0.001))
| UTF-8 | Python | false | false | 2,121 | py | 5 | calc_part2.py | 5 | 0.448651 | 0.422149 | 0 | 64 | 32.015625 | 70 |
indralab/adeft_app | 15,212,774,184,810 | 6a68505fdc6db4cf7e395ed20560cf99e2bb44de | c6d196c6309f85fb7f002aa58aee13a879761cca | /adeft_app/scripts/consistency.py | ba646be7e3dcc2c5e81f656ed277d82918f8bae7 | [
"BSD-2-Clause"
]
| permissive | https://github.com/indralab/adeft_app | 96f7806ac6e9ea24e7e10f2afce3804904eaaff5 | 3f20d04791f598e089bb59ca1ca133d5d51d6c28 | refs/heads/master | 2020-05-02T15:44:23.315304 | 2019-05-30T16:54:56 | 2019-05-30T16:54:56 | 178,050,936 | 0 | 0 | BSD-2-Clause | false | 2019-05-30T16:54:57 | 2019-03-27T18:16:54 | 2019-05-30T16:25:19 | 2019-05-30T16:54:57 | 104 | 0 | 3 | 0 | Python | false | false | from collections import defaultdict
def check_grounding_dict(grounding_dict):
    """Verify that no longform maps to different groundings in different
    grounding maps of *grounding_dict*.

    Delegates the pairwise-consistency check to :func:`check_dictionaries`.
    """
    grounding_maps = grounding_dict.values()
    return check_dictionaries(grounding_maps)
def check_consistency_names_grounding_dict(grounding_dict, names_map):
    """Check the groundings used in *grounding_dict* exactly match the keys
    of *names_map* ('ungrounded' entries are excluded)."""
    grounded = set()
    for grounding_map in grounding_dict.values():
        for grounding in grounding_map.values():
            if grounding != 'ungrounded':
                grounded.add(grounding)
    return grounded == set(names_map)
def check_consistency_grounding_dict_pos_labels(grounding_dict, pos_labels):
    """Check every positive label appears as a grounding in *grounding_dict*
    ('ungrounded' does not count as a grounding)."""
    grounded = {grounding
                for grounding_map in grounding_dict.values()
                for grounding in grounding_map.values()} - {'ungrounded'}
    return set(pos_labels).issubset(grounded)
def check_model_consistency(model, grounding_dict, pos_labels):
    """Check a serialized model agrees with its associated json metadata.

    The model's classifier must know every grounding appearing in
    *grounding_dict*, carry exactly that dict's shortforms, and recognise
    every positive label.
    """
    classifier_labels = set(model.estimator.named_steps['logit'].classes_)
    groundings = {grounding
                  for grounding_map in grounding_dict.values()
                  for grounding in grounding_map.values()}
    if not groundings <= classifier_labels:
        return False
    if set(grounding_dict) != set(model.shortforms):
        return False
    return set(pos_labels) <= classifier_labels
def check_names_consistency(names_list):
    """Check the names maps of a multi-shortform model agree pairwise.

    Delegates to :func:`check_dictionaries`.
    """
    return check_dictionaries(list(names_list))
def check_dictionaries(dicts):
    """Check that a collection of dictionaries is pairwise consistent.

    Two dictionaries are consistent with each other if no key maps to
    different values in different dictionaries.

    Fix: the original computed ``max`` over the per-key value-set sizes,
    which raised ``ValueError`` for an empty *dicts*; an empty collection
    is trivially consistent and now returns True.

    :param dicts: iterable of dictionaries
    :return: True if every shared key has a single value across all dicts
    """
    merged = defaultdict(set)
    for dictionary in dicts:
        for key, value in dictionary.items():
            merged[key].add(value)
    # Each key was added at least once, so "consistent" means exactly one
    # distinct value per key; all() on an empty mapping is True.
    return all(len(values) == 1 for values in merged.values())
| UTF-8 | Python | false | false | 2,450 | py | 19 | consistency.py | 12 | 0.696735 | 0.696327 | 0 | 64 | 37.28125 | 76 |
zhiqiarcher/QbvCalculationTool | 11,192,684,805,081 | b21e975518f756f62e27f791286bde02b3590bf8 | d1aee1b604e21d6ea371f2a8d5cf257e7d4995e0 | /test.py | cc6290b0e7d140991f30d0751019b72530bcdfa3 | [
"MIT"
]
| permissive | https://github.com/zhiqiarcher/QbvCalculationTool | 76b33c03eec6704fc5abe2b3ba9650bbae4d5474 | cc6650eda55ae1534a8d21d6de1f235023389d6b | refs/heads/main | 2023-01-13T00:37:53.129120 | 2020-11-04T12:34:14 | 2020-11-04T12:34:14 | 309,983,586 | 6 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
os.startfile(os.path.dirname(__file__) + '/QbvSchedulerDisplay.xlsm') | UTF-8 | Python | false | false | 82 | py | 13 | test.py | 9 | 0.719512 | 0.719512 | 0 | 3 | 26 | 69 |
ONSdigital/dp-conceptual-search | 4,964,982,238,128 | 05eb78788551e83735c01cf0ecfad135e44d23d0 | 77a5aa02f1a47f28e922d21acc83881855fb61f4 | /unit/api/search/test_search_content.py | 6e31146146357603ebe76231a6583cbccbeb50dc | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | https://github.com/ONSdigital/dp-conceptual-search | 5deabda99f70cd3fc3b6df66291d3922df6695df | 8f62ff76e8df339249befcb6b83f4c882546032c | refs/heads/develop | 2021-06-06T06:06:44.776885 | 2020-02-07T14:41:57 | 2020-02-07T14:41:57 | 132,128,428 | 3 | 3 | MIT | false | 2020-02-07T14:42:31 | 2018-05-04T10:55:15 | 2020-02-07T14:42:09 | 2020-02-07T14:42:30 | 112,724 | 2 | 0 | 1 | Python | false | false | """
Tests the ONS content search API
"""
from json import dumps
from typing import List
from unittest import mock
from unit.utils.search_test_app import SearchTestApp
from unit.elasticsearch.elasticsearch_test_utils import mock_search_client, mock_hits_highlighted
from dp_conceptual_search.config import CONFIG
from dp_conceptual_search.ons.search.index import Index
from dp_conceptual_search.search.search_type import SearchType
from dp_conceptual_search.ons.search.sort_fields import query_sort, SortField
from dp_conceptual_search.ons.search.fields import get_highlighted_fields, Field
from dp_conceptual_search.ons.search.content_type import AvailableContentTypes, ContentType
from dp_conceptual_search.app.elasticsearch.elasticsearch_client_service import ElasticsearchClientService
from dp_conceptual_search.ons.search.queries.ons_query_builders import (
build_content_query, build_function_score_content_query
)
class SearchContentApiTestCase(SearchTestApp):
    """API tests for the ONS /search/content endpoint.

    Elasticsearch is replaced by a mock client, so these tests assert on
    the exact query body sent to the client rather than on real search
    results.
    """
    @staticmethod
    def paginate():
        """
        Pick random pagination parameters and derive the expected offset.
        :return: tuple (from_start, current_page, size)
        """
        import random
        # Generate a random page number between 1 and 10
        current_page = random.randint(1, 10)
        # Generate a random page size between 11 and 20
        size = random.randint(11, 20)
        # Calculate correct start page number
        from_start = 0 if current_page <= 1 else (current_page - 1) * size
        return from_start, current_page, size
    @property
    def search_term(self):
        """
        Mock search term to be used for testing
        :return:
        """
        return "Zuul"
    @property
    def highlight_dict(self):
        """
        Builds the expected highlight query dict
        :return:
        """
        highlight_fields: List[Field] = get_highlighted_fields()
        # One highlight entry per highlighted field, whole-field fragments,
        # wrapped in <strong> tags.
        highlight_query = {
            "fields": {
                highlight_field.name: {
                    "number_of_fragments": 0,
                    "pre_tags": ["<strong>"],
                    "post_tags": ["</strong>"]
                } for highlight_field in highlight_fields
            }
        }
        return highlight_query
    @mock.patch.object(ElasticsearchClientService, '_init_client', mock_search_client)
    def test_content_query_search_called(self):
        """
        Tests that the search method is called properly by the api for a content query
        :return:
        """
        # Make the request
        # Set pagination params
        from_start, current_page, size = self.paginate()
        # Set sort_by
        sort_by: SortField = SortField.relevance
        # Build params dict
        params = {
            "q": self.search_term,
            "page": current_page,
            "size": size
        }
        # Build post JSON
        data = {
            "sort_by": sort_by.name
        }
        # URL encode
        url_encoded_params = self.url_encode(params)
        target = "/search/content?{q}".format(q=url_encoded_params)
        # Make the request
        request, response = self.post(target, 200, data=dumps(data))
        # Get a list of all available content types
        content_types: List[ContentType] = AvailableContentTypes.available_content_types()
        # Build the filter query
        type_filters = [content_type.name for content_type in content_types]
        filter_query = [
            {
                "terms": {
                    "type": type_filters
                }
            }
        ]
        content_query = build_content_query(self.search_term)
        # Build the expected query dict - note this should not change
        expected = {
            "from": from_start,
            "query": {
                "bool": {
                    "filter": filter_query,
                    "must": [
                        build_function_score_content_query(content_query, content_types).to_dict(),
                    ]
                }
            },
            "size": size,
            "sort": query_sort(SortField.relevance),
            "highlight": self.highlight_dict
        }
        # Assert search was called with correct arguments
        self.mock_client.search.assert_called_with(index=[Index.ONS.value], doc_type=[], body=expected,
                                                   search_type=SearchType.DFS_QUERY_THEN_FETCH.value)
        # The mocked client returns canned highlighted hits; the API must
        # surface them unchanged in the response body.
        data = response.json
        results = data['results']
        expected_hits_highlighted = mock_hits_highlighted()
        self.assertEqual(results, expected_hits_highlighted, "returned hits should match expected")
    def test_max_request_size_400(self):
        """
        Test that making a request where the page size is greater than the max allowed raises a 400 BAD_REQUEST
        :return:
        """
        # Make the request
        # Set correct from_start and page size for featured result query
        from_start = 0
        current_page = from_start + 1
        size = CONFIG.SEARCH.max_request_size + 1
        # Set sort_by
        sort_by: SortField = SortField.relevance
        # Build params dict
        params = {
            "q": self.search_term,
            "page": current_page,
            "size": size
        }
        # Build post JSON
        data = {
            "sort_by": sort_by.name
        }
        # URL encode
        url_encoded_params = self.url_encode(params)
        target = "/search/content?{q}".format(q=url_encoded_params)
        # Make the request
        request, response = self.post(target, 400, data=dumps(data))
| UTF-8 | Python | false | false | 5,588 | py | 101 | test_search_content.py | 91 | 0.586435 | 0.58053 | 0 | 179 | 30.217877 | 111 |
NIck-Meng/MyPythonScripts | 12,652,973,667,517 | eb3c4ccb6ce04b3bd7d386279a99375052099139 | 01d8342bb2d0550b61f11420e564018e14b0048c | /Projectpy3/Exercise/os_module/os_contents.py | 25c94f9315828988cbc4cf51307eb1771db02a9f | []
| no_license | https://github.com/NIck-Meng/MyPythonScripts | a6c8f62f7e375a8726b2b45f4dd7f64b8796e540 | 485140135f3ad9cab302bc80e87276f9eafd7ac1 | refs/heads/master | 2016-12-13T14:37:39.859693 | 2016-12-11T13:49:48 | 2016-12-11T13:49:48 | 54,879,959 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #coding:utf-8
__author__ = 'Nick'
'''
os.getcwd() 获取当前工作目录,即当前python脚本工作的目录路径
os.chdir("dirname") 改变当前脚本工作目录,相当于shell下cd
os.curdir 返回当前目录: ('.')
os.makedirs('dirname1/dirname2') 可生成多层递归目录
os.removedirs('dirname1') 若目录为空,则删除,并递归到上一级目录,若也为空,则删除,依此类推
os.mkdir('dirname') 生成单级目录,相当于shell中mkdir dirname
os.rmdir('dirname') 删除单级空目录,若目录不为空则无法删除,报错,相当于shell中rmdir dirnameos.
listdir('dirname') 列出指定目录下的所有文件和子目录,包括隐藏文件,并以列表方式打印
os.remove() 删除一个文件
os.rename("oldname","newname") 重命名文件/目录
os.stat('path/filename') 获取文件/目录信息
os.utime() 修改时间属性
os.walk() 生成一个目录树下的所有文件名
os.walk(top[, topdown=True[, onerror=None[, followlinks=False]]])
该函数返回一个元组,该元组有3个元素,这3个元素分别表示每次遍历的路径名,目录列表和文件列表
top表示需要遍历的目录树的路径
topdown的默认值是”True”,表示首先返回目录树下的文件,然后在遍历目录树的子目录.Topdown的值为”False”时,则表示先遍历目录树的子目录,返回子目录下的文件,最后返回根目录下的文件
onerror的默认值是”None”,表示忽略文件遍历时产生的错误.如果不为空,则提供一个自定义函数提示错误信息后继续遍历或抛出异常中止遍历
'''
import os
# print(os.curdir)
# for root, dirs, files in os.walk("D:\英语资料", topdown=False):
# for name in files:
# print(os.path.join(root, name)) #打印文件绝对路径 ...
# for name in dirs:
# print(os.path.join(root, name)) #打印目录绝对路径 ...
for root, dirs, files in os.walk("D:\英语资料", topdown=False):
print('root:',root)
print('dirs:',dirs)
print('files:',files)
for name in files:
print(name)
for name in dirs:
print(name)
'''
os.path常用模块详解
os.path.abspath(path) 返回path规范化的绝对路径
os.path.split(path) 将path分割成目录和文件名二元组返回
os.path.dirname(path) 返回path的目录,其实就是os.path.split(path)的第一个元素
os.path.basename(path) 返回path最后的文件名,即os.path.split(path)的第二个元素
os.path.commonprefix(list) 返回list中,所有path共有的最长的路径,从左向右,相同字符
os.path.exists(path) 如果path存在,返回True;如果path不存在,返回False
os.path.isabs(path) 如果path是绝对路径,返回True
os.path.isfile(path) 如果path是一个存在的文件,返回True。否则返回False
os.path.isdir(path) 如果path是一个存在的目录,则返回True。否则返回False
os.path.join(path1[, path2[, ...]]) 将多个路径组合后返回,第一个绝对路径之前的参数将被忽略
os.path.normcase(path) 在Linux下,该函数会原样返回path,在windows平台上会将路径中所有字符转换为小写,并将所有斜杠转换为反斜杠
os.path.normpath(path) 规范化路径
os.path.splitdrive(path) 拆分驱动器名和路径,主要对win,对linux元组第一个总是空的
os.path.splitext(path) 分离文件名与扩展名;默认返回(fname,fextension)元组,可做分片操作 ,以“.”为分隔符
os.path.getsize(path) 返回path的大小(字节)
os.path.getatime(path) 返回path所指向的文件或者目录的最后存取时间
os.path.getmtime(path) 返回path所指向的文件或者目录的最后修改时间
os.path.walk(top,func,arg)
top表示需要遍历的目录树的路径
func表示回调函数,对遍历路径进行处理.所谓回调函数,是作为某个函数的参数使用,当某个时间触发时,程序将调用定义好的回调函数处理某个任务.回调函数必须提供3个参数:第1个参数为walk()的参数tag,第2个参数表示目录列表,第3个参数表示文件列表
arg是传递给回调参数func的元组.回调函数的一个参数必须是arg,为回调函数提供处理参数.参数arg可以为空
os.path.walk()与os.walk()产生的文件名列表并不相同:
os.path.walk()产生目录树下的目录路径和文件路径,而os.walk()只产生文件路径
'''
import os.path
print(os.path.abspath('os_project.py'))
root,name=os.path.split(r'C:\Users\Nick\Documents\Python Scripts\Projectpy3\Exercise\os_module\os_project.py')
print('root:',root,'name:',name,sep='\n')
print(os.path.normcase('c:/windows\\system32\\'))
print(os.path.normpath('c://windows\\System32\\../Temp/'))
print(os.path.splitdrive('c:\\windows'))
root,ext=os.path.splitext(r'C:\Users\Nick\Documents\Python Scripts\Projectpy3\Exercise\os_module\os_project.py')
print('root:',root,'ext:',ext,sep='\n')
print(os.path.getsize(r'C:\Users\Nick\Documents\Python Scripts\Projectpy3\Exercise\os_module\os_project.py'))
def VisitDir(arg, dirname, names):
    """os.path.walk-style callback: print the full path of each entry in *dirname*.

    *arg* is the unused extra-state argument the old Python 2
    ``os.path.walk`` protocol required (that API no longer exists in
    Python 3, hence the commented-out call site below this function).
    """
    for entry in names:
        print(os.path.join(dirname, entry))
path=r"D:\英语资料"
# os.path.walk(path,VisitDir,()) | UTF-8 | Python | false | false | 5,318 | py | 152 | os_contents.py | 149 | 0.744065 | 0.738564 | 0 | 93 | 35.16129 | 125 |
01Eddie/holbertonschool-higher_level_programming | 15,229,954,069,252 | eb264718fbdce942761c9925016ed7129ca26195 | 41a128b3cbf37a3c7f24cb5ef9a09b9e05c6a32d | /0x0B-python-input_output/7-add_item.py | 59a94975a5aec6cafccb8801fad0a0797c6151d8 | []
| no_license | https://github.com/01Eddie/holbertonschool-higher_level_programming | 11b29c5792e7105cb85e654e994782d7d4a5eace | 6013c31ff01a3aaf67911436bd3b7e97fa6a9566 | refs/heads/main | 2023-08-04T14:26:11.519307 | 2021-09-23T05:15:55 | 2021-09-23T05:15:55 | 361,882,781 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3
"""Using my previous functions"""
import sys
save_to_json_file = __import__('5-save_to_json_file').save_to_json_file
load_from_json_file = __import__('6-load_from_json_file').load_from_json_file
# Arguments to store: everything after the script name. Slicing creates a
# new list, fixing the original which aliased sys.argv and then pop(0)'d it,
# mutating the global argument list in place.
add_item = sys.argv[1:]
try:
    """The list must be saved as a JSON
    representation in a file named add_item.json
    - If the file doesn’t exist, it should be
    created"""
    # Mode 'x' fails iff the file already exists.
    with open('add_item.json', 'x') as f:
        save_to_json_file(add_item, "add_item.json")
except FileExistsError:
    # Fix: the original bare `except:` swallowed every exception (including
    # KeyboardInterrupt and JSON errors); only the exists-case is expected.
    listNew = load_from_json_file("add_item.json")
    save_to_json_file(listNew + add_item, "add_item.json")
| UTF-8 | Python | false | false | 665 | py | 87 | 7-add_item.py | 75 | 0.66365 | 0.657617 | 0 | 20 | 32.15 | 77 |
tovmeod/anaf | 1,657,857,400,017 | d6904166ad13c8ff7bf7fba0bf6817312f5268f5 | 3db24619d0a361f392854be074387a8d9a2975d5 | /anaf/finance/urls.py | 5c101bd66751bd59a74fbecf691ef7e23323feac | [
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
]
| permissive | https://github.com/tovmeod/anaf | 3af8b75d00c05005ad38a4dc3bcfdbd42586b81f | 80e4a00532ce6f4ce76c5ffc858ff90c759a9879 | refs/heads/drf | 2022-06-11T20:25:38.831819 | 2019-10-23T18:49:32 | 2019-10-23T18:49:32 | 49,788,641 | 2 | 4 | BSD-3-Clause | false | 2022-07-06T19:59:19 | 2016-01-16T19:56:03 | 2019-10-23T18:49:39 | 2022-07-06T19:59:16 | 32,357 | 2 | 4 | 5 | JavaScript | false | false | """
Finance module URLs
"""
from django.conf.urls import patterns, url
from anaf.finance import views
# URL patterns for the finance module.  Every route ends with an optional
# ``(\.(?P<response_format>\w+))?`` group, so clients may append e.g.
# ``.json`` to a URL; the captured suffix is passed to the view as the
# ``response_format`` keyword argument.
urlpatterns = patterns('anaf.finance.views',
                       url(r'^(\.(?P<response_format>\w+))?$', views.index_transactions, name='finance'),
                       url(r'^index(\.(?P<response_format>\w+))?$', views.index_transactions,
                           name='finance_index_transactions'),
                       url(r'^categories(\.(?P<response_format>\w+))?/?$', views.index_categories,
                           name='finance_categories'),
                       url(r'^income(\.(?P<response_format>\w+))?/?$', views.income_view, name='finance_income_view'),
                       url(r'^balance(\.(?P<response_format>\w+))?/?$', views.balance_sheet,
                           name='finance_balance_sheet'),
                       # Accounts
                       url(r'^accounts(\.(?P<response_format>\w+))?/?$', views.index_accounts,
                           name='finance_index_accounts'),
                       url(r'^account/add(\.(?P<response_format>\w+))?/?$', views.account_add,
                           name='finance_account_add'),
                       url(r'^account/edit/(?P<account_id>\d+)(\.(?P<response_format>\w+))?/?$', views.account_edit,
                           name='finance_account_edit'),
                       url(r'^account/view/(?P<account_id>\d+)(\.(?P<response_format>\w+))?/?$', views.account_view,
                           name='finance_account_view'),
                       url(r'^account/delete/(?P<account_id>\d+)(\.(?P<response_format>\w+))?/?$', views.account_delete,
                           name='finance_account_delete'),
                       # Assets
                       url(r'^assets(\.(?P<response_format>\w+))?/?$', views.index_assets, name='finance_index_assets'),
                       url(r'^asset/add(\.(?P<response_format>\w+))?/?$', views.asset_add, name='finance_asset_add'),
                       url(r'^asset/edit/(?P<asset_id>\d+)(\.(?P<response_format>\w+))?/?$', views.asset_edit,
                           name='finance_asset_edit'),
                       url(r'^asset/view/(?P<asset_id>\d+)(\.(?P<response_format>\w+))?/?$', views.asset_view,
                           name='finance_asset_view'),
                       url(r'^asset/delete/(?P<asset_id>\d+)(\.(?P<response_format>\w+))?/?$', views.asset_delete,
                           name='finance_asset_delete'),
                       # Equities
                       url(r'^equities(\.(?P<response_format>\w+))?/?$', views.index_equities,
                           name='finance_index_equities'),
                       url(r'^equity/add(\.(?P<response_format>\w+))?/?$', views.equity_add, name='finance_equity_add'),
                       url(r'^equity/edit/(?P<equity_id>\d+)(\.(?P<response_format>\w+))?/?$', views.equity_edit,
                           name='finance_equity_edit'),
                       url(r'^equity/view/(?P<equity_id>\d+)(\.(?P<response_format>\w+))?/?$', views.equity_view,
                           name='finance_equity_view'),
                       url(r'^equity/delete/(?P<equity_id>\d+)(\.(?P<response_format>\w+))?/?$', views.equity_delete,
                           name='finance_equity_delete'),
                       # Transactions
                       # NOTE: the order-specific and liability-specific "add" routes are
                       # listed before the generic one so they are matched first.
                       url(r'^transactions(\.(?P<response_format>\w+))?/?$', views.index_transactions,
                           name='finance_index_transactions'),
                       url(r'^transaction/add/order/(?P<order_id>\d+)(\.(?P<response_format>\w+))?/?$',
                           views.transaction_add, name='finance_transaction_add_order'),
                       url(r'^transaction/add/(?P<liability_id>\d+)(\.(?P<response_format>\w+))?/?$',
                           views.transaction_add, name='finance_transaction_add'),
                       url(r'^transaction/add(\.(?P<response_format>\w+))?/?$', views.transaction_add,
                           name='finance_transaction_add'),
                       url(r'^transaction/edit/(?P<transaction_id>\d+)(\.(?P<response_format>\w+))?/?$',
                           views.transaction_edit, name='finance_transaction_edit'),
                       url(r'^transaction/view/(?P<transaction_id>\d+)(\.(?P<response_format>\w+))?/?$',
                           views.transaction_view, name='finance_transaction_view'),
                       url(r'^transaction/delete/(?P<transaction_id>\d+)(\.(?P<response_format>\w+))?/?$',
                           views.transaction_delete, name='finance_transaction_delete'),
                       # Liabilities
                       url(r'^liabilities(\.(?P<response_format>\w+))?/?$', views.index_liabilities,
                           name='finance_index_liabilities'),
                       url(r'^liability/add(\.(?P<response_format>\w+))?/?$', views.liability_add,
                           name='finance_liability_add'),
                       url(r'^liability/edit/(?P<liability_id>\d+)(\.(?P<response_format>\w+))?/?$',
                           views.liability_edit, name='finance_liability_edit'),
                       url(r'^liability/view/(?P<liability_id>\d+)(\.(?P<response_format>\w+))?/?$',
                           views.liability_view, name='finance_liability_view'),
                       url(r'^liability/delete/(?P<liability_id>\d+)(\.(?P<response_format>\w+))?/?$',
                           views.liability_delete, name='finance_liability_delete'),
                       # Receivables
                       url(r'^receivables(\.(?P<response_format>\w+))?/?$', views.index_receivables,
                           name='finance_index_receivables'),
                       url(r'^receivable/add(\.(?P<response_format>\w+))?/?$', views.receivable_add,
                           name='finance_receivable_add'),
                       url(r'^receivable/edit/(?P<receivable_id>\d+)(\.(?P<response_format>\w+))?/?$',
                           views.receivable_edit, name='finance_receivable_edit'),
                       url(r'^receivable/view/(?P<receivable_id>\d+)(\.(?P<response_format>\w+))?/?$',
                           views.receivable_view, name='finance_receivable_view'),
                       url(r'^receivable/delete/(?P<receivable_id>\d+)(\.(?P<response_format>\w+))?/?$',
                           views.receivable_delete, name='finance_receivable_delete'),
                       # Categories
                       url(r'^category/add(\.(?P<response_format>\w+))?/?$', views.category_add,
                           name='finance_category_add'),
                       url(r'^category/edit/(?P<category_id>\d+)(\.(?P<response_format>\w+))?/?$', views.category_edit,
                           name='finance_category_edit'),
                       url(r'^category/view/(?P<category_id>\d+)(\.(?P<response_format>\w+))?/?$', views.category_view,
                           name='finance_category_view'),
                       url(r'^category/delete/(?P<category_id>\d+)(\.(?P<response_format>\w+))?/?$',
                           views.category_delete, name='finance_category_delete'),
                       # Currencies
                       url(r'^currency/add(\.(?P<response_format>\w+))?/?$', views.currency_add,
                           name='finance_currency_add'),
                       url(r'^currency/edit/(?P<currency_id>\d+)(\.(?P<response_format>\w+))?/?$', views.currency_edit,
                           name='finance_currency_edit'),
                       url(r'^currency/view/(?P<currency_id>\d+)(\.(?P<response_format>\w+))?/?$', views.currency_view,
                           name='finance_currency_view'),
                       url(r'^currency/delete/(?P<currency_id>\d+)(\.(?P<response_format>\w+))?/?$',
                           views.currency_delete, name='finance_currency_delete'),
                       # Taxes
                       url(r'^tax/add(\.(?P<response_format>\w+))?/?$', views.tax_add, name='finance_tax_add'),
                       url(r'^tax/edit/(?P<tax_id>\d+)(\.(?P<response_format>\w+))?/?$', views.tax_edit,
                           name='finance_tax_edit'),
                       url(r'^tax/view/(?P<tax_id>\d+)(\.(?P<response_format>\w+))?/?$', views.tax_view,
                           name='finance_tax_view'),
                       url(r'^tax/delete/(?P<tax_id>\d+)(\.(?P<response_format>\w+))?/?$', views.tax_delete,
                           name='finance_tax_delete'),
                       # Settings
                       url(r'^settings/view(\.(?P<response_format>\w+))?/?$', views.settings_view,
                           name='finance_settings_view'),
                       url(r'^settings/edit(\.(?P<response_format>\w+))?/?$', views.settings_edit,
                           name='finance_settings_edit'),
                       )
| UTF-8 | Python | false | false | 8,924 | py | 693 | urls.py | 321 | 0.467727 | 0.467727 | 0 | 126 | 69.825397 | 120 |
powderluv/edgetpu | 14,439,680,075,804 | e36fc5794ca8b08b769b5f6b6b012f51128930e0 | a38646f0798adf035aa76147e36b27edf3170b92 | /examples/classify_image.py | f462cb63690cae4305cb7af383df7adda6d59f3f | [
"Apache-2.0"
]
| permissive | https://github.com/powderluv/edgetpu | d305cbf9a7a6a0981e2c33288dc5a2b3cb07f243 | a968b4a72546c8cad26d25fcb6bb504849503c4a | refs/heads/master | 2020-07-31T20:20:50.762191 | 2019-10-18T23:14:38 | 2020-02-16T01:47:12 | 210,742,049 | 1 | 1 | Apache-2.0 | true | 2019-10-18T23:55:04 | 2019-09-25T02:51:10 | 2019-10-08T16:24:32 | 2019-10-18T23:55:03 | 329,292 | 0 | 0 | 0 | C++ | false | false | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A demo to classify image."""
import argparse
from edgetpu.classification.engine import ClassificationEngine
from edgetpu.utils import dataset_utils
from PIL import Image
def _parse_args():
  """Build the argument parser and return the parsed CLI arguments."""
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--model', help='File path of Tflite model.', required=True)
  parser.add_argument('--label', help='File path of label file.', required=True)
  parser.add_argument(
      '--image', help='File path of the image to be recognized.', required=True)
  return parser.parse_args()


def main():
  """Classify one image with an Edge TPU model and print the top-3 results."""
  args = _parse_args()
  # Load labels and the inference engine, then classify the input image.
  labels = dataset_utils.read_label_file(args.label)
  engine = ClassificationEngine(args.model)
  image = Image.open(args.image)
  results = engine.classify_with_image(image, top_k=3)
  for result in results:
    # Each result pairs a label index (result[0]) with its score (result[1]).
    print('---------------------------')
    print(labels[result[0]])
    print('Score : ', result[1])


if __name__ == '__main__':
  main()
| UTF-8 | Python | false | false | 1,494 | py | 166 | classify_image.py | 101 | 0.706158 | 0.698795 | 0 | 44 | 32.954545 | 80 |
Juru-10/News | 19,181,323,958,396 | b17e943c46fc1e53bb0a98baadaf1fce56788323 | 85ac52268338207c69cca15db93a57811abd09a0 | /app/main/views.py | 9571a79b5bdf0585aeaf459492de74dab88114b2 | [
"MIT"
]
| permissive | https://github.com/Juru-10/News | 7757999ff567ce5d52861f3247cdae43efc595ff | bbf2771afb423679dafefe8d1099e233210ca708 | refs/heads/master | 2020-04-24T03:43:50.904738 | 2019-02-22T13:20:52 | 2019-02-22T13:20:52 | 171,679,379 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import render_template,request,redirect,url_for
from . import main
from ..requests import get_news,get_newsd,search_news
from .forms import ReviewForm
from ..models import Arti
# Views
@main.route('/')
def index():
    """Render the landing page with technology, general and entertainment news.

    If a ``news_query`` query-string parameter is present, redirect to the
    search endpoint instead of rendering the index.
    """
    technology_news = get_news('technology')
    general_news = get_news('general')
    entertainment_news = get_news('entertainment')
    title = 'Home - Welcome to The best News Review Website Online'
    # Renamed from ``search_news``: the old local shadowed the imported
    # ``search_news`` helper from ..requests.
    news_query = request.args.get('news_query')
    if news_query:
        # NOTE(review): the 'search' endpoint appears to be commented out
        # elsewhere in this module — confirm it is registered.
        return redirect(url_for('search', news_title=news_query))
    return render_template('index.html', title=title, technology=technology_news,
                           general=general_news, entertainment=entertainment_news)
@main.route('/news/<id>')
def newsd(id):
    """Render the details page for the news article identified by *id*.

    The ``id`` parameter name shadows the builtin but must match the
    ``<id>`` converter in the route (and keyword ``url_for`` callers),
    so it is kept.
    """
    technology_news = get_newsd(id)
    title = 'Home - Welcome to The Articles'
    # Renamed from ``search_news``: the old local shadowed the imported
    # ``search_news`` helper from ..requests.
    news_query = request.args.get('news_query')
    if news_query:
        return redirect(url_for('search', news_title=news_query))
    return render_template('news.html', title=title, technology=technology_news)
# @main.route('/search/<news_title>')
# def search(news_title):
# '''
# View function to display the search results
# '''
# news_title_list = news_title.split(" ")
# news_title_format = "+".join(news_title_list)
# searched_news = search_news(news_title_format)
# title = f'search results for {news_title}'
#
# return render_template('search.html',news = searched_news)
#
# @main.route('/news/review/new/<id>', methods = ['GET','POST'])
# def new_review(id):
# form = ReviewForm()
# news = get_newsd(id)
#
# if form.validate_on_submit():
# title = form.title.data
# review = form.review.data
# new_review = Review(news.id,title,news.poster,review)
# new_review.save_review()
# return redirect(url_for('news',id = news.id ))
#
# title = f'{news.title} review'
# return render_template('new_review.html',title = title, review_form=form, news=news)
| UTF-8 | Python | false | false | 2,204 | py | 5 | views.py | 4 | 0.651996 | 0.651996 | 0 | 70 | 30.485714 | 148 |
Vikingdev13/LeetCode-problems | 8,375,186,250,623 | beed68690f654c10c3c8eb2b24165911c7bdb389 | 477bd0ab8396f7eb20f3d89ecee1e0199580c360 | /Easy Problems/Strings/To Lower Case.py | 0157117f4b738ece5cfa4e5aca2eb3da60b6bc53 | []
| no_license | https://github.com/Vikingdev13/LeetCode-problems | 300ec5653bf4347e14db0022443cf7c7ae2ecff8 | 2c359874b9bbf0619e59ef0a4240afddc1fb04a3 | refs/heads/master | 2021-07-12T02:00:29.863411 | 2021-02-25T15:35:46 | 2021-02-25T15:35:46 | 235,006,476 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''Runtime: 32 ms, faster than 11.35% of Python3 online submissions for To Lower Case.
Memory Usage: 12.4 MB, less than 100.00% of Python3 online submissions for To Lower Case.
'''
def toLowerCase(s: str) -> str:
    """Return a copy of *s* with all cased characters converted to lower case.

    The parameter was renamed from ``str`` (which shadowed the builtin)
    to ``s``; positional callers are unaffected.
    """
    return s.lower()
| UTF-8 | Python | false | false | 232 | py | 20 | To Lower Case.py | 20 | 0.711207 | 0.642241 | 0 | 7 | 32 | 92 |
mkuhn/se_protein | 5,446,018,534,847 | 1b40afd2a6a7a51c1bf27e0a0ac2bff862087537 | ed377333c996180c4ef9b9b7f4d9a532124e398e | /map_ann_7_to_9.py | 2154d5eef1afc9244d201a03e531e3737f914f07 | []
| no_license | https://github.com/mkuhn/se_protein | 8bd1b4ece41b3a3d11d57f96d5a56abe89229b83 | a809995bd56991bf0f73fd9ed367c32710606da4 | refs/heads/master | 2020-12-22T10:44:22.970738 | 2014-07-17T12:56:37 | 2014-07-17T12:56:37 | 236,755,343 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python2.7
# encoding: utf-8
from __future__ import print_function
import sys
import os
import re
from collections import defaultdict
def main():
    """Translate STRING v7 protein IDs on stdin to v9 IDs on stdout.

    Loads the v7 -> v9 identifier mapping from ``string7to9.tsv`` and
    rewrites the protein tokens in the second column of each annotation
    line.  Unmappable proteins are reported on stderr and left untouched;
    the line is still printed.
    """
    mapping = {}
    # Use a context manager so the mapping file is closed deterministically
    # (the original left the handle open).
    with open("string7to9.tsv") as mapping_file:
        for line in mapping_file:
            (s7, s9) = line.strip("\n").split("\t")
            mapping[s7] = s9

    # Compile once instead of on every input line.
    id_pattern = re.compile(r"(ENSP|D)[0-9]{5}")

    for line in sys.stdin:
        if line.startswith("#") or "ENSP" not in line:
            continue
        fields = line.strip("\n").split("\t")
        # Some inputs carry an extra identifier column before the protein
        # list; drop it when column 3 looks like a protein/disease ID.
        if id_pattern.search(fields[2]):
            fields = fields[:2] + fields[3:]
        for protein in fields[1].split("_"):
            if protein.startswith("D"):
                continue
            if "@" in protein:
                protein, suffix = protein.split("@")
                suffix = "@" + suffix
            else:
                suffix = ""
            if protein not in mapping:
                print("Not found:", protein, "in\n", line, file=sys.stderr)
                continue
            # NOTE(review): when several proteins are joined with "_",
            # only the last mapped one survives in fields[1] — preserved
            # as-is, but looks suspicious; confirm intended behaviour.
            fields[1] = mapping[protein] + suffix
        print("\t".join(fields))


if __name__ == '__main__':
    main()
| UTF-8 | Python | false | false | 1,091 | py | 22 | map_ann_7_to_9.py | 17 | 0.500458 | 0.484876 | 0 | 49 | 21.265306 | 75 |
cyan198/pure-predict | 14,697,378,104,543 | 9d3a1e49769c656cce4c7124e5dbccca0f91b4c6 | 144226196dc63710126399bd512b2d32847e4dda | /pure_sklearn/xgboost/tests/test_xgboost.py | 3d54d2fd44a9eaad07ccb5685ea0189a80a58dff | [
"Apache-2.0"
]
| permissive | https://github.com/cyan198/pure-predict | 011314230de72060d135c5bd7514a803020f8861 | c3431b79af4df9794c9f99246fa359a6c72a10ee | refs/heads/master | 2023-02-11T22:43:15.698909 | 2021-01-02T23:16:42 | 2021-01-02T23:16:42 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pytest
import sys
import numpy as np
try:
import xgboost
from xgboost import XGBClassifier
except ImportError:
pass
from sklearn.datasets import load_iris
from pure_sklearn.map import convert_estimator
METHODS = ["predict", "predict_proba"]
@pytest.mark.skipif("xgboost" not in sys.modules, reason="requires xgboost")
def test_xgboost():
X, y = load_iris(return_X_y=True)
X_ = X.tolist()
for y_ in [y, (y == 0).astype(int), (y == 2).astype(int)]:
for n_estimators in [2, 10]:
for max_depth in [3, 10]:
clf = XGBClassifier(
booster="gbtree",
random_state=5,
n_estimators=n_estimators,
max_depth=max_depth,
)
clf.fit(X, y_)
clf_ = convert_estimator(clf)
for method in METHODS:
scores = getattr(clf, method)(X)
scores_ = getattr(clf_, method)(X_)
assert np.allclose(scores, scores_, equal_nan=True)
| UTF-8 | Python | false | false | 1,072 | py | 74 | test_xgboost.py | 69 | 0.54291 | 0.534515 | 0 | 35 | 29.628571 | 76 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.