repo_name
stringlengths 7
111
| __id__
int64 16.6k
19,705B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
151
| content_id
stringlengths 40
40
| detected_licenses
sequence | license_type
stringclasses 2
values | repo_url
stringlengths 26
130
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 14.6k
687M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 12
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
10.2M
⌀ | gha_stargazers_count
int32 0
178k
⌀ | gha_forks_count
int32 0
88.9k
⌀ | gha_open_issues_count
int32 0
2.72k
⌀ | gha_language
stringlengths 1
16
⌀ | gha_archived
bool 1
class | gha_disabled
bool 1
class | content
stringlengths 10
2.95M
| src_encoding
stringclasses 5
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 10
2.95M
| extension
stringclasses 19
values | num_repo_files
int64 1
202k
| filename
stringlengths 4
112
| num_lang_files
int64 1
202k
| alphanum_fraction
float64 0.26
0.89
| alpha_fraction
float64 0.2
0.89
| hex_fraction
float64 0
0.09
| num_lines
int32 1
93.6k
| avg_line_length
float64 4.57
103
| max_line_length
int64 7
931
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
rachione/picoctf2019-scripts | 8,435,315,812,201 | 68a5d6b2a5efe94092d58de5589d1d1dc077f5ba | 7fc76a81b43cd0d0fce1b9985228178d21ce5ada | /rsa-pop-quiz/main2.py | 77b93f1973dcfdde5d18c230377e36b174cf7516 | [] | no_license | https://github.com/rachione/picoctf2019-scripts | 070b35b22774779a2ff44713316c0ba68f2ee01e | ce8c280cd2cc2982d43f1c9a4cb29d31aa36c512 | refs/heads/main | 2023-01-30T23:59:45.185647 | 2020-12-13T10:50:34 | 2020-12-13T10:50:34 | 321,036,261 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def multiplicative_inverse(a, b):
    """Return the multiplicative inverse of ``a`` modulo ``b``.

    Iterative extended Euclid. The coefficient of ``a`` is folded back into
    the range [0, b) whenever it goes negative, so the final value is the
    positive inverse (assumes gcd(a, b) == 1).
    """
    coef_a, prev_coef_a = 0, 1      # running / previous coefficient of a
    coef_b, prev_coef_b = 1, 0      # running / previous coefficient of b
    orig_a, orig_b = a, b           # originals, used to wrap negatives
    while b != 0:
        quotient = a // b
        a, b = b, a % b
        coef_a, prev_coef_a = prev_coef_a - quotient * coef_a, coef_a
        coef_b, prev_coef_b = prev_coef_b - quotient * coef_b, coef_b
        if prev_coef_a < 0:
            prev_coef_a += orig_b   # wrap modulo the original b
        if prev_coef_b < 0:
            prev_coef_b += orig_a   # wrap modulo the original a
    # prev_coef_a is the inverse of a mod b (only positive values returned).
    return prev_coef_a
def gcd(a, b):
    """Greatest common divisor via Euclid's algorithm."""
    while b:
        a, b = b, a % b
    return a
def multiply(x, y):
    """Karatsuba multiplication of two non-negative integers.

    Uses the builtin ``*`` once either operand fits in 1536 bits; otherwise
    splits both operands at a 32-bit-aligned midpoint and recurses on three
    sub-products.
    """
    _CUTOFF = 1536
    if x.bit_length() <= _CUTOFF or y.bit_length() <= _CUTOFF:
        # Base case: builtin multiplication is fast enough.
        return x * y

    bits = max(x.bit_length(), y.bit_length())
    split = (bits + 32) // 64 * 32          # midpoint rounded to 32-bit boundary
    low_mask = (1 << split) - 1

    x_lo, x_hi = x & low_mask, x >> split
    y_lo, y_hi = y & low_mask, y >> split

    hi_prod = multiply(x_hi, y_hi)
    mid_prod = multiply(x_lo + x_hi, y_lo + y_hi)
    lo_prod = multiply(x_lo, y_lo)
    cross = mid_prod - hi_prod - lo_prod    # Karatsuba middle term
    return (((hi_prod << split) + cross) << split) + lo_prod
# RSA parameters for the picoCTF 2019 "rsa-pop-quiz" challenge: one known
# prime factor p, the public modulus n, public exponent e and the ciphertext.
p = 153143042272527868798412612417204434156935146874282990942386694020462861918068684561281763577034706600608387699148071015194725533394126069826857182428660427818277378724977554365910231524827258160904493774748749088477328204812171935987088715261127321911849092207070653272176072509933245978935455542420691737433
n = 23952937352643527451379227516428377705004894508566304313177880191662177061878993798938496818120987817049538365206671401938265663712351239785237507341311858383628932183083145614696585411921662992078376103990806989257289472590902167457302888198293135333083734504191910953238278860923153746261500759411620299864395158783509535039259714359526738924736952759753503357614939203434092075676169179112452620687731670534906069845965633455748606649062394293289967059348143206600765820021392608270528856238306849191113241355842396325210132358046616312901337987464473799040762271876389031455051640937681745409057246190498795697239
e = 65537
ciphertext = 4699954403535877728943212516495239996093493409461427795061606820019520385578403561120385764629211115765041521697969103538878070126128059106090044437598460283768854171495071441758538307495380993096127617485853022154997313813963653770523746165616397996160676397490439829116013032980784837094738356175991364395455204835324455810814055944764109234129010492269581408600009386595427991513236458464354768157315483091898970879300954540175247825718514107084608264564889098214264863604883438961600216645976532706988513244819161793096143681897379315082134265617697635800727770233591268184387676917842275673893483582432877323662
# Recover the second prime and sanity-check the factorisation with the
# custom Karatsuba multiply.
q = n//p
chn = multiply(p, q)
assert chn == n
# Standard RSA private-key recovery: phi(n), check gcd(e, phi) == 1,
# d = e^-1 mod phi, then decrypt m = c^d mod n.
phi = multiply((p-1), (q-1))
g = gcd(e, phi)
assert g == 1
d = multiplicative_inverse(e, phi)
ans = pow(ciphertext, d, n)
print(ans)
| UTF-8 | Python | false | false | 3,232 | py | 112 | main2.py | 80 | 0.737933 | 0.252166 | 0 | 70 | 45.071429 | 629 |
matihost/learning | 8,864,812,527,562 | e2865c59d718308c68a47119fa77d04496545a3c | dd5f45e10c61f86239afe3117af36345a3051968 | /python/apps/exchange-rate/src/exchange_rate/cli/exchange_rate_web.py | cff44453aedf626870f72c694c177fbc8d3082c2 | [
"MIT"
] | permissive | https://github.com/matihost/learning | 49a5d91b78afa8ff2dd760b56cb6d2dfee52d736 | 04f9d7ccc7d458fe754aa5c2ac145f9b4ca2a552 | refs/heads/master | 2022-03-07T12:16:01.151763 | 2022-03-06T22:48:53 | 2022-03-06T22:48:53 | 144,377,092 | 2 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
"""
Show Foreign Currency/PLN pair.
It is based on Polish Central Bank (NBP) fixing exchange rate.
"""
from datetime import date
import logging
from waitress import serve
from flask import Flask, jsonify, make_response
from exchange_rate.helpers.version import package_version
from exchange_rate.exchange_rate_to_pln import ExchangeRateToPLN
# Flask application serving the exchange-rate endpoints.
app = Flask(__name__)
# Reuse waitress's logger so server and application logs share one stream.
logger = logging.getLogger('waitress')
logger.setLevel(logging.INFO)
# Plain-text service description served by the /about endpoint.
_DESCRIPTION = "Shows Foreign Currency/PLN pair based on Polish \
Central Bank (NBP) fixing exchange rate."
@app.route("/exchanges/<currency>/<convert_date>")
def exchanges(currency, convert_date):
"""Expose /exchanges GET endpoint."""
rate_to_pln = ExchangeRateToPLN().get_exchange_rate_to_pln(currency, date.fromisoformat(convert_date))
return jsonify({"currency": currency, "rate_to_pln": rate_to_pln, "date": convert_date})
@app.route('/about')
def about():
    """Serve the plain-text service description (GET /about)."""
    resp = make_response(_DESCRIPTION)
    resp.mimetype = 'text/plain'
    return resp
@app.route('/version')
def version():
    """Report the installed exchange-rate package version (GET /version)."""
    pkg_version = package_version('exchange-rate')
    return pkg_version
def main():
    """Serve the Flask app behind waitress on all interfaces, port 8080."""
    # waitress is used instead of Flask's development server (app.run).
    serve(app, host="0.0.0.0", port=8080)


if __name__ == "__main__":
    main()
| UTF-8 | Python | false | false | 1,367 | py | 437 | exchange_rate_web.py | 136 | 0.700073 | 0.687637 | 0 | 52 | 25.288462 | 106 |
Leonardo760/Desafios-UriOnline-Python | 16,973,710,792,513 | c82235902c8122ab57c033143e2330f84915192e | 04d01a838c1e8d0a504d8d3e74dfa4247fd5aec4 | /Desafio 1151.py | f9e85d482eb32b2ecf4cecc5ce3aed4da019fd22 | [] | no_license | https://github.com/Leonardo760/Desafios-UriOnline-Python | 611470a9a4d47b7c9bbaa35c62c763c97c20cea8 | 9095e614d9d61f98e5137d7c753add1bf96b4fea | refs/heads/master | 2020-05-07T20:44:36.073377 | 2019-04-11T20:52:18 | 2019-04-11T20:52:18 | 180,875,777 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | N = int(input())
while 0 < N < 46:
X = ("0 1 2 3 4 5 6 7 8 ")
| UTF-8 | Python | false | false | 75 | py | 15 | Desafio 1151.py | 15 | 0.373333 | 0.213333 | 0 | 3 | 19.666667 | 26 |
ptone/harvard-view-tools | 15,530,601,783,163 | 01264881eaee0e141678fc634b6103c2b379267c | ed25efd667d58ba300d868089da1fb0442d3f93c | /get_class.py | 0a4e4fdec33c099756b38fe79465e38bf2ed545c | [
"Apache-2.0"
] | permissive | https://github.com/ptone/harvard-view-tools | 4fac06abe43ae29f0be5993613d862bacf201c34 | 97157cb1115b214336822df24a5deb040ff926a2 | refs/heads/master | 2021-01-19T20:18:54.008519 | 2014-04-24T03:17:55 | 2014-04-24T03:17:55 | 19,093,896 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import print_function
import datetime
from pyquery import PyQuery as pq
import urllib2
def is_lecture(item):
    """Return the lecture id (e.g. "L03") of item's 'list-type' <li>, else False.

    Scans the <li> children of the item's first <ul>; an entry counts as a
    lecture when its class contains 'list-type' and its id contains 'L'.
    """
    entries = item.find('ul').findall('li')
    for entry in entries:
        if 'list-type' not in entry.attrib['class']:
            continue
        entry_id = entry.attrib['id']
        if 'L' in entry_id:
            return entry_id
    return False
def get_date(item):
    """Parse the item's date <div> ("Mon" text + <span>day</span>) to 'YYYYMMDD'.

    The year is hard-wired to 2013 (the course's semester).
    """
    date_div = item.find('div')
    month_abbrev = date_div.text.strip()
    day_of_month = date_div.find('span').text
    parsed = datetime.datetime.strptime(
        "%s-%s-2013" % (month_abbrev, day_of_month), "%b-%d-%Y")
    return parsed.strftime('%Y%m%d')
def main():
    """Scrape the Harvard DCE publication listing and download each
    lecture's H264 metadata XML to ./<lecture_id>.xml.

    Python 2 code (urllib2); network I/O throughout.
    """
    d = pq('http://cm.dce.harvard.edu/2014/01/14328/publicationListing.shtml')
    items = d('.list-asset').find('li')
    # Keep only entries whose first <div> is the date cell.
    # NOTE(review): lxml elements with no children are falsy, so
    # `x.find('div')` in a boolean context may skip childless divs — confirm.
    lectures = [x for x in items if (x.find('div') and 'list-date' in x.find('div').attrib['class'])]
    lecture_data_format = 'http://cm.dce.harvard.edu/2014/01/14328/{lect_num}/14328-{lect_date}-{lect_num}-H264MultipleHighLTH-16x9.xml'
    for l in lectures:
        # gets the lecture label in the form "L03"
        l_id = is_lecture(l)
        if l_id:
            print("getting: ", l_id)
            datestr = get_date(l)
            data_url = lecture_data_format.format(lect_num=l_id, lect_date=datestr)
            # Fetch the XML and write it verbatim next to the script.
            with open('%s.xml' % l_id, 'w') as f:
                f.write(
                    urllib2.urlopen(data_url ).read()
                )
if __name__ == "__main__":
    main()
| UTF-8 | Python | false | false | 1,387 | py | 5 | get_class.py | 3 | 0.565249 | 0.535689 | 0 | 43 | 31.255814 | 136 |
dima2395/tables | 523,986,056,208 | 35704f45229653bfd43776d2476cdfe3b8c5fde2 | 7bcbbd557856033d4840f7b19ccb118409a067a7 | /tables/templatetags/orders_helpers.py | 5e9199a949a1f32272867dadc1e3dd8e534321d0 | [] | no_license | https://github.com/dima2395/tables | 73857707e5de85488c9d250d0bb5eb299653bb04 | 08422e5a619f97994f732adf7302e23527ebef59 | refs/heads/master | 2020-12-02T11:17:19.132846 | 2017-08-26T14:53:31 | 2017-08-26T14:53:31 | 96,623,393 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django import template
from tables.models import Order
# Template-tag registry for this module's simple tags.
register = template.Library()
@register.simple_tag(takes_context=True)
def get_orders_count(context, filtered=""):
    """Count the current company's orders, optionally limited to one status.

    Agents only see (and count) orders they created themselves.
    """
    user = context['user']
    company = context['company']
    queryset = Order.objects.filter(company=company)
    if filtered:
        queryset = queryset.filter(status=filtered)
    if user.profile.is_agent():
        queryset = queryset.filter(created_by=user)
    return queryset.count()
Manasikotalpur1999/JISAssasins | 14,456,859,925,142 | 085738f12f1e6d667668179afc51b9816ee7aafa | 4cd0d7dbaa68160c8257551d2e640927de2f0dc5 | /Q24.py | 9d54d72b20f68d035843f159467f1ae47b2953b2 | [] | no_license | https://github.com/Manasikotalpur1999/JISAssasins | 9ca8c028094d9d60eb1a7573fbc511d88fdd4c41 | f9dccab588f17b9c361e3b4cd796b0f52000fb58 | refs/heads/master | 2020-04-13T22:24:24.555411 | 2018-12-30T17:18:44 | 2018-12-30T17:18:44 | 163,478,134 | 0 | 0 | null | true | 2018-12-29T05:14:03 | 2018-12-29T05:14:03 | 2018-12-29T05:09:14 | 2018-12-29T03:06:55 | 15 | 0 | 0 | 0 | null | false | null | #!/usr/bin/env python
# coding: utf-8
# In[5]:
# question 24: print the first nine multiples of 10
Num = 10
multiplier = 1
while multiplier < 10:
    print(Num * multiplier)
    multiplier += 1
# In[ ]:
| UTF-8 | Python | false | false | 125 | py | 15 | Q24.py | 15 | 0.544 | 0.472 | 0 | 13 | 8.307692 | 21 |
lpp1985/ZTPG | 3,925,600,147,185 | d7cd475ddda57b9b32cce7d8538de54ae8864483 | 411f9978ec335b1368e4c6821214c91af4b8f8e1 | /Pacbio_Corrected.bak | c60b96ddeef1a0b280cb199903bd1798cc6b2c0e | [] | no_license | https://github.com/lpp1985/ZTPG | 04f05c9c5619d358e7f52299edd280ada4f4443c | aed2b842aba13d8825ce37f9f39f1917e1132def | refs/heads/master | 2021-01-19T22:29:11.338471 | 2017-01-09T02:53:33 | 2017-01-09T02:53:33 | 22,055,532 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
#coding:utf-8
"""
Author: --<>
Purpose:
Created: 2016/6/13
"""
from Dependency import *
def get_para():
    """Parse command-line options.

    Options: -i/--Input contig FASTA, -o/--Ouptut result suffix,
    -p/--Pacbio reads file. Returns the (options, args) pair from optparse.
    """
    usage = '''
    %prog [ options ]
    '''
    opt_parser = OptionParser(usage=usage)
    opt_parser.add_option("-i", "--Input", action="store",
                          dest="fasta",
                          help="Contig Sequence in Fasta Format")
    # NOTE(review): "--Ouptut" typo kept on purpose — callers already pass it.
    opt_parser.add_option("-o", "--Ouptut", action="store",
                          dest="output",
                          help=" Result Suffix "
                          )
    opt_parser.add_option("-p", "--Pacbio", action="store",
                          dest="pacbio",
                          help=" Pacbio Reads "
                          )
    return opt_parser.parse_args()
if __name__ == '__main__':
    # Build the contig relation graph, align PacBio reads with nucmer, pick a
    # contig ordering supported by the reads, and hand it to the assembler.
    options,args = get_para()
    out_path = os.path.dirname(os.path.abspath(options.output))
    raw_file = options.fasta
    # Self-alignment of the contigs; Relation_parse turns the overlaps into a
    # (presumably networkx) graph with one +/- node per contig orientation.
    output_category = Mummer_parse( raw_file )
    G_Contig= Relation_parse( raw_file, output_category, options.output,contain_trim=0 )
    #nx.single_source_shortest_path_length(G_Contig)
    # Align the long reads against the contigs and stream the tab-separated
    # show-coords output (-orlTH: sorted by reference, with lengths, no header).
    os.system( "nucmer --maxmatch %s %s -p %s/Pacbio"%( options.pacbio,options.fasta,out_path ) )
    all_ref_data = os.popen( "show-coords -orlTH %s/Pacbio.delta"%(out_path) )
    all_ref_nodes = {}
    all_sorted_nodes = []
    for line in all_ref_data:
        line_l = line[:-1].split("\t")
        # Query (contig) start/end determine its orientation on the read.
        q_start = int(line_l[2])
        q_end = int(line_l[3])
        if q_start >q_end:
            tag = '-'
        else:
            tag = '+'
        q_length = int(line_l[8])
        q_name = line_l[-2]
        all_ref_nodes [ q_name ] = ""
        # Keep only long contigs fully contained in (or identical to) a read.
        if q_length>1000 and line_l[-1] in ["[CONTAINS]","[IDENTITY]"]:
            all_sorted_nodes.append( q_name+tag )
    # Try successively shorter spans of the read-supported node list until a
    # path through the contig graph exists.
    # NOTE(review): the bare excepts also hide IndexError when fewer than two
    # nodes were collected — confirm that is acceptable.
    try:
        new_path = nx.shortest_path( G_Contig,all_sorted_nodes[0], all_sorted_nodes[-1] )
    except:
        try:
            new_path = nx.shortest_path( G_Contig,all_sorted_nodes[0], all_sorted_nodes[-2] )
        except:
            try:
                new_path = nx.shortest_path( G_Contig,all_sorted_nodes[1], all_sorted_nodes[-2] )
            except:
                new_path = nx.shortest_path( G_Contig,all_sorted_nodes[1], all_sorted_nodes[-1] )
    # Record the chosen path and run the downstream assembler on it.
    # NOTE(review): END is never closed/flushed before Assembly_by_Nucmer.py
    # reads it — the subprocess may see an empty file; confirm.
    END = open("%sNew_path.list"%(out_path),'w')
    name = out_path.split("/")[-1]
    END.write('%s\t'%(name)+"; ".join(new_path)+'\tDoubt\n')
    os.system( "Assembly_by_Nucmer.py -i %s -o %s -u %s/Reads.fasta"%(END.name,options.output,out_path) )
    # Disabled cleanup: drop graph nodes with no read support.
    #removed_node = {}
    #for node in G_Contig.nodes():
        #if node[:-1] not in all_ref_nodes:
            #removed_node[ node[:-1] ] = ""
    #for key in removed_node:
        #G_Contig.remove_bi_node( key )
| UTF-8 | Python | false | false | 2,829 | bak | 97 | Pacbio_Corrected.bak | 48 | 0.513312 | 0.502662 | 0 | 86 | 31.593023 | 109 |
alphafan/quadratic-optimization | 16,810,502,015,144 | 4bc016f58eb9f6cad0d4cf0b402317dd16e47ce1 | d76f2380980b76137f224f4675278504444f26c7 | /PortfolioOptimization.py | 7fc262bad9c9110c1e59df98a5c4a538f91dcef0 | [] | no_license | https://github.com/alphafan/quadratic-optimization | c1ce92ddfe6d4bfa216017b8ba59d181a6eaccbd | 7f8697c98fa0b691fc10a509309e7cfa9ac93219 | refs/heads/master | 2021-06-19T16:16:14.657415 | 2017-06-28T19:25:56 | 2017-06-28T19:25:56 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 20 11:47:06 2017
@author: Yufan
"""
import numpy as np
import matplotlib.pyplot as plt
import cvxopt as opt
from cvxopt import blas, solvers
import pandas as pd
import seaborn as sns; sns.set(color_codes=True)
# Python 2 script (note the `print` statement below). Fixed seed for
# reproducible random portfolios.
np.random.seed(123)
# Turn off progress printing
solvers.options['show_progress'] = False
# Column labels: date column followed by the seven asset classes.
AC = np.array(["Date","stocks", "hedge funds", "government bonds", "real estate","money market","corporate bond","renew energy"])
# Monthly index returns exported from Datastream.
f = pd.read_excel("pythonindices.xlsx")
# Parse the YYYYMM integer date column into timestamps.
f['Datum'] = pd.to_datetime(f['Datum'], format = "%Y%m")
#f.index = f['Datum']
#f.drop(f.columns[0], axis = 1, inplace = True)
#f.index = f.index.map(lambda x: x.strftime('%Y-%m'))
# Keep rows 79-232 (154 monthly observations); drops the trailing string row.
f = f[79:233]
#f = f[229:233]
# Relabel the columns with the asset-class names.
f.columns = AC
print f.describe()
# Per-asset mean/std used as the red reference points in the plots.
points_mus = f.mean()
points_sigma = f.std()
points_label = f.columns
def describeData():
    """Plot one regression scatter (seaborn regplot) per asset class vs time.

    Reads the module-level DataFrame `f`; purely a side-effect plot.
    """
    global f
    fig, axs = plt.subplots(ncols=7, figsize = (24,12))
    fig.tight_layout()
    # One subplot per asset class, varying only the marker.
    # NOTE(review): `re_plt` is assigned twice (axs[3] and axs[6]); harmless
    # since the handles are unused.
    st_plt = sns.regplot(x=np.array(f.index),y="stocks",data = f,ax=axs[0])
    hf_plt = sns.regplot(x=np.array(f.index),y="hedge funds",data = f,marker='x',ax=axs[1])
    gb_plt = sns.regplot(x=np.array(f.index),y="government bonds",data = f,marker='+',ax=axs[2])
    re_plt = sns.regplot(x=np.array(f.index),y="real estate", data = f, marker = '*', ax = axs[3])
    mm_plt = sns.regplot(x=np.array(f.index),y="money market", data = f, marker = 'o', ax = axs[4])
    cp_plt = sns.regplot(x=np.array(f.index),y="corporate bond", data = f, marker = 'x', ax = axs[5])
    re_plt = sns.regplot(x=np.array(f.index),y="renew energy", data = f, marker = 'o', ax = axs[6])
    plt.show()
## NUMBER OF ASSETS
n_assets = len(f.columns)
## NUMBER OF OBSERVATIONS
n_obs = len(f.index)
# Number of random portfolios sampled for the mean/std scatter plot.
n_portfolios = 1000
def rand_weights(n):
    """Draw n non-negative portfolio weights that sum to one."""
    raw = np.random.rand(n)
    return raw / sum(raw)
# Pre-draw random weight vectors; np.sort sorts each vector's entries.
# NOTE(review): wei_list is never used again below — apparently dead code.
wei_list = []
for i in range(n_portfolios):
    wei_list.append(rand_weights(n_assets))
wei_list = np.sort(wei_list)
# Drop the date column and transpose so rows are assets, columns are months.
f.drop(f.columns[0], axis = 1, inplace = True)
returns = f.T
def random_portfolio(returns):
    '''
    Returns the mean and standard deviation of returns for a random portfolio
    '''
    mean_vec = np.asmatrix(np.mean(returns, axis=1))
    weight_vec = np.asmatrix(rand_weights(returns.shape[0]))
    cov_mat = np.asmatrix(np.cov(returns))

    port_mean = weight_vec * mean_vec.T
    port_std = np.sqrt(weight_vec * cov_mat * weight_vec.T)

    # Resample outlier portfolios so the scatter plot stays compact.
    if port_std > 2:
        return random_portfolio(returns)
    return port_mean, port_std
# Sample n_portfolios random portfolios; column_stack plus unpacking yields
# the vectors of portfolio means and standard deviations (xrange: Python 2).
means, stds = np.column_stack([
    random_portfolio(returns)
    for _ in xrange(n_portfolios)
])
def plotData():
    """Scatter the random portfolios (blue) and single-asset points (red),
    annotated with the asset-class names."""
    global stds, means, points_sigma, points_mus, f
    fig = plt.figure(figsize=(12,8))
    ax = fig.add_subplot(111)
    plt.plot(stds, means, 'o', markersize=5)
    plt.plot(points_sigma, points_mus, 'o', color = 'r', markersize = 5)
    # Label each single-asset point with its column name.
    for i, text in enumerate(f.columns):
        ax.annotate(text,(points_sigma[i],points_mus[i]))
    plt.xlabel('std')
    plt.ylabel('mean')
    plt.title('Mean and standard deviation of Asset Class returns From Datastream')
    plt.show()
def optimal_portfolio(returns):
    """Trace the Markowitz efficient frontier with cvxopt and pick a portfolio.

    Solves min w'(mu*S)w - pbar'w s.t. w >= 0, sum(w) == 1 for a log-spaced
    sweep of risk-aversion values, then returns (weights, frontier_returns,
    frontier_risks) for the portfolio at the frontier's fitted apex.
    """
    n = len(returns)
    returns = np.asmatrix(returns)
    # Log-spaced risk-aversion multipliers from 10^-1 to 10^4.
    N = 1000
    mus = [10**(5.0 * t/N - 1.0) for t in range(N)]
    # Convert to cvxopt matrices
    S = opt.matrix(np.cov(returns))
    pbar = opt.matrix(np.mean(returns, axis=1))
    # Create constraint matrices
    G = -opt.matrix(np.eye(n))   # negative n x n identity matrix
    h = opt.matrix(0.0, (n ,1))
    A = opt.matrix(1.0, (1, n))
    b = opt.matrix(1.0)
    # Calculate efficient frontier weights using quadratic programming
    portfolios = [solvers.qp(mu*S, -pbar, G, h, A, b)['x']
                  for mu in mus]
    ## CALCULATE RISKS AND RETURNS FOR FRONTIER
    # NOTE: rebinds `returns` from the input matrix to the frontier list.
    returns = [blas.dot(pbar, x) for x in portfolios]
    risks = [np.sqrt(blas.dot(x, S*x)) for x in portfolios]
    ## CALCULATE THE 2ND DEGREE POLYNOMIAL OF THE FRONTIER CURVE
    m1 = np.polyfit(returns, risks, 2)
    x1 = np.sqrt(m1[2] / m1[0])
    # CALCULATE THE OPTIMAL PORTFOLIO at risk-aversion x1.
    wt = solvers.qp(opt.matrix(x1 * S), -pbar, G, h, A, b)['x']
    return np.asarray(wt), returns, risks
# Compute the frontier; note this rebinds `returns` to the frontier's
# return list (the asset-return matrix is no longer needed afterwards).
weights, returns, risks = optimal_portfolio(returns)
def froPlot():
    """Overlay the efficient frontier (yellow line) on the random-portfolio
    scatter and the annotated single-asset points."""
    global weights, returns, risks, stds, means, points_sigma, points_mus, f
    fig = plt.figure(figsize= (24,12))
    ax = fig.add_subplot(111)
    plt.plot(risks,returns,'y-o')
    plt.plot(stds, means, 'o')
    plt.plot(points_sigma, points_mus, 'o', color = 'r', markersize = 5)
    # Label each single-asset point with its column name.
    for i, text in enumerate(f.columns):
        ax.annotate(text,(points_sigma[i],points_mus[i]))
    plt.ylabel('mean')
    plt.xlabel('std')
    plt.show()
#==============================================================================
# Generate the plots
#==============================================================================
# Each call opens a blocking matplotlib window in sequence.
describeData()
plotData()
froPlot()
LabaraTor/newsfeed | 910,533,089,180 | b3cb1498c6fa6cf95460be604e3c36f2816d45d9 | 242591130ba1a96e2d249ad58b3c702e69ffe643 | /src/newsfeed/webapi/handlers/__init__.py | 1cd3b612cbf13139df3eef1ffeb8a7cda513f4e8 | [
"BSD-3-Clause"
] | permissive | https://github.com/LabaraTor/newsfeed | 572cfa90d6f6527a9769df3bac129da14c5a0761 | c68e7b361851633671a873ebdd5105f6beff7d1a | refs/heads/master | 2022-03-30T14:59:53.387549 | 2020-01-14T15:16:59 | 2020-01-14T15:16:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Web handlers."""
from . import subscriptions # noqa: F401
from . import events # noqa: F401
from . import misc # noqa: F401
| UTF-8 | Python | false | false | 131 | py | 22 | __init__.py | 18 | 0.671756 | 0.603053 | 0 | 5 | 25.2 | 41 |
mdahmed-abdus/C-sem-IV | 5,102,421,183,600 | 2390e4f663dba62cea1250c3b4d3d9de11093018 | 7a4db44acdc47828990184c45b6e91b5b50bf342 | /OpenSourceTechLab/exp2b.py | 7ed135d7973bdc3d744c4c8fc4664b7142d56b87 | [] | no_license | https://github.com/mdahmed-abdus/C-sem-IV | c20a62bb2188aa2ae655519ff5ce5568cb70137e | d2b10f5c7fb36cb669f4401bca7a83e951f28617 | refs/heads/master | 2020-12-15T13:15:22.472095 | 2020-04-09T08:42:31 | 2020-04-09T08:42:31 | 235,114,962 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # using while loop to find power of a number
num = int(input("Enter the number: "))
power = int(input("Enter the power: "))
ans, i = 1, 1
while i <= power:
ans *= num
i += 1
print(f"{num} to the power {power} = {ans}")
| UTF-8 | Python | false | false | 230 | py | 43 | exp2b.py | 23 | 0.595652 | 0.582609 | 0 | 11 | 19.909091 | 44 |
Hnobles12/ChessAI | 15,436,112,504,418 | a612b9632eb6a6117633ec9466d5112aeb03b951 | 30429e8b95d61ace636232b7441cea9227ea6eee | /Checkers/checkers/__init__.py | 5486d9a1907ef60c6a6d0450c4ae245d3b0a0382 | [] | no_license | https://github.com/Hnobles12/ChessAI | f424f258a7bb25f3a0a345b8fca8ced1f0d1498b | 3de2f5f360272c6d72d23b6b455229cca72d21e9 | refs/heads/master | 2023-02-10T15:49:31.598614 | 2021-01-05T01:55:33 | 2021-01-05T01:55:33 | 326,818,255 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import torch as t
from .errors import *
# Integer encodings used in the 8x8 board tensor (0 = empty square).
SYMBOLS = {'black': 1, 'white': 2}
# Row delta of a forward move per colour: black starts on rows 0-2 and moves
# down (+1); white starts on rows 5-7 and moves up (-1).
FORWARD = {'black': 1, 'white': -1}
class Piece:
    """A single checkers piece: location, colour, king flag and identity."""

    def __init__(self, loc: list, color: str):
        # [row, col] board coordinates and owning colour.
        self.loc, self.color = loc, color
        # Pieces start as ordinary men; promotion would flip this flag.
        self.is_king = False
        # Stable identity token for this piece object.
        self.id = id(self)

    def move(self, new_loc):
        """Record the piece's new board location."""
        self.loc = new_loc
class Board(object):
    """
    Checkers Board with states.

    `curr` is an 8x8 int tensor (0 = empty, SYMBOLS[color] otherwise);
    `piecies` holds the live Piece objects; `focused_piece` is the piece
    selected by the most recent movePiece() lookup.
    """
    def __init__(self):
        # init board tensor with zeros for empty space
        self.curr = t.zeros(8, 8, dtype=t.int)
        self.piecies = []
        self.focused_piece = None

    def movePiece(self, color: str, start_loc: list, end_loc: list) -> None:
        """Move the piece of `color` at `start_loc` to `end_loc` if legal."""
        # Bounds check: the bitwise OR of values in [0, 7] stays in [0, 7];
        # any negative or >7 coordinate pushes the OR outside that range.
        if not 0 <= (start_loc[0] | start_loc[1] | end_loc[0] | end_loc[1]) <= 7:
            raise LocationRangeError()
        # Find piece on board by location and colour.
        piece_found = False
        for i in range(len(self.piecies)):
            if (self.piecies[i].loc == start_loc) and (self.piecies[i].color == color):
                piece_found = True
                break
        if not piece_found:
            raise PieceNotFound()
        else:
            self.focused_piece = self.piecies[i]
        # Validate the move; an InvalidMove aborts quietly after printing.
        # NOTE(review): checkValidMove can also raise SpaceOccupied, which is
        # NOT caught here — confirm that is intended.
        try:
            valid_mv = self.checkValidMove(end_loc)
            if not valid_mv:
                raise InvalidMove(add_info="Moving in this manner is illegal.")
        except InvalidMove as err:
            # Deal with invalid input
            print(err.message) ## TEMP
            # Game.print(err.message)
            return
        # Move piece: update the Piece object and both board cells.
        self.piecies[i].move(end_loc)
        self.curr[start_loc[0], start_loc[1]] = 0
        self.curr[end_loc[0], end_loc[1]] = SYMBOLS[color]
        # Check for eliminated piecies and remove them.
        # ... (capture handling not implemented yet)
        return

    def generateBoard(self):
        """Place the 12 black (rows 0-2) and 12 white (rows 5-7) starting
        pieces on alternating squares."""
        # Setup black piecies
        for i in range(0, 3):
            if i % 2 != 0:
                start = 0
            else:
                start = 1
            for j in range(start, 8, 2):
                self.curr[i, j] = 1
                self.piecies.append(Piece([i, j], "black"))
        # Setup white piecies
        for i in range(5, 8):
            if i % 2 != 0:
                start = 0
            else:
                start = 1
            for j in range(start, 8, 2):
                self.curr[i, j] = 2
                self.piecies.append(Piece([i, j], "white"))

    def update(self):
        # Placeholder: no per-turn board refresh implemented yet.
        pass

    def checkValidMove(self, end_loc: list) -> bool:
        """Return True if focused_piece may make a plain diagonal step to
        end_loc; raises SpaceOccupied if any piece already sits there."""
        # Check if space is occupied
        for piece in self.piecies:
            if piece.loc == end_loc:
                raise SpaceOccupied()
        # Check if end location is in valid paths
        valid_paths = []
        # Forward + abs::right and forward + abs::left, kept on the board.
        if 0 <= self.focused_piece.loc[0]+FORWARD[self.focused_piece.color] <= 7:
            if 0 <= self.focused_piece.loc[1]+1 <= 7:
                valid_paths.append([self.focused_piece.loc[0]+FORWARD[self.focused_piece.color], self.focused_piece.loc[1]+1])
            if 0 <= self.focused_piece.loc[1]-1 <= 7:
                valid_paths.append([self.focused_piece.loc[0]+FORWARD[self.focused_piece.color], self.focused_piece.loc[1]-1])
        # Kings may also step backwards.
        # NOTE(review): this branch checks only the column bounds, not the
        # destination row — a king on its back row can be offered an off-board
        # path; confirm and add a row bound if unintended.
        if self.focused_piece.is_king:
            if 0 <= self.focused_piece.loc[1]+1 <= 7:
                valid_paths.append([self.focused_piece.loc[0]-FORWARD[self.focused_piece.color], self.focused_piece.loc[1]+1])
            if 0 <= self.focused_piece.loc[1]-1 <= 7:
                valid_paths.append([self.focused_piece.loc[0]-FORWARD[self.focused_piece.color], self.focused_piece.loc[1]-1])
        if end_loc in valid_paths:
            return True
        else:
            return False
| UTF-8 | Python | false | false | 3,818 | py | 6 | __init__.py | 4 | 0.52043 | 0.503143 | 0 | 125 | 29.544 | 126 |
athmey/MyLeetCode | 4,647,154,624,179 | 632a26d63cfbb17f95cb59c8b5b5613ea1a301d7 | 1d581781ed7f2466de23d6ec7fdf12ddd05a2b9a | /findContentChildren.py | 9014347cf3e3160c63224ee09e6be5e92e008fd3 | [] | no_license | https://github.com/athmey/MyLeetCode | 472292c74d7b6c4ae3e7dc9de297341f2bc5969e | 79ca9fdc471a1c84fce188cb05d2ef7b2469eb69 | refs/heads/master | 2020-09-14T17:36:24.222631 | 2019-11-21T15:30:32 | 2019-11-21T15:30:32 | 223,202,214 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 455. 分发饼干
class Solution(object):
def findContentChildren(self, g, s):
"""
:type g: List[int]
:type s: List[int]
:rtype: int
"""
biscuits = list(s)
list.sort(g)
list.sort(biscuits)
for child in g:
if not self.findBiscuit(child, biscuits):
break
# 发出去了多少块饼干,就是满足了多少个孩子
return len(s) - len(biscuits)
def findBiscuit(self, child, biscuits):
if len(biscuits) == 0 or max(biscuits) < child:
return False
if min(biscuits) >= child:
biscuits.pop(0)
return True
for biscuit in biscuits:
if biscuit >= child:
biscuits.remove(biscuit)
return True
return False
| UTF-8 | Python | false | false | 844 | py | 175 | findContentChildren.py | 175 | 0.506281 | 0.5 | 0 | 35 | 21.742857 | 55 |
umax/python-links | 14,620,068,686,436 | 337f59c00f3eed7c964fe242086e38403bd79053 | 5408c27ef6379d48ef43b7db0fd32fccafa7f418 | /webapp/core/decorators.py | 30bf0cc173eebe8c637a20793992b7979c77091c | [] | no_license | https://github.com/umax/python-links | f4de3b6cec68e6e5b69b5765dad8346bd1edb4c3 | 12a04107416e9b49865d0cec2463dff20d5a02e2 | refs/heads/master | 2016-08-07T04:23:48.546821 | 2011-09-27T13:30:40 | 2011-09-27T13:30:40 | 2,336,106 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from webapp.core.responses import HtmlResponse, JsonResponse
def html_response(handler_method):
    """Decorator: build an HtmlResponse from the handler's request, pass it
    to the handler, then dispatch it via handle_response."""
    def wrapper(self, *args, **kwargs):
        resp = HtmlResponse(self.request)
        handler_method(self, resp, *args, **kwargs)
        self.handle_response(resp)
    return wrapper
def json_response(handler_method):
    """Decorator: build a JsonResponse from the handler's request, pass it
    to the handler, then dispatch it via handle_response."""
    def wrapper(self, *args, **kwargs):
        resp = JsonResponse(self.request)
        handler_method(self, resp, *args, **kwargs)
        self.handle_response(resp)
    return wrapper
willdvaz/test_lasagne-digits | 335,007,490,550 | 0009777d1cc2b9d9a6884ae515e8f9f830503ff2 | b36317fd375e7f5e2e309b1b5d2f6b76eae71287 | /code/main.py | d77252ed5b3b21c797a45804e7725c6478cc6952 | [] | no_license | https://github.com/willdvaz/test_lasagne-digits | 4eeb406241e255fa44bc14cf614a8948678113e0 | 6dd1555585f4e675529b3e33ed58e60035db6e64 | refs/heads/master | 2016-08-08T21:31:29.883621 | 2015-10-10T17:46:07 | 2015-10-10T17:46:07 | 43,760,474 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #:#importing dependencies
import time

import numpy as np
import pandas as pd
import theano
import theano.tensor as T
import lasagne
#importing the data
# Number of full passes over the training set performed by main().
num_epoch = 30;
def load_dataset():
    """Read the digit CSVs and return (X_train, y_train, X_val, y_val,
    X_test, y_test): column 0 as labels, remaining columns as pixels.

    NOTE(review): validation and test splits are the same slice of
    test.csv, and a Kaggle-style test.csv normally has no label column —
    confirm the data files really carry labels in column 0.
    """
    dftrain = pd.read_csv('../data/train.csv');
    dftest = pd.read_csv('../data/test.csv');
    imtrain = dftrain.values;
    imtest = dftest.values;
    X_train= imtrain[:, 1:];
    y_train= imtrain[:, 0];
    X_val= imtest[:, 1:];
    y_val= imtest[:, 0];
    X_test=imtest[:, 1:];
    y_test=imtest[:, 0];
    return X_train, y_train, X_val, y_val, X_test, y_test
# Mini-batch size and flattened image length (28x28 digit images).
batch_size=100 ;
length_data = 28*28;
def build_mlp(input_var=None):
    """Build a 784 -> 800 -> 800 -> 10 MLP with dropout and return its
    output layer.

    :param input_var: optional Theano variable wired into the input layer.
        BUG fix: the original took no argument and read a global
        ``input_var`` that is only defined inside main() (which calls
        ``build_mlp(input_var)``); accepting it with a default keeps the
        old zero-argument call working.
    """
    # BUG fix: was ``lasagne.layers.InputLayers`` — no such class exists.
    l_in = lasagne.layers.InputLayer(shape=(batch_size, length_data), input_var=input_var)
    # 20% dropout on the raw inputs.
    l_in_drop = lasagne.layers.DropoutLayer(l_in, p=0.2)
    l_hid1 = lasagne.layers.DenseLayer(
        l_in_drop, num_units=800,
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform())
    l_hid1_drop = lasagne.layers.DropoutLayer(l_hid1, p=0.5)
    l_hid2 = lasagne.layers.DenseLayer(
        l_hid1_drop, num_units=800,
        nonlinearity=lasagne.nonlinearities.rectify)
    l_hid2_drop = lasagne.layers.DropoutLayer(l_hid2, p=0.5)
    # BUG fix: was ``lasagne.nonlinearity.softmax`` — the module is
    # ``lasagne.nonlinearities``.
    l_out = lasagne.layers.DenseLayer(
        l_hid2_drop, num_units=10,
        nonlinearity=lasagne.nonlinearities.softmax)
    return l_out
def iterate_minibatches(inputs, targets, batchsize ):
    """Yield one class-balanced batch of (pixels, labels) built from the data.

    Rebuilds a DataFrame from the arrays, then takes the first
    batchsize/num_classes rows of each label group.

    NOTE(review): despite the name this generator yields exactly once (the
    yield sits outside the per-class loop), the reindex() shuffle result is
    discarded so no actual shuffling happens, ``total_result`` is unused,
    and ``length`` uses ``/`` (true division on Python 3 would make the
    slice below fail) — confirm intent before reuse.
    """
    dataf= pd.DataFrame(targets, columns = ['label'])
    # One 'pixelN' column per input feature.
    for i in range(len(inputs[0])):
        dataf['pixel'+str(i)] = pd.Series(inputs[:,i])
    # Smallest class size and number of distinct labels.
    size_max =min(dataf['label'].value_counts().values);
    nb_numbers = len(dataf['label'].value_counts().values);
    # Disabled guard (message translated from French: "warning: batch too
    # large or not enough data to balance the dataset"); ``self`` suggests
    # this was pasted from a class method.
    # if size_max < batch_size:
    #     self.trap_error('attention batch trop grand ou pas assez de donnees pour pouvoir equilibrer la base')
    #     return False
    #print nb_numbers;
    # Rows taken per class so the batch is label-balanced.
    length = batchsize/nb_numbers;
    #print length;
    total_result = [];
    tot = pd.DataFrame();
    for k in range(nb_numbers):
        a = dataf[dataf['label']==k];
        a.reindex(np.random.permutation(a.index));
        a = a[0:length];
        #print a
        tot = tot.append(a, ignore_index = True );
    # Pixels (all but the label column) and labels (column 0).
    yield tot.values[:, 1:],tot.values[:, 0];
def main():
    """Train the MLP on the digit data, printing per-epoch training loss
    and validation loss/accuracy.

    Bug fixes versus the original: the epoch loop read the undefined
    ``num_epochs`` (the module constant is ``num_epoch``); the evaluation
    function was bound to ``test_fn`` but called as ``val_fn``; and the
    validation loop iterated ``batches`` while unpacking the stale
    ``batch`` left over from the training loop.
    """
    # load the dataset
    X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()

    # Prepare Theano variables for inputs and targets
    input_var = T.tensor4('inputs')
    target_var = T.ivector('targets')

    # Create the network and its training loss (dropout active).
    network = build_mlp(input_var)
    prediction = lasagne.layers.get_output(network)
    loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
    loss = loss.mean()

    # Nesterov-momentum SGD over all trainable parameters.
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        loss, params, learning_rate=0.01, momentum=0.9)

    # Deterministic (dropout-off) expressions for evaluation.
    test_prediction = lasagne.layers.get_output(network, deterministic=True)
    test_loss = lasagne.objectives.categorical_crossentropy(test_prediction, target_var)
    test_loss = test_loss.mean()
    # Fraction of samples whose arg-max prediction matches the label.
    test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
                      dtype=theano.config.floatX)

    ### COMPILING
    train_fn = theano.function([input_var, target_var], loss, updates=updates)
    val_fn = theano.function([input_var, target_var], [test_loss, test_acc])

    for epoch in range(num_epoch):
        train_err = 0
        train_batches = 0
        start_time = time.time()
        for batch in iterate_minibatches(X_train, y_train, 500):
            inputs, targets = batch
            train_err += train_fn(inputs, targets)
            train_batches += 1

        # Evaluate loss/accuracy over the validation set.
        val_err = 0
        val_acc = 0
        val_batches = 0
        for batch in iterate_minibatches(X_val, y_val, 500):
            inputs, targets = batch
            err, acc = val_fn(inputs, targets)
            val_err += err
            val_acc += acc
            val_batches += 1

        print("Epoch {} of {} took {:3f}s".format(epoch + 1, num_epoch, time.time() - start_time))
        print(" training loss:\t\t{:.6f}".format(train_err / train_batches))
        print(" validation loss: \t\t{:.6f}".format(val_err / val_batches))
        print(" validation accuracy: \t\t{:.2f} %".format( val_acc / val_batches *100))
| UTF-8 | Python | false | false | 4,575 | py | 1 | main.py | 1 | 0.618579 | 0.603497 | 0 | 135 | 32.77037 | 110 |
made-ml-in-prod-2021/kadetfrolov | 609,885,374,960 | 22a4fdf0e8887c90cf0170b3e1fdb4e7be6594f4 | 2101181a01fdea8b5c53c2ce20536bcd0f0224c3 | /airflow_ml_dags/dags/prod3_3_predict.py | 0f92e826a6a2aad2c57f81fc26f0437807ad42fe | [] | no_license | https://github.com/made-ml-in-prod-2021/kadetfrolov | f7d237bf465d6f2419feeedd062cea75f08ebad2 | 148a2ca81c75e5fcbba18134f327ceb97538b393 | refs/heads/main | 2023-06-03T12:27:59.606466 | 2021-06-26T22:50:24 | 2021-06-26T22:50:24 | 355,492,845 | 0 | 0 | null | false | 2021-06-26T22:50:25 | 2021-04-07T09:51:35 | 2021-06-16T20:55:53 | 2021-06-26T22:50:24 | 372 | 0 | 0 | 0 | Python | false | false | from airflow import DAG
from airflow.operators.dummy import DummyOperator
from airflow.providers.docker.operators.docker import DockerOperator
from airflow.utils.dates import days_ago
from utils import default_args, VOLUME
from airflow.sensors.filesystem import FileSensor
prod_model_path = '{{ var.value.PROD_MODEL_PATH }}'
with DAG(dag_id='_prod3_3_predict',
default_args=default_args,
schedule_interval="@daily",
start_date=days_ago(0, 2)) as dag:
start = DummyOperator(task_id='start')
data_sensor = FileSensor(task_id='data_sensor',
filepath='data/raw/{{ ds }}/data.csv',
poke_interval=10,
retries=100)
model_sensor = FileSensor(task_id='model_sensor',
filepath='data/model/{{ ds }}/model.pkl',
poke_interval=10,
retries=100)
transformer_sensor = FileSensor(task_id='transformer_sensor',
filepath='data/model/{{ ds }}/transformer.pkl',
poke_interval=10,
retries=100)
prediction = DockerOperator(task_id='prediction',
image='prediction',
command='/data/raw/{{ ds }}/ ' + prod_model_path,
network_mode='bridge',
volumes=[VOLUME],
do_xcom_push=False)
end = DummyOperator(task_id='end')
start >> [data_sensor, model_sensor, transformer_sensor] >> prediction >> end
| UTF-8 | Python | false | false | 1,677 | py | 30 | prod3_3_predict.py | 20 | 0.52415 | 0.512821 | 0 | 42 | 38.904762 | 83 |
jason841226/polymul16x16 | 6,528,350,333,010 | 9051948d233de9aed92652eafee082748a60e397 | 8f7c9eef62e16b6964bd66747a8cac6beaf93f14 | /random_generator.py | f68874768309b23d2a5f117e659b46f8a0dd78a3 | [] | no_license | https://github.com/jason841226/polymul16x16 | 2abea92d0113b190045f3c4a36a79003a9c1c7bb | 2e2466f292b07ef2cb2c69bae1a625e5bbb9af4e | refs/heads/master | 2020-04-19T23:18:14.126450 | 2019-03-15T05:51:46 | 2019-03-15T05:51:46 | 168,492,111 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
f=open('random_input.txt','w');
for i in range(0,16*10000):
f.write(str(random.randint(0,4591)));
f.write(" ");
if i%16==15:
f.write("\n");
| UTF-8 | Python | false | false | 166 | py | 8 | random_generator.py | 1 | 0.590361 | 0.487952 | 0 | 7 | 21.714286 | 38 |
DAVIDJJX/Python_self_practice | 1,958,505,122,157 | c785cd87bf6e9707332a07da648960351011fac1 | f887bdccacd7503c1e524888cca7125b9b128bfb | /20210806/quiz_formal.py | c4dadbb8eaae6ff3b8c2f3504bbc9b71a4c6a7cd | [] | no_license | https://github.com/DAVIDJJX/Python_self_practice | 91575b7960ba5c398b368abc183ae585e28084b0 | c81e3ce031e9049ebf935c8b5863be5fd3394785 | refs/heads/main | 2023-07-13T08:43:16.209413 | 2021-08-10T06:51:13 | 2021-08-10T06:51:13 | 394,317,219 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def add_func(a,b):
c=int(a+b)
return c
def sub_func(d,e):
f=int(e-d)
return f
print(add_func(2,sub_func(3,100))) | UTF-8 | Python | false | false | 134 | py | 10 | quiz_formal.py | 10 | 0.552239 | 0.514925 | 0 | 9 | 14 | 34 |
shixingjian/PythonItems | 704,374,671,041 | 63d543d1725d00a1e6734a674a60e166b0cf07ea | 37f1125544ac1b4800402d981ce030c82b4993d8 | /pythonitems/APIAutoTest20200304/demo/course/add2.py | c66c4a152d8ff974364248731ee8669a16ee38cc | [] | no_license | https://github.com/shixingjian/PythonItems | db6392451fda388ef4f4805aaf9991ec01bd36bd | 6b6a4e4bae2e727581c8805676422f28b8f6231f | refs/heads/master | 2022-11-21T01:17:23.607196 | 2020-07-22T08:37:44 | 2020-07-22T08:37:44 | 281,614,603 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
from config import HOST
#1.请求头-字典类型
dict1={'Content-Type':'application/json'}
#2.请求消息体-字符串类型
payload='''
{
"action" : "add_course_json",
"data" : {
"name":"初中化22学",
"desc":"初中化学课程",
"display_idx":"4"
}
}
'''
payload=payload.encode(encoding='utf-8')
#3.用requests发送post请求,data参数如果传入的是字符串,会按原格式发送
r=requests.post(f'{HOST}/apijson/mgr/sq_mgr/',data=payload,headers=dict1)
print(r.text)
| UTF-8 | Python | false | false | 531 | py | 257 | add2.py | 139 | 0.662763 | 0.641686 | 0 | 23 | 17.26087 | 73 |
Shakiestnerd/PelicanArticleGenerator | 12,077,448,050,607 | 021668668ffb597cebb41a5762497f9038f3632a | 8173af07d65ab5d201157b64fe7dc7cec9b726d1 | /article/output.py | 0d8bd9c39c6d26756f08e64f44aa1f154bb4ac6b | [
"MIT"
] | permissive | https://github.com/Shakiestnerd/PelicanArticleGenerator | 37600fe663993ef8d17e6a61c649abe5713a3349 | 8b1a948535e12f171975aedafa387f9f2de50da2 | refs/heads/master | 2021-09-16T06:25:10.272031 | 2021-08-15T01:00:43 | 2021-08-15T01:00:43 | 241,252,837 | 3 | 0 | MIT | false | 2021-04-15T00:43:38 | 2020-02-18T02:18:31 | 2021-04-15T00:42:48 | 2021-04-15T00:43:25 | 119 | 2 | 0 | 1 | Python | false | false | import os
class Output:
def __init__(self):
super().__init__()
self.title = ""
self.date = ""
self.tags = ""
self.slug = ""
self.category = ""
self.status = ""
self.author = ""
self.output_type = "md"
self.summary = ""
self.is_recipe = False
self.filename = ""
self.base_folder = ""
self.full_file = ""
def save_article(self):
if self.output_type == "md":
body = self.format_markdown()
else:
body = self.format_rst()
if os.path.isdir(self.base_folder):
with open(self.full_file, "w") as out_file:
out_file.writelines(body)
print("File created: " + self.full_file)
else:
print(f"Folder {self.base_folder} not found.")
def format_markdown(self):
content = ["---\n", f"Title: {self.title}\n", f"Date: {self.date}\n",
f"Category: {self.category}\n", f"Tags: {self.tags}\n"]
if "," in self.author:
author_title = "Authors:"
else:
author_title = "Author:"
content.append(f"{author_title} {self.author}\n")
content.append(f"Status: {self.status}\n")
if self.summary:
content.append(f"Summary: {self.summary}\n")
content.append("---\n\n")
if self.is_recipe:
content.append("Insert description of recipe here\n\n")
content.append("![_Image Title_][1]\n\n")
content.append("## Ingredients\n\n")
content.append("List all the ingredients\n\n")
content.append("## Instructions\n\n")
content.append("Write detailed instructions here.\n\n")
content.append("## Remarks\n\n")
content.append("Add closing remarks here\n\n")
content.append('[1]: ../images/_filename_ "_Image Description_"\n')
else:
content.append("Insert markdown article here.\n")
return content
def format_rst(self):
content = [f"{self.title}\n", "=" * len(self.title), f"\n:Date: {self.date}\n",
f":Category: {self.category}\n", f":Tags: {self.tags}\n",
f":Author: {self.author}\n", f":Status: {self.status}\n\n"]
if self.is_recipe:
content.append("Insert description of recipe here\n\n")
content.append(".. image:: ../images/_filename_\n\n")
content.append("Ingredients\n")
content.append("-----------\n\n")
content.append("List all the ingredients\n\n")
content.append("Instructions\n")
content.append("------------\n\n")
content.append("Write detailed instructions here.\n\n")
content.append("Remarks\n")
content.append("-------\n\n")
content.append("Add closing remarks here\n\n")
else:
content.append("Insert restructured text article here.\n")
return content
| UTF-8 | Python | false | false | 3,026 | py | 21 | output.py | 11 | 0.523463 | 0.522802 | 0 | 77 | 38.298701 | 87 |
confluentinc/ducktape | 2,473,901,179,576 | 226842e7e4846bf8e907f6938696c3e531a1f094 | 9909a6373749cfb444f42239ee8edcf12979440f | /ducktape/mark/mark_expander.py | f3451e7160a6914ba0043e36464e682c10ccfe07 | [
"Apache-2.0"
] | permissive | https://github.com/confluentinc/ducktape | 2d342b57b60219ff85fb3a4e9dae3b88a2920358 | da162ed9db6652ac562cdaacaf1b96b5b527dbef | refs/heads/master | 2023-08-28T04:57:06.580880 | 2023-08-18T11:46:28 | 2023-08-18T11:46:28 | 28,360,638 | 328 | 99 | null | false | 2023-09-04T17:26:57 | 2014-12-22T21:38:32 | 2023-08-29T08:47:49 | 2023-09-04T17:21:36 | 1,392 | 295 | 95 | 58 | Python | false | false | # Copyright 2016 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._mark import parametrized, Parametrize, _is_parametrize_mark
from ducktape.tests.test import TestContext
class MarkedFunctionExpander(object):
"""This class helps expand decorated/marked functions into a list of test context objects. """
def __init__(self, session_context=None, module=None, cls=None, function=None, file=None, cluster=None):
self.seed_context = TestContext(
session_context=session_context, module=module, cls=cls, function=function, file=file, cluster=cluster)
if parametrized(function):
self.context_list = []
else:
self.context_list = [self.seed_context]
def expand(self, test_parameters=None):
"""Inspect self.function for marks, and expand into a list of test context objects useable by the test runner.
"""
f = self.seed_context.function
# If the user has specified that they want to run tests with specific parameters, apply the parameters first,
# then subsequently strip any parametrization decorators. Otherwise, everything gets applied normally.
if test_parameters is not None:
self.context_list = Parametrize(**test_parameters).apply(self.seed_context, self.context_list)
for m in getattr(f, "marks", []):
if test_parameters is None or not _is_parametrize_mark(m):
self.context_list = m.apply(self.seed_context, self.context_list)
return self.context_list
| UTF-8 | Python | false | false | 2,052 | py | 128 | mark_expander.py | 79 | 0.706628 | 0.702729 | 0 | 46 | 43.608696 | 118 |
Jardelpz/cloud-workflow | 4,303,557,252,535 | 5c30d724572e47dc325cad04b5d6eec1dda34c96 | 76d2148c62c24e51f22a75cfe7e44a7cfa442112 | /consumer/utils/s3.py | d87f932e1d91112a4c75a622c1b3773e613c10a8 | [] | no_license | https://github.com/Jardelpz/cloud-workflow | f6cec1c1a664d6a2468616e4bf47c9a9690f38be | f0eaa65a18a81c41c561a37fe2ebe4b0b480a814 | refs/heads/main | 2023-02-12T06:10:38.630757 | 2021-01-12T01:13:22 | 2021-01-12T01:13:22 | 321,170,565 | 4 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import boto3
from constants import REGION_NAME, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY
s3 = boto3.client('s3', region_name=REGION_NAME, aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY) | UTF-8 | Python | false | false | 241 | py | 17 | s3.py | 13 | 0.709544 | 0.692946 | 0 | 7 | 33.571429 | 85 |
k1-k0/wishl | 14,310,831,070,040 | 0cec909784c8e632af8ba49c09153068f1457436 | 8325f238b8d59f9302a945340070809aab3b1901 | /wishl/urls.py | 36d52409050e76876e68deb02272b6fad7129e95 | [] | no_license | https://github.com/k1-k0/wishl | c2c512499f2f18aa08b5427e4d2ba6435343a8b5 | f04e134b37a26b58d1a5acfdf41bab5d94eb8ae7 | refs/heads/main | 2022-12-26T04:58:00.238984 | 2020-10-10T11:35:46 | 2020-10-10T11:35:46 | 302,756,186 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """wishl URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
from rest_framework import routers
from wishl.wishl_api import views
router = routers.DefaultRouter()
router.register(r'users', views.UserViewSet)
router.register(r'wishes', views.WishViewSet)
router.register(r'moneyboxes', views.MoneyboxViewSet)
router.register(r'shops', views.ShopViewSet)
router.register(r'images', views.ImageViewSet)
router.register(r'tags', views.TagViewSet)
urlpatterns = [
path('', include(router.urls)),
path('api-auth', include('rest_framework.urls', namespace='rest_framework')),
]
| UTF-8 | Python | false | false | 1,222 | py | 24 | urls.py | 22 | 0.735679 | 0.729133 | 0 | 34 | 34.941176 | 81 |
mrscp/bengali-sentence-correction-notebook | 2,671,469,687,738 | 82877fa8fb95937188bd077d17c8ab035257ab1c | 2fec5c80132acca8d2e07f0eef8ac690c8740b22 | /processors/data.py | dbcc1617492aa0b2574cd37dbc4b561df6ed12c3 | [] | no_license | https://github.com/mrscp/bengali-sentence-correction-notebook | 1f7e5880cb58761900161ac5f86161991a3a639a | 61d9d53337401735b62234bb3ac6674cca6a3e68 | refs/heads/master | 2020-07-23T23:46:05.964192 | 2019-09-11T15:43:07 | 2019-09-11T15:43:07 | 207,741,828 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from common.config import Config
from common.path import ProjectPath
from os.path import join
class RawData:
def __init__(self):
self.__config = Config()
self.__path = ProjectPath()
def get_data(self):
data_location = self.__path.format_location(
join(
self.__config["PROCESS_DATA"]["DATA_LOCATION"],
self.__config["PROCESS_DATA"]["FILENAME"]
)
)
print(data_location)
# with open(, "r") as file:
# for line in file:
# if i > config["max_line_read"]:
# break
# wss = line.strip()
# ws = wss.split(" ")
#
# vocabulary.extend(ws)
# lines.append(ws)
#
# i += 1
# if i % report_point == 0:
# print("{} lines processed".format(i)) | UTF-8 | Python | false | false | 920 | py | 7 | data.py | 5 | 0.453261 | 0.451087 | 0 | 32 | 27.78125 | 63 |
amychenmit/misdj-ww | 3,367,254,399,842 | 4e57f06a04ded9f594c76a11ddc25609ce5e6927 | f379ea004ed5e309aba62914790cd605a3388a81 | /note/views.py | 57aedf5072ff42da0838a7849a387a24cc83c158 | [] | no_license | https://github.com/amychenmit/misdj-ww | 91185a5fdc8c7e62af95e86e779969ad2ef0059b | 0ab1fbe12b9f6d85eb01dea8b8fe87d8e42a69ff | refs/heads/master | 2022-02-23T16:40:11.180372 | 2019-10-24T03:12:32 | 2019-10-24T03:12:32 | 214,589,934 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render
from django.db.models import Count
from django.db.models.functions import TruncMonth
from datetime import date, timedelta
from datetime import datetime
from itertools import chain
# https://kaiching.org/pydoing/py/python-library-datetime.html
from .models import Note
from .models import Wk
from .models import Work
from .models import Work2
def index(request):
list = Note.objects.all()
context = {'list': list}
return render(request, 'note/index.html', context)
def ww(request):
list = Wk.objects.all()
context = {'list': list}
return render(request, 'note/ww.html', context)
def ww2(request):
list = Wk.objects.all()
context = {'list': list}
return render(request, 'note/ww2.html', context)
#表格 open quiz
def work(request):
list = Work.objects.all()
context = {'list': list}
return render(request, 'note/work.html', context)
"""
list1 = Work.objects.values('place', 'date1__year', 'date1__month').annotate(
num_dates=Count('date1__month', distinct = True) , num_worker=Count('worker', distinct = True), worktimes=Count('id')).order_by('place')
temp_p = Work.objects.order_by('place').values('place').distinct()
dis_p = temp_p.values_list('place', flat=True).order_by('place')
dis_m = {7,8,9}
print("DEBUG ... ")
for i in dis_p:
for j in dis_m
try x = d.filter(place=i, date1__month=j)
"""
#妹妹的實驗室:這個寫法可以應付多個不限定月份
def work3(request):
#輸入想要的年份即可 Generate 整年的數據
year = 2019
list0={}
dis_p = Work.objects.values_list('place', flat=True).distinct().order_by('place')
num_p = dis_p.count()
dis_m = Work.objects.order_by('date1__month').values('date1__month').distinct()
num_m = dis_m.count()
for i in range(num_p):
#現在只可支援連續的月份
for j in range(num_m):
#目前的寫法如果 place 超過 99 筆會有錯誤,到時候改成 *1000就可以了
list0[i+100*j]={'place':dis_p[i], 'date1__year':year ,'date1__month':dis_m[j]['date1__month']}
list0 = list0.values()
list1 = Work.objects.values('place', 'date1__year', 'date1__month').annotate(
num_dates=Count('date1__month', distinct = True) , num_worker=Count('worker', distinct = True), worktimes=Count('id')).order_by('place')
firstdate = Work.objects.values('date1').order_by('date1').first()
lastdate = Work.objects.values('date1').order_by('date1').last()
for x in list0:
list2 = list1.filter(place=x['place'],date1__month=x['date1__month'],date1__year=year)
x['New_num_dates']=x['New_num_worker']=x['New_worktimes']=0
if list2:
x['New_num_dates']=list2[0]['num_dates']
x['New_num_worker']=list2[0]['num_worker']
x['New_worktimes']=list2[0]['worktimes']
context = {'list': list0, 'dis_m':dis_m, 'year':year, 'firstdate':firstdate, 'lastdate':lastdate}
return render(request, 'note/work3.html', context)
def init_ww(request):
def getList():
return Wk.objects.order_by('yr','num')
def getCnt():
return Wk.objects.count()
# https://docs.djangoproject.com/en/2.2/intro/tutorial02/
# list = Wk.objects.order_by('yr','num')
list = getList()
for x in list:
x.delete()
k1 = getCnt()
d1='2018-12-31'
date1 = datetime.strptime(d1, "%Y-%m-%d")
date2 = date1 + timedelta(days=6)
for num in range(1,53):
# print(num) # to ensure num is 1,2,3 ..., 52
date2 = date1 + timedelta(days=6)
x =Wk(yr=2019,num=num,date1=date1,date2=date2)
x.save()
date1 = date2 + timedelta(days=1)
k2 = getCnt()
user = request.user
key={'k1':k1,'k2':k2}
list = getList()
context = {'user': user,'key': key,'list':list }
return render(request, 'note/ww.html', context)
#成功版本
def work2(request):
#不能解決算 m 的方式,因為他會是個<QuerySet [(7,), (8,), (9,)]>
list_month = Work.objects.values_list('date1__month').distinct()
for z in list_month:
z[0]
print(list_month)
list0 = Work.objects.values('place').annotate(
num_dates=Count('date1', distinct = True) , num_worker=Count('worker', distinct = True), worktimes=Count('id'))
for x in list0:
for i in {7,8,9}:
listi = Work.objects.filter(place=x['place'],date1__year=2019,date1__month=i).values('place').annotate(
num_dates=Count('date1', distinct = True) , num_worker=Count('worker', distinct = True), worktimes=Count('id')).order_by('place')
x[str(i)+'num_dates']=x[str(i)+'num_worker']=x[str(i)+'worktimes']=0
#如果list1有值的话,就运行缩排那些指令。
if listi:
x[str(i)+'num_dates']=listi[0]['num_dates']
x[str(i)+'num_worker']=listi[0]['num_worker']
x[str(i)+'worktimes']=listi[0]['worktimes']
print(list0)
list_keys = list0[0].keys()
context = {'list': list0, 'month':list_month, 'keys':list_keys}
return render(request, 'note/work2.html', context)
#迴圈版本
"""
def work2(request):
list0 = Work.objects.values('place').annotate(
num_dates=Count('date1', distinct = True) , num_worker=Count('worker', distinct = True), worktimes=Count('id'))
list_month = {7,8,9}
for x in list0:
for i in list_month:
listi = Work.objects.filter(place=x['place'],date1__year=2019,date1__month=i).values('place').annotate(
num_dates=Count('date1', distinct = True) , num_worker=Count('worker', distinct = True), worktimes=Count('id'))
x[str(i)+'num_dates']=x[str(i)+'num_worker']=x[str(i)+'worktimes']=0
if listi:
x[str(i)+'num_dates']=listi[0]['num_dates']
x[str(i)+'num_worker']=listi[0]['num_worker']
x[str(i)+'worktimes']=listi[0]['worktimes']
list_keys = list0[0].keys()
context = {'list': list0, 'month':list_month, 'keys':list_keys}
return render(request, 'note/work2.html', context)
""" | UTF-8 | Python | false | false | 6,311 | py | 14 | views.py | 9 | 0.59129 | 0.565818 | 0 | 207 | 28.400966 | 144 |
afcarl/mambo | 13,322,988,595,317 | 8d263045c8e0ce2976291e3aa8e5cc517d035cb6 | 252afd63d594bce727c92878196b438f5ff8df65 | /utils/create_mambo_crossnet_table.py | 534876b36c55408390193dea23b96e3f5638dd3d | [] | no_license | https://github.com/afcarl/mambo | 0f22bb832288c83fad19d0da9291b09e23c3c5cb | cd47cb5b68c5b344fb7d95ee7d180cd003c04a2a | refs/heads/master | 2020-12-04T04:37:06.655032 | 2017-12-03T09:15:13 | 2017-12-03T09:15:13 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
file: create_mambo_crossnet_table.py
Script that creates Mambo tables for a given crossnet.
Usage:
python create_mambo_crossnet_table.py <input_file_path> <src_file_path> <dst_file_path> <dataset_name> <dataset_id>
Positional Arguments:
input_file: Path to the input file; Input file should be a tsv.
src_file: Path to a dataset specific file, as outputted by create_mambo_mode_table.py,
corresponding to the source mode. File name MUST MATCH FORMAT:
miner-<mode_name>-<dataset_id>-<dataset>-<date>.tsv
dst_file: Path to a dataset specific file, as outputted by create_mambo_mode_table.py,
corresponding to the destination mode. File name MUST MATCH FORMAT:
miner-<mode_name>-<dataset_id>-<dataset>-<date>.tsv
dataset_name: Name of dataset being used to create the mambo crossnet tables i.e. the
dataset the input file comes from. e.g. STRING
dataset_id: unique integer id for this dataset.
Optional arguments:
--src_node_index: If there are multiple columns in the input tsv, the index of the column with the src node id.
Defaults to 0.
--dst_node_index: If there are multiple columns in the input tsv, the index of the column with the dst node id.
Defaults to 1.
--output_dir: Directory to create output files. Defaults to the current working directory.
--full_crossnet_file: Name of output file tsv containing a list of <mambo_id>\t<dataset_id>.
Defaults to output_dir/miner-<src_mode_name>-<dst_mode_name>-<date>.tsv
--db_edge_file: Name of output file tsv for a specific dataset; contains a list of <mambo_id>\t<dataset_specific_entity_id>
Defaults to output_dir/miner-<src_mode_name>-<dst_mode_name>-<dataset_id>-<dataset>-<date>.tsv
--mambo_id_counter_start Start assigning mambo ids from this integer value; this number MUST be greater
than any id found in the full crossnet file. If not specified, finds the max id in the
full_crossnet_file.
--skip_missing_ids Flag; If any of the ids in the input tsv do not have mambo ids (which are fetched from
the src and dst files), skip the line and continue parsing the data.
--src_mode_filter The name of a function in utils.py that should be applied to the source node id in
in the input file before using it to look up the mambo id in the src_file. Defaults to None.
--dst_mode_filter The name of a function in utils.py that should be applied to the destination node id in
in the input file before using it to look up the mambo id in the dst_file. Defaults to None.
Example usage:
Creating files for genes-function relationships using Gene Ontology:
Input files: go.tsv, miner-gene-0-GO-20160520.tsv, miner-function-0-GO-20160520.tsv
Output directory: outputs/genes-functions/
Output files: miner-gene-function-20160520.tsv, miner-gene-function-0-GO-20160520.tsv
Workflow:
python create_mambo_crossnet_table.py go.tsv miner-gene-0-GO-20160520.tsv miner-function-0-GO-20160520.tsv GO 0 --output_dir outputs/genes-functions/
'''
import argparse
import utils
import os
COMMENT = ["#", "!", "\n"]
DELIMITER = "\t"
def create_mambo_crossnet_table(input_file, src_file, dst_file, dataset_name,
db_id, src_node_index, dst_node_index, mode_name1,
mode_name2, output_dir, full_crossnet_file, db_edge_file,
src_mode_filter, dst_mode_filter, mambo_id_counter_start,
skip_missing_ids, verbose=False, delimiter=DELIMITER):
inFNm = input_file
srcFile = src_file
dstFile = dst_file
dataset = dataset_name
db_id = db_id
srcIdx = src_node_index
dstIdx = dst_node_index
src_db_id = utils.parse_dataset_id_from_name(os.path.basename(srcFile))
dst_db_id = utils.parse_dataset_id_from_name(os.path.basename(dstFile))
mode_name1 = utils.parse_mode_name_from_name(os.path.basename(srcFile)) if mode_name1 is None else mode_name1
mode_name2 = utils.parse_mode_name_from_name(os.path.basename(dstFile)) if mode_name2 is None else mode_name2
outFNm = full_crossnet_file
if outFNm is None:
outFNm = os.path.join(output_dir, utils.get_full_cross_file_name(mode_name1, mode_name2))
outFNm2 = db_edge_file
if outFNm2 is None:
outFNm2 = os.path.join(output_dir, utils.get_cross_file_name(mode_name1, mode_name2, db_id, dataset))
src_mapping = utils.read_mode_file(srcFile)
if os.path.samefile(srcFile, dstFile):
dst_mapping = src_mapping
else:
dst_mapping = utils.read_mode_file(dstFile)
src_filter = utils.get_filter(src_mode_filter)
dst_filter = utils.get_filter(dst_mode_filter)
add_schema = True
counter = mambo_id_counter_start
if counter == -1:
counter = utils.get_max_id(outFNm)
if verbose:
print 'Starting at mambo id: %d' % counter
with open(inFNm, 'r') as inF, open(outFNm, 'a') as fullF, open(outFNm2, 'w') as dbF:
# Add schema/metadata
if counter == 0:
fullF.write('# Full crossnet file for %s to %s\n' % (mode_name1, mode_name2))
fullF.write('# File generated on: %s\n' % utils.get_current_date())
fullF.write('# mambo_eid%sdataset_id%ssrc_mambo_nid%sdst_mambo_nid\n' % (
delimiter, delimiter, delimiter))
dbF.write('# Crossnet table for dataset: %s\n' % dataset)
dbF.write('# File generated on: %s\n' % utils.get_current_date())
# Process file
for line in inF:
if line[0] in COMMENT:
continue
vals = utils.split_then_strip(line, delimiter)
if add_schema:
attrs_schema = '# mambo_eid%ssrc_dataset_id%sdst_dataset_id' % (delimiter, delimiter)
for i in range(len(vals)):
if i != srcIdx and i != dstIdx:
attrs_schema += '%sC%d' % (delimiter, i)
dbF.write('%s\n' % attrs_schema)
add_schema = False
id1 = vals[srcIdx]
id2 = vals[dstIdx]
if src_filter:
id1 = src_filter(id1)
if dst_filter:
id2 = dst_filter(id2)
if id1 == '' or id2 == '':
continue
if skip_missing_ids and (id1 not in src_mapping or id2 not in dst_mapping):
#print id1, id2
continue
attr_strs = ''
for i in range(len(vals)):
if i != srcIdx and i != dstIdx:
attr_strs += delimiter + vals[i]
fullF.write('%d%s%d%s%d%s%d\n' % (
counter, delimiter, db_id, delimiter, src_mapping[id1], delimiter, dst_mapping[id2]))
dbF.write('%d%s%d%s%d%s\n' % (counter, delimiter, src_db_id, delimiter, dst_db_id, attr_strs))
counter += 1
if verbose:
print 'Ending at mambo id: %d' % counter
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Create mambo edge tables')
parser.add_argument('input_file', help='input file name. File should be a tsv, containing interactions between ids found in src_file_name and ids found in dst_file_name')
parser.add_argument('src_file', help='input file name. Should be a file outputted by create_mambo_mode_table (with properly formatted name).')
parser.add_argument('dst_file', help='input file name. Should be a file outputted by create_mambo_mode_table (with properly formatted name).')
parser.add_argument('dataset_name', type=str, help='name of dataset')
parser.add_argument('db_id', type=int, help='int id for this dataset')
parser.add_argument('--mode_name1', type=str, default = None)
parser.add_argument('--mode_name2', type=str, default=None)
parser.add_argument('--src_node_index', type=int, help='column index that contains src node ids (NOT mambo ids, from src_input_file)', default=0)
parser.add_argument('--dst_node_index', type=int, help='column index that contains dst node ids (NOT mambo ids, from dst_input_file)', default=1)
parser.add_argument('--output_dir', help='directory to output files; either this argument or full_crossnet_file and db_edge_file MUST be specified', default='.')
parser.add_argument('--full_crossnet_file', help='output file name; outputs a list of mambo ids, the db ids (db the mambo id was derived from), and source and destination mambo node ids;' \
+ 'note that this file is appended to; OVERRIDES output_dir argument', default=None)
parser.add_argument('--db_edge_file', help='output file name; output contains mapping of mambo ids to dataset ids; OVERRIDES output dir argument', default=None)
parser.add_argument('--skip_missing_ids', action='store_true', help='don\'t throw an error if ids in input_file not found in src or dst file.')
parser.add_argument('--mambo_id_counter_start', type=int, help='where to start assigning mambo ids', default=-1)
parser.add_argument('--src_mode_filter', type=str, default=None)
parser.add_argument('--dst_mode_filter', type=str, default=None)
args = parser.parse_args()
inFNm = args.input_file
srcFile = args.src_file
dstFile = args.dst_file
dataset = args.dataset_name
db_id = args.db_id
srcIdx = args.src_node_index
dstIdx = args.dst_node_index
mode_name1 = args.mode_name1
mode_name2 = args.mode_name2
output_dir = args.output_dir
outFNm = args.full_crossnet_file
outFNm2 = args.db_edge_file
src_mode_filter = args.src_mode_filter
dst_mode_filter = args.dst_mode_filter
counter = args.mambo_id_counter_start
skip_missing_ids = args.skip_missing_ids
create_mambo_crossnet_table(inFNm, srcFile, dstFile, dataset,
db_id, srcIdx, dstIdx, mode_name1,
mode_name2, output_dir, outFNm, outFNm2,
src_mode_filter, dst_mode_filter, counter,
skip_missing_ids)
| UTF-8 | Python | false | false | 10,401 | py | 516 | create_mambo_crossnet_table.py | 6 | 0.629843 | 0.619748 | 0 | 200 | 51.005 | 193 |
Hope6537/hope6537-utils | 14,671,608,296,890 | b9cc20018f9861f48bc84349aaa158aa59bf12d1 | 6f8295e82bcd2e8c0618ba65867ab895603b3b56 | /hope-python-script/zhihu/zhihu.py | 3f7640a752b2b9a3fe6376287e609ded425e79b5 | [
"Apache-2.0"
] | permissive | https://github.com/Hope6537/hope6537-utils | 995cd1d179ea64d19bcb2537e6b0ab808bba3b18 | 503ba3a42c5899130d496a4693d05fca27136e9b | refs/heads/master | 2020-12-19T21:14:20.488435 | 2019-01-12T04:54:32 | 2019-01-12T04:54:32 | 30,529,875 | 4 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # encoding:utf-8
import requests
import requests
cookies = {
'd_c0': 'AACAamB1LQqPToMcUMk8zMHR10FwezFwQsg=|1467629360',
'_zap': '540a813f-95b8-477d-9141-728140f8b544',
'_za': 'b9974a5f-14d0-4fe3-971a-0ebed07a7c0f',
'_ga': 'GA1.2.1022349923.1490619099',
'q_c1': '26735a973129474dac8f09af940badb3|1502676273000|1491468158000',
'r_cap_id': 'YTY3MzRiZjNkNTIxNGI1OGJiOTcwNmI0NDBhMGE0OGQ=|1510285264|09b6bc23aea437c66511ae0ff3ac10f685ff3ce6',
'cap_id': 'ZDE5NjNhYTMxNzY5NGMzOTg2NjAyZmYwMDcyNGY5ZjQ=|1510285264|3b964da66815fff59b200dcef7a22da45202322e',
'z_c0': 'Mi4xM3E5S0FBQUFBQUFBQUlCcVlIVXRDaGNBQUFCaEFsVk4xbTN5V2dBNTZ5dEkyVlc5OTd3QjU1R3Rrb1NmYTVBWkxB|1510285270|9c1c1a171c16873e03ddcab5a3fe2d6dce757358',
'aliyungf_tc': 'AQAAAErtZUYRUgEAwk3gerg+xadr2Zq8',
's-q': '%E5%88%98%E9%91%AB',
's-i': '1',
'sid': 'k0eskato',
'__utma': '51854390.1022349923.1490619099.1510734700.1510734700.1',
'__utmb': '51854390.0.10.1510734700',
'__utmc': '51854390',
'__utmz': '51854390.1510734700.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none)',
'__utmv': '51854390.110-1|2=registration_date=20140405=1^3=entry_date=20140405=1',
'_xsrf': '8b66d9ea-9296-4e5b-ba26-98984118d3af',
}
headers = {
'authorization': 'Bearer Mi4xM3E5S0FBQUFBQUFBQUlCcVlIVXRDaGNBQUFCaEFsVk4xbTN5V2dBNTZ5dEkyVlc5OTd3QjU1R3Rrb1NmYTVBWkxB|1510285270|9c1c1a171c16873e03ddcab5a3fe2d6dce757358',
'DNT': '1',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6,ja;q=0.4,zh-TW;q=0.2',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
'accept': 'application/json, text/plain, */*',
'Referer': 'https://www.zhihu.com/',
'X-UDID': 'AACAamB1LQqPToMcUMk8zMHR10FwezFwQsg=',
'Connection': 'keep-alive',
'X-API-VERSION': '3.0.53',
}
params = (
('action_feed', 'True'),
('limit', '10'),
('session_token', '2de830e516bd9d2b2fb54bb97712308f'),
('action', 'down'),
('after_id', '19'),
('desktop', 'true'),
)
data = requests.get('https://www.zhihu.com/api/v3/feed/topstory', headers=headers, params=params, cookies=cookies)
print(data.text)
| UTF-8 | Python | false | false | 2,236 | py | 766 | zhihu.py | 721 | 0.704383 | 0.466905 | 0 | 50 | 43.72 | 175 |
KrutkoAndrii/Itea_Graduation_Project | 6,614,249,670,091 | 2e758aa15911c5e69b913f17851e3a0eb82d6973 | c121e8444621badb8af60777130f15f2aef94fb2 | /Itea_Graduation_Project/database/db_tools.py | d66ebff56c7b5ad355dcebb551d8215a63bb0f18 | [] | no_license | https://github.com/KrutkoAndrii/Itea_Graduation_Project | 9ea9209d406604f9c7a966b504cece8b62a0351a | 61dca84e5291d064f12ad6cdece8c9853e0d5257 | refs/heads/master | 2022-02-01T07:51:09.907425 | 2019-07-20T21:31:42 | 2019-07-20T21:31:42 | 197,945,302 | 1 | 0 | null | false | 2019-07-20T21:03:45 | 2019-07-20T15:01:52 | 2019-07-20T15:11:49 | 2019-07-20T21:03:44 | 17 | 1 | 0 | 0 | null | false | false | ''' package for connection to DB '''
import psycopg2
import sys
from configparser import ConfigParser
from psycopg2 import sql
def get_ini_file():
'''read ini file for connection database'''
config = ConfigParser()
config.read(sys.path[0] + r'\database\db.ini')
return config.get('db', 'db_name'),\
config.get('db', 'db_user'),\
config.get('db', 'db_password'),\
config.get('db', 'db_location')
def is_check_db_instance(dbase, user_in, password_in, server):
'''Is 'things' database instance?'''
try:
conn_ch = psycopg2.connect(user=user_in, password=password_in,
host=server)
cur = conn_ch.cursor()
cur.execute("SELECT 1 FROM pg_database WHERE datname='{dbname}'".
format(dbname=dbase))
except psycopg2.Error as e:
print("No connect to server. Check logon/password in db.ini")
exit()
if cur.fetchone():
cur.close()
conn_ch.close()
return True
else:
cur.close()
conn_ch.close()
return False
def create_db(dbase, user_in, password_in, server):
''' create new base if its new locate '''
with open(sys.path[0]+r'\database\base.sql', 'r') as myfile:
file_sql = myfile.read()
script = file_sql.split(';')
conn = psycopg2.connect(user=user_in, password=password_in, host=server)
conn.autocommit = True
cur = conn.cursor()
print('Create database ' + dbase)
cur.execute("CREATE DATABASE {}".format(dbase))
cur.close()
conn.close()
conn = connect_to_db(dbase, user_in, password_in, server)
conn.autocommit = True
cur = conn.cursor()
for string_sql in script:
if len(string_sql.replace('\n', '')) > 0:
print(string_sql.replace('\n', ''))
cur.execute(string_sql.replace('\n', ''))
cur.close()
conn.close()
def connect_to_db(dbase, user_in, password_in, server):
'''try to connect database'''
try:
conn = psycopg2.connect(dbname=dbase, user=user_in,
password=password_in, host=server)
except psycopg2.ProgrammingError as e:
print("Data base doesn`t exist!")
return conn
def sql_result(conn, sql_text):
with conn.cursor() as cursor:
conn.autocommit = True
sql_create = sql.SQL(sql_text)
try:
cursor.execute(sql_create)
data = cursor.fetchall()
except psycopg2.Error as e:
return 0
cursor.close()
conn.close()
return data
def is_sql_no_result(conn, sql_text):
with conn.cursor() as cursor:
conn.autocommit = True
sql_create = sql.SQL(sql_text)
try:
cursor.execute(sql_create)
except psycopg2.Error as e:
return False
cursor.close()
conn.close()
return True
if __name__ == "__main__":
print(" This module not for running!")
| UTF-8 | Python | false | false | 2,977 | py | 15 | db_tools.py | 13 | 0.581122 | 0.576419 | 0 | 100 | 28.77 | 76 |
maiot-io/zenml | 2,774,548,876,273 | ca502f80393f49e8a274c0449b6b3ad7a5dfaaae | 66b4903aeedebdc1fef924fb8ce4efe068f6a495 | /src/zenml/integrations/deepchecks/steps/deepchecks_data_drift.py | fbe53d82456bd10b73628a2c76c049ae2eca03fb | [
"Apache-2.0"
] | permissive | https://github.com/maiot-io/zenml | c64aea52daa9b0b7d2abdfd6eb89d7f8c5a34266 | 99a0861765b1dad3651ccfa89ae1c83379f201ab | refs/heads/main | 2023-09-05T15:44:25.632078 | 2023-09-05T12:20:11 | 2023-09-05T12:20:11 | 314,197,645 | 1,275 | 75 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Copyright (c) ZenML GmbH 2022. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Implementation of the Deepchecks data drift validation step."""
from typing import Any, Dict, Optional, Sequence, cast
import pandas as pd
from deepchecks.core.suite import SuiteResult
from zenml import step
from zenml.integrations.deepchecks.data_validators.deepchecks_data_validator import (
DeepchecksDataValidator,
)
from zenml.integrations.deepchecks.validation_checks import (
DeepchecksDataDriftCheck,
)
@step
def deepchecks_data_drift_check_step(
reference_dataset: pd.DataFrame,
target_dataset: pd.DataFrame,
check_list: Optional[Sequence[DeepchecksDataDriftCheck]] = None,
dataset_kwargs: Optional[Dict[str, Any]] = None,
check_kwargs: Optional[Dict[str, Any]] = None,
run_kwargs: Optional[Dict[str, Any]] = None,
) -> SuiteResult:
"""Run data drift checks on two pandas datasets using Deepchecks.
Args:
reference_dataset: Reference dataset for the data drift check.
target_dataset: Target dataset to be used for the data drift check.
check_list: Optional list of DeepchecksDataDriftCheck identifiers
specifying the subset of Deepchecks data drift checks to be
performed. If not supplied, the entire set of data drift checks will
be performed.
dataset_kwargs: Additional keyword arguments to be passed to the
Deepchecks `tabular.Dataset` or `vision.VisionData` constructor.
check_kwargs: Additional keyword arguments to be passed to the
Deepchecks check object constructors. Arguments are grouped for
each check and indexed using the full check class name or
check enum value as dictionary keys.
run_kwargs: Additional keyword arguments to be passed to the
Deepchecks Suite `run` method.
Returns:
A Deepchecks suite result with the validation results.
"""
data_validator = cast(
DeepchecksDataValidator,
DeepchecksDataValidator.get_active_data_validator(),
)
return data_validator.data_validation(
dataset=reference_dataset,
comparison_dataset=target_dataset,
check_list=check_list,
dataset_kwargs=dataset_kwargs or {},
check_kwargs=check_kwargs or {},
run_kwargs=run_kwargs or {},
)
| UTF-8 | Python | false | false | 2,898 | py | 1,158 | deepchecks_data_drift.py | 920 | 0.71256 | 0.7098 | 0 | 72 | 39.25 | 85 |
KittyMac/MLClock | 15,908,558,877,231 | a23d3602679ef03c86e5f68acc4e55d49b2997f4 | aaec82ed8d3d077afe8f6ca10db463a23eaf6635 | /time_detection/train.py | 3a446901b1a946bc27d24aed6dc1acbd65ddca04 | [
"MIT"
] | permissive | https://github.com/KittyMac/MLClock | 4cf0316deeccd311de87d5e0fe764a8450e472e8 | 9cfd03395ee14383aa3ee2d60a9644ceb5362737 | refs/heads/master | 2021-07-11T05:03:54.313294 | 2019-02-12T02:20:02 | 2019-02-12T02:20:02 | 142,016,187 | 4 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import division
import sys
sys.path.insert(0, '../')
from keras import backend as keras
from keras.preprocessing import sequence
from dateutil import parser
import numpy as np
import coremltools
import model
import data
import json
import operator
import keras.callbacks
import random
import time
import sys
import math
import signal
import time
import coremltools
######
# allows us to used ctrl-c to end gracefully instead of just dying
######
class SignalHandler:
stop_processing = False
def __init__(self):
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self,signum, frame):
self.stop_processing = True
######
def Learn():
# 1. create the model
print("creating the model")
_model = model.createModel(True)
# 2. train the model
print("initializing the generator")
batch_size = 1
generator = data.ClockGenerator(model.IMG_SIZE,model.INCLUDE_SECONDS_HAND,0.2)
generator.shakeVariance = 16
iterations = 1000000
print("beginning training")
handler = SignalHandler()
i = 0
while True:
if handler.stop_processing:
break
n = int(50000)
print(i)
Train(generator,_model,n)
i += n
if i >= iterations:
break
_model.save(model.MODEL_H5_NAME)
def Convert():
output_labels = []
output_labels.append("notclock")
for i in range(0,12):
output_labels.append("hour%d" % i)
for i in range(0,60):
output_labels.append("minute%d" % i)
if model.INCLUDE_SECONDS_HAND:
for i in range(0,60):
output_labels.append("second%d" % i)
coreml_model = coremltools.converters.keras.convert(model.MODEL_H5_NAME,input_names='image',image_input_names='image',class_labels=output_labels, image_scale=1/255.0)
coreml_model.author = 'Rocco Bowling'
coreml_model.short_description = 'model to tell time off of an analog clock'
coreml_model.input_description['image'] = 'image of the clock face'
coreml_model.save(model.MODEL_COREML_NAME)
def Train(generator,_model,n):
train,label = generator.generateClockFaces(n)
batch_size = 32
if n < batch_size:
batch_size = n
_model.fit(train,label,batch_size=batch_size,shuffle=True,epochs=1,verbose=1)
def Test():
_model = model.createModel(True)
generator = data.ClockGenerator(model.IMG_SIZE,model.INCLUDE_SECONDS_HAND,0.2)
generator.shakeVariance = 16
train,label = generator.generateClockFaces(12*60*60)
results = _model.predict(train)
correct = 0
for i in range(0,len(label)):
expected = generator.convertOutputToTime(label[i])
predicted = generator.convertOutputToTime(results[i])
if expected == predicted:
correct += 1
print("expected", expected, "predicted", predicted, "correct", expected == predicted)
print("correct", correct, "total", len(label))
def Test2(timeAsString):
parsedTime = parser.parse(timeAsString)
_model = model.createModel(True)
generator = data.ClockGenerator(model.IMG_SIZE,model.INCLUDE_SECONDS_HAND,0.2)
generator.shakeVariance = 16
train,label = generator.generateClockFace(parsedTime.hour, parsedTime.minute)
results = _model.predict(train)
for i in range(0,len(label)):
filepath = '/tmp/clock_%s.png' % (generator.convertOutputToTime(results[i]))
generator.saveImageToFile(train[i], filepath)
print("expected", generator.convertOutputToTime(label[i]), "predicted", generator.convertOutputToTime(results[i]), "file", filepath)
if __name__ == '__main__':
if sys.argv >= 2:
if sys.argv[1] == "test":
Test()
elif sys.argv[1] == "learn":
Learn()
elif sys.argv[1] == "convert":
Convert()
else:
Test2(sys.argv[2])
else:
Test()
| UTF-8 | Python | false | false | 3,654 | py | 14 | train.py | 10 | 0.712644 | 0.694581 | 0 | 150 | 23.353333 | 167 |
reeFridge/djangoSandbox | 4,200,478,047,145 | f9044112762175aae805c384425008a9ed1e312f | 32e4da428fead360cd5ecbc64e60d7e745f0e399 | /firstapp/novels/urls.py | e39ba9d2aaccb3b05bc8b0456ec4e52bbb3835d5 | [] | no_license | https://github.com/reeFridge/djangoSandbox | 947adda4586701dde876b5ebd0ed4b4a8ac1db90 | efb9f169b172923dc30a833270d8429e92c76ec0 | refs/heads/master | 2021-01-10T05:02:06.060190 | 2016-01-01T23:05:30 | 2016-01-01T23:05:30 | 48,891,599 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf.urls import patterns, include, url
from novels.views import MainListView, MainDetailView, subscribe_to, IndexView, download_issue_archive, ReadOnlineView
from novels.models import Group, Character, Publisher, Arc, Title, Issue, Strip
urlpatterns = patterns('',
#Index:
url(r'^$', IndexView.as_view(), name='index'),
#Publisher:
url(r'^publishers/$', MainListView.as_view(model = Publisher, template_name = "novels/pub_list.html"), name='PublisherList'),
url(r'^publishers/(?P<slug>[\w-]+)/$', MainDetailView.as_view(model = Publisher, template_name = "novels/pub_detail.html"), name='PublisherDetail'),
url(r'^subscribe/publisher/$', subscribe_to, {'mod': Publisher}, name='Sub_to_Pub'),
#Title:
url(r'^titles/$', MainListView.as_view(model = Title, template_name = "novels/tit_list.html"), name='TitleList'),
url(r'^titles/(?P<slug>[\w-]+)/$', MainDetailView.as_view(model = Title, template_name = "novels/tit_detail.html"), name='TitleDetail'),
url(r'^subscribe/title/$', subscribe_to, {'mod': Title}, name='Sub_to_Title'),
#Arc:
url(r'^arcs/$', MainListView.as_view(model = Arc, template_name = "novels/arc_list.html"), name='ArcList'),
url(r'^arcs/(?P<slug>[\w-]+)/$', MainDetailView.as_view(model = Arc, template_name = "novels/arc_detail.html"), name='ArcDetail'),
url(r'^subscribe/arc/$', subscribe_to, {'mod': Arc}, name='Sub_to_Arc'),
#Issue
url(r'^titles/issue/(?P<slug>[\w-]+)/$', MainDetailView.as_view(model = Issue, template_name = "novels/issue_detail.html"), name='IssueDetail'),
url(r'^download/issue/(?P<pk>\d+)/$', download_issue_archive, name='Download_Issue'),
#Group:
url(r'^groups/$', MainListView.as_view(model = Group, template_name = "novels/group_list.html"), name='GroupList'),
url(r'^groups/(?P<slug>[\w-]+)/$', MainDetailView.as_view(model = Group, template_name = "novels/group_detail.html"), name='GroupDetail'),
url(r'^subscribe/group/$', subscribe_to, {'mod': Group}, name='Sub_to_Group'),
#Character:
url(r'^chars/$', MainListView.as_view(model = Character, template_name = "novels/char_list.html"), name='CharList'),
url(r'^chars/(?P<slug>[\w-]+)/$', MainDetailView.as_view(model = Character, template_name = "novels/char_detail.html"), name='CharDetail'),
url(r'^subscribe/char/$', subscribe_to, {'mod': Character}, name='Sub_to_Char'),
#OnlineReading
url(r'^read/(?P<title_slug>[\w-]+)/(?P<issue_slug>[\w-]+)/(?P<page>[0-9]+)/$', ReadOnlineView.as_view(model = Strip, template_name = "novels/read_strip.html"), name='ReadStrip'),
) | UTF-8 | Python | false | false | 2,596 | py | 60 | urls.py | 39 | 0.660247 | 0.659476 | 0 | 34 | 75.382353 | 182 |
mingyuan-xia/candy | 7,052,336,310,630 | 4706821525e3d7d9fb33e9130547df4dc50eacb4 | adfa452d4209978edb01dcf53a6e4eaab526a945 | /candy/cli.py | dd2087e4ccda143a0192f66e042bf58653eabcf5 | [
"Apache-2.0"
] | permissive | https://github.com/mingyuan-xia/candy | be22d1a08fd7ae6a79d4c0aa38a7e81ea11315bd | 5faeebd2f47dbf72431b7f0515026789cc5b0d17 | refs/heads/master | 2016-04-23T06:44:42.752955 | 2016-01-13T02:16:57 | 2016-01-13T02:16:57 | 48,387,126 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import candy
import sys
import os
from . import util
import pprint
def usage():
util.print_info("candy - the package/library manager for Android apps")
util.print_info("Commands:")
util.print_info(" init - initialize the current directory as a candy managed Android projet")
util.print_info(" install <package_names...> - install a new package to the current project")
util.print_info(" uninstall <package_names...> - uninstall a given package from the current project")
util.print_info(" search <package_names...> - search a package in all known registries")
def init_project(project_root):
# ask for name and version
name = input('the name of the project ()?')
ver = input('the version of the project (default 0.0.0)?')
# TODO
return 0
def main(argv=sys.argv):
if len(argv) < 2:
usage()
return 1
cmd, args = argv[1], argv[2:]
project_root = os.getcwd()
if cmd == 'init':
return init_project(project_root)
registry = candy.find_registry()
pkg_names = args
pkgs = [(p, registry.fetch_meta(pkg=p)) for p in pkg_names] # package name, package object
if cmd == 'search':
ret = 0
for p, pkg in pkgs:
if pkg is None:
util.print_warning('Package %s not found' % (p,))
ret = 1
else:
util.print_info(pprint.pformat(pkg))
return ret
project = candy.Project(project_root)
if cmd == 'install':
ret = 0
for p, pkg in pkgs:
if pkg is None:
util.print_warning('Package %s not found' % (p,))
ret = 1
else:
project.install(pkg)
project.fsync()
return ret
if cmd == 'uninstall':
ret = 0
for p, pkg in pkgs:
if pkg is None:
util.print_warning('Package %s not found' % (p,))
ret = 1
else:
project.uninstall(pkg)
project.fsync()
return ret
util.print_err('Unknown command: %s' % (cmd, ))
usage()
return 1
| UTF-8 | Python | false | false | 2,199 | py | 9 | cli.py | 6 | 0.554343 | 0.547067 | 0 | 70 | 30.414286 | 108 |
KartikAhuja1996/digiprice_bnd | 18,837,726,567,074 | 43163c8c69419341155797326ea7d506ca72cc0c | 0a9027ca983c3fc51a1e2f9279ad36a81fa496a3 | /chip/urls.py | 5ec4555b0bd060bdc852fe2843bb66ff4de1af7c | [] | no_license | https://github.com/KartikAhuja1996/digiprice_bnd | 48f4fb35b40307a14fb5c7fa0a7c5b8fa3de39ba | d5adb926aa16992288ed31214233cf6119f440ce | refs/heads/master | 2018-07-05T08:17:49.348680 | 2018-07-01T20:56:25 | 2018-07-01T20:56:25 | 124,904,844 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.urls import path
from .views import (login,
add_product,
index,
update,
delete,
add_brand)
from django.contrib.auth import views as auth_views
urlpatterns = [
path("",login,name="login"),
path("index",index,name="index"),
path("logout",auth_views.logout,{'next_page':'chip:login'},name="logout"),
path('add-product/',add_product,name="add_product"),
path('add-brand/',add_brand,name="add_brand"),
path('<str:model_name>/<str:instance_name>/edit',update,name="update_model"),
path('<str:model_name>/<str:instance_name>/delete',delete,name="delete_model")
]
| UTF-8 | Python | false | false | 618 | py | 79 | urls.py | 55 | 0.66343 | 0.66343 | 0 | 17 | 35.235294 | 82 |
garygriswold/SafeBible | 19,181,323,979,920 | 45384f3f222c3b101beccccc3143ead20426a44a | 28f0dc2b48ed019dfef08d84e842c5d75e116dfc | /Versions/py/AudioDBPImporter.py | 7fe24c5d771f8a862b05816d2d4a94519238117d | [
"MIT",
"LicenseRef-scancode-public-domain"
] | permissive | https://github.com/garygriswold/SafeBible | 9da0e8d89cb08888b8cf48773b4b3860086c49f7 | 2d378e84cbd6b81641bcccd6ba66699d24208548 | refs/heads/master | 2022-02-25T19:41:10.367183 | 2019-08-22T03:35:02 | 2019-08-22T03:35:02 | 34,028,119 | 0 | 0 | MIT | false | 2019-10-30T07:11:44 | 2015-04-16T01:40:19 | 2019-08-22T03:35:26 | 2019-10-30T07:11:42 | 250,730 | 0 | 0 | 1 | TSQL | false | false | #
# The purpose of this program is to generate the Audio meta data tables,
# including AudioVersion, Audio, and AudioBook. AudioChapter is separately
# created.
#
import io
import sys
# This table controls what Audio versions will be included, and what
# text versions that are associated with
versions = [
['ERV-ARB', 'ARB', 'WTC', True, ['ARBWTCN1DA', 'ARBWTCO1DA']],
['ARBVDPD', 'ARB', 'VDV', True, ['ARZVDVN2DA', 'ARZVDVO2DA']],
['ERV-AWA', 'AWA', 'WTC', True, ['AWAWTCN2DA']],
['ERV-BEN', 'BEN', 'WTC', True, ['BNGWTCN1DA', 'BNGWTCN2DA']],
['ERV-BUL', 'BUL', 'PRB', False, ['BLGAMBN1DA']],
['ERV-CMN', 'CMN', 'UNV', False, ['CHNUNVN2DA', 'CHNUNVO2DA']],
['ERV-ENG', 'ENG', 'ESV', False, ['ENGESVN2DA', 'ENGESVO2DA']],
['KJVPD', 'ENG', 'KJV', True, ['ENGKJVN2DA', 'ENGKJVO2DA']],
['WEB', 'ENG', 'WEB', True, ['ENGWEBN2DA', 'ENGWEBO2DA']],
['ERV-HRV', 'SRC', None, False, []],
['ERV-HIN', 'HIN', 'WTC', True, ['HNDWTCN2DA']],
['ERV-HUN', 'HUN', 'HBS', False, ['HUNHBSN1DA']],
['ERV-IND', 'IND', 'SHL', False, ['INZSHLN2DA']],
['ERV-KAN', 'KAN', 'WTC', True, ['ERVWTCN1DA', 'ERVWTCN2DA']],
['ERV-MAR', 'MAR', 'WTC', True, ['MARWTCN1DA', 'MARWTCN2DA']],
['ERV-NEP', 'NEP', None, False, []],
['ERV-ORI', 'ORY', 'WTC', True, ['ORYWTCN1DA', 'ORYWTCN2DA']],
['ERV-PAN', 'PAN', None, False, []],
['ERV-POR', 'POR', 'BAR', False, ['PORARAN2DA']],
['ERV-RUS', 'RUS', 'S76', False, ['RUSS76N2DA', 'RUSS76O2DA']],
['ERV-SPA', 'SPA', 'WTC', True, ['SPNWTCN2DA']],
['ERV-SRP', 'SRP', None, False, []],
['ERV-TAM', 'TAM', 'WTC', True, ['TCVWTCN2DA']],
['ERV-THA', 'THA', None, False, []],
['ERV-UKR', 'UKR', 'N39', False, ['UKRO95N2DA']],
['ERV-URD', 'URD', 'WTC', True, ['URDWTCN2DA']],
['ERV-VIE', 'VIE', None, False, []],
['NMV', 'PES', None, False, []]
]
#for version in versions:
# print version
def usfmBookId(bookName):
books = {
'Genesis': 'GEN',
'Exodus': 'EXO',
'Leviticus': 'LEV',
'Numbers': 'NUM',
'Deuteronomy': 'DEU',
'Joshua': 'JOS',
'Judges': 'JDG',
'Ruth': 'RUT',
'1Samuel': '1SA',
'2Samuel': '2SA',
'1Kings': '1KI',
'2Kings': '2KI',
'1Chronicles': '1CH',
'2Chronicles': '2CH',
'Ezra': 'EZR',
'Nehemiah': 'NEH',
'Esther': 'EST',
'Job': 'JOB',
'Psalms': 'PSA',
'Proverbs': 'PRO',
'Ecclesiastes': 'ECC',
'SongofSongs': 'SNG',
'Isaiah': 'ISA',
'Jeremiah': 'JER',
'Lamentations': 'LAM',
'Ezekiel': 'EZK',
'Daniel': 'DAN',
'Hosea': 'HOS',
'Joel': 'JOL',
'Amos': 'AMO',
'Obadiah': 'OBA',
'Jonah': 'JON',
'Micah': 'MIC',
'Nahum': 'NAM',
'Habakkuk': 'HAB',
'Zephaniah': 'ZEP',
'Haggai': 'HAG',
'Zechariah': 'ZEC',
'Malachi': 'MAL',
'Matthew': 'MAT',
'Mark': 'MRK',
'Luke': 'LUK',
'John': 'JHN',
'Acts': 'ACT',
'Romans': 'ROM',
'1Corinthians': '1CO',
'2Corinthians': '2CO',
'Galatians': 'GAL',
'Ephesians': 'EPH',
'Philippians': 'PHP',
'Colossians': 'COL',
'1Thess': '1TH',
'2Thess': '2TH',
'1Timothy': '1TI',
'2Timothy': '2TI',
'Titus': 'TIT',
'Philemon': 'PHM',
'Hebrews': 'HEB',
'James': 'JAS',
'1Peter': '1PE',
'2Peter': '2PE',
'1John': '1JN',
'2John': '2JN',
'3John': '3JN',
'Jude': 'JUD',
'Revelation': 'REV',
# Spanish
'San Mateo': 'MAT',
'San Marcos': 'MRK',
'San Lucas': 'LUK',
'San Juan': 'JHN',
'Hechos': 'ACT',
'Romanos': 'ROM',
'1Corintios': '1CO',
'2Corintios': '2CO',
'Galatas': 'GAL',
'Efesios': 'EPH',
'Filipenses': 'PHP',
'Colosenses': 'COL',
'1Tes': '1TH',
'2Tes': '2TH',
'1Timoteo': '1TI',
'2Timoteo': '2TI',
'Tito': 'TIT',
'Filemon': 'PHM',
'Hebreos': 'HEB',
'Santiago': 'JAS',
'1San Pedro': '1PE',
'2San Pedro': '2PE',
'1San Juan': '1JN',
'2San Juan': '2JN',
'3San Juan': '3JN',
'Judas': 'JUD',
'Apocalipsis': 'REV',
# Portuguese
'S Mateus': 'MAT',
'S Marcos': 'MRK',
'S Lucas': 'LUK',
'S Joao': 'JHN',
'Atos': 'ACT',
'Colossenses': 'COL',
'1Tess': '1TH',
'2Tess': '2TH',
'Hebreus': 'HEB',
'S Tiago': 'JAS',
'1Pedro': '1PE',
'2Pedro': '2PE',
'1S Joao': '1JN',
'2S Joao': '2JN',
'3S Joao': '3JN',
'S Judas': 'JUD',
'Apocalipse': 'REV',
# Indonesian
'Matius': 'MAT',
'Markus': 'MRK',
'Lukas': 'LUK',
'Yohanes': 'JHN',
'Kisah Rasul': 'ACT',
'Roma': 'ROM',
'1Korintus': '1CO',
'2Korintus': '2CO',
'Galatia': 'GAL',
'Efesus': 'EPH',
'Filipi': 'PHP',
'Kolose': 'COL',
'1Tesalonika': '1TH',
'2Tesalonika': '2TH',
'1Timotius': '1TI',
'2Timotius': '2TI',
'Ibrani': 'HEB',
'Yakobus': 'JAS',
'1Petrus': '1PE',
'2Petrus': '2PE',
'1Yohanes': '1JN',
'2Yohanes': '2JN',
'3Yohanes': '3JN',
'Yudas': 'JUD',
'Wahyu': 'REV'
}
result = books.get(bookName, None)
return result
abbrDict = dict()
for version in versions:
if version[2] != None:
abbr = version[1] + version[2]
abbrDict[abbr] = (version[0], version[4])
versionOut = io.open("output/AudioVersionTable.sql", mode="w", encoding="utf-8")
versionOut.write(u"DROP TABLE IF EXISTS AudioVersion;\n")
versionOut.write(u"CREATE TABLE AudioVersion(\n")
versionOut.write(u" ssVersionCode TEXT NOT NULL PRIMARY KEY,\n")
versionOut.write(u" dbpLanguageCode TEXT NOT NULL,\n")
versionOut.write(u" dbpVersionCode TEXT NOT NULL);\n")
audioOut = io.open("output/AudioTable.sql", mode="w", encoding="utf-8")
audioOut.write(u"DROP TABLE IF EXISTS Audio;\n")
audioOut.write(u"CREATE TABLE Audio(\n")
audioOut.write(u" damId TEXT NOT NULL PRIMARY KEY,\n")
audioOut.write(u" dbpLanguageCode TEXT NOT NULL,\n")
audioOut.write(u" dbpVersionCode TEXT NOT NULL,\n")
audioOut.write(u" collectionCode TEXT NOT NULL,\n")
audioOut.write(u" mediaType TEXT NOT NULL);\n")
bookOut = io.open("output/AudioBookTable.sql", mode="w", encoding="utf-8")
bookOut.write(u"DROP TABLE IF EXISTS AudioBook;\n")
bookOut.write(u"CREATE TABLE AudioBook(\n")
bookOut.write(u" damId TEXT NOT NULL REFERENCES Audio(damId),\n")
bookOut.write(u" bookId TEXT NOT NULL,\n")
bookOut.write(u" bookOrder TEXT NOT NULL,\n")
bookOut.write(u" bookName TEXT NOT NULL,\n")
bookOut.write(u" numberOfChapters INTEGER NOT NULL,\n")
bookOut.write(u" PRIMARY KEY (damId, bookId));\n")
versionIdSet = set()
damIdSet = set()
lastDamId = None
lastUsfm = None
bookLine = None
dbpProd = io.open("Release.1.13/metadata/FCBH/dbp_prod.txt", mode="r", encoding="utf-8")
for line in dbpProd:
line = line.strip()
parts = line.split("/")
numParts = len(parts)
if parts[0] == 'audio' and parts[numParts -1][-4:] == ".mp3":
abbr = parts[1]
damId = parts[2]
if numParts == 4 and abbr in abbrDict.keys():
ssVersionCode = abbrDict[abbr][0]
allowDamId = abbrDict[abbr][1]
if damId in allowDamId:
# Write AudioVersion Row
if not abbr in versionIdSet:
versionIdSet.add(abbr)
versionOut.write(u"INSERT INTO AudioVersion VALUES ('%s', '%s', '%s');\n"
% (ssVersionCode, abbr[0:3], abbr[3:]))
# Write Audio Row
if not damId in damIdSet:
damIdSet.add(damId)
collectionCode = damId[6:7] + "T"
mType = damId[7:]
if mType != '1DA' and mType != '2DA':
print "ERROR mediaType", line
mediaType = 'Drama' if (mType == '2DA') else 'Non-Drama'
audioOut.write(u"REPLACE INTO Audio VALUES('%s', '%s', '%s', '%s', '%s');\n"
% (damId, abbr[0:3], abbr[3:6], collectionCode, mediaType))
# Write AudioBookRow
book = parts[3]
damId2 = book[21:31].replace("_", " ").strip()
if damId == damId2:
order = book[0:3]
chapter = book[5:8]
chapter = chapter.replace("_", "")
name = book[9:21]
name = name.replace("_", " ").strip()
usfm = usfmBookId(name)
if usfm == None:
print "ERROR", line, name
bookIdKey = damId + usfm
if usfm != lastUsfm or damId != lastDamId:
if bookLine != None:
bookOut.write(bookLine)
lastUsfm = usfm
lastDamId = damId
bookLine = u"REPLACE INTO AudioBook VALUES('%s', '%s', '%s', '%s', '%s');\n" % (damId, usfm, order, name, chapter)
# Validate Key Generation Logic
checkChapter = chapter
if len(checkChapter) < 3:
checkChapter = "_" + checkChapter
checkName = name.replace(" ", "_")
checkName = checkName + "_______________"[0: 12 - len(name)]
generated = "audio/%s/%s/%s__%s_%s%s.mp3" % (abbr, damId, order, checkChapter, checkName, damId)
if line != generated:
print "ERROR"
print line
print generated
dbpProd.close()
versionOut.close()
audioOut.close()
bookOut.write(bookLine)
bookOut.close()
#for order in orderSet:
# print order
#for chapter in chapterSet.keys():
# print chapter, chapterSet[chapter]
#for book in bookSet:
# print book
| UTF-8 | Python | false | false | 8,842 | py | 655 | AudioDBPImporter.py | 496 | 0.572156 | 0.551007 | 0 | 305 | 27.986885 | 119 |
NLeSC/litstudy | 5,145,370,865,791 | 85f4599e50b650f2936b0401d74764579ffa6d8a | a692ce4250b8c79cac3ab8abe936e5559d5c5e96 | /tests/test_sources_csv.py | 1e54aaed648dc04a477e085ed18522fe78364bd4 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | https://github.com/NLeSC/litstudy | 7ec64ad955c01570b7e2ec446f9f20a763b3edd4 | adb86a9e59a5bc38ec218e296d76521583ad0c0a | refs/heads/master | 2023-08-29T10:08:22.343155 | 2023-08-28T12:09:26 | 2023-08-28T12:09:26 | 206,312,286 | 81 | 24 | Apache-2.0 | false | 2023-08-29T23:45:42 | 2019-09-04T12:19:05 | 2023-08-22T03:27:04 | 2023-08-29T23:45:41 | 18,740 | 98 | 36 | 16 | Python | false | false | from litstudy import load_csv
import os
def test_load_ieee_csv():
path = os.path.dirname(__file__) + "/resources/ieee.csv"
docs = load_csv(path)
doc = docs[0]
assert (
doc.title
== 'Exascale Computing Trends: Adjusting to the "New Normal"\' for Computer Architecture'
)
assert doc.publication_year == 2013
# assert len(doc.keywords) == 37
assert "Transistors" in doc.keywords
assert len(doc.abstract) == 774
assert doc.citation_count == 51
assert len(doc.authors) == 2
author = doc.authors[0]
assert author.name == "P. Kogge"
def test_load_springer_csv():
path = os.path.dirname(__file__) + "/resources/springer.csv"
docs = load_csv(path)
doc = docs[0]
assert doc.title == "Graph-Based Load Balancing Model for Exascale Computing Systems"
assert doc.publication_year == 2022
assert doc.id.doi == "10.1007/978-3-030-92127-9_33"
def test_load_scopus_csv():
path = os.path.dirname(__file__) + "/resources/scopus.csv"
docs = load_csv(path)
doc = docs[0]
assert doc.title == "Scalable molecular dynamics with NAMD"
assert doc.abstract.startswith("NAMD is a parallel molecular dynamics code")
assert doc.publication_source == "Journal of Computational Chemistry"
assert doc.language is None
assert doc.publisher == "John Wiley and Sons Inc."
assert doc.citation_count == 13169
assert doc.keywords == ["Biomolecular simulation", "Molecular dynamics", "Parallel computing"]
assert doc.publication_year == 2005
assert len(doc.authors) == 10
assert doc.authors[0].name == "Phillips J.C."
| UTF-8 | Python | false | false | 1,633 | py | 62 | test_sources_csv.py | 32 | 0.665646 | 0.63319 | 0 | 50 | 31.66 | 98 |
LiuFang816/SALSTM_py_data | 16,716,012,728,312 | 601aeb55ee1302710b4cb3231221a2cfb7a0edb7 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/rhiever_tpot/tpot-master/tpot/operators/classifiers/linear_svc.py | 76e80d9b7383444c035c0063fe95e71c6264ba09 | [] | no_license | https://github.com/LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | false | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | 2022-05-26T15:16:41 | 2022-12-19T02:53:00 | 208,253 | 9 | 5 | 1 | Python | false | false | # -*- coding: utf-8 -*-
"""
Copyright 2016 Randal S. Olson
This file is part of the TPOT library.
The TPOT library is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your option)
any later version.
The TPOT library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details. You should have received a copy of the GNU General Public License along
with the TPOT library. If not, see http://www.gnu.org/licenses/.
"""
from ...gp_types import Bool
from .base import Classifier
from sklearn.svm import LinearSVC
class TPOTLinearSVC(Classifier):
"""Fits a Linear Support Vector Classifier
Parameters
----------
C: float
Penalty parameter C of the error term.
penalty: int
Integer used to specify the norm used in the penalization (l1 or l2)
dual: bool
Select the algorithm to either solve the dual or primal optimization problem.
"""
import_hash = {'sklearn.svm': ['LinearSVC']}
sklearn_class = LinearSVC
arg_types = (float, int, Bool)
def __init__(self):
pass
def preprocess_args(self, C, penalty, dual):
penalty_values = ['l1', 'l2']
penalty_selection = penalty_values[penalty % len(penalty_values)]
C = min(25., max(0.0001, C))
if penalty_selection == 'l1':
dual = False
return {
'C': C,
'penalty': penalty_selection,
'dual': dual
}
| UTF-8 | Python | false | false | 1,726 | py | 5,038 | linear_svc.py | 5,037 | 0.66628 | 0.655852 | 0 | 59 | 28.254237 | 85 |
yeseni-today/robin | 17,867,063,970,921 | 91152e93f1a905d2af4009af16415d57721a0343 | f7e40d291edba7bacc851af813f4f3fdd119778b | /lexer/_lexer.py | 8ccfcf1ab36aed1aee46ff6b7a13078fd6a14d65 | [
"MIT"
] | permissive | https://github.com/yeseni-today/robin | 76debcb1ecd8d2da5feed0508792b7a9a1580963 | de23f436857a427d23d310a392b46a0ed4749dee | refs/heads/master | 2021-09-08T14:16:44.952921 | 2018-03-10T11:42:05 | 2018-03-10T11:42:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from abc import ABC, abstractmethod
from robin import settings
from robin import util
from lexer import util, automate
from lexer import tokens
from lexer.tokens import Token, iskeyword
import logging
from robin.util import log_def, log_cls
def lf_lines(text):
"""将text中的换行符替换为\n"""
lines = []
for line in text.splitlines():
lines.append(line + '\n')
# logging.info(f'\nlines{lines}')
# logging.info('\n'+text)
return lines
class Context(object):
def __init__(self, text):
self.lines = lf_lines(text)
self.line_no = 0
self.line = self.lines[self.line_no]
self.position = 0 # todo offset '\t'
self.current_char = self.line[self.position]
self.indent_stack = [0] # 处理indent dedent
self.brackets_stack = [] # 处理隐式行连接 在() [] {}中
class Scanner(ABC):
def __init__(self, context):
self.context = context
def __str__(self):
return '<%s>' % self.__class__.__name__
__repr__ = __str__
@property
def lines(self):
return self.context.lines
@property
def line_no(self):
return self.context.line_no
@line_no.setter
def line_no(self, line_no):
self.context.line_no = line_no
@property
def line(self):
return self.context.line
@line.setter
def line(self, line):
self.context.line = line
@property
def position(self):
return self.context.position
@position.setter
def position(self, position):
self.context.position = position
@property
def current_char(self):
return self.context.current_char
@current_char.setter
def current_char(self, current_char):
self.context.current_char = current_char
@property
def indent_stack(self):
return self.context.indent_stack
@indent_stack.setter
def indent_stack(self, indent_stack):
self.context.indent_stack = indent_stack
@property
def brackets_stack(self):
return self.context.brackets_stack
@brackets_stack.setter
def brackets_stack(self, brackets_stack):
self.context.brackets_stack = brackets_stack
#############################
def look_around(self, n):
""":argument n 左负 右正"""
pos = self.position + n
if pos <= len(self.line) - 1:
return self.line[pos]
def next_char(self):
if self.current_char == '\n': # 行末
self.next_line()
else:
self.position += 1
self.current_char = self.line[self.position]
def next_line(self):
if self.line_no == len(self.lines) - 1: # 结束
self.current_char = None
return
self.line_no += 1
self.position = 0
self.line = self.lines[self.line_no]
self.current_char = self.line[self.position]
def make_token(self, type, value=None):
return Token(type=type, value=value, line=self.line_no, column=self.position) # todo offset '\t'
def skip_whitespace(self):
while self.current_char not in (None, '\n') and self.current_char.isspace():
self.next_char()
def error(self):
# todo 自定义异常 offset '\t'
print(f'line {self.line_no}')
print(self.lines[self.line_no][0:-1])
print(' ' * self.position + '^')
print('Lexical error: invalid char')
exit()
#############################
# 判断当前位置是否符合子类需要的token类型
@abstractmethod
def match(self):
pass
# 扫描并返回token
@abstractmethod
def scan(self):
pass
class IndentScanner(Scanner):
def match(self):
if self.position != 0:
return False
if len(self.brackets_stack) != 0: # 行连接
return False
if self.line_no and len(self.lines[self.line_no - 1]) > 1 and self.lines[self.line_no - 1][-2] == '\\': # 行连接
return False
return True
@log_def(name='IndentScanner')
def scan(self):
indent_num = self.indent_skip()
while self.current_char in ('#', '\n'): # 跳过 注释行 空白行
self.next_line()
indent_num = self.indent_skip()
return self.indent_judge(indent_num)
def indent_skip(self):
"""跳过缩进,并返回缩进的格数"""
indent_num = 0
while self.current_char in (' ', '\t'): # 空格符 制表符
if self.current_char == ' ':
indent_num += 1
else:
indent_num += settings.TABSIZE - indent_num % settings.TABSIZE
self.next_char()
return indent_num
def indent_judge(self, indent_num):
""":argument indent_num 以此判断应该INDENT还是DEDENT或没有"""
last_indent = self.indent_stack[-1]
if indent_num > last_indent: # INDENT
self.indent_stack.append(indent_num)
return self.make_token(tokens.INDENT, indent_num)
elif indent_num < last_indent: # DEDENT
dedent_count = 0
while indent_num < last_indent:
dedent_count += 1
last_indent = self.indent_stack[-1 - dedent_count]
self.indent_stack = self.indent_stack[:- dedent_count]
return self.make_token(tokens.DEDENT, dedent_count)
elif indent_num == last_indent:
return None
self.error() # IndentationError: unindent does not match any outer indentation level
class EndScanner(Scanner):
def match(self):
return self.current_char in ('#', '\\', '\n', None)
# 全文结束ENDMARKER 或 行结束NEWLINE 或 None
@log_def(name='EndScanner')
def scan(self):
char = self.current_char
if char is None: # 全结束
return self.make_token(tokens.ENDMARKER)
if char in '#\n' and len(self.brackets_stack) == 0: # 逻辑行结束
token = self.make_token(tokens.NEWLINE)
self.next_line()
return token
if (char == '\\' and self.look_around(1) == '\n') \
or (char == '\n' and len(self.brackets_stack) != 0): # 显式行连接 隐式行连接
self.next_line()
self.skip_whitespace()
class NumberScanner(Scanner):
def match(self):
return self.current_char in '0123456789' or (self.current_char == '.' and self.look_around(1) in '0123456789')
@log_def(name='NumberScanner')
def scan(self):
number_dfa = automate.number_dfa
while number_dfa.accept(self.current_char):
self.next_char()
if number_dfa.is_final():
token = self.make_token(tokens.NUMBER, number_dfa.string)
# token = self.make_token(tokens.NUMBER, self.str2num(number_dfa.string))
number_dfa.reset()
return token
else:
self.error()
class NameScanner(Scanner):
def match(self):
return self.current_char and self.current_char.isidentifier()
@log_def(name='NameScanner')
def scan(self):
name = self.current_char
self.next_char()
while (self.current_char and self.current_char.isidentifier()) or self.current_char in '0123456789':
name += self.current_char
self.next_char()
if iskeyword(name):
return self.make_token(name)
return self.make_token(tokens.ID, name)
class StrScanner(Scanner):
def match(self):
head = self.current_char
if head in '\'\"':
return True
if head.lower() in 'rufb':
if self.look_around(1) in '\'\"':
return True
head += self.look_around(1)
if head.lower() in ('fr', 'rf', 'br', 'rb') and self.look_around(2) in '\'\"':
return True
return False
@log_def(name='StrScanner')
def scan(self):
string = ''
while self.current_char not in '\'\"': # 前缀
string += self.current_char
self.next_char()
is_bytes = False
if 'b' in string.lower():
is_bytes = True
quote = self.current_char # 单引号或双引号
quote_num = self.quote_num() # 1 or 3
string += quote * quote_num
while True:
char = self.current_char
if quote_num == 1 and char == '\n' or char is None:
self.error() # SyntaxError: EOL while scanning string literal
elif char == '\\':
string += '\\'
self.next_char()
string += self.current_char
self.next_char()
elif char == quote and self.quote_num() == quote_num:
string += quote * quote_num
if is_bytes:
return self.make_token(tokens.BYTES, string)
else:
return self.make_token(tokens.STRING, string)
elif is_bytes and not util.is_ascii(char):
self.error() # SyntaxError: bytes can only contain ASCII literal characters.
else:
string += char
self.next_char()
def quote_num(self):
if self.current_char == self.look_around(1) == self.look_around(2):
self.next_char()
self.next_char()
self.next_char()
return 3
else:
self.next_char()
return 1
class OpDelimiterScanner(Scanner):
def __init__(self, context):
super().__init__(context)
self.len = 0
self.brackets_dict = {'(': ')', '[': ']', '{': '}'}
def deal_brackets(self, bracket):
logging.debug(f'brackets_stack: {self.brackets_stack}, cur_char:{self.current_char}')
if bracket in '([{':
self.brackets_stack.append(bracket)
elif len(self.brackets_stack) == 0 or self.brackets_dict[self.brackets_stack[-1]] != bracket:
self.error()
else:
self.brackets_stack.pop()
@log_def(name='OpDelimiterScanner')
def match(self):
self.len = 0
op_delimiter = self.current_char
if op_delimiter is None:
return False
if op_delimiter in '()[]{},:.;@~':
if op_delimiter in '()[]{}':
self.deal_brackets(op_delimiter)
self.len = 1
return True
elif op_delimiter in '+-*/%&|^<>=':
if self.look_around(1) == '=':
self.len = 2
else:
self.len = 1
return True
op_delimiter += self.look_around(1)
if op_delimiter in ('->', '!='):
self.len = 2
return True
elif op_delimiter in ('**', '//', '<<', '>>'):
if self.look_around(2) == '=':
self.len = 3
else:
self.len = 2
return True
return False
@log_def(name='OpDelimiterScanner')
def scan(self):
op_delimiter = ''
for i in range(self.len):
op_delimiter += self.current_char
self.next_char()
logging.debug(f'op_delimiter= "{op_delimiter}" len={self.len}')
if op_delimiter in tokens.operator & tokens.delimiter:
return self.make_token(op_delimiter)
else:
self.error()
# @log_cls
class Lexer(Scanner):
def match(self):
pass
def scan(self):
pass
def __init__(self, text: str):
super().__init__(Context(text))
self.indent_scanner = IndentScanner(self.context)
self.str_scanner = StrScanner(self.context)
self.name_scanner = NameScanner(self.context)
self.number_scanner = NumberScanner(self.context)
self.end_scanner = EndScanner(self.context)
self.op_delimiter_scanner = OpDelimiterScanner(self.context)
@log_def(name='Lexer')
def get_token(self):
if self.indent_scanner.match(): # 行开始
token = self.indent_scanner.scan()
if token:
return token
if self.end_scanner.match(): # 全结束 或 行结束
token = self.end_scanner.scan()
if token:
return token
self.skip_whitespace() # 空白符
if self.str_scanner.match(): # 字符串 在标识符或关键字之前判断
return self.str_scanner.scan()
elif self.name_scanner.match(): # 标识符或关键字
return self.name_scanner.scan()
elif self.number_scanner.match(): # 数字
return self.number_scanner.scan()
elif self.op_delimiter_scanner.match(): # 操作符 分割符
return self.op_delimiter_scanner.scan()
# indent_scanner 和 end_scanner 可能返回None skip_whitespace没返回 就再次调用get_token()
return self.get_token()
class PeekTokenLexer(object):
def __init__(self, text):
self.lexer = Lexer(text)
self.index = -1
self.token_stream = []
self._stream_token()
def _stream_token(self):
token = self.lexer.get_token()
while token.type != tokens.ENDMARKER:
if token.type == tokens.DEDENT:
self._add_dedent(token)
else:
self.token_stream.append(token)
token = self.lexer.get_token()
logging.info(token)
self.token_stream.append(self.lexer.get_token())
def _add_dedent(self, token):
"""根据DEDENT.value个数 拆成value个DEDENT"""
num = 0
while num != token.value:
num += 1
self.token_stream.append(Token(tokens.DEDENT, None, token.line, token.column))
def next_token(self):
self.index += 1
return self.token_stream[self.index]
def peek_token(self, peek=1):
index = self.index + peek
if index >= len(self.token_stream):
return self.token_stream[-1]
return self.token_stream[index]
| UTF-8 | Python | false | false | 14,158 | py | 23 | _lexer.py | 21 | 0.550356 | 0.543962 | 0 | 450 | 29.582222 | 118 |
Ovilia/Randocy | 13,967,233,685,740 | 32257c7eb2047f9b5481c7edd1b5a4e029b88f5d | 9f2ecb608de644bbc4041a39a146c4ca1dda15bf | /scripts/youku.py | e040e749f9163c4b897737dcb91b541e0b662fc3 | [] | no_license | https://github.com/Ovilia/Randocy | 2236b4e35ef3455f6a7ee5ae245977ae13e9959f | e99e7343a01eb0577f6a299026b7c92221c1a0ba | refs/heads/master | 2020-04-04T12:04:58.557194 | 2012-08-30T12:01:31 | 2012-08-30T12:01:31 | 5,141,106 | 0 | 2 | null | false | 2014-01-18T15:25:00 | 2012-07-22T11:32:21 | 2013-10-18T07:33:00 | 2012-08-30T12:01:51 | 1,639 | 2 | 2 | 1 | Python | null | null | # -*- coding: UTF-8 -*-
from function import *
def youkuSearch():
print 'Searching youku now...'
movies = []
# available movies
page = 1
lastMovie = ''
while True:
print page,
html_src = getHtml('http://movie.youku.com/search/index2/_page63561_' + str(page) + '_cmodid_63561?ccat40486%5Bfe%5D=1&m40487%5Bcc-ms-q%5D=a%7Cpaid%3A0&__rt=1&__ro=m13382821540')
html_src = unicode(html_src, 'utf-8')
soup = BeautifulSoup(html_src)
root = soup.find('div', {'class': 'collgrid6t'}).find('div', {'class': 'items'})
items = root.findAll('ul', {'class': 'p pv'})
# if movie is the same as the former one, this is the last page
curMovie = items[0].find('li', {'class': 'p_title'}).find('a').string
if curMovie == lastMovie:
break
lastMovie = curMovie
length = len(items)
for i in range(length):
# ignore those not free
if items[i].find('li', {'class': 'p_ischarge'}) != None:
continue
name = items[i].find('li', {'class': 'p_title'}).find('a').string
url = items[i].find('li', {'class': 'p_link'}).find('a')['href']
newMovie = {
'name': name,
'url': url
}
movies.append(newMovie)
page += 1
return movies
| UTF-8 | Python | false | false | 1,502 | py | 17 | youku.py | 12 | 0.477364 | 0.446072 | 0 | 43 | 32.790698 | 186 |
dyjakan/misc | 16,355,235,495,662 | 51a3a60c4da1bce26d19fd5a8baf480cf95da93b | 4912cc97429baf4c4e0d4108a3d21f8268c70b41 | /mangler.py | 709d6086c2a21743587e051760fadafbd3fda121 | [] | no_license | https://github.com/dyjakan/misc | 148389b7a4c5f4f9eb76f293bc20fb4bc47e0a50 | 762ff8a9f07c042849d526cc1e05ff512ca98bb1 | refs/heads/master | 2021-01-18T18:25:05.299537 | 2017-02-05T17:26:44 | 2017-02-05T17:26:44 | 16,204,618 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # mangler.py
# =========
#
# This utility applies user defined function mangle() for given byte range
# inside given file. (Comes in handy *way* too often.)
#
# Usage:
# python mangler.py <filename> <offset_start> <offset_end>
#
# NOTE: Using range <0;2> means mangling bytes 0-1-2. ;)
#
import os
import sys
import mmap
# Basic inefficient example that XOR-es given byte range with 0x20
def mangle(fd, start, end):
mapped = mmap.mmap(fd.fileno(), 0, access=mmap.ACCESS_WRITE)
mapped.seek(start)
XOR = 0x20
while(start < end):
mapped[start] = chr(ord(mapped[start]) ^ XOR)
start += 1
mapped.close()
if len(sys.argv) < 3:
print sys.argv[0] + " <filename> <offset_start> <offset_end>"
sys.exit(1)
if len(sys.argv) == 3:
FILENAME = sys.argv[1]
OFFSET_START = int(sys.argv[2])
OFFSET_END = -1
if len(sys.argv) >= 4:
FILENAME = sys.argv[1]
OFFSET_START = int(sys.argv[2])
OFFSET_END = int(sys.argv[3]) + 1
fd = open(FILENAME, "r+b")
# If <offset_end> arg is empty, mangle() is used until EOF
if OFFSET_END == -1:
OFFSET_END = os.path.getsize(FILENAME)
mangle(fd, OFFSET_START, OFFSET_END)
fd.close()
| UTF-8 | Python | false | false | 1,168 | py | 6 | mangler.py | 4 | 0.640411 | 0.618151 | 0 | 51 | 21.882353 | 75 |
Humam-Hamdan/Code4Learn_BootCamp | 773,094,116,377 | ceed959675f96990014d34eb85148e31e5ed10d0 | a63f1e940965100d354f2fdeee5e0317dc4f937a | /Day7/3.py | ba88372062cd06890f81d6ec5d66ef7240e66b79 | [] | no_license | https://github.com/Humam-Hamdan/Code4Learn_BootCamp | 2619dd71d7946a9b3a0f968f78c80884212bd094 | 6a7dbaa48bd1b21829e6690165bee8da8b89970f | refs/heads/main | 2023-02-04T17:00:28.169670 | 2020-12-22T12:23:24 | 2020-12-22T12:23:24 | 323,609,472 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class student:
def __init__(self, name):
self.name = name
self.marks = []
print('Welcome {} To MIoTC School!'.format(name))
def AddMarks(self, marks):
self.marks.append(marks)
def AVG(self):
return sum(self.marks)/len(self.marks)
s1 = student('Humam')
print(s1.marks)
s1.AddMarks(50)
s1.AddMarks(40)
s1.AddMarks(30)
s1.AddMarks(60)
s1.AddMarks(70)
s1.AddMarks(90)
s1.AddMarks(10)
print(s1.marks)
print(s1.AVG())
| UTF-8 | Python | false | false | 514 | py | 47 | 3.py | 46 | 0.57393 | 0.525292 | 0 | 35 | 12.685714 | 57 |
viveksharmapoudel/weather-app | 2,602,750,197,313 | 7307a30c2a66c54af55269ca1b79c9d0c98ee026 | 0270a15637dd6a750384cedda366e482f6f62a47 | /weather/forms.py | 32ba8190baf0d6fc48c23048a4b871d5d0a6e6a7 | [] | no_license | https://github.com/viveksharmapoudel/weather-app | 2402fda71ad78e9d6790acf6872664ccfdbcbe50 | 05fc5d648d169a48f0b14b6c6272ca439bb6b9f6 | refs/heads/master | 2022-12-22T17:05:32.024037 | 2020-09-29T04:39:09 | 2020-09-29T04:39:09 | 299,505,715 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.forms import ModelForm, TextInput
from .models import City
class CityForm(ModelForm):
class Meta:
model = City
fields = ['name',]
def clean(self):
cleaned_data=super(CityForm,self).clean()
return cleaned_data
| UTF-8 | Python | false | false | 287 | py | 3 | forms.py | 3 | 0.595819 | 0.595819 | 0 | 13 | 19.615385 | 46 |
aramande/Nirena | 876,173,328,882 | 69d713e14849205ff1846eadc98de9950ed82afb | 891e876aafde31b093ff8a67907c4b36a1e632b2 | /nirena/util/direction.py | 025c2b34164e08bb5e8a3026c060453257311200 | [] | no_license | https://github.com/aramande/Nirena | fc51d8768e9a9fb7e44dd6fadf726ed3fc1d51c8 | 1e7299ee4915ab073dc7c64569789a07769e71be | refs/heads/master | 2021-01-18T20:17:52.943977 | 2010-12-04T00:05:10 | 2010-12-04T00:05:10 | 1,217,641 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #14:33-16:00
class Direction:
def __init__(self, degree):
if degree < -180:
degree += 360
elif degree > 180:
degree -= 360
self._dir = degree
def __add__(self, other):
"Addition between direction objects, good for figuring out the result of an additional force."
dir = self._dir + other._dir
return Direction(dir)
def __sub__(self, other):
"Subtraction between direction objects, good for figuring out the result of removing a force."
dir = self._dir - other._dir
return Direction(dir)
def get(self):
"Returns the current direction"
return self._dir
def __float__(self):
"Synonym to get()"
return self._dir | UTF-8 | Python | false | false | 651 | py | 61 | direction.py | 43 | 0.669739 | 0.639017 | 0 | 26 | 24.076923 | 96 |
Rexiome/LibreASR | 7,696,581,414,219 | c9ca3f5e2eec583ea3b6514623233f9dfd1fda05 | ef10bf82a7c7aaa96e1a68a27a188abbd045cb07 | /libreasr/lib/layers/custom_rnn.py | 638876868f4af8c2ca207869216de7bb612ea2cd | [
"MIT"
] | permissive | https://github.com/Rexiome/LibreASR | c1df287fad563e565ca7eedad3f4d3e9cfee5917 | 8a12c2f7cdccf42a5df47b63b2d60e97d389eba6 | refs/heads/master | 2023-08-11T05:46:33.975028 | 2020-12-18T16:28:31 | 2020-12-18T16:28:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
import random
import torch
from torch.nn import Parameter, ParameterList
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from IPython.core.debugger import set_trace
ZONEOUT = 0.01
DEVICES = ["CPU", "GPU"]
RNN_TYPES = ["LSTM", "GRU", "NBRC"]
USE_PYTORCH = True
def get_rnn_impl(device, rnn_type, layer_norm=False):
assert device in DEVICES
assert rnn_type in RNN_TYPES
if device == "GPU":
if rnn_type == "LSTM":
if layer_norm:
# from haste_pytorch import LayerNormLSTM as RNN
from torch.nn import LSTM as RNN
else:
# from haste_pytorch import LSTM as RNN
from torch.nn import LSTM as RNN
if rnn_type == "GRU":
# from haste_pytorch import GRU as RNN
from torch.nn import GRU as RNN
if rnn_type == "NBRC":
raise Exception("NBRC GPU not available")
if device == "CPU":
if rnn_type == "LSTM":
if layer_norm:
# from .haste import LayerNormLSTM as RNN
from torch.nn import LSTM as RNN
else:
# from .haste import LSTM as RNN
from torch.nn import LSTM as RNN
if rnn_type == "GRU":
# from .haste import GRU as RNN
from torch.nn import GRU as RNN
if rnn_type == "NBRC":
from .haste import NBRC as RNN
return RNN
def get_weight_attrs(rnn_type, layer_norm):
attrs = [
"kernel",
"recurrent_kernel",
"bias",
]
if rnn_type == "GRU" or rnn_type == "NBRC":
attrs += [
"recurrent_bias",
]
if layer_norm:
attrs += [
"gamma",
"gamma_h",
"beta_h",
]
return attrs
def copy_weights(_from, _to, attrs):
for attr in attrs:
setattr(_to, attr, getattr(_from, attr))
def get_initial_state(rnn_type, hidden_size, init=torch.zeros):
if rnn_type == "LSTM":
h = nn.Parameter(init(2, 1, 1, hidden_size))
tmp = init(2, 1, 1, hidden_size)
else:
h = nn.Parameter(init(1, 1, 1, hidden_size))
tmp = init(1, 1, 1, hidden_size)
return h, tmp
class CustomRNN(nn.Module):
def __init__(
self,
input_size,
hidden_size,
num_layers=1,
batch_first=True,
rnn_type="LSTM",
reduction_indices=[],
reduction_factors=[],
reduction_drop=True,
rezero=False,
layer_norm=False,
utsp=0.9,
):
super().__init__()
self.batch_first = batch_first
self.hidden_size = hidden_size
self._is = [input_size] + [hidden_size] * (num_layers - 1)
self._os = [hidden_size] * num_layers
self.rnn_type = rnn_type
# reduction
assert len(reduction_indices) == len(reduction_factors)
self.reduction_indices = reduction_indices
self.reduction_factors = reduction_factors
# learnable state & temporary state
self.hs = nn.ParameterList()
for hidden_size in self._os:
h, tmp = get_initial_state(rnn_type, hidden_size)
self.hs.append(h)
# state cache (key: bs, value: state)
self.cache = {}
# norm (BN or LN)
self.bns = nn.ModuleList()
for i, o in zip(self._is, self._os):
norm = nn.BatchNorm1d(o)
# norm = nn.LayerNorm(o)
self.bns.append(norm)
# rezero
self.rezero = rezero
# percentage of carrying over last state
self.utsp = utsp
def convert_to_cpu(self):
return self
def convert_to_gpu(self):
return self
def forward_one_rnn(
self, x, i, state=None, should_use_tmp_state=False, lengths=None
):
bs = x.size(0)
if state is None:
s = self.cache[bs][i] if self.cache.get(bs) is not None else None
is_tmp_state_possible = self.training and s is not None
if is_tmp_state_possible and should_use_tmp_state:
# temporary state
pass
else:
# learnable state
if self.hs[i].size(0) == 2:
s = []
for h in self.hs[i]:
s.append(h.expand(1, bs, self._os[i]).contiguous())
s = tuple(s)
else:
s = self.hs[i][0].expand(1, bs, self._os[i]).contiguous()
else:
s = state[i]
if self.rnn_type == "LSTM" or self.rnn_type == "GRU":
# PyTorch
if lengths is not None:
seq = pack_padded_sequence(
x, lengths, batch_first=True, enforce_sorted=False
)
seq, new_state = self.rnns[i](seq, s)
x, _ = pad_packed_sequence(seq, batch_first=True)
return (x, new_state)
else:
return self.rnns[i](x, s)
else:
# haste
return self.rnns[i](x, s, lengths=lengths if lengths is not None else None)
def forward(self, x, state=None, lengths=None):
bs = x.size(0)
residual = 0.0
new_states = []
suts = random.random() > (1.0 - self.utsp)
for i, rnn in enumerate(self.rnns):
# reduce if necessary
if i in self.reduction_indices:
idx = self.reduction_indices.index(i)
r_f = self.reduction_factors[idx]
# to [N, H, T]
x = x.permute(0, 2, 1)
x = x.unfold(-1, r_f, r_f)
x = x.permute(0, 2, 1, 3)
# keep last
# x = x[:,:,:,-1]
# or take the mean
x = x.mean(-1)
# also reduce lengths
if lengths is not None:
lengths = lengths // r_f
# apply rnn
inp = x
x, new_state = self.forward_one_rnn(
inp, i, state=state, should_use_tmp_state=suts, lengths=lengths,
)
# apply norm
x = x.permute(0, 2, 1)
x = self.bns[i](x)
x = x.permute(0, 2, 1)
# apply residual
if self.rezero:
if torch.is_tensor(residual) and residual.shape == x.shape:
x = x + residual
# store new residual
residual = inp
new_states.append(new_state)
if self.training:
if len(new_states[0]) == 2:
self.cache[bs] = [
(h.detach().contiguous(), c.detach().contiguous())
for (h, c) in new_states
]
else:
self.cache[bs] = [h.detach() for h in new_states]
return x, new_states
class CustomGPURNN(CustomRNN):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._args = args
self._kwargs = kwargs
RNN = get_rnn_impl("GPU", self.rnn_type, kwargs["layer_norm"])
self.rnns = nn.ModuleList()
for i, o in zip(self._is, self._os):
# r = RNN(i, o, batch_first=self.batch_first, zoneout=ZONEOUT)
r = RNN(i, o, batch_first=self.batch_first)
self.rnns.append(r)
def convert_to_cpu(self):
if USE_PYTORCH:
return self.to("cpu")
dev = next(self.parameters()).device
inst = CustomCPURNN(*self._args, **self._kwargs)
attrs = get_weight_attrs(self.rnn_type, self._kwargs["layer_norm"])
for i, rnn in enumerate(self.rnns):
grabbed_rnn = inst.rnns[i]
copy_weights(rnn, grabbed_rnn, attrs)
return inst.to("cpu")
class CustomCPURNN(CustomRNN):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._args = args
self._kwargs = kwargs
RNN = get_rnn_impl("CPU", self.rnn_type, kwargs["layer_norm"])
self.rnns = nn.ModuleList()
for i, o in zip(self._is, self._os):
# r = RNN(i, o, batch_first=self.batch_first, zoneout=ZONEOUT)
r = RNN(i, o, batch_first=self.batch_first)
self.rnns.append(r)
def convert_to_gpu(self):
dev = next(self.parameters()).device
if USE_PYTORCH or self.rnn_type == "NBRC":
return self.to(dev)
inst = CustomGPURNN(*self._args, **self._kwargs)
attrs = get_weight_attrs(self.rnn_type, self._kwargs["layer_norm"])
for i, rnn in enumerate(self.rnns):
grabbed_rnn = inst.rnns[i]
copy_weights(rnn, grabbed_rnn, attrs)
return inst.to(dev)
| UTF-8 | Python | false | false | 8,828 | py | 56 | custom_rnn.py | 36 | 0.510648 | 0.505097 | 0 | 280 | 30.528571 | 87 |
RavinSG/twitter_fastText | 3,504,693,356,093 | 9255bd0ab7ccc703e7d259b761595aaae97f1316 | 622911053cec2c775e0d8cc68e71ff8f6fe7582a | /preprocess_fasttext.py | 8eff80609311701d3c430d8533fd90f067b0acce | [] | no_license | https://github.com/RavinSG/twitter_fastText | d5d1a8c17a4ffe40b5ae1a3df67a0f06f9da9e46 | 735fc8c6ed65470c636b4dbf1a146dd576c5f8c6 | refs/heads/master | 2020-04-01T00:09:03.994115 | 2018-10-12T10:06:56 | 2018-10-12T10:06:56 | 152,684,103 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from ekphrasis.classes.preprocessor import TextPreProcessor
from ekphrasis.classes.tokenizer import SocialTokenizer
from ekphrasis.dicts.emoticons import emoticons
text_processor = TextPreProcessor(
# terms that will be normalized
normalize=['url', 'email', 'percent', 'money', 'phone', 'user',
'time', 'url', 'date', 'number'],
# terms that will be annotated
annotate={"hashtag", "repeated",
'emphasis', 'censored'},
fix_html=True, # fix HTML tokens
# corpus from which the word statistics are going to be used
# for word segmentation
segmenter="twitter",
# corpus from which the word statistics are going to be used
# for spell correction
corrector="twitter",
unpack_hashtags=False, # perform word segmentation on hashtags
unpack_contractions=True, # Unpack contractions (can't -> can not)
spell_correct_elong=False, # spell correction for elongated words
# select a tokenizer. You can use SocialTokenizer, or pass your own
# the tokenizer, should take as input a string and return a list of tokens
tokenizer=SocialTokenizer().tokenize,
# list of dictionaries, for replacing tokens extracted from the text,
# with other expressions. You can pass more than one dictionaries.
dicts=[emoticons]
)
def get_file():
input_file_name = input('Enter input file path: ')
output_file_name = input('Enter output file path: ')
file = open(input_file_name, encoding='utf-8')
processed = open(output_file_name+'.txt', 'a', encoding='utf-8')
counter = 0
words = 0
for s in file:
counter = counter + 1
tweet = s.split(',')[6]
tweet = tweet.split(' ')
words = words + len(tweet)
processed_tweet = " ".join(text_processor.pre_process_doc(tweet))
processed.write(processed_tweet+'\n')
if counter % 10000 == 0:
print("Processed number of tweets: ", counter)
print("Processed number of words: ", words)
def ft_process(sentence):
return " ".join(text_processor.pre_process_doc(sentence))
| UTF-8 | Python | false | false | 2,101 | py | 11 | preprocess_fasttext.py | 8 | 0.664921 | 0.65921 | 0 | 56 | 36.517857 | 78 |
Hecatesiren/siwj | 7,172,595,418,095 | 1b7dc45d978d419787a41f9be9a8bdd9461817f0 | acee435c81531afa4d85ba9086394ff26c15c82a | /项目一.py | 9dec262731bbf10b1766e4656b33dd7fecfdfc66 | [] | no_license | https://github.com/Hecatesiren/siwj | 68ec9a37ea2627f86433dd3391e92aaba8f85584 | 0fd317c92814640bc117511870f38eb112097630 | refs/heads/master | 2022-04-20T16:00:51.424123 | 2020-04-19T10:00:22 | 2020-04-19T10:00:22 | 256,975,323 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
F1 = open("C:/Users/USER/Desktop/magic04.txt", "r")
List_row = F1.readlines()
list_source = []
for i in range(len(List_row)):
column_list = List_row[i].strip().split(",") # 每一行split后是一个列表
column_list.pop()#去掉最后的g/h
list_source.append(column_list) # 加入list_source
b=np.array(list_source)#转化为np数组
b=b.astype(float)#转换为浮点类型
MeanVector=np.mean(b,axis=0)#均值向量
print(MeanVector)
#中心化
center=b-MeanVector
#求内积
innerP=np.dot(center.T,center)
print(innerP/len(center))
#求外积
outP=0
for i in range(len(center)):
outP = outP+center[i].reshape(len(center[0]),1)*center[i]
print(outP/len(center))
#通过中心化后的向量计算属性1和2的夹角
t=center.T
corr=np.corrcoef(t[0],t[1])
print(corr[0][1])
fig = plt.figure()
ax1 = fig.add_subplot(111) #设置标题
ax1.set_title('Scatter Plot')
plt.scatter(t[0],t[1])
plt.xlabel('x1') #设置X轴标签
plt.ylabel('x2') #设置Y轴标签
plt.show()
u=np.mean(b,axis=0)[0]#第一列均值
sig=np.var(b.T[0])#第一列方差
fig = plt.figure()
ax1 = fig.add_subplot(111) #设置标题
ax1.set_title('ZTFB')
x = np.linspace(u - 3*sig, u + 3*sig, 50)
y_sig = np.exp(-(x - u) ** 2 /(2* sig **2))/(np.sqrt(2*np.pi)*sig)
plt.plot(x, y_sig, "r-", linewidth=2)
plt.show()
#求每一列的方差
list=[]
for i in range(len(b[0])):
list.append(np.var(b.T[i]))
print(list)
maxIndex=list.index(max(list))
minIndex=list.index(min(list))
print(maxIndex+1)
print(minIndex+1)
#求矩阵两列协方差
pairCov={}
for i in range(9):
for j in range(i+1,10):
st=str(i+1)+'-'+str(j+1)
pairCov[st]= np.cov(b.T[i],b.T[j])[0][1]
print(pairCov)
print(min(pairCov, key=pairCov.get))
print(max(pairCov, key=pairCov.get)) | UTF-8 | Python | false | false | 1,835 | py | 3 | 项目一.py | 3 | 0.668096 | 0.636865 | 0 | 68 | 23.029412 | 66 |
solvable/oa2 | 8,495,445,337,517 | 4902228b50838dcfcdac67afb7d2053c4e9dfeab | 1baab1016dd600d78e95e62ad2bf8343cf249878 | /oa/forms.py | 69cb8cfe07d169f8eaa29ed3cc57e6e8745deafd | [] | no_license | https://github.com/solvable/oa2 | 08240774dfc1f4cbad8e39a47d03a6efbcd9b734 | 1a72dbc621c94cacc7517ee145d677d888bcba14 | refs/heads/master | 2021-09-08T05:37:38.460139 | 2018-03-07T18:23:09 | 2018-03-07T18:23:09 | 119,281,950 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django import forms
# our new form
class ContactForm(forms.Form):
contact_name = forms.CharField(required=True)
contact_email = forms.EmailField(required=True)
message = forms.CharField(
required=True,
widget=forms.Textarea
)
phone_number = forms.CharField(required=False)
| UTF-8 | Python | false | false | 314 | py | 21 | forms.py | 9 | 0.707006 | 0.707006 | 0 | 10 | 30.4 | 51 |
krishna-saravan/WSoC2101-TestRepo | 1,374,389,567,576 | 6f76e4eb5906f4aa940e3f928d5adf40f6b3e580 | dd92d381658e924c3e0883d7efd110142198b3ef | /NotesAdder/notes_app/migrations/0003_alter_note_content.py | ca3ddd5c7a56337ac5ceb127ae0cbdc014b8670f | [
"MIT"
] | permissive | https://github.com/krishna-saravan/WSoC2101-TestRepo | 3a120e4fdfdddc5407f04a7de831ec9e46d56d88 | 51a27a128c837e1ff561fddb713c50ed582ca1a1 | refs/heads/main | 2023-07-05T19:18:55.133191 | 2021-08-31T07:20:49 | 2021-08-31T07:20:49 | 401,055,928 | 0 | 0 | MIT | true | 2021-08-29T14:00:48 | 2021-08-29T14:00:47 | 2021-08-16T11:03:53 | 2021-08-29T13:52:04 | 4,063 | 0 | 0 | 0 | null | false | false | # Generated by Django 3.2.6 on 2021-08-26 12:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('notes_app', '0002_alter_note_content'),
]
operations = [
migrations.AlterField(
model_name='note',
name='content',
field=models.TextField(),
),
]
| UTF-8 | Python | false | false | 374 | py | 23 | 0003_alter_note_content.py | 7 | 0.574866 | 0.524064 | 0 | 18 | 19.777778 | 49 |
AlexChanson/ELFIN_Z-Suite_Comaptibility | 2,980,707,333,840 | 3250cf0c5bf39776e4552839f698852cf9093ffa | a2644d5f3c7b7091de1a018bfe4c67af34eb43f1 | /convert/cv_main.py | 5e70a613e87b7a40535fe31b631dfa04f5969cd7 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | https://github.com/AlexChanson/ELFIN_Z-Suite_Comaptibility | a401420cc2335f5648941c2c06849cac6958be65 | 4f744fdfd131d2da1028225abf2d4d9c24c06db6 | refs/heads/master | 2022-12-11T14:28:38.993471 | 2020-08-20T08:59:31 | 2020-08-20T08:59:31 | 285,602,589 | 0 | 0 | null | false | 2020-08-06T17:35:34 | 2020-08-06T15:12:24 | 2020-08-06T15:12:42 | 2020-08-06T17:35:33 | 10 | 0 | 0 | 0 | Python | false | false | from zipfile import ZipFile, ZIP_DEFLATED
from datetime import datetime
import os
import argparse
verbose = False
# Settings table : match between Z-Suite gcode header and expected silce.conf
master_table = [("Pix per mm X", "xppm", float),
("Pix per mm Y", "yppm", float),
("X Resolution", "xres", int),
("Y Resolution", "yres", int),
("Layer Thickness", "thickness", float),
("Number of Slices", "layers_num", int),
("Number of Bottom Layers", "head_layers_num", int),
("Layer Time", "layers_expo_ms", int),
("Bottom Layers Time", "head_layers_expo_ms", int),
("Blanking Layer Time", "wait_before_expo_ms", int),
("Lift Distance", "lift_distance", float),
("Z Lift Feed Rate", "lift_up_speed", int),
("Z Lift Retract Rate", "lift_down_speed", int)]
extra_table = [("Anti Aliasing", "aa", str),
("Anti Aliasing Value", "aa_v", float),
("Flip X", "flip_x", str),
("Flip Y", "flip_y", str),
("Z Lift Feed Rate", "lift_feed", float),
("Z Lift Retract Rate", "lift_retract", float),
("Blanking Layer Time", "wait", int)]
# Header generation for GCODE
def gen_header(conf, extra, timestr):
    """Return the gcode preamble lines expected by the Elfin firmware.

    conf/extra hold the settings parsed from the Z-Suite header; timestr is
    the slicing timestamp embedded in the first line so the file looks like
    it came from the NovaMaker slicer.
    """
    build_params = [
        "# nova3d.cn NovaMaker v2.4.20 64-bits " + timestr,
        ";(****Build and Slicing Parameters****)",
        f";(Pix per mm X = {conf['xppm']} )",
        f";(Pix per mm Y = {conf['yppm']} )",
        f";(X Resolution = {conf['xres']} )",
        f";(Y Resolution = {conf['yres']} )",
        f";(Layer Thickness = {conf['thickness']} mm )",
        f";(Layer Time = {conf['layers_expo_ms']} ms )",
        ";(Render Outlines = False",
        ";(Outline Width Inset = 2",
        ";(Outline Width Outset = 0",
        f";(Bottom Layers Time = {conf['head_layers_expo_ms']} ms )",
        f";(Number of Bottom Layers = {conf['head_layers_num']} )",
        ";(Blanking Layer Time = 2000 ms )",
        ";(Build Direction = Bottom_Up)",
        f";(Lift Distance = {conf['lift_distance']} mm )",
        ";(Slide/Tilt Value = 0)",
        ";(Use Mainlift GCode Tab = False)",
        f";(Anti Aliasing = {extra['aa']})",
        f";(Anti Aliasing Value = {extra['aa_v']} )",
        # NOTE: the feeds are labelled mm/s although the values are really
        # mm/minute; files produced by the original slicer carry the same
        # mislabel, so it is reproduced here.
        f";(Z Lift Feed Rate = {extra['lift_feed']} mm/s )",
        ";(Z Bottom Lift Feed Rate = 40.0 mm/s )",
        f";(Z Lift Retract Rate = {extra['lift_retract']} mm/s )",
        f";(Flip X = {extra['flip_x']})",
        f";(Flip Y = {extra['flip_y']})",
        f";Number of Slices = {conf['layers_num']}",
    ]
    machine_config = [
        ";(****Machine Configuration ******)",
        ";(Platform X Size = 65.02mm )",
        ";(Platform Y Size = 116mm )",
        ";(Platform Z Size = 130mm )",
        ";(Max X Feedrate = 200mm/s )",
        ";(Max Y Feedrate = 200mm/s )",
        ";(Max Z Feedrate = 200mm/s )",
        ";(Machine Type = UV_LCD)",
        "",
    ]
    startup_gcode = [
        "G28",
        "G21 ;Set units to be mm",
        "G91 ;Relative Positioning",
        "M17 ;Enable motors",
        "<Slice> Blank",
        "M106 S0",
        "",
    ]
    return build_params + machine_config + startup_gcode
def gen_slices(conf, extra):
    """Emit the per-layer gcode: exposure, blank, then lift/retract moves.

    Bottom ("head") layers travel at a fixed slow 20 mm/min with a long
    cool-down delay; remaining layers use the configured feed rates.  The
    delays are inflated to cover the travel time because the printer starts
    counting a <Delay> immediately rather than after the moves complete.
    """
    gcode = []
    raise_mm = float(conf['lift_distance'])
    lower_mm = raise_mm - conf['thickness']
    bottom_layers = conf['head_layers_num']
    total_layers = conf['layers_num']

    for layer in range(bottom_layers):
        gcode.append(f";<Slice> {layer}")
        gcode.append("M106 S255")
        gcode.append(f";<Delay> {conf['head_layers_expo_ms']}")
        gcode.append("M106 S0")
        gcode.append(";<Slice> Blank")
        # Crawl up and back down at 20 mm/min to give the bottom layer a
        # long cool-down, then wait out the whole move plus 10 seconds.
        gcode.append(f"G1 Z{raise_mm} F20")
        gcode.append(f"G1 Z-{lower_mm} F20")
        pause = 10000 + int(((int(raise_mm) / (20. / 60.)) + (int(raise_mm) / (20. / 60.))) * 1000)
        pause = max(pause, conf['head_layers_expo_ms'])  # overheat safety
        print(f"[DEBUG] bottom time is {pause} ms")
        gcode.append(f";<Delay> {pause}")
        gcode.append("")

    for layer in range(bottom_layers, total_layers):
        gcode.append(f";<Slice> {layer}")
        gcode.append("M106 S255")
        gcode.append(f";<Delay> {conf['layers_expo_ms']}")
        gcode.append("M106 S0\n;<Slice> Blank")
        gcode.append(f"G1 Z{raise_mm} F{extra['lift_feed']}")
        gcode.append(f"G1 Z-{lower_mm} F{extra['lift_retract']}")
        # Blank time (at least 500 ms of margin) plus the up/down travel
        # time, with a 10 second floor as overheat safety.
        pause = max(extra['wait'], 500) + int(((int(raise_mm) / (extra['lift_feed'] / 60)) + (int(raise_mm) / (extra['lift_retract'] / 60))) * 1000)
        pause = max(pause, 10000)
        gcode.append(f";<Delay> {pause}")
        gcode.append("")

    return gcode
def find_raw_line(search_str_, raw_settings_lines_):
    """Return the first settings line containing *search_str_*.

    Raises ValueError when no line matches (the converter cannot proceed
    without the setting).

    Bug fix: the loop previously tested the module-level variable
    `search_str` (leaked from the caller's for-loop) instead of the
    `search_str_` parameter; it only worked by accident.
    """
    for candidate in raw_settings_lines_:
        if search_str_ in candidate:
            return candidate
    print("Could not find:", search_str_)
    raise ValueError(f"setting '{search_str_}' not found in gcode header")
if __name__ == '__main__':
    # Parse the single required argument: the Z-Suite .cws archive to convert.
    parser = argparse.ArgumentParser(description='Process a Z-Suite CWS archive to make it compatible with the Nova3D Elfin Printer')
    parser.add_argument('--file', required=True, help='The .cws print file to modify')
    args = parser.parse_args()
    test_file_input = args.file
    #test_file_input = 'd:\\calibration_cube_zsuite.cws'
    canonical_name = None
    # First pass over the original archive: pull the settings lines that
    # precede the "Header Start" marker out of the embedded gcode file.
    with ZipFile(test_file_input, mode="r") as zip_input:
        zinfo_gcode = None
        # Fetch gcode (the basename doubles as the canonical job name)
        for z_file_info in zip_input.filelist:
            if z_file_info.filename.endswith(".gcode"):
                zinfo_gcode = z_file_info
                canonical_name = z_file_info.filename.split(".gcode")[0]
        # Get settings (before Header)
        raw_settings_lines = []
        with zip_input.open(zinfo_gcode) as gcode_in:
            for line in gcode_in:
                if line.startswith(b";"):
                    if b'Header Start' in line:
                        break
                    else:
                        raw_settings_lines.append(line)
    # Cleanup and convert to string: drop the ';( ... )' wrapping around each entry.
    raw_settings_lines = list(map(lambda s: s.decode('utf-8').strip().strip(";()").strip(), raw_settings_lines))
    # Let's pretend really hard we are the real slicer
    now = datetime.now()
    now_str = now.strftime("%Y-%m-%d %H:%M:%S")
    outlines = ["# nova3d.cn NovaMaker v2.4.20 64-bits " + now_str, "# conf version 1.0", ""]
    # Number of chars before '='
    padding_target = 24
    # Machine height for lift_when_finished
    # NOTE(review): machine_height is never used below; the end-of-print lift
    # is computed from a hard-coded 150 further down -- confirm intent.
    machine_height = 130
    # Building the config in original order
    # NOTE(review): `type` shadows the builtin inside these loops.
    config_data = {}
    for search_str, key, type in master_table:
        raw_str = find_raw_line(search_str, raw_settings_lines)
        clean_str = raw_str.split(' = ')[1].strip(" ms/px")
        config_data[key] = type(clean_str)
        # Pad the key so every '=' lines up at the same column in slice.conf.
        whitespace_count = padding_target - len(key)
        whitespaces = ' ' * whitespace_count
        clean_str = key + whitespaces + "= " + clean_str
        outlines.append(clean_str)
        if verbose: print(clean_str)
    # Fetch extra parameters (only for Gcode)
    extra_data = {}
    for search_str, key, type in extra_table:
        raw_str = find_raw_line(search_str, raw_settings_lines)
        clean_str = raw_str.split(' = ')[1].strip(" ms/px")
        extra_data[key] = type(clean_str)
    outlines.append("lift_when_finished = 80")
    # Write the slice.conf that will be packed into the new archive.
    with open("slice_tmp.conf", mode="w") as out:
        out.write('\n'.join(outlines) + '\n')
    # Generating Gcode
    with ZipFile(test_file_input, mode="r") as zip_input:
        zinfo_gcode = None
        # Fetch gcode
        for z_file_info in zip_input.filelist:
            if z_file_info.filename.endswith(".gcode"):
                zinfo_gcode = z_file_info
    with open(canonical_name + "_tmp.gcode", mode="w", newline="\n") as gcode_out:
        # Write Header
        gcode_out.write("\n".join(gen_header(config_data, extra_data, now_str)) + "\n")
        # Write Slices
        gcode_out.write("\n".join(gen_slices(config_data, extra_data)))
        # Write end
        gcode_out.write("\nM106 S0\n")
        # Raise the platform to near the top when the print ends.
        # NOTE(review): uses 150 although the header advertises a 130 mm Z axis
        # and machine_height above is 130 -- confirm the correct travel.
        lift_height = 150 - (config_data['thickness'] * config_data['layers_num'])
        if lift_height < 0:
            raise ValueError
        gcode_out.write(f"G1 Z{lift_height}\n")
        gcode_out.write("M18 ;Disable Motors\n")
        gcode_out.write(";<Completed>\n")
    # Building new archive
    if test_file_input.endswith('.cws') or test_file_input.endswith('.CWS'):
        out_zip = test_file_input[:-4] + "_clean.cws"
    else:
        out_zip = test_file_input + "_clean.cws"
    with ZipFile(out_zip, mode="w") as zip:
        # Put conf file in
        zip.write("slice_tmp.conf", arcname="slice.conf", compress_type=ZIP_DEFLATED)
        # Put GCODE in
        zip.write(canonical_name + "_tmp.gcode", arcname=canonical_name + ".gcode", compress_type=ZIP_DEFLATED)
        # Copy only the slice images (.png members) from the source archive.
        with ZipFile(test_file_input, 'r') as zin:
            for item in zin.infolist():
                buffer = zin.read(item.filename)
                if item.filename.endswith("png"):
                    zip.writestr(item, buffer)
    # NOTE(review): the original input archive is deleted and replaced in place.
    if not verbose:
        os.remove("slice_tmp.conf")
        os.remove(canonical_name + "_tmp.gcode")
        os.remove(test_file_input)
        os.rename(out_zip, test_file_input)
        print("Done, your file is ready for the Elfin.")
    else:
        print("Done, all temp files kept for debug.")
peng211217610/Python | 12,472,585,049,383 | fae5471a62768a5e650cb4fdbf50587e01b481ed | db82ec070fd356ea5a5aebd5ada39c7fe58a2b38 | /知识/课程代码/python课堂代码/samples/muti_proc_threads/mt3.py | b129e046ed8370de22826f2544c8926e24d7e3ce | [] | no_license | https://github.com/peng211217610/Python | 8d7217af06262fda2133e50c634eac0148e2ace5 | e65889e87aa51e1cb94601d3470e3906ad9e2d0a | refs/heads/master | 2023-08-22T16:55:51.592534 | 2023-08-10T09:43:22 | 2023-08-10T09:43:22 | 379,676,153 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import threading
from time import sleep
def thread_entry():
    """Worker body: print and increment a local counter once a second, ten times.

    Each thread executes this on its own frame, so `var` is private to the
    thread and the two workers do not disturb each other's counter.
    """
    # Demo question (translated from the original comment): will the local
    # variable `var` be scrambled by the two threads?  No -- locals are
    # per-thread.
    var = 1
    for i in range(10):
        # Bug fix: threading.currentThread() was deprecated and removed in
        # Python 3.12; current_thread() is the supported spelling.
        print('th #{} :{}'.format(threading.current_thread().ident, var))
        sleep(1)
        var += 1
print('main thread start.')
# Run the same worker body on two concurrent threads.
workers = [threading.Thread(target=thread_entry) for _ in range(2)]
for worker in workers:
    worker.start()
# Wait for both workers to finish before announcing shutdown.
for worker in workers:
    worker.join()
print('main thread end.')
| UTF-8 | Python | false | false | 462 | py | 273 | mt3.py | 251 | 0.636792 | 0.610849 | 0 | 23 | 17.434783 | 72 |
codonnell/fabian | 12,403,865,572,547 | a8a2415f012039ee52e3de48578678d15e5ae732 | d5efaa4b27aed0f341d6e51ef5f3731126fde9dd | /fabian.py | 19ae2bc7edf5f82687f50e1e0780c474a831e378 | [] | no_license | https://github.com/codonnell/fabian | 5f72139918bcb8e273f17e6bf02b2750d02a3962 | a301516e0fc33da0942b226e72b9678283827d8e | refs/heads/master | 2021-07-17T16:07:14.683442 | 2017-10-25T19:13:10 | 2017-10-25T19:13:10 | 108,305,394 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask, request
from flask_restful import Resource, Api
import pickle
import numpy as np
# Flask application configured from the fabian.cfg module, with overrides
# read from the file named by the FABIAN_SETTINGS environment variable.
app = Flask(__name__)
app.config.from_object('fabian.cfg')
app.config.from_envvar('FABIAN_SETTINGS')
api = Api(app)
# Load the trained classifier once at import time.
# NOTE(review): pickle.load executes arbitrary code from the file --
# CLASSIFIER must point at a trusted artifact.
with open(app.config['CLASSIFIER'],'rb') as f:
    clf = pickle.load(f)
class Difficulty(Resource):
    """REST resource returning the classifier's class probabilities for each
    row of the JSON 'data' matrix (NOTE: the payload arrives in the body of
    a GET request, which is unusual but what the original API exposed)."""

    def get(self):
        """Run `clf.predict_proba` on the posted rows and return the result."""
        data = request.get_json()['data']
        # Bug fix: `np.float` was removed from NumPy (1.24+); the builtin
        # `float` requests the same float64 dtype.
        X = np.array(data).astype(float)
        y = clf.predict_proba(X)
        # tolist() already yields plain (JSON-serialisable) nested lists.
        return {'data': y.tolist()}
# Expose the classifier at /difficulty/.
api.add_resource(Difficulty, '/difficulty/')
if __name__ == "__main__":
    # Development server only; DEBUG comes from the loaded configuration.
    app.run(debug=app.config['DEBUG'])
| UTF-8 | Python | false | false | 633 | py | 5 | fabian.py | 2 | 0.652449 | 0.652449 | 0 | 24 | 25.375 | 56 |
qateam123/eq | 2,087,354,108,413 | 5515654957b47c0fd2ded414bf1bd86b123837ff | c7ea36544ae5f7a8e34bf95b8c38240ca6ebda83 | /tests/app/questionnaire/test_location.py | 5fb3742dd84e288161014ef9323e174fdde6bfcc | [
"LicenseRef-scancode-proprietary-license",
"MIT"
] | permissive | https://github.com/qateam123/eq | 40daa6ea214e61b572affd0b5ab9990371c70be4 | 704757952323647d659c49a71975c56406ff4047 | refs/heads/master | 2023-01-11T01:46:11.174792 | 2017-02-09T13:46:56 | 2017-02-09T13:46:56 | 80,821,577 | 0 | 0 | MIT | false | 2023-01-04T14:31:08 | 2017-02-03T11:02:24 | 2017-02-03T14:19:35 | 2023-01-04T14:31:05 | 1,722 | 0 | 0 | 23 | JavaScript | false | false | import unittest
from app import create_app
from app.questionnaire.location import Location
class TestLocation(unittest.TestCase):
    """Unit tests for Location.url()."""

    def setUp(self):
        """Create an app and push its context so URL building against
        SERVER_NAME works outside a request."""
        self.app = create_app()
        self.app.config['SERVER_NAME'] = "test"
        self.app_context = self.app.app_context()
        self.app_context.push()

    def test_location_url(self):
        """A location renders into the expected absolute questionnaire URL."""
        loc = Location('some-group', 0, 'some-block')
        meta = dict(eq_id='1', collection_exercise_sid='999', form_type="some_form")
        self.assertEqual(
            loc.url(meta),
            "http://test/questionnaire/1/some_form/999/some-group/0/some-block",
        )
| UTF-8 | Python | false | false | 706 | py | 608 | test_location.py | 514 | 0.613314 | 0.59915 | 0 | 25 | 27.24 | 107 |
cupofjoey/UdemyPython1 | 1,743,756,766,380 | 2034a4fe2b4616ce3d2c0aa617df40150a3927c1 | 7df1c0a48cd89f7e1255a1672f577b9b8cbda1c8 | /firstMethod.py | 0a18ace2f6dcf15de7adcfc6e520821ba106051e | [] | no_license | https://github.com/cupofjoey/UdemyPython1 | 5c1d736ba3cd8d709bcc09c2c4bc96efe4bb9bc6 | af7c2bc0fb249eaf7f4cf0a0ae887ffd7db3f2e7 | refs/heads/master | 2021-04-15T14:06:53.850435 | 2018-04-30T02:49:16 | 2018-04-30T02:49:16 | 126,557,248 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
# First-functions exercise (companion to the numInputRando.py program).
# Two secret numbers the player tries to guess.
# NOTE(review): the prompt below asks for a number between 1 and 10, but
# these are drawn from 0..9 -- one of the two ranges is presumably wrong.
magic_numbers = [random.randint(0, 9), random.randint(0, 9)]
def ask_user_and_check_number():
    """Prompt for a number and report whether it is one of the magic numbers.

    Returns the result message; raises ValueError if the input is not an
    integer.
    """
    user_number = int(input("Enter a number between 1 and 10: "))
    # A single membership test replaces the original pair of complementary
    # `if` statements (the second test was always the negation of the first).
    if user_number in magic_numbers:
        return "You guessed the magic number! Congratulations!!!"
    return "You didn't guess the magic number. That's a bummer. Try it again."
# The caller decides how many chances the player gets.
def run_program_x_times(chances):
    """Play the guessing game `chances` times, labelling each attempt."""
    for attempt in range(chances):
        print("This is attempt {}".format(attempt))
        print(ask_user_and_check_number())
# Ask how many attempts to grant, then run the game that many times.
user_chances = int(input("Enter a number for how many chances you'd like: "))
run_program_x_times(user_chances)
| UTF-8 | Python | false | false | 909 | py | 14 | firstMethod.py | 13 | 0.691969 | 0.683168 | 0 | 22 | 40.318182 | 90 |
simonsobs/PSpipe | 1,984,274,895,434 | c18270f521a0dfda2f96c383d24c962c4d3675ca | ed3d3df8477daa6241c5bd64bf7e6063f53b5d34 | /project/old/analyse_sims/sim_spectra.py | ec1b97db12c78031419fbdfbba55e42e2d1e99a9 | [
"BSD-3-Clause"
] | permissive | https://github.com/simonsobs/PSpipe | 4d6bf5254e8ecef27a33164a43f10da258e30a42 | 8f83bd672c431903febcb0ee4a11b1c0eab36f1b | refs/heads/master | 2023-08-10T10:11:35.657314 | 2023-07-26T11:14:26 | 2023-07-26T11:14:26 | 157,757,989 | 14 | 12 | NOASSERTION | false | 2023-09-14T14:42:06 | 2018-11-15T18:54:50 | 2023-02-17T03:26:45 | 2023-09-14T14:42:05 | 126,005 | 12 | 9 | 5 | Python | false | false | """
This script is used to compute all power spectra of the SO simulations.
You can either write the spectra as separate files in a folder or put them all in a single hdf5 file.
The code will run as follow (example):
python sim_spectra.py global_sims_all.dict
"""
from pspy import pspy_utils, so_dict,so_map,so_mpi,sph_tools,so_mcm,so_spectra
import numpy as np, pylab as plt, healpy as hp
import os,sys
from pixell import curvedsky,powspec
import h5py
import time
# Read the run configuration from the dictionary file given on the command line.
d = so_dict.so_dict()
d.read_from_file(sys.argv[1])
# Unpack the settings used throughout the script.
experiment=d['experiment']
lmax=d['lmax']
niter=d['niter']
# NOTE(review): `type` shadows the builtin of the same name for the rest of
# the module; it holds the spectra type string passed to bin_spectra.
type=d['type']
binning_file=d['binning_file']
hdf5=d['hdf5']
writeAll=d['writeAll']
run_name=d['run_name']
def remove_mean(map, window, ncomp):
    """Subtract the window-weighted mean from each map component, in place.

    The temperature component (index 0) is weighted by the temperature
    window ``window[0]``; the remaining (polarisation) components use
    ``window[1]``.  Returns the same map object for convenience.

    Bug fix: the original looped ``for i in range(ncomp)`` but never used
    ``i`` -- it re-ran all three subtractions ncomp times.  Each component
    is now adjusted exactly once.
    """
    map.data[0] -= np.mean(map.data[0] * window[0].data)
    for i in range(1, ncomp):
        map.data[i] -= np.mean(map.data[i] * window[1].data)
    return map
# Input/output directory layout.
window_dir='window'
mcm_dir='mcm'
specDir='spectra'
# Create the spectra folder, or initialise the single hdf5 file that will
# hold every spectrum, depending on the requested output format.
if hdf5:
    spectra_hdf5 = h5py.File('%s.hdf5'%(specDir), 'w')
else:
    pspy_utils.create_directory(specDir)
# Three map components (T, Q, U) and the nine TEB spectra combinations.
ncomp=3
spectra=['TT','TE','TB','ET','BT','EE','EB','BE','BB']
spin_pairs=['spin0xspin0','spin0xspin2','spin2xspin0', 'spin2xspin2']
# Compute the alms of every split of every (experiment, frequency) map,
# after masking each split with its associated window function.
master_alms={}
nSplits={}
for exp in experiment:
    freqs=d['freq_%s'%exp]
    for fid,f in enumerate(freqs):
        maps=d['maps_%s_%s'%(exp,f)]
        nSplits[exp]=len(maps)
        # Separate windows for temperature (spin 0) and polarisation (spin 2).
        window_T=so_map.read_map('%s/window_T_%s_%s.fits'%(window_dir,exp,f))
        window_P=so_map.read_map('%s/window_P_%s_%s.fits'%(window_dir,exp,f))
        window_tuple=(window_T,window_P)
        count=0
        for map in maps:
            split=so_map.read_map(map)
            if split.ncomp==1:
                # Temperature-only map: promote to (T, Q, U) with zero polarisation.
                Tsplit=split.copy()
                split= so_map.healpix_template(ncomp=3,nside=split.nside)
                split.data[0]=Tsplit.data
                split.data[1]=Tsplit.data*0
                split.data[2]=Tsplit.data*0
            # Remove the window-weighted mean before the harmonic transform.
            split=remove_mean(split,window_tuple,ncomp)
            master_alms[exp,f,count]= sph_tools.get_alms(split,window_tuple,niter,lmax)
            count+=1
# Compute the spectra between every pair of (experiment, frequency) splits,
# deconvolving the mode-coupling matrix that accounts for the window function.
Db_dict={}
for id_exp1,exp1 in enumerate(experiment):
    freqs1=d['freq_%s'%exp1]
    nSplits1=nSplits[exp1]
    for id_f1,f1 in enumerate(freqs1):
        for id_exp2,exp2 in enumerate(experiment):
            freqs2=d['freq_%s'%exp2]
            nSplits2=nSplits[exp2]
            for id_f2,f2 in enumerate(freqs2):
                # Keep each unordered pair only once.
                if (id_exp1==id_exp2) and (id_f1>id_f2) : continue
                if (id_exp1>id_exp2) : continue
                for spec in spectra:
                    Db_dict[exp1,f1,exp2,f2,spec,'auto']=[]
                    Db_dict[exp1,f1,exp2,f2,spec,'cross']=[]
                prefix= '%s/%s_%sx%s_%s'%(mcm_dir,exp1,f1,exp2,f2)
                # The coupling matrices depend only on the pair, so read them
                # once here instead of once per split combination.
                mbb_inv,Bbl=so_mcm.read_coupling(prefix=prefix,spin_pairs=spin_pairs)
                for s1 in range(nSplits1):
                    for s2 in range(nSplits2):
                        l,ps_master= so_spectra.get_spectra(master_alms[exp1,f1,s1],master_alms[exp2,f2,s2],spectra=spectra)
                        spec_name='%s_%s_%s_%s_%dx%s_%s_%d'%(type,run_name,exp1,f1,s1,exp2,f2,s2)
                        lb,Db=so_spectra.bin_spectra(l,ps_master,binning_file,lmax,type=type,mbb_inv=mbb_inv,spectra=spectra)
                        if writeAll:
                            if hdf5:
                                so_spectra.write_ps_hdf5(spectra_hdf5,spec_name,lb,Db,spectra=spectra)
                            else:
                                so_spectra.write_ps(specDir+'/%s.dat'%spec_name,lb,Db,type,spectra=spectra)
                        for spec in spectra:
                            # Same split of the same experiment -> auto spectrum,
                            # everything else -> cross spectrum.
                            if (s1==s2) and (exp1==exp2):
                                if spec=='TT':
                                    print ('auto %s_%s split%d X %s_%s split%d'%(exp1,f1,s1,exp2,f2,s2))
                                Db_dict[exp1,f1,exp2,f2,spec,'auto']+=[Db[spec]]
                            else:
                                if spec=='TT':
                                    print ('cross %s_%s split%d X %s_%s split%d'%(exp1,f1,s1,exp2,f2,s2))
                                Db_dict[exp1,f1,exp2,f2,spec,'cross']+=[Db[spec]]
                # Combine the split spectra of THIS pair and write them to disk,
                # together with the noise power spectra defined as
                # (auto - cross) / nsplits.
                # Bug fix: this block used to sit outside the frequency loops,
                # so only the very last pair was ever combined and written.
                Db_dict_auto={}
                Db_dict_cross={}
                nb={}
                for spec in spectra:
                    Db_dict_cross[spec]=np.mean(Db_dict[exp1,f1,exp2,f2,spec,'cross'],axis=0)
                    spec_name_cross='%s_%s_%s_%sx%s_%s_cross'%(type,run_name,exp1,f1,exp2,f2)
                    if exp1==exp2:
                        Db_dict_auto[spec]=np.mean(Db_dict[exp1,f1,exp2,f2,spec,'auto'],axis=0)
                        spec_name_auto='%s_%s_%s_%sx%s_%s_auto'%(type,run_name,exp1,f1,exp2,f2)
                        # Bug fix: the denominator previously read nSplits[exp],
                        # a variable leaked from the alm loop above; the split
                        # count of this experiment is nSplits[exp1].
                        nb[spec]= (Db_dict_auto[spec]- Db_dict_cross[spec])/nSplits[exp1]
                        spec_name_noise='%s_%s_%s_%sx%s_%s_noise'%(type,run_name,exp1,f1,exp2,f2)
                if hdf5:
                    so_spectra.write_ps_hdf5(spectra_hdf5,spec_name_cross,lb,Db_dict_cross,spectra=spectra)
                    if exp1==exp2:
                        so_spectra.write_ps_hdf5(spectra_hdf5,spec_name_auto,lb,Db_dict_auto,spectra=spectra)
                        so_spectra.write_ps_hdf5(spectra_hdf5,spec_name_noise,lb,nb,spectra=spectra)
                else:
                    so_spectra.write_ps(specDir+'/%s.dat'%spec_name_cross,lb,Db_dict_cross,type,spectra=spectra)
                    if exp1==exp2:
                        so_spectra.write_ps(specDir+'/%s.dat'%spec_name_auto,lb,Db_dict_auto,type,spectra=spectra)
                        so_spectra.write_ps(specDir+'/%s.dat'%spec_name_noise,lb,nb,type,spectra=spectra)

if hdf5:
    spectra_hdf5.close()
| UTF-8 | Python | false | false | 6,758 | py | 207 | sim_spectra.py | 146 | 0.533146 | 0.510506 | 0 | 161 | 40.975155 | 133 |
PeteRichardson/pets | 6,880,537,613,511 | 146599f76a47a5539932027da516d93c1824e1f0 | dac8f0074c300b7a3f315dc41ef3f60ff8baca53 | /pets/snake.py | 07831f468e78c92bc5e992787d00bfdc9796608d | [] | no_license | https://github.com/PeteRichardson/pets | 4fcce098a64f3cd2528af909954bd076385c102e | 284ee42facdc438d8b7b413c8ad27f3c510a6249 | refs/heads/master | 2020-04-02T16:16:25.929973 | 2019-09-26T01:22:19 | 2019-09-26T01:22:19 | 154,605,883 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from pets import Pet
class Snake(Pet):
    """A pet snake; see `breeds` for the recognised varieties."""

    # Recognised snake breeds (order preserved from the original listing).
    breeds = [
        "King Cobra", "American Copperhead", "Black Mamba", "Corn Snake",
        "Rattlesnake", "Boa Constrictor", "Eastern Coral Snake",
        "Black Rat Snake", "Burmese Python", "Ball Python", "Royal Python",
        "Reticulated Python", "Garter Snake", "Green Anaconda",
        "Water Moccasin Snake", "Green Tree Python",
    ]

    species = "Snake"
    max_age = 25  # presumably consumed elsewhere in the pets package

    def __init__(self, name=None, age=None, breed=None):
        """Create a snake, forwarding the fixed species string to Pet."""
        super(Snake, self).__init__(Snake.species, name=name, age=age, breed=breed)

    @property
    def sound(self):
        """The noise a snake makes."""
        return "Hisss"
if __name__ == "__main__":
    # Tiny demo: create a snake and have it introduce itself.
    # NOTE(review): speak() is presumably provided by the Pet base class.
    striker = Snake("Striker", 6, "Rattlesnake")
    striker.speak()
nguyentuananh/OptimizeTaxiDestinationPredict | 14,224,931,711,055 | b13460f0317138b8f66482416902ab0332243dd6 | f09ccf9d67a6a34f7692b0f105b8a5df13f809a0 | /model/rnn_tgtcls.py | 0c0faf2e99e0a56790564261dfc91721ec9b8874 | [] | no_license | https://github.com/nguyentuananh/OptimizeTaxiDestinationPredict | 36dc724cbd36d7cc246aa49d20ec077e611e3375 | 14909ae3db8d3d194ba022970014cad358388bbc | refs/heads/master | 2020-04-06T08:49:41.939534 | 2018-11-13T04:13:13 | 2018-11-13T04:13:13 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy
import theano
from theano import tensor
from blocks.bricks.base import lazy
from blocks.bricks import Softmax
from model.rnn import RNN, Stream
class Model(RNN):
    """RNN whose output is interpreted over a fixed set of target clusters
    (config.tgtcls): the network scores each cluster and the prediction is
    the softmax-weighted combination of the cluster coordinates."""

    @lazy()
    def __init__(self, config, **kwargs):
        # One output unit per target cluster; the softmax below turns the
        # scores into mixture weights.
        super(Model, self).__init__(config, output_dim=config.tgtcls.shape[0], **kwargs)
        # Cluster coordinates stored as a theano shared variable.
        self.classes = theano.shared(numpy.array(config.tgtcls, dtype=theano.config.floatX), name='classes')
        self.softmax = Softmax()
        # Register the child brick with the blocks framework -- presumably
        # required for parameter discovery/initialisation.
        self.children.append(self.softmax)

    def process_rto(self, rto):
        # Softmax over cluster scores, then the weighted average of the
        # cluster coordinates.
        return tensor.dot(self.softmax.apply(rto), self.classes)
| UTF-8 | Python | false | false | 604 | py | 42 | rnn_tgtcls.py | 38 | 0.693709 | 0.692053 | 0 | 19 | 30.789474 | 108 |
mileshill/cs313e-life | 11,596,411,732,988 | 194e6a47372315b820c45ee77538ccaf213fa691 | 712ff45b676b6764fd2843c9f60c4decf7d320d9 | /RunLife.py | 768710c40b2d9a1920eb751475df44e93051651f | [] | no_license | https://github.com/mileshill/cs313e-life | 52665c9dcbc30bfd49356e539d76f2817e2cd8c1 | b9a327dc8f721a9c5270a5100449243a7e02f565 | refs/heads/master | 2021-01-21T03:51:02.810204 | 2014-11-25T13:35:06 | 2014-11-25T13:35:06 | 26,598,524 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# ---------------------------
# projects/collatz/RunLife.py
# Copyright (C) 2014
# Glenn P. Downing
# ---------------------------
# -------
# imports
# -------
from sys import stdout
from Life import *
# ---------------------
# Life ConwayCell 21x13
# ---------------------
print("*** Life ConwayCell 21x13 ***")
# Each ['c', row, col] places a ConwayCell; the trailing ['dim', rows, cols]
# entry sets the board size.
initial = [['c', 8, 4], ['c', 8, 5], ['c', 8, 6], ['c', 8, 7], ['c', 8, 8], ['c', 9, 7], ['c', 10, 6], ['c', 11, 5], ['c', 12, 4], ['c', 12, 5], ['c', 12, 6], ['c', 12, 7], ['c', 12, 8], ['dim', 21, 13]]
# NOTE(review): range(1,13) prints generations 1..12, but the note below
# claims every grid including 0 -- confirm which is intended.
to_print = list(range(1,13))
l = Life(initial, ".")
l.Evolve(12,to_print)
"""
Simulate 12 evolutions.
Print every grid (i.e. 0, 1, 2, 3, ... 12)
"""
# ---------------------
# Life ConwayCell 20x29
# ---------------------
print("*** Life ConwayCell 20x29 ***")
initial2 = [['c', 3, 3], ['c', 3, 4], ['c', 4, 3], ['c', 4, 11], ['c', 4, 12], ['c', 4, 13], ['c', 4, 20], ['c', 4, 21], ['c', 4, 22], ['c', 4, 23], ['c', 11, 4], ['c', 11, 11], ['c', 11, 21], ['c', 12, 4], ['c', 12, 5], ['c', 12, 11], ['c', 12, 12], ['c', 12, 13], ['c', 12, 20], ['c', 12, 21], ['c', 12, 22], ['c', 13, 5], ['dim', 20, 29]]
# Generations 0, 4, 8, ... 28, matching the note below.
to_print2 = list(range(0,29,4))
l2 = Life(initial2, ".")
l2.Evolve(28,to_print2)
"""
Simulate 28 evolutions.
Print every 4th grid (i.e. 0, 4, 8, ... 28)
"""
# ----------------------
# Life ConwayCell 109x69
# ----------------------
print("*** Life ConwayCell 109x69 ***")
initial3 = [['c', 34, 34], ['c', 35, 34], ['c', 36, 34], ['c', 37, 34], ['c', 38, 34], ['c', 40, 34], ['c', 41, 34], ['c', 42, 34], ['c', 43, 34], ['c', 44, 34], ['c', 46, 34], ['c', 47, 34], ['c', 48, 34], ['c', 49, 34], ['c', 50, 34], ['c', 52, 34], ['c', 53, 34], ['c', 54, 34], ['c', 55, 34], ['c', 56, 34], ['c', 58, 34], ['c', 59, 34], ['c', 60, 34], ['c', 61, 34], ['c', 62, 34], ['c', 64, 34], ['c', 65, 34], ['c', 66, 34], ['c', 67, 34], ['c', 68, 34], ['c', 70, 34], ['c', 71, 34], ['c', 72, 34], ['c', 73, 34], ['c', 74, 34], ['dim', 109, 69]]
to_print3 = [0,1,2,3,4,5,6,7,8,9,283,323,2500]
l3 = Life(initial3, ".")
l3.Evolve(2500, to_print3)
"""
Simulate 283 evolutions.
Print the first 10 grids (i.e. 0, 1, 2...9).
Print the 283rd grid.
Simulate 40 evolutions.
Print the 323rd grid.
Simulate 2177 evolutions.
Print the 2500th grid.
"""
# ----------------------
# Life FredkinCell 20x20
# ----------------------
print("*** Life FredkinCell 20x20 ****")
# 'f' entries place FredkinCells; note the "-" dead-cell glyph here.
initial4 = [['f', 9, 9], ['f', 9, 10],['f', 10, 9], ['f', 10, 10], ['dim', 20, 20]]
# NOTE(review): to_print4 is defined but the literal list is passed below.
to_print4 = [1,2,3,4,5]
l4 = Life(initial4,"-")
l4.Evolve(5,[1,2,3,4,5])
"""
Simulate 5 evolutions.
Print every grid (i.e. 0, 1, 2, ..., 5)
"""
| UTF-8 | Python | false | false | 2,619 | py | 4 | RunLife.py | 3 | 0.436044 | 0.275296 | 0 | 76 | 33.460526 | 554 |
yinyaonot/Databases | 6,330,781,824,115 | ea14091bba3e6ef204ceb087a8748bd0b3bf16d2 | 82ab2846b512fd5b94957c292ecbea52ecfeab97 | /HomeWork/10-30/python/filter.py | d55fa0ca653356680f57cffec428822f630fe8f7 | [] | no_license | https://github.com/yinyaonot/Databases | 2ac9a35834a26d9d097c512c7dcfff65fec6403c | 7a79acfe5612fa6a68783d2b2de6d1681ff90012 | refs/heads/master | 2020-04-03T19:29:48.840085 | 2018-10-31T12:05:21 | 2018-10-31T12:05:59 | 155,525,377 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
def insure_user(fun):
user = 'jiao'
pwd = 'jiao'
username = input('please input your name>>>')
password = input('please input your password>>>')
if username == user and password == pwd:
print('login success')
fun()
else:
print('error')
@insure_user
def count():
    """Benchmark a nested-loop sum, printing the elapsed time and the total."""
    def count_time(f):
        # Bug fix: time.clock() was removed in Python 3.8; perf_counter()
        # is the supported monotonic timer, and we print the difference
        # between the two readings (the original printed the raw second
        # clock() reading, which only approximated the elapsed time).
        start = time.perf_counter()
        result = f()
        elapsed = time.perf_counter() - start
        print('用时:', elapsed, '\n总计:', result)

    # Applying @count_time runs the benchmark immediately; count_time
    # returns None, so `timer_long` is not callable afterwards (this
    # mirrors the original behaviour).
    @count_time
    def timer_long():
        z = 0
        for i in range(1, 10000):
            for j in range(1, 10000):
                z += i + j
        return z
| UTF-8 | Python | false | false | 644 | py | 7 | filter.py | 3 | 0.504732 | 0.484227 | 0 | 30 | 20.133333 | 53 |
Russell-Ford/Python-Blackjack | 5,832,565,588,018 | 08f5ba8304688a7c31489a7a1d269495d84f0b5d | 83118f067378a8357f22ebaa401afdb0fa8c920f | /blackjack.py | f9fb431496d7aa28ed563a25e2d49739ea918491 | [] | no_license | https://github.com/Russell-Ford/Python-Blackjack | 28eef5b2a1c615a62a427b74a1dc0bfc0feb506c | bdd1edaac7b146f8ceab552569b43517f5f689b4 | refs/heads/master | 2021-01-13T11:28:37.454461 | 2016-12-21T04:55:07 | 2016-12-21T04:55:07 | 77,010,379 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
def main():
    """Keep starting new blackjack rounds until the user answers 'n'."""
    Game = BlackjackGame()  # NOTE: capitalised local name is unconventional
    while(boolResponse('Start new game? (y/n)')):
        Game.startNewGame()
class BlackjackGame():
    """Interactive console blackjack: a single player versus the dealer.

    Cards are plain strings ("Ace of Spades"); hand values follow standard
    blackjack rules, with aces demoted from 11 to 1 as needed.
    """

    def __init__(self):
        self.deck = []            # draw pile; rebuilt by refreshAndShuffleDeck()
        self.Dealer = Player()
        self.Player = Player()

    def startNewGame(self):
        """Play one round: deal two cards each, let the player hit, then resolve."""
        self.refreshAndShuffleDeck()
        self.resetPlayer(self.Player)
        self.resetPlayer(self.Dealer)
        self.dealCard(self.Player)
        self.dealCard(self.Player)
        self.dealCard(self.Dealer)
        self.dealCard(self.Dealer)
        playerHitting = True
        while playerHitting:
            # The dealer's first card stays hidden, casino style.
            print("Dealers hand: ?, ", self.Dealer.hand[1:])
            print("Your hand: ", self.Player.hand)
            print("Your hand value: ", self.Player.handValue)
            if boolResponse('Hit?: '):
                self.dealCard(self.Player)
            else:
                playerHitting = False
                print(self.Player.hand)
                print(self.Player.handValue)
            if self.Player.handValue > 21:
                print('Bust!')
                playerHitting = False
                # Zeroing the hand makes every dealer total beat it below.
                self.Player.handValue = 0
            elif self.Player.handValue == 21:
                playerHitting = False
        if self.Player.handValue <= 21:
            # Dealer draws until reaching at least the player's total.
            while self.Dealer.handValue < self.Player.handValue:
                self.dealCard(self.Dealer)
            print("Dealers hand: ", self.Dealer.hand)
            print("Dealers hand value: ", self.Dealer.handValue)
            if self.Dealer.handValue > 21:
                print('Dealer busts! You win!')
            elif self.Dealer.handValue > self.Player.handValue:
                print('Dealer wins')
            elif self.Dealer.handValue == self.Player.handValue:
                print('Tie')
            else:
                print('You win!')
        else:
            print('Dealer wins')

    def refreshAndShuffleDeck(self):
        """Rebuild a full shuffled 52-card deck.

        Bug fix: the original appended a fresh 52 cards to whatever was left
        of the previous deck, so the pile grew (with duplicates) each round.
        """
        suits = ['Spades', 'Clubs', 'Hearts', 'Diamonds']
        cards = ['Ace', 'King', 'Queen', 'Jack', '10', '9', '8', '7', '6', '5', '4', '3', '2']
        self.deck = [card + ' of ' + suit for suit in suits for card in cards]
        random.shuffle(self.deck)

    def resetPlayer(self, player):
        """Clear a participant's hand state for a new round."""
        player.hand = []
        player.handValue = 0
        player.numHighAces = 0

    def dealCard(self, player):
        """Move the top card to `player`, updating the hand value and aces."""
        card = self.deck.pop()
        player.hand.append(card)
        cardValue = self.analyzeCard(card)
        if cardValue == 11:
            player.numHighAces += 1
        player.handValue += cardValue
        self.evalAces(player)

    def evalAces(self, player):
        """Demote one ace from 11 to 1 if the hand has gone over 21."""
        if player.handValue > 21 and player.numHighAces != 0:
            player.handValue -= 10
            player.numHighAces -= 1

    def analyzeCard(self, card):
        """Return the blackjack value of a card string like '10 of Hearts'."""
        faceValues = {'Jack': 10, 'Queen': 10, 'King': 10}
        rank = card.split(' ')[0]
        if rank in faceValues:
            return faceValues[rank]
        if rank == 'Ace':
            return 11  # may later be demoted by evalAces
        return int(rank)
class Player():
    """Mutable per-participant state: a bankroll plus the current hand."""

    def __init__(self):
        self.hand = []          # card strings currently held
        self.handValue = 0      # blackjack total of the hand
        self.numHighAces = 0    # aces still counted as 11
        self.money = 5000       # starting bankroll
class Dealer():
    """Hand state for the dealer (no bankroll, unlike Player)."""

    def __init__(self):
        self.hand = []          # card strings currently held
        self.handValue = 0      # blackjack total of the hand
        self.numHighAces = 0    # aces still counted as 11
def boolResponse(string):
    """Prompt with `string` until the user answers y/Y (True) or n/N (False)."""
    while True:
        response = input(string)
        if response in ('y', 'Y'):
            return True
        if response in ('n', 'N'):
            # Bug fix: the original tested the misspelled name `respone`,
            # raising NameError whenever the user typed 'N'.
            return False
if __name__ == '__main__':
    main()  # run the interactive game only when executed as a script
mallocassi/Golden-Globes-NLP | 16,063,177,729,165 | 400505681076e45ccff905365107316438b3af93 | 71162a3cce73a76e09ec1e3cbad455e1798ed88a | /query.py | 8c263cd3d922d04e7a47f6e63210060b0b9a614b | [] | no_license | https://github.com/mallocassi/Golden-Globes-NLP | 93aff95bd3cebf9eb4aa4d3af6196d7744859ced | 8ce2e7e69be33689562cf3d73dde23561f91aad8 | refs/heads/master | 2018-01-10T04:44:05.903044 | 2016-03-13T17:05:44 | 2016-03-13T17:05:44 | 51,718,526 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!usr/bin/python
import nltk
import json
import re
from string import punctuation
from collections import Counter
# TODO: pick the best name for this stopword list.
# Globals shared by the query functions below.
award_specific_stopwords = ['rt', 'golden', 'globes', 'goldenglobes', 'best', 'director', 'actor', 'actress', 'movie', 'motion', 'picture', 'film', 'tv', 'series', 'performance', 'television', 'snub', 'wins', 'win', 'congrats', 'congratulations', 'season', 'animated', 'animation', 'feature', 'comedy', 'drama', 'musical', 'screenplay', 'award', 'awards']
stopwords = nltk.corpus.stopwords.words() + award_specific_stopwords
names = nltk.corpus.names.words()
# Bug fix: str.replace returns a new string, so the results must be assigned
# back.  '#' and '@' are dropped from the punctuation set so that hashtags
# and mentions survive token stripping.
punctuation = punctuation.replace('#', '').replace('@', '')
def query_name(tweets, pattern, g=None, n=None):
    """
    Querys the tweets of a specific year to get all sufficient ngrams.

    tweets is a dictionary object with the loaded json data from the tweets.

    pattern is a regex pattern you want to query. Pass in a compiled regex pattern
    for improved performance.

    g specifies which type of n-gram you want, i.e. g=1 for unigrams, 2 for
    bigrams, and 3 for trigrams. If g is not provided, the query returns all types
    of grams.

    n is the number of results you want to see. If not provided, it shows all.
    """
    unicount = Counter()
    bicount = Counter()
    tricount = Counter()
    # A candidate word looks like a capitalised name ("Xy...") that is not a
    # stopword.  Bug fix: the original predicate indexed w[0]/w[1] without a
    # length guard, raising IndexError on empty or one-character tokens and
    # relying on the broad `except IndexError` below to swallow it; the
    # explicit len(w) > 1 makes the skip deliberate.
    valid_word = lambda w: len(w) > 1 and w[0].isupper() and w.lower() not in stopwords and w[1].islower()
    strip_punctuation = lambda w: "".join(l for l in w if l not in punctuation)
    for tweet in tweets:
        text = tweet['text']
        # Skip tweets expressing negation or wishful thinking.
        if check_neg(text):
            continue
        if re.search(pattern, text):
            wordlist = text.split()
            for i, w in enumerate(wordlist):
                try:
                    word = strip_punctuation(w)
                    if valid_word(word):
                        unicount[word] += 1
                    nextword = strip_punctuation(wordlist[i+1])
                    if valid_word(nextword) and valid_word(word):
                        bicount[(word, nextword)] += 1
                    thirdword = strip_punctuation(wordlist[i+2])
                    if valid_word(nextword) and valid_word(word) and valid_word(thirdword):
                        tricount[(word, nextword, thirdword)] += 1
                except IndexError:
                    # wordlist[i+1] / wordlist[i+2] ran off the end of the tweet.
                    continue
    # Dealing with optional parameters
    toreturn = (unicount, bicount, tricount)
    if g is not None:
        toreturn = toreturn[g-1]
    if n is not None:
        if g is not None:
            toreturn = toreturn.most_common(n)
        else:
            toreturn = tuple(r.most_common(n) for r in toreturn)
    return toreturn
def query_movie(tweets, pattern, g=None, n=None):
"""
Querys the tweets of a specific year to get all sufficient ngrams.
tweets is a dictionary object with the loaded json data from the tweets.
pattern is a regex pattern you want to query. Pass in a compiled regex pattern
for improved performance.
n is the number of results you want to see. If not provided, it shows all.
"""
unicount = Counter()
bicount = Counter()
tricount = Counter()
valid_word = lambda w: w[0].isupper() and w.lower() not in stopwords and w[1].islower()
strip_punctuation = lambda w: "".join(l for l in w if l not in punctuation)
for tweet in tweets:
text = tweet['text']
if check_neg(text):
continue
if re.search(pattern, text):
wordlist = text.split()
for i, w in enumerate(wordlist):
try:
word = strip_punctuation(w)
if valid_word(word):
unicount[word]+=1
nextword = strip_punctuation(wordlist[i+1])
if valid_word(nextword) and valid_word(word):
bicount[(word, nextword)] += 1
thirdword = strip_punctuation(wordlist[i+2])
if valid_word(nextword) and valid_word(word) and valid_word(thirdword):
tricount[(word, nextword, thirdword)]+=1
except IndexError:
continue
# Dealing with optional parameters
toreturn = (unicount, bicount, tricount)
if g!=None:
toreturn = toreturn[g-1]
if n!=None:
if g!=None:
toreturn = toreturn.most_common(n)
else:
toreturn = tuple(r.most_common(n) for r in toreturn)
return toreturn
def check_neg(text):
'''Determines whether tweet has negations'''
stoplist = ['not', 'n\'t', 'should', 'wish', 'want', 'hope']
for w in stoplist:
if w in text:
return True
return False
def get_names(pattern, n):
"""
Returns a list of n names.
"""
#TODO FUCKING IMPLEMENT THIS
return []
| UTF-8 | Python | false | false | 4,846 | py | 5 | query.py | 4 | 0.5844 | 0.580479 | 0 | 132 | 35.712121 | 355 |
SilvesSun/learn-algorithm-in-python | 18,949,395,733,327 | 616c0573fdcfca68eaf2cfb54228c265ccc21aa4 | cbc829f5787b770c9184b91ee470d058cc4cbe65 | /array/在排序数组中查找元素的第一个和最后一个位置.py | aa5322b7207a4c2732dc3c48cce7158b8c018d39 | [] | no_license | https://github.com/SilvesSun/learn-algorithm-in-python | 58815e7e85e767cbc4a9c21e36e7bdede4f32bef | 5ba3465ba9c85955eac188e1e3793a981de712e7 | refs/heads/master | 2022-09-19T05:10:26.783943 | 2022-09-10T04:56:43 | 2022-09-10T04:56:43 | 115,470,779 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution(object):
def searchRange(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
for i in range(len(nums)):
if nums[i] == target:
left_idx = i
break
else:
return [-1, -1]
# find the index of the rightmost appearance of `target` (by reverse
# iteration). it is guaranteed to appear.
for j in range(len(nums) - 1, -1, -1):
if nums[j] == target:
right_idx = j
break
return [left_idx, right_idx]
s = Solution()
print(s.searchRange([1], 1))
| UTF-8 | Python | false | false | 685 | py | 284 | 在排序数组中查找元素的第一个和最后一个位置.py | 283 | 0.472993 | 0.462774 | 0 | 25 | 26.4 | 80 |
praekelt/django-atlas | 4,535,485,491,873 | 1284adca6a4d80939467b778fda72a30781ac8f4 | b5655ada14bf043bcc7d2c87b30fc5e15c7d8198 | /atlas/forms.py | 7e5b5f70cdcb80f5c4330a1020a95576ba11e22e | [] | no_license | https://github.com/praekelt/django-atlas | 762a6bb09f3241791f7a5e4812ff4195c3d91c1e | ecb7d14edbc2d2b7ac070d11a811882bd5f49216 | refs/heads/develop | 2020-04-06T06:56:20.135013 | 2015-08-27T08:39:28 | 2015-08-27T08:39:28 | 5,109,867 | 2 | 0 | null | false | 2013-02-20T16:55:01 | 2012-07-19T12:54:24 | 2013-02-20T16:52:24 | 2013-02-20T16:52:23 | 66,573 | null | 2 | 2 | Python | null | null | from django import forms
from django.contrib.gis.geos import fromstr
from atlas.models import Country, Region, City
from atlas.fields import LonLatWidget, CoordinateFormField
from atlas.utils import get_city
class SelectLocationForm(forms.Form):
location = CoordinateFormField(
required = True,
help_text="Select your location on the map",
)
origin = forms.CharField(widget=forms.HiddenInput)
required_css_class = 'required'
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
super(SelectLocationForm, self).__init__(*args, **kwargs)
self.fields['origin'].initial = self.request.GET.get('view', '/')
# set location initial value to either GPS coords or closest city
if 'location' in self.request.session:
location = self.request.session['location']
self.fields['location'].initial = location['position'] \
if 'position' in location else location['city'].coordinates
def save(self):
position = fromstr(self.cleaned_data['location'], srid=4326)
'''if 'REMOTE_ADDR' in self.request.META:
city = get_city(ip=self.request.META['REMOTE_ADDR'], position=position)
else:'''
city = get_city(position=position)
self.request.session['location'] = {'city': city, 'position': position}
def as_div(self):
return self._html_output(
normal_row=u'<div class="field"><div %(html_class_attr)s>%(label)s %(errors)s <div class="helptext">%(help_text)s</div> %(field)s</div></div>',
error_row=u'%s',
row_ender='</div>',
help_text_html=u'%s',
errors_on_separate_row=False
)
| UTF-8 | Python | false | false | 1,746 | py | 17 | forms.py | 12 | 0.626002 | 0.623711 | 0 | 42 | 40.571429 | 155 |
brandoneng000/LeetCode | 14,577,119,041,114 | 5b75eb56c6022a8e73dbf3a30ecdad38e42a0dd0 | 55815c281f6746bb64fc2ba46d074ca5af966441 | /medium/785.py | e7b4697ee1619c9eec104647e92cce84d9f3fd81 | [] | no_license | https://github.com/brandoneng000/LeetCode | def5107b03187ad7b7b1c207d39c442b70f80fc2 | c7a42753b2b16c7b9c66b8d7c2e67b683a15e27d | refs/heads/master | 2023-08-30T23:38:04.845267 | 2023-08-30T08:42:57 | 2023-08-30T08:42:57 | 199,584,584 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from typing import List
import collections
class Solution:
def isBipartite(self, graph: List[List[int]]) -> bool:
colors = [0] * len(graph)
for node in range(len(graph)):
if colors[node] != 0:
continue
q = collections.deque([node])
colors[node] = 1
while q:
cur = q.popleft()
for n in graph[cur]:
if colors[n] == 0:
colors[n] = -colors[cur]
q.append(n)
elif colors[n] != -colors[cur]:
return False
return True
def main():
sol = Solution()
print(sol.isBipartite([[1],[0,3],[3],[1,2]]))
print(sol.isBipartite([[1,2,3],[0,2],[0,1,3],[0,2]]))
print(sol.isBipartite([[1,3],[0,2],[1,3],[0,2]]))
if __name__ == '__main__':
main() | UTF-8 | Python | false | false | 886 | py | 1,007 | 785.py | 1,006 | 0.448081 | 0.416479 | 0 | 34 | 25.088235 | 58 |
zcribe/LinkShortener | 10,153,302,736,733 | f5ddd864c54cc7eb370feb3d59d7611553416d6a | 3061844bd60802e72ef969b326932be1d78a3219 | /LinkGenerator/apps.py | 0306e4c49611ea9bffda09686c704dc1910deaba | [] | no_license | https://github.com/zcribe/LinkShortener | 7ce945d91562e098ed61a4b86bbc6cc4f9c9b8f6 | 2b46c182b3158712788871fff7b5d6bbae5a98e9 | refs/heads/master | 2022-06-02T07:02:46.947816 | 2020-11-17T07:43:23 | 2020-11-17T07:43:23 | 181,020,466 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.apps import AppConfig
class LinkgeneratorConfig(AppConfig):
name = 'LinkGenerator'
| UTF-8 | Python | false | false | 101 | py | 15 | apps.py | 9 | 0.782178 | 0.782178 | 0 | 5 | 19.2 | 37 |
grabekm90/movies-api | 12,773,232,744,456 | ace650ce9e73092d293a3876a55f79c2b0647a46 | 26095e543a73374f33f0a6f082eab00071670fed | /movies/movies/settings/production.py | 499bb35586d34ccd7b8111e009fb0cd17bb39a29 | [] | no_license | https://github.com/grabekm90/movies-api | 912debeccc9ab37b7c4c2f5d907dd747804d8e50 | 57a52c0b98629f1609e7ffaa565f772f2e80b3c9 | refs/heads/master | 2021-01-01T04:48:50.532790 | 2017-07-16T18:24:39 | 2017-07-16T18:24:39 | 97,254,028 | 0 | 0 | null | false | 2017-07-14T19:48:32 | 2017-07-14T16:37:08 | 2017-07-14T18:59:58 | 2017-07-14T19:48:32 | 8 | 0 | 0 | 0 | Python | null | null | from . import *
DEBUG = os.getenv('DJANGO_DEBUG') == '1'
ALLOWED_HOSTS = ['*'] # FIXME
import dj_database_url
DATABASES = {
'default': dj_database_url.config(conn_max_age=500)
}
SECRET_KEY = os.environ['DJANGO_SECRET_KEY']
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
#STATICFILES_DIRS = (
# os.path.join(BASE_DIR, 'static'),
#)
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
| UTF-8 | Python | false | false | 450 | py | 3 | production.py | 2 | 0.682222 | 0.673333 | 0 | 22 | 19.454545 | 72 |
La0bALanG/Spider_Codes | 12,472,585,032,451 | f2a0213999fb709985815794683af98d21469d06 | 069e82d0400913682bf5ff1ffa60a62422610b83 | /demo13_JinRiTouTiao_news_OOP.py | 96f7bea1dece573aa58c5dd9cf545affaab82bef | [] | no_license | https://github.com/La0bALanG/Spider_Codes | ae9aa6a97c15d0aeeb72702f6a6b3b8caa7b107d | 72ce5b72b0baf1de7d8fecac4805d4711dc6409f | refs/heads/master | 2022-12-30T13:06:13.937923 | 2020-10-13T09:18:54 | 2020-10-13T09:18:54 | 298,497,349 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding:utf-8 _*-
"""
@version:
author:安伟超
@time: 2020/09/16
@file: demo13_JinRiTouTiao_news_OOP.py.py
@environment:virtualenv
@email:awc19930818@outlook.com
@github:https://github.com/La0bALanG
@requirement:定时爬取今日头条推荐类目新闻
"""
"""
var t = 1 === e.page ? e.options.defaultPageSize : e.options.pageSize
, o = {
pn: e.page,
rn: t,
ts: +new Date,
app_from: "indexnew_feed"
}
https://www.hao123.com/feedData/data?type=rec&callback=jQuery11010899572699889351_1600221414626&pn=6&rn=10&ts=1600221730804&app_from=indexnew_feed&_=1600221414637
https://www.hao123.com/feedData/data?type=rec&callback=jQuery11010899572699889351_1600221414626&pn=5&rn=10&ts=1600221728916&app_from=indexnew_feed&_=1600221414636
""" | UTF-8 | Python | false | false | 876 | py | 34 | demo13_JinRiTouTiao_news_OOP.py | 31 | 0.634204 | 0.454869 | 0 | 29 | 28.068966 | 162 |
superdev0505/mtp-web | 2,190,433,346,988 | 699f0d1b344a46579b0e50d54184a3a841b948dd | 487bdaeed33ecad9f1f210f7f82103c4e09e07db | /photographer/views.py | 234a0c7953b2307fc6f54b699ca5a4c2d7806d7a | [
"MIT"
] | permissive | https://github.com/superdev0505/mtp-web | e1a56eb3426d0739b247598ae908747e60a8de67 | 8288765a89daaa7b02dfd7e78cc51c4f12d7fcce | refs/heads/master | 2022-12-14T00:28:36.521604 | 2020-09-11T21:12:20 | 2020-09-11T21:12:20 | 295,288,384 | 0 | 0 | MIT | true | 2020-09-14T02:59:07 | 2020-09-14T02:59:06 | 2020-09-11T21:12:31 | 2020-09-13T14:02:12 | 11,648 | 0 | 0 | 0 | null | false | false | ## Python packages
from datetime import datetime
import json
## Django Packages
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.shortcuts import redirect
from django.utils import timezone
from django.http import (
Http404, HttpResponse, JsonResponse, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.core import serializers
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.contrib import messages
from django.template import RequestContext
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.template.loader import render_to_string
from django.contrib.gis.geos import Point, Polygon, MultiPolygon, LinearRing, LineString
## Custom Libs ##
from lib.functions import *
## Project packages
from accounts.models import CustomUser
## App packages
# That includes from .models import *
from .forms import *
############################################################################
# def handler404(request, *args, **argv):
# response = render(request, '404.html', {})
# response.status_code = 404
# return response
# def handler500(request, *args, **argv):
# response = render(request, '500.html', {})
# response.status_code = 500
# return response
############################################################################
MAIN_PAGE_DESCRIPTION = "Find or offer help on image collection projects to create fresh street level map data in locations where it's needed for Google Street View, Mapillary, and more..."
JOB_PAGE_DESCRIPTION = ""
PHOTOGRAPHER_PAGE_DESCRIPTION = ""
############################################################################
def index(request):
return redirect('photographer.photographer_list', page=1)
@my_login_required
def photographer_create(request):
if request.method == "POST":
form = PhotographerForm(request.POST)
if form.is_valid():
photographer = form.save(commit=False)
photographer.user = request.user
geometry = json.loads(photographer.geometry)
multipoly = MultiPolygon()
for geo in geometry:
coordinates = geo['geometry']['coordinates'][0]
lineString = LineString()
firstPoint = None
for i in range(len(coordinates)):
coor = coordinates[i]
if i == 0:
firstPoint = Point(coor[0], coor[1])
continue
point = Point(coor[0], coor[1])
if i == 1:
lineString = LineString(firstPoint.coords, point.coords)
else:
lineString.append(point.coords)
polygon = Polygon(lineString.coords)
multipoly.append(polygon)
photographer.multipolygon = multipoly
for geo in geometry:
geo['properties']['photographer_id'] = str(photographer.unique_id)
photographer.geometry = json.dumps(geometry)
photographer.save()
messages.success(request, 'A photographer was created successfully.')
return redirect('photographer.my_photographer_list')
else:
form = PhotographerForm()
content = {
'form': form,
'pageName': 'Create Photographer',
'pageTitle': 'Create Photographer'
}
return render(request, 'photographer/create.html', content)
@my_login_required
def photographer_hire(request, unique_id):
photographer = get_object_or_404(Photographer, unique_id=unique_id)
if request.method == "POST":
form = PhotographerEnquireForm(request.POST)
if form.is_valid():
photographerEnquire = form.save(commit=False)
photographerEnquire.photographer = photographer
photographerEnquire.user = request.user
photographerEnquire.email = request.user.email
photographerEnquire.save()
try:
# send email to applicant
subject = photographerEnquire.subject + ' has been sent'
html_message = render_to_string(
'emails/photographer/enquire_applicant.html',
{'subject': subject, 'photographer': photographer, 'photographer_enquire': photographerEnquire},
request
)
send_mail_with_html(subject, html_message, request.user.email)
# send email to photographer creator
subject = request.user.username + ' hired for "' + photographer.name + '".'
html_message = render_to_string(
'emails/photographer/enquire_creator.html',
{'subject': subject, 'photographer': photographer, 'photographer_enquire': photographerEnquire},
request
)
send_mail_with_html(subject, html_message, photographer.user.email)
except:
print('email sending error!')
messages.success(request, 'You have succeeded in hiring photographers.')
return redirect('photographer.photographer_list')
else:
form = PhotographerEnquireForm()
content = {
'form': form,
'photographer': photographer,
'pageName': 'Hire Photographer',
'PageTitle': photographer.name + ' - Hire Photographer'
}
return render(request, 'photographer/hire.html', content)
@my_login_required
def photographer_edit(request, unique_id):
photographer = get_object_or_404(Photographer, unique_id=unique_id)
if request.method == "POST":
form = PhotographerForm(request.POST, instance=photographer)
if form.is_valid():
photographer = form.save(commit=False)
photographer.user = request.user
photographer.updated_at = datetime.now()
photographer.save()
geometry = json.loads(photographer.geometry)
multipoly = MultiPolygon()
for geo in geometry:
coordinates = geo['geometry']['coordinates'][0]
lineString = LineString()
firstPoint = None
for i in range(len(coordinates)):
coor = coordinates[i]
if i == 0:
firstPoint = Point(coor[0], coor[1])
continue
point = Point(coor[0], coor[1])
if i == 1:
lineString = LineString(firstPoint.coords, point.coords)
else:
lineString.append(point.coords)
polygon = Polygon(lineString.coords)
multipoly.append(polygon)
photographer.multipolygon = multipoly
for geo in geometry:
geo['properties']['photographer_id'] = str(photographer.unique_id)
photographer.geometry = json.dumps(geometry)
photographer.save()
messages.success(request, 'Photographer "%s" is updated successfully.' % photographer.name)
return redirect('photographer.index')
else:
form = PhotographerForm(instance=photographer)
content = {
'form': form,
'pageName': 'Edit Photographer',
'photographer': photographer,
'pageTitle': photographer.name + ' - Edit Photographer'
}
return render(request, 'photographer/edit.html', content)
@my_login_required
def my_photographer_delete(request, unique_id):
photographer = get_object_or_404(Photographer, unique_id=unique_id)
if photographer.user == request.user:
photographer.delete()
messages.success(request, 'Photographer "%s" is deleted successfully.' % photographer.name)
else:
messages.error(request, "This user hasn't permission")
return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
def photographer_list(request, page):
photographers = None
if request.method == "GET":
form = PhotographerSearchForm(request.GET)
if form.is_valid():
type = form.cleaned_data['type']
captureMethod = form.cleaned_data['capture_method']
image_quality = form.cleaned_data['image_quality']
photographers = Photographer.objects.all().filter(is_published=True)
if len(type) > 0:
photographers = photographers.filter(type__overlap=type)
if len(captureMethod) > 0:
photographers = photographers.filter(capture_method__overlap=captureMethod)
if (image_quality != ''):
photographers = photographers.filter(image_quality=image_quality)
if photographers == None:
photographers = Photographer.objects.all().filter(is_published=True)
form = PhotographerSearchForm()
paginator = Paginator(photographers.order_by('-created_at'), 10)
try:
pPhotographers = paginator.page(page)
except PageNotAnInteger:
pPhotographers = paginator.page(1)
except EmptyPage:
pPhotographers = paginator.page(paginator.num_pages)
first_num = 1
last_num = paginator.num_pages
if paginator.num_pages > 7:
if pPhotographers.number < 4:
first_num = 1
last_num = 7
elif pPhotographers.number > paginator.num_pages - 3:
first_num = paginator.num_pages - 6
last_num = paginator.num_pages
else:
first_num = pPhotographers.number - 3
last_num = pPhotographers.number + 3
pPhotographers.paginator.pages = range(first_num, last_num + 1)
pPhotographers.count = len(pPhotographers)
content = {
'photographers': pPhotographers,
'form': form,
'pageName': 'Photographers',
'pageTitle': 'Photographers',
'pageDescription': MAIN_PAGE_DESCRIPTION
}
return render(request, 'photographer/list.html', content)
def photographer_detail(request, unique_id):
photographer = get_object_or_404(Photographer, unique_id=unique_id)
if (not request.user.is_authenticated or request.user != photographer.user) and not photographer.is_published:
messages.success(request, "You can't access this photographer.")
return redirect('photographer.photographer_list')
form = PhotographerSearchForm()
geometry = json.dumps(photographer.geometry)
if photographer.user == request.user:
is_mine = True
else:
is_mine = False
photographer.options = photographer.getCaptureType() + ', ' + photographer.getCaptureMethod()
hire_url = reverse('photographer.photographer_hire', kwargs={'unique_id': str(photographer.unique_id)})
photographer_html_detail = render_to_string('photographer/modal_detail.html',
{'photographer': photographer, 'hire_url': hire_url,
'is_mine': is_mine})
return render(request, 'photographer/photographer_detail.html',
{
'photographer': photographer,
'photographer_html_detail': photographer_html_detail,
'form': form,
'geometry': geometry,
'pageName': 'Photographer Detail',
'pageTitle': photographer.name + ' - Photographer'
})
@my_login_required
def my_photographer_list(request, page):
photographers = None
if request.method == "GET":
form = PhotographerSearchForm(request.GET)
if form.is_valid():
type = form.cleaned_data['type']
captureMethod = form.cleaned_data['capture_method']
photographers = Photographer.objects.all().filter(user=request.user)
if len(type) > 0:
photographers = photographers.filter(type__overlap=type)
if len(captureMethod) > 0:
photographers = photographers.filter(capture_method__overlap=captureMethod)
if photographers == None:
photographers = Photographer.objects.all().filter(user=request.user)
form = PhotographerSearchForm()
paginator = Paginator(photographers.order_by('-created_at'), 10)
try:
pPhotographers = paginator.page(page)
except PageNotAnInteger:
pPhotographers = paginator.page(1)
except EmptyPage:
pPhotographers = paginator.page(paginator.num_pages)
first_num = 1
last_num = paginator.num_pages
if paginator.num_pages > 7:
if pPhotographers.number < 4:
first_num = 1
last_num = 7
elif pPhotographers.number > paginator.num_pages - 3:
first_num = paginator.num_pages - 6
last_num = paginator.num_pages
else:
first_num = pPhotographers.number - 3
last_num = pPhotographers.number + 3
pPhotographers.paginator.pages = range(first_num, last_num + 1)
pPhotographers.count = len(pPhotographers)
content = {
'photographers': pPhotographers,
'form': form,
'pageName': 'My Photographers',
'pageTitle': 'My Photographers',
'pageDescription': MAIN_PAGE_DESCRIPTION
}
return render(request, 'photographer/list.html', content)
def ajax_photographer_detail(request, unique_id):
photographer = Photographer.objects.get(unique_id=unique_id)
if photographer.user == request.user:
is_mine = True
else:
is_mine = False
serialized_obj = serializers.serialize('json', [photographer, ])
data = {
'photographer': json.loads(serialized_obj)
}
if not data['photographer']:
data['error_message'] = "The photographer id doesn't exist."
else:
photographer.options = photographer.getCaptureType() + ', ' + photographer.getCaptureMethod()
hire_url = reverse('photographer.photographer_hire', kwargs={'unique_id': str(photographer.unique_id)})
data['photographer_html_detail'] = render_to_string('photographer/modal_detail.html',
{'photographer': photographer, 'hire_url': hire_url,
'is_mine': is_mine})
return JsonResponse(data) | UTF-8 | Python | false | false | 14,436 | py | 153 | views.py | 97 | 0.603699 | 0.598088 | 0 | 374 | 37.601604 | 189 |
lucianorc/testdriven-app | 8,701,603,786,057 | 6dcc8842e32e264135cbc11aa862ca64f0d94999 | c6f8ba216c3f8fc5f1aeb6fd94594e369bd1d6ce | /services/users/project/__init__.py | 514d5cea11e34abff7e7e1d1805bf36ae156aa9c | [] | no_license | https://github.com/lucianorc/testdriven-app | 55ef1590ffb1b75da7e83960ccd18260d2b7d16b | da7a773bb80edb32857e1e4383a4bdb97e61bd36 | refs/heads/master | 2023-01-04T21:42:16.042176 | 2019-06-06T14:11:26 | 2019-06-06T14:11:26 | 189,145,471 | 0 | 0 | null | false | 2023-01-03T23:28:43 | 2019-05-29T03:39:44 | 2019-06-06T14:13:02 | 2023-01-03T23:28:41 | 3,469 | 0 | 0 | 23 | Python | false | false | import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_debugtoolbar import DebugToolbarExtension
from flask_cors import CORS
from flask_migrate import Migrate
db = SQLAlchemy()
toolbar = DebugToolbarExtension()
cors = CORS()
migrate = Migrate()
def create_app():
app = Flask(__name__)
app_settings = os.getenv('APP_SETTINGS')
app.config.from_object(app_settings)
db.init_app(app)
toolbar.init_app(app)
cors.init_app(app)
migrate.init_app(app, db)
from project.api.users import users_blueprint
app.register_blueprint(users_blueprint)
@app.shell_context_processor
def cxt():
return {
'app': app,
'db': db
}
return app
| UTF-8 | Python | false | false | 744 | py | 8 | __init__.py | 2 | 0.670699 | 0.670699 | 0 | 37 | 19.108108 | 52 |
lixiang2017/leetcode | 9,328,669,003,047 | 5c47ec0827aa1e04ee2ddb84d666e354d735b17f | 7950c4faf15ec1dc217391d839ddc21efd174ede | /explore/2021/january/Word_Ladder.2.py | 479469bcaca26a5f9a716e99ed0ef451b0c5ae57 | [] | no_license | https://github.com/lixiang2017/leetcode | f462ecd269c7157aa4f5854f8c1da97ca5375e39 | f93380721b8383817fe2b0d728deca1321c9ef45 | refs/heads/master | 2023-08-25T02:56:58.918792 | 2023-08-22T16:43:36 | 2023-08-22T16:43:36 | 153,090,613 | 5 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
BFS - Single directional
Success
Details
Runtime: 108 ms, faster than 77.91% of Python online submissions for Word Ladder.
Memory Usage: 18.5 MB, less than 12.12% of Python online submissions for Word Ladder.
ref:
https://leetcode.com/problems/word-ladder/solution/
'''
from collections import defaultdict
class Solution(object):
def ladderLength(self, beginWord, endWord, wordList):
"""
:type beginWord: str
:type endWord: str
:type wordList: List[str]
:rtype: int
"""
if not endWord: return 0
L = len(beginWord)
all_combo_dict = defaultdict(list)
for word in wordList:
for i in range(L):
intermediate = word[:i] + '*' + word[i + 1:]
all_combo_dict[intermediate].append(word)
curs = deque([(beginWord, 1)])
visited = {beginWord: True}
while curs:
curWord, level = curs.popleft()
if curWord == endWord:
return level
for i in range(L):
intermediate = curWord[:i] + '*' + curWord[i + 1:]
for word in all_combo_dict[intermediate]:
if word not in visited:
curs.append((word, level + 1))
visited[word] = True
return 0
| UTF-8 | Python | false | false | 1,404 | py | 3,076 | Word_Ladder.2.py | 2,900 | 0.524929 | 0.510684 | 0 | 48 | 27.979167 | 85 |
billdonghp/Python | 15,144,054,712,809 | 565ecf73628404de2539879964f3f4bd26be67e2 | 22f2b7dbe7796f4786dbffb61c58221f48b2bebe | /Bastard.py | 3e03cbe50153a6e5c42d258554993c8d164db4e0 | [] | no_license | https://github.com/billdonghp/Python | 726b1f80acb4c8399441055466f8d3a67f5eb1a7 | 30650f3a2c5ce2aa64322a81d845f4ecc8604338 | refs/heads/master | 2023-01-02T00:01:03.454414 | 2020-10-27T14:02:58 | 2020-10-27T14:02:58 | 259,535,815 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
有人的地方就有坏蛋
坏蛋类用以存储坏蛋的信息
class Bastard(Person) 继承Person类
super().__init__(name,age,rmb)
类的继承和方法覆写
'''
from Person import Person
class Bastard(Person):
#恶习
bayHobby = None
#覆写父类的构造方法
def __init__(self,name,age,rmb,badHobby):
#调用父类构造方法
super().__init__(name,age,rmb)
print("混蛋的初始化")
self.badHobby = badHobby
#覆写父类的方法
def tell(self):
print("我是你老子:%s,此树是我裁,此路是我开,要想过此路留下买路财!"%(self.name))
#设置器
def setBadHobby(self,badHobby):
self.badHobby = badHobby
def getBadHobby(self):
return self.badHobby
#特有方法:作恶
def doBadThings(self):
print("铁牛专好%s"%(self.badHobby))
if __name__ == '__main__':
b = Bastard("单阿信",35,500,"喝酒、杀人!")
b.tell()
b.doBadThings()
b.setBadHobby("杀进东京,夺了鸟位")
print(b.getBadHobby())
| UTF-8 | Python | false | false | 1,074 | py | 36 | Bastard.py | 35 | 0.601449 | 0.595411 | 0 | 40 | 19.7 | 61 |
ohern017/IR_TokenizationAndRetrieval | 14,980,845,932,072 | 43717edfe071c412be49994921a3de81ab22def1 | a6c8ca1bd6cb7316ed6cc77b1fe5b26cbd5acec9 | /LM-bigram.py | 7ed5340292a492840bc693eefb585a0647b69bf9 | [
"MIT"
] | permissive | https://github.com/ohern017/IR_TokenizationAndRetrieval | 1011f5b4d1a3364dc995e8623b25752783687077 | 1e5bf32bae0f6fa0111f7d46d4ae79f3ee5b85d4 | refs/heads/main | 2023-05-07T19:41:25.220240 | 2021-05-29T20:29:27 | 2021-05-29T20:29:27 | 372,062,679 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import re
import os
import sys
import math
from irfunctions import tokenize
from parsing import termID, termInfo, docID, docCorpus, docTerms
def bigram(query, document):
##need tf, length of document, total distinct words
vocabSize = len(termID.keys())
querySize = len(query)
p_laplace = []
for i in range(len(query) - 1):
queryWord = query[i]
queryWord2 = query[i+1]
positions = []
if queryWord in termInfo:
docIndex = 3
for z in range(3, len(termInfo[queryWord])):
if document == termInfo[queryWord][z][0]:
docIndex = z
positions = termInfo[queryWord][z][2]
docName = termInfo[queryWord][docIndex][0]
for z in range(len(docTerms[docName])-1):
if docTerms[docName][z] == queryWord:
if docTerms[docName][z+1] == queryWord2:
docLength = docCorpus[docName][0]
termDocFreq = termInfo[queryWord][docIndex][1]
tf = termDocFreq / docCorpus[docName][0] ###
p_laplace.append((tf + 1)/(docLength + vocabSize))
lm_laplace = sum(math.log(number) for number in p_laplace)
return lm_laplace
#postingList['token ID'] = [word, # of distinct docs, total frequency, {posting list}]
#postinglist = (doc#, frequency in document, [ position1, position2, .....] )
queryList = {}
inFile = open(sys.argv[1], "r")
while True:
line = inFile.readline()
if not line:
break
queryText = tokenize(line)
queryList[queryText[0]] = queryText[1:]
inFile.close()
outFile = open(sys.argv[2], 'w')
for key in queryList:
queryScores = {}
rank = 1
for doc in docID:
queryScores[doc] = bigram(queryList[key], doc)
sortedScores = sorted(queryScores.items(), key=lambda x:x[1], reverse = False)
#output documents with their ranks
for i in range(10):
outFile.write(str(key) + ' ' + 'Q0' + ' ' + str(sortedScores[i][0]) + ' ' + str(rank) + ' ' + str(sortedScores[i][1]) + ' ' + 'Exp' + '\n')
rank+=1
outFile.close() | UTF-8 | Python | false | false | 1,968 | py | 11 | LM-bigram.py | 5 | 0.63313 | 0.618394 | 0 | 68 | 26.970588 | 141 |
wayne927/pasta | 19,361,712,570,764 | 3184cc8aba8260297cfa8f9c3a26bfa1c58a91c6 | d8c8f8344fcbc30c052819deb47635c4bcc34f57 | /pasta/gcodeFormat.py | 96e561b4c53df7dc140de89037150cedab6f6a98 | [] | no_license | https://github.com/wayne927/pasta | cd46f0bf50023097f495ac4bee4df90529064bd1 | 80339408b5e4b8e76daf52bbd188162a1398d935 | refs/heads/master | 2023-01-09T01:53:11.061755 | 2022-12-30T08:47:03 | 2022-12-30T08:47:03 | 265,163,487 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
def run(lines):
bigLine = ''
for line in lines:
words = (line.split(' '))[1:]
thisLine = ' '.join(words)
words = thisLine.split('\n')
thisLine = ''.join(words)
bigLine = bigLine + thisLine
bigLine = (''.join(bigLine))[1:-3]
out = '\n'.join(bigLine.split('\\\\n'))
return out
if __name__ == "__main__":
filename = sys.argv[1]
infile = open(filename, 'r')
lines = infile.readlines()
dat = run(lines)
print(dat)
| UTF-8 | Python | false | false | 531 | py | 14 | gcodeFormat.py | 10 | 0.499058 | 0.491525 | 0 | 29 | 17.310345 | 43 |
undx/undx-dots | 15,350,213,141,840 | 371be47f1d1b939d9d6385b95363ab7543803add | 8d021b52ef4fd8455066634395acbb4607d26a9a | /www/.local/share/qutebrowser/userscripts/qute-pass-add | 99c528ba6dbdacac530e8cc4e3a65b23a51304b6 | [] | no_license | https://github.com/undx/undx-dots | eb46e0fea6bd364adb325814eff547a75dc54880 | ecd1c53a73cfb226dc689f4ce91d36fef86c950d | refs/heads/master | 2020-06-12T11:43:49.590455 | 2020-04-12T13:36:13 | 2020-04-12T13:36:13 | 194,288,362 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
""" Adds a password to 'pass' storage.
originaly from https://github.com/bbugyi200/scripts/blob/master/main/qute-pass-add"""
import argparse
import os
import platform
import getpass
import subprocess
import re
os.environ['EDITOR'] = '/usr/bin/vim'
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('raw_url', help="The URL of the website (before being cleaned).")
args = parser.parse_args()
url = re.sub('\.(com|edu|gov|org|io|fr|en|pt|re|it)/.*$', r'.\1', args.raw_url)
url = re.sub('https://', '', url)
pass_url_dir = '/home/' + getpass.getuser() + '/.password-store/WebSites/' + url
options = ''
if os.path.isdir(pass_url_dir):
for filename in os.listdir(pass_url_dir):
options += re.sub('\.gpg', '', filename) + '\n'
rofi = 'rofi -dmenu'
command = 'printf "{{options}}" | {} -p "Username to add/edit"'.format(rofi)
username = subprocess.check_output(command.format(options=options), shell=True).decode('utf-8')
url = 'WebSites/' + url + '/' + username
if username in options:
subprocess.call(['st', '-n', 'qute-editor', '-e', 'zsh', '-c', 'gopass edit {}'.format(url)])
else:
subprocess.call(['st', '-n', 'qute-editor', '-e', 'zsh', '-c', 'gopass insert {}'.format(url)])
| UTF-8 | Python | false | false | 1,261 | 52 | qute-pass-add | 34 | 0.645519 | 0.640761 | 0 | 38 | 32.131579 | 99 |
|
HadrienMP/random-blame | 7,971,459,324,395 | 9e868ca9e74e1f90e5b0fd5f3e1706c8a4776d13 | 34c8df7d21acd89d3b9d3f087ec03165d0e0295d | /hipchat_client.py | ce1079fced03f7092f08d38421bce1a62bea8139 | [] | no_license | https://github.com/HadrienMP/random-blame | 7e8022b758b9ca0e6d144ff89db3c03c480b1de7 | f7d522d3e22cf2fccce9c9e9455addb0effa8c3b | refs/heads/master | 2021-01-12T05:34:56.920000 | 2016-12-22T14:29:46 | 2016-12-22T14:29:46 | 77,133,136 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
def get_room_members(room):
    """Return the @mention names of a HipChat room's participants.

    :param room: room id or name, interpolated into the HipChat v2 API URL.
    :returns: list of mention-name strings, or [] on any non-200 response
        (the failing URL, status code and body are printed for debugging).
    """
    r = _authenticated_get('https://api.hipchat.com/v2/room/{room}/participant'.format(room=room))
    # '==' instead of 'is': identity comparison against an int literal only
    # works by accident of CPython's small-int caching.
    if r.status_code == 200:
        return [member['mention_name'] for member in r.json()['items']]
    print(r.url)
    print(r.status_code)
    print(r.text)
    return []
def _authenticated_get(url):
    """GET *url* with the HipChat auth token appended as a query parameter."""
    # SECURITY NOTE(review): the API token is hard-coded in source control; it
    # should live in an environment variable or config file, and the exposed
    # token should be rotated.
    return requests.get(url + '?auth_token=MX2thj0LKMXwgSE0FxjdM4ZrEAypHSX70JQiAHff')
| UTF-8 | Python | false | false | 464 | py | 3 | hipchat_client.py | 2 | 0.653017 | 0.631466 | 0 | 17 | 26.294118 | 98 |
joker-ht/mapIRbin | 18,648,748,019,229 | 7b4656409ce4f199a10e4846323258eb5fd2f466 | 954f8b74f16338733d18e37db1c71026e6f8b0db | /IR_CFG.py | 1a2b711f83cf7a255d909e6e657031a9194427f1 | [] | no_license | https://github.com/joker-ht/mapIRbin | 5fe18314aebf186fbecebfbd470bd12b79bc2724 | 11f70963f1f28019e079df36e12f57937d58e0f2 | refs/heads/master | 2022-12-08T12:39:28.213379 | 2020-08-28T07:42:47 | 2020-08-28T07:42:47 | 290,432,664 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os,sys
import MySQLdb
import json
sys.path.append(r"/home/seclab/xujianhao/checkCF/tracing_kernel/scripts")
import IR2graph
from mysql_config import read_config
from getIRinfo import get_IR_info
# Absolute directory of this script -- used to locate the default
# static_info.config.json shipped next to it.
real_f_path = os.path.realpath(__file__)
this_path = real_f_path[:real_f_path.rfind('/')]
def build_IR_cfg(file_name, func_name, db_config):
    """Build the IR-level CFG of one function from edges stored in MySQL.

    :param file_name: source file whose edges are queried from the database.
    :param func_name: function whose edges are kept.
    :param db_config: path to a JSON config understood by read_config().
    :returns: the graph produced by IR2graph.build_graph_from_edges().
    """
    host, db_user, db_password, db_name = read_config(db_config)
    db = MySQLdb.connect(host, db_user, db_password, db_name, charset='utf8', autocommit=True)
    try:
        cursor = db.cursor()
        edges = IR2graph.get_edges_in_file_from_db(cursor, file_name)
        edges = IR2graph.filter_edges_by_func(edges, func_name)
        return IR2graph.build_graph_from_edges(edges)
    finally:
        # Close the connection even when a query raises; the original leaked
        # it on any exception.
        db.close()
def get_IR_features(c_file_name, func_name):
    """Collect per-basic-block feature strings for *func_name* in *c_file_name*.

    Returns a dict mapping basic-block label -> list of feature strings such
    as 'call foo', 'cmp 5', 'test 3', 'shl 2' or 'cmov', or None when
    get_IR_info() has no data for the file.
    """
    fea_of_cfile = get_IR_info(c_file_name)
    if fea_of_cfile is None:
        return
    # for given function in this file, go get its features
    feas = dict()
    for bb_info in fea_of_cfile:
        bb_id = bb_info["id"]
        # Block ids look like 'function%label'.
        bb_func, bb_label = bb_id.split('%')
        # FIXIT: Now we only consider 'call function'
        bb_call = bb_info["called function"]
        bb_cmp2reg = bb_info['cmp']
        bb_imm = bb_info['immediate']
        bb_select = bb_info['select instruction']
        bb_test = bb_info["cmp and"]
        if bb_func == func_name:
            feas[bb_label] = []
            # Calls: skip llvm.lifetime intrinsics, collapse memset variants.
            for calledf in bb_call:
                if 'lifetime' in calledf:
                    continue
                if 'memset' in calledf:
                    calledf = 'memset'
                feas[bb_label].append('call ' + calledf)
            for cmp2reg in bb_cmp2reg:
                if cmp2reg == 'yes':
                    feas[bb_label].append('cmp 2 reg')
                if cmp2reg == 'null':
                    feas[bb_label].append('cmp 0')
            # if cmp2reg and cmp2reg != 'yes':
            #     feas[bb_label].append('cmp ' + cmp2reg)
            # Immediates: when 'cmp and' entries exist, one copy of each
            # matching cmp-immediate is consumed by a test/cmp pair.
            imme = []
            if not bb_test:
                imme = bb_imm
            else:
                for test in bb_test:
                    if 'and' in test:
                        feas[bb_label].append('test ' + test.split('_')[0])
                immdict = {}
                for imm in bb_imm:
                    immdict.setdefault(imm,[]).append(imm)
                # NOTE(review): assumes every 'cmp' entry of bb_test is also a
                # key of immdict -- this raises KeyError otherwise; confirm
                # against getIRinfo's output format.
                for memu in bb_test:
                    if 'cmp' in memu:
                        del immdict[memu][0]
                for value in immdict.values():
                    imme.extend(value)
            # Entries are 'value_kind' strings; the prefix before '_' is the
            # immediate value itself.
            for imm in imme:
                value = imm.split('_')[0]
                if 'cmp' in imm:
                    feas[bb_label].append('cmp ' + value)
                    continue
                if 'shr' in imm:
                    feas[bb_label].append('shr ' + value)
                    continue
                if 'shl' in imm:
                    feas[bb_label].append('shl ' + value)
                    continue
                if 'and' in imm:
                    feas[bb_label].append('and ' + value)
                    continue
                if 'add' in imm:
                    feas[bb_label].append('add ' + value)
                    continue
                if 'or' in imm:
                    feas[bb_label].append('or ' + value)
                    continue
                if 'switch' in imm:
                    feas[bb_label].append('cmp ' + value)
            # 'select' instructions map to conditional moves.
            for sele in bb_select:
                if sele == 'yes':
                    feas[bb_label].append('cmov')
    return feas
class IR_CFG(object):
    """IR-level CFG of one function plus its per-basic-block features."""

    def __init__(self, file_name, func_name, db_config=None):
        """Load graph and features for *func_name* defined in *file_name*.

        *db_config* defaults to the static_info.config.json next to this
        script.
        """
        config_path = db_config if db_config is not None else this_path + '/static_info.config.json'
        self.file_name = file_name
        self.func_name = func_name
        self.graph = build_IR_cfg(file_name, func_name, config_path)
        self.node_features_map = get_IR_features(file_name, func_name)
if __name__ == "__main__":
    # Demo: build the CFG of do_mmap from the Linux mm/mmap.c sources and
    # dump every basic block's feature list.
    filename = '/home/seclab/xujianhao/linux/mm/mmap.c'
    funcname = 'do_mmap'
    db_config = this_path + '/static_info.config.json'
    ir_cfg = IR_CFG(filename, funcname, db_config)
    for node in ir_cfg.node_features_map.items():
        print(node)
    # for node in ir_cfg.graph.nodes:
    #     print(node)
    # IR2graph.draw_graph(ir_cfg.graph, funcname)
| UTF-8 | Python | false | false | 4,484 | py | 9 | IR_CFG.py | 8 | 0.492194 | 0.487957 | 0 | 123 | 34.455285 | 94 |
philiplee15/personal-projects | 18,184,891,557,611 | 8c7d6053e0b3e01d81748cb6306b0da3254ad227 | bd006ae00a03c06acdc3209b47d5a6bcbda46bb4 | /cryptocurrency/apps/converter/views.py | d32129cb1deb86c46d70bbe3ad326893fff53b5a | [] | no_license | https://github.com/philiplee15/personal-projects | 29e1e558f4cedeb8d5a90bc58af7c6fa2739c9e5 | 2168a9808996e84a1729c9f65cca138c553f8709 | refs/heads/master | 2018-04-13T05:45:43.948012 | 2017-07-06T19:51:38 | 2017-07-06T19:51:38 | 90,322,350 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render
from django.conf import settings
import requests
from .forms import SubmitEmbed
from .serializer import EmbedSerializer
def save_embed(request):
    """Render the converter page with live ticker data from CoinMarketCap.

    Fetches the public ticker, persists it through EmbedSerializer when the
    payload validates, and selects the currency requested via POST, falling
    back to "bitcoin" when nothing (or an unknown id) was posted.
    """
    form = SubmitEmbed()
    r = requests.get('https://api.coinmarketcap.com/v1/ticker/')
    ticker = r.json()
    serializer = EmbedSerializer(data=ticker)
    if serializer.is_valid():
        serializer.save()
    # Set of known coin ids.  The original used a one-shot generator named
    # 'id' (shadowing the builtin), which can only be consumed once.
    known_ids = {item['id'] for item in ticker}
    # .get() instead of ['currency'] so a POST without that key no longer 500s.
    requested = request.POST.get('currency') if request.POST else None
    currency = requested if requested in known_ids else "bitcoin"
    return render(request, 'converter/index.html', {'embed': ticker, 'currency': currency, 'form': form})
def crypto_form(request):
    """Placeholder view -- not implemented yet."""
    pass
| UTF-8 | Python | false | false | 717 | py | 15 | views.py | 7 | 0.686192 | 0.684798 | 0 | 21 | 33.142857 | 101 |
seryte/test_platform_dev | 2,963,527,448,654 | 8821dba4520751a9d8e445169a3ae457cc55d49b | a58f41f78395a5e0a3b344f46d133a7dbfb3b8bd | /test_platform/user_app/views.py | 1a0e7f813cec52dcf22a4d8f09c134a9c5fa7c7c | [] | no_license | https://github.com/seryte/test_platform_dev | b41206df2d734957da8336ad3cf13ca0b76e0375 | 8c8475296cc9e2ecf072408d7b84229204ce69cf | refs/heads/master | 2020-03-29T03:09:48.446413 | 2018-10-12T15:27:29 | 2018-10-12T15:27:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib import auth
from django.contrib.auth.decorators import login_required
from user_app.models import Project, Module
# Create your views here.
def index(request):
    """Render the landing page (index.html, also used as the login form)."""
    return render(request, "index.html")
def login_action(request):
    """Handle a login form submission.

    On POST: validates the credentials, records the username in the session
    and redirects to the project list on success; re-renders the login page
    with an error message otherwise.  Non-POST requests get the login page
    back (the original fell through and returned None, which makes Django
    raise a 500).
    """
    if request.method != "POST":
        return render(request, "index.html")
    username = request.POST.get("username", "")
    password = request.POST.get("password", "")
    if username == "" or password == "":
        return render(request, "index.html", {"error": "用户名或密码不能为空"})
    user = auth.authenticate(username=username, password=password)
    if user is None:
        return render(request, "index.html", {"error": "用户名或密码错误"})
    auth.login(request, user)  # record the login state
    request.session['user'] = username
    return HttpResponseRedirect('/project_manage/')
@login_required
def project_manage(request):
    """Show the project list page for the user recorded in the session."""
    current_user = request.session.get("user", '')
    all_projects = Project.objects.all()
    context = {"user": current_user, "projects": all_projects}
    return render(request, "project_manage.html", context)
def logout(request):
    """Clear the auth session and send the user back to the login page."""
    auth.logout(request)
    return HttpResponseRedirect("/")
def create_project(request):
    """Render the form for adding a new project."""
    return render(request, "add_project.html")
| UTF-8 | Python | false | false | 1,466 | py | 5 | views.py | 2 | 0.644272 | 0.644272 | 0 | 47 | 29.085106 | 90 |
tshimoga/Diffusion-Decision-using-KNN | 9,878,424,808,912 | ad3914860d669bc4e8a52239f1a2962c3d646757 | 548a5fccfad71dbf5f391be991e9dfa118954f0d | /DiffusionDecision/generate_lamdas.py | 8a6a561c75f270dd4ff52569d1367a785fa73517 | [] | no_license | https://github.com/tshimoga/Diffusion-Decision-using-KNN | 544a15a916f60b2308921af4a926b846a876caa4 | 200c219feb7a6bcf2f59d84e40ad3fa0544a1c2b | refs/heads/master | 2021-01-10T03:02:22.377236 | 2016-01-06T23:05:49 | 2016-01-06T23:05:49 | 49,166,194 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
from scipy.spatial.distance import euclidean
import pickle
import numpy as np
def unpickle(file):
    """Load and return the pickled object stored at path *file*.

    Uses the module-level ``pickle`` import and a context manager so the file
    handle is closed even on error (the original used Python-2-only cPickle,
    never closed the handle on failure, and shadowed the builtin ``dict``).
    """
    with open(file, 'rb') as fo:
        return pickle.load(fo)
# Two-level memo cache: {point_a_tuple: {point_b_tuple: distance}}.
euc_dist = {}
def euc(test_point, point):
    """Memoised Euclidean distance between *test_point* and *point*.

    Cache keys are coordinate tuples, so any sequence (list or numpy array)
    works and distinct points can never collide.  The original built string
    keys -- with *different* separators for the two points -- via ``np.str``,
    which was removed in NumPy 1.20.
    """
    key_a = tuple(test_point)
    key_b = tuple(point)
    cache = euc_dist.setdefault(key_a, {})
    if key_b not in cache:
        cache[key_b] = euclidean(test_point, point)
    return cache[key_b]
# Load the per-class training/test samples produced by an earlier stage.
training_class_sample_map = unpickle("data/training_class_sample_map")
test_class_sample_map = unpickle("data/test_class_sample_map")
# Mean of all training samples (9-dimensional vectors).  Summing numpy arrays
# into the list turns random_point into an ndarray -- presumably intended;
# TODO confirm the stored samples really are numpy arrays.
random_point = [0,0,0,0,0,0,0,0,0]
sample_count = 0
for image_class in training_class_sample_map:
    random_point += sum(training_class_sample_map[image_class])
    sample_count += len(training_class_sample_map[image_class])
random_sample = random_point/float(sample_count)
# For each class: grow a ball around the mean in steps of distance_step and
# record how many new samples each step captures; lamda is the average number
# of samples gained per step.
distance_step = 0.1
lamda_values = []
for image_class in training_class_sample_map:
    training_samples = training_class_sample_map[image_class]
    distance = distance_step
    num_samples_per_unit_distance = []
    local_lamdas = []
    ts1 = time.time()
    while True:
        num_samples = 0
        for sample in training_samples:
            distance_from_rand = euc(random_sample,sample)
            if(distance_from_rand < distance):
                num_samples += 1
        num_samples_per_unit_distance.append(num_samples)
        if(len(num_samples_per_unit_distance) == 1):
            local_lamdas.append(num_samples_per_unit_distance[-1])
        else:
            local_lamdas.append(num_samples_per_unit_distance[-1]-num_samples_per_unit_distance[-2])
        # Stop once the ball contains every sample of the class.
        if(int(num_samples) == len(training_samples)):
            ts2 = time.time()
            break
        else:
            distance += distance_step
    lamda_values.append(sum(local_lamdas)/float(len(local_lamdas)))
print(lamda_values)
# Persist the per-class lamda estimates.
lamda_values_dump = open("data/lamda_values", "wb")
pickle.dump(lamda_values, lamda_values_dump) | UTF-8 | Python | false | false | 2,361 | py | 10 | generate_lamdas.py | 9 | 0.635324 | 0.626853 | 0 | 66 | 33.80303 | 100 |
Step657/leetcode | 19,104,014,533,367 | 110163582a930198d9c2a102b9776e68ce81098c | 74ee1f097cd3339f8f25c798afbae31a4b02ec6a | /other/1_Two_Sum.py | c4259eb12ed784b8273cc0fb48d3106a7f877c05 | [] | no_license | https://github.com/Step657/leetcode | cd6a679f793cecde376c6cb59793150a2c85c00c | 70ca9c5ef5be291abbc6e48100ff638c4145eb58 | refs/heads/master | 2022-12-16T08:58:32.850811 | 2020-09-24T15:15:12 | 2020-09-24T15:15:12 | 285,330,996 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def bruteFroce(nums, target):
for i in range(len(nums)-1):
for j in range(i+1, len(nums)):
if nums[i] + nums[j] == target:
return [i,j]
return False
def pythonList(nums, target):
    """Return indices [i, j] (i < j) of two distinct entries summing to target.

    Single pass with a value->index map.  The original looked the complement
    up with ``in``/``index`` and could pair an element with itself (e.g.
    [3, 2, 4] with target 6 returned [0, 0]).  Returns None when no pair
    exists, matching the original's fall-through behaviour.
    """
    seen = {}
    for j, value in enumerate(nums):
        complement = target - value
        if complement in seen:
            return [seen[complement], j]
        seen[value] = j
if __name__ == "__main__":
    # Quick smoke run; note bruteFroce's result is overwritten by
    # pythonList's before it is printed.
    nums = [2, 7, 11, 15]
    res = bruteFroce(nums, 9)
    res = pythonList(nums, 9)
print(res) | UTF-8 | Python | false | false | 459 | py | 62 | 1_Two_Sum.py | 60 | 0.529412 | 0.507625 | 0 | 19 | 23.210526 | 56 |
jaakkju/ckan-streamcatalog | 9,010,841,388,989 | 337e9202d8e0386a169426e7761969a5bf704357 | 1ec65d98304b9390899355a14bf7756d84ffea2d | /ckanext/streamcatalog/activity.py | 5893be6d479e74c0731eea81095c03403a6b1e8d | [
"MIT"
] | permissive | https://github.com/jaakkju/ckan-streamcatalog | f8407dfd49ea1d17ae0b24de0f9ef881137e9964 | f7fe763f357be77bd538a5626710885acfbfc8ff | refs/heads/master | 2022-07-23T09:11:36.436342 | 2014-10-01T11:19:26 | 2014-10-01T11:19:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import re
import datetime
from pylons import config
from webhelpers.html import literal
from webhelpers.html.tags import *
from routes import url_for
import ckan.lib.helpers as h
import ckan.lib.base as base
import ckan.lib.dictization.model_dictize as model_dictize
import ckan.logic as logic
from ckan.logic.action.get import dashboard_activity_list
import ckan.model as model
from ckan.common import _, c
_check_access = logic.check_access
'''
This file is a forced copy-pasta of ckan/lib/activity_streams.py due to the inability to overwrite activity streams.
Here we simply replace all occurrences of "dataset" with "datastream".
@TODO: Refactor everything - if possible...
'''
# Salvage what we do not need to rewrite.
from ckan.lib.activity_streams import get_snippet_actor, get_snippet_user, get_snippet_dataset, \
get_snippet_tag, get_snippet_group, get_snippet_organization, \
get_snippet_extra, get_snippet_related_item, get_snippet_related_type
def get_snippet_resource(activity, detail):
    """Render an HTML link to the resource touched by this activity detail."""
    label = str(detail['data']['resource']['url'])
    resource_url = url_for(controller='package',
                           action='resource_read',
                           id=activity['data']['package']['id'],
                           resource_id=detail['data']['resource']['id'])
    return link_to(label, resource_url)
# activity_stream_string_*() functions return translatable string
# representations of activity types, the strings contain placeholders like
# {user}, {dataset} etc. to be replaced with snippets from the get_snippet_*()
# functions above.
''' Replacements and salvaging in the order of appearance. '''
# Local overrides reword CKAN's stock strings to say "datastream" instead of
# "dataset"; strings that need no rewording are re-imported unchanged from
# ckan.lib.activity_streams.
def activity_stream_string_added_tag(context, activity):
    return _("{actor} added the tag {tag} to the datastream {dataset}")
from ckan.lib.activity_streams import activity_stream_string_changed_group, activity_stream_string_changed_organization
def activity_stream_string_changed_package(context, activity):
    return _("{actor} updated the datastream {dataset}")
def activity_stream_string_changed_package_extra(context, activity):
    return _("{actor} changed the extra {extra} of the datastream {dataset}")
def activity_stream_string_changed_resource(context, activity):
    return _("{actor} updated a subscription to {resource} in the datastream {dataset}")
from ckan.lib.activity_streams import activity_stream_string_changed_user
def activity_stream_string_changed_related_item(context, activity):
    if activity['data'].get('dataset'):
        return _("{actor} updated the {related_type} {related_item} of the "
                 "datastream {dataset}")
    else:
        return _("{actor} updated the {related_type} {related_item}")
from ckan.lib.activity_streams import activity_stream_string_deleted_group, activity_stream_string_deleted_organization
def activity_stream_string_deleted_package(context, activity):
    return _("{actor} deleted the datastream {dataset}")
def activity_stream_string_deleted_package_extra(context, activity):
    return _("{actor} deleted the extra {extra} from the datastream {dataset}")
def activity_stream_string_deleted_resource(context, activity):
    return _("{actor} deleted the subscription {resource} from the datastream "
             "{dataset}")
from ckan.lib.activity_streams import activity_stream_string_new_group, activity_stream_string_new_organization
def activity_stream_string_new_package(context, activity):
    return _("{actor} created the datastream {dataset}")
def activity_stream_string_new_package_extra(context, activity):
    return _("{actor} added the extra {extra} to the datastream {dataset}")
def activity_stream_string_new_resource(context, activity):
    return _("{actor} added a subscription to {resource} to the datastream {dataset}")
from ckan.lib.activity_streams import activity_stream_string_new_user
def activity_stream_string_removed_tag(context, activity):
    return _("{actor} removed the tag {tag} from the datastream {dataset}")
from ckan.lib.activity_streams import activity_stream_string_deleted_related_item, activity_stream_string_follow_dataset, \
    activity_stream_string_follow_user, activity_stream_string_follow_group
def activity_stream_string_new_related_item(context, activity):
    if activity['data'].get('dataset'):
        return _("{actor} added the {related_type} {related_item} to the "
                 "datastream {dataset}")
    else:
        return _("{actor} added the {related_type} {related_item}")
# A dictionary mapping activity snippets to functions that expand the snippets.
activity_snippet_functions = {
    'actor': get_snippet_actor,
    'user': get_snippet_user,
    'dataset': get_snippet_dataset,
    'tag': get_snippet_tag,
    'group': get_snippet_group,
    'organization': get_snippet_organization,
    'extra': get_snippet_extra,
    'resource': get_snippet_resource,  # local override defined above
    'related_item': get_snippet_related_item,
    'related_type': get_snippet_related_type,
}
# A dictionary mapping activity types to functions that return translatable
# string descriptions of the activity types.  Entries point at the local
# "datastream" overrides where one exists.
activity_stream_string_functions = {
    'added tag': activity_stream_string_added_tag,
    'changed group': activity_stream_string_changed_group,
    'changed organization': activity_stream_string_changed_organization,
    'changed package': activity_stream_string_changed_package,
    'changed package_extra': activity_stream_string_changed_package_extra,
    'changed resource': activity_stream_string_changed_resource,
    'changed user': activity_stream_string_changed_user,
    'changed related item': activity_stream_string_changed_related_item,
    'deleted group': activity_stream_string_deleted_group,
    'deleted organization': activity_stream_string_deleted_organization,
    'deleted package': activity_stream_string_deleted_package,
    'deleted package_extra': activity_stream_string_deleted_package_extra,
    'deleted resource': activity_stream_string_deleted_resource,
    'new group': activity_stream_string_new_group,
    'new organization': activity_stream_string_new_organization,
    'new package': activity_stream_string_new_package,
    'new package_extra': activity_stream_string_new_package_extra,
    'new resource': activity_stream_string_new_resource,
    'new user': activity_stream_string_new_user,
    'removed tag': activity_stream_string_removed_tag,
    'deleted related item': activity_stream_string_deleted_related_item,
    'follow dataset': activity_stream_string_follow_dataset,
    'follow user': activity_stream_string_follow_user,
    'follow group': activity_stream_string_follow_group,
    'new related item': activity_stream_string_new_related_item,
}
# A dictionary mapping activity types to the icons associated to them
activity_stream_string_icons = {
    'added tag': 'tag',
    'changed group': 'group',
    'changed package': 'sitemap',
    'changed package_extra': 'edit',
    'changed resource': 'file',
    'changed user': 'user',
    'deleted group': 'group',
    'deleted package': 'sitemap',
    'deleted package_extra': 'edit',
    'deleted resource': 'file',
    'new group': 'group',
    'new package': 'sitemap',
    'new package_extra': 'edit',
    'new resource': 'file',
    'new user': 'user',
    'removed tag': 'tag',
    'deleted related item': 'picture',
    'follow dataset': 'sitemap',
    'follow user': 'user',
    'follow group': 'group',
    'new related item': 'picture',
    'changed organization': 'briefcase',
    'deleted organization': 'briefcase',
    'new organization': 'briefcase',
    'undefined': 'certificate', # This is when no activity icon can be found
}
# A list of activity types that may have details
activity_stream_actions_with_detail = ['changed package']
def activity_list_to_html(context, activity_stream, extra_vars):
    '''Return the given activity stream as a snippet of HTML.
    :param activity_stream: the activity stream to render
    :type activity_stream: list of activity dictionaries
    :param extra_vars: extra variables to pass to the activity stream items
        template when rendering it
    :type extra_vars: dictionary
    :rtype: HTML-formatted string
    '''
    activity_list = [] # These are the activity stream messages.
    for activity in activity_stream:
        detail = None
        activity_type = activity['activity_type']
        # Some activity types may have details.
        if activity_type in activity_stream_actions_with_detail:
            details = logic.get_action('activity_detail_list')(context=context,
                data_dict={'id': activity['id']})
            # If an activity has just one activity detail then render the
            # detail instead of the activity.
            if len(details) == 1:
                detail = details[0]
                object_type = detail['object_type']
                if object_type == 'PackageExtra':
                    object_type = 'package_extra'
                # e.g. 'changed' + 'resource' -> 'changed resource'.
                new_activity_type = '%s %s' % (detail['activity_type'],
                                               object_type.lower())
                if new_activity_type in activity_stream_string_functions:
                    activity_type = new_activity_type
        if not activity_type in activity_stream_string_functions:
            raise NotImplementedError("No activity renderer for activity "
                "type '%s'" % activity_type)
        # Fall back to the generic icon for unmapped activity types.
        if activity_type in activity_stream_string_icons:
            activity_icon = activity_stream_string_icons[activity_type]
        else:
            activity_icon = activity_stream_string_icons['undefined']
        activity_msg = activity_stream_string_functions[activity_type](context,
                activity)
        # Get the data needed to render the message: each '{placeholder}' in
        # the message is expanded via the matching get_snippet_* function.
        matches = re.findall('\{([^}]*)\}', activity_msg)
        data = {}
        for match in matches:
            snippet = activity_snippet_functions[match](activity, detail)
            data[str(match)] = snippet
        activity_list.append({'msg': activity_msg,
                              'type': activity_type.replace(' ', '-').lower(),
                              'icon': activity_icon,
                              'data': data,
                              'timestamp': activity['timestamp'],
                              'is_new': activity.get('is_new', False)})
    extra_vars['activities'] = activity_list
    return literal(base.render('activity_streams/activity_stream_items.html',
                               extra_vars=extra_vars))
''' Helpers copied from ckan/logic/action/get.py which use the activity_list_to_html function redefined above. '''
def user_activity_list_html(context, data_dict):
    '''Return a user's public activity stream as HTML.
    The activity stream is rendered as a snippet of HTML meant to be included
    in an HTML page, i.e. it doesn't have any HTML header or footer.
    :param id: The id or name of the user.
    :type id: string
    :param offset: where to start getting activity items from
        (optional, default: 0)
    :type offset: int
    :param limit: the maximum number of activities to return
        (optional, default: 31, the default value is configurable via the
        ckan.activity_list_limit setting)
    :type limit: int
    :rtype: string
    '''
    # NOTE(review): user_activity_list is neither defined nor imported in this
    # file -- this call raises NameError unless something injects the name at
    # runtime; confirm against the ckan.logic.action.get original.
    activity_stream = user_activity_list(context, data_dict)
    offset = int(data_dict.get('offset', 0))
    extra_vars = {
        'controller': 'user',
        'action': 'activity',
        'id': data_dict['id'],
        'offset': offset,
    }
    return activity_list_to_html(context, activity_stream, extra_vars)
def package_activity_list(context, data_dict):
    '''Return a package's activity stream.
    You must be authorized to view the package.
    :param id: the id or name of the package
    :type id: string
    :param offset: where to start getting activity items from
        (optional, default: 0)
    :type offset: int
    :param limit: the maximum number of activities to return
        (optional, default: 31, the default value is configurable via the
        ckan.activity_list_limit setting)
    :type limit: int
    :rtype: list of dictionaries
    '''
    # FIXME: Filter out activities whose subject or object the user is not
    # authorized to read.
    _check_access('package_show', context, data_dict)
    # Local 'model' deliberately shadows the module-level ckan.model import.
    model = context['model']
    package_ref = data_dict.get('id')  # May be name or ID.
    package = model.Package.get(package_ref)
    if package is None:
        raise logic.NotFound
    offset = int(data_dict.get('offset', 0))
    limit = int(
        data_dict.get('limit', config.get('ckan.activity_list_limit', 31)))
    activity_objects = model.activity.package_activity_list(package.id,
            limit=limit, offset=offset)
    return model_dictize.activity_list_dictize(activity_objects, context)
def package_activity_list_html(context, data_dict):
    '''Render a package's activity stream as an embeddable HTML snippet.

    The snippet carries no HTML header or footer.

    :param id: the id or name of the package
    :type id: string
    :param offset: where to start getting activity items from
        (optional, default: 0)
    :type offset: int
    :param limit: the maximum number of activities to return
        (optional, default: 31, configurable via ckan.activity_list_limit)
    :type limit: int

    :rtype: string
    '''
    stream = package_activity_list(context, data_dict)
    template_vars = {
        'controller': 'package',
        'action': 'activity',
        'id': data_dict['id'],
        'offset': int(data_dict.get('offset', 0)),
    }
    return activity_list_to_html(context, stream, template_vars)
def group_activity_list(context, data_dict):
    '''Return a group's activity stream.
    You must be authorized to view the group.
    :param id: the id or name of the group
    :type id: string
    :param offset: where to start getting activity items from
        (optional, default: 0)
    :type offset: int
    :param limit: the maximum number of activities to return
        (optional, default: 31, the default value is configurable via the
        ckan.activity_list_limit setting)
    :type limit: int
    :rtype: list of dictionaries
    '''
    # FIXME: Filter out activities whose subject or object the user is not
    # authorized to read.
    _check_access('group_show', context, data_dict)
    # Local 'model' deliberately shadows the module-level ckan.model import.
    model = context['model']
    group_id = data_dict.get('id')
    offset = data_dict.get('offset', 0)
    limit = int(
        data_dict.get('limit', config.get('ckan.activity_list_limit', 31)))
    # Convert group_id (could be id or name) into id.
    group_show = logic.get_action('group_show')
    group_id = group_show(context, {'id': group_id})['id']
    activity_objects = model.activity.group_activity_list(group_id,
            limit=limit, offset=offset)
    return model_dictize.activity_list_dictize(activity_objects, context)
def group_activity_list_html(context, data_dict):
    '''Render a group's activity stream as an embeddable HTML snippet.

    The snippet carries no HTML header or footer.

    :param id: the id or name of the group
    :type id: string
    :param offset: where to start getting activity items from
        (optional, default: 0)
    :type offset: int
    :param limit: the maximum number of activities to return
        (optional, default: 31, configurable via ckan.activity_list_limit)
    :type limit: int

    :rtype: string
    '''
    stream = group_activity_list(context, data_dict)
    template_vars = {
        'controller': 'group',
        'action': 'activity',
        'id': data_dict['id'],
        'offset': int(data_dict.get('offset', 0)),
    }
    return activity_list_to_html(context, stream, template_vars)
def organization_activity_list(context, data_dict):
    '''Return a organization's activity stream.
    :param id: the id or name of the organization
    :type id: string
    :rtype: list of dictionaries
    '''
    # FIXME: Filter out activities whose subject or object the user is not
    # authorized to read.
    _check_access('organization_show', context, data_dict)
    # Local 'model' deliberately shadows the module-level ckan.model import.
    model = context['model']
    org_id = data_dict.get('id')
    offset = data_dict.get('offset', 0)
    limit = int(
        data_dict.get('limit', config.get('ckan.activity_list_limit', 31)))
    # Convert org_id (could be id or name) into id.
    org_show = logic.get_action('organization_show')
    org_id = org_show(context, {'id': org_id})['id']
    # Organizations are groups underneath, hence group_activity_list.
    activity_objects = model.activity.group_activity_list(org_id,
            limit=limit, offset=offset)
    return model_dictize.activity_list_dictize(activity_objects, context)
def organization_activity_list_html(context, data_dict):
    '''Render an organization's activity stream as an embeddable HTML snippet.

    The snippet carries no HTML header or footer.

    :param id: the id or name of the organization
    :type id: string

    :rtype: string
    '''
    stream = organization_activity_list(context, data_dict)
    template_vars = {
        'controller': 'organization',
        'action': 'activity',
        'id': data_dict['id'],
        'offset': int(data_dict.get('offset', 0)),
    }
    return activity_list_to_html(context, stream, template_vars)
def recently_changed_packages_activity_list_html(context, data_dict):
    '''Return the activity stream of all recently changed packages as HTML.
    The activity stream includes all recently added or changed packages. It is
    rendered as a snippet of HTML meant to be included in an HTML page, i.e. it
    doesn't have any HTML header or footer.
    :param offset: where to start getting activity items from
        (optional, default: 0)
    :type offset: int
    :param limit: the maximum number of activities to return
        (optional, default: 31, the default value is configurable via the
        ckan.activity_list_limit setting)
    :type limit: int
    :rtype: string
    '''
    # NOTE(review): recently_changed_packages_activity_list is neither defined
    # nor imported in this file -- this call raises NameError unless the name
    # is injected at runtime; confirm against ckan.logic.action.get.
    activity_stream = recently_changed_packages_activity_list(context,
            data_dict)
    offset = int(data_dict.get('offset', 0))
    extra_vars = {
        'controller': 'package',
        'action': 'activity',
        'offset': offset,
    }
    return activity_list_to_html(context, activity_stream, extra_vars)
def dashboard_activity_list_html(context, data_dict):
    '''Return the authorized user's dashboard activity stream as HTML.

    The activity stream is rendered as a snippet of HTML meant to be included
    in an HTML page, i.e. it doesn't have any HTML header or footer.

    :param id: the id or name of the user
    :type id: string
    :param offset: where to start getting activity items from
        (optional, default: 0)
    :type offset: int
    :param limit: the maximum number of activities to return
        (optional, default: 31, the default value is configurable via the
        ckan.activity_list_limit setting)
    :type limit: int

    :rtype: string
    '''
    activity_stream = dashboard_activity_list(context, data_dict)
    # int() for consistency with the other *_activity_list_html helpers; the
    # original also fetched context['model'] into a local it never used.
    offset = int(data_dict.get('offset', 0))
    extra_vars = {
        'controller': 'user',
        'action': 'dashboard',
        'offset': offset,
    }
    return activity_list_to_html(context, activity_stream, extra_vars)
''' Overwritten activity stream helpers from ckan/lib/helpers.py. '''
def dashboard_activity_stream(user_id, filter_type=None, filter_id=None, offset=0):
'''Return the dashboard activity stream of the current user.
:param user_id: the id of the user
:type user_id: string
:param filter_type: the type of thing to filter by
:type filter_type: string
:param filter_id: the id of item to filter by
:type filter_id: string
:returns: an activity stream as an HTML snippet
:rtype: string
'''
context = {'model': model, 'session': model.Session, 'user': c.user}
if filter_type:
action_functions = {
'dataset': package_activity_list_html,
'user': user_activity_list_html,
'group': group_activity_list_html
}
action_function = action_functions.get(filter_type)
return action_function(context, {'id': filter_id, 'offset': offset})
else:
return dashboard_activity_list_html(context, {'offset': offset})
| UTF-8 | Python | false | false | 20,690 | py | 23 | activity.py | 10 | 0.671194 | 0.669309 | 0 | 547 | 36.824497 | 123 |
meher1087/learn_ros_python | 16,973,710,780,367 | b3bc19888a0af4458cc08abb5cef9a0276924194 | a4b94c63843d344ad1cdcfee314c74ed10a460a5 | /arm_ws/src/dext_moveit_config/node/control_pose.py | dfa8bd4845b6f81dc8ae85b633c4b07c09ba1c70 | [] | no_license | https://github.com/meher1087/learn_ros_python | 25efc5d915dddec722c9be5c127e0dd214d100b1 | 2a3460df8400446d3286f243bce4a534e0ea7336 | refs/heads/master | 2022-11-20T17:15:17.331227 | 2020-07-18T15:49:20 | 2020-07-18T15:49:20 | 255,571,487 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import sys
import rospy
import copy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
from math import pi
from std_msgs.msg import String
from moveit_commander.conversions import pose_to_list
# Initialise the moveit_commander C++ backend and this ROS node.
moveit_commander.roscpp_initialize(sys.argv)
rospy.init_node('move_group_python_interface_tutorial',anonymous=True)
# RobotCommander: the outer interface to the robot as a whole.
robot = moveit_commander.RobotCommander()
# PlanningSceneInterface: interface to the world surrounding the robot.
scene=moveit_commander.PlanningSceneInterface()
# MoveGroupCommander for one of the robot's planning groups.
group_name = "manipulator"#"arm"
group = moveit_commander.MoveGroupCommander(group_name)
# # set planner
# group.set_planner_id("RRTConnectkConfigDefault")
# # set time for planning
# group.set_planning_time(20)
# Publisher used so RViz can visualise the planned trajectories.
display_trajectory_publisher=rospy.Publisher('/move_group/display_planned_path', moveit_msgs.msg.DisplayTrajectory, queue_size=20)
# Name of the reference frame for this robot:
planning_frame = group.get_planning_frame()
print "============ Reference frame: %s" % planning_frame
# Name of the end-effector link for this group:
eef_link = group.get_end_effector_link()
print "============ End effector: %s" % eef_link
# All the planning groups defined on the robot:
group_names = robot.get_group_names()
print "============ Robot Groups:", robot.get_group_names()
# Dump the entire robot state -- useful when debugging.
print "============ Printing robot state"
print robot.get_current_state()
print ""
# Move the robot to an initial posture (all joints at 45 degrees).
group.go([pi/4,pi/4,pi/4,pi/4],wait=True)
group.stop()
# Move to a non-singular position by adjusting joint values in the group.
joint_goal = group.get_current_joint_values()
joint_goal[0] = -pi/4
joint_goal[1] = -pi/4
joint_goal[2] = 0
joint_goal[3] = pi/4
# Execute the joint-space goal (blocking).
group.go(joint_goal,wait=True)
# Read back the resulting end-effector pose.
pose = group.get_current_pose(eef_link).pose
x = pose.position.x
y = pose.position.y
z = pose.position.z
print(x,y,z)
group.stop()
# Inverse Kinematics Solution
import tinyik
import numpy as np
from tinyik import (
Link, Joint,
FKSolver, IKSolver,
NewtonOptimizer,
SteepestDescentOptimizer,
ConjugateGradientOptimizer,
ScipyOptimizer, ScipySmoothOptimizer
)
theta = np.pi / 6
def build_ik_solver(optimizer_instance):
fk = FKSolver([Joint('z'), Link([0, 0., 0.001]), Joint('x'), Link([0, 0., 0.093]), Joint('x'), Link([0, 0., 0.121]), Joint('x'), Link([0, 0., 0.112])])
return IKSolver(fk, optimizer_instance)
ik = build_ik_solver(ConjugateGradientOptimizer())
pos_Conj_grad = ik.solve([pi/4,pi/4,pi/4,pi/4], [x,y,z])
print("")
print(pos_Conj_grad)
# apply on robot again
#use go command to execute the joint angles
# get robot to initial position
group.go([pi/2,pi/4,pi/4,pi/2],wait=True)
group.stop()
group.go(pos_Conj_grad,wait=True)
group.stop()
# get pose
pose = group.get_current_pose(eef_link).pose
x_e = pose.position.x
y_e = pose.position.y
z_e = pose.position.z
print(x_e,y_e,z_e)
group.stop()
| UTF-8 | Python | false | false | 3,384 | py | 38 | control_pose.py | 14 | 0.731087 | 0.716608 | 0 | 123 | 26.504065 | 155 |
faiichann/ProjectOPengl | 5,437,428,605,349 | 17bba9a7c1f9e3ec227c366368c321d8cbf53d5f | 0315af311f626de4ab9b04d53d6a712508418bae | /RoomProject-219-262-265.py | be2b22996431515efaf9839abf15917d8efd8032 | [] | no_license | https://github.com/faiichann/ProjectOPengl | 4ca86ff64eadfa3f1cd5765310e6ac06fe742b6b | 4920a01735f9d22e32093453a7118364f12f9832 | refs/heads/master | 2022-11-09T10:13:51.084938 | 2020-06-08T04:59:38 | 2020-06-08T04:59:38 | 269,681,546 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | if __name__ == '__build__':
raise Exception
import sys
try:
from OpenGL.GLUT import *
from OpenGL.GL import *
from OpenGL.GLU import *
except:
print ('''
ERROR: PyOpenGL not installed properly.
''')
sys.exit()
w,h=1280, 720
rot = 0
speed = 20
sum_rot_updown = 0
current_mv_mat = (GLfloat * 16)()
def init():
mat_ambient =[0.7,0.7,0.7,0.1]
mat_diffuse =[0.5,0.5,0.5,1.0]
mat_specular =[1.0,1.0,1.0,1.0]
mat_shininess =[50.0]
glMaterialfv(GL_FRONT,GL_AMBIENT,mat_ambient)
glMaterialfv(GL_FRONT,GL_SPECULAR,mat_specular)
glMaterialfv(GL_FRONT,GL_SHININESS,mat_shininess)
light_ambient = [0.0, 0.0, 0.0, 1.0]
light_diffuse = [1.0, 1.0, 1.0, 1.0]
light_specular = [1.0, 1.0, 1.0, 1.0]
light_position = [1.0, 1.0, 1.0, 0.0]
glLightfv(GL_LIGHT0, GL_AMBIENT, light_ambient)
glLightfv(GL_LIGHT0, GL_DIFFUSE, light_diffuse)
glLightfv(GL_LIGHT0, GL_SPECULAR, light_specular)
glLightfv(GL_LIGHT0, GL_POSITION, light_position)
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
glEnable(GL_LIGHT1)
glEnable(GL_LIGHT2)
glEnable(GL_LIGHT3)
glEnable(GL_LIGHT4)
glEnable(GL_LIGHT5)
glEnable(GL_DEPTH_TEST)
glEnable(GL_NORMALIZE)
glEnable(GL_TEXTURE_2D)
def set_texture():
# glTexImage2D(GL_TEXTURE_2D,0,3,11,12,0,GL_RGB,GL_UNSIGNED_BYTE, image)
glTexParameterf(GL_TEXTURE_2D,GL_TEXTURE_WRAP_S,GL_CLAMP)
glTexParameterf(GL_TEXTURE_2D,GL_TEXTURE_WRAP_T,GL_CLAMP)
glTexParameterf(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_NEAREST)
glTexParameterf(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_NEAREST)
def wall(thickness) :
#function to create the walls with given thickness
glScalef(5,5,5)
glPushMatrix()
glTranslated(0.5,0.5*thickness,0.5)
glScaled(1.0,thickness,1.0)
glutSolidCube(1.0)
glPopMatrix()
def table():
glPushMatrix()
#ขาโต๊ะ 1
glPushMatrix()
glTranslatef(2.5,0.12,2.5)
glRotate(360,1,0,0)
glScalef(0.5,2.0,0.5)
glColor3f(0.1,0,0)
glutSolidCube (1)
glPopMatrix ()
#ขาโต๊ะ 2
glPushMatrix()
glTranslatef(-1.5,0.12,2.5)
glRotate(360,1,0,0)
glScalef(0.5,2.0,0.5)
glColor3f(0.1,0,0)
glutSolidCube (1)
glPopMatrix ()
#ขาโต๊ะ 3
glPushMatrix()
glTranslatef(2.5,0.12,-1.5)
glRotate(360,1,0,0)
glScalef(0.5,2.0,0.5)
glColor3f(0.1,0,0)
glutSolidCube (1)
glPopMatrix ()
#ขาโต๊ะ 4
glPushMatrix()
glTranslatef(-1.5,0.12,-1.5)
glRotate(360,1,0,0)
glScalef(0.5,2.0,0.5)
glColor3f(0.1,0,0)
glutSolidCube (1)
glPopMatrix ()
#พื้นโต๊ะ
glPushMatrix()
glTranslated(0.5,1.2,0.5)
glScaled(3,0.2,3)
glColor3f(0,0.5,0.5)
glutSolidCube(1.5)
glPopMatrix()
glPopMatrix()
def chair():
glPushMatrix()
#ขาเก้าอี้ 1
glPushMatrix()
glScalef(0.5,1,0.5)
glColor3f(2,0.5,1)
glutSolidCube (1)
glPopMatrix ()
#ขาเก้าอี้ 3
glPushMatrix()
glTranslatef(1,0,0)
glScalef(0.5,1,0.5)
glColor3f(2,0.5,1)
glutSolidCube (1)
glPopMatrix ()
#ขาเก้าอี้ 3
glPushMatrix()
glTranslatef(1,0,1)
glScalef(0.5,1,0.5)
glColor3f(2,0.5,1)
glutSolidCube (1)
glPopMatrix ()
#ขาเก้าอี้ 4
glPushMatrix()
glTranslatef(0,0,1)
glScalef(0.5,1,0.5)
glColor3f(2,0.5,1)
glutSolidCube (1)
glPopMatrix ()
#พื้นเก้าอี้
glPushMatrix()
glTranslated(0.5,0.5,0.5)
glScaled(3,0.2,3)
glColor3f(0,0.5,1)
glutSolidCube(0.5)
glPopMatrix()
#ฝาเก้าอี้
glPushMatrix()
glTranslated(0.5,1,-0.125)
glRotatef(90,1,0,0)
glScaled(3,0.5,3)
glColor3f(0.5,0.5,0.5)
glutSolidCube(0.5)
glPopMatrix()
glPopMatrix()
def closet():
glPushMatrix()
glColor(0.5,0.5,0.1)
glTranslated(0.605,0.78,0.5)
glRotate(30,0,1,0)
glScalef(0.5,0.84,0.05)
glutSolidCube (1)
glPopMatrix()
glPushMatrix()
glColor(1,0,0.5)
glTranslated(0.6644,1.18,0.605)
glRotate(30,0,1,0)
glScalef(0.5,0.04,0.2)
glutSolidCube(1)
glPopMatrix()
glPushMatrix()
glColor(1,0,0.5)
glTranslated(0.6644,1.02,0.605)
glRotate(30,0,1,0)
glScalef(0.5,0.04,0.2)
glutSolidCube(1)
glPopMatrix()
glPushMatrix()
glColor(1,0,0.5)
glTranslated(0.6644,0.86,0.605)
glRotate(30,0,1,0)
glScalef(0.5,0.04,0.2)
glutSolidCube(1)
glPopMatrix()
glPushMatrix()
glColor(1,0,0.5)
glTranslated(0.6644,0.7,0.605)
glRotate(30,0,1,0)
glScalef(0.5,0.04,0.2)
glutSolidCube(1)
glPopMatrix()
glPushMatrix()
glColor(1,0,0.5)
glTranslated(0.6644,0.54,0.605)
glRotate(30,0,1,0)
glScalef(0.5,0.04,0.2)
glutSolidCube(1)
glPopMatrix()
glPushMatrix()
glColor(1,0,0.5)
glTranslated(0.6644,0.38,0.605)
glRotate(30,0,1,0)
glScalef(0.5,0.04,0.2)
glutSolidCube(1)
glPopMatrix()
glPushMatrix()
glColor(0.5,0.5,0.1)
glTranslated(0.866,0.785,0.48)
glRotate(30,0,1,0)
glScalef(0.03,0.846,0.27)
glutSolidCube (1)
glPopMatrix()
glPushMatrix()
glColor(0.5,0.5,0.1)
glTranslated(0.449,0.785,0.72)
glRotate(30,0,1,0)
glScalef(0.03,0.846,0.27)
glutSolidCube (1)
glPopMatrix()
def rubber():
glPushMatrix()
glColor(1,0,0)
glTranslated(0.605,0.78,0.5)
glRotate(30,0,1,0)
glScalef(0.05,0.05,0.05)
glutSolidTorus(0.8,2,30,30)
glPopMatrix()
glPushMatrix()
glColor(1,1,1)
glTranslated(0.605,0.88,0.5)
glRotate(110,0,1,0)
glScalef(0.05,0.05,0.05)
glutSolidTorus(0.5,0.5,15,30)
glPopMatrix()
glPushMatrix()
glColor(1,1,1)
glTranslated(0.605,0.68,0.5)
glRotate(110,0,1,0)
glScalef(0.05,0.05,0.05)
glutSolidTorus(0.5,0.5,15,30)
glPopMatrix()
glPushMatrix()
glColor(1,1,1)
glTranslated(0.69,0.79,0.45)
glRotate(100,1,0,0)
glScalef(0.05,0.05,0.05)
glutSolidTorus(0.5,0.5,15,30)
glPopMatrix()
glPushMatrix()
glColor(1,1,1)
glTranslated(0.52,0.79,0.55)
glRotate(100,1,0,0)
glScalef(0.05,0.05,0.05)
glutSolidTorus(0.5,0.5,15,30)
glPopMatrix()
def lamp():
#ฐานรอง
glPushMatrix()
glTranslatef(0,0,0)
glRotatef(90,0.5,0,0)
glScalef(0.2,0.2,0.1)
glColor3f(2,0.5,1)
glutSolidCylinder(1,1,32,1)
glPopMatrix ()
#แท่น
glPushMatrix()
glTranslatef(0,0.7,0)
glRotatef(90,0.5,0,0)
glScalef(0.04,0.04,0.7)
glColor4f(1,1,0,0)
glutSolidCylinder(1,1,32,1)
glPopMatrix ()
#โคม
glPushMatrix()
glTranslatef(0,1,0)
glRotatef(90,0.5,0,0)
glScalef(0.3,0.3,0.4)
glColor4f(1,0.5,0,0)
glutSolidCylinder(1,1,32,1)
glPopMatrix ()
def window():
#บน
glPushMatrix()
glColor(1,0,0.5)
glTranslatef(0.08,0.08,0.08)
glTranslated(0.67,1.2,0.61)
glScalef(0.5,0.04,0.2)
glutSolidCube(1)
glPopMatrix()
#ล่าง
glPushMatrix()
glColor(1,0,0.5)
glTranslatef(0.08,0.08,0.08)
glTranslated(0.67,0.54,0.61)
glScalef(0.5,0.04,0.2)
glutSolidCube(1)
glPopMatrix()
#ขวา
glPushMatrix()
glColor(0.5,0.5,0.1)
glTranslated(1,0.95,0.69)
glScalef(0.03,0.7,0.2)
glutSolidCube (1)
glPopMatrix()
#ซ้าย
glPushMatrix()
glColor(0.5,0.5,0.1)
glTranslated(0.5,0.95,0.69)
glScalef(0.03,0.7,0.2)
glutSolidCube (1)
glPopMatrix()
#กลางตั้ง
glPushMatrix()
glColor(0.5,0.5,0.1)
glTranslated(0.75,0.95,0.69)
glScalef(0.03,0.7,0.2)
glutSolidCube (1)
glPopMatrix()
#กลางนอน
glPushMatrix()
glColor(1,0,0.5)
glTranslatef(0.08,0.08,0.08)
glTranslated(0.67,0.85,0.61)
glScalef(0.5,0.04,0.2)
glutSolidCube(1)
glPopMatrix()
def cup():
#แก้ว
glPushMatrix()
glTranslatef(0,1,1)
glRotatef(90,0.5,0,0)
glScalef(0.2,0.2,0.4)
glColor4f(0,1,1,1)
glutSolidCylinder(1,1,32,1)
glPopMatrix ()
#หูแก้ว
glPushMatrix()
glTranslatef(0.1,0.8,1)
glScalef(0.3,0.2,0.4)
glColor3f(0.5,1,1)
glutSolidTorus(0.1,0.5,20,30)
glPopMatrix ()
def sleep():
glPushMatrix()
glColor(1,0,1)
glTranslated(0.5,0.9,0.6)
glRotate(90,0,1,0)
glScalef(0.2,0.05,0.5)
glutSolidCylinder(1,1,30,30)
glPopMatrix()
glPushMatrix()
glColor(1,1,0)
glTranslated(0.68,0.83,0.6)
glRotate(90,0,1,0)
glScalef(0.5,0.15,0.7)
glutSolidCube(1)
glPopMatrix()
glPushMatrix()
glColor(0,1,0)
glTranslated(0.38,0.91,0.6)
glRotate(90,0,1,0)
glScalef(0.1,0.03,0.1)
glutSolidCylinder(1,1,30,30)
glPopMatrix()
glPushMatrix()
glColor(1,0,0)
glTranslated(0.5,0.9,0.6)
glRotate(90,0,1,0)
glScalef(0.21,0.06,0.2)
glutSolidCylinder(1,1,30,30)
glPopMatrix()
def tv():
glPushMatrix()
glColor(1,0,1)
glTranslatef(0.08,0.08,0.08)
glTranslated(0.605,0.78,0.5)
glRotate(90,1,0,0)
glScalef(0.5,0.02,0.3)
glutSolidCube(1)
glPopMatrix()
glPushMatrix()
glColor(1,1,0)
glTranslatef(0.08,0.08,0.08)
glTranslated(0.605,0.6,0.5)
glRotate(90,1,0,0)
glScalef(0.3,0.05,0.02)
glutSolidCube(1)
glPopMatrix()
glPushMatrix()
glColor(0,1,1)
glTranslatef(0.08,0.08,0.08)
glTranslated(0.605,0.62,0.5)
glRotate(90,1,0,0)
glScalef(0.05,0.02,0.02)
glutSolidCube(1)
glPopMatrix()
glPushMatrix()
glColor(0.5,0.5,0.5)
glTranslatef(0.08,0.08,0.08)
glTranslated(0.605,0.78,0.49)
glRotate(90,1,0,0)
glScalef(0.48,0.02,0.25)
glutSolidCube(1)
glPopMatrix()
glPushMatrix()
glColor3f(0.1,0,0)
glTranslatef(0.08,0.08,0.08)
glTranslated(0.605,0.55,0.5)
glRotate(90,1,0,0)
glScalef(0.55,0.2,0.05)
glutSolidCube(1)
glPopMatrix()
glPushMatrix()
glColor3f(0,0.1,0.1)
glTranslatef(0.08,0.08,0.08)
glTranslated(0.605,0.55,0.5)
glRotate(90,1,0,0)
glScalef(0.02,0.2,0.02)
glutSolidCube(1)
glPopMatrix()
glPushMatrix()
glColor3f(2,0.5,1)
glTranslatef(0.08,0.08,0.08)
glTranslated(0.605,0.52,0.5)
glRotate(90,1,0,0)
glScalef(0.5,0.15,0.15)
glutSolidCube(1)
glPopMatrix()
def sofa():
#พื้นโซฟา
glPushMatrix()
glTranslated(0.5,0.3,0.5)
glScaled(3,0.6,3)
glColor3f(1,0.5,0)
glutSolidCube(0.5)
glPopMatrix()
#ฝาโซฟา
glPushMatrix()
glTranslated(0.5,1,-0.125)
glRotatef(90,1,0,0)
glScaled(3,0.5,3)
glColor3f(0.5,0.5,0.5)
glutSolidCube(0.5)
glPopMatrix()
#พื้นโซฟา2
glPushMatrix()
glTranslated(-0.5,0.3,0.5)
glScaled(3,0.6,3)
glColor3f(1,0.5,0)
glutSolidCube(0.5)
glPopMatrix()
#ฝาโซฟา2
glPushMatrix()
glTranslated(-0.5,1,-0.125)
glRotatef(90,1,0,0)
glScaled(3,0.5,3)
glColor3f(0.5,0.5,0.5)
glutSolidCube(0.5)
glPopMatrix()
#ซ้าย
glPushMatrix()
glTranslated(-1.4,0.3,0.5)
glRotatef(90,0,0,1)
glScaled(3,0.6,3)
glColor3f(1,0,1)
glutSolidCube(0.5)
glPopMatrix()
#ขวา
glPushMatrix()
glTranslated(1.4,0.3,0.5)
glRotatef(90,0,0,1)
glScaled(3,0.6,3)
glColor3f(1,0,1)
glutSolidCube(0.5)
glPopMatrix()
def bun6():
glColor3f(0.1,0,0.1)
wall(0.1)
def bun2():
glColor3f(0.5,0.5,0.5)
wall(0.1)
def tree():
#ต้นไม้
glPushMatrix()
glTranslatef(0,0.7,0)
glRotatef(90,0.5,0,0)
glColor3f(0,1,0)
glutSolidSphere(0.27,15,15)
glPopMatrix ()
#ต้นไม้
glPushMatrix()
glTranslatef(0,0.8,0.2)
glRotatef(90,0.5,0,0)
glColor3f(0.0,0.2,0.0)
glutSolidSphere(0.23,15,15)
glPopMatrix ()
#ต้นไม้
glPushMatrix()
glTranslatef(0,0.62,-0.2)
glRotatef(90,0.5,0,0)
glColor3f(0.0,0.5,0.0)
glutSolidSphere(0.18,15,15)
glPopMatrix ()
#ฐานรองล่าง
glPushMatrix()
glTranslatef(0,-0.2,0)
glRotatef(90,0.5,0,0)
glScalef(0.3,0.25,0.1)
glColor3f(0.5,1,1)
glutSolidCylinder(0.8,1,32,1)
glPopMatrix ()
#ฐานรองบน
glPushMatrix()
glTranslatef(0,0.2,0)
glRotatef(90,0.5,0,0)
glScalef(0.3,0.3,0.1)
glColor3f(0.5,0.5,0.5)
glutSolidCylinder(0.85,1,32,1)
glPopMatrix ()
#แท่น
glPushMatrix()
glTranslatef(0,0.7,0)
glRotatef(90,0.5,0,0)
glScalef(0.04,0.04,0.5)
glColor3f(1,0,1)
glutSolidCylinder(1,1,32,1)
glPopMatrix ()
#โคม
glPushMatrix()
glTranslatef(0,0.2,0)
glRotatef(90,0.5,0,0)
glScalef(0.5,0.5,0.4)
glColor3f(0.1,0.1,0)
glutSolidCylinder(0.4,1,32,1)
glPopMatrix ()
def first():
glColor3f(0,0.2,0.3)
wall(0.2)
def display():
global rot,speed,sum_rot_updown,current_mv_mat,clock
glPushMatrix()
glGetFloatv(GL_MODELVIEW_MATRIX, current_mv_mat)
glLoadIdentity()
glRotatef(sum_rot_updown, 1, 0, 0)
glMultMatrixf(current_mv_mat)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
gluLookAt(2.0,2.0,2.0,0.0,0.2,0.0,0.0,1.0,0.0)
glPushMatrix()
glPushMatrix() #table
glTranslated(0.5,1,4.3)
glRotated(0,0,0,0)
glScalef(0.2,0.2,0.2)
table()
glPopMatrix()
glPushMatrix() #chair 1
glTranslated(1.3,1.1,4.3)
glRotated(-90,0,1,0)
glScalef(0.2,0.2,0.2)
chair()
glPopMatrix()
glPushMatrix() #chair 2
glTranslated(0.5,1.1,3.7)
glScalef(0.2,0.2,0.2)
chair()
glPopMatrix()
glPushMatrix() #teapot
glTranslated(0.5,1.35,2.5)
glColor3f(0.1,0.1,0.1)
glutSolidTeapot(0.15)
glPopMatrix()
glPushMatrix() #table 2
glTranslated(0.5,1,2.4)
glScalef(0.2,0.2,0.2)
table()
glPopMatrix()
glPushMatrix() #chair 2.1
glTranslated(1.3,1.1,2.4)
glRotated(-90,0,1,0)
glScalef(0.2,0.2,0.2)
chair()
glPopMatrix()
glPushMatrix() #chair 2.2
glTranslated(0.5,1.1,1.9)
glScalef(0.2,0.2,0.2)
chair()
glPopMatrix()
glPushMatrix() #chair 2.3
glTranslated(0.7,1.1,3.1)
glRotated(-180,0,1,0)
glScalef(0.2,0.2,0.2)
chair()
glPopMatrix()
glPushMatrix() #table 3
glTranslated(2.5,1,2.4)
glScalef(0.2,0.2,0.2)
table()
glPopMatrix()
glPushMatrix() #chair 3.1
glTranslated(3.3,1.1,2.4)
glRotated(-90,0,1,0)
glScalef(0.2,0.2,0.2)
chair()
glPopMatrix()
glPushMatrix() #chair 3.2
glTranslated(2.5,1.1,1.9)
glScalef(0.2,0.2,0.2)
chair()
glPopMatrix()
glPushMatrix() #chair 3.3
glTranslated(2.7,1.1,3.1)
glRotated(-180,0,1,0)
glScalef(0.2,0.2,0.2)
chair()
glPopMatrix()
glPushMatrix() #cup
glTranslated(2.9,1,4)
glScalef(0.5,0.5,0.5)
cup()
glPopMatrix()
glPushMatrix() #cup 2
glTranslated(2.9,1,2.3)
glScalef(0.5,0.5,0.5)
cup()
glPopMatrix()
glPushMatrix() #table 4
glTranslated(2.5,1,4.3)
glRotated(0,0,0,0)
glScalef(0.2,0.2,0.2)
table()
glPopMatrix()
glPushMatrix() #chair 4.1
glTranslated(3.3,1.1,4.3)
glRotated(-90,0,1,0)
glScalef(0.2,0.2,0.2)
chair()
glPopMatrix()
glPushMatrix() #chair 4.2
glTranslated(2.5,1.1,3.7)
glScalef(0.2,0.2,0.2)
chair()
glPopMatrix()
glPushMatrix() #closet
glTranslated(-1.0,0.5,0.75)
glRotated(60,0,1,0)
glScalef(1.3,1.3,1.3)
closet()
glPopMatrix()
glPushMatrix() #rubber
glTranslated(-1.6,2.8,2.5)
glRotated(60,0,1,0)
glScalef(2.2,2.2,2.2)
rubber()
glPopMatrix()
glPushMatrix() #sleep
glTranslated(2,1.6,-0.1)
glRotated(270,0,1,0)
glScalef(2.2,2.2,2.2)
sleep()
glPopMatrix()
glPushMatrix() #TV
glTranslated(4,2,1.5)
glRotated(180,0,1,0)
glScalef(2.2,2.2,2.2)
tv()
glPopMatrix()
glPushMatrix() #Sofa
glTranslated(2.8,3.4,2.2)
glRotated(180,0,1,0)
glScalef(0.3,0.3,0.3)
sofa()
glPopMatrix()
glPushMatrix() #lamp
glTranslated(0.5,1.35,4.4)
glRotated(60,0,1,0)
glScalef(0.5,0.5,0.5)
lamp()
glPopMatrix()
glPushMatrix()
glTranslated(0.25,0.42,0.35)
glPopMatrix()
glPushMatrix()
glTranslated(0.4,0,0.4)
glPopMatrix()
glPushMatrix() #tree
glTranslated(4.5,3.4,0.2)
glRotated(-90,0,1,0)
glScalef(0.5,0.5,0.5)
tree()
glPopMatrix()
glPushMatrix() #tree2
glTranslated(4.5,1.25,0.3)
glRotated(-90,0,1,0)
glScalef(0.5,0.5,0.5)
tree()
glPopMatrix()
glPushMatrix() #tree3
glTranslated(3.5,1.25,0.3)
glRotated(-90,0,1,0)
glScalef(0.5,0.5,0.5)
tree()
glPopMatrix()
glPushMatrix() #tree4
glTranslated(2.5,1.25,0.3)
glRotated(-90,0,1,0)
glScalef(0.5,0.5,0.5)
tree()
glPopMatrix()
glPushMatrix() #tree5
glTranslated(1.5,1.25,0.3)
glRotated(-90,0,1,0)
glScalef(0.5,0.5,0.5)
tree()
glPopMatrix()
glPushMatrix() #tree6
glTranslated(1.2,3.4,0.3)
glRotated(-90,0,1,0)
glScalef(0.5,0.5,0.5)
tree()
glPopMatrix()
glPushMatrix() #บันได 1
glTranslated(3.95,2.8,0)
glScalef(0.2,1,0.5)
glColor3f(0,0.5,1)
wall(0.1)
glPopMatrix()
glPushMatrix() #บันได 2
glTranslated(3.95,2.4,1.9)
glScalef(0.2,1,0.2)
bun6()
glPopMatrix()
glPushMatrix() #บันได 3
glTranslated(3.95,2.1,2.3)
glScalef(0.2,1,0.2)
glColor3f(0,0.5,1)
bun2()
glPopMatrix()
glPushMatrix() #บันได 4
glTranslated(3.95,1.8,2.7)
glScalef(0.2,1,0.2)
bun6()
glPopMatrix()
glPushMatrix() #บันได 5
glTranslated(3.95,1.4,3.1)
glScalef(0.2,1,0.2)
glColor3f(0,0.5,1)
bun2()
glPopMatrix()
glPushMatrix() #บันได 6
glTranslated(3.95,1.0,3.5)
glScalef(0.2,1,0.2)
bun6()
glPopMatrix()
glPushMatrix() #second floor
glColor3f(0.5,1,1)
glTranslated(0,3.2,0)
glScalef(0.8,1,0.5)
wall(0.025)
glPopMatrix()
glPushMatrix() #first floor
first()
glPopMatrix()
glPushMatrix() #window1
glTranslated(-0.6,1.35,5)
glRotated(90,0,1,0)
glScalef(1,1,1)
window()
glPopMatrix()
glPushMatrix() #window2
glTranslated(-0.6,1.35,3.3)
glRotated(90,0,1,0)
glScalef(1,1,1)
window()
glPopMatrix()
glPushMatrix() #wall left
glColor3f(0,0.5,0.5)
glRotated(-90.0,1.0,0.0,0.0)
wall(0.05)
glPopMatrix()
glRotated(90.0,0.0,0.0,180.0) #wall right
glColor3f(0,0.1,0.1)
wall(0.05)
glPopMatrix()
glPopMatrix()
glFlush()
def reshape(w, h):
glViewport(0, 0, w, h)
glMatrixMode (GL_PROJECTION)
glLoadIdentity()
# winlet=1.0
gluPerspective(45, (w/h), 0.1, 50.0)
# glOrtho(-winlet*64/48,winlet*64/48.0,-winlet*64/48,winlet*64/48,0.6,100.0)
# if w <= h:
# glOrtho(-2.5, 2.5, -2.5*h/w,
# 2.5*h/w, -10.0, 10.0)
# else:
# glOrtho(-2.5*w/h,
# 2.5*w/h, -2.5, 2.5, -10.0, 10.0)
# glGetFloatv(GL_MODELVIEW_MATRIX, current_mv_mat)
# glLoadIdentity()
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def keyboard(key, x, y):
global rot,speed,sum_rot_updown,current_mv_mat
#WASD
if key == b'w':
glTranslate(0, 0, 2 / speed)
if key == b'a':
glTranslate(1 / speed, 0, 0)
if key == b's':
glTranslate(0, 0, -2 / speed)
if key == b'd':
glTranslate(-1 / speed, 0, 0)
#UP/Down
if key == b'm':
glTranslate(0, -1 / speed, 0)
if key == b'n':
glTranslate(0, 1 / speed, 0)
# glMultMatrixf(current_mv_mat)
#rotate
if key == b'i':
sum_rot_updown -= speed / 10
if key == b'k':
sum_rot_updown += speed / 10
if key == b'j':
glRotatef(speed / 10, 0, -1, 0)
rot += 1
if key == b'l':
glRotatef(speed / 10, 0, 1, 0)
rot -= 1
glutPostRedisplay()
glutInit(sys.argv)
glutInitDisplayMode (GLUT_SINGLE | GLUT_RGB | GLUT_DEPTH)
glutInitWindowSize(w,h)
glutCreateWindow('rommtour')
init()
glutReshapeFunc(reshape)
glutKeyboardFunc(keyboard)
glutDisplayFunc(display)
glutMainLoop() | UTF-8 | Python | false | false | 19,805 | py | 3 | RoomProject-219-262-265.py | 2 | 0.610481 | 0.478121 | 0 | 966 | 18.991718 | 79 |
Bloodielie/vk_contrallers_bot | 6,098,853,570,221 | 66e3fe87d8098ab0f1d45aaeeaa456469a54d599 | 6360e50839c84f2392ffbccd7324b85b0f55eacc | /utils/json.py | 96d8f9b8bcc89d037750530fcee9c943617ea12f | [] | no_license | https://github.com/Bloodielie/vk_contrallers_bot | 3a86290e5dc29a040476db20d2b8b8216a644bf6 | 6a961996d3bc8900d3417bbe76e1a1fafe1e4813 | refs/heads/implementation_through_api | 2022-12-04T05:16:57.445464 | 2020-02-05T18:39:49 | 2020-02-05T18:39:49 | 233,390,974 | 0 | 0 | null | false | 2022-11-22T05:14:23 | 2020-01-12T12:43:55 | 2020-02-05T18:43:13 | 2022-11-22T05:14:21 | 1,199 | 0 | 0 | 1 | Python | false | false | import json
class JsonUtils:
def __init__(self, file_path):
self.file_path = file_path
def get_json(self):
with open(self.file_path, 'r', encoding='utf-8') as file:
return json.load(file)
def write_json(self, data, indent=4, ensure_ascii=None, sort_keys=None):
with open(self.file_path, 'r', encoding='utf-8') as file:
data_json = json.load(file)
with open(self.file_path, 'w', encoding='utf-8') as file:
data_json.update(data)
json.dump(data_json, file, indent=indent, sort_keys=sort_keys, ensure_ascii=ensure_ascii)
def write_json_unsafe(self, data):
with open(self.file_path, 'w', encoding='utf-8') as file:
json.dump(data, file, indent=4, ensure_ascii=False)
class JsonWrite(JsonUtils):
def first_write(self, id: str, time=10800, sort="Время", display='Фото'):
data = {id: {"time": time, "sort": sort, "display": display}}
self.write_json(data=data, indent=4)
def get_member(self, key):
return self.get_json().get(key) | UTF-8 | Python | false | false | 1,116 | py | 29 | json.py | 21 | 0.594399 | 0.583559 | 0 | 30 | 34.966667 | 101 |
lieuzhenghong/programming-practice | 7,825,430,415,800 | 81dd0875be0fa8062adc77dfcc1a7d691793c2bf | c416961f635d700927dbb7b90556ee5b7698af6e | /problems/LC_2_add_two_numbers.py | 55eb71dc44e2b029f60d59c51f131b7f6ea830a7 | [] | no_license | https://github.com/lieuzhenghong/programming-practice | ffe0d3b2a7d56f689233478257777d3e66b5ff38 | a0ab71b1fd759cb9097dea1e8232cdefc8e6f1da | refs/heads/master | 2023-03-06T04:33:17.282844 | 2021-02-17T15:17:21 | 2021-02-17T15:17:21 | 292,199,971 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
You are given two non-empty linked lists representing two non-negative
integers. The digits are stored in reverse order and each of their nodes
contain a single digit. Add the two numbers and return it as a linked list.
You may assume the two numbers do not contain any leading zero, except the
number 0 itself.
'''
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def traverse_ll(self, l):
ll = []
while l:
ll.append(l.val)
l = l.next
print(ll)
def appendListNode(self, l_from: ListNode,
l_to: ListNode, carry: int):
value = l_from.val + carry
val = value if value < 10 else value - 10
carry = 0 if value < 10 else 1
l_to.next = ListNode(val, next=None)
return (l_to.next, carry)
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
carry = 0
first = None
last = None
while l1 and l2:
value = l1.val + l2.val + carry
val = value if value < 10 else value - 10
carry = 0 if value < 10 else 1
if first is None: # base case: resultant list is empty
first = ListNode(val, next=None)
last = first
else:
last.next = ListNode(val, next=None)
last = last.next
l1 = l1.next
l2 = l2.next
while l1:
last, carry = self.appendListNode(l1, last, carry)
l1 = l1.next
while l2:
last, carry = self.appendListNode(l2, last, carry)
l2 = l2.next
# Last number
if carry:
last.next = ListNode(carry, next=None)
last = last.next
return first
# Do a couple of edge cases
l1 = ListNode(5)
l2 = ListNode(2)
sol = Solution()
l3 = sol.addTwoNumbers(l1, l2)
sol.traverse_ll(l3)
l1 = ListNode(5)
l2 = ListNode(5)
sol = Solution()
l3 = sol.addTwoNumbers(l1, l2)
sol.traverse_ll(l3)
l1 = ListNode(2, ListNode(4, ListNode(3)))
l2 = ListNode(5, ListNode(6, ListNode(4)))
sol = Solution()
l3 = sol.addTwoNumbers(l1, l2)
sol.traverse_ll(l3)
# When one list is longer than the other
l1 = ListNode(3, ListNode(9, ListNode(2, ListNode(1))))
l2 = ListNode(1, ListNode(8))
sol = Solution()
l3 = sol.addTwoNumbers(l1, l2)
sol.traverse_ll(l3)
# When one list is 0
l1 = ListNode(3, ListNode(9, ListNode(2, ListNode(1))))
l2 = ListNode(0)
sol = Solution()
l3 = sol.addTwoNumbers(l1, l2)
sol.traverse_ll(l3)
| UTF-8 | Python | false | false | 2,584 | py | 65 | LC_2_add_two_numbers.py | 61 | 0.583591 | 0.549149 | 0 | 96 | 25.916667 | 75 |
akosourov/flask_tasks | 9,019,431,349,183 | 4ba9858cdf065a0ee8eac82cd64f7319bca97fca | 7bc50d91c0354020d105099749e69fdf2a295841 | /users/users/__init__.py | de0f38f27a83a288fb364843121151a5646805e2 | [] | no_license | https://github.com/akosourov/flask_tasks | f5b1a4a639b3decb963f676c3d4724ddc181481f | 3201f9c516e5491144d9ae623151c2204f39b0a1 | refs/heads/master | 2020-03-25T12:09:10.636822 | 2018-08-07T23:42:22 | 2018-08-07T23:42:22 | 143,762,152 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask
default_config = dict(
SECRET_KEY='dev',
DATABASE='dev.sqlite',
ADMIN_NAME='admin',
ADMIN_PASSWORD= 'admin'
)
def create_app(config_file_name=None):
app = Flask(__name__, instance_relative_config=True)
if config_file_name is None:
app.config.from_mapping(**default_config)
else:
app.config.from_pyfile(config_file_name)
from . import db
with app.app_context():
db.init_db()
db.init_app(app)
from . import auth, users
app.register_blueprint(auth.bp)
app.register_blueprint(users.bp)
app.add_url_rule('/', endpoint='index') # makes index == blog.index
return app
| UTF-8 | Python | false | false | 679 | py | 11 | __init__.py | 9 | 0.637703 | 0.637703 | 0 | 30 | 21.633333 | 73 |
smalljjjack/Finger-Dancer | 13,297,218,792,560 | d8582ffbc5cd517ce63174cba5eaf3d6933380f7 | a0ca99a1ac9d9fe29ab37c2007b823b4e9066da1 | /Tetris/game.py | 8974c3be9e0751d6bec3ffda27b399eed675cafb | [
"MIT"
] | permissive | https://github.com/smalljjjack/Finger-Dancer | c30c962d866026425db7156958bd6f7b9b4daabf | 3288f952a2c8a1adf2aae58b8abf6145eafef8e7 | refs/heads/master | 2020-04-01T04:29:14.510470 | 2018-10-14T07:52:57 | 2018-10-14T07:52:57 | 152,865,699 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pygame
from pygame.locals import *
import math
import random
class Brick():
def __init__(self, p_position, p_color):
self.position = p_position
self.color = p_color
self.image = pygame.Surface([brick_width, brick_height])
self.image.fill(self.color)
def draw(self):
screen.blit(self.image, (self.position[0] * brick_width, self.position[1] * brick_height))
class Block():
def __init__(self, p_bricks_layout, p_direction, p_color):
self.bricks_layout = p_bricks_layout
self.direction = p_direction
self.cur_layout = self.bricks_layout[self.direction]
self.position = cur_block_init_position
self.stopped = False
self.move_interval = 800
self.bricks = []
for (x, y) in self.cur_layout:
self.bricks.append(Brick(
(self.position[0] + x, self.position[1] + y),
p_color))
def setPosition(self, position):
self.position = position
self.refresh_bircks()
def draw(self):
for brick in self.bricks:
brick.draw()
def isLegal(self, layout, position):
(x0, y0) = position
for (x, y) in layout:
if x + x0 < 0 or y + y0 < 0 or x + x0 >= field_width or y + y0 >= field_height:
return False
if field_map[y + y0][x + x0] != 0:
return False
return True
def left(self):
new_position = (self.position[0] - 1, self.position[1])
if self.isLegal(self.cur_layout, new_position):
self.position = new_position
self.refresh_bircks()
def right(self):
new_position = (self.position[0] + 1, self.position[1])
if self.isLegal(self.cur_layout, new_position):
self.position = new_position
self.refresh_bircks()
def down(self):
(x, y) = (self.position[0], self.position[1] + 1)
while self.isLegal(self.cur_layout, (x, y)):
self.position = (x, y)
self.refresh_bircks()
y += 1
def refresh_bircks(self):
for (brick, (x, y)) in zip(self.bricks, self.cur_layout):
brick.position = (self.position[0] + x, self.position[1] + y)
def stop(self):
global field_bricks
global score
self.stopped = True
ys = []
for brick in self.bricks:
field_bricks.append(brick)
(x, y) = brick.position
if y not in ys:
ys.append(y)
field_map[y][x] = 1
eliminate_count = 0
ys.sort()
for y in ys:
if 0 in field_map[y]:
continue
eliminate_count += 1
for fy in range(y, 0, -1):
field_map[fy] = field_map[fy - 1][:]
field_map[0] = [0 for i in range(field_width)]
tmp_field_bricks = []
for fb in field_bricks:
(fx, fy) = fb.position
if fy < y:
fb.position = (fx, fy + 1)
tmp_field_bricks.append(fb)
elif fy > y:
tmp_field_bricks.append(fb)
field_bricks = tmp_field_bricks
if eliminate_count == 1:
score += 1
elif eliminate_count == 2:
score += 2
elif eliminate_count == 3:
score += 4
elif eliminate_count == 4:
score += 6
'''
attention: the code below does not work
index = 0
for fb in field_bricks:
if fb.y < y:
fb.y += 1
elif fb.y == y:
field_bricks.pop(index)
index += 1
'''
def update(self, time):
global last_move
self.draw()
if last_move == -1 or time - last_move >= self.move_interval:
new_position = (self.position[0], self.position[1] + 1)
if self.isLegal(self.cur_layout, new_position):
self.position = new_position
self.refresh_bircks()
last_move = time
else:
self.stop()
def rotate(self):
new_direction = (self.direction + 1) % len(self.bricks_layout)
new_layout = self.bricks_layout[new_direction]
if not self.isLegal(new_layout, self.position):
return
self.direction = new_direction
self.cur_layout = new_layout
for (brick, (x, y)) in zip(self.bricks, self.cur_layout):
brick.position = (self.position[0] + x, self.position[1] + y)
self.refresh_bircks()
self.draw()
def drawField():
for brick in field_bricks:
brick.draw()
def drawInfoPanel():
font = pygame.font.Font("resources/fonts/MONACO.TTF", 18)
survivedtext = font.render('score: ' + str(score), True, (255, 255, 255))
textRect = survivedtext.get_rect()
textRect.topleft = ((field_width + 2) * brick_width, 10)
screen.blit(survivedtext, textRect)
next_block.draw()
def drawFrame():
frame_color = pygame.Color(200, 200, 200)
pygame.draw.line(screen, frame_color, (field_width * brick_width, field_height * brick_height), (field_width * brick_width, 0), 3)
def getBlock():
block_type = random.randint(0, 6)
if block_type == 0:
return Block(bricks_layout_0, random.randint(0, len(bricks_layout_0) - 1), colors_for_bricks[0])
elif block_type == 1:
return Block(bricks_layout_1, random.randint(0, len(bricks_layout_1) - 1), colors_for_bricks[1])
elif block_type == 2:
return Block(bricks_layout_2, random.randint(0, len(bricks_layout_2) - 1), colors_for_bricks[2])
elif block_type == 3:
return Block(bricks_layout_3, random.randint(0, len(bricks_layout_3) - 1), colors_for_bricks[3])
elif block_type == 4:
return Block(bricks_layout_4, random.randint(0, len(bricks_layout_4) - 1), colors_for_bricks[4])
elif block_type == 5:
return Block(bricks_layout_5, random.randint(0, len(bricks_layout_5) - 1), colors_for_bricks[5])
elif block_type == 6:
return Block(bricks_layout_6, random.randint(0, len(bricks_layout_6) - 1), colors_for_bricks[6])
# 0: oooo
# 1: oo
# oo
# 2: o
# ooo
# 3: o
# oo
# o
# 4: o
# oo
# o
# 5: ooo
# o
# 6: ooo
# o
bricks_layout_0 = (
((0, 0), (0, 1), (0, 2), (0, 3)),
((0, 1), (1, 1), (2, 1), (3, 1)))
bricks_layout_1 = (
((1, 0), (2, 0), (1, 1), (2, 1)),
)
bricks_layout_2 = (
((1, 0), (0, 1), (1, 1), (2, 1)),
((0, 1), (1, 0), (1, 1), (1, 2)),
((1, 2), (0, 1), (1, 1), (2, 1)),
((2, 1), (1, 0), (1, 1), (1, 2)),
)
bricks_layout_3 = (
((0, 1), (1, 1), (1, 0), (2, 0)),
((0, 0), (0, 1), (1, 1), (1, 2)),
)
bricks_layout_4 = (
((0, 0), (1, 0), (1, 1), (2, 1)),
((1, 0), (1, 1), (0, 1), (0, 2)),
)
bricks_layout_5 = (
((0, 0), (1, 0), (1, 1), (1, 2)),
((0, 2), (0, 1), (1, 1), (2, 1)),
((1, 0), (1, 1), (1, 2), (2, 2)),
((2, 0), (2, 1), (1, 1), (0, 1)),
)
bricks_layout_6 = (
((2, 0), (1, 0), (1, 1), (1, 2)),
((0, 0), (0, 1), (1, 1), (2, 1)),
((0, 2), (1, 2), (1, 1), (1, 0)),
((2, 2), (2, 1), (1, 1), (0, 1)),
)
colors_for_bricks = (
pygame.Color(255, 0, 0), pygame.Color(0, 255, 0), pygame.Color(0, 0, 255),
pygame.Color(100, 100, 100), pygame.Color(120, 200, 0), pygame.Color(100, 0, 200),
pygame.Color(10, 100, 30))
field_width, field_height = 12, 17
cur_block_init_position = (4, 0)
info_panel_width = 8
next_block_init_position = (field_width + 3, 5)
field_map = [[0 for i in range(field_width)] for i in range(field_height)]
game_over_img = pygame.image.load("resources/images/game_over.gif")
running = True
score = 0
brick_width, brick_height = 30, 30
field_bricks = []
next_block = None
last_move = -1
pygame.init()
screen = pygame.display.set_mode(((field_width + info_panel_width) * brick_width, field_height * brick_height), 0, 32)
pygame.display.set_caption('Tetris')
while running:
if next_block == None:
cur_block = getBlock()
else:
cur_block = next_block
cur_block.setPosition(cur_block_init_position)
next_block = getBlock()
next_block.setPosition(next_block_init_position)
if not cur_block.isLegal(cur_block.cur_layout, cur_block.position):
cur_block.draw()
running = False
continue
while not cur_block.stopped:
screen.fill(0)
drawFrame()
time = pygame.time.get_ticks()
cur_block.update(time)
drawField()
drawInfoPanel()
pygame.display.flip()
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit(0)
if event.type == pygame.KEYDOWN:
if event.key == K_w or event.key == K_UP:
cur_block.rotate()
last_move = time
elif event.key == K_a or event.key == K_LEFT:
cur_block.left()
elif event.key == K_d or event.key == K_RIGHT:
cur_block.right()
elif event.key == K_s or event.key == K_DOWN:
cur_block.down()
last_move = time - 500
screen.blit(game_over_img, (field_width / 2 * brick_width, (field_height / 2 - 2) * brick_height))
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit(0)
pygame.display.flip()
pygame.display.update()
| UTF-8 | Python | false | false | 9,727 | py | 2 | game.py | 1 | 0.520202 | 0.482574 | 0 | 298 | 31.64094 | 134 |
edinsonlenin/probandogit | 17,514,876,640,111 | 20b7f923a3d85edebb540a6d97a71e8c8174472c | 95e0c5ff2ac733a84d7d022dc47c4244bb0baed1 | /inicio.py | f3058a333d22106c8330ac49649724adf13366c2 | [] | no_license | https://github.com/edinsonlenin/probandogit | 3ca136e03c476674959a77287842b5306cc71b12 | 9586577d428e9e283db289bee0334afd3cfd9f53 | refs/heads/master | 2022-12-13T21:38:05.277389 | 2020-09-11T21:24:48 | 2020-09-11T21:24:48 | 294,229,716 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | primer cambio
segundo cambio
tercer cambio | UTF-8 | Python | false | false | 42 | py | 1 | inicio.py | 1 | 0.880952 | 0.880952 | 0 | 3 | 13.333333 | 14 |
biggreenogre/GatewayManager | 10,041,633,585,195 | dee81312892b87923de176dc38426ff933da1ba1 | 569c8fea9007fb6acee55df23c4db5c2076e3e53 | /gatewayManager.py | 4fdcac36f0a6e8c8452c52f4ba0b4831170d69a3 | [] | no_license | https://github.com/biggreenogre/GatewayManager | a7d0b08dc2f2d72c0eca227208d0ecb0049dbe0c | 9fb82944c19cf738e2ea09d9d5bf7f4447cd1871 | refs/heads/master | 2016-09-13T14:12:18.780303 | 2016-06-02T14:30:04 | 2016-06-02T14:30:04 | 57,902,509 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Import libs
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from time import sleep, time, strftime
import ovpnMgr
try:
from gpiozero import Button, PWMLED, RGBLED
hasButton = True
except:
hasButton = False
#global lastState
#global realState
#global nextState
#global buttonLong
#global hasButton
#global buttonRGB
buttonRGB = True # RGB status LEDs?
buttonLong = 3.0
buttonShort = 1.0
debug_logging = True
debuglog = "/home/drew/GatewayManager/gwmd.log"
lastlog = 0.0
lastState = 0
realState = 0
nextState = 0
#ledColour = (0, 0, 0)
def logit(logText):
if debug_logging:
now = strftime("%c")
dlog = open(debuglog, 'a')
dlog.write( "%s: %s\n" % (now,logText) )
dlog.close()
if hasButton:
# Init button device
bigButton = Button(pin=17) #,bounce_time=1,pull_up=True) # GPIO17 Physical 11
# buttonLED = PWMLED(18) # GPIO18 Physical 12
# Grnd to button & LEDs # Physical 14
# LED params
BLINK_ON = 0.25
BLINK_OFF = 0.25
FADE_IN = 0.5
FADE_OUT = 0.5
fbOn = (BLINK_ON/2)
fbOff = (BLINK_OFF/2)
ffIn = (FADE_IN/2)
ffOut = (FADE_OUT/2)
cycleTime = BLINK_ON + BLINK_OFF + FADE_IN + FADE_OUT
# RGB Colours
colourUp=(0,0.2,0)
colourDown=(0.2,0,0)
colourChange=(0.2,0.33,0)
colourUnknown=(0.2,0,0.2)
if buttonRGB:
buttonLED = PWMLED(4) # GPIO18 Physical 7
statusLED = RGBLED(18,22,27) # Physical 12, 15, 13
else:
buttonLED = PWMLED(18) # GPIO18 Physical 12
statusLED = PWMLED(27) # GPIO27 Physical 13
def gw_status():
#print( "Getting status ...")
result = ovpnMgr.m2mg( 'status' )
#print( "Result: %d:%s" % (result[0], result[1]) )
if result[0] == 0:
if result[1] == 'UP':
gw_status = 1
elif result[1] == 'DOWN':
gw_status = -1
else:
gw_status = 0
else:
gw_status = 0
return gw_status
def gw_up():
#print( "gw_up....")
result = ovpnMgr.m2mg( 'up' )
return result
def gw_down():
#print( "gw_down....")
result = ovpnMgr.m2mg( 'down' )
return result
def bothLED_on():
if not buttonLED.is_lit and not statusLED.is_lit:
buttonLED.blink(on_time=0, off_time=0, fade_in_time=FADE_IN, fade_out_time=0, n=1, background=True)
statusLED.blink(on_time=0, off_time=0, fade_in_time=FADE_IN, fade_out_time=0, n=1, background=False)
buttonLED.on()
statusLED.on()
elif statusLED.is_lit:
bLED_on()
elif buttonLED.is_lit:
sLED_on()
def bothLED_off():
if buttonLED.is_lit and statusLED.is_lit:
buttonLED.blink(on_time=0, off_time=0, fade_in_time=0, fade_out_time=FADE_OUT, n=1, background=True)
statusLED.blink(on_time=0, off_time=0, fade_in_time=0, fade_out_time=FADE_OUT, n=1, background=False)
buttonLED.off()
statusLED.off()
elif statusLED.is_lit:
sLED_off()
elif buttonLED.is_lit:
bLED_off()
def bLED_on():
if not buttonLED.is_lit:
buttonLED.blink(on_time=0, off_time=0,
fade_in_time=FADE_IN, fade_out_time=0,
n=1, background=False)
buttonLED.on()
def bLED_off():
if buttonLED.is_lit:
buttonLED.blink(on_time=0, off_time=0,
fade_in_time=0, fade_out_time=FADE_OUT,
n=1, background=False)
buttonLED.off()
def bLED_blink(blinks=None):
buttonLED.blink(on_time=BLINK_ON, off_time=BLINK_OFF,
fade_in_time=FADE_IN, fade_out_time=FADE_OUT,
n=blinks, background=True)
def sLED_on():
if not statusLED.is_lit:
statusLED.blink(on_time=0, off_time=0,
fade_in_time=FADE_IN, fade_out_time=0,
n=1, background=False)
statusLED.on()
def sLED_off():
if statusLED.is_lit:
statusLED.blink(on_time=0, off_time=0,
fade_in_time=0, fade_out_time=FADE_OUT,
n=1, background=False)
statusLED.off()
def sLED_blink(blinks=None):
statusLED.blink(on_time=BLINK_ON, off_time=BLINK_OFF,
fade_in_time=FADE_IN, fade_out_time=FADE_OUT,
n=blinks, background=True)
def setLEDs(ledState):
if buttonRGB:
if ledState == 4:
#print( "State 4")
# Gateway UP #Solid green
if statusLED.color != colourUp:
#statusLED.blink(on_time=0, off_time=0,
# fade_in_time=FADE_IN, fade_out_time=0,
# on_color=colourUp, off_color=colourUp,
# n=1, background=False)
#statusLED.off()
statusLED.color = colourUp
#ledColour = colourUp
elif ledState == 3:
#print( "State 3")
# Gateway going DOWN # Flash Green/Amber
statusLED.blink(on_time=BLINK_ON, off_time=BLINK_OFF,
fade_in_time=FADE_IN, fade_out_time=FADE_OUT,
on_color=colourUp, off_color=(0,0,0), #colourChange,
n=3, background=False)
statusLED.color = colourChange
#ledColour = colourChange
#sleep(2 * cycleTime)
elif ledState == 2:
#print( "State 2")
# Gateway going UP # Flash Red/Amber
statusLED.blink(on_time=BLINK_ON, off_time=BLINK_OFF,
fade_in_time=FADE_IN, fade_out_time=FADE_OUT,
on_color=colourDown, off_color=(0,0,0), #colourChange,
n=3, background=False)
statusLED.color = colourChange
#ledColour = colourChange
#sleep(2 * cycleTime)
elif ledState == 1:
#print( "State 1")
#Gateway DOWN # Solid Red
if statusLED.color != colourDown:
statusLED.blink(on_time=0, off_time=0,
fade_in_time=FADE_IN, fade_out_time=0,
on_color=colourDown, off_color=colourDown,
n=1, background=False)
#statusLED.off()
statusLED.color = colourDown
#ledColour = colourDown
elif ledState == 0:
#print( "State 0")
#Gateway unknown state #Slow flash Amber/Off
statusLED.blink(on_time=BLINK_ON, off_time=BLINK_OFF,
fade_in_time=FADE_IN, fade_out_time=FADE_OUT,
on_color=colourUnknown, off_color=(0,0,0),
n=3, background=False)
statusLED.color = colourUnknown #( 0, 0.2, 0)
#ledColour = ( 0, 0.2, 0)
#sleep(3)
else:
#Something broke #Rapid flash Red/Green
statusLED.blink(on_time=fbOn, off_time=fbOff,
fade_in_time=ffIn, fade_out_time=ffOut,
on_color=colourUp, off_color=colourDown,
n=3, background=False)
#sleep(2 * cycleTime)
else:
if ledState == 4:
# Gateway UP # bLED on solid
bLED_on()
sLED_off()
elif ledState == 3:
# Gateway going DOWN # bLED on solid, sLED flash
bLED_on()
sLED_blink(3)
sleep(cycleTime)
elif ledState == 2:
# Gateway going UP # bLED off, sLED flash
bLED_off()
sLED_blink(3)
sleep(3)
elif ledState == 1:
#Gateway DOWN # All off
bothLED_off()
elif ledState == 0:
#Gateway unknown state #sLED flash
sLED_blink(3)
sleep(cycleTime)
else:
#Something broke #Rapid flash both
statusLED.blink(on_time=fbOn, off_time=fbOff,
fade_in_time=ffIn, fade_out_time=ffOut,
n=3, background=True)
buttonLED.blink(on_time=fbOn, off_time=fbOff,
fade_in_time=ffIn, fade_out_time=ffOut,
n=3, background=True)
sleep(3)
def button():
# global lastState
global realState
global nextState
buttonDown = time()
while True:
if ( not bigButton.is_pressed ) or ( time() > ( buttonDown + buttonLong ) ):
buttonUp = time()
break
else:
sleep(0.1)
#print( "button oldState %s" % realState)
if buttonUp >= ( buttonDown + buttonLong ):
logit( "Long press.")
#print( "Long press.")
# lastState = realState
realState = gw_status()
if ( realState != 0 ) and ( realState == nextState ):
#print( "They're equal, system stable, change state" )
nextState = realState * -1
else:
dummyVar = None
logit("Button long press but system not stable.")
elif buttonUp >= ( buttonDown + buttonShort ):
dummyVar = None
logit("Short press.")
#print( "Short press.")
else:
dummyVar = None
logit("Not a press.")
#print( "Not a press.")
#print( "button lastState: %d realState: %d nextState %d" % ( lastState, realState, nextState) )
def main():
global lastState
global realState
global nextState
global lastlog
# Init realState and nextState
logit( "\n\n============================\n=== GatewayManager init. ===\n============================" )
realState = gw_status()
nextState = realState
logit( "\n============================\n=== Gateway initial state: %s\n============================" % realState)
#print( "lastState %s" % lastState)
#print( "curState %s" % realState)
#print( "nextState %s" % nextState)
#print( "---")
if realState > 0:
bLED_on()
elif realState < 0:
bothLED_off()
else:
bLED_off()
sLED_blink(1)
# Do something when button pressed
bigButton.when_pressed = button
while True:
oldState = realState
#print( "lastState %s" % lastState)
#print( "oldState %s" % oldState)
realState = gw_status()
if ( realState == 0 ) or \
( realState != nextState ) or \
( lastState != realState ) or \
( oldState != realState ) or \
( lastlog + 120.0 ) <= time():
ledColour = statusLED.color
logit("lastState:%s oldState:%s curState:%s nextState:%s ledColour:%s" % (lastState,oldState,realState,nextState,ledColour))
# logit("ledColour: %s" % statusLED.color())
lastlog = time()
#print( "curState %s" % realState)
#print( "nextState %s" % nextState)
#print( "---")
if ( realState == 0 ) or ( realState != nextState ):
if realState != nextState:
if nextState > 0:
gw_up()
setLEDs(2)
elif nextState < 0:
setLEDs(3)
gw_down()
else:
setLEDs(0)
lastlog = 0.0
else:
if realState == nextState:
lastState = realState
if realState > 0:
setLEDs(4)
elif realState < 0:
setLEDs(1)
sleep(2)
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 11,820 | py | 5 | gatewayManager.py | 4 | 0.514298 | 0.496193 | 0 | 356 | 32.185393 | 136 |
haotianwooo/seniorcare | 68,719,500,279 | ef5f119f2fa72ca474d77b297888b3f9d1ad4797 | cf21ba2100b037ba8c12eeee940080d9f0148d7e | /galileo/arduino-galileo.py | c57d566ac408d92b2ac36964fe158cb195c3114a | [] | no_license | https://github.com/haotianwooo/seniorcare | a9b019afbfc555454b393ec33638db3a2779e019 | f71206642e1a78dd1380cdf47d7bd4d78be09385 | refs/heads/master | 2016-09-10T11:26:03.030507 | 2015-07-30T05:37:12 | 2015-07-30T05:37:12 | 29,128,475 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
#Haotian Wang June 2014
#Send the 'picture' string to arduino
#Receive the data of PM 2.5 and temperature on arduino
#Usage: arduino-galileo.py MQTT_SERVER
import sys
import time
import mosquitto
import json
import serial
import os
ser = serial.Serial()
ser.port = '/dev/ttyS0'
ser.baudrate = 9600
ser.open()
mqtt_server = sys.argv[1]
old_emergency = '0'
def on_message(mosq, userdata, msg):
global old_emergency
json_data = json.loads(str(msg.payload))
print "json_data: "+str(json_data)
if json_data['name'] == 'led_array':
pic = json_data['value']
print 'pic: '+str(pic)
#ret = ser.write(str(pic))
os.system("echo '"+str(pic)+"' > /dev/ttyS0")
#print str(ret)+' chars have been sent into Serial: '+str(pic)
elif json_data['name'] == 'light':
light = json_data['value']
print 'light: '+str(light)
os.system("echo 'l"+str(light)+"'$ > /dev/ttyS0")
elif json_data['name'] == 'steps':
steps = json_data['value']
print 'steps: '+str(steps)
os.system("echo 's"+str(steps)+"'$ > /dev/ttyS0")
elif json_data['name']== 'emergency':
print "emergency!!!! "+json_data['value']
if json_data['value'] == '1':
print "Emergency!"
os.system("echo E$ > /dev/ttyS0")
if json_data['value'] == '0' and old_emergency == '1':
os.system("echo e$ > /dev/ttyS0")
old_emergency = json_data['value']
elif json_data['name'] == 'falling':
if json_data['value'] == '1':
print 'fall!'
os.system("echo F$ > /dev/ttyS0")
mqtt_client = mosquitto.Mosquitto('arduino-galileo')
mqtt_client.on_message = on_message
mqtt_client.connect(mqtt_server)
mqtt_client.subscribe('led_array')
old_alarm = '0'
while 1:
ret = mqtt_client.loop()
if ret == 0:
print 'mqtt listening!'
else:
mqtt_client.unsubscribe('led_array')
mqtt_client.disconnect()
mqtt_client.connect(mqtt_server)
mqtt_client.subscribe('led_array')
try:
f_alarm = open('/home/root/senior_care/alarm_data','r+')
except IOError as err:
print 'File error '+str(err)
alarm = f_alarm.read().rstrip()
f_alarm.close()
if alarm == '1':
print 'medicine alarm!'
os.system("echo 'm' > /dev/ttyS0")
elif alarm == '0' and alarm != old_alarm:
print 'medicine alarm cancelled!'
os.system("echo 'c' > /dev/ttyS0")
old_alarm = alarm
time_now = time.localtime()
hour_now = int(time.strftime("%H",time_now))
min_now = int(time.strftime("%M",time_now))
time_value = hour_now*100+min_now
os.system("echo 't"+str(time_value)+"'$ > /dev/ttyS0")
| UTF-8 | Python | false | false | 2,632 | py | 8 | arduino-galileo.py | 7 | 0.612842 | 0.599924 | 0 | 89 | 28.505618 | 70 |
AlexWeb2018/labs_works | 19,043,885,018,466 | fbe6e752a484e3e7907460dfcbfbd1a354bd8ca0 | f762dd1cce24dc3b0673fe4f077538d9bbafda21 | /15-3.py | b717c7224d8407bd79436c14e9aebb71bdd131a2 | [] | no_license | https://github.com/AlexWeb2018/labs_works | 49041c8b416de59cb8e737c466d0c690d61e5b42 | e3475fa115465de7f8ee27b96de2a609bad86053 | refs/heads/master | 2020-07-23T16:40:35.755399 | 2019-12-25T09:02:48 | 2019-12-25T09:02:48 | 207,633,494 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
N = random.randrange(2,10)
a = [random.randrange(1,11) for i in range(N)]
print("N:",N)
print("Array a:\n",a)
x_odd = None
for i in range(N-1,-1,-1) :
if a[i]%2 == 1 :
x_odd = a[i]
break
print("Last odd:",x_odd)
if x_odd != None :
for (i, item) in enumerate(a) :
if item%2 == 1:
a[i] += x_odd
print("Modified Array a:\n",a) | UTF-8 | Python | false | false | 406 | py | 105 | 15-3.py | 105 | 0.5 | 0.46798 | 0 | 21 | 17.428571 | 46 |
realitysharesadvisors/crytpo-trading-bot | 16,527,034,196,742 | 47497ad3df850a8b77ae84a17d64d539f8396218 | 0d871f635932ef4b5641320a7f094f19f47b6cb9 | /trading-bot(50200strategy).py | cff337b66fa994e239236d9bb7284ce9eb59a5e4 | [] | no_license | https://github.com/realitysharesadvisors/crytpo-trading-bot | f9d8fc6d6cb20367b8b2e8de9305f34106c86ac5 | 63648d5dd64cfd724b86b5b104044c2ee2c4d2f1 | refs/heads/master | 2021-09-11T16:36:40.929663 | 2018-04-09T18:50:54 | 2018-04-09T18:50:54 | 123,356,283 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
import sys, getopt
import datetime
# import gemini
import requests
from poloniex import poloniex ##Convenient python wrapper##
def main(argv):
period = 10
pair = "USDT_BTC"
prices = []
currentMovingAverage = 0
currentFiftyMA = 0
currentTwoHundredMA = 0
lengthOfMA = 0
startTime = False
endTime = False
historicalData = False
tradePlaced = False
typeOfTrade = False
dataDate = ""
orderNumber = ""
dataPoints = []
fiftyMaPoints = []
twoHundredMaPoints = []
total = 100000
bitcoin = 10
fifty = 50
twohundred = 200
# r = gemini.PrivateClient("JBCBbwEmXfD6IwS6Q4WN", "2YzrK8cq5jCRs8r8QKLKUtDDJxqV", sandbox=True)
# print(r)
try:
opts, args = getopt.getopt(argv, "hp:c:n:s:e:", ["period=", "currency=", "points="])
except getopt.GetoptError:
print('trading-bot.py -p <period length> -c <currency pair> -n <period of moving average>')
sys.exit(2)
##Parse arguments##
for opt, arg in opts:
if opt == '-h':
print('trading-bot.py -p <period length> -c <currency pair> -n <period of moving average>')
sys.exit()
elif opt in ("-p", "--period"):
if (int(arg) in [300, 900, 1800, 7200, 14400, 86400]):
period = arg
else:
print('Poloniex requires periods in 300,900,1800,7200,14400, or 86400 second increments')
sys.exit(2)
elif opt in ("-c", "--currency"):
pair = arg
elif opt in ("-n", "--points"):
lengthOfMA = int(arg) ##Number of points used to calc Moving Average(MA)##
elif opt in ("-s"):
startTime = arg ##Has to be in UNIX Timestamp##
elif opt in ("-e"):
endTime = arg ##Has to be in UNIX Timestamp##
##Connect with poloniex with its API keys##
conn = poloniex('OICTWNLZ-NG2ATAQN-S3DYCUWH-QGWMPXD3',
'0348b3b5932f49b1e84feeacbec88a8b9eb0770d11fe553515f6a946b8b23eb2abe9748ee06417f1fd27310759e3b6535782c03de75c3499f42c714b0b6530a5')
output = open("output.html",'w')
output.truncate()
output.write("""
<html>
<head>
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
<script type="text/javascript">
google.charts.load('current', {'packages':['line']});
google.charts.setOnLoadCallback(drawChart);
function drawChart(){
var data = new google.visualization.DataTable();
data.addColumn('string', 'time');
data.addColumn('number', 'value');
data.addColumn('number', '50 MA');
data.addColumn('number', '200 MA');
data.addRows([""")
if (startTime):
##Returns candlestick chart data##
historicalData = conn.api_query("returnChartData",
{"currencyPair": pair, "start": startTime, "end": endTime, "period": period})
# log = open("compare.txt", "w")
# print(historicalData, file = log)
while True:
##If startTime exists, this means that we are retrieving historical data##
if (startTime and historicalData):
nextDataPoint = historicalData.pop(0)
lastPairPrice = nextDataPoint['weightedAverage']
dataDate = datetime.datetime.fromtimestamp(int(nextDataPoint['date'])).strftime('%Y-%m-%d %H:%M:%S')
##Error for wrong period of time##
elif (startTime and not historicalData):
for point, fiftyma, twohundma in zip(dataPoints, fiftyMaPoints, twoHundredMaPoints):
output.write("['" + point['date'] + "'," + point['price'] + "," + fiftyma['price'] + "," + twohundma['price'])
output.write("],\n")
output.write("""]);
var formatter = new google.visualization.NumberFormat({
pattern: '##.######'
});
for (var i = 1; i < data.getNumberOfColumns(); i++) {
formatter.format(data, i);
}
var options = {title: 'Price Chart', legend: { position: 'bottom' }};
var chart = new google.charts.Line(document.getElementById('curve_chart'));
chart.draw(data, options);}</script>
</head>
<body>
<div id="curve_chart" style="width: 100%; height: 100%"></div>
</body>
</html>""")
exit()
##Live data##
else:
currentValues = conn.api_query("returnTicker")
lastPairPrice = currentValues[pair]["last"]
dataDate = datetime.datetime.now()
dataPoints.append({'date':dataDate, 'price': str(lastPairPrice)})
if (len(prices) > 0):
currentFiftyMA = sum(fiftyprices) / 50.0
currentTwoHundredMA = sum(twohundredprices) / 200.0
# previousPrice = prices[-1] ## Latest price ##
previousFiftyMA = (float)(fiftyMaPoints[-1]['price']) ## Latest price ##
previousTwoHundredMA = (float)(twoHundredMaPoints[-1]['price']) ## Latest price ##
if (not tradePlaced):
##If price > MA and price < exactly previous one##
if ((previousFiftyMA < previousTwoHundredMA) and (currentTwoHundredMA < currentFiftyMA)):
print("\nSELL 1 BTC at " + str(lastPairPrice))
#orderNumber = conn.sell(pair, lastPairPrice, .01)
total = total + lastPairPrice
bitcoin = bitcoin - 1
print("Total funds: " + str(total))
print("Total bitcoin: " + str(bitcoin))
# tradePlaced = True
# typeOfTrade = "short"
##If price < MA and price > exactly previous one##
elif ((previousFiftyMA > previousTwoHundredMA) and (currentTwoHundredMA > currentFiftyMA)):
print("\nBUY ORDER")
#orderNumber = conn.buy(pair, lastPairPrice, .01)
print("BUY 1 BTC at " + str(lastPairPrice))
total = total - lastPairPrice
bitcoin = bitcoin + 1
print("Total funds: " + str(total))
print("Total bitcoin: " + str(bitcoin))
# tradePlaced = True
# typeOfTrade = "long"
# elif (typeOfTrade == "short"):
# if (lastPairPrice < currentMovingAverage):
# print("EXIT TRADE")
#conn.cancel(pair, orderNumber)
# tradePlaced = False
# typeOfTrade = False
# elif (typeOfTrade == "long"):
# if (lastPairPrice > currentMovingAverage):
# print("EXIT TRADE")
#conn.cancel(pair, orderNumber)
# tradePlaced = False
# typeOfTrade = False
else:
previousPrice = 0
fiftyMaPoints.append({'date':dataDate, 'price': str(currentFiftyMA)})
twoHundredMaPoints.append({'date':dataDate, 'price': str(currentTwoHundredMA)})
##timestamp##
print(
# "%s Period: %ss %s: %s Moving Average: %s" % (dataDate, period, pair, lastPairPrice, currentMovingAverage))
"Previousprice: %s , %s: %s \nFifty Moving Average: %s \nTwo Hundred Moving Average: %s" % (previousPrice, pair, lastPairPrice, currentFiftyMA, currentTwoHundredMA))
prices.append(float(lastPairPrice))
fiftyprices = prices[-50:] ##Last lengthOfMA prices##
twohundredprices = prices[-200:] ##Last lengthOfMA prices##
##Sleep for real time data, no sleep for historical data##
if (not startTime):
time.sleep(int(period))
print("Total funds: " + str(total))
print("Total bitcoin: " + str(bitcoin) + "\n")
if __name__ == "__main__":
main(sys.argv[1:])
| UTF-8 | Python | false | false | 8,345 | py | 6 | trading-bot(50200strategy).py | 4 | 0.530377 | 0.506171 | 0 | 187 | 42.625668 | 177 |
BaFin/Crumblr2 | 11,184,094,880,781 | dc7eb43a4e19671d99a746d38e158220d4469e44 | 63040592aead3a09c087e683c83ca8f89e649e6b | /class_Template.py | 6a82fe3cf37a11f7bed284509101d78f524c73cb | [] | no_license | https://github.com/BaFin/Crumblr2 | c1c6e5b0fb2496a10b42ccb47b60561a8fc3a9c3 | fa2918f59b639a5a44ac1134912b73126bafba6c | refs/heads/master | 2016-09-15T05:59:57.788025 | 2016-03-02T22:39:06 | 2016-03-02T22:39:06 | 38,463,218 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import re
import shutil
class Template():
template = {'general' : 'general.html',
'quote' : 'quote.html',
'text' : 'text.html',
'link' : 'link.html',
'chat' : 'chat.html',
'answer' : 'answer.html',
'audio' : 'audio.html',
'photo' : 'photo.html',
'video' : 'video.html',
'index' : 'index.html'}
template_dir = os.path.abspath(os.path.join('templates'))
template_style = 'style.css'
def __init__(self, template_type='general'):
self.template_type = template_type
self._template_string = None
self._template_keys = None
def load_template(self):
if(self._template_string != None):
return self._template_string
template_file = os.path.join(Template.template_dir,
Template.template[self.template_type])
with open(template_file, 'r') as f:
self._template_string = f.read()
return self._template_string
def get_keys(self):
if(self._template_keys != None):
return self._template_keys
self.load_template()
rgxp = re.compile('{%(.*?)%}')
self._template_keys = rgxp.findall(self._template_string)
return self._template_keys
def insert_keys(self, keys, kdict, blanking=True, blanking_text='<null>'):
text = self.load_template()
for k in keys:
if(k in kdict):
inserted_string = unicode(kdict[k])
text = text.replace('{%' + k + '%}', inserted_string)
else:
if(blanking == True):
text = text.replace('{%' + k + '%}', blanking_text)
return text
@staticmethod
def copy_style_if_not_exists(dest_path):
style_fname = Template.template_style
if(not os.path.exists(os.path.join(dest_path, style_fname))):
shutil.copy(os.path.join(Template.template_dir, style_fname),
os.path.join(dest_path, style_fname))
| UTF-8 | Python | false | false | 2,097 | py | 14 | class_Template.py | 3 | 0.536481 | 0.536481 | 0 | 67 | 30.283582 | 84 |
RoDaniel/featurehouse | 7,395,933,716,977 | a4cab5589801580f49853730f9ebb47d44b78a6a | dc7dc1ab85403a4467044d4c0c936c17fff5225a | /fstmerge/examples/Fail2ban/rev579-732/base-trunk-579/server/datetemplate.py | a495df29c03ed39cdbb248484605dae12d27a53e | [] | no_license | https://github.com/RoDaniel/featurehouse | d2dcb5f896bbce2c5154d0ba5622a908db4c5d99 | df89ce54ddadfba742508aa2ff3ba919a4a598dc | refs/heads/master | 2020-12-25T13:45:44.511719 | 2012-01-20T17:43:15 | 2012-01-20T17:43:15 | 1,919,462 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = "Cyril Jaquier"
__version__ = "$Revision: 1.1 $"
__date__ = "$Date: 2010-07-25 12:46:34 $"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
__license__ = "GPL"
import re
class DateTemplate:
def __init__(self):
self.__name = ""
self.__regex = ""
self.__cRegex = None
self.__pattern = ""
self.__hits = 0
def setName(self, name):
self.__name = name
def getName(self):
return self.__name
def setRegex(self, regex):
self.__regex = regex.strip()
self.__cRegex = re.compile(regex)
def getRegex(self):
return self.__regex
def setPattern(self, pattern):
self.__pattern = pattern.strip()
def getPattern(self):
return self.__pattern
def isValid(self):
return self.__regex != "" and self.__pattern != ""
def incHits(self):
self.__hits = self.__hits + 1
def getHits(self):
return self.__hits
def matchDate(self, line):
dateMatch = self.__cRegex.search(line)
return dateMatch
def getDate(self, line):
raise Exception("matchDate() is abstract")
| UTF-8 | Python | false | false | 1,020 | py | 287 | datetemplate.py | 81 | 0.62549 | 0.603922 | 0 | 49 | 19.816327 | 52 |
huangchao20/python_test | 9,792,525,464,369 | 86b1f1e16800e5c7291c46fbba8aea7b2d10a15c | 1508196ddc1846cadb5fb0b72f32712069077c71 | /python-面向对象/面向对象/面向对象3.py | e9b55384f04e40d41204bf604ffbcfecf2d86159 | [] | no_license | https://github.com/huangchao20/python_test | b25ab9f3ecbf389df86fd8e142257271aca3a585 | 756fb77ce746c2f9d68c1cafc7f69577ca13e87b | refs/heads/master | 2020-04-27T15:21:58.238908 | 2019-03-08T01:16:07 | 2019-03-08T01:16:07 | 174,443,323 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from obj import Test1
| UTF-8 | Python | false | false | 23 | py | 99 | 面向对象3.py | 88 | 0.782609 | 0.73913 | 0 | 1 | 21 | 21 |
tuantle/simple_nn_with_numpy | 10,565,619,596,170 | 59e0066490a7f516e81238f4cbd687d1e77edc47 | 23ec7089b5b18f8c2808296bdc45819fd5f08180 | /modules/npcore/optimizers.py | 0679ab9efc9dbc48a270542b119c2b3422d076c0 | [
"MIT"
] | permissive | https://github.com/tuantle/simple_nn_with_numpy | 3b5ace743e7372a0fe9844d15cdae754f39aff30 | 4bf5ba23e2df7879030de85eb22b8e30ad9708de | refs/heads/master | 2020-04-16T16:45:52.880599 | 2019-08-13T23:06:00 | 2019-08-13T23:06:00 | 165,748,521 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
#
# Copyright 2016-present Tuan Le.
#
# Licensed under the MIT License.
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://opensource.org/licenses/mit-license.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------
#
# Author Tuan Le (tuan.t.lei@gmail.com)
#
# ------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import print_function
import abc
import json
import math
import numpy as np
from util.const import CONST
from util.validation import MType
# ------------------------------------------------------------------------
class OPTIMIZER(CONST):
    """
    Constant labels and default hyperparameter values shared by the optimizer
    classes in this module.
    """
    LABEL = 'optim'  # label of the abstract base optimizer
    SGD_LABEL = 'sgd'  # stochastic gradient descent
    SGDM_LABEL = 'sgdm'  # stochastic gradient descent with momentum
    RMSPROP_LABEL = 'rmsprop'  # rmsprop (implementation not visible in this chunk)
    ADAM_LABEL = 'adam'  # adam (implementation not visible in this chunk)
    DEFAULT_ETA = 1e-3  # default learning rate
    DEFAULT_ETA_DECAY = 0.9  # default learning-rate decay factor
    DEFAULT_BETA_DECAY1 = 0.9  # presumably adam first-moment decay rate -- confirm against adam impl
    DEFAULT_BETA_DECAY2 = 0.999  # presumably adam second-moment decay rate -- confirm against adam impl
    DEFAULT_MOMENTUM = 0.9  # momentum coefficient (presumably consumed by sgdm -- confirm)
# ------------------------------------------------------------------------
class Optimizer(type):
    """
    A metaclass for an optimizer class.

    Exposes a read-only class-level ``label`` property backed by ``_label``
    and rejects class-attribute assignment, so optimizer classes behave like
    immutable namespaces.
    """
    def __getattr__(cls, key):
        # Fallback invoked only when normal class-attribute lookup fails;
        # delegates to subscript access.  NOTE(review): no __getitem__ is
        # defined on this metaclass, so this likely raises TypeError rather
        # than returning a value -- confirm the intent.
        return cls[key]
    def __setattr__(cls, key, value):
        # Forbid rebinding class attributes after class creation.
        raise ValueError('Cannot set value to an optimizer class.')
    @property
    def label(cls):
        """
        Get optimizer label.
        Returns:
            str
        """
        return cls._label
# ------------------------------------------------------------------------
class Optimizer(object, metaclass=Optimizer):
    _label = OPTIMIZER.LABEL
    # Abstraction of a base optimizer: shared label/snapshot plumbing for the
    # concrete stochastic gradient descent optimizers defined below.
    def __str__(self):
        return self.label

    # ------------------------------------------------------------------------

    @property
    def label(self):
        """
        Get optimizer label.
        Returns:
            str
        """
        return type(self).label

    @abc.abstractmethod
    def reset(self):
        """
        Reset internal states.

        Abstract -- concrete optimizers clear their accumulated state here.
        """
        pass

    @MType(as_json=bool, beautify_json=bool)
    def snapshot(self, *, as_json=False, beautify_json=True):
        """
        Return optimizer as a snapshot dict data.
        Arguments:
            as_json - serialize the snapshot to a JSON string instead of a dict
            beautify_json - indent the JSON output for readability
        Returns:
            snapshot
        """
        data = {
            'label': self.label,
            'base_label': Optimizer.label
        }
        if not as_json:
            # hand back a shallow copy of the snapshot dict
            return data.copy()
        if beautify_json:
            return json.dumps(data, indent=4, sort_keys=False)
        return json.dumps(data)

    @abc.abstractmethod
    def compute_grad_descent_step(self):
        """
        Compute gradient descent step update optimization. Not implemented.
        """
        pass
# ------------------------------------------------------------------------
class SGD(Optimizer):
    _label = OPTIMIZER.SGD_LABEL
    """
    Optimization using the plain stochastic gradient descent update rule.
    """
    # ------------------------------------------------------------------------
    def reset(self):
        """
        Reset internal states. Plain SGD keeps no state, so this is a no-op.
        """
        pass

    @MType(int, [np.ndarray], dict)
    def compute_grad_descent_step(self, epoch, egs, hparam):
        """
        Implement stochastic gradient descent update formula. Compute gradient
        step delta tensors:  step <- eta * gradient.
        Arguments:
            epoch: current epoch index (unused by plain SGD)
            egs: a list of gradient error tensors
            hparam: hyperparameters; must hold the learning rate under 'eta'
        Returns:
            list
        """
        learning_rate = hparam['eta']
        return [learning_rate * grad_t for grad_t in egs]
# ------------------------------------------------------------------------
class SGDM(Optimizer):
    _label = OPTIMIZER.SGDM_LABEL
    """
    Optimization using stochastic gradient descent with momentum update rule.
    """
    def __init__(self):
        # Per-layer running velocity tensors, lazily sized on first use.
        self._velocities = []
        super().__init__()
    # ------------------------------------------------------------------------
    def reset(self):
        """
        Reset internal states (zero out the velocity tensors in place).
        """
        super().reset()
        self._velocities = [np.zeros_like(velocity_t, dtype=velocity_t.dtype) for velocity_t in self._velocities]

    @MType(int, [np.ndarray], dict)
    def compute_grad_descent_step(self, epoch, egs, hparam):
        """
        Implement stochastic gradient descent with momentum update formula.
        Compute gradient step delta tensors:
            velocity <- momentum * velocity + (1 - momentum) * gradient
            step     <- eta * velocity
        Arguments:
            epoch: current epoch index (unused by SGDM)
            egs: a list of gradient error tensors
            hparam: must hold 'eta'; may hold 'momentum'
                    (defaults to OPTIMIZER.DEFAULT_MOMENTUM)
        Returns:
            list
        """
        eta = hparam['eta']
        momentum = hparam.get('momentum', OPTIMIZER.DEFAULT_MOMENTUM)
        if len(self._velocities) != len(egs):
            # (Re)initialize velocities to match the gradient list.
            # Fix: removed the dead `self._momentums` list that was built here;
            # it was never initialized in __init__, never reset, and never read.
            self._velocities = [np.zeros_like(eg_t, dtype=eg_t.dtype) for eg_t in egs]
        self._velocities = [momentum * velocity_t + (1 - momentum) * eg_t
                            for (velocity_t, eg_t) in zip(self._velocities, egs)]
        return [eta * velocity_t for velocity_t in self._velocities]
# ------------------------------------------------------------------------
class RMSprop(Optimizer):
    _label = OPTIMIZER.RMSPROP_LABEL
    """
    RMSprop update rule: keeps a moving average of squared gradient tensors to
    set an adaptive per-parameter effective learning rate.
    """
    def __init__(self):
        # Running means of squared gradients, lazily sized on first use.
        self._moving_means = []
        super().__init__()
    # ------------------------------------------------------------------------
    def reset(self):
        """
        Reset internal states (zero out the squared-gradient averages).
        """
        super().reset()
        self._moving_means = [np.zeros_like(moving_mean_t, dtype=moving_mean_t.dtype) for moving_mean_t in self._moving_means]

    @MType(int, [np.ndarray], dict)
    def compute_grad_descent_step(self, epoch, egs, hparam):
        """
        Implement rmsprop update formula. Compute gradient step delta tensors:
            mean <- decay * mean + (1 - decay) * gradient^2
            step <- eta * gradient / (sqrt(mean) + 1e-12)
        Arguments:
            epoch: current epoch index (unused by RMSprop)
            egs: a list of gradient error tensors
            hparam: must hold 'eta'; may hold 'beta_decay1'
                    (defaults to OPTIMIZER.DEFAULT_BETA_DECAY1)
        Returns:
            list
        """
        eta = hparam['eta']
        decay = hparam.get('beta_decay1', OPTIMIZER.DEFAULT_BETA_DECAY1)
        if len(self._moving_means) != len(egs):
            self._moving_means = [np.zeros_like(grad_t, dtype=grad_t.dtype) for grad_t in egs]
        updated_means = []
        for (mean_t, grad_t) in zip(self._moving_means, egs):
            updated_means.append(decay * mean_t + (1 - decay) * np.square(grad_t))
        self._moving_means = updated_means
        # 1e-12 guards against division by zero for vanishing gradients.
        return [eta * grad_t / (np.sqrt(mean_t) + 1e-12)
                for (mean_t, grad_t) in zip(self._moving_means, egs)]
# ------------------------------------------------------------------------
class Adam(Optimizer):
    _label = OPTIMIZER.ADAM_LABEL
    """
    Adam update rule: moving averages of both the gradient (first moment) and
    its square (second moment), each with a bias-correction term.
    """
    def __init__(self):
        # First and second moment estimates, lazily sized on first use.
        self._moving_means = []
        self._moving_sqr_means = []
        super().__init__()
    # ------------------------------------------------------------------------
    def reset(self):
        """
        Reset internal states (zero out both moment estimates).
        """
        super().reset()
        self._moving_means = [np.zeros_like(mean_t, dtype=mean_t.dtype)
                              for mean_t in self._moving_means]
        self._moving_sqr_means = [np.zeros_like(sqr_mean_t, dtype=sqr_mean_t.dtype)
                                  for sqr_mean_t in self._moving_sqr_means]

    @MType(int, [np.ndarray], dict)
    def compute_grad_descent_step(self, epoch, egs, hparam):
        """
        Implement the adam update formula. Compute gradient step delta tensors:
            m <- beta1 * m + (1 - beta1) * gradient
            v <- beta2 * v + (1 - beta2) * gradient^2
            step <- eta * m_hat / (sqrt(v_hat) + 1e-12)
        where m_hat and v_hat are the bias-corrected moments.
        Arguments:
            epoch: current epoch index (drives the bias-correction terms)
            egs: a list of gradient error tensors
            hparam: must hold 'eta'; may hold 'beta_decay1' / 'beta_decay2'
        Returns:
            list
        """
        eta = hparam['eta']
        beta1 = hparam.get('beta_decay1', OPTIMIZER.DEFAULT_BETA_DECAY1)
        beta2 = hparam.get('beta_decay2', OPTIMIZER.DEFAULT_BETA_DECAY2)
        if len(self._moving_means) != len(egs) or len(self._moving_sqr_means) != len(egs):
            self._moving_means = [np.zeros_like(grad_t, dtype=grad_t.dtype) for grad_t in egs]
            self._moving_sqr_means = [np.zeros_like(grad_t, dtype=grad_t.dtype) for grad_t in egs]
        # Bias corrections counteract the zero initialization of the moments.
        correction1 = 1 / (1 - math.pow(beta1, epoch + 1))
        correction2 = 1 / (1 - math.pow(beta2, epoch + 1))
        self._moving_means = [beta1 * mean_t + (1 - beta1) * grad_t
                              for (mean_t, grad_t) in zip(self._moving_means, egs)]
        self._moving_sqr_means = [beta2 * sqr_mean_t + (1 - beta2) * np.square(grad_t)
                                  for (sqr_mean_t, grad_t) in zip(self._moving_sqr_means, egs)]
        # 1e-12 guards against division by zero for vanishing second moments.
        return [eta * (mean_t * correction1) / (np.sqrt(sqr_mean_t * correction2) + 1e-12)
                for (mean_t, sqr_mean_t) in zip(self._moving_means, self._moving_sqr_means)]
| UTF-8 | Python | false | false | 9,656 | py | 39 | optimizers.py | 28 | 0.518538 | 0.513049 | 0 | 306 | 30.555556 | 139 |
Francinaldo-Silva/Uri-Online-Judge | 4,896,262,739,904 | 088f0524cf8a9e4cc8a300ca2d2fcc242bdc4f21 | 674b53609cf9e56f7313574bd62ea7df8792274d | /1042.py | 6456fcea60ab26261dbf5864a8adb8785ead5c5c | [] | no_license | https://github.com/Francinaldo-Silva/Uri-Online-Judge | 706bc66b97beaf6b59fbdff14852c33e1eede26f | 09807d4c0d325e1e5afa7a8220539f4c501af44a | refs/heads/master | 2022-03-29T19:08:14.066466 | 2020-01-19T03:29:44 | 2020-01-19T03:29:44 | 234,834,075 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | numeros = input().split()
# Convert the whitespace-separated input tokens (read above) to integers.
numeros = [int(token) for token in numeros]

# First block: the values in non-decreasing order.
for valor in sorted(numeros):
    print(valor)

print('')

# Second block: the values in their original input order.
for valor in numeros:
    print(valor)
MzXuan/FER_JueAndXuan | 18,047,452,592,422 | 339989422089c0a6452695f2c40cc6d038153e40 | b5dbc999ee4592b5d7d339674c2bd27f10cb74c6 | /test.py | 7dd802543015f0ae5e9c72db888653c2934b6178 | [] | no_license | https://github.com/MzXuan/FER_JueAndXuan | 39a6355399fb7a6662b4eab8ad67d306ac6801e8 | e3a9a3ef2806848c53a1e17ca6086c63f8bb246f | refs/heads/master | 2020-03-12T01:07:11.856300 | 2018-04-20T13:45:33 | 2018-04-20T13:45:33 | 130,367,767 | 0 | 0 | null | true | 2018-04-20T13:44:43 | 2018-04-20T13:44:42 | 2018-04-20T13:42:48 | 2018-04-20T13:42:46 | 0 | 0 | 0 | 0 | null | false | null | from seq_fer_datasets import *
from seq_fer import SFER_LSTM
if __name__ == '__main__':
    # Smoke-test the CK+ sequence dataset/dataloader pipeline.
    video_root_dir = r'/home/young/cv_project/cohn-kanade-images'
    label_root_dir = r'/home/young/cv_project/Emotion'
    video_dir_paths, label_dir_paths = get_ck_data(video_root_dir, label_root_dir)

    # Resize every frame to a common size and convert to a single-channel tensor.
    # (An optional mean/std normalization step via calc_img_dataset_mean_std and
    # ImgMeanStdNormalization can be appended to this pipeline.)
    img_size = (320, 240)
    composed_tf = transforms.Compose([transforms.Grayscale(), transforms.Resize(img_size), transforms.ToTensor()])

    dataset = SFERDataset(video_dir_paths, label_dir_paths, transform=composed_tf)
    loader = DataLoader(dataset, batch_size=8, shuffle=False, collate_fn=SFERPadCollate(dim=0))

    # Inspect the first two batches only, printing the padded tensor shapes.
    for batch_index, batch in enumerate(loader):
        print(type(batch))
        for video_tensor in batch[0]:
            print(video_tensor.size())
        print(batch[0].size())
        if batch_index == 1:
            break
bueaux/mri | 6,691,559,051,452 | 3459d84980a7248b09858a4be10ac0e57d9ebe2a | fdf23fa82dc5d8b08f3c0f6aac8016890edd85ea | /mri/util.py | 3a5434e0063c07f0ce642f9c61192afa08e787da | [
"MIT"
] | permissive | https://github.com/bueaux/mri | 830ae1a1450ac4ba4b058922e59c57632ad4923e | 5ebe151656c7b7033a1d9a6b421c3392d41e5a20 | refs/heads/master | 2020-04-24T06:43:39.848446 | 2014-06-24T02:57:56 | 2014-06-24T02:57:56 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from itertools import ifilter, izip
from operator import add
from mri import _entropy
BLOCK_LEN = _entropy.BLOCK_LEN
def generate_hex_ticks(ticks, max):
mask = 0xfffffff
# Find the largest bit mask we can use without reducing the number
# of ticks by much less than a half.
# Worse case scenario is we get the original mask.
ticks = map(int, ticks)
while mask:
tnew = [ t&(~mask) for t in ticks ]
if len(set(tnew)) >= 0.45*len(ticks):
tnew = sorted(list(set(tnew)))
ticks = range(0,ticks[-1],tnew[1])
break
mask >>= 4
# If there's not enough ticks, double them.
while len(ticks) < 6:
half = ticks[1]/2
ticks = [ [t,t+half] for t in ticks ]
ticks = reduce(add, ticks)
# Truncate at the maximum point of the graph.
return filter(lambda x: x<max, ticks)
def scale_down(values, num):
if len(values) < num:
return values
# Reducing the range of values to a fixed width by accurately averaging
# despite the possibility that multiple of the the shorter length doesn't evenly
# fit into the larger one.
#
# This is similar to integer line drawing algorithm using antialiasing
# in that we track an integral part and a fractional part
# of a ratio.
intg = width_intg = len(values)/num
frac = width_frac = len(values)%num
bin = 0.0
count = 0.0
output = []
values = (v for v in values)
for n in xrange(num):
while intg:
bin += values.next()
count += 1
intg -= 1
intg += width_intg
frac += width_frac
if frac > num:
frac -= num
intg += 1
# If there is a fractional component, we calculate it's contribution
# to the average.
if frac:
weight = float(frac)/num
x = values.next()
bin += weight*x
count += weight
output.append(bin/count)
bin = (1-weight)*x
intg -= 1
count = 1-weight
else:
output.append(bin/count)
count = 0.0
bin = 0.0
return output
def filter_zero(axis, data):
return zip(*ifilter(lambda x: x[1] != 0.0, izip(axis, data))) | UTF-8 | Python | false | false | 2,301 | py | 8 | util.py | 6 | 0.563668 | 0.551499 | 0 | 82 | 27.073171 | 85 |
pg-irc/IRC-Services-Legacy | 18,940,805,805,366 | 4f0c059a28eda14f21ab8b0bdf4d1b2efb51f82a | 2c529ae8190c6a97f13f61b81e2c85b3d2fe0948 | /admin_panel/urls.py | ae431633b2212d063f97be96add7463d94b00854 | [
"BSD-3-Clause"
] | permissive | https://github.com/pg-irc/IRC-Services-Legacy | ec6c3b32769f6b7c433610d736e120fe199a7ee0 | 818ffd13fecb8a7f1c29c341d427d0a339809f8b | refs/heads/master | 2021-05-01T23:19:11.592256 | 2019-08-20T00:04:03 | 2019-08-20T00:04:03 | 120,933,399 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf.urls import url, include
from django.views.generic import TemplateView
from django.contrib.auth.decorators import login_required
from .views import LandingPageView
from django.contrib.auth import views as auth_views
urlpatterns = [
url(r'^login/$', auth_views.login, {'template_name': 'admin_panel/login.html'}, name='login'),
url(r'^logout/$', auth_views.logout, {'template_name': 'admin_panel/logout.html'}, name='logout'),
url(r'^$', LandingPageView.as_view(), name='admin-landing-page'),
]
| UTF-8 | Python | false | false | 525 | py | 177 | urls.py | 100 | 0.729524 | 0.729524 | 0 | 11 | 46.727273 | 103 |
YukioNiwa/lgtm | 6,021,544,154,662 | 65b91a608ca34df6a10b462434e496f7078c6366 | 1c493a7870b39d4171bd58cab81365981594d2b6 | /main.py | e158e4cb9b9f63571a3e45e6ff17ae6c209e8d12 | [] | no_license | https://github.com/YukioNiwa/lgtm | 2582757067ec9b0bcd8506596b00af6542c9b084 | 23ae89bfaf9ebecc6343b031dd930dab725b0cb9 | refs/heads/master | 2022-01-22T19:32:30.818764 | 2020-03-08T13:51:24 | 2020-03-08T13:51:24 | 243,684,357 | 0 | 0 | null | false | 2022-01-13T02:16:51 | 2020-02-28T05:18:04 | 2020-03-08T13:51:31 | 2022-01-13T02:16:48 | 7 | 0 | 0 | 2 | Python | false | false | from lgtm import core
# Entry point: run the LGTM command-line interfaces.
if __name__ == '__main__':
    # When adding a new command, also register it here.
    core.cli()
    core.cli_niwa()
| UTF-8 | Python | false | false | 134 | py | 3 | main.py | 3 | 0.566038 | 0.566038 | 0 | 6 | 16.666667 | 26 |
Sandy4321/CMSC516-SE-T8 | 9,517,647,578,804 | f668e85053af3762ade70f587d7e82768aadd415 | 79c2f47da0c0409510de071d588970c75dac5393 | /SemEvalEight/config.py | 8340d8a444dafc6de1baa9c51e0e869b23b41775 | [] | no_license | https://github.com/Sandy4321/CMSC516-SE-T8 | fe6cda1f53bd73168ff8e7aaede04447225b2ddc | d06ccba459a6e79576f6492b8524db7331594202 | refs/heads/master | 2020-07-08T12:52:04.922038 | 2017-12-09T12:13:44 | 2017-12-09T12:13:44 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
# Project data-path configuration for the SemEval task 8 pipeline.
# Todo: setup a real plaintext config?
###----
# Option 1: hardcode the data dirs (avoids the use of env vars).
#semeval8_data_dir = '/home/morgan/ownCloud/Classes/NLP/semeval_task_8/data/'
semeval8_data_dir = '/home/morgan/ownCloud/Classes/NLP/semeval_task_8/data/training_material/data'
ext_data_dir = '/home/morgan/Projects/NLP-Semeval-T8/ext_data'
eval_data_dir = '/home/morgan/Projects/NLP-Semeval-T8/data'
stucco_corpus_dir = os.path.join(ext_data_dir, 'stucco_corpus')
embeddings_dir = os.path.join(ext_data_dir, 'embeddings')

###----
# Option 2: read the dirs from environment variables (uncomment to enable;
# the hardcoded values above then become the fallbacks).
#semeval8_data_dir = os.environ.get('SEMEVAL_8_DATA', semeval8_data_dir)
#stucco_corpus_dir = os.environ.get('STUCCO_AUTO_LABELED', stucco_corpus_dir)
#embeddings_dir = os.environ.get('EMBEDDINGS_DIR', embeddings_dir)

###----
# Validate the configuration: the SemEval data dir is required; the stucco
# corpus and the embeddings dirs are optional (warn only).
if semeval8_data_dir is None:
    raise ValueError("Specify semeval data dir in config.py or set the 'SEMEVAL_8_DATA' environment variable")

# not a required data set
if stucco_corpus_dir is None:
    print("warning: Specify stucco data dir in config.py or set the 'STUCCO_AUTO_LABELED' environment variable")
    print("download stucco auto-labeled here: https://github.com/stucco/auto-labeled-corpus")
    #raise ValueError("Specify stucco data dir in config.py or set the 'STUCCO_AUTO_LABELED' environment variable")
else:
    # Path to the pre-built JSON corpus inside the stucco dir.
    stucco_corpus_json_path = os.path.join(stucco_corpus_dir, 'full_corpus.json')

if embeddings_dir is None:
    print("warning: specify embeddings dir in config.py or set the 'EMBEDDINGS_DIR' environment variable")

# Useful derived paths.
tokenized_dir = os.path.join(semeval8_data_dir, 'tokenized')
#brown_ext_dir = os.path.join(semeval8_data_dir, 'brown_ext_training_set')
brown_ext_dir = os.path.join(ext_data_dir, 'additional_plaintext_sentences')
| UTF-8 | Python | false | false | 1,780 | py | 19 | config.py | 18 | 0.742135 | 0.734831 | 0 | 39 | 44.641026 | 115 |
milezer0/appdev | 15,066,745,283,751 | bac64ce5030d857bef42e65f2e545f919c2cad05 | 081bc26ac75f5e72b3c65e777c2a767181b5f56f | /production/migrations/0003_auto_20170728_0453.py | 03c05c8ab9801210acca5694754bb524c09bac34 | [] | no_license | https://github.com/milezer0/appdev | b3d00c9769347672cf884306a82ef113f4414699 | 311a002c8e569a5d40d8bf8792ae4a207c7c5af2 | refs/heads/master | 2018-02-09T01:27:26.684882 | 2017-11-08T22:58:09 | 2017-11-08T22:58:09 | 96,613,145 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-28 04:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 1.11): links finished bundles
    to production-line workers, attaches operations to production-line
    deliverables, and makes the worker removal timestamp optional."""

    dependencies = [
        ('productdevelopment', '0002_auto_20170727_0925'),
        ('production', '0002_worker_worker_number'),
    ]

    operations = [
        migrations.AddField(
            model_name='finished_bundle',
            name='production_line_worker',
            # default=1 backfills existing rows with the worker of pk 1; the
            # default is then dropped from the schema (preserve_default=False).
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='production.Worker'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='production_line_deliverable',
            name='operations',
            field=models.ManyToManyField(to='productdevelopment.Operations'),
        ),
        migrations.AlterField(
            model_name='production_line_worker',
            name='date_removed',
            # Allow NULL/blank so active workers have no removal timestamp.
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
| UTF-8 | Python | false | false | 1,063 | py | 63 | 0003_auto_20170728_0453.py | 28 | 0.619944 | 0.584196 | 0 | 33 | 31.212121 | 116 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.