repo_name stringlengths 7 111 | __id__ int64 16.6k 19,705B | blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 5 151 | content_id stringlengths 40 40 | detected_licenses list | license_type stringclasses 2 values | repo_url stringlengths 26 130 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 42 | visit_date timestamp[ns] | revision_date timestamp[ns] | committer_date timestamp[ns] | github_id int64 14.6k 687M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 12 values | gha_fork bool 2 classes | gha_event_created_at timestamp[ns] | gha_created_at timestamp[ns] | gha_updated_at timestamp[ns] | gha_pushed_at timestamp[ns] | gha_size int64 0 10.2M ⌀ | gha_stargazers_count int32 0 178k ⌀ | gha_forks_count int32 0 88.9k ⌀ | gha_open_issues_count int32 0 2.72k ⌀ | gha_language stringlengths 1 16 ⌀ | gha_archived bool 1 class | gha_disabled bool 1 class | content stringlengths 10 2.95M | src_encoding stringclasses 5 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 10 2.95M | extension stringclasses 19 values | num_repo_files int64 1 202k | filename stringlengths 4 112 | num_lang_files int64 1 202k | alphanum_fraction float64 0.26 0.89 | alpha_fraction float64 0.2 0.89 | hex_fraction float64 0 0.09 | num_lines int32 1 93.6k | avg_line_length float64 4.57 103 | max_line_length int64 7 931 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ktc312/tw_perm_data_analysis_web | 7,713,761,303,201 | 67d9ad40a14eb246eaca883e3ad02fe49595c8fe | 253b9c42bffbcd7b0c3fe3880e0370359b1aedb0 | /tw_perm_data_analysis/data_cleaning.py | 66b1805450a4a4d2a979043efb4ed7b39c1132ff | [
"MIT"
]
| permissive | https://github.com/ktc312/tw_perm_data_analysis_web | 40c78bf1511f98da07054ae8c6287a53da1e0c73 | 98302d45afb0d47b230a603279699351739ba34c | refs/heads/master | 2021-01-13T01:07:52.923291 | 2017-03-02T22:17:12 | 2017-03-02T22:17:12 | 81,408,337 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # __author__ = 'ktc312'
import pandas as pd
import numpy as np
import os
data_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'tw_perm_data_analysis/')
# Read the CSV file (cities)
ny_cities_df = pd.read_csv(data_path + 'data/NY_cities.csv', names=['c'], dtype=str)
ny_cities = []
for x in ny_cities_df['c']:
    ny_cities.append(x)
# use the same single column name as above so the lookup below matches the header
bay_cities_df = pd.read_csv(data_path + 'data/Bay_Area_cities.csv', names=['c'], dtype=str)
bay_cities = []
for x in bay_cities_df['c']:
    bay_cities.append(x)
# Convert DateTime
def convert_datetime(input_data, input_date):
input_data[input_date] = pd.to_datetime(input_data[input_date])
# Convert to equivalent annual salary
def equivalent_annual_salary(input_data, input_wage):
annual_salary = []
for input_wage_str in input_data[input_wage]:
wage = float(input_wage_str.split('/')[0].replace(",", ""))
keyword = input_wage_str.split('/')[1].lower()
if keyword in ('year', 'yr'):
annual_salary.append(wage)
elif keyword in ('hour', 'hr'):
if wage < 1000:
annual_salary.append(wage * 2080)
else:
annual_salary.append(wage)
elif keyword in ('mth', 'month'):
if wage < 100000:
annual_salary.append(wage * 12)
else:
annual_salary.append(wage)
elif keyword in ('week', 'wk'):
if wage < 90000:
annual_salary.append(wage * 52)
else:
annual_salary.append(wage)
elif keyword == 'bi':
annual_salary.append(wage * 26)
elif float(input_wage_str[:-1]) <= 100:
annual_salary.append(float(input_wage_str[:-1]) * 2080)
else:
annual_salary.append(float(input_wage_str[:-1]))
input_data['Salary'] = np.asarray(annual_salary)
input_data.drop(input_wage, axis=1, inplace=True)
# remove outliers
    input_data.loc[input_data.Salary > 500000, 'Salary'] = '-999'
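# Worked example for equivalent_annual_salary (illustrative wage strings, not from the data):
#   '30.00/hr'    -> 30.0   * 2080 = 62400.0
#   '5,000/month' -> 5000.0 * 12   = 60000.0
#   '90,000/year' -> kept as 90000.0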
# Clean Case Status
def clean_case_status(input_data, input_status):
input_data[input_status] = np.where(input_data[input_status] == 'Certified-expired',
'Certified-Expired', input_data[input_status])
# Separate State and City
def separate_tate_city(input_data, input_region):
city = []
state = []
    for s_c in input_data[input_region]:
        # both branches of the original if/else were identical, so split once
        city.append(s_c.split(',')[0])
        state.append(s_c.split(',')[1][1:3].upper())
    state = ['-999' if v == '' else v for v in state]
input_data['City'] = np.asarray(city)
input_data['State'] = np.asarray(state)
# Clean employer name
def clean_employer_name(input_data, input_employer):
com_list = []
com_list_2 = []
for employer in input_data[input_employer]:
com_list.append(employer.replace(',', ''))
for com in com_list:
com_list_2.append(com.replace('!', ''))
input_data['Company'] = np.asarray(com_list_2)
input_data.drop(input_employer, axis=1, inplace=True)
# Add Area
def add_area(input_data, input_region):
area = []
for s_c in input_data[input_region]:
city = s_c.split(',')[0].upper()
state = s_c.split(',')[1][1:3].upper()
if state in ('NY', 'NJ', 'CT'):
if city in ny_cities:
area.append('New York Metro')
else:
area.append('-999')
elif state == 'CA':
if city in bay_cities:
area.append('Bay Area')
else:
area.append('-999')
else:
area.append('-999')
input_data['Area'] = np.asarray(area)
# NaN
def clear_nan_value(input_data):
input_data['State'] = input_data['State'].replace({'-999': np.nan})
input_data['Salary'] = input_data['Salary'].replace({'-999': np.nan})
input_data['Area'] = input_data['Area'].replace({'-999': np.nan})
# Remove Rare Cases in Pandas Data Frame
def remove_rare_case(input_data, col_name, freq):
col = col_name
bin_freq = float(freq) / float(100)
filtered_df = pd.DataFrame()
for i in input_data[col].unique():
counts = input_data[input_data[col] == i].count()[col]
total_counts = input_data[col].count()
freq = float(counts) / float(total_counts)
if freq > bin_freq:
filtered_df = pd.concat([input_data[input_data[col] == i], filtered_df])
return filtered_df
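# Example call (hypothetical column name): remove_rare_case(df, 'State', 1) returns only the
# rows whose 'State' value accounts for more than 1% of all rows in the frame.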
| UTF-8 | Python | false | false | 4,610 | py | 73 | data_cleaning.py | 10 | 0.572234 | 0.552278 | 0 | 137 | 32.649635 | 111 |
paoladuran0618/PythonPractices | 8,083,128,497,233 | 5a6e81ba848920b3575f1a1e23fd3abec9c011ee | d5aa3dbe3133ebed014033b1d98d006259b29ccd | /control-de-flujo/ejercicio6.py | afbb153618d94ba8943b13399ccf173d5d4f7aeb | []
| no_license | https://github.com/paoladuran0618/PythonPractices | b5f82df3e033bd6c2100619f01c85adb0426870c | 66693e71f911d76a39dd7947d11389847d386873 | refs/heads/master | 2023-05-25T07:19:10.788523 | 2021-01-17T22:04:46 | 2021-01-17T22:04:46 | 282,688,247 | 0 | 0 | null | false | 2023-05-23T00:56:55 | 2020-07-26T16:23:38 | 2021-01-17T22:07:17 | 2023-05-23T00:56:54 | 10,772 | 0 | 0 | 1 | Python | false | false | """
Using the range() function and conversion to lists,
generate the following lists dynamically:
All the numbers from 0 to 10 [0, 1, 2, ..., 10]
All the numbers from -10 to 0 [-10, -9, -8, ..., 0]
All the even numbers from 0 to 20 [0, 2, 4, ..., 20]
All the odd numbers between -20 and 0 [-19, -17, -15, ..., -1]
All the multiples of 5 from 0 to 50 [0, 5, 10, ..., 50]
"""
print( list( range(0, 11) ) )
print( list( range(-10, 1) ) )
print( list( range(0, 21, 2) ) )
print( list( range(-19, 0, 2) ) )
print( list( range(0, 51, 5) ) ) | UTF-8 | Python | false | false | 570 | py | 23 | ejercicio6.py | 22 | 0.602496 | 0.491979 | 0 | 20 | 27.1 | 64 |
DhawalRank-zz/LibApp | 11,596,411,723,025 | 23a33b094e34811bf3d4b4481a1f9ed435bdb256 | 40298fa8620011276a2d77d8af430c5d5fcf7dee | /app/views.py | 8ffa2c98d9a64d6b98ba2cf5c451b1668c21bb3e | []
| no_license | https://github.com/DhawalRank-zz/LibApp | 5df0e734114f874bef9adf5d8aaf0bfa7484cb9a | db0711e3accb5b0209fb93f73803c23c3b713a7e | refs/heads/master | 2021-09-26T17:44:49.839768 | 2016-06-29T18:29:12 | 2016-06-29T18:29:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
from random import randint
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required, user_passes_test
from django.shortcuts import render, get_object_or_404, get_list_or_404
from django.views.decorators.csrf import csrf_protect
from django.http import HttpResponseRedirect
from django.core import serializers
from django.views.generic import View
from app.forms import SuggestionForm, SearchlibForm, LoginForm, Register, MyAcct
from app.models import Book, Dvd, Libuser, Libitem, Suggestion
from django.core.mail import send_mail
# Create your views here.
@csrf_protect
def login_user(request):
form = LoginForm()
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None and user.is_active:
login(request, user)
userob = Libuser.objects.get(username=request.user.username)
luckynum = randint(0, 9)
request.session['luckynum'] = luckynum
request.session['profilepic'] = userob.profilepic.url
request.session.set_expiry(3600)
userob = Libuser.objects.filter(username=request.user.username)
request.session['userob'] = serializers.serialize('json', userob)
response = HttpResponseRedirect('/app/index/')
response.flush()
return response
elif user is None:
return render(request, 'libapp/login.html', {'notlogin': True, 'form': form})
else:
return render(request, 'libapp/login.html', {'notactive': True, 'form': form})
else:
return render(request, 'libapp/login.html', {'form': form})
@login_required
def user_logout(request):
del request.session['userob']
response = HttpResponseRedirect('/')
response.delete_cookie('about_visits')
logout(request)
return response
def index(request):
itemlist = Libitem.objects.all().order_by('title')[:10]
itemlistper = Libitem.objects.filter(user_id__exact=request.user.id)
userob = Libuser.objects.filter(username=request.user.username)
return render(request, "libapp/index.html",
{'itemlist': itemlist, 'itemlistper': itemlistper, 'userob': userob})
def about(request):
userob = Libuser.objects.filter(username=request.user.username)
if 'about_visits' in request.COOKIES:
about_visits = int(request.COOKIES['about_visits'])
about_visits += 1
response = render(request, 'libapp/about.html', {'about_visits': about_visits, 'userob': userob})
response.set_cookie('about_visits', about_visits)
return response
else:
about_visits = 1
response = render(request, 'libapp/about.html', {'about_visits': about_visits, 'userob': userob})
response.set_cookie('about_visits', about_visits)
return response
def detail(request, item_id):
libitem = get_object_or_404(Libitem, id=item_id)
userob = Libuser.objects.filter(username=request.user.username)
if libitem.itemtype == 'Book':
book = get_list_or_404(Book, id=item_id)
return render(request, 'libapp/detail.html', {'book': book, 'userob': userob})
else:
dvd = get_list_or_404(Dvd, id=item_id)
return render(request, 'libapp/detail.html', {'dvd': dvd, 'userob': userob})
def suggestions(request):
userob = Libuser.objects.filter(username=request.user.username)
suggestionlist = Suggestion.objects.all()[:10]
return render(request, 'libapp/suggestions.html', {'itemlist': suggestionlist, 'userob': userob})
def newitem(request):
suggestionsob = Suggestion.objects.all()
userob = Libuser.objects.filter(username=request.user.username)
if request.method == 'POST':
form = SuggestionForm(request.POST)
if form.is_valid():
suggestion = form.save(commit=False)
suggestion.num_interested = 1
suggestion.save()
return HttpResponseRedirect('/app/suggestions/')
else:
return render(request, 'libapp/newitem.html',
{'form': form, 'suggestions': suggestionsob, 'userob': userob})
else:
form = SuggestionForm()
return render(request, 'libapp/newitem.html', {'form': form, 'suggestions': suggestionsob, 'userob': userob})
def searchitem(request):
userob = Libuser.objects.filter(username=request.user.username)
if request.method == 'POST':
title1 = request.POST['title']
author1 = request.POST['author']
if title1 != '' and author1 != '': # Title and User not null
bookob = Book.objects.filter(title__contains=title1, author__contains=author1)
dvdob = Dvd.objects.filter(title__contains=title1, maker__contains=author1)
form = SearchlibForm()
if bookob and dvdob:
return render(request, 'libapp/searchitem.html',
{'bookob': bookob, 'dvdob': dvdob, 'form': form, 'userob': userob})
elif not bookob and dvdob:
return render(request, 'libapp/searchitem.html', {'dvdob': dvdob, 'form': form, 'userob': userob})
elif bookob and not dvdob:
return render(request, 'libapp/searchitem.html', {'bookob': bookob, 'form': form, 'userob': userob})
else:
return render(request, 'libapp/searchitem.html', {'notfound': True, 'form': form, 'userob': userob})
elif title1 != '' and author1 == '': # Only Title searched
bookob = Book.objects.filter(title__contains=title1)
dvdob = Dvd.objects.filter(title__contains=title1)
form = SearchlibForm()
if bookob and dvdob:
return render(request, 'libapp/searchitem.html',
{'bookob': bookob, 'dvdob': dvdob, 'form': form, 'userob': userob})
elif bookob and not dvdob:
return render(request, 'libapp/searchitem.html', {'bookob': bookob, 'form': form, 'userob': userob})
elif not bookob and dvdob:
return render(request, 'libapp/searchitem.html', {'dvdob': dvdob, 'form': form, 'userob': userob})
else:
return render(request, 'libapp/searchitem.html', {'notfound': True, 'form': form, 'userob': userob})
elif author1 != '' and title1 == '': # Only Author searched
bookob = Book.objects.filter(author__contains=author1)
dvdob = Dvd.objects.filter(maker__contains=author1)
form = SearchlibForm()
if bookob and dvdob:
return render(request, 'libapp/searchitem.html',
{'bookob': bookob, 'dvdob': dvdob, 'form': form, 'userob': userob})
elif bookob and not dvdob:
return render(request, 'libapp/searchitem.html', {'bookob': bookob, 'form': form, 'userob': userob})
            elif not bookob and dvdob:
                return render(request, 'libapp/searchitem.html', {'dvdob': dvdob, 'form': form, 'userob': userob})
else:
form = SearchlibForm()
return render(request, 'libapp/searchitem.html', {'notfound': True, 'form': form, 'userob': userob})
else: # Author and Title null
form = SearchlibForm()
return render(request, 'libapp/searchitem.html', {'notinput': True, 'form': form, 'userob': userob})
else:
form = SearchlibForm()
return render(request, 'libapp/searchitem.html', {'form': form, 'userob': userob})
class SuggestionView(View):
def get(self, request, item_id):
suggestionsob = Suggestion.objects.filter(id=item_id)
userob = Libuser.objects.filter(username=request.user.username)
return render(request, 'libapp/suggestionsdet.html', {'suggestionob': suggestionsob, 'userob': userob})
@login_required
def myacct(request):
userob1 = Libuser.objects.filter(username=request.user.username)
if request.method == 'POST':
userob = Libuser.objects.get(id=request.user.id)
form = MyAcct(request.POST or None, request.FILES or None, instance=userob)
if form.is_valid():
form.save()
userob = Libuser.objects.get(id=request.user.id)
form = MyAcct(instance=userob)
return render(request, 'libapp/myacct.html', {"form": form, "added": True, 'userob': userob1})
else:
userob = Libuser.objects.get(id=request.user.id)
form = MyAcct(instance=userob)
return render(request, 'libapp/myacct.html', {"form": form, 'userob': userob1, 'failed': True})
else:
userob = Libuser.objects.get(id=request.user.id)
form = MyAcct(instance=userob)
return render(request, 'libapp/myacct.html', {"form": form, 'userob': userob1})
def register(request):
if request.method == 'POST':
form = Register(request.POST, request.FILES)
if form.is_valid():
user = Libuser.objects.create(
username=form.cleaned_data['username'],
first_name=form.cleaned_data['first_name'],
last_name=form.cleaned_data['last_name'],
email=form.cleaned_data['email'],
address=form.cleaned_data['address'],
city=form.cleaned_data['city'],
province=form.cleaned_data['province'],
phone=form.cleaned_data['phone']
)
password = form.cleaned_data['password']
user.profilepic = form.cleaned_data['profilepic']
user.set_password(password)
user.save()
form = Register()
return render(request, 'libapp/register.html', {'form': form, 'added': True})
else:
form = Register()
return render(request, 'libapp/register.html', {'form': form, 'failed': True})
else:
form = Register()
return render(request, 'libapp/register.html', {'form': form})
def myitems(request):
userob = Libuser.objects.filter(username=request.user.username)
itemob = Libitem.objects.filter(user__username=request.user.username, checked_out=True)
return render(request, 'libapp/myitem.html', {'itemob': itemob, 'userob': userob})
def forgotpwd(request):
if request.method == 'POST':
username = request.POST['username']
userob = Libuser.objects.get(username=username)
password = str(os.urandom(4))
userob.set_password(password)
userob.save()
send_mail(
'LibApp Password',
'Your new Password is:' + password,
'sojitradhawal@gmail.com',
[userob.email],
)
return render(request, 'libapp/forgotpwd.html', {'emailSent': True})
else:
return render(request, 'libapp/forgotpwd.html')
@csrf_protect
def checkuname(request):
from app.models import Libuser
from django.http import HttpResponse
username = request.POST.get('username', False)
if username:
userob = Libuser.objects.filter(username=username).count()
if userob:
responce = True
else:
responce = False
else:
responce = ""
return HttpResponse(responce)
def setpwd(request):
userob = Libuser.objects.filter(username=request.user.username)
if request.method == 'POST':
userob = Libuser.objects.get(username=request.user.username)
password = request.POST.get('npassword', 0)
userob.set_password(password)
userob.save()
return render(request, 'libapp/setpwd.html', {'changed': True, 'userob': userob})
else:
return render(request, 'libapp/setpwd.html', {'userob': userob})
| UTF-8 | Python | false | false | 11,879 | py | 16 | views.py | 13 | 0.622527 | 0.618318 | 0 | 278 | 41.730216 | 117 |
yoshi112da/Instacart | 4,131,758,572,789 | 40893191ab08b38b9313de6e1ace88dc0480c118 | 19dfc947c052ea113306b3ae4bf1e2e1a069f0a9 | /appendix/317_.py | fca9453ca0ed2da4cc209cbfd01b81264c1595e6 | [
"MIT"
]
| permissive | https://github.com/yoshi112da/Instacart | 25bd6035bb1c764ec25c6e011949a71f122084f5 | 2cb49dc95a2ce3f1cbd3fccb54ce15b624a5d503 | refs/heads/master | 2023-04-26T12:21:28.709941 | 2023-04-16T02:08:23 | 2023-04-16T02:08:23 | 579,041,637 | 0 | 0 | MIT | true | 2022-12-16T14:09:39 | 2022-12-16T14:09:38 | 2022-12-08T09:42:11 | 2018-05-07T06:51:15 | 4,687 | 0 | 0 | 0 | null | false | false | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 18 12:55:38 2017
@author: konodera
https://twitter.com/jeremystan/status/911357665481080832
6/ most novel feature:
binary user by product purchase sequence ->
decimal -> XGBoost learns non-trivial sequence patterns
"""
import pandas as pd
import numpy as np
from tqdm import tqdm
from decimal import Decimal
import utils
#utils.start(__file__)
#==============================================================================
# load
#==============================================================================
col = ['order_id', 'user_id', 'product_id', 'order_number', 'order_number_rev']
log = utils.read_pickles('../input/mk/log', col).sort_values(['user_id', 'product_id', 'order_number'])
#==============================================================================
# def
#==============================================================================
def conv_bi2dec(seq, onb_max, reverse=True, deci=10):
"""
ex.
seq = [1,3,4]
onb_max = 6
101100 -> 44
001101 -> 13
"""
bi = [0]*onb_max
for i in seq:
bi[i-1] = 1
if reverse:
bi = ''.join(map(str, bi))[::-1]
else:
bi = ''.join(map(str, bi))
if deci==10:
return int(bi, 2)
elif deci==2:
return int(bi)
elif deci==.2:
return float(bi[0] + '.' + bi[1:])
else:
        raise ValueError('deci must be one of 10, 2 or .2')
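# Quick sanity check of the encoding (values taken from the docstring example above):
#   a product bought in orders 1, 3 and 4 out of 6 orders gives the bit string 101100, so
#   conv_bi2dec([1, 3, 4], 6, reverse=False, deci=10) == 44
#   conv_bi2dec([1, 3, 4], 6, reverse=True,  deci=10) == 13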
def make(T):
"""
T = 0
folder = 'trainT-0'
"""
if T==-1:
folder = 'test'
else:
folder = 'trainT-'+str(T)
log_ = log[log.order_number_rev>T]
log_['onb_max'] = log_.groupby('user_id').order_number.transform(np.max)
r1_d10 = []
r1_d2 = []
r1_df2 = []
r0_d10 = []
r0_d2 = []
r0_df2 = []
seq = []
uid_bk = pid_bk = onb_max_bk = None
for uid,pid,onb,onb_max in tqdm(log_[['user_id', 'product_id', 'order_number', 'onb_max']].values):
if uid_bk is None:
pass
elif uid==uid_bk and pid==pid_bk:
pass
elif uid!=uid_bk or pid!=pid_bk:
            # r1_* use the reversed bit string, r0_* the original order
            # (assumed from the feature names; the original code passed True for both)
            r1_d10.append(conv_bi2dec(seq, onb_max_bk, True, 10))
            r1_d2.append(conv_bi2dec(seq, onb_max_bk, True, 2))
            r1_df2.append(conv_bi2dec(seq, onb_max_bk, True, .2))
            r0_d10.append(conv_bi2dec(seq, onb_max_bk, False, 10))
            r0_d2.append(conv_bi2dec(seq, onb_max_bk, False, 2))
            r0_df2.append(conv_bi2dec(seq, onb_max_bk, False, .2))
seq = []
seq.append(onb)
uid_bk = uid
pid_bk = pid
onb_max_bk = onb_max
    # final group: same reverse handling as inside the loop (assumed r1=reversed, r0=not)
    r1_d10.append(conv_bi2dec(seq, onb_max_bk, True, 10))
    r1_d2.append(conv_bi2dec(seq, onb_max_bk, True, 2))
    r1_df2.append(conv_bi2dec(seq, onb_max_bk, True, .2))
    r0_d10.append(conv_bi2dec(seq, onb_max_bk, False, 10))
    r0_d2.append(conv_bi2dec(seq, onb_max_bk, False, 2))
    r0_df2.append(conv_bi2dec(seq, onb_max_bk, False, .2))
df = log_[['user_id', 'product_id']].drop_duplicates(keep='first').reset_index(drop=True)
df['seq2dec_r1_d10'] = r1_d10
df['seq2dec_r1_d2'] = r1_d2
df['seq2dec_r1_df2'] = r1_df2
df['seq2dec_r0_d10'] = r0_d10
df['seq2dec_r0_d2'] = r0_d2
df['seq2dec_r0_df2'] = r0_df2
df.to_pickle('../feature/{}/f317_user-product.p'.format(folder))
#==============================================================================
# main
#==============================================================================
make(0)
#make(1)
#make(2)
make(-1)
#==============================================================================
utils.end(__file__)
| UTF-8 | Python | false | false | 3,743 | py | 74 | 317_.py | 58 | 0.456051 | 0.407694 | 0 | 135 | 26.718519 | 103 |
huang8228541/upload_look_photo_system | 2,181,843,402,255 | 50892cd7c2dcf88e23348ecc3809e0fe5dbbce4f | 357fb20ace48919be3512b819cdf969cbaa8429b | /established_photo_v1.0/create_db.py | a6e6fb5edeaa348dd083d5918b9035041193df9e | []
| no_license | https://github.com/huang8228541/upload_look_photo_system | 823fe5b9eed8806ba883d78951a7c3f9a9f80386 | 8b1690665b1a295f0b80aa045838a81de61f1044 | refs/heads/master | 2021-09-19T11:39:43.506868 | 2018-07-27T13:09:46 | 2018-07-27T13:09:46 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Implements the persistence layer:
This application needs two tables, storing user information and photo information respectively.
The user record mainly stores the user name, password and similar fields.
The photo record stores the photo title, the file name of the photo, the owner of the photo,
and so on.
The user table and the photo table therefore have a master-detail relationship:
one user can own many photos.
'''
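# Illustrative rows for the intended one-to-many relation (hypothetical data; one_img_id is
# assumed here to be the id of the owning user in user_infomation):
#   user_infomation : (1, 'alice', '<hashed password>')
#   photo_infomation: (1, 'cat.jpg', 'img/cat.jpg', '2018-07-01', 1.2, 'alice', 1)
#                     (2, 'dog.jpg', 'img/dog.jpg', '2018-07-02', 2.5, 'alice', 1)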
import pymysql
class CreateMysqlTable(object):
def __init__(self):
        # Connect to the database
self.db=pymysql.connect("localhost","root","new_password","manager_user")
        # Create a cursor
self.cursor=self.db.cursor()
def run(self):
        # Write the SQL statements that create the two tables
user_table="""
CREATE TABLE user_infomation(
id INT NOT NULL AUTO_INCREMENT,
user_name CHAR(20) NOT NULL,
pass CHAR(50) NOT NULL,
PRIMARY KEY(id),
UNIQUE (user_name)
);
"""
photo_table="""
CREATE TABLE photo_infomation(
id INT NOT NULL AUTO_INCREMENT,
image_name CHAR(20) NOT NULL,
img_path CHAR(50) NOT NULL,
img_time DATE NOT NULL,
img_size FLOAT NOT NULL,
img_acription CHAR(20) NOT NULL,
one_img_id INT NOT NULL,
PRIMARY KEY(id),
UNIQUE(img_path)
);
"""
        # Execute the SQL statements
# self.cursor.execute(user_table)
self.cursor.execute(photo_table)
        # Commit the transaction
self.db.commit()
        # Close the database connection
self.db.close()
def main():
create_mysql_table=CreateMysqlTable()
create_mysql_table.run()
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 1,921 | py | 7 | create_db.py | 4 | 0.466832 | 0.460632 | 0 | 64 | 24.203125 | 81 |
ulicar/sentry-cli | 4,535,485,477,519 | a1b0645dad451cfa8b4dbb5203abf24ff6924320 | 9f071beeeab8adb28e2c8bbf77c2af7b2a66f506 | /sentry/client.py | 2f24f61c51432bd0224e0418dbb60d1f9828394b | [
"MIT"
]
| permissive | https://github.com/ulicar/sentry-cli | b23714cf4a2265bdbf97f2f95bfd301b67a50f3d | c95a77f8b74ced7c41dc8ab61fd31fdf64c08880 | refs/heads/master | 2021-05-02T13:45:57.346689 | 2016-11-02T19:33:03 | 2016-11-02T19:33:03 | 72,670,663 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
import requests
class Client(object):
def __init__(self, token, domain, debug=False):
assert isinstance(token, str)
assert isinstance(domain, str)
self.token = token
self.domain = domain
self.debug = debug
def _do_query(self, resource):
        base = 'http://{domain}/api/0'.format(domain=self.domain)  # token is sent via HTTP basic auth below
url = base + resource
if self.debug:
print (url)
return requests.get(url, auth=requests.auth.HTTPBasicAuth(self.token, '')).json()
def get_organizations(self):
return self._do_query('/organizations/')
def get_organization(self, organization_slug):
return self._do_query('/organizations/{o}/'.format(o=organization_slug))
def get_projects(self, organization_slug):
return self._do_query('/{o}/projects/'.format(o=organization_slug))
def get_project(self, project, organization_slug):
return self._do_query('/projects/{o}/{p}/'.format(o=organization_slug, p=project))
def get_groups(self, project, organization_slug):
return self._do_query('/projects/{o}/{p}/groups/'.format(o=organization_slug, p=project))
def get_group(self, group_id):
return self._do_query('/groups/{g}/'.format(g=str(group_id)))
def get_events(self, group_id):
return self._do_query('/groups/{g}/events/'.format(g=str(group_id)))
def get_event(self, event_id):
return self._do_query('/events/{e}/'.format(e=str(event_id)))
| UTF-8 | Python | false | false | 1,528 | py | 4 | client.py | 3 | 0.631545 | 0.63089 | 0 | 46 | 32.195652 | 97 |
jasonshih/googleads-python-legacy-lib | 18,305,150,623,495 | 1bc99012d5c3b1c149a83dfde3acad2602fe625c | 93f47ba04fc18c4e537f0a48fe6232e2a89a4d30 | /tests/adspygoogle/dfa/v1_18/dfa_logger_unittest.py | 02825a198a2779854fd758cf3143ca4abf1f023f | [
"Apache-2.0"
]
| permissive | https://github.com/jasonshih/googleads-python-legacy-lib | c56dc52a1dab28b9de461fd5db0fcd6020b84a04 | 510fad41ecf986fe15258af64b90f99a96dc5548 | refs/heads/master | 2021-04-30T22:12:12.900275 | 2015-03-06T15:35:21 | 2015-03-06T15:35:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests to cover Logger."""
__author__ = 'api.jdilallo@gmail.com (Joseph DiLallo)'
import logging
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
import unittest
from adspygoogle.common import Utils
from tests.adspygoogle.dfa.v1_18 import client
from tests.adspygoogle.dfa.v1_18 import HTTP_PROXY
from tests.adspygoogle.dfa.v1_18 import SERVER_V1_18
from tests.adspygoogle.dfa.v1_18 import VERSION_V1_18
class DfaLoggerTestV1_18(unittest.TestCase):
"""Unittest suite for Logger using v1_18."""
SERVER = SERVER_V1_18
VERSION = VERSION_V1_18
TMP_LOG = os.path.join('..', '..', '..', '..', 'logs', 'logger_unittest.log')
DEBUG_MSG1 = 'Message before call to an API method.'
DEBUG_MSG2 = 'Message after call to an API method.'
client.debug = False
def setUp(self):
"""Prepare unittest."""
print self.id()
def testUpperStackLogging(self):
"""Tests whether we can define logger at client level and log before and
after the API request is made.
"""
logger = logging.getLogger(self.__class__.__name__)
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(self.__class__.TMP_LOG)
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
# Clean up temporary log file.
Utils.PurgeLog(self.__class__.TMP_LOG)
logger.debug(self.__class__.DEBUG_MSG1)
advertiser_service = client.GetAdvertiserService(
self.__class__.SERVER, self.__class__.VERSION, HTTP_PROXY)
advertiser_service.GetAdvertisers({})
logger.debug(self.__class__.DEBUG_MSG2)
data = Utils.ReadFile(self.__class__.TMP_LOG)
self.assertEqual(data.find(self.__class__.DEBUG_MSG1), 0)
self.assertEqual(data.find(self.__class__.DEBUG_MSG2),
len(self.__class__.DEBUG_MSG1) + 1)
# Clean up and remove temporary log file.
Utils.PurgeLog(self.__class__.TMP_LOG)
os.remove(self.__class__.TMP_LOG)
if __name__ == '__main__':
unittest.main()
| UTF-8 | Python | false | false | 2,579 | py | 76 | dfa_logger_unittest.py | 60 | 0.688639 | 0.670027 | 0 | 79 | 31.64557 | 79 |
kristinyanah/backendrepo | 1,039,382,099,143 | fe07d1bcbd17c03eadbfab36a1ca5bf335fabc11 | e3f34e3fe9783c3e6405b128d0cf1a6be79a7ec8 | /models/cnn_gnn/code/preprocess_data.py | c83dc99f62f5d17c315dc85bd6e778fdd90f3a58 | [
"Apache-2.0"
]
| permissive | https://github.com/kristinyanah/backendrepo | e1586e93afbc35b3c99d96b533c96de7da3d25ea | 251fd350dbde04bf0428da1e5ee2128acc6c5d10 | refs/heads/master | 2020-05-24T12:47:34.003108 | 2019-05-19T09:50:09 | 2019-05-19T09:50:09 | 187,275,140 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from collections import defaultdict
import os
import pickle
import sys
import numpy as np
from rdkit import Chem
def load_dictionary(file_name):
with open(file_name, 'rb') as f:
d = pickle.load(f)
dictionary = defaultdict(lambda: len(d))
dictionary.update(d)
return dictionary
def create_atoms(atom_dict, mol):
# NOTE: my error handling
try:
atoms = [atom_dict[a.GetSymbol()] for a in mol.GetAtoms()]
except Exception as e:
print("Error creating atoms: {}".format(str(e)))
return None
return np.array(atoms)
def create_ijbonddict(bond_dict, mol):
i_jbond_dict = defaultdict(lambda: [])
for b in mol.GetBonds():
i, j = b.GetBeginAtomIdx(), b.GetEndAtomIdx()
bond = bond_dict[str(b.GetBondType())]
i_jbond_dict[i].append((j, bond))
i_jbond_dict[j].append((i, bond))
return i_jbond_dict
def create_fingerprints(fingerprint_dict, atoms, i_jbond_dict, radius):
"""Extract r-radius subgraphs (i.e., fingerprints)
from a molecular graph using WeisfeilerLehman-like algorithm."""
if (len(atoms) == 1) or (radius == 0):
fingerprints = [fingerprint_dict[a] for a in atoms]
else:
vertices = atoms
for _ in range(radius):
fingerprints = []
for i, j_bond in i_jbond_dict.items():
neighbors = [(vertices[j], bond) for j, bond in j_bond]
fingerprint = (vertices[i], tuple(sorted(neighbors)))
fingerprints.append(fingerprint_dict[fingerprint])
vertices = fingerprints
return np.array(fingerprints)
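# Sketch of how the fingerprint extraction is used (assumed SMILES string, not from the dataset):
#   mol   = Chem.MolFromSmiles('CCO')      # ethanol, a three-atom molecular graph
#   atoms = create_atoms(atom_dict, mol)
#   bonds = create_ijbonddict(bond_dict, mol)
#   fps   = create_fingerprints(fingerprint_dict, atoms, bonds, radius=1)
# With radius=1 every atom is relabelled by the id of (atom, sorted 1-hop neighbourhood),
# i.e. one Weisfeiler-Lehman style refinement step.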
def create_adjacency(mol):
adjacency = Chem.GetAdjacencyMatrix(mol)
return np.array(adjacency)
def split_sequence(word_dict, sequence, ngram):
sequence = '-' + sequence + '='
words = [word_dict[sequence[i:i+ngram]]
for i in range(len(sequence)-ngram+1)]
return np.array(words)
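# e.g. with ngram=3 a protein sequence 'MKV' (illustrative input) is padded to '-MKV=' and
# split into the overlapping words ['-MK', 'MKV', 'KV='] before being mapped to integer ids.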
def dump_dictionary(dictionary, file_name):
with open(file_name, 'wb') as f:
pickle.dump(dict(dictionary), f)
if __name__ == "__main__":
DATASET, radius, ngram, test = sys.argv[1:]
radius, ngram = map(int, [radius, ngram])
# make boolean
test = test.lower() == 'true'
# TODO: replace this so it isn't hardcoded
# with open('../dataset/' + DATASET + '/original/'
# 'smiles_sequence_interaction.txt', 'r') as f:
# cpi_list = f.read().strip().split('\n')
# if we're generating test data, pull from test set
if test:
testset_name = "comp_seq_list_C1013_S1"
with open('../dataset/' + DATASET + '/test_original/'
+ testset_name + '.txt', 'r') as f:
cpi_list = f.read().strip().split('\n')
# with open('../dataset/' + DATASET + '/test_original/'
# 'comp_seq_list_C1013_S2.txt', 'r') as f:
# cpi_list = f.read().strip().split('\n')
else:
with open('../dataset/' + DATASET + '/original/'
'50_pos_50_neg_composite_interactions_no_period_no_failure.txt', 'r') as f:
cpi_list = f.read().strip().split('\n')
"""Exclude data contains "." in the smiles."""
cpi_list = list(filter(lambda x:
'.' not in x.strip().split()[0], cpi_list))
N = len(cpi_list)
atom_dict = defaultdict(lambda: len(atom_dict))
bond_dict = defaultdict(lambda: len(bond_dict))
fingerprint_dict = defaultdict(lambda: len(fingerprint_dict))
word_dict = defaultdict(lambda: len(word_dict))
Compounds, Adjacencies, Proteins, Interactions = [], [], [], []
for no, cpi in enumerate(cpi_list):
print('/'.join(map(str, [no+1, N])))
# TODO: make this nicer (perhaps we unpack first two, then third is in try/except block where we pass if we except)
# check for cpi data interaction
# has_interaction = False
cpi_data = cpi.strip().split()
smiles = cpi_data[0]
sequence = cpi_data[1]
try:
interaction = cpi_data[2]
except:
print("CPI line did not have a third element; setting -999 as sentinel")
interaction = -999
# if len(cpi_data) == 3:
# smiles, sequence, interaction = cpi.strip().split()
# elif len(cpi_data) == 2:
# smiles, sequence = cpi.strip().split()
# else:
# raise Exception ("Unexpected input, CPI file line has {} elements: {}".format(len(cpi_data), cpi_data))
mol = Chem.MolFromSmiles(smiles)
atoms = create_atoms(atom_dict, mol)
# NOTE: my error handling
if atoms is None:
print("failure in sequence no {}, {}".format(no, cpi))
continue
i_jbond_dict = create_ijbonddict(bond_dict, mol)
fingerprints = create_fingerprints(fingerprint_dict, atoms, i_jbond_dict, radius)
Compounds.append(fingerprints)
adjacency = create_adjacency(mol)
Adjacencies.append(adjacency)
words = split_sequence(word_dict, sequence, ngram)
Proteins.append(words)
interaction = np.array([int(interaction)])
Interactions.append(interaction)
# change dir name according to whether or not this is a test set
if test:
dir_input = ('../dataset/' + DATASET + '/test_input/'
'radius' + str(radius) + '_ngram' + str(ngram) + '/' + testset_name + '/')
else:
dir_input = ('../dataset/' + DATASET + '/input/'
'radius' + str(radius) + '_ngram' + str(ngram) + '/')
# NOTE: this is a python3 thing, so doing it in python2
# os.makedirs(dir_input, exist_ok=True)
try:
os.makedirs(dir_input)
except:
pass
np.save(dir_input + 'compounds', Compounds)
np.save(dir_input + 'adjacencies', Adjacencies)
np.save(dir_input + 'proteins', Proteins)
np.save(dir_input + 'interactions', Interactions)
dump_dictionary(atom_dict, dir_input + 'atom_dict.pickle')
dump_dictionary(bond_dict, dir_input + 'bond_dict.pickle')
dump_dictionary(fingerprint_dict, dir_input + 'fingerprint_dict.pickle')
dump_dictionary(word_dict, dir_input + 'word_dict.pickle')
print('The preprocess of ' + DATASET + ' dataset has finished!')
| UTF-8 | Python | false | false | 6,305 | py | 18 | preprocess_data.py | 8 | 0.591594 | 0.58636 | 0 | 182 | 33.642857 | 123 |
Rlogarisation/NihaoPython | 790,274,031,418 | 0a235366197728375ac4358e6c8596b13af10fe0 | cb0119df748ac0d83f9f6ce1c82910e1e9232fe2 | /lab03/lab03_timetable/timetable_test.py | 74f2c9e4616d0447c0e8c95c88aa91e7c31bde2c | []
| no_license | https://github.com/Rlogarisation/NihaoPython | 3bf7cb37fea068695e0f483ab426bd75d4e68482 | 6a302ba548a110378a3460cb46e93607d4e58f48 | refs/heads/main | 2023-05-13T18:19:13.256740 | 2021-05-20T05:03:15 | 2021-05-20T05:03:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from timetable import timetable
from datetime import date, time
import datetime
def test_for_timetable():
assert(timetable([date(2019,9,27)], [time(14,10)]) == [datetime.datetime(2019, 9, 27, 14, 10)])
assert(timetable([date(2019,9,27), date(2019,9,30)], [time(14,10), time(10,30)]) == [datetime.datetime(2019, 9, 27, 10, 30), datetime.datetime(2019, 9, 27, 14, 10), datetime.datetime(2019, 9, 30, 10, 30), datetime.datetime(2019, 9, 30, 14, 10)])
| UTF-8 | Python | false | false | 471 | py | 39 | timetable_test.py | 35 | 0.673036 | 0.4862 | 0 | 7 | 65.714286 | 249 |
npolshakova/nnet | 1,254,130,459,556 | 7a85a63c1a12d63969b768991fc858fd5d1e0271 | d711f546a22d6942e74f0def14ce819f20b3ffff | /stencil/generate_data.py | 12a3d8330e5ce3ed212f96c6813798b6b949eb6f | []
| no_license | https://github.com/npolshakova/nnet | dda50dab3e1e35b3e262c5ca5f4453604a0bbfca | 4a9b8e96a179059b1b01d96a227a8d1db8eff60c | refs/heads/master | 2021-10-27T15:26:42.174227 | 2019-04-18T00:43:00 | 2019-04-18T00:43:00 | 181,326,965 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import random
def generate_data():
X = [[1 if np.random.normal() > 0 else 0, 1 if np.random.normal() > 0 else 0] for ix in range(100)]
Y = [1 if c[0] != c[1] else 0 for c in X]
return np.array(X),np.array(Y)
| UTF-8 | Python | false | false | 240 | py | 3 | generate_data.py | 2 | 0.608333 | 0.554167 | 0 | 7 | 33.285714 | 103 |
nuxeo-cps/zope2--PortalTransforms | 15,539,191,721,312 | bc685dda6bef446eedc8f1804ff2ae2a24943c05 | ae5a60fb7105533eb46a7113b709f49f9525b419 | /zope/MimeTypesTool.py | a0f347c7f0e4f4feac59700b25fd149a85e73956 | [
"BSD-3-Clause"
]
| permissive | https://github.com/nuxeo-cps/zope2--PortalTransforms | 61cab5a168c254a5850bcb0fb182b064ce1ba78f | 753f67202b016d0b07edd3bc65fd827cb39e50db | refs/heads/main | 2023-01-30T03:53:56.400774 | 2012-01-21T21:23:51 | 2012-01-21T21:23:51 | 317,994,731 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from OFS.Folder import Folder
try:
from Products.CMFCore.permissions import ManagePortal
except ImportError: # BBB: CMF 1.4
from Products.CMFCore.CMFCorePermissions import ManagePortal
from Products.CMFCore.ActionProviderBase import ActionProviderBase
from Products.CMFCore.TypesTool import FactoryTypeInformation
from Products.CMFCore.utils import UniqueObject
from Products.PageTemplates.PageTemplateFile import PageTemplateFile
from Globals import InitializeClass
from Acquisition import aq_parent
from AccessControl import ClassSecurityInfo
from Products.PortalTransforms.interfaces import isourceAdapter, imimetypes_registry
from Products.PortalTransforms.utils import log, _www
from Products.PortalTransforms.MimeTypesRegistry import MimeTypesRegistry
from Products.PortalTransforms.zope.MimeTypeItem import MimeTypeItem
__revision__ = '$Id$'
class MimeTypesTool(UniqueObject, ActionProviderBase, Folder, MimeTypesRegistry):
"""extend the MimeTypesRegistry of CMF compliance
"""
__implements__ = (imimetypes_registry, isourceAdapter)
id = 'mimetypes_registry'
meta_type = 'MimeTypes Registry'
isPrincipiaFolderish = 1 # Show up in the ZMI
meta_types = all_meta_types = (
{ 'name' : 'MimeType',
'action' : 'manage_addMimeTypeForm'},
)
manage_options = (
( { 'label' : 'MimeTypes',
'action' : 'manage_main'},) +
Folder.manage_options[2:]
)
manage_addMimeTypeForm = PageTemplateFile('addMimeType', _www)
manage_main = PageTemplateFile('listMimeTypes', _www)
manage_editMimeTypeForm = PageTemplateFile('editMimeType', _www)
security = ClassSecurityInfo()
security.declareProtected(ManagePortal, 'register')
security.declareProtected(ManagePortal, 'unregister')
security.declarePublic('mimetypes')
security.declarePublic('list_mimetypes')
security.declarePublic('lookup')
security.declarePublic('lookupExtension')
security.declarePublic('classify')
# FIXME
__allow_access_to_unprotected_subobjects__ = 1
def __init__(self, fill=1):
        MimeTypesRegistry.__init__(self, fill=fill)
del self.defaultMimetype
self.manage_addProperty('defaultMimetype', 'text/plain', 'string')
del self.unicodePolicy
self.manage_addProperty('unicodePolicies', 'strict ignore replace', 'tokens')
self.manage_addProperty('unicodePolicy', 'unicodePolicies', 'selection')
def lookup(self, mimetypestring):
result = MimeTypesRegistry.lookup(self, mimetypestring)
return tuple([m.__of__(self) for m in result])
security.declareProtected(ManagePortal, 'manage_delObjects')
def manage_delObjects(self, ids, REQUEST=None):
""" delete the selected mime types """
for id in ids:
self.unregister(self.lookup(id)[0])
if REQUEST is not None:
REQUEST['RESPONSE'].redirect(self.absolute_url()+'/manage_main')
security.declareProtected(ManagePortal, 'manage_addMimeType')
def manage_addMimeType(self, id, mimetypes, extensions, icon_path, binary=0,
REQUEST=None):
"""add a mime type to the tool"""
mt = MimeTypeItem(id, mimetypes, extensions, binary, icon_path)
self.register(mt)
if REQUEST is not None:
REQUEST['RESPONSE'].redirect(self.absolute_url()+'/manage_main')
security.declareProtected(ManagePortal, 'manage_editMimeType')
def manage_editMimeType(self, name, new_name, mimetypes, extensions, icon_path, binary=0,
REQUEST=None):
"""edit a mime type by name"""
mt = self.lookup(name)[0]
self.unregister(mt)
mt.edit(new_name, mimetypes, extensions, icon_path, binary)
self.register(mt)
if REQUEST is not None:
REQUEST['RESPONSE'].redirect(self.absolute_url()+'/manage_main')
InitializeClass(MimeTypesTool)
| UTF-8 | Python | false | false | 3,955 | py | 60 | MimeTypesTool.py | 43 | 0.694817 | 0.692035 | 0 | 101 | 38.158416 | 93 |
rrada/playground | 10,943,576,691,955 | ec80876e136a1c328ed32ec88a8dac6521c15f02 | 41c72c519a40354fb84b6dfcb73d5f9506354ee5 | /controlserver/server.py | 08af23ca62c03fbcbd3237dce3fd2ab8f0c0e718 | []
| no_license | https://github.com/rrada/playground | c8f1b96a645ac9b49014aa1b158a73f09c7aaadd | fdabe925d1829dd9182d9daafe5a0dd4b041bec2 | refs/heads/main | 2023-02-18T08:03:20.791161 | 2021-01-20T19:25:11 | 2021-01-20T19:25:11 | 331,373,273 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import sys
import socket
import struct
import time
import signal
import argparse
from threading import Thread, get_ident
from socketserver import (
BaseServer,
BaseRequestHandler,
UDPServer,
UnixStreamServer,
StreamRequestHandler,
ThreadingMixIn,
)
from enum import IntEnum
from cmd import Cmd
VERSION = '0.1'
SERVER_HOST = '0.0.0.0'
SERVER_PORT = 10000
SERVER_ID = 0
BUFFER_SIZE = 1024
CLEANUP_INTERNVAL = 10
REMOTE_LIFETIME_MAX = 10
# 48 bytes max size
# 8B | 2B | 38B
# --------------
# ID | CMD | MSG
HEADER_FMT = '!QH38s'
# 38B bytes max size
# 2B | 36B
# --------------
# STATE | MSG
MSG_FMT = '!H36s'
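# Example of one complete datagram (illustrative values): remote id 42 reporting IDLE.
#   inner = struct.pack(MSG_FMT, 0, b'idle')        # ERemoteState.IDLE == 0, 2 + 36 bytes
#   dgram = struct.pack(HEADER_FMT, 42, 0, inner)   # EMsgType.PING == 0, 8 + 2 + 38 = 48 bytes
#   assert len(dgram) == struct.calcsize(HEADER_FMT)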
DEBUG=True
def dbgprint(args):
if DEBUG:
print(args)
class EMsgType(IntEnum):
PING = 0
JOB_OFFER = 1
class ERemoteState(IntEnum):
IDLE = 0
WORKING = 1
ERROR = 2
class ControlServerRemoteHandler(BaseRequestHandler):
"""Handle incomming communication with remote"""
def handle(self):
data = self.request[0].strip()
socket = self.request[1]
id, cmd, msg = struct.unpack(HEADER_FMT, data)
if cmd == EMsgType.PING:
# unpack and decode msg part of custom dgram
state, desc = struct.unpack(MSG_FMT, msg)
self.server.add_remote(id, self.client_address[0], self.client_address[1], state)
desc_decoded = desc.decode('utf-8')
if state == ERemoteState.IDLE:
pass
elif state == ERemoteState.WORKING:
pass
dbgprint(f"Remote state {ERemoteState(state).name} || desc: {desc_decoded}")
# sent same data back to client in uppercase
socket.sendto(data.upper(), self.client_address)
else:
dbgprint(f"Received cmd {EMsgType(cmd)}, remote should PING only")
class ControlServer(ThreadingMixIn, UDPServer):
remotes = {}
last_cleanup = time.time()
last_test_send = time.time()
def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True):
UDPServer.__init__(self, server_address, RequestHandlerClass, bind_and_activate)
def add_remote(self, id, addr, port, state):
"""Adds or updates the remote"""
if not self.remote_exist(id):
self.remotes[id] = {}
self.remotes[id]['addr'] = addr
self.remotes[id]['port'] = port
self.remotes[id]['state'] = state
self.remotes[id]['last_seen'] = time.time()
dbgprint(f'Adding remote: {id}')
else:
self.remotes[id]['addr'] = addr
self.remotes[id]['port'] = port
self.remotes[id]['state'] = state
self.remotes[id]['last_seen'] = time.time()
dbgprint(f'Updating remote: {id}')
def remove_remote(self, id):
if self.remote_exist(id):
dbgprint(f'Removing stale remote: {id}')
del self.remotes[id]
def cleanup_remotes(self):
"""cleanup stale clients in defined interval"""
if time.time() - self.last_cleanup > CLEANUP_INTERNVAL:
if len(self.remotes) > 0:
for remote in self.remotes.copy():
if time.time() - self.remotes[remote]['last_seen'] > REMOTE_LIFETIME_MAX:
self.remove_remote(remote)
# update cleanup timer
self.last_cleanup = time.time()
def remote_exist(self, id) -> bool:
return True if id in self.remotes else False
def is_remote_alive(self, id) -> bool:
if self.remote_exist(id):
return True if (time.time() - self.remotes[id]['last_seen'] < REMOTE_LIFETIME_MAX) else False
def is_remote_idle(self, id) -> bool:
if self.remote_exist(id):
return self.remotes[id]['state'] == ERemoteState.IDLE
def service_actions(self):
self.cleanup_remotes()
# just testing communication sent to client in 1 s interval
if time.time() - self.last_test_send > 1:
for remote in self.remotes.keys():
if self.is_remote_alive(remote) and self.is_remote_idle(remote):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
pack = struct.pack(HEADER_FMT, SERVER_ID, EMsgType.JOB_OFFER, "Job offer from server".encode('utf-8'))
sock.sendto(pack, (self.remotes[remote]['addr'], self.remotes[remote]['port']))
sock.close()
self.last_test_send = time.time()
def server_activate(self):
pass
def signal_handler(signalNumber, frame):
raise ExitApp
class ExitApp(Exception):
pass
if __name__ == '__main__':
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
# Arguments parser
ap = argparse.ArgumentParser()
ap.add_argument("-H", "--host", action='store', type=str, default=SERVER_HOST, help=f"Control server host [{SERVER_HOST}]")
ap.add_argument("-p", "--port", action='store', type=int, default=SERVER_PORT, help=f"Control server port [{SERVER_PORT}]")
args = vars(ap.parse_args())
server = ControlServer((args['host'], args['port']), ControlServerRemoteHandler)
try:
server.serve_forever()
except ExitApp:
# close & cleanup
server.shutdown() | UTF-8 | Python | false | false | 5,333 | py | 4 | server.py | 2 | 0.600225 | 0.591037 | 0 | 181 | 28.469613 | 127 |
zahrafali/comp5107 | 6,786,048,354,761 | e6ce4222cbe4a10e335ee145203e253b016bc5cd | 8578e5a5a6f32c2e56f554ced60ad7287aebdf71 | /Project/testing.py | 651e07e05757ced82501efb3d7e4c68a968eb47e | []
| no_license | https://github.com/zahrafali/comp5107 | 334bdb17d8119823e3e73023121369ef03bf145d | b64f631d8e8dca32ba30e57ceceae2c9a2b65862 | refs/heads/master | 2022-03-01T21:57:47.055407 | 2018-04-24T04:36:00 | 2018-04-24T04:36:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import helper as h
import numpy as np
import max_likelihood as ml
import bayesian_method as bl
import parzen_window as pz
import ho_kashyab as hk
import k_nn as kn
import fishers_disc as fd
def test_classifier(class1_test_points, class2_test_points, x1_ml_estimated_cov, x2_ml_estimated_cov,
x1_ml_estimated_mean, x2_ml_estimated_mean, class1_testing_points_count,
class2_testing_points_count, p1, p2):
# classification results
class1_true = 0.0
class1_false = 0.0
class2_true = 0.0
class2_false = 0.0
# print(class1_test_points[:, 1])
# print(class1_testing_points_count)
# classify each point
for j in range(class1_testing_points_count):
discriminant_value = h.calculate_discriminant(class1_test_points[:, j], x1_ml_estimated_cov,
x2_ml_estimated_cov, x1_ml_estimated_mean, x2_ml_estimated_mean,
p1,
p2)
# print("class1 Disc Val: ", discriminant_value)
if discriminant_value > 0:
class1_true += 1
else:
class1_false += 1
for j in range(class2_testing_points_count):
discriminant_value = h.calculate_discriminant(class2_test_points[:, j], x1_ml_estimated_cov,
x2_ml_estimated_cov, x1_ml_estimated_mean, x2_ml_estimated_mean,
p1,
p2)
# print("class2 Disc Val: ", discriminant_value)
if discriminant_value < 0:
class2_true += 1
else:
class2_false += 1
class1_accuracy = (class1_true / len(class1_test_points[0])) * 100
class2_accuracy = (class2_true / len(class2_test_points[0])) * 100
total_accuracy = (class1_true + class2_true) * 100 / (len(class1_test_points[0]) + len(class2_test_points[0]))
# print(class1_true, class1_false)
# print(class2_true, class2_false)
print(total_accuracy)
return class1_accuracy, class2_accuracy, total_accuracy
def ml_k_cross_validation(class1_data, class2_data, p1, p2, k, n1, n2):
test_results_ml_class1 = []
test_results_ml_class2 = []
accuracies = []
for i in range(0, k, 1):
print('Cross:' + str(i + 1))
class1_testing_points_count = int(n1 / k)
class1_training_points_count = int(n1 - n1 / k)
class1_start = int(n1 * i / k)
class1_end = int((i + 1) * n1 / k)
class2_testing_points_count = int(n2 / k)
class2_training_points_count = int(n2 - n2 / k)
class2_start = int(n2 * i / k)
class2_end = int((i + 1) * n2 / k)
# print("start:", class1_start, "\tend:", class1_end)
# print("start:", class2_start, "\tend:", class2_end)
class1_test_points = class1_data[:, class1_start: class1_end]
class1_train_points = class1_data[:, 0:class1_start]
class1_train_points = np.append(class1_train_points, class1_data[:, class1_end:], axis=1)
class2_test_points = class2_data[:, class2_start: class2_end]
class2_train_points = class2_data[:, 0:class2_start]
class2_train_points = np.append(class2_train_points, class2_data[:, class2_end:], axis=1)
# estimated mean and cov using ML
x1_ml_estimated_mean = ml.estimate_mean_ml(class1_train_points, len(class1_train_points[0]))
x1_ml_estimated_cov = ml.estimate_cov_ml(class1_train_points, x1_ml_estimated_mean,
class1_training_points_count)
x2_ml_estimated_mean = ml.estimate_mean_ml(class2_train_points, len(class2_train_points[0]))
x2_ml_estimated_cov = ml.estimate_cov_ml(class2_train_points, x2_ml_estimated_mean,
class2_training_points_count)
ml_class1_accuracy, ml_class2_accuracy, total_accuracy = test_classifier(class1_test_points, class2_test_points,
x1_ml_estimated_cov,
x2_ml_estimated_cov,
x1_ml_estimated_mean,
x2_ml_estimated_mean,
class1_testing_points_count,
class2_testing_points_count, p1, p2)
# print(ml_class1_accuracy, ml_class2_accuracy)
test_results_ml_class1 = np.append(test_results_ml_class1, ml_class1_accuracy)
test_results_ml_class2 = np.append(test_results_ml_class2, ml_class2_accuracy)
accuracies = np.append(accuracies, total_accuracy)
print('\nML Average Accuracy:', np.mean(accuracies))
return test_results_ml_class1, test_results_ml_class2
def bl_k_cross_validation(class1_data, class2_data, p1, p2, k, n1, n2):
test_results_bl_class1 = []
test_results_bl_class2 = []
accuracies = []
for i in range(0, k, 1):
print('Cross:' + str(i + 1))
class1_testing_points_count = int(n1 / k)
class1_training_points_count = int(n1 - n1 / k)
class1_start = int(n1 * i / k)
class1_end = int((i + 1) * n1 / k)
class2_testing_points_count = int(n2 / k)
class2_training_points_count = int(n2 - n2 / k)
class2_start = int(n2 * i / k)
class2_end = int((i + 1) * n2 / k)
# print("start:", class1_start, "\tend:", class1_end)
# print("start:", class2_start, "\tend:", class2_end)
class1_test_points = class1_data[:, class1_start: class1_end]
class1_train_points = class1_data[:, 0:class1_start]
class1_train_points = np.append(class1_train_points, class1_data[:, class1_end:], axis=1)
class2_test_points = class2_data[:, class2_start: class2_end]
class2_train_points = class2_data[:, 0:class2_start]
class2_train_points = np.append(class2_train_points, class2_data[:, class2_end:], axis=1)
class1_ml_est_mean = ml.estimate_mean_ml(class1_train_points, len(class1_train_points[0]))
class1_ml_est_cov = ml.estimate_cov_ml(class1_train_points, class1_ml_est_mean,
class1_training_points_count)
class2_ml_est_mean = ml.estimate_mean_ml(class2_train_points, len(class2_train_points[0]))
class2_ml_est_cov = ml.estimate_cov_ml(class2_train_points, class2_ml_est_mean,
class2_training_points_count)
# Estimating the means using BL
class1_bl_initial_mean = np.ones((len(class1_data), 1))
class1_bl_initial_cov = np.identity(len(class1_data))
class2_bl_initial_mean = np.ones((len(class2_data), 1))
class2_bl_initial_cov = np.identity(len(class2_data))
class1_bl_est_mean = bl.estimate_mean_bl(class1_train_points, class1_bl_initial_mean, class1_bl_initial_cov,
class1_ml_est_cov, len(class1_train_points[0]))
class2_bl_est_mean = bl.estimate_mean_bl(class2_train_points, class2_bl_initial_mean, class2_bl_initial_cov,
class2_ml_est_cov, len(class2_train_points[0]))
bl_class1_accuracy, bl_class2_accuracy, total_accuracy = test_classifier(class1_test_points, class2_test_points,
class1_ml_est_cov, class2_ml_est_cov,
class1_bl_est_mean, class2_bl_est_mean,
class1_testing_points_count,
class2_testing_points_count, p1, p2)
# print(bl_class1_accuracy, bl_class2_accuracy)
test_results_bl_class1 = np.append(test_results_bl_class1, bl_class1_accuracy)
test_results_bl_class2 = np.append(test_results_bl_class2, bl_class2_accuracy)
accuracies = np.append(accuracies, total_accuracy)
print('\nBL Average Accuracy:', np.mean(accuracies))
return test_results_bl_class1, test_results_bl_class2
# def parzen_k_cross_validation(class1_data, class2_data, p1, p2, k, n1, n2, kernel_cov, step_size):
# test_results_parzen_class1 = []
# test_results_parzen_class2 = []
#
# accuracies = []
#
# for i in range(0, k, 1):
# print('Cross:' + str(i + 1))
# class1_testing_points_count = int(n1 / k)
# class1_training_points_count = int(n1 - n1 / k)
# class1_start = int(n1 * i / k)
# class1_end = int((i + 1) * n1 / k)
#
# class2_testing_points_count = int(n2 / k)
# class2_training_points_count = int(n2 - n2 / k)
# class2_start = int(n2 * i / k)
# class2_end = int((i + 1) * n2 / k)
#
# # print("start:", class1_start, "\tend:", class1_end)
# # print("start:", class2_start, "\tend:", class2_end)
#
# class1_test_points = class1_data[:, class1_start: class1_end]
# class1_train_points = class1_data[:, 0:class1_start]
# class1_train_points = np.append(class1_train_points, class1_data[:, class1_end:], axis=1)
#
# class2_test_points = class2_data[:, class2_start: class2_end]
# class2_train_points = class2_data[:, 0:class2_start]
# class2_train_points = np.append(class2_train_points, class2_data[:, class2_end:], axis=1)
#
# # estimated mean and cov using parzen window
# x1_parzen_estimated_mean, x1_parzen_estimated_covariance, x2_parzen_estimated_mean, x2_parzen_estimated_covariance = h.estimated_mean_parzen(
# class1_train_points, class2_train_points, kernel_cov, step_size)
#
# class1_parzen_est_mean, class1_parzen_est_cov, class2_parzen_est_mean, class2_parzen_est_cov = pz.estimated_mean_parzen(
# class1_data, class2_data, len(class1_data), kernel_cov, step_size)
#
# parzen_class1_accuracy, parzen_class2_accuracy = test_classifier(class1_test_points, class2_test_points,
# x1_parzen_estimated_covariance,
# x2_parzen_estimated_covariance,
# x1_parzen_estimated_mean,
# x2_parzen_estimated_mean,
# class1_testing_points_count,
# class2_testing_points_count, p1, p2)
# test_results_parzen_class1 = np.append(test_results_parzen_class1, parzen_class1_accuracy)
# test_results_parzen_class2 = np.append(test_results_parzen_class2, parzen_class2_accuracy)
#
# return test_results_parzen_class1, test_results_parzen_class2
def knn_k_cross_validation(class1_data, class2_data, k, n1, n2, k_nn):
test_results_knn_class1 = []
test_results_knn_class2 = []
accuracies = []
for i in range(0, k, 1):
print('Cross:' + str(i + 1))
class1_testing_points_count = int(n1 / k)
class1_training_points_count = int(n1 - n1 / k)
class1_start = int(n1 * i / k)
class1_end = int((i + 1) * n1 / k)
class2_testing_points_count = int(n2 / k)
class2_training_points_count = int(n2 - n2 / k)
class2_start = int(n2 * i / k)
class2_end = int((i + 1) * n2 / k)
class1_test_points = class1_data[:, class1_start: class1_end]
class1_train_points = class1_data[:, 0:class1_start]
class1_train_points = np.append(class1_train_points, class1_data[:, class1_end:], axis=1)
class2_test_points = class2_data[:, class2_start: class2_end]
class2_train_points = class2_data[:, 0:class2_start]
class2_train_points = np.append(class2_train_points, class2_data[:, class2_end:], axis=1)
class1_test_points = np.array(class1_test_points).transpose()
class2_test_points = np.array(class2_test_points).transpose()
class1_true = 0
class1_false = 0
class2_true = 0
class2_false = 0
for x in class1_test_points:
classification = kn.get_neighbors(x, class1_train_points, class2_train_points, k_nn)
if classification == 1:
class1_true = class1_true + 1
else:
class1_false = class1_false + 1
for x in class2_test_points:
classification = kn.get_neighbors(x, class1_train_points, class2_train_points, k_nn)
if classification == 2:
class2_true = class2_true + 1
else:
class2_false = class2_false + 1
class1_accuracy = (class1_true / len(class1_test_points)) * 100
class2_accuracy = (class2_true / len(class2_test_points)) * 100
test_results_knn_class1 = np.append(test_results_knn_class1, class1_accuracy)
test_results_knn_class2 = np.append(test_results_knn_class2, class2_accuracy)
accuracy = (class1_true + class2_true) * 100 / (len(class1_test_points) + len(class2_test_points))
accuracies = np.append(accuracies, accuracy)
# print(class1_testing_points_count, class2_testing_points_count)
#
# print(class1_true, class1_false)
# print(class2_true, class2_false)
print(accuracy)
print('\nK-NN Average Accuracy:', np.mean(accuracies))
return test_results_knn_class1, test_results_knn_class2
def fd_k_cross_validation(class1_data, class2_data, k, n1, n2, w, p1, p2):
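    """k-fold cross-validation of the Fisher's-discriminant classifier.
    Each fold re-estimates the class means and covariances by maximum
    likelihood on the training columns, projects them onto the supplied
    direction w, and classifies the held-out columns with priors p1 / p2.
    Returns per-fold class-1 and class-2 accuracies (in percent).
    """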
test_results_fd_class1 = []
test_results_fd_class2 = []
accuracies = []
for i in range(0, k, 1):
print('Cross:' + str(i + 1))
class1_testing_points_count = int(n1 / k)
class1_training_points_count = int(n1 - n1 / k)
class1_start = int(n1 * i / k)
class1_end = int((i + 1) * n1 / k)
class2_testing_points_count = int(n2 / k)
class2_training_points_count = int(n2 - n2 / k)
class2_start = int(n2 * i / k)
class2_end = int((i + 1) * n2 / k)
class1_test_points = class1_data[:, class1_start: class1_end]
class1_train_points = class1_data[:, 0:class1_start]
class1_train_points = np.append(class1_train_points, class1_data[:, class1_end:], axis=1)
class2_test_points = class2_data[:, class2_start: class2_end]
class2_train_points = class2_data[:, 0:class2_start]
class2_train_points = np.append(class2_train_points, class2_data[:, class2_end:], axis=1)
class1_ml_est_mean = ml.estimate_mean_ml(class1_train_points, len(class1_train_points[0]))
class1_ml_est_cov = ml.estimate_cov_ml(class1_train_points, class1_ml_est_mean,
class1_training_points_count)
class2_ml_est_mean = ml.estimate_mean_ml(class2_train_points, len(class2_train_points[0]))
class2_ml_est_cov = ml.estimate_cov_ml(class2_train_points, class2_ml_est_mean,
class2_training_points_count)
fd_mean1 = w.transpose() @ class1_ml_est_mean
fd_mean2 = w.transpose() @ class2_ml_est_mean
fd_cov1 = w.transpose() @ class1_ml_est_cov @ w
fd_cov2 = w.transpose() @ class2_ml_est_cov @ w
class1_test_points = np.array(class1_test_points).transpose()
class2_test_points = np.array(class2_test_points).transpose()
class1_true = 0
class1_false = 0
class2_true = 0
class2_false = 0
for x in class1_test_points:
x_test = w.transpose() @ x
classification = fd.classify(x_test, fd_mean1, fd_mean2, fd_cov1, fd_cov2, p1, p2)
if classification == 1:
class1_true = class1_true + 1
else:
class1_false = class1_false + 1
for x in class2_test_points:
x_test = w.transpose() @ x
classification = fd.classify(x_test, fd_mean1, fd_mean2, fd_cov1, fd_cov2, p1, p2)
if classification == 2:
class2_true = class2_true + 1
else:
class2_false = class2_false + 1
class1_accuracy = (class1_true / len(class1_test_points)) * 100
class2_accuracy = (class2_true / len(class2_test_points)) * 100
test_results_fd_class1 = np.append(test_results_fd_class1, class1_accuracy)
test_results_fd_class2 = np.append(test_results_fd_class2, class2_accuracy)
accuracy = (class1_true + class2_true) * 100 / (len(class1_test_points) + len(class2_test_points))
accuracies = np.append(accuracies, accuracy)
print(accuracy)
print('\nFisher\'s Disc. Average Accuracy:', np.mean(accuracies))
return test_results_fd_class1, test_results_fd_class2
def hk_k_cross_validation(class1_data, class2_data, k, n1, n2):
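    """k-fold cross-validation of the Ho-Kashyap linear classifier.
    Each fold learns the weight vector a on the training columns, then tests
    on held-out columns augmented with a leading 1 (bias component); the
    class-2 test points are additionally negated (Ho-Kashyap sample
    normalization). Returns per-fold accuracies (in percent).
    """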
test_results_hk_class1 = []
test_results_hk_class2 = []
accuracies = []
a = []
# b = []
for i in range(0, k, 1):
print('Cross:' + str(i + 1))
class1_testing_points_count = int(n1 / k)
class1_training_points_count = int(n1 - n1 / k)
class1_start = int(n1 * i / k)
class1_end = int((i + 1) * n1 / k)
class2_testing_points_count = int(n2 / k)
class2_training_points_count = int(n2 - n2 / k)
class2_start = int(n2 * i / k)
class2_end = int((i + 1) * n2 / k)
class1_test_points = class1_data[:, class1_start: class1_end]
class1_train_points = class1_data[:, 0:class1_start]
class1_train_points = np.append(class1_train_points, class1_data[:, class1_end:], axis=1)
class2_test_points = class2_data[:, class2_start: class2_end]
class2_train_points = class2_data[:, 0:class2_start]
class2_train_points = np.append(class2_train_points, class2_data[:, class2_end:], axis=1)
a, b = hk.ho_kashyap(class1_train_points, class2_train_points)
class1_ones = np.ones(len(class1_test_points[0]))
class2_ones = np.ones(len(class2_test_points[0]))
print('Adding ones:')
class1_test_points = np.insert(class1_test_points, 0, class1_ones, axis=0)
class2_test_points = np.insert(class2_test_points, 0, class2_ones, axis=0)
print('Done')
class1_test_points = np.array(class1_test_points).transpose()
class2_test_points = -1*np.array(class2_test_points).transpose()
class1_true = 0
class1_false = 0
class2_true = 0
class2_false = 0
for x in class1_test_points:
classification = a.transpose() @ x
if classification > 0:
class1_true = class1_true + 1
else:
class1_false = class1_false + 1
for x in class2_test_points:
classification = a.transpose() @ x
if classification < 0:
class2_true = class2_true + 1
else:
class2_false = class2_false + 1
class1_accuracy = (class1_true / len(class1_test_points)) * 100
class2_accuracy = (class2_true / len(class2_test_points)) * 100
test_results_hk_class1 = np.append(test_results_hk_class1, class1_accuracy)
test_results_hk_class2 = np.append(test_results_hk_class2, class2_accuracy)
accuracy = (class1_true + class2_true) * 100 / (len(class1_test_points) + len(class2_test_points))
accuracies = np.append(accuracies, accuracy)
print(accuracy)
# hk.plot_disc(a[0],)
print('\nHo-Kashyap Average Accuracy:', np.mean(accuracies))
return test_results_hk_class1, test_results_hk_class2
| UTF-8 | Python | false | false | 20,142 | py | 31 | testing.py | 15 | 0.566528 | 0.527008 | 0 | 440 | 44.777273 | 151 |
irfankhan309/Dealer_2 | 13,082,470,409,108 | a0d1dbb6642769f52bad32653c9b039b425eaece | feb3431bf792d2e7ea17bd89b9feb8750dcf286f | /Dealer/DealerApp/models.py | 7e53f28209751f62b99c51799035606576bb1672 | []
| no_license | https://github.com/irfankhan309/Dealer_2 | 6275f964bccc04c71732bb0260e8e5f343f597b6 | 3cc9ad9097f12e75ed5f6ada05af1c6fed8a1fbd | refs/heads/master | 2020-05-01T01:54:05.318100 | 2019-03-22T20:37:04 | 2019-03-22T20:37:04 | 177,205,879 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
# Create your models here.
class PostEnquiry(models.Model):
    Name = models.CharField(max_length=40)
    uploaded_at = models.DateTimeField(auto_now_add=True)
    Vehicle = models.CharField(max_length=40)
    BIKE_Model = models.CharField(max_length=35)
    Color = models.CharField(max_length=35)
    Contact_Number = models.CharField(max_length=15)
class Sale(models.Model):
    Name_Of_Bike = models.CharField(max_length=45)
    Image = models.FileField()
    Model = models.CharField(max_length=30)
    Color = models.CharField(max_length=30)
    Description = models.CharField(max_length=100, blank=True)
| UTF-8 | Python | false | false | 622 | py | 14 | models.py | 8 | 0.750804 | 0.720257 | 0 | 18 | 33.555556 | 60 |
pallabpain/programming-problems | 18,700,287,638,832 | 4308118e33091f57987b99e49804590fc2502871 | b71bb819113600c76d5c22f9660c8243b532ab87 | /longest_common_substring.py | a428e086fc7c59a84d1cbd85e24d9a6561f54fb7 | []
| no_license | https://github.com/pallabpain/programming-problems | be4cb18018e0d7772b6e1d75f4c598c6ed3a3caf | 1ffe0633bc0ae131e03350d8e5a16ad0bed5a223 | refs/heads/master | 2021-07-06T16:25:32.004406 | 2020-08-11T18:54:44 | 2020-08-11T18:54:44 | 163,048,031 | 0 | 0 | null | false | 2020-08-11T18:56:19 | 2018-12-25T05:12:51 | 2020-02-03T10:38:35 | 2020-08-11T18:55:54 | 5 | 0 | 0 | 0 | Python | false | false | from pprint import pprint
def longest_common_substring(A, B):
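    """Length of the longest common substring of A and B via dynamic programming.
    dp[i][j] holds the length of the longest common suffix of A[:i] and B[:j];
    the answer is the maximum entry of the table.
    """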
len_A = len(A)
len_B = len(B)
dp = [[0 for _ in range(len_B + 1)] for _ in range(len_A + 1)]
max_length = 0
for i in range(len_A + 1):
for j in range(len_B + 1):
if i == 0 or j == 0:
dp[i][j] = 0
elif A[i-1] == B[j-1]:
dp[i][j] = dp[i-1][j-1] + 1
max_length = max(max_length, dp[i][j])
else:
dp[i][j] = 0
pprint(dp)
return max_length
if __name__ == "__main__":
A = "SomeRandomText"
B = "SomeMoreRandomText"
expected = 11 # eRandomText
actual = longest_common_substring(A, B)
if actual == expected:
print("Passed.")
else:
print("Failed.")
| UTF-8 | Python | false | false | 778 | py | 7 | longest_common_substring.py | 5 | 0.473008 | 0.451157 | 0 | 28 | 26.785714 | 66 |
s14004/tek | 2,121,713,872,632 | dc2f2a6b6f18a6fc44374ac96e16ceef5839bc33 | 99c2ac6f6e631b32222eca717515fb7844619546 | /a/Animal.py | b0d09be3840f78bef8c65b256d290df781628bd1 | []
| no_license | https://github.com/s14004/tek | f9388e0ffe3dde2eb2d07f1e3911a57ea1848ad9 | ae8ffa3597152696d47c6b1095389da696cd58bc | refs/heads/master | 2021-01-10T21:50:45.639530 | 2015-06-26T04:40:10 | 2015-06-26T04:40:10 | 37,701,963 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Animal:
def __init__(self, name, voice):
self.name = name
self.voice = voice
def say(self):
print(self.voice)
class Dog(Animal):
pass
if __name__ == '__main__':
puppy = Dog(name='shima', voice='nyan!')
puppy.say() | UTF-8 | Python | false | false | 267 | py | 6 | Animal.py | 6 | 0.535581 | 0.535581 | 0 | 15 | 16.866667 | 44 |
Best1s/python_re | 15,341,623,216,068 | f7d811839f98410cf439b468cf1d39db7457e6b2 | cec0cdfbd057c2d2ba153aa6f163adb250565e9a | /python_web_spider/web_spider/data_Spider/random_ip.py | a6cf60215e6fd4c9e64193346e3b5efc350833e0 | []
| no_license | https://github.com/Best1s/python_re | 91117cd5b1f896c2b2f3987f1625663aa1952354 | abd526743c67a1bf72ddce39a0268b8e9fe15d26 | refs/heads/master | 2020-05-05T13:37:41.428881 | 2020-02-25T03:41:00 | 2020-02-25T03:41:00 | 180,086,606 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
def random_ip():
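    """Return a random dotted-quad IPv4-style string terminated by a newline."""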
ip = str(random.randint(0,255)) + '.' + str(random.randint(0,255)) + '.' \
+ str(random.randint(0,255)) + '.' + str(random.randint(0,255)) + '\n'
return ip
def write_ip1(num):
n = 1
with open ('ip1','w+') as ip1:
while True:
ip1.write(random_ip())
n += 1
if n > num:
break
ip1.close()
def write_ip2(num):
with open ('ip2','w+') as ip2:
for i in range(num):
ip2.write(random_ip())
ip2.close()
if __name__ == '__main__':
print random_ip()
num = 100
num = num / 2
write_ip1(num)
write_ip2(num)
| UTF-8 | Python | false | false | 617 | py | 120 | random_ip.py | 91 | 0.518639 | 0.463533 | 0 | 27 | 21.222222 | 77 |
uncharted-distil/simon | 15,264,313,778,932 | 50ed987345c7ac65707b8278b43ece4c810dd7ce | dc42c2638262502ce0cbc003d8cc6e8298ef5fac | /Simon/dev/graphutils/getFromDatalake.py | a92350ee7370e8ff5d287bd28d1c1a386f3741f8 | [
"MIT"
]
| permissive | https://github.com/uncharted-distil/simon | 0d8722e7e031135571cdd09b7d8ffec844142ce8 | 26e4e54e6de455bde8ee1a24634d060e1ec7babb | refs/heads/master | 2021-12-01T11:33:42.697819 | 2021-03-25T03:47:59 | 2021-03-25T03:47:59 | 261,869,244 | 0 | 1 | MIT | true | 2021-03-25T03:47:59 | 2020-05-06T20:18:04 | 2021-01-27T14:53:30 | 2021-03-25T03:47:59 | 391,589 | 0 | 1 | 1 | null | false | false | import azure_utils.client as client
import graphutils.printSample as printSample
import graphutils.getConnection as gc
import graphutils.insertColumnDatasetJoin as insert
import pandas
import sys
import random
import pyodbc
def graphDoesntContainFile(filename,cnxn):
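    """Return True if no row named `filename` exists yet in the datasets table."""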
cursor = cnxn.cursor()
cursor.execute("SELECT top(1) * FROM datasets where name=?",filename)
name = cursor.fetchone()
return name == None
store_name = 'nktraining'
adl = client.get_adl_client(store_name)
files = adl.ls('training-data/CKAN')
random.shuffle(files)
cnxn = gc.getConnection()
i = 0
for file in files:
if(i > 1000):
break
if graphDoesntContainFile(file, cnxn):
try:
with adl.open(file, blocksize=2**20) as f:
if(file.startswith('training-data/CKAN/BroadcastLogs') or file.startswith('training-data/CKAN/barrownndremptyexemption')):
continue
if(file.endswith('csv')):
print("Loading (" + str(i) + "): " + file + " into metadata store")
frame = pandas.read_csv(f,nrows=3,sep=None)
# else:
# frame = pandas.read_excel(f)
for colName in frame.columns:
if not str(colName).startswith('Unnamed'):
insert.insertColumnDatasetJoin(colName, file, cnxn)
i = i + 1
cnxn.commit()
except UnicodeEncodeError:
print("Failed to parse filename")
except ValueError:
print("Encountered poorly formatted file: " + str(file))
except TypeError:
print("Encountered bad delimiter in: " + str(file))
except:
print("It broke and I don't know why, possibly something about newlines " + str(file))
print(sys.exc_info())
else:
print("Skipping " + file + " because it is already in db")
printSample.printSample(cnxn.cursor())
cnxn.close() | UTF-8 | Python | false | false | 1,696 | py | 50 | getFromDatalake.py | 27 | 0.706958 | 0.700472 | 0 | 63 | 25.936508 | 126 |
loloxwg/PythonExperiments | 10,934,986,780,085 | 3172b0d39c3b58d71dfb6cf01bda9a4bb3565b8e | d86c072cccd474a9e63498b7c143d30860a10852 | /experiment1/hello.py | 626ba563268a33d2253debb624392bc73d576000 | []
| no_license | https://github.com/loloxwg/PythonExperiments | 4003b95346f1ba85075d72006e1010377730efbb | 1812d377ca7f5211c890e8cc9e6b5e9102729d05 | refs/heads/main | 2023-02-15T23:16:05.740507 | 2020-12-29T09:12:46 | 2020-12-29T09:12:46 | 307,132,580 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
print("123.456\n"
"hello\n"
"who do you think i am?\n"
"'nice guy'\n"
"On,yes,i am a\n"
"nice guy")
| UTF-8 | Python | false | false | 131 | py | 102 | hello.py | 90 | 0.458015 | 0.412214 | 0 | 6 | 20.666667 | 32 |
panggggg/TDD | 1,752,346,668,382 | 5273cb4934621bd8f1a58f311a8f122236541f40 | 66bf25e702479199d357a2e4c827087220153c3a | /fizzbuzz.py | 63c034c6243f374b4d381ac6c784fbde89802e03 | []
| no_license | https://github.com/panggggg/TDD | 0e797b9380a46722c398d6344690c583d889bbb0 | 8803ced7b0df80acd58b65437f4d525b0231865e | refs/heads/master | 2023-08-11T22:35:02.995309 | 2021-09-27T09:05:24 | 2021-09-27T09:05:24 | 371,554,987 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # หาร3ลงตัว -> fizz , หาร5ลงตัว -> buzz ถ้าไม่ใช่ return ตัวเลข
# 3, 6, 9 -> fizz
# 5, 10, 20 -> buzz
# 15, 30, 45 -> fizzbuzz
# 1, 2, 4 -> number
def fizzbuzz(num):
result = get_result_buzz(num, str(num))
if is_divide_by_three(num):
result = "fizz"
result += get_result_buzz(num, "")
return result
def is_divide_by_three(num):
return num % 3 == 0
def get_result_buzz(num, default):
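    # num % 5 indexes a lookup list: remainder 0 -> "buzz", any other remainder -> default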
result = ["buzz", default, default, default, default]
return result[num % 5]
# result = [
# "buzz",
# default,
# default,
# default,
# default,
# ]
# return result[num % 5]
# if num % 15 == 0:
# return "fizzbuzz"
# if num % 3 == 0:
# return "fizz"
# if num % 5 == 0:
# return "buzz"
# if num % 2 == 0:
# return "bang"
# return str(num)
# for i in range(10):
# print(i, fizzbuzz(i))
| UTF-8 | Python | false | false | 990 | py | 19 | fizzbuzz.py | 16 | 0.501078 | 0.46444 | 0 | 50 | 17.56 | 63 |
mart00n/introto6.00 | 893,353,221,773 | a41d84af340805ab52607c710494320d8a0026ec | 776367ad388fc3452b2da5f70d1792d3b584e710 | /ps1/ps1c_redo.py | 0809cc4679588814ab38da85179410d5c8ef317e | []
| no_license | https://github.com/mart00n/introto6.00 | e3bc0f72ff47ca53e3180fc6234ebac3adc589d5 | ea0e828d063a94bf521bb0db471144bcb9e25d07 | refs/heads/master | 2021-01-10T07:17:24.702867 | 2017-02-12T17:44:15 | 2017-02-12T17:44:15 | 36,614,728 | 0 | 1 | null | false | 2017-01-29T16:26:50 | 2015-05-31T17:49:35 | 2017-01-14T21:52:17 | 2017-01-29T16:26:50 | 420 | 0 | 1 | 0 | Python | null | null | # mart00n
# 10/09/2016
eps = 0.01
bal = float(input('Enter balance: '))
intrate = float(input('Enter your annual interest rate: '))
monthrate = intrate / 12.0
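# Bisection bounds for the monthly payment: low ignores interest entirely,
# hi assumes the whole balance compounds for a full year.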
low = bal / 12.0
hi = (bal * (1.0 + monthrate) ** 12.0) / 12.0
loopbal = bal
payment = (hi - low) / 2.0
while abs(loopbal) >= eps:
for i in range(1,13):
loopbal = loopbal * (1.0 + monthrate) - payment
if loopbal < -eps:
hi = payment
loopbal = bal
payment = (hi - low) / 2.0 + low
elif loopbal > eps:
low = payment
loopbal = bal
payment = (hi - low) / 2.0 + low
else:
break
print('Pay', payment, 'per month to pay off your debt within 1 year.')
| UTF-8 | Python | false | false | 689 | py | 12 | ps1c_redo.py | 12 | 0.560232 | 0.503628 | 0 | 27 | 24.444444 | 70 |
chaitanyanettem/code-challenges | 4,690,104,327,768 | 0b1c06bed49552be156e20c45dcec21f56f9c3a8 | 15303640ce88b6610367bab723ddb89c764b58d0 | /clever/authorization.py | fa5364180f974d699f829d521321aa094be0a006 | []
| no_license | https://github.com/chaitanyanettem/code-challenges | 693f862577b968d9f077b3dc248517d308df9143 | 13d4029c1f293e96c47f085e2659eda884dde1fa | refs/heads/master | 2021-05-16T02:56:41.772970 | 2014-05-01T13:56:03 | 2014-05-01T13:56:03 | 15,516,277 | 1 | 4 | null | false | 2017-01-25T06:52:44 | 2013-12-29T22:49:12 | 2014-05-01T13:56:14 | 2014-05-01T13:56:19 | 148 | 0 | 1 | 1 | C | null | null | header = {'Authorization' : 'Bearer DEMO_TOKEN'}
base_url = 'https://api.clever.com'
rel_uri = '/v1.1/sections' | UTF-8 | Python | false | false | 111 | py | 5 | authorization.py | 2 | 0.675676 | 0.657658 | 0 | 3 | 36.333333 | 48 |
whitney-mitchell/python--family-dictionary | 7,421,703,523,367 | 1755cd675b948ec7e45d1bd8e62ef843a50ba303 | e2cb86ba1d62c126663ac2c189cc3634d570407e | /family_dict.py | 17da2ef17a29b0977ff8c0b7b63799676c2a5202 | []
| no_license | https://github.com/whitney-mitchell/python--family-dictionary | 96baca8bacec144b4b35073fc95135e57701435e | 5856e5658c9911b75e01cd4033554bb334bc2cc2 | refs/heads/master | 2021-01-20T18:29:00.136635 | 2016-07-08T19:51:40 | 2016-07-08T19:51:40 | 62,907,458 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Define a dictionary that contains information about several members of your family.
my_family = { 'cat': { 'name': 'Georgia', 'age': 13 },
'mother': { 'name': 'Mary', 'age': 66 },
'boyfriend': { 'name': 'Jesse', 'age': 30 }
}
# Using a dictionary comprehension, produce output that looks like the following example.
# Krista is my sister and is 42 years old.
# Helpful hint: To convert an integer into a string in Python, it's str(integer_value)
# for key, value in my_family.items():
# name = value['name']
# age = value['age']
# output = ['{0} is my {1} and is {2} years old'.format(name, key, age)]
output = {value['name']+" is my "+key+" and is "+str(value['age'])+" years old." for key, value in my_family.items()}
print(output)
import code
code.interact(local=locals())
| UTF-8 | Python | false | false | 805 | py | 1 | family_dict.py | 1 | 0.64472 | 0.631056 | 0 | 20 | 39.25 | 117 |
bernardoduran95/Coursera | 13,443,247,667,288 | ffef7a245ee1b16bf89de3c8975f844694fa644a | bcfe4be80262c90ab27c492ec931ed0dbcc156af | /Dados (2).py | 7665b18a1d84b5b307cedb9c0c5ef7d8fc7e3b06 | []
| no_license | https://github.com/bernardoduran95/Coursera | d000cf4afb302b39ae687fb705aaddfd9073b1b1 | f98c666166755d7fcbe35e4bc272b11d97ef44b0 | refs/heads/main | 2023-08-19T11:22:57.482284 | 2021-09-02T15:11:25 | 2021-09-02T15:11:25 | 402,456,923 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
respuesta = input("Desea lanzar los dados?(si/no): ")
suma = 0
while respuesta == 'si' or respuesta == 'SI':
n1 = random.randint(1,6)
n2 = random.randint(1,6)
suma = n1 + n2
print("Lanzamiento N°1: ", n1)
print("Lanzamiento N°2: ", n2)
print("La suma de los lanzamientos es: ", suma)
respuesta = input("Desea seguir lanzando los dados?: ")
else:
print("Fin del Programa")
| UTF-8 | Python | false | false | 450 | py | 4 | Dados (2).py | 3 | 0.587054 | 0.558036 | 0 | 18 | 22.777778 | 59 |
underdogYnino/mysite | 7,249,904,819,678 | 5176b3d8a0b55c604e1f0ba2e2905b919e92eef4 | 26bd16e3c3a4386a7a7ebc598d01957746d71528 | /upload/migrations/0003_auto_20201129_1341.py | 69afd3420a0e1fe88a5894abb451e6cc120f9c09 | []
| no_license | https://github.com/underdogYnino/mysite | d035afe7e51db065f6ecb23416b37ae1fba7c0bc | 9aa75c4444388c11ae3579d2d201445bef079f03 | refs/heads/main | 2023-01-31T13:26:25.258192 | 2020-12-13T09:02:12 | 2020-12-13T09:02:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.1.3 on 2020-11-29 05:41
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('upload', '0002_auto_20201129_1338'),
]
operations = [
migrations.RenameModel(
old_name='upload_images',
new_name='uploadImage',
),
]
| UTF-8 | Python | false | false | 341 | py | 19 | 0003_auto_20201129_1341.py | 8 | 0.589443 | 0.498534 | 0 | 17 | 19.058824 | 47 |
bawigga/opencv_sandbox | 10,788,957,866,828 | 4486c306d901bffc9eae6d50dd70d61eeb8492c7 | be5ba307a5715b2e48344f65954ec7168ac7f138 | /facial_detection/detect.py | dd176cb112482430073e9a4f32e05c0403e87de2 | []
| no_license | https://github.com/bawigga/opencv_sandbox | 8887aee94bcc9e08c4f654e65d6b4799b9b98055 | a760746d60d4c5220f6ba41a5a96aac1c3f95e8c | refs/heads/master | 2016-09-05T13:30:18.415066 | 2015-05-14T04:57:13 | 2015-05-14T04:57:13 | 35,590,900 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import numpy as np
import cv2
cascadeFile = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'haarcascade_frontalface_default.xml')
faceCascade = cv2.CascadeClassifier(cascadeFile)
cam = cv2.VideoCapture(0)
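# Property ids 3 and 4 are CV_CAP_PROP_FRAME_WIDTH / CV_CAP_PROP_FRAME_HEIGHT in the OpenCV 2.x API.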
cam.set(3,640)
cam.set(4,480)
while(cam.isOpened()):
ret, frame = cam.read()
if ret==True:
# frame = cv2.flip(frame,0)
faces = faceCascade.detectMultiScale(
frame,
scaleFactor=1.2,
minNeighbors=5,
minSize=(30, 30),
flags=cv2.cv.CV_HAAR_SCALE_IMAGE
)
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.imshow('frame',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
# Release everything if job is finished
cam.release()
cv2.destroyAllWindows() | UTF-8 | Python | false | false | 878 | py | 1 | detect.py | 1 | 0.585421 | 0.546697 | 0 | 36 | 23.416667 | 110 |
aschmid/bats_pitch_implementation | 16,295,105,935,166 | b20a949b29ccf8b9946a37dd32fcbcff1b5b766f | e124852138d1125f342867007a944c82d49eff95 | /bats_pitch_web/utils.py | 2165ce74b8a442455e461561c50c7f05bd955855 | []
| no_license | https://github.com/aschmid/bats_pitch_implementation | 08f7129fdda0402bf6fd601a5f332767ea59364b | 785af065d18dde8b8534b63a81bd3774883b5d2b | refs/heads/master | 2021-01-21T23:23:26.838571 | 2017-01-29T23:19:30 | 2017-01-29T23:19:30 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from collections import OrderedDict
from bats_pitch.message_types import KNOWN_MESSAGE_TYPES
from bats_pitch.message_types.validator import get_message_type
__author__ = 'Dominic Dumrauf'
UNRECOGNIZED_LINES = 'Unrecognized Lines'
TOTAL_MESSAGES = 'Number of Lines'
def _get_line(l):
"""
Removes a leading 'S' if present in the given line 'l'.
"""
if l.startswith('S'):
return l[1:]
else:
return l
def _get_new_analysis_dict():
"""
Returns a dictionary which contains all known messages and an initial count
of zero messages for each type.
"""
detected_messages = OrderedDict()
for known_message_type in KNOWN_MESSAGE_TYPES:
detected_messages[known_message_type.name] = 0
detected_messages[UNRECOGNIZED_LINES] = 0
detected_messages[TOTAL_MESSAGES] = 0
return detected_messages
def analyze_stream(stream):
"""
Analyzes a given 'stream' and creates a statistic about the number of message
types in the stream.
"""
detected_messages = _get_new_analysis_dict()
analysis = []
for line_nr, raw_line in enumerate(stream):
clean_line = _get_line(raw_line)
detected_messages_type = get_message_type(clean_line)
if detected_messages_type:
detected_messages[detected_messages_type.name] += 1
else:
detected_messages[UNRECOGNIZED_LINES] += 1
detected_messages[TOTAL_MESSAGES] += 1
analysis.append({
'line_nr': line_nr,
'raw_line': raw_line,
'clean_line': clean_line,
'detected_messages_type': detected_messages_type,
})
return detected_messages, analysis
| UTF-8 | Python | false | false | 1,689 | py | 55 | utils.py | 48 | 0.649497 | 0.645352 | 0 | 54 | 30.277778 | 81 |
openstack/murano | 309,237,693,125 | 54857b19632f3bd5fc58210cff32103bf2167345 | b26f8032f3ffb23a5d8cb7e9d470d718fd505870 | /murano/tests/unit/dsl/test_gc.py | 92573244bc75f854785fd97e5417f74b67de4c86 | [
"Apache-2.0"
]
| permissive | https://github.com/openstack/murano | e678ced3a52056317447aa90c7b3ae0d78d59a06 | c898a310afbc27f12190446ef75d8b0bd12115eb | refs/heads/master | 2023-08-29T11:52:02.745223 | 2023-05-09T04:19:01 | 2023-05-09T04:19:01 | 9,971,852 | 94 | 63 | Apache-2.0 | false | 2021-02-07T06:04:46 | 2013-05-10T01:10:31 | 2021-02-07T03:32:23 | 2021-02-07T03:32:49 | 19,347 | 105 | 64 | 0 | Python | false | false | # Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from murano.dsl import exceptions
from murano.dsl.principal_objects import garbage_collector
from murano.tests.unit.dsl.foundation import object_model as om
from murano.tests.unit.dsl.foundation import test_case
class TestGC(test_case.DslTestCase):
def setUp(self):
super(TestGC, self).setUp()
self.package_loader.load_package('io.murano', None).register_class(
garbage_collector.GarbageCollector)
self.runner = self.new_runner(om.Object('TestGC'))
def test_model_destroyed(self):
model = om.Object(
'TestGCNode', 'root',
value='root',
nodes=[
om.Object(
'TestGCNode', 'node1',
value='node1',
nodes=['root', 'node2']
),
om.Object(
'TestGCNode', 'node2',
value='node2',
nodes=['root', 'node1']
),
]
)
model = {'Objects': None, 'ObjectsCopy': model}
self.new_runner(model)
self.assertCountEqual(['node1', 'node2'], self.traces[:2])
self.assertEqual('root', self.traces[-1])
def test_collect_from_code(self):
self.runner.testObjectsCollect()
self.assertEqual(['B', 'A'], self.traces)
def test_collect_with_subscription(self):
self.runner.testObjectsCollectWithSubscription()
self.assertEqual(
['Destroy A', 'Destroy B', 'Destruction of B', 'B', 'A'],
self.traces)
def test_call_on_destroyed_object(self):
self.assertRaises(
exceptions.ObjectDestroyedError,
self.runner.testCallOnDestroyedObject)
self.assertEqual(['foo', 'X'], self.traces)
def test_destruction_dependencies_serialization(self):
self.runner.testDestructionDependencySerialization()
node1 = self.runner.serialized_model['Objects']['outNode']
node2 = node1['nodes'][0]
deps = {
'onDestruction': [{
'subscriber': self.runner.root.object_id,
'handler': '_handler'
}]
}
self.assertEqual(deps, node1['?'].get('dependencies'))
self.assertEqual(
node1['?'].get('dependencies'),
node2['?'].get('dependencies'))
model = self.runner.serialized_model
model['Objects']['outNode'] = None
self.new_runner(model)
self.assertEqual(['Destroy A', 'Destroy B', 'B', 'A'], self.traces)
def test_is_doomed(self):
self.runner.testIsDoomed()
self.assertEqual([[], True, 'B', [True], False, 'A'], self.traces)
def test_is_destroyed(self):
self.runner.testIsDestroyed()
self.assertEqual([False, True], self.traces)
def test_static_property_not_destroyed(self):
self.runner.testStaticProperties()
self.assertEqual([], self.traces)
def test_args_not_destroyed(self):
self.runner.testDestroyArgs()
self.assertEqual([], self.traces)
def test_runtime_property_not_destroyed(self):
self.runner.testReachableRuntimeProperties()
self.assertEqual([False, ], self.traces)
| UTF-8 | Python | false | false | 3,830 | py | 674 | test_gc.py | 262 | 0.602611 | 0.596084 | 0 | 107 | 34.794393 | 78 |
chahushui/zhihu-monitor | 14,044,543,094,147 | 996dd6a6a11d3e6a5e9da6e5583c1d1c52296c16 | b6419a3ddacdf528bce5382da315b48ca75af8fd | /api/app/resources/crawler.py | 742c8e60effe9ac0c463da397c470cbff2ce9f23 | []
| no_license | https://github.com/chahushui/zhihu-monitor | ca168237475f301ef18d0067dcfeaab6e414b574 | 35b1c7fc89a37ba19ecf5dc334d245c97e1ebd70 | refs/heads/master | 2023-03-31T20:52:12.726175 | 2021-03-21T07:48:53 | 2021-03-21T07:48:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# encoding: utf-8
import datetime
from copy import deepcopy
from app.crawler.crawler_task import update_data
from app.extensions import scheduler
from flask_restful import reqparse
from app.resources import BaseResource
class Crawler(BaseResource):
def __init__(self):
super(Crawler, self).__init__()
        # Expected request data types
        self.parser = reqparse.RequestParser()
        # GET request query parameters
# self.parser.add_argument('page', type=int, location='args')
# self.parser.add_argument('size', type=int, location='args')
# self.fields = deepcopy(base_settings.answers_fields)
def get(self):
response_data = deepcopy(self.base_response_data)
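        # Schedule a one-off crawl ("date" trigger) to start about 5 seconds from now.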
scheduler.add_job(func=update_data, id="start_crawler", trigger="date",
next_run_time=datetime.datetime.now() + datetime.timedelta(seconds=5))
return response_data, 200
| UTF-8 | Python | false | false | 936 | py | 32 | crawler.py | 20 | 0.66849 | 0.66302 | 0 | 27 | 32.851852 | 96 |
jumbokh/pyclass | 4,569,845,250,075 | ea614bd0ea875f5ea7ae3edd0559146fa6ece8b7 | 9fc768c541145c1996f2bdb8a5d62d523f24215f | /code/HomeWork/ch5/H_5_5.py | 90325c1d4155513c8efa8323fdad423ca6361f65 | []
| no_license | https://github.com/jumbokh/pyclass | 3b624101a8e43361458130047b87865852f72734 | bf2d5bcca4fff87cb695c8cec17fa2b1bbdf2ce5 | refs/heads/master | 2022-12-25T12:15:38.262468 | 2020-09-26T09:08:46 | 2020-09-26T09:08:46 | 283,708,159 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # H_5_5.py 功能:輸入頭和腳的數量,並判斷出有多少馴鹿及聖誕老人
# Read the number of heads and feet
head = int(input('請輸入頭的數量 : '))
foot = int(input('請輸入腳的數量 : '))
# Work out how many reindeer and how many Santas there are
reindeer = (foot/2) - head
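# With r reindeer (4 legs each) and s Santas (2 legs each): heads = r + s and feet = 4r + 2s, so r = feet/2 - heads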
Santa = head - reindeer
# Display the results
print('聖誕老人有 : %d 位' %(Santa))
print('馴鹿有 : %d 隻' %(reindeer)) | UTF-8 | Python | false | false | 403 | py | 194 | H_5_5.py | 181 | 0.649402 | 0.63745 | 0 | 10 | 24.2 | 37 |
zcmail/vbpp | 2,671,469,673,086 | d239cbb458a9d6a8278f224680ef2d2f1fb3ea7a | 401aae1a063e98e2c98ba366e1a17f36f2bedb5c | /tests/test_Gtilde.py | ae5f30574a76717f8bceee28d1b490f3c1a29ed7 | [
"Apache-2.0"
]
| permissive | https://github.com/zcmail/vbpp | 66df32f2d6268a16e8033c7a7b6871ffa9040296 | 00668f3b84b62a9ecf1f580630e8bb59df38ba87 | refs/heads/master | 2022-04-20T01:44:59.946539 | 2020-01-08T16:51:02 | 2020-01-08T16:51:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import pytest
import tensorflow as tf
from vbpp.Gtilde import np_Gtilde_lookup, tf_Gtilde_lookup
class Data:
z = - np.concatenate([np.random.randn(101)**2,
10**np.random.uniform(0, 11, 1000),
np.r_[0.0, 0.001, 1.0, 1.001, 1.01, 10.0, 11.0]])
z.sort()
def test_Gtilde_errors_for_positive_values():
with pytest.raises(ValueError):
np_Gtilde_lookup(np.r_[0.1, -0.1, -1.2])
def test_Gtilde_at_zero():
npG, _ = np_Gtilde_lookup(0.0)
assert np.allclose(npG, 0.0)
def test_Gtilde_with_scalar():
z = np.float64(- 12.3) # give explicit type so np and tf match up
npG, _ = np_Gtilde_lookup(z)
tfG = tf_Gtilde_lookup(z).numpy()
assert npG == tfG
@pytest.mark.parametrize('shape', [(-1,), (-1, 1), (-1, 2), (2, -1)])
def test_Gtilde(shape):
z = Data.z.reshape(shape)
npG, _ = np_Gtilde_lookup(z)
assert npG.shape == z.shape
tfG = tf_Gtilde_lookup(z).numpy()
assert tfG.shape == z.shape
np.testing.assert_equal(npG, tfG, "tensorflowed should equal numpy version")
if shape == (-1,):
assert list(npG) == sorted(npG), "Gtilde should be monotonous"
def test_Gtilde_gradient_matches():
z = Data.z
_, npgrad = np_Gtilde_lookup(z)
assert npgrad.shape == z.shape
z_tensor = tf.identity(z)
with tf.GradientTape() as tape:
tape.watch(z_tensor)
tf_res = tf_Gtilde_lookup(z_tensor)
tfgrad = tape.gradient(tf_res, z_tensor).numpy()
assert tfgrad.shape == z.shape
np.testing.assert_equal(npgrad, tfgrad, "tensorflowed should equal numpy version")
| UTF-8 | Python | false | false | 1,628 | py | 5 | test_Gtilde.py | 5 | 0.617322 | 0.58231 | 0 | 48 | 32.916667 | 86 |
x1001000/BERT_NLU | 11,759,620,495,374 | da9677807e382ee419dc740b6bbca672cf8dd9b2 | e17fa313bbc98b82fa9166635d2c6b29f7cafae1 | /BERT_run_classifier.py | cdc7d0d251eb23ecde5467d89b88a2a9ef5de34a | []
| no_license | https://github.com/x1001000/BERT_NLU | 2c09c212fa829176f13864f81b01bd448bc07066 | d6e5769d7493730657d4434faaabbf3cd25703e1 | refs/heads/master | 2020-11-28T11:44:15.802489 | 2019-11-06T06:00:14 | 2019-11-06T06:00:14 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #run_classifier.py
class C(DataProcessor):
"""Processor for Demo data set."""
def __init__(self):
self.labels = set()
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
# return list(self.labels)
return ["fashion", "houseliving","game"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[0])
self.labels.add(label)
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
# DemoProcessor
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"xnli": XnliProcessor,
"demo": C,
}
# Run (shell commands, kept as notes rather than executable Python):
#
# export BERT_Chinese_DIR=chinese_L-12_H-768_A-12
# export Demo_DIR=input
# python3 run_classifier.py \
#   --task_name=demo \
#   --do_train=true \
#   --do_eval=true \
#   --data_dir=$Demo_DIR \
#   --vocab_file=$BERT_Chinese_DIR/vocab.txt \
#   --bert_config_file=$BERT_Chinese_DIR/bert_config.json \
#   --init_checkpoint=$BERT_Chinese_DIR/bert_model.ckpt \
#   --max_seq_length=128 \
#   --train_batch_size=32 \
#   --learning_rate=2e-5 \
#   --num_train_epochs=3.0 \
#   --output_dir=Demo_output
#
# export BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12
# export Demo_DIR=input
# export TRAINED_CLASSIFIER=Demo_output
# python3 run_classifier.py \
#   --task_name=demo \
#   --do_predict=true \
#   --data_dir=$Demo_DIR \
#   --vocab_file=$BERT_Chinese_DIR/vocab.txt \
#   --bert_config_file=$BERT_Chinese_DIR/bert_config.json \
#   --init_checkpoint=$BERT_Chinese_DIR/bert_model.ckpt \
#   --max_seq_length=128 \
#   --output_dir=test_output
| UTF-8 | Python | false | false | 2,427 | py | 1 | BERT_run_classifier.py | 1 | 0.609806 | 0.597445 | 0 | 88 | 26.568182 | 81 |
HsOjo/PyJSONEditor | 19,301,583,038,442 | eb2c13c7a8d87f6f61cebe4ddfa10eceab952aac | 5b3090dece7d3d276922f53bfba18fdff3a5ba12 | /app/config.py | 78537a503e7bcc7c1423585e4736732f3ec285c5 | [
"MIT"
]
| permissive | https://github.com/HsOjo/PyJSONEditor | 338978b36a545982bec7285ba1de9aa5704f39b0 | c2cf5398fa569ba0575048f3deebbf23028a61a1 | refs/heads/master | 2020-06-30T00:35:40.215143 | 2019-10-15T11:27:01 | 2019-10-15T11:27:01 | 200,668,517 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from app.base.config import ConfigBase
class Config(ConfigBase):
_protect_fields = [
'baidu_app_id',
'baidu_key',
]
baidu_app_id = ''
baidu_key = ''
| UTF-8 | Python | false | false | 183 | py | 12 | config.py | 10 | 0.562842 | 0.562842 | 0 | 10 | 17.3 | 38 |
siddharthcurious/Pythonic3-Feel | 10,222,022,200,262 | a9767ff8584097a72971d1e8644b417eb926a01d | 78d23de227a4c9f2ee6eb422e379b913c06dfcb8 | /LeetCode/846.py | 384d03f6466286d2c3fad09a5cdd6413b61dcffb | []
| no_license | https://github.com/siddharthcurious/Pythonic3-Feel | df145293a3f1a7627d08c4bedd7e22dfed9892c0 | 898b402b7a65073d58c280589342fc8c156a5cb1 | refs/heads/master | 2020-03-25T05:07:42.372477 | 2019-09-12T06:26:45 | 2019-09-12T06:26:45 | 143,430,534 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from collections import Counter
class Solution:
def isNStraightHand(self, hand, W):
"""
:type hand: List[int]
:type W: int
:rtype: bool
"""
L = len(hand)
if L%W != 0:
return False
counter = Counter(hand)
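        # Greedily build one group of W consecutive values starting from the current smallest card.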
while counter:
tmin = min(counter)
for k in range(tmin, tmin+W):
v = counter.get(k)
if not v:
return False
if v == 1:
del counter[k]
else:
counter[k] = v-1
return True
if __name__ == "__main__":
s = Solution()
hand = [1, 2, 3, 6, 2, 3, 4, 7, 8]
W = 3
s.isNStraightHand(hand, W) | UTF-8 | Python | false | false | 746 | py | 360 | 846.py | 343 | 0.41689 | 0.399464 | 0 | 33 | 21.636364 | 41 |
Saket-mangalam/ESwingGolf | 10,127,532,925,612 | 1c3cdba7454e74f22e5bef994c6eda258dbcac10 | bd1958595f8524b423beb3dbde0f3b93cdd1f790 | /Testset/matcher.py | ea9460d11d68af1a3b639c91e8abd796bbb83539 | []
| no_license | https://github.com/Saket-mangalam/ESwingGolf | 0d42a35e1248c9e8748b39aa93e2327519c2ef4d | 2f48d314431be498d7f29594b35dacd62071e823 | refs/heads/master | 2020-04-13T03:05:40.834055 | 2019-03-19T02:55:37 | 2019-03-19T02:55:37 | 162,921,387 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ''' author: saket'''
import os
import utils
import time
import cv2
import numpy as np
import torch
from get_args import get_args
from datetime import datetime
from tqdm import tqdm
from process_functions import *
# different file names
left_image_suffix = "im0.png"
left_gt_suffix = "disp0GT.pfm"
right_image_suffix = "im1.png"
right_gt_suffix = "disp1GT.pfm"
calib_suffix = "calib.txt"
out_file = "disp0MCCNN.pfm"
out_img_file = "disp0MCCNN.pgm"
out_time_file = "timeMCCNN.txt"
def main():
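    """Run the MC-CNN stereo pipeline on every image pair listed in args.list_dir.
    For each pair it computes matching features, builds and aggregates the cost
    volumes, runs semi-global matching, predicts and refines the disparity map,
    and writes the pfm/pgm/time outputs next to the left image.
    """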
args = get_args()
# Decide which device we want to run on
device = torch.device("cuda:0" if (torch.cuda.is_available() and args.ngpu > 0) else "cpu")
patch_height = args.patch_size
patch_width = args.patch_size
######################
left_image_list = args.list_dir
#save_dir = args.save_dir
#data_dir = args.data_dir
#save_res_dir = os.path.join(save_dir, "submit_{}".format(args.tag))
#save_img_dir = os.path.join(save_dir, "submit_{}_imgs".format(args.tag))
#util.testMk(save_res_dir)
#util.testMk(save_img_dir)
#index = 0
#start = args.start
#end = args.end
with open(left_image_list, "r") as i:
img_paths = i.readlines()
####################
# do matching
for left_path in tqdm(img_paths):
# get data path
left_path = left_path.strip()
right_path = left_path.replace(left_image_suffix, right_image_suffix)
calib_path = left_path.replace(left_image_suffix, calib_suffix)
# generate output path
out_path = left_path.replace(left_image_suffix, out_file)
out_time_path = left_path.replace(left_image_suffix, out_time_file)
out_img_path = left_path.replace(left_image_suffix, out_img_file)
height, width, ndisp = utils.parseCalib(calib_path)
print ("left_image: {}\nright_image: {}".format(left_path, right_path))
print ("height: {}, width: {}, ndisp: {}".format(height, width, ndisp))
#print "out_path: {}\nout_time_path: {}\nout_img_path: {}".format(out_path, out_time_path, out_img_path)
# reading images
left_image = cv2.imread(left_path, cv2.IMREAD_GRAYSCALE).astype(np.float32)
right_image = cv2.imread(right_path, cv2.IMREAD_GRAYSCALE).astype(np.float32)
left_image = (left_image - np.mean(left_image, axis=(0, 1))) / np.std(left_image, axis=(0, 1))
right_image = (right_image - np.mean(right_image, axis=(0, 1))) / np.std(right_image, axis=(0, 1))
left_image = np.expand_dims(left_image, axis=2)
right_image = np.expand_dims(right_image, axis=2)
assert left_image.shape == (height, width, 1)
assert right_image.shape == (height, width, 1)
print ("{}: images read".format(datetime.now()))
# start timer for time file
stTime = time.time()
# compute features
left_feature, right_feature = compute_features(left_image, right_image, patch_height, patch_width, args)
left_feature = np.array(left_feature.detach())
right_feature = np.array(right_feature.detach())
#print (left_feature.shape)
print ("{}: features computed".format(datetime.now()))
# form cost-volume
left_cost_volume, right_cost_volume = compute_cost_volume(left_feature, right_feature, ndisp)
print ("{}: cost-volume computed".format(datetime.now()))
# cost-volume aggregation
print ("{}: begin cost-volume aggregation. This could take long".format(datetime.now()))
left_cost_volume, right_cost_volume = cost_volume_aggregation(left_image, right_image, left_cost_volume,
right_cost_volume, \
args.cbca_intensity, args.cbca_distance,
args.cbca_num_iterations1)
print ("{}: cost-volume aggregated".format(datetime.now()))
# semi-global matching
print ("{}: begin semi-global matching. This could take long".format(datetime.now()))
left_cost_volume, right_cost_volume = SGM_average(left_cost_volume, right_cost_volume, left_image, right_image, \
args.sgm_P1, args.sgm_P2, args.sgm_Q1, args.sgm_Q2,
args.sgm_D, args.sgm_V)
print ("{}: semi-global matched".format(datetime.now()))
# cost-volume aggregation afterhand
print ("{}: begin cost-volume aggregation. This could take long".format(datetime.now()))
left_cost_volume, right_cost_volume = cost_volume_aggregation(left_image, right_image, left_cost_volume,
right_cost_volume, \
args.cbca_intensity, args.cbca_distance,
args.cbca_num_iterations2)
print ("{}: cost-volume aggregated".format(datetime.now()))
# disparity map making
left_disparity_map, right_disparity_map = disparity_prediction(left_cost_volume, right_cost_volume)
print ("{}: disparity predicted".format(datetime.now()))
# interpolation
left_disparity_map = interpolation(left_disparity_map, right_disparity_map, ndisp)
print ("{}: disparity interpolated".format(datetime.now()))
# subpixel enhancement
left_disparity_map = subpixel_enhance(left_disparity_map, left_cost_volume)
print ("{}: subpixel enhanced".format(datetime.now()))
# refinement
# 5*5 median filter
left_disparity_map = median_filter(left_disparity_map, 5, 5)
# bilateral filter
left_disparity_map = bilateral_filter(left_image, left_disparity_map, 5, 5, 0, args.blur_sigma,
args.blur_threshold)
print ("{}: refined".format(datetime.now()))
# end timer
endTime = time.time()
# save as pgm and pfm
utils.saveDisparity(left_disparity_map, out_img_path)
utils.writePfm(left_disparity_map, out_path)
utils.saveTimeFile(endTime - stTime, out_time_path)
print ("{}: saved".format(datetime.now()))
if __name__ == "__main__":
main() | UTF-8 | Python | false | false | 6,405 | py | 19 | matcher.py | 15 | 0.58548 | 0.578767 | 0 | 150 | 41.706667 | 121 |
EmuKit/emukit | 11,347,303,604,540 | f9abdcec8a95b6c4d26dcf18ba0c92700746d3f2 | 3a9865cfc313b461727358ea427bab898e0c3bf1 | /tests/emukit/bayesian_optimization/test_mean_plugin_expected_improvement.py | 47d4fbba96c500cbcc93a8662b4dbf20ee16ca3e | [
"Apache-2.0"
]
| permissive | https://github.com/EmuKit/emukit | 12df214366a989e85a03ead695ad36569fbf1f38 | 22e11c216840427d2d0766bddad25a0606e606bc | refs/heads/main | 2023-08-28T15:04:44.776523 | 2023-08-23T13:28:23 | 2023-08-23T13:28:23 | 147,290,595 | 274 | 66 | Apache-2.0 | false | 2023-08-23T13:28:25 | 2018-09-04T05:01:34 | 2023-08-15T06:35:23 | 2023-08-23T13:28:24 | 17,626 | 530 | 124 | 42 | Python | false | false | from unittest.mock import MagicMock
import numpy as np
import pytest
from emukit.bayesian_optimization.acquisitions.expected_improvement import (
ExpectedImprovement,
MeanPluginExpectedImprovement,
)
from emukit.core.interfaces import IModel, IModelWithNoise
from emukit.model_wrappers import GPyModelWrapper
class MockIModel(IModel):
def __init__(self, X, Y):
self._X = X
self._Y = Y
@property
def X(self):
return self._X
@property
def Y(self):
return self._Y
def deterministic_test_func(x: np.ndarray) -> np.ndarray:
return np.sin(x * 30 + x**2).sum(axis=-1, keepdims=True)
class MockNoiselessModel(MockIModel, IModelWithNoise):
"""
A mock model with zero observation noise (predict() and predict_noiseless() will return the
same predictive distribution).
This model mocks predictions for the deterministic_test_func() (the mean prediction will
be the same as function output).
"""
@staticmethod
def _mean_func(X):
return deterministic_test_func(X)
@staticmethod
def _var_func(X):
return (np.cos(X * 10) + 1.2).sum(axis=-1, keepdims=True)
def predict(self, X):
return self._mean_func(X), self._var_func(X)
def predict_noiseless(self, X):
return self.predict(X)
class MockConstantModel(MockIModel, IModelWithNoise):
"""Model the predicts the same output distribution everywhere"""
def predict(self, X):
# Return mean 1 and variance 8
return np.ones([X.shape[0], 1]), 8 * np.ones([X.shape[0], 1])
def predict_noiseless(self, X):
# Return mean 1 and variance 1
return np.ones([X.shape[0], 1]), np.ones([X.shape[0], 1])
def test_mean_plugin_ei_same_as_standard_on_noiseless():
np.random.seed(42)
X = np.random.randn(10, 3)
Y = deterministic_test_func(X)
model = MockNoiselessModel(X, Y)
mean_plugin_ei = MeanPluginExpectedImprovement(model)
standard_ei = ExpectedImprovement(model)
x_new = np.random.randn(100, 3)
## Assert the two expected improvement are equal
assert pytest.approx(standard_ei.evaluate(x_new)) == mean_plugin_ei.evaluate(x_new)
def test_mean_plugin_expected_improvement_returns_expected():
np.random.seed(43)
X = np.random.randn(10, 3)
Y = np.random.randn(10, 1)
model = MockConstantModel(X, Y)
mean_plugin_ei = MeanPluginExpectedImprovement(model)
x_new = np.random.randn(100, 3)
acquisition_values = mean_plugin_ei.evaluate(x_new)
# The mean at every previously observed point will be 1, hence y_minimum will be 1.0.
# The predicted values in the batch should all have mean 1 and variance 1
# The correct expected improvement for Gaussian Y ~ Normal(1, 1), and y_minimum = 1.0 is 0.3989422804014327
assert pytest.approx(0.3989422804014327, abs=0, rel=1e-7) == acquisition_values
| UTF-8 | Python | false | false | 2,897 | py | 331 | test_mean_plugin_expected_improvement.py | 252 | 0.679213 | 0.648481 | 0 | 98 | 28.55102 | 111 |
mbusc1/Python-Projects | 9,749,575,798,908 | eb3776d3bb10a533b7ec11a83f63817290ba77f5 | 876c0fcfcc6201ab36e3eefe61feac5053acd642 | /program3/pcollections.py | 56955da32323255b3b3f103885bd97ffd5323daf | []
| no_license | https://github.com/mbusc1/Python-Projects | 10ca8b6c229681007ff9af84fcdf6b964213deff | 0d28b8f3f1d82fdce958172b84a06c384c0d5d7b | refs/heads/master | 2021-01-09T04:27:57.604419 | 2020-02-21T23:17:03 | 2020-02-21T23:17:03 | 242,245,375 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Submitter: mbuscemi(Buscemi, Matthew)
# Partner : wbuscemi(Buscemi, William)
# We certify that we worked cooperatively on this programming
# assignment, according to the rules for pair programming
import re, traceback, keyword
def pnamedtuple(type_name, field_names, mutable=False, defaults={}):
def show_listing(s):
for line_number, line_text in enumerate( s.split('\n'),1 ):
print(f' {line_number: >3} {line_text.rstrip()}')
# put your code here
# bind class_definition (used below) to the string constructed for the class
#check that type_name is vaild
def _is_legal(name):
if type(name) != str:
raise SyntaxError("Name is not valid string. Use a charachter followed by any number of alphanumerics, which are not python keywords")
if name in keyword.kwlist:
raise SyntaxError("Name is not valid string. Use a charachter followed by any number of alphanumerics, which are not python keywords")
checked_name = re.search(r'^[a-zA-Z]\w*$',name)
if checked_name == None:
raise SyntaxError("Name is not valid string. Use a charachter followed by any number of alphanumerics, which are not python keywords")
else:
return(checked_name.group(0))
def _is_legal_list(names):
if type(names) == str:
checked_names = re.split(r'[, ]+',names)
for checked_name in checked_names:
#group 1 in a match
_is_legal(checked_name)
return checked_names
elif type(names) == list:
for name in names:
cn = re.search(r'^[a-zA-Z]\w*$',name)
if cn == None:
raise SyntaxError("Name is not valid string. Use a charachter followed by any number of alphanumerics, which are not python keywords")
return names
else:
raise SyntaxError("Name is not valid string. Use a charachter followed by any number of alphanumerics, which are not python keywords")
class_name = _is_legal(type_name)
class_fields = _is_legal_list(field_names)
#begin building class string
class_definition = f'''class {class_name}:
_fields = {class_fields}
_mutable = {mutable}
'''
#INIT
class_init = 'def __init__(self, {}):\n'.format(', '.join([f if f not in defaults.keys() else f'{f}={defaults[f]}' for f in class_fields]))
for f in class_fields:
class_init += f' self.{f} = {f}\n'
class_definition += class_init + '\n'
#REPR
arg_str = ','.join([f'{f}={{{f}}}' for f in class_fields])
f_str = ','.join([f'{f}=self.{f}' for f in class_fields])
class_repr=f" def __repr__(self):\n return '{class_name}({arg_str})'.format({f_str})\n\n"
class_definition += class_repr
#Simple Query
for f in class_fields:
class_definition += f''' def get_{f}(self):
return self.{f}
\n'''
#GET ITEM
class_definition += f''' def __getitem__(self,arg):
indexes = {class_fields}
if type(arg) == int and arg in range(len(indexes)):
cmd = f'self.get_{{indexes[arg]}}()'
return eval(cmd)
elif type(arg) == str and arg in indexes:
cmd = f'self.get_{{arg}}()'
return eval(cmd)
else:
raise IndexError('Argument is not a feild or is out of range')
\n'''
#equals
class_definition += f''' def __eq__(self,right):
if type(self) != type(right):
return False
if self.__dict__ != right.__dict__:
return False
return True
\n'''
#_asdict
class_definition += f''' def _asdict(self):
return dict(self.__dict__)
\n'''
#_make
class_definition += f''' def _make(iterable):
args = ','.join([str(x) for x in iterable])
cmd = f'{class_name}({{args}})'
return eval(cmd)
\n'''
#_replace
class_definition += f''' def _replace(self,**kargs):
for arg in kargs:
if arg not in {class_fields}:
raise TypeError("_replace arguments must match keyword arguments of class.")
if self._mutable:
for arg,val in kargs.items():
self.__dict__[arg] = val
else:
class_list = []
for key in {class_fields}:
if key in kargs:
class_list.append(kargs[key])
else:
class_list.append(self.__dict__[key])
return {class_name}._make(class_list)
\n'''
# When debugging, uncomment following line to show source code for the class
#show_listing(class_definition)
# Execute this class_definition, a str, in a local name space; then bind the
# the source_code attribute to class_definition; after try/except return the
# class object created; if there is a syntax error, list the class and
# also show the error
name_space = dict( __name__ = f'pnamedtuple_{type_name}' )
try:
exec(class_definition,name_space)
name_space[type_name].source_code = class_definition
except (TypeError,SyntaxError):
show_listing(class_definition)
traceback.print_exc()
return name_space[type_name]
if __name__ == '__main__':
# Test pnamedtuple below in script with Point = pnamedtuple('Point','x,y')
Point = pnamedtuple('Point','x,y')
#driver tests
import driver
driver.default_file_name = 'bscp3F19.txt'
# driver.default_show_exception= True
# driver.default_show_exception_message= True
# driver.default_show_traceback= True
driver.driver()
| UTF-8 | Python | false | false | 5,797 | py | 35 | pcollections.py | 22 | 0.574263 | 0.573055 | 0 | 160 | 35.23125 | 154 |
ignaciovillaverde/PythonInterpreter | 17,016,660,448,112 | c5a1cb6a2f39cf02c0a3394156a40f469afda692 | a7bf5a72c3565b2ecc48d2277f69c2e4c3b48dbe | /test/integrationTest/programs/if_else.py | 106d93faa34cb0fa16fc0931bd37d98ba7057d3e | []
| no_license | https://github.com/ignaciovillaverde/PythonInterpreter | 862a5ee1895487df20682746fbf7b3218fefe317 | 3a9c8b19d889d19dd6f10bda48772b7b38f075b3 | refs/heads/master | 2016-08-08T01:40:44.926259 | 2015-07-18T01:30:21 | 2015-07-18T01:30:21 | 37,035,841 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | a = "hola"
if a == "hola":
print "Correcto"
else:
print "Incorrecto"
a = 4
if a <= 4:
a = 5
print a
else:
print "Incorrecto"
a = "a"
b = "b"
if (a > b):
print "Incoreecto"
else:
print "Correcto"
if -4:
print "Correcto"
else:
print "Incoreecto"
| UTF-8 | Python | false | false | 253 | py | 74 | if_else.py | 67 | 0.600791 | 0.58498 | 0 | 21 | 11.047619 | 19 |
zrbruce/PythonCFD | 3,968,549,804,567 | cbdb3bbeea0670561e99cf23e5c588edd3e6061d | 0805f521d48e9a05138de022a320bf525b6377c7 | /Step5 - 2D Linear Convection.py | f34caa257717a27c3fb93e8b2c92bcf04da70e54 | []
| no_license | https://github.com/zrbruce/PythonCFD | 61a0ed80f3cc9c9a008979fcf5f6dd728eb6d814 | 6ecdb1a7ca73bda5c5cd281d7078fbe957c722e1 | refs/heads/master | 2021-06-03T19:34:43.474115 | 2016-03-12T19:16:54 | 2016-03-12T19:16:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #2D linear convection
from mpl_toolkits.mplot3d import Axes3D
import numpy
from matplotlib import pyplot
nx = 81
ny = 81
nt = 100
c = 1
dx = 2/(nx-1)
dy = 2/(ny-1)
sigma = 0.2
dt = sigma*dx
x = numpy.linspace(0,2,nx)
y = numpy.linspace(0,2,ny)
u = numpy.ones((ny,nx)) #create a 1xn vector of 1's
un = numpy.ones((ny,nx))
#assign initial conditions
u[int(.5/dy):int(1/dy+1), int(.5/dx):int(1/dx+1)] = 2 ##set hat function I.C. : u(.5<=x<=1 && .5<=y<=1 ) is 2
for n in range(nt + 1): #looping across time steps
un = u.copy()
u[1:,1:] = un[1:,1:] - (c*dt/dx*(un[1:,1:] - un[1:, :-1])) - (c*dt/dy*(un[1:,1:] - un[:-1,1:]))
u[0,:] = 1
u[-1,:] = 1
u[:,0] = 1
u[:,-1] = 1
#plot the solution after the final time step
fig = pyplot.figure(figsize = (11,7), dpi = 100)
ax = fig.gca(projection = '3d')
X, Y = numpy.meshgrid(x,y)
surf2 = ax.plot_surface(X,Y, u[:]) | UTF-8 | Python | false | false | 885 | py | 9 | Step5 - 2D Linear Convection.py | 8 | 0.544633 | 0.472316 | 0 | 40 | 21.15 | 99 |
anselus/server | 7,189,775,277,692 | 2e62bc9be1f73e54357d430e717d5d88b0f3c007 | 4290c4d9b75c12982b4e1fc1f9308998dece15fd | /utils/genkeypair.py | d0833ac780fc93607d610b62e4d98a5c9aef8ba2 | []
| no_license | https://github.com/anselus/server | 18f1a8ac78a6e0c662fb0531f4612317eb7e4780 | 6ea909ab0602205a176d09b82f4bc4891ac2990d | refs/heads/master | 2020-08-11T10:10:04.258015 | 2020-07-13T17:56:38 | 2020-07-13T17:56:38 | 214,547,219 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
import nacl.public
import nacl.secret
import nacl.utils
from os import path
import sys
def encode_file(file_name):
keypair = nacl.public.PrivateKey.generate()
pub_name = file_name + '.pub'
if path.exists(pub_name):
response = input("%s exists. Overwrite? [y/N]: " % pub_name)
if not response or response.casefold()[0] != 'y':
return
try:
out = open(pub_name, 'wb')
out.write(bytes(keypair.public_key))
except Exception as e:
print('Unable to save %s: %s' % (pub_name, e))
priv_name = file_name + '.priv'
try:
out = open(priv_name, 'wb')
out.write(bytes(keypair))
except Exception as e:
print('Unable to save %s: %s' % (priv_name, e))
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: %s <namebase>" % path.basename(sys.argv[0]))
else:
encode_file(sys.argv[1])
| UTF-8 | Python | false | false | 839 | py | 10 | genkeypair.py | 6 | 0.644815 | 0.638856 | 0 | 35 | 22.942857 | 62 |
dStass/programming_challenges | 17,016,660,452,789 | 85e52f706f0e6fbbe0657933c1e12d389528f57a | c9849593c53060bec8fbcea6275c0ba8e68ac968 | /HackerRank/wendy_and_bob/string_divisibility.py | 1cf58fd80be008e5dc4bb887d7bbc00140b7cc5e | []
| no_license | https://github.com/dStass/programming_challenges | f6e21ad42f71f05656a5966af24749539ab436bb | e17097c1808e418fc28d8ee627b23340e381df58 | refs/heads/master | 2023-04-30T13:55:38.589234 | 2022-08-21T12:43:39 | 2022-08-21T12:43:39 | 203,951,732 | 0 | 0 | null | false | 2023-04-21T20:43:42 | 2019-08-23T07:55:39 | 2022-08-21T12:43:45 | 2023-04-21T20:43:42 | 5,513 | 0 | 0 | 4 | Python | false | false | def findSmallestDivisor(s, t):
divisible = isDivisible(s, t)
if not divisible:
return -1
for i in range(1, len(t) + 1):
divisor = t[:i]
if isDivisible(s, divisor):
return len(divisor)
return len(t)
def isDivisible(s, t):
s_split = s.split(t)
divisible = True
if len(s_split) == 1:
return False
for each in s_split:
if each != '':
divisible = False
break
return divisible
s = 'rbrb'
t = 'rbrb'
print(findSmallestDivisor(s,t)) | UTF-8 | Python | false | false | 544 | py | 119 | string_divisibility.py | 118 | 0.544118 | 0.536765 | 0 | 26 | 19.961538 | 35 |
ianagpawa/json_builder | 8,048,768,730,645 | 973efe0983c0dd45ab18b7c4a2217752b366fb51 | 646af1afd978c13858b576ad2d3e34a63a9e8866 | /Project.py | 195e4dc6e918a613dab0d23a312e5292c119fb8a | []
| no_license | https://github.com/ianagpawa/json_builder | 2c1d490f6d71f09204f55140cf28e45ee440de39 | 270e3e80072deb8063e0bf8b0ee14d3f18d4e545 | refs/heads/master | 2020-04-26T16:13:25.100980 | 2019-03-05T03:19:03 | 2019-03-05T03:19:03 | 173,671,391 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Project:
def __init__(self, title, summary, description, link, github, tech, name, date):
self.title = title
self.summary = summary
self.description = description
self.link = link
self.github = github
self.tech = tech
self.name = name
self.date = date
def details(self):
return {
"title": self.title,
"summary": self.summary,
"description": self.description,
"link": self.link,
"github": self.github,
"tech": self.tech,
"name": self.name,
"data": self.date
} | UTF-8 | Python | false | false | 654 | py | 7 | Project.py | 5 | 0.507645 | 0.507645 | 0 | 23 | 27.478261 | 84 |
schifzt/statistics_ML | 14,456,859,919,098 | 75f5a2d77cb7b75e9755ad3f7c122c9dd75bb4a0 | 11049c6f1a1b9bc223856fad6eefa2f5f4085463 | /exact-sparse-recovery/create_input.py | 42e5c49b98ecc7f0104a3c9cfb73f4e30770b592 | []
| no_license | https://github.com/schifzt/statistics_ML | 77341c32b5961ba959e98e34c74d22cca3b3fc77 | 2c61b0fdbbd785c8ecc4d666d24f94eb7c445f91 | refs/heads/master | 2023-03-03T19:16:09.337416 | 2023-02-17T15:48:27 | 2023-02-17T15:48:27 | 200,043,161 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
# -------------------------------------------------------------------------
# (rho, alpha)がrho < alphaを満たすならば、L0ノルム最小化により再構成可能
rho = 0.3 # sparsity parameter
alpha = 0.3 # dim_measurement / dim_signal
dim_signal = 100
dim_measurement = int(alpha*dim_signal)
is_integer = False
# -------------------------------------------------------------------------
np.set_printoptions(
formatter={'all':lambda x: '{:<10d}'.format(int(x)) if x == 0 else "{:.3f}".format(x)},
threshold=np.inf
)
# Create a true signal x0. dim = dim_signal
x0 = np.zeros(dim_signal)
K = 0
for n in range(dim_signal):
if np.random.rand() > rho:
x0[n] = np.random.normal(0, 1)
K += 1
if is_integer:
x0[n] = int(x0[n])
else:
pass
# Create measurement matrix A. dim = dim_measurement x dim_signal
mean = np.zeros(dim_measurement*dim_signal)
cov = np.identity(dim_measurement*dim_signal) * 1/dim_signal
A = np.random.multivariate_normal(mean, cov).reshape((dim_measurement, dim_signal))
if is_integer:
A = A.astype(int)
# Create measurement vector y := Ag
y = A@x0
print(x0)
# print(K)
# print(A)
print(y)
# Create output string for matrix
def matrix2string(A: np.ndarray):
M, N = A.shape
out = "["
for m in range(M):
out += "| "
for n in range(N-1):
out += "{:.3f}".format(A[m][n]) + ", "
out += "{:.3f}".format(A[m][N-1])
out += "\n"
out += " "
out += "|]"
return out
# Create dzn file
with open("input.dzn", "w") as f:
s = ""
s += f"dim_signal = {dim_signal};\n"
s += f"dim_measurement = {dim_measurement};\n"
s += f"K = {K};\n"
s += "\n"
s += "x0 = " + np.array2string(x0, separator=", ") + ";\n"
s += "y = " + np.array2string(y, separator=", ") + ";\n"
s += "\n"
s += "A = " + matrix2string(A) + ";\n"
f.write(s)
| UTF-8 | Python | false | false | 1,945 | py | 18 | create_input.py | 16 | 0.509742 | 0.491838 | 0 | 73 | 25.013699 | 91 |
AK-1121/code_extraction | 12,781,822,702,167 | 4520798c8511e0e147216c5a61eb2225bb1567a3 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_8536.py | cc151469c0821398d71d450da621e1753f8907f5 | []
| no_license | https://github.com/AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Python: Check if one dictionary is a subset of another larger dictionary
all(item in superset.items() for item in subset.items())
| UTF-8 | Python | false | false | 132 | py | 29,367 | python_8536.py | 29,367 | 0.772727 | 0.772727 | 0 | 2 | 65 | 74 |
cdbethune/goat-d3m-wrapper | 8,031,588,857,069 | e6c11f1b90767de9a69a3c134384d242763277b1 | 964ee35103d97cb09c3c35e70f5f7d73a6929cdf | /GoatD3MWrapper/reverse.py | 4811336ecff2ed7db6a6d667f992e178f3f019b0 | [
"MIT"
]
| permissive | https://github.com/cdbethune/goat-d3m-wrapper | a4d1aa9423d1eda6f123f67a9a0d4b4636cc6d0e | 7e033a555cd1db3e3b029fdfa476c4f8f0db78c9 | refs/heads/master | 2020-04-07T17:20:44.211660 | 2019-11-22T16:47:13 | 2019-11-22T16:47:13 | 158,566,061 | 0 | 0 | null | true | 2018-11-21T15:07:43 | 2018-11-21T15:07:43 | 2018-10-30T00:57:58 | 2018-10-30T00:57:56 | 81 | 0 | 0 | 0 | null | false | null | import os
import sys
import subprocess
import collections
import pandas as pd
import requests
import time
import typing
from json import JSONDecoder
from typing import List, Tuple
from d3m.primitive_interfaces.transformer import TransformerPrimitiveBase
from d3m.primitive_interfaces.base import CallResult
from d3m import container, utils
from d3m.metadata import hyperparams, base as metadata_base, params
from d3m.container import DataFrame as d3m_DataFrame
from common_primitives import utils as utils_cp
from .forward import check_geocoding_server
__author__ = 'Distil'
__version__ = '1.0.7'
__contact__ = 'mailto:numa@yonder.co'
Inputs = container.pandas.DataFrame
Outputs = container.pandas.DataFrame
# LRU Cache helper class
class LRUCache:
def __init__(self, capacity):
self.capacity = capacity
self.cache = collections.OrderedDict()
def get(self, key):
key = ''.join(str(e) for e in key)
try:
value = self.cache.pop(key)
self.cache[key] = value
return value
except KeyError:
return -1
def set(self, key, value):
key = ''.join(str(e) for e in key)
try:
self.cache.pop(key)
except KeyError:
if len(self.cache) >= self.capacity:
self.cache.popitem(last=False)
self.cache[key] = value
class Hyperparams(hyperparams.Hyperparams):
geocoding_resolution = hyperparams.Enumeration(default = 'city',
semantic_types = ['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
values = ['city', 'country', 'state', 'postcode'],
description = 'type of clustering algorithm to use')
rampup_timeout = hyperparams.UniformInt(lower=1, upper=sys.maxsize, default=100, semantic_types=[
'https://metadata.datadrivendiscovery.org/types/TuningParameter'],
description='timeout, how much time to give elastic search database to startup, may vary based on infrastructure')
class reverse_goat(TransformerPrimitiveBase[Inputs, Outputs, Hyperparams]):
"""
Accept a set of lat/long pair, processes it and returns a set corresponding geographic location names
Parameters
----------
inputs : pandas dataframe containing 2 coordinate float values, i.e., [longitude,latitude]
representing each geographic location of interest - a pair of values
per location/row in the specified target column
Returns
-------
Outputs
Pandas dataframe containing one location per longitude/latitude pair (if reverse
geocoding possible, otherwise NaNs) appended as new columns
"""
# Make sure to populate this with JSON annotations...
# This should contain only metadata which cannot be automatically determined from the code.
metadata = metadata_base.PrimitiveMetadata(
{
# Simply an UUID generated once and fixed forever. Generated using "uuid.uuid4()".
'id': "f6e4880b-98c7-32f0-b687-a4b1d74c8f99",
'version': __version__,
'name': "Goat_reverse",
# Keywords do not have a controlled vocabulary. Authors can put here whatever they find suitable.
'keywords': ['Reverse Geocoder'],
'source': {
'name': __author__,
'contact': __contact__,
'uris': [
# Unstructured URIs.
"https://github.com/NewKnowledge/goat-d3m-wrapper",
],
},
# A list of dependencies in order. These can be Python packages, system packages, or Docker images.
# Of course Python packages can also have their own dependencies, but sometimes it is necessary to
# install a Python package first to be even able to run setup.py of another package. Or you have
# a dependency which is not on PyPi.
'installation': [{
'type': metadata_base.PrimitiveInstallationType.PIP,
'package_uri': 'git+https://github.com/NewKnowledge/goat-d3m-wrapper.git@{git_commit}#egg=GoatD3MWrapper'.format(
git_commit=utils.current_git_commit(os.path.dirname(__file__)),
),
},
{
"type": "UBUNTU",
"package": "default-jre",
"version": "2:1.8-56ubuntu2"
},
{
"type": "TGZ",
"key": "photon-db-latest",
"file_uri": "http://public.datadrivendiscovery.org/photon.tar.gz",
"file_digest":"d7e3d5c6ae795b5f53d31faa3a9af63a9691070782fa962dfcd0edf13e8f1eab"
}],
# The same path the primitive is registered with entry points in setup.py.
'python_path': 'd3m.primitives.data_cleaning.geocoding.Goat_reverse',
# Choose these from a controlled vocabulary in the schema. If anything is missing which would
# best describe the primitive, make a merge request.
'algorithm_types': [
metadata_base.PrimitiveAlgorithmType.NUMERICAL_METHOD,
],
'primitive_family': metadata_base.PrimitiveFamily.DATA_CLEANING,
}
)
def __init__(self, *, hyperparams: Hyperparams, random_seed: int = 0, volumes: typing.Dict[str, str] = None)-> None:
super().__init__(hyperparams=hyperparams, random_seed=random_seed, volumes=volumes)
self._decoder = JSONDecoder()
self.volumes = volumes
def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[Outputs]:
"""
Accept a set of lat/long pair, processes it and returns a set corresponding geographic location names
Parameters
----------
inputs : pandas dataframe containing 2 coordinate float values, i.e., [longitude,latitude]
representing each geographic location of interest - a pair of values
per location/row in the specified target column
Returns
-------
Outputs
Pandas dataframe containing one location per longitude/latitude pair (if reverse
geocoding possible, otherwise NaNs)
"""
# confirm that server is responding before proceeding
address = 'http://localhost:2322/'
PopenObj = check_geocoding_server(address, self.volumes, self.hyperparams['rampup_timeout'])
# find location columns, real columns, and real-vector columns
targets = inputs.metadata.get_columns_with_semantic_type('https://metadata.datadrivendiscovery.org/types/Location')
real_values = inputs.metadata.get_columns_with_semantic_type('http://schema.org/Float')
real_values += inputs.metadata.get_columns_with_semantic_type('http://schema.org/Integer')
real_values = list(set(real_values))
real_vectors = inputs.metadata.get_columns_with_semantic_type('https://metadata.datadrivendiscovery.org/types/FloatVector')
target_column_idxs = []
target_columns = []
# convert target columns to list if they have single value and are adjacent in the df
for target, target_col in zip(targets, [list(inputs)[idx] for idx in targets]):
if target in real_vectors:
target_column_idxs.append(target)
target_columns.append(target_col)
# pair of individual lat / lon columns already in list
elif list(inputs)[target - 1] in target_columns:
continue
elif target in real_values:
if target+1 in real_values:
# convert to single column with list of [lat, lon]
col_name = "new_col_" + target_col
inputs[col_name] = inputs.iloc[:,target:target+2].values.tolist()
target_columns.append(col_name)
target_column_idxs.append(target)
target_column_idxs.append(target + 1)
target_column_idxs.append(inputs.shape[1] - 1)
# make sure columns are structured as 1) lat , 2) lon pairs
for col in target_columns:
if inputs[col].apply(lambda x: x[0]).max() > 90:
inputs[col] = inputs[col].apply(lambda x: x[::-1])
# delete columns with path names of nested media files
outputs = inputs.remove_columns(target_column_idxs)
goat_cache = LRUCache(10)
out_df = pd.DataFrame(index=range(inputs.shape[0]),columns=target_columns)
# reverse-geocode each requested location
for i,ith_column in enumerate(target_columns):
j = 0
for longlat in inputs[ith_column]:
cache_ret = goat_cache.get(longlat)
if(cache_ret==-1):
r = requests.get(address+'reverse?lat='+str(longlat[0])+'&lon='+str(longlat[1]))
tmp = self._decoder.decode(r.text)
if len(tmp['features']) == 0:
if self.hyperparams['geocoding_resolution'] == 'postcode':
out_df.iloc[j,i] = float('nan')
else:
out_df.iloc[j,i] = ''
elif self.hyperparams['geocoding_resolution'] not in tmp['features'][0]['properties'].keys():
if self.hyperparams['geocoding_resolution'] == 'postcode':
out_df.iloc[j,i] = float('nan')
else:
out_df.iloc[j,i] = ''
else:
out_df.iloc[j,i] = tmp['features'][0]['properties'][self.hyperparams['geocoding_resolution']]
goat_cache.set(longlat,out_df.iloc[j,i])
else:
out_df.iloc[j,i] = cache_ret
j=j+1
# need to cleanup by closing the server when done...
PopenObj.kill()
# Build d3m-type dataframe
d3m_df = d3m_DataFrame(out_df)
for i,ith_column in enumerate(target_columns):
# for every column
col_dict = dict(d3m_df.metadata.query((metadata_base.ALL_ELEMENTS, i)))
if self.hyperparams['geocoding_resolution'] == 'postcode':
col_dict['structural_type'] = type(1)
col_dict['semantic_types'] = ('http://schema.org/Integer', 'https://metadata.datadrivendiscovery.org/types/Attribute')
else:
col_dict['structural_type'] = type("it is a string")
col_dict['semantic_types'] = ('http://schema.org/Text', 'https://metadata.datadrivendiscovery.org/types/Attribute')
col_dict['name'] = target_columns[i]
d3m_df.metadata = d3m_df.metadata.update((metadata_base.ALL_ELEMENTS, i), col_dict)
df_dict = dict(d3m_df.metadata.query((metadata_base.ALL_ELEMENTS, )))
df_dict_1 = dict(d3m_df.metadata.query((metadata_base.ALL_ELEMENTS, )))
df_dict['dimension'] = df_dict_1
df_dict_1['name'] = 'columns'
df_dict_1['semantic_types'] = ('https://metadata.datadrivendiscovery.org/types/TabularColumn',)
df_dict_1['length'] = d3m_df.shape[1]
d3m_df.metadata = d3m_df.metadata.update((metadata_base.ALL_ELEMENTS,), df_dict)
return CallResult(outputs.append_columns(d3m_df))
if __name__ == '__main__':
input_df = pd.DataFrame(data={'Name':['Paul','Ben'],'Long/Lat':[list([-97.7436995, 30.2711286]),list([-73.9866136, 40.7306458])]})
volumes = {} # d3m large primitive architecture dict of large files
volumes["photon-db-latest"] = "/geocodingdata"
from d3m.primitives.data_cleaning.multitable_featurization import Goat_reverse as reverse_goat # form of import
client = reverse_goat(hyperparams={'target_columns':['Long/Lat'],'rampup':8},volumes=volumes)
print("reverse geocoding...")
print("result:")
start = time.time()
result = client.produce(inputs = input_df)
end = time.time()
print(result)
print("time elapsed is (in sec):")
print(end-start)
| UTF-8 | Python | false | false | 12,163 | py | 4 | reverse.py | 4 | 0.603963 | 0.59015 | 0 | 256 | 46.511719 | 134 |
hyh-sherry/python-challenge | 12,197,707,159,202 | 1176755564806ca77b20bd9b50ed3c6f8a3b3b94 | 12d8399723ff086e57152c41faf54df151adc2d8 | /PyPoll/main.py | 99d70837bf70841649ca6e0f40a43957f42d25b6 | []
| no_license | https://github.com/hyh-sherry/python-challenge | 222de41f783aea42ed10904b6d6d081d40f81018 | 623b94d6e176c80bee7b6d996d7586c5f39a830e | refs/heads/master | 2020-06-01T15:04:40.403887 | 2019-06-12T02:56:46 | 2019-06-12T02:56:46 | 190,826,652 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Create a Python script that analyzes the votes and calculates each of the following:
#The total number of votes cast
#A complete list of candidates who received votes
#The percentage of votes each candidate won
#The total number of votes each candidate won
#The winner of the election based on popular vote.
import os
import csv
election_csv = os.path.join(os.path.dirname( __file__ ), "..","Resources","election_data.csv")
with open(election_csv, "r") as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
header = next(csvreader)
total = 0
candidates = []
#The total number of votes cast
#A complete list of candidates who received votes
for row in csvreader:
total += 1
if row[2] not in candidates:
candidates.append(row[2])
print(len(candidates))
votes_for_each = [0,0,0,0]
percent_of_votes = []
#The percentage of votes each candidate won
#The total number of votes each candidate won
with open(election_csv, "r") as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
header = next(csvreader)
for row in csvreader:
if row[2] == candidates[0]:
votes_for_each[0] += 1
elif row[2] == candidates[1]:
votes_for_each[1] += 1
elif row[2] == candidates[2]:
votes_for_each[2] += 1
else:
votes_for_each[3] += 1
percent_of_votes.append(round(votes_for_each[0]/total*100,3))
percent_of_votes.append(round(votes_for_each[1]/total*100,3))
percent_of_votes.append(round(votes_for_each[2]/total*100,3))
percent_of_votes.append(round(votes_for_each[3]/total*100,3))
election_list = list(zip(candidates,percent_of_votes,votes_for_each))
print(election_list)
#The winner of the election based on popular vote.
max_votes = max(percent_of_votes)
for i in range(len(election_list)):
if election_list[i][1] == max_votes:
winner = election_list[i][0]
#Print Result
print("Election Results")
print("-------------------------")
print(f"Total Votes: {total}")
print("-------------------------")
for i in range(len(candidates)):
print(f"{candidates[i]}: {percent_of_votes[i]}% ({votes_for_each[i]})")
print("-------------------------")
print(f"Winner: {winner}")
print("-------------------------")
#Write a txt file with results
result_file = open("PyPoll/PyPoll_Results.txt","w+")
print("Election Results",file = result_file)
print("-------------------------",file = result_file)
print(f"Total Votes: {total}",file = result_file)
print("-------------------------",file = result_file)
for i in range(len(candidates)):
print(f"{candidates[i]}: {percent_of_votes[i]}% ({votes_for_each[i]})",file = result_file)
print("-------------------------",file = result_file)
print(f"Winner: {winner}",file = result_file)
print("-------------------------",file = result_file)
result_file.close() | UTF-8 | Python | false | false | 3,209 | py | 2 | main.py | 2 | 0.555002 | 0.54129 | 0 | 84 | 37.214286 | 102 |
chati757/python-learning-space | 15,822,659,545,412 | ebde30fdb3bd051881397dbadb360ff1a5d12d51 | 47128c6ff1277eedf851670d33f7a288fdfe2246 | /selenium/chrome.py | 7cd84bf4f18849bf1475ec1e23dda9d08bbe4465 | []
| no_license | https://github.com/chati757/python-learning-space | 5de7f11a931cf95bc076473da543331b773c07fb | bc33749254d12a47523007fa9a32668b8dc12a24 | refs/heads/master | 2023-08-13T19:19:52.271788 | 2023-07-26T14:09:58 | 2023-07-26T14:09:58 | 83,208,590 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from selenium import webdriver
import time
from selenium.webdriver.remote.remote_connection import LOGGER, logging
LOGGER.setLevel(logging.WARNING)
'''
https://chromedriver.chromium.org/downloads
โหลดมาแล้วสร้างที่อยู่สักที พร้อมกับ set path env (system level) ในที่นี้ Ex.C:\chrome_webdriver\chromedriver.exe
ทดสอบ run chromedriver.exe และ enable firewall และ permission ที่ติดทั้งหมดออก
'''
path = "C:\chrome_webdriver\chromedriver.exe"
options = webdriver.ChromeOptions();
options.add_experimental_option("excludeSwitches", ["enable-logging"])
options.add_argument('--disable-logging')
#if linux use service_log_path='/dev/null'
browser = webdriver.Chrome(executable_path=path,chrome_options=options,service_log_path='NUL')
browser.get("https://www.google.com")
time.sleep(10)
browser.close() | UTF-8 | Python | false | false | 937 | py | 387 | chrome.py | 312 | 0.779172 | 0.776662 | 0 | 20 | 38.9 | 113 |
jorgemarpa/HiTS-local | 12,240,656,797,639 | 6364fbb4dfaff7a5a03b0d54d9cf7906bbd7bb0d | 69e0d1fd511b0c15d7009bdc1f71ec4eb0e7e8fc | /download_sdss_spec.py | f51d7ed81a0345fd7362119d8205a2bdeafffca0 | []
| no_license | https://github.com/jorgemarpa/HiTS-local | a46773b12246ea6be08b2ec76fa5415d4f1125dd | a6e5baefa08ac093af8a0d2baa5263c7aad17ff4 | refs/heads/master | 2019-01-01T02:56:49.699492 | 2018-11-01T01:18:09 | 2018-11-01T01:18:09 | 45,215,511 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pyfits
import numpy as np
import urllib
import astropy
import astropy.cosmology
import astropy.units as u
import sdss_reader
import os
import pylab
import sys
from astroquery.irsa_dust import IrsaDust
import astropy.coordinates as coord
import astropy.units as u
if len(sys.argv)<3:
id0=0
id1=1000
else:
id0=np.int(sys.argv[1])
id1=np.int(sys.argv[2])
def extinction_correction(xarr1,data1,error1,RA,DEC):
"""
Corrects spectra from galatic reddening.
Input:
xarr1: Wavelenght in Angstroms. It MUST be OBSFRAME
data1: Flux does not matter the units
error1:FluxError does not matter the units
RA, DEC: coordinate RA and DEC in degrees. ICRS system.
To properly run this routhine you need to import:
from astroquery.irsa_dust import IrsaDust
import astropy.coordinates as coord
import astropy.units as u
import numpy as np
Returns:
xarr,data,error
"""
c = coord.SkyCoord(RA,DEC , unit="deg")
table = IrsaDust.get_extinction_table(c)
wlext1=table['LamEff']*1e4
Aext1=table['A_SFD']
wlext=np.zeros(len(wlext1)-8) #
Aext=np.zeros(len(Aext1)-8) # This is to avoid repetition of close WL in different
wlext=wlext1[8:] #photometric systems
Aext=Aext1[8:]
sorted=np.argsort(wlext)
wlext=wlext[sorted]
Aext=Aext[sorted]
Adata=np.interp(np.log10(xarr),np.log10(wlext),np.log10(Aext))
Adata=10**Adata
data=10**Adata*data1
error=10**Adata*error1
return xarr,data,error
def Flux_to_Luminosity(data,error, redshift,units=1e-17, Ho=70,OmegaM=0.3 ):
"""
Converts flux to luminosity
data: flux
error: flux error
units: 1e-17 erg/(scm2AA) by default. Always in erg/(scm2AA)
Ho: Hubble constant in km/s
OmegaM: Normalized matter density wr to critical density
Here we assume a FlatLCDM model: OmegaDE v= 1- OmegaM
Return:
data,error in erg/(s\AA)
"""
cos=astropy.cosmology.FlatLambdaCDM(Ho,OmegaM)
dL=cos.luminosity_distance(redshift)
dL=dL.to(u.cm).value
#print data
data=4*np.pi*data*units*dL**2
error=4*np.pi*error*units*dL**2
return data, error
CATALOG='BOSSDR12' # Can also be 'SDSSDR7'
plot=0 #Plot downloaded spectra ?
download=1
SDSS_info_dir='../SDSS_data/' #dir related with the spectra that will be downloaded
#SDSS_spec_root='http://das.sdss.org/spectro/1d_26/' #SDSS DR7 . Root of the web page to
#download spectra.
# Objects with CIV emission that will be downloaded
# Organized by Plate MedianJulianDate fibre
#plate,MJD,fibre=np.genfromtxt(SDSS_info_dir+CIVobjs,unpack=1,dtype='str')
if CATALOG=='SDSSDR7':
redshift_cut=1.79 # Minimum redshift to guarantee SiIV+OIV]1400 coverage.
SDSS_spec_root='http://dr12.sdss3.org/sas/dr12/sdss/spectro/redux/26/spectra/' #DR12, for old DR7 spec
savespec='../spec/' #where to save the downloaded spectra
if not os.path.exists(savespec):
os.mkdir(savespec)
#---------------------------------------------------------------------
CIVobjs='CIV_PlateMJdFiber.txt'
CIVobjs='TN12_MgIIxCIV.dat'
index, plate, MJD, fibre, logL3000, FWHMMgII, logL1450, FWHMCIV=np.genfromtxt(SDSS_info_dir+CIVobjs,unpack=1,dtype='str',skip_header=3)
plate=np.array([str(np.int(np.float(pl))) for pl in plate])
MJD=np.array([str(np.int(np.float(mj))) for mj in MJD])
fibre=np.array([str(np.int(np.float(fib))) for fib in fibre])
CIVsel='CIV_selected.txt'
if os.path.exists(CIVsel) and os.path.isfile(CIVsel):
os.remove(CIVsel)
#---------------------------------------------------------------------
fn = SDSS_info_dir + CIVsel
f = open(fn, "w")
f.write("#plate\tMJD\tfiber\n")
f.close()
#---------------------------------------------------------------------
redshift_info='HewettWild2010redshift.txt'
zinfo=np.loadtxt(SDSS_info_dir+redshift_info,dtype='str',skiprows=20)
plz=np.array([ np.int(zinfo[:,8][i]) for i in range(len(zinfo[:,8])) ])
mjdz=np.array([ np.int(zinfo[:,9][i]) for i in range(len(zinfo[:,9])) ])
fibz=np.array([ np.int(zinfo[:,10][i]) for i in range(len(zinfo[:,10])) ])
z=np.array([ np.float(zinfo[:,3][i]) for i in range(len(zinfo[:,3])) ])
#----------------------------------------------------------------------
if CATALOG=='BOSSDR12':
redshift_cut=1.7 # Minimum redshift to guarantee SiIV+OIV]1400 coverage.
CIVobjs='CIV_selectedBOSS.txt'
pyfits_hdu = pyfits.open(SDSS_info_dir+'DR12Q.fits') # Full SDSSDR12 QUASAR CATALOG
# Complete description and furter information in
#http://www.sdss.org/dr12/algorithms/boss-dr12-quasar-catalog/
QDR12= pyfits_hdu[1].data #extracting the data
#Selecting redshift between 1.67 to 2.4 to guarantee rest-frame spectral coverage between
# ~1350 to ~3080AA to cover from SiOIV to MgII. Spectral obs-frame coverage of BOSS 3600 to 10500AA
zup=2.3
zlow=1.7
wherelow=QDR12['Z_VI']>zlow
whereup=QDR12['Z_VI']<zup
np.savetxt(SDSS_info_dir+CIVobjs,np.transpose([QDR12['PLATE'][whereup*wherelow],QDR12['MJD'][whereup*wherelow],QDR12['FIBERID'][whereup*wherelow],QDR12['Z_VI'][whereup*wherelow]]),fmt='%10i %10i %10i %10.3f', header='plate MJD fiber redshift')
#Selecting redshift between 1.67 to 2.4 to guarantee rest-frame spectral coverage between
# ~1350 to ~3080AA to cover from SiOIV to MgII. Spectral obs-frame coverage of BOSS 3600 to 10500AA
SDSS_spec_root='http://data.sdss3.org/sas/dr12/boss/spectro/redux/v5_7_0/spectra/' # DR12 BOSS
savespec='../spec/BOSS/' #where to save the downloaded spectra
if not os.path.exists(savespec):
os.mkdir(savespec)
plate, MJD, fibre, redshifts=np.genfromtxt(SDSS_info_dir+CIVobjs,unpack=1,dtype='str',skip_header=1)
plate=np.array([str(np.int(np.float(pl))) for pl in plate])[id0:id1]
MJD=np.array([str(np.int(np.float(mj))) for mj in MJD])[id0:id1]
fibre=np.array([str(np.int(np.float(fib))) for fib in fibre])[id0:id1]
redshifts=np.array([np.float(red) for red in redshifts])[id0:id1]
CIVsel='CIV_selected_BOSS.txt'
if os.path.exists(CIVsel) and os.path.isfile(CIVsel):
os.remove(CIVsel)
#---------------------------------------------------------------------
fn = SDSS_info_dir + CIVsel
f = open(fn, "w")
f.write("#plate\tMJD\tfiber\n")
f.close()
#Dowloaded from http://mnras.oxfordjournals.org/content/suppl/2013/01/18/j.1365-2966.2010.16648.x.DC1/mnras0408-2302-SD1.txt
# Col. 1: SDSS name
# Col. 2: RA
# Col. 3: DEC
# Col. 4: z
# Col. 5: z_e
# Col. 6: FIRST Detection status
# Col. 7: Alternate redshift
# Col. 8: z estimation method code
# Col. 9: Plate
# Col. 10: MJD
# Col. 11: fibre
#---------------------------------------------------------------------
#for pl,mjd,fib in zip(plate,MJD,fibre):
for index in range(len(plate)):
pl=plate[index]
mjd=MJD[index]
fib=fibre[index]
#------DOWNLOADING SDSS DR12 FILE WITH THE APPROPIATE STRUCTURE---#
if len(pl)==3:
pl1='0'+pl
else:
pl1=pl
if len(fib)==2:
fib1='00'+fib
elif len(fib)==1:
fib1='000'+fib
elif len(fib)==3:
fib1='0'+fib
else:
fib1=fib
print pl1, mjd, fib1
#---Cross matching HW2010 redshifts with DR12
if CATALOG=='SDSSDR7':
wp=(plz==np.int(pl1))
wf=(fibz==np.int(fib1))
wm=(mjdz==np.int(mjd))
try:
redshift=z[wp*wf*wm][0]
except:
print 'object does not match with HW2010'
continue
#---Cross matching HW2010 redshifts with DR12
if CATALOG=='BOSSDR12':
redshift=redshifts[index]
print redshift
#fileroot= 'spSpec' +'-'+ mjd + '-' + pl1 + '-' + fib1 SDSS DR7
fileroot= 'spec' +'-' + pl1 + '-' + mjd + '-' + fib1 #SDSS DR12. I am downloading SDSSDR7 from the DR12 webpage. That is why
# the file structure is the same.
filename= fileroot+ '.fits'
sdss_file=savespec+filename
if download==1:
#download_site=SDSS_spec_root + pl1 + '/1d/' + filename #SDSS DR7
download_site=SDSS_spec_root + pl1 + '/'+filename
urllib.urlretrieve(download_site, filename=sdss_file)
try:
data,error,xarr,hdr=sdss_reader.read_sdss(sdss_file)
os.remove(sdss_file)
except:
os.remove(sdss_file)
print download_site, 'could not be downloaded'
continue
#------DOWNLOADING SDSS DR12 FILE WITH THE APPROPIATE STRUCTURE---#
if redshift> redshift_cut-0.00001:
if download==1:
#-----Correcting for extinction-----#
RA=hdr['RA'];DEC=hdr['DEC']
xarr,data,error=extinction_correction(xarr,data,error,RA,DEC)
#-----Correcting for extinction-----#
xarr=xarr/(1.0+redshift)
data,error=Flux_to_Luminosity(data,error, redshift,units=np.float(hdr['BUNIT'][0:5]) )
np.savetxt(savespec+fileroot+'.txt',np.transpose([xarr,data,error]),header='Wavelenght AA Flux Error in erg/(sAA)')
if plot==1:
pylab.figure()
pylab.plot(xarr,data)
f = open(fn, "a")
f.write("\n".join(["\t".join([str(q) for q in [pl1, mjd, fib1]])]) )
#"\n".join(["\t".join([str(q) for q in p])
f.write("\n")
f.close()
#np.savetxt(np.transpose([xarr,data,error]),savespec+fileroot+'.txt')
#table = pyfits_hdu[0].data
#pyfits_hdu=pyfits.open(sdss_file)
#hdr = pyfits_hdu[0]._header
#x0=hdr['COEFF0']
#dx=hdr['COEFF1']
#table = pyfits_hdu[0].data
#xarr=np.array([ 10**(x0+dx*i) for i in range(hdr['NAXIS1']) ])
#---------------------------------------------------------------------
# Total structure of the file:
# SDSS_spec_root + plate[id] + '/' + 'spSpec' +'-'+ MJD[id] + '-' + plate[id] + '-' fibre + '.fit'
# example: http://das.sdss.org/spectro/1d_26/0276/1d/spSpec-51909-0276-006.fit
# #--------- SDSS_spec_root-------#plate#----------#MJD-plate-fiber
#---------------------------------------------------------------------
#download_string=
| UTF-8 | Python | false | false | 10,414 | py | 32 | download_sdss_spec.py | 1 | 0.589015 | 0.553006 | 0 | 327 | 30.834862 | 264 |
mjsphdev/my_expenses | 3,092,376,493,772 | c34ac04d01fb59a1e7d13a6144ac76ff9736e58e | 36c4fd01715ee2b495f0540e4a91709e513f407c | /main/migrations/0002_billspayments_month.py | 99e9d0b33cf45f59d817f8ad2bf9fc0a78755a33 | []
| no_license | https://github.com/mjsphdev/my_expenses | 41f72f8f1cb240d039a769814e2259798c83421a | 4b2b7485a9eaa8c7460aba3ff7c34121e27c3346 | refs/heads/main | 2023-07-23T04:52:18.517589 | 2021-08-18T03:53:46 | 2021-08-18T03:53:46 | 396,293,312 | 0 | 0 | null | false | 2021-08-18T03:53:47 | 2021-08-15T09:21:23 | 2021-08-15T09:28:23 | 2021-08-18T03:53:46 | 6,709 | 0 | 0 | 0 | CSS | false | false | # Generated by Django 3.2.5 on 2021-08-05 03:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='billspayments',
name='month',
field=models.CharField(default='8,2021', max_length=50, unique=True),
),
]
| UTF-8 | Python | false | false | 407 | py | 30 | 0002_billspayments_month.py | 18 | 0.589681 | 0.525799 | 0 | 18 | 21.611111 | 81 |
TeraMatrix/unmp-m2m | 1,425,929,154,651 | 53482ddfe33154db27edff41fbd3f8e8ad407965 | 384daeb81f238ef6a3b09e66a8f77ffab653a1d2 | /htdocs/advanced_status_controller.py | d46a17131d0a9cff0aea6cc2f6e097adc59011ba | []
| no_license | https://github.com/TeraMatrix/unmp-m2m | a446fae084ffdfc9b1c210eac3a93cc5b2eeb23f | a962173a503e19d60dd56a67d0b7c12c9a55bf35 | refs/heads/master | 2021-05-01T10:46:46.721522 | 2014-06-04T23:47:19 | 2014-06-04T23:47:19 | 19,937,512 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python2.6
from datetime import datetime
from datetime import timedelta
from json import JSONEncoder
from advanced_status_bll import AdvancedStatusBll
from advanced_status_view import AdvancedStatusView
# from common_controller import *
# from nms_config import *
# from odu_controller import *
global bll_obj
bll_obj = AdvancedStatusBll()
def get_advanced_status_value(h):
"""
@param h:
"""
global html, bll_obj
html = h
device_type_dict = {'ap25': 'AP25', 'odu16': 'RM18', 'odu100':
'RM', 'idu4': 'IDU'}
ip_address = html.var('ip_address')
device_type_id = html.var('device_type_id')
user_id = html.req.session["user_id"]
selected_listing = ""
if device_type_id == 'odu100' or device_type_id == 'odu16':
selected_listing = "odu_listing.py"
elif device_type_id == 'idu4':
selected_listing = "idu_listing.py"
elif device_type_id == 'ap25':
selected_listing = "ap_listing.py"
elif device_type_id == 'ccu':
selected_listing = "ccu_listing.py"
css_list = [
"css/style.css", "css/custom.css", "calendrical/calendrical.css",
"css/demo_table_jui.css", "css/jquery-ui-1.8.4.custom.css"]
javascript_list = ["js/lib/main/highcharts.js", "js/unmp/main/advanced_status.js",
"calendrical/calendrical.js", "js/lib/main/jquery.dataTables.min.js"]
html.new_header(
'%s %s Historical Status' % (device_type_dict[device_type_id],
ip_address.replace("'", "")), selected_listing, "", css_list, javascript_list)
html_content = AdvancedStatusView.ap_set_variable(
ip_address, device_type_id, user_id)
html.write(str(html_content))
html.new_footer()
def ap_total_status_name(h):
"""
@param h:
"""
global html, bll_obj
html = h
user_id = html.req.session["user_id"]
device_type_id = html.var('device_type_id')
ip_address = html.var('ip_address')
result_dict = bll_obj.total_graph_name_display(device_type_id, user_id)
controller_dict = AdvancedStatusView.graph_name_listing(
result_dict, ip_address)
html.req.content_type = 'application/json'
html.req.write(str(JSONEncoder().encode(controller_dict)))
def advanced_status_json_creation(h):
"""
@param h:
"""
global html, bll_obj
html = h
graph_id = html.var('graph_id')
device_type_id = html.var('device_type_id')
ip_address = html.var('ip_address')
user_id = html.req.session["user_id"]
controller_dict = bll_obj.advanced_graph_json(
graph_id, device_type_id, user_id, ip_address)
h.req.content_type = 'application/json'
h.req.write(str(JSONEncoder().encode(controller_dict)))
def advanced_status_update_date_time(h):
"""
@param h:
"""
global html
html = h
try:
now = datetime.now()
end_date = now.strftime("%d/%m/%Y")
end_time = now.strftime("%H:%M")
output_dict = {'success': 0, 'end_date': end_date,
'end_time': end_time}
except Exception as e:
output_dict = {'success': 1, 'output': str(e[-1])}
finally:
html.req.content_type = 'application/json'
html.req.write(str(JSONEncoder().encode(output_dict)))
def advanced_status_creation(h):
"""
@param h:
"""
global html, bll_obj
html = h
graph_type = html.var('graph_type')
table_name = html.var('table_name')
column_value = html.var('field')
cal_type = html.var('calType')
interface_value = html.var('tab')
graph_type = html.var('type')
start_date = html.var('start_date')
start_time = html.var('start_time')
end_date = html.var('end_date')
end_time = html.var('end_time')
flag = html.var('flag')
ip_address = html.var('ip_address')
update_field = html.var('update')
start = html.var('start')
limit = html.var('limit')
start_date = datetime.strptime(
start_date + ' ' + start_time, "%d/%m/%Y %H:%M")
end_date = datetime.strptime(end_date + ' ' + end_time, "%d/%m/%Y %H:%M")
column_name = column_value.split(",")
table_name = table_name.split(",")
user_id = html.req.session["user_id"]
if update_field == '' or update_field == None:
update_field_name = ''
else:
update_field_name = update_field
controller_dict = bll_obj.advanced_graph_data(
'graph', user_id, table_name[0], table_name[1], table_name[-2],
table_name[-1], start, limit, flag, start_date, end_date, ip_address,
graph_type, update_field_name, interface_value, cal_type, column_name)
html.req.content_type = 'application/json'
html.req.write(str(JSONEncoder().encode(controller_dict)))
def status_data_table_creation(h):
"""
@param h:
"""
global html, bll_obj
html = h
result1 = ''
ip_address = html.var('ip_address') # take ip_address from js side
start_date = html.var('start_date')
start_time = html.var('start_time')
end_date = html.var('end_date')
end_time = html.var('end_time')
device_type = html.var('device_type')
graph_id = html.var('graph_id')
start_date = datetime.strptime(
start_date + ' ' + start_time, "%d/%m/%Y %H:%M")
end_date = datetime.strptime(end_date + ' ' + end_time, "%d/%m/%Y %H:%M")
user_id = html.req.session["user_id"]
controller_dict = bll_obj.ap_data_table(
user_id, ip_address, start_date, end_date, graph_id, device_type)
html.req.content_type = 'application/json'
html.req.write(str(JSONEncoder().encode(controller_dict)))
def advanced_status_excel_creating(h):
"""
@param h:
"""
global html, bll_obj
html = h
result1 = ''
device_type = html.var('device_type_id')
ip_address = html.var('ip_address') # take ip_address from js side
start_date = html.var('start_date')
start_time = html.var('start_time')
end_date = html.var('end_date')
end_time = html.var('end_time')
report_type = html.var("type")
graph_id = html.var("graph_id")
select_option = html.var("select_option")
if int(select_option) > 0:
end_date = str(datetime.date(datetime.now()))
start_time = '00:00'
end_time = '23:59'
if int(select_option) == 1:
start_date = str(datetime.date(datetime.now()))
elif int(select_option) == 2:
start_date = str(
datetime.date(datetime.now()) + timedelta(days=-7))
elif int(select_option) == 3:
start_date = str(
datetime.date(datetime.now()) + timedelta(days=-15))
elif int(select_option) == 4:
start_date = str(
datetime.date(datetime.now()) + timedelta(days=-30))
start_date = datetime.strptime(
start_date + ' ' + start_time, "%Y-%m-%d %H:%M")
end_date = datetime.strptime(
end_date + ' ' + end_time, "%Y-%m-%d %H:%M")
else:
start_date = datetime.strptime(
start_date + ' ' + start_time, "%d/%m/%Y %H:%M")
end_date = datetime.strptime(
end_date + ' ' + end_time, "%d/%m/%Y %H:%M")
user_id = html.req.session["user_id"]
controller_dict = bll_obj.advaeced_excel_report(
report_type, device_type, user_id, ip_address, start_date,
end_date, graph_id, select_option)
# html.req.content_type = 'application/json'
# html.req.write(str(JSONEncoder().encode(controller_dict)))
html.write(str(controller_dict))
# def page_tip_advanced_status(h):
# global html
# html = h
# import defaults
# f = open(defaults.web_dir + "/htdocs/locale/page_tip_advanced_status.html", "r")
# html_view = f.read()
# f.close()
# html.write(str(html_view))
| UTF-8 | Python | false | false | 7,833 | py | 167 | advanced_status_controller.py | 110 | 0.594281 | 0.587642 | 0 | 230 | 33.056522 | 115 |
ParthasarathySubburaj/Algorithms-Data-Structures-specilization | 8,632,884,310,488 | 226aeacde2239209028d4eca85f14c68864ac04e | 45f843360039a162d6b7daad963bc0accdbcb2c3 | /Algorithmic_Tool_Box/week6_dynamic_programming2/knapsack.py | 9b557113111f7bd3d99c2f1a240b0baee7554715 | []
| no_license | https://github.com/ParthasarathySubburaj/Algorithms-Data-Structures-specilization | b41c4fd07d34e56f2d98f28867eeed23761da7a4 | 78794fbb3c2b907ea91a188d44d3424ea77395e5 | refs/heads/master | 2022-04-12T19:35:11.173645 | 2020-03-15T15:26:30 | 2020-03-15T15:26:30 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Uses python3
import sys
import numpy as np
def optimal_weight(W, w):
# write your code here
matrix = np.full((W+1, len(w)+1),0, dtype=int)
for i in range(1, len(w)+1):
for weight in range(1, W+1):
matrix[weight, i] = matrix[weight, i-1]
if w[i-1] <= weight:
value = matrix[weight - w[i-1], i-1] + w[i-1]
if value > matrix[weight, i]:
matrix[weight, i] = value
return matrix[W, len(w)]
if __name__ == '__main__':
input = sys.stdin.read()
W, n, *w = list(map(int, input.split()))
print(optimal_weight(W, w))
| UTF-8 | Python | false | false | 622 | py | 24 | knapsack.py | 23 | 0.517685 | 0.496785 | 0 | 20 | 30.1 | 61 |
prerna-khanna/torchtuples | 283,467,865,834 | 58dcdb4744f9f2e26ac9328557b93bcd7ba8e794 | fd7803084ffa8258244605c42140f85fec6d5d2a | /torchtuples/testing.py | c04cee2ad48344f96912ebb3f5134ace3585b4f9 | [
"BSD-3-Clause",
"BSD-2-Clause"
]
| permissive | https://github.com/prerna-khanna/torchtuples | c1ab42f30f2b120fec840165f0b59119e7f7d41c | 1e02f1f8ed4f4758796e3d77573af321a7e5ca09 | refs/heads/master | 2020-06-28T23:45:39.255464 | 2019-08-03T13:03:29 | 2019-08-03T13:03:29 | 200,374,627 | 0 | 0 | BSD-2-Clause | true | 2019-08-03T12:45:30 | 2019-08-03T12:45:29 | 2019-07-16T10:47:45 | 2019-07-15T14:26:50 | 420 | 0 | 0 | 0 | null | false | false | import numpy as np
import torch
from torchtuples.tupletree import TupleTree, tuplefy
def assert_tupletree_equal(a, b, check_dtypes=True):
assert type(a) == type(b) == TupleTree, 'Not TupleTree'
assert a.numerate() == b.numerate(), 'Not same structure'
assert a.types() == b.types(), 'Not same types'
if check_dtypes:
ad, bd = (tuplefy(a, b)
.apply(lambda x: x.dtype if hasattr(x, 'dtype') else 'not_tensor'))
assert ad == bd, 'Not same dtype'
for aa, bb in zip(a.flatten(), b.flatten()):
if hasattr(aa, 'dtype'):
assert (aa == bb).all(), 'Not equal values'
else:
assert aa == bb, 'Not equal values' | UTF-8 | Python | false | false | 696 | py | 20 | testing.py | 14 | 0.591954 | 0.591954 | 0 | 19 | 35.684211 | 85 |
cambridge-cares/TheWorldAvatar | 15,238,543,982,211 | 50958c249efc596666e92ba575a073f7b0bd042b | 65c84146c9f3871f54fe51ab437bd14927ade706 | /Agents/AirQualityAgent/agent/dataretrieval/readings.py | 01dc52d7df55298bec0c16644cdf777bc1c94123 | [
"MIT"
]
| permissive | https://github.com/cambridge-cares/TheWorldAvatar | 3a227c629acb10b9be7effe58696aef48b27dd21 | 01a7d108754e0249d8d3b5b75708345c399c1ee9 | refs/heads/main | 2023-08-21T11:05:09.147655 | 2023-08-18T07:46:57 | 2023-08-18T07:46:57 | 345,141,689 | 57 | 26 | MIT | false | 2023-09-13T14:16:57 | 2021-03-06T16:33:07 | 2023-09-13T10:16:00 | 2023-09-13T14:16:57 | 433,486 | 48 | 19 | 149 | Java | false | false | ################################################
# Authors: Markus Hofmeister (mh807@cam.ac.uk) #
# Date: 05 Apr 2022 #
################################################
# The purpose of this module is to provide functions to retrieve
# readings data from KG
import re
import datetime as dt
import pandas as pd
from agent.kgutils import kgclient
from agent.kgutils.kgclient import KGClient
from agent.kgutils.timeseries import TSClient
from agent.kgutils.querytemplates import *
from agent.errorhandling.exceptions import InvalidInput, TSException
from agent.utils.stack_configs import DB_PASSWORD, DB_URL, DB_USER, QUERY_ENDPOINT, UPDATE_ENDPOINT
from agent.utils.readings_mapping import TIME_FORMAT
# Initialise logger
from py4jps import agentlogging
logger = agentlogging.get_logger("prod")
def get_instantiated_observations(stations: list = None,
query_endpoint: str = QUERY_ENDPOINT,
update_endpoint: str = UPDATE_ENDPOINT):
"""
Returns DataFrame of (all) instantiated observations in KG
(data for all stations is returned if no stations list is provided)
Arguments:
stations - list of ReportingStation IRIs (WITHOUT trailing '<' and '>'
for which to retrieve data)
Returns DataFrame with columns: ['station', 'stationID', 'quantityType',
'dataIRI', 'comment', 'reading']
station: station IRI
stationID: created unique UK Air station ID for that station
quantityType: IRI of OntoEMS quantity, e.g. https://www.theworldavatar.com/kg/ontoems/OzoneConcentration
dataIRI: IRI of quantity instance to which time series is attached
comment: label of measured pollutant
reading: shorthand of OntoEMS quantity, e.g. OzoneConcentration
"""
# Construct KG client and execute query
query_string = instantiated_observations(station_iris=stations)
kg_client = KGClient(query_endpoint, update_endpoint)
results = kg_client.performQuery(query=query_string)
# Parse results into DataFrame
df = pd.DataFrame(columns=['station', 'stationID', 'quantityType', 'dataIRI', 'comment'],
data=results)
# Add column with shorthand of quantity type
df['reading'] = df['quantityType'].apply(lambda x: x.split('/')[-1])
return df
def get_instantiated_observation_timeseries(stations: list = None,
query_endpoint: str = QUERY_ENDPOINT,
update_endpoint: str = UPDATE_ENDPOINT):
"""
Returns DataFrame of (all) instantiated observation timeseries in KG
(data for all stations is returned if no stations list is provided)
Arguments:
stations - list of ReportingStation IRIs (WITHOUT trailing '<' and '>'
for which to retrieve data)
Returns DataFrame with columns: ['station', 'stationID', 'quantityType',
'dataIRI', 'comment', 'tsIRI', 'unit', 'reading']
station: station IRI
stationID: created unique UK Air station ID for that station
quantityType: IRI of OntoEMS quantity, e.g. https://www.theworldavatar.com/kg/ontoems/OzoneConcentration
dataIRI: IRI of quantity instance to which time series is attached
comment: label of measured pollutant
tsIRI: IRI of time series instance
unit - unit for time series, e.g. mg/m3
reading: shorthand of OntoEMS quantity, e.g. OzoneConcentration
"""
# Construct KG client and execute query
query_string = instantiated_observation_timeseries(stations)
kg_client = KGClient(query_endpoint, update_endpoint)
results = kg_client.performQuery(query=query_string)
# Parse results into DataFrame
df = pd.DataFrame(columns=['station', 'stationID', 'quantityType', 'dataIRI',
'comment', 'tsIRI', 'unit'], data=results)
# Add column with shorthand of quantity type
df['reading'] = df['quantityType'].apply(lambda x: x.split('/')[-1])
return df
def get_time_series_data(station_iris: list = None,
observation_types: list = None,
tmin: str = None, tmax: str = None,
query_endpoint: str = QUERY_ENDPOINT,
update_endpoint: str = UPDATE_ENDPOINT):
"""
Retrieve time series data for provided observation types and stations from KG
Arguments
station_iris - list of station IRIs for which to retrieve time series data
(all stations if None)
observation_types - list of observation types (e.g., PM10Concentration)
for which to retrieve data (all if None)
tmin - oldest time step for which to retrieve data
tmax - latest time step for which to retrieve data
Returns
List of (Java) time series objects
List of dictionaries with ts names (i.e. [{dataIRI: name}, ...])
List of dictionaries with ts units (i.e. [{dataIRI: unit}, ...])
"""
def _validate_time_format(time_string):
rec = re.compile(r'\d{4}-\d{1,2}-\d{1,2}T\d{1,2}:\d{1,2}:\d{1,2}Z')
if bool(rec.match(time_string)):
return time_string
else:
t = None
# Adding potentially missing Z at end of time string
rec = re.compile(r'Z$')
if not bool(rec.match(time_string)):
time_string += 'Z'
logger.info('Provided time string assumed in UTC.')
rec = re.compile(r'\d{4}-\d{1,2}-\d{1,2}T\d{1,2}:\d{1,2}Z')
if bool(rec.match(time_string)):
t = dt.datetime.strptime(time_string, '%Y-%m-%dT%H:%MZ')
else:
rec = re.compile(r'\d{4}-\d{1,2}-\d{1,2}T\d{1,2}Z')
if bool(rec.match(time_string)):
t = dt.datetime.strptime(time_string, '%Y-%m-%dT%HZ')
else:
rec = re.compile(r'\d{4}-\d{1,2}-\d{1,2}Z')
if bool(rec.match(time_string)):
t = dt.datetime.strptime(time_string, '%Y-%m-%dZ')
# Return properly formatted time string if format could be derived
return dt.datetime.strftime(t, TIME_FORMAT)
# Validate format of provided tmin and tmax
if tmin:
try:
tmin = _validate_time_format(tmin)
except Exception as ex:
logger.error(f'Provided format of tmin could not be derived. Expected format: {TIME_FORMAT}')
raise InvalidInput(f'Provided format of tmin could not be derived. Expected format: {TIME_FORMAT}') from ex
if tmax:
try:
tmax = _validate_time_format(tmax)
except Exception as ex:
logger.error(f'Provided format of tmax could not be derived. Expected format: {TIME_FORMAT}')
raise InvalidInput(f'Provided format of tmax could not be derived. Expected format: {TIME_FORMAT}') from ex
# Create DataFrame from instantiated observation time series
# ['station', 'stationID', 'quantityType', 'dataIRI', 'comment', 'tsIRI', 'unit', 'reading']
df = get_instantiated_observation_timeseries(station_iris, query_endpoint, update_endpoint)
# Get relevant subset of available time series data
if observation_types:
observation_types = [str(i).lower() for i in observation_types]
df = df[df['reading'].str.lower().isin(observation_types)]
# Get list of lists of dataIRIs to retrieve
dataIRIs_list = [list(df.loc[df['tsIRI'] == tsIRI, 'dataIRI']) for tsIRI in df['tsIRI'].unique()]
# Initialise return list
ts_data = []
ts_names = []
ts_units = []
# Initialise KG and TimeSeries Clients
kg_client = KGClient(query_endpoint, update_endpoint)
ts_client = TSClient(kg_client=kg_client, rdb_url=DB_URL, rdb_user=DB_USER,
rdb_password=DB_PASSWORD)
for dataIRIs in dataIRIs_list:
# Get time series within desired bounds
try:
with ts_client.connect() as conn:
ts_data.append(ts_client.tsclient.getTimeSeriesWithinBounds(dataIRIs, tmin, tmax, conn))
except Exception as ex:
logger.error(f'Error while retrieving time series data for dataIRIs: {dataIRIs}')
raise TSException(f'Error while retrieving time series data for dataIRIs: {dataIRIs}') from ex
# Get time series names and units (as dict with dataIRIs as key)
df_sub = df.loc[df['dataIRI'].isin(dataIRIs), ['dataIRI','unit', 'comment']]
ts_names.append(dict(zip(df_sub['dataIRI'], df_sub['comment'].str.capitalize())))
ts_units.append(dict(zip(df_sub['dataIRI'], df_sub['unit'])))
return ts_data, ts_names, ts_units
| UTF-8 | Python | false | false | 9,087 | py | 9,300 | readings.py | 4,926 | 0.60394 | 0.598767 | 0 | 193 | 46.082902 | 119 |
HugheHuang/core-python-applications-programming-3rd-edition- | 3,779,571,222,707 | f3052ebb556f12a9468c7834e90c7fc594231e2a | 749ac3de7856dba11693181061fe9d9cc1c5d840 | /CH2/tsUclnt.py | 35f4e49a5c7b47a8e452f6bd029b826049de373c | []
| no_license | https://github.com/HugheHuang/core-python-applications-programming-3rd-edition- | 08a238901b948f3335543dd72741b927b35b49f1 | 2b54fbcbf01d9f36d2e1e18cc40d7be3f13a7557 | refs/heads/master | 2021-01-20T02:53:14.628435 | 2017-04-26T09:19:47 | 2017-04-26T09:19:47 | 89,461,688 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = tsUclnt.py
__author__ = Hughe
__time__ = 2017-04-22 22:36
"""
from socket import *
HOST='localhost'
PORT=21567
BUFSIZ=1024
ADDR=(HOST,PORT)
udpClnSock=socket(AF_INET,SOCK_DGRAM)
while True:
data=raw_input('> ')
if not data:
break
udpClnSock.sendto(data,ADDR)
data,ADDR=udpClnSock.recvfrom(BUFSIZ)
if not data:
break
print data
udpClnSock.close()
if __name__ == '__main__':
pass | UTF-8 | Python | false | false | 520 | py | 22 | tsUclnt.py | 19 | 0.576923 | 0.534615 | 0 | 30 | 15.4 | 41 |
mavrick202/troposphere | 9,955,734,223,007 | a207d72df7a67b9585a3b2c8d11a99d4b4123e0d | 8ee47a223b9e245fc6744802278d42b8a7062716 | /troposphere/dlm.py | a3a95083b810b5138e2ff2f04b7d1b5dc8f36701 | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | https://github.com/mavrick202/troposphere | 14693be8920b3d4ac6e7c098c1f6f1674485e7c8 | 4eb213162cd8018fe0f82dc9597cc306ade49b04 | refs/heads/master | 2020-11-25T10:42:36.993519 | 2019-12-08T22:32:03 | 2019-12-08T22:32:03 | 228,624,641 | 1 | 8 | BSD-2-Clause | true | 2019-12-17T13:38:51 | 2019-12-17T13:38:50 | 2019-12-17T08:04:10 | 2019-12-11T15:17:58 | 2,299 | 0 | 0 | 0 | null | false | false | # Copyright (c) 2015, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty, Tags
from .validators import (integer, boolean)
VALID_STATES = ('ENABLED', 'DISABLED')
VALID_RESOURCE_TYPES = ('VOLUME')
VALID_INTERVALS = (2, 3, 4, 6, 8, 12, 24)
VALID_INTERVAL_UNITS = ('HOURS')
def validate_interval(interval):
"""Interval validation rule."""
if interval not in VALID_INTERVALS:
raise ValueError("Interval must be one of : %s" %
", ".join(VALID_INTERVALS))
return interval
def validate_interval_unit(interval_unit):
"""Interval unit validation rule."""
if interval_unit not in VALID_INTERVAL_UNITS:
raise ValueError("Interval unit must be one of : %s" %
", ".join(VALID_INTERVAL_UNITS))
return interval_unit
def validate_state(state):
"""State validation rule."""
if state not in VALID_STATES:
raise ValueError("State must be one of : %s" %
", ".join(VALID_STATES))
return state
class Parameters(AWSProperty):
props = {
'ExcludeBootVolume': (boolean, False),
}
class CreateRule(AWSProperty):
props = {
'Interval': (validate_interval, True),
'IntervalUnit': (validate_interval_unit, True),
'Times': ([basestring], False),
}
class RetainRule(AWSProperty):
props = {
'Count': (integer, True),
}
class Schedule(AWSProperty):
props = {
'CopyTags': (boolean, False),
'CreateRule': (CreateRule, False),
'Name': (basestring, False),
'RetainRule': (RetainRule, False),
'TagsToAdd': ((Tags, list), False),
}
class PolicyDetails(AWSProperty):
props = {
'Parameters': (Parameters, False),
'PolicyType': (basestring, False),
'ResourceTypes': ([basestring], False),
'Schedules': ([Schedule], False),
'TargetTags': ((Tags, list), False),
}
class LifecyclePolicy(AWSObject):
resource_type = "AWS::DLM::LifecyclePolicy"
props = {
'Description': (basestring, False),
'ExecutionRoleArn': (basestring, False),
'PolicyDetails': (PolicyDetails, False),
'State': (validate_state, False),
}
| UTF-8 | Python | false | false | 2,296 | py | 220 | dlm.py | 199 | 0.601916 | 0.596254 | 0 | 90 | 24.511111 | 62 |
WooWan/Koala-Algorithm | 17,016,660,458,356 | d142ca42e8225e318d7c743ebc4a76378fbd95bd | 44335d7e1b2d40298ecac50a4192adfe7a518552 | /study/week2/team1/BOJ_2156_우창완.py | ed1356b0e454c1c0a35c7f9bde36a7f88d0014c9 | []
| no_license | https://github.com/WooWan/Koala-Algorithm | d9ffb96cf68b32eb976f28a02a61014cd691ac97 | 65d8c38e80edb56a976824d28f5afd4cc9cbff0b | refs/heads/master | 2023-04-21T05:02:17.853995 | 2021-05-26T05:33:25 | 2021-05-26T05:33:25 | 345,525,519 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
n= int(input())
arr=[0]*(n+3)
dp=[-1]*(n+3)
for i in range(n):
arr[i]=int(sys.stdin.readline())
dp[0]=arr[0]
dp[1]=arr[1]+arr[0]
dp[2]= max(arr[0]+arr[1],arr[0]+arr[2],arr[1]+arr[2])
for i in range(3,n):
dp[i]=max(dp[i-1], arr[i]+dp[i-2], arr[i]+arr[i-1]+dp[i-3])
print(dp[n-1])
| UTF-8 | Python | false | false | 295 | py | 62 | BOJ_2156_우창완.py | 62 | 0.559322 | 0.484746 | 0 | 15 | 18.666667 | 60 |
s0217391/DifferentProjects | 18,004,502,927,859 | 7256fec4de81f1042fa3535723bb5a6a1517d079 | bc167f434158921bcf2c678155c5cdfec1c9b0c9 | /PI_code/simulator/behaviourGeneration/group/behav150.py | eef84581a8f638db5a280805c63202488c2e68a2 | []
| no_license | https://github.com/s0217391/DifferentProjects | 6450efc89c64ecd21b86c705737e89e5c69433a6 | 7f4da153660817b6cbf72d2e823aa29c0c2f95a9 | refs/heads/master | 2021-01-17T02:58:46.219240 | 2015-05-26T22:45:46 | 2015-05-26T22:45:46 | 34,995,164 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
import sys
def compute(prey, otherHunter, dist):
temp0 = otherHunter[0] * prey[0]
temp1 = min( dist , prey[0] )
temp1 = otherHunter[0] - otherHunter[1]
temp1 = prey[1] - otherHunter[0]
return [ otherHunter[0] , dist ]
| UTF-8 | Python | false | false | 242 | py | 969 | behav150.py | 924 | 0.669421 | 0.619835 | 0 | 9 | 25.888889 | 40 |
huhaiqng/YWSystemB | 16,217,796,520,125 | 15721fa6cd33abd41f5f84af091b2425cd1c948b | 23f754a39b996ad3e50e539ac1ea88217545df8b | /app/models/project_rabbitmq.py | bf5dc53a46fe71f5976f087c5dcfdae106856856 | []
| no_license | https://github.com/huhaiqng/YWSystemB | 576b0310cfe49086eaafb99eaa83042621d6fab5 | cf601fe4b97e96187e66a084a7e43a0cd259e92f | refs/heads/master | 2022-12-11T06:19:46.025055 | 2021-04-27T07:48:46 | 2021-04-27T07:48:46 | 245,122,835 | 0 | 0 | null | false | 2022-12-08T11:57:56 | 2020-03-05T09:40:26 | 2021-04-27T07:49:06 | 2022-12-08T11:57:54 | 1,386 | 0 | 0 | 11 | Python | false | false | from django.db import models
from django.utils import timezone
from .instance_rabbitmq import RabbitmqInstance
from .project import Project
# Rabbitmq
class ProjectRabbitmq(models.Model):
instance = models.ForeignKey(RabbitmqInstance, on_delete=models.PROTECT, blank=True)
env = models.CharField('环境', max_length=200)
project = models.ForeignKey(Project, on_delete=models.PROTECT)
username = models.CharField('用户名', max_length=200, blank=True)
password = models.CharField('密码', max_length=200, blank=True)
created = models.DateTimeField('创建时间', default=timezone.now)
| UTF-8 | Python | false | false | 615 | py | 136 | project_rabbitmq.py | 132 | 0.762226 | 0.747049 | 0 | 14 | 41.357143 | 88 |
mrjeffstevenson3/mmimproc | 17,085,379,943,650 | 9839d30e1ad89a0874c8ee1ad93577d8adcb3f57 | b83de7b1c7fa7cecd5cdc63554902f4b5746fceb | /mmimproc/qt1/spdft.py | e3e33b5ff05299570141fcabb26f871522af32a1 | []
| no_license | https://github.com/mrjeffstevenson3/mmimproc | 195c2d660e041c68ea9b9db524c444ee111291e0 | 1aed4b1ce0ef5606a702af02b341ce3291a86283 | refs/heads/master | 2021-11-27T12:21:59.356889 | 2021-09-15T20:06:11 | 2021-09-15T20:06:11 | 171,966,939 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Python interface to spdft.m
"""
from StringIO import StringIO
import matlab.engine
from matlab import double as mdouble
from mmimproc.utils import getnetworkdataroot, mmimproc_dir
def fit(X, Y):
"""
:param X: multi dim
:param Y: flip angle
:return:
"""
Xm = mdouble(X.tolist())
Ym = mdouble(Y.tolist())
matlabOut = StringIO()
eng = matlab.engine.start_matlab()
eng.addpath(eng.genpath(str(mmimproc_dir)))
dYm = []
options = {'Xc': mdouble([0, float(X.max())])}
output = eng.spdft(Xm, Ym, dYm, options, nargout=1, stdout=matlabOut)
eng.quit()
print('stdout: {}'.format(matlabOut.getvalue()))
print('output: {}'.format(output))
print('Done')
return output
#/home/toddr/Software/matlab2017b/bin/matlab
| UTF-8 | Python | false | false | 788 | py | 461 | spdft.py | 264 | 0.639594 | 0.63198 | 0 | 31 | 24.387097 | 73 |
JetBrains/intellij-community | 2,851,858,319,832 | aaed8a7f5bc98d0a9128be52cc5161529a706a20 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/inspections/PyTypeCheckerInspection/NewTypeAsParameter.py | 6f4d22f037cbd040e55262dc3170ea7dd6e22ff6 | [
"Apache-2.0"
]
| permissive | https://github.com/JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | false | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | 2023-09-12T03:37:30 | 2023-09-12T06:46:46 | 4,523,919 | 15,754 | 4,972 | 237 | null | false | false | from typing import NewType
UserId = NewType("UserId", int)
def get_user(user: UserId) -> str:
pass
get_user(UserId(5))
get_user(<warning descr="Expected type 'UserId', got 'LiteralString' instead">"John"</warning>)
get_user(<warning descr="Expected type 'UserId', got 'int' instead">4</warning>) | UTF-8 | Python | false | false | 303 | py | 127,182 | NewTypeAsParameter.py | 70,394 | 0.709571 | 0.70297 | 0 | 11 | 26.636364 | 95 |
jameskschull/phylo | 12,987,981,137,939 | d6ca812f1833f5d49528020bd5529c14b0f899a3 | c85f3f1cadebefde31c7efa4a76c545a2ab65ce5 | /scripts/compare_to_query.py | fc26223e6aab01af52701621e48552bb005d8b11 | []
| no_license | https://github.com/jameskschull/phylo | 4e0e33ab893cab530da0483a39f1dd48801333fe | f2e6f5a3a85dfcecbcdfafad34aed419ba073602 | refs/heads/master | 2021-07-21T18:56:55.615258 | 2018-10-01T03:10:39 | 2018-10-01T03:10:39 | 132,056,354 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Given a file containing query insertions/deletions (hg38 reference), compares
# to provided query and writes results to file.
# There are two cases:
# 1. infile contains mm10 or canFam3 deletions. In this case, we search canFam3 or mm10
# (respectively), writing to the outfile any loci that are not deleted in the other query.
# This supports the HM/HD hypothesis.
# Infile contains hg38 coordinates of deletion as well as chainID mapping to 2nd query
# 2. infile contains mm10 or canFam3 insertions. In this case, we use the hg38 coordinates
# of each locus to get the chain for canFam3/mm10 respectively. We then find the corresponding sequence in that query and compare it to the inserted sequence from the first query.
# Infile contains hg38 coordinates of insertion as well as the chainID mapping to 2nd query
# python compare_to_query.py ../sorted/mm10/hg38plus_mm10minus.bed.canFam3.pickled ../chains/canFam3/hg38.canFam3.all.chain.pickled ../sorted/evidence/hg38plus_mm10minus_canFam3plus.bed 'deletion'
# python compare_to_query.py ../sorted/loxAfr3/hg38.canFam3.dels.mm10.ins.txt.loxAfr3ID ../chains/loxAfr3/hg38.loxAfr3.all.chain
import sys
import time
import pickle
import edlib  # used by double_insertion for the edit-distance similarity check
from Bio import SeqIO
from collections import defaultdict
MARGIN = 5
UPPER_THRESHOLD = 0.8
LOWER_THRESHOLD = 0.7
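THRESHOLD = 0.8  # similarity cutoff used by double_insertion; 0.8 is an assumed value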
# for filtering outgroup
def search_for_q2_deletions(sites_dict, chain_dict, outfile):
print "Searching for insertions in query2."
print "Writing results to {}.".format(outfile)
out = open(outfile, 'w')
chain_ids = sites_dict.keys()
found_evidence = set([])
# For each chain mapped to in the querydels file
for chain_id in chain_ids:
chain = chain_dict.get(chain_id, None)
if chain == None:
print 'Chain not found!'
continue
else:
print "Chain {} found.".format(chain_id)
chain_len = len(chain)
ref_chain_start = int(chain[0][5])
# For each querydel that maps to that chain
for site in sites_dict[chain_id]:
ref_insertion_start = int(site[3])
ref_insertion_end = int(site[4])
ref_curr_coord = ref_chain_start
site_missing = True
# Search chain
for line in chain[1:]:
print "Current reference coordinate: {}".format(ref_curr_coord)
gapless_block_size = int(line[0])
print "Gapless block size: {}".format(gapless_block_size)
print "Current allowable range: {}-{} + \n".format(ref_curr_coord - MARGIN, ref_curr_coord + gapless_block_size + MARGIN)
# print "Ref coord: {}, insertion start: {}, gapless_block_size: {}.".format(ref_curr_coord, ref_insertion_start, gapless_block_size)
# If we have gone beyond the insertion, break
if ref_curr_coord - MARGIN > ref_insertion_start:
print "Gone past insertion start, breaking."
break
# If whole insertion is contained in a gapless block, site is not evidence
if ref_curr_coord - MARGIN <= ref_insertion_start and ref_curr_coord + gapless_block_size + MARGIN >= ref_insertion_end:
print "Insertion found in query 2."
site_missing = False
break
# If on last line of chain, end loop
if len(line) == 1:
print "Last line of chain"
continue
query_gap_size = int(line[1])
ref_curr_coord += gapless_block_size + query_gap_size
if site_missing == True:
print "Insertion missing in query2: evidence found."
found_evidence.add('\t'.join(site) + '\n')
for evidence in found_evidence:
out.write(evidence)
return
# For hg + q1 - q2 + case and filtering by outgroup
def compare_to_q2_chain(sites_dict, chain_dict, outfile, mode):
print "Searching for evidence, looking for {}s in second query.".format(mode)
print "Writing results to {}.".format(outfile)
out = open(outfile, 'w')
chain_ids = sites_dict.keys()
found_evidence = set([])
# For each chain mapped to in the querydels file
for chain_id in chain_ids:
chain = chain_dict.get(chain_id, None)
if chain == None:
print 'Chain not found!'
continue
else:
print "Chain {} found.".format(chain_id)
# Reference coordinate of chain start
ref_chain_start = int(chain[0][5])
# For each site that maps to that chain
for site in sites_dict[chain_id]:
rangeOver = False
ref_curr_coord = ref_chain_start
ref_ins_start, ref_ins_end = int(site[3]), int(site[4]) # reference coords of insertion
gapless_bp, gap_bp = 0, 0 # number of bp within insertion range that are gapless/gap
################ SEARCH CHAIN ################
for line in chain[1:]:
# Edge case: last line of chain has only gapless block
if len(line) < 3:
break
gapless_block_size = int(line[0])
query_gap_size = int(line[1])
# If moved past insertion, stop search
if rangeOver == True:
break
########## PROCESS GAPLESS BLOCK AND QUERY GAP ##########
for i, block_size in enumerate([gapless_block_size, query_gap_size]):
# Case 1: Insertion range starts within the block
if ref_curr_coord + block_size > ref_ins_start and ref_curr_coord <= ref_ins_start:
# i): only part of insertion range is contained within block
if ref_curr_coord + block_size < ref_ins_end:
if i == 0:
gapless_bp += ref_curr_coord + block_size - ref_ins_start
elif i == 1:
gap_bp += ref_curr_coord + block_size - ref_ins_start
# ii): whole insertion range is contained within block
elif ref_curr_coord + block_size >= ref_ins_end:
rangeOver = True
if i == 0:
gapless_bp += ref_ins_end - ref_ins_start
elif i == 1:
gap_bp += ref_ins_end - ref_ins_start
# Case 2: We are already in the range of the insertion
elif ref_curr_coord + block_size > ref_ins_start and ref_curr_coord > ref_ins_start:
# i) only part of insertion range is contained within block
if ref_curr_coord + block_size < ref_ins_end:
if i == 0:
gapless_bp += block_size
elif i == 1:
gap_bp += block_size
# ii) rest of insertion range is contained within block:
elif ref_curr_coord + block_size >= ref_ins_end:
rangeOver = True
if i == 0:
gapless_bp += ref_ins_end - ref_curr_coord
elif i == 1:
gap_bp += ref_ins_end - ref_curr_coord
if rangeOver == False:
ref_curr_coord += block_size
else:
break
#########################################################
if gapless_bp == 0 and gap_bp == 0: continue
print "Gapless bp: {}, gap bp: {}. Total insertion size: {}.".format(gapless_bp, gap_bp, ref_ins_end-ref_ins_start)
gapless_percentage = gapless_bp/float(gapless_bp + gap_bp)
if gapless_percentage > UPPER_THRESHOLD and mode=='insertion':
print "Insertion found! Gapless percentage: {}".format(gapless_percentage)
found_evidence.add('\t'.join(site) + '\n')
elif gapless_percentage < LOWER_THRESHOLD and mode=='deletion':
print "Deletion found! Gapless percentage: {}".format(gapless_percentage)
found_evidence.add('\t'.join(site) + '\n')
for evidence in found_evidence:
out.write(evidence)
return
# 1. Use the query (mouse) coordinates of the insertion to get the mouse sequence
# 2. Use the reference (human) coordinates (where start and end are actually the same) to get the dog chain and find the corresponding dog coordinates
# 3. Use those coordinates to get the dog sequence
# 4. Find edit distance between dog/mouse sequence
def double_insertion(sites_dict, chain_dict, outfile):
out = open(outfile, 'w')
print "Writing evidence to {}/.".format(outfile)
query1_whole_genome = SeqIO.to_dict(SeqIO.parse('/cluster/u/jschull/phylo/wholegenomes/fasta/mm10.fa', 'fasta'))
print "Loaded query1 genome."
query2_whole_genome = SeqIO.to_dict(SeqIO.parse('/cluster/u/jschull/phylo/wholegenomes/fasta/canFam3.fa', 'fasta'))
print "Loaded query2 genome."
# For each chain
for chain_id in sites_dict.keys():
chain = chain_dict.get(chain_id, None)
if chain is None:
print "Chain not found!"
continue
q2_chr = chain[0][7]
q2_strand = chain[0][9]
q2_chain_start = int(chain[0][10])
ref_chain_start = int(chain[0][5])
# For each insertion that maps to that chain
for site in sites_dict[chain_id]:
print '\t'.join(site)
insertion_size = int(site[1])
# GET Q1 SEQUENCE
q1_chr = site[5]
q1_strand = site[6]
q1_start = int(site[7])
q1_end = int(site[8])
q1_seq = query1_whole_genome[q1_chr][q1_start:q1_end]
print "Mouse coordinates (from site): {}: {}-{} (strand: {})".format(q1_chr, q1_start, q1_end, q1_strand)
# GET Q2 SEQUENCE
ref_position = int(site[3]) # since this is an insertion, ref start and end are the same
print "Human insertion start position: {}".format(ref_position)
ref_left = ref_position - ref_chain_start # bp to ref start point
q2_curr = q2_chain_start
for line in chain[1:]:
#if on last chain line, break
if len(line) < 3: break
gapless_block_size = int(line[0])
ref_block_size = int(line[1])
query_block_size = int(line[2])
if ref_left - gapless_block_size < 0:
q2_start = q2_curr + ref_left
break
ref_left -= gapless_block_size
q2_curr += gapless_block_size
if ref_left - ref_block_size < 0:
q2_start = q2_curr + ref_left
break
ref_left -= ref_block_size
q2_curr += query_block_size
q2_seq = query2_whole_genome[q2_chr][q2_start:q2_start + insertion_size]
# Account for strand
if q2_strand == '-':
q2_seq = q2_seq.reverse_complement()
if q1_strand == '-':
q1_seq = q1_seq.reverse_complement()
q1_seq, q2_seq = str(q1_seq), str(q2_seq)
print "Mouse sequence: {}".format(q1_seq)
print "Dog sequence: {}".format(q2_seq)
# Calculate similarity
len_longer_seq = max(len(q1_seq), len(q2_seq))
			similarity = (len_longer_seq - int(edlib.align(q1_seq, q2_seq)["editDistance"]))/float(len_longer_seq)
# compare to threshold, write if they're similar enough
if similarity > THRESHOLD:
print "Sites have similarity of {}: evidence found!".format(similarity)
out.write('\t'.join(site) + '\n')
else:
print "Sites have similarity of {}: insufficient.".format(similarity)
return
# returns dictionary with (chainID : list of sites) items
def get_sites_dict(sitesfile, start, end):
print "Loading sites."
sites_dict = defaultdict(list)
with open(sitesfile, 'r') as f:
for lineNum, line in enumerate(f.readlines(), 1):
if lineNum < start:
continue
if lineNum > end:
break
line = line.split()
sites_dict[line[len(line) - 1]].append(line)
print "Sites loaded. \n"
return sites_dict
# returns dictionary of (chainID : chain) items
def get_chain_dict(chainfile, sites_dict):
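	# Streams the chain file once and keeps only the chains whose IDs occur in
	# sites_dict; it assumes chain IDs appear in ascending order, so loading can
	# start at minID and stop at maxID.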
print "Loading chains."
begin_chainload = time.time()
# strings
chainIDs = sites_dict.keys()
# print "Num chain IDs: {}".format(len(chainIDs))
maxID = str(max([int(chainID) for chainID in chainIDs]))
# print "Max ID to load: {}.".format(maxID)
minID = str(min([int(chainID) for chainID in chainIDs]))
# print "Min ID to load: {}.".format(minID)
foundFirstChain = False
loadedAllChains = False
chain_dict = defaultdict(list)
with open(chainfile, 'r') as f:
# Initialize 'current chain'
curr_chain_id = -1
curr_chain = []
num_chains = 0
for line in f.readlines():
line = [word.strip() for word in line.split()]
# ignore comments and blank line at end of each chain
if len(line) == 0 or line[0].startswith('#'): continue
################ Deal with line ################
# Only start building dict once reached min chainID
if foundFirstChain == False:
# Set to true once we've reached our first relevant chain
if line[0] == 'chain' and line[12] == minID:
foundFirstChain = True
curr_chain_id = line[12]
else:
continue
# In relevant section of chain files
else:
if line[0] == 'chain':
# Add loaded chain to dictionary
chain_dict[curr_chain_id] = curr_chain
curr_chain = []
# print "Adding chain to dictionary."
# If we've reached designated limit, open new file
if curr_chain_id == maxID:
loadedAllChains = True
break
curr_chain_id = line[12]
curr_chain.append(line)
################ Move to next line ################
# Edge case: maxID is last ID in file
if not loadedAllChains: chain_dict[curr_chain_id] = curr_chain
print "Loaded chains in {} minutes.\n".format((time.time() - begin_chainload)/60)
# print "Loaded these keys: {}.".format(sorted(chain_dict.keys()))
return chain_dict
def main():
sitesfile = sys.argv[1] # file containing indels
chainfile = sys.argv[2] # name of query2 (to compare to)
outfile = sys.argv[3] # file to write evidence to
# 0 = deletion in query 1 (look for insertion in q2),
# 1 = insertion in query 1 (look for insertion in q2)
# 2 = __ in query 1 (look for deletion in q2)
mode = int(sys.argv[4])
# first line to look at in sitesfile
start = int(sys.argv[5])
# last line to look at in sitesfile
if len(sys.argv)==7:
end = int(sys.argv[6])
else:
end = float('inf')
sites_dict = get_sites_dict(sitesfile, start, end)
chain_dict = get_chain_dict(chainfile, sites_dict)
# hg + q1 - q2 +
if mode == 0:
compare_to_q2_chain(sites_dict, chain_dict, outfile, 'insertion')
# hg - q1 + q2 +
elif mode == 1:
double_insertion(sites_dict, chain_dict, outfile)
# hg + q1 - q2 + outgroup -
elif mode == 2:
compare_to_q2_chain(sites_dict, chain_dict, outfile, 'deletion')
return
if __name__ == '__main__':
main() | UTF-8 | Python | false | false | 13,443 | py | 11 | compare_to_query.py | 10 | 0.655508 | 0.639366 | 0 | 461 | 28.16269 | 196 |
Henriquefalconi/PROCESSAMENTO-DE-IMAGENS | 18,408,229,848,126 | 5279297ce24b16de4ae3b178dc01049013c1786a | 2ebfea87e5970229db5de52b6a0709b9fc9877bc | /Processamento de Imagens/P2.py | 005ffd0c3c279cff419e40fb4bb21a991ee78d41 | []
| no_license | https://github.com/Henriquefalconi/PROCESSAMENTO-DE-IMAGENS | dd7714780a050441a59f2d3273ba7b11dc40a5fd | 7c93b802cce64d40f7d918e40f66b211dcf3d697 | refs/heads/master | 2020-08-06T09:26:44.015825 | 2019-10-05T18:13:50 | 2019-10-05T18:13:50 | 212,923,686 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
im = cv2.imread('MULHERES.jpg')
cv2.imshow('imagem',im)
cv2.waitKey(0)
cv2.destroyAllWindows()
| UTF-8 | Python | false | false | 107 | py | 9 | P2.py | 9 | 0.738318 | 0.682243 | 0 | 6 | 16.833333 | 31 |
Guaxinim5573/audacious-player | 6,476,810,688,582 | ac997210f727cef75ca4f791476f27407d53411c | 9d3123cd4ca0f38f55488e8446681ea0dd018948 | /audtool/__init__.py | 38005601c897e3e947cdabb62140e650a9ec4e7f | [
"MIT"
]
| permissive | https://github.com/Guaxinim5573/audacious-player | deffdef48b7feae495ab3cf0b07c45e79504358c | 7bcd2afdd91bb18a41fb70500aaf76eaa17da837 | refs/heads/master | 2023-01-03T13:13:03.176366 | 2020-10-28T03:09:17 | 2020-10-28T03:09:17 | 307,890,542 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import subprocess
import logging
logger = logging.getLogger(__name__)
# Run a command line command and returns stdout
def _run(command):
result = subprocess.run(command, check=True, stdout=subprocess.PIPE, text=True)
result.stdout = result.stdout[:-1]
return result.stdout
def is_playing():
result = subprocess.run(["audtool", "playback-status"], stdout=subprocess.PIPE, text=True)
logger.debug(result.stdout)
if result.returncode == 0 and result.stdout is not None and result.stdout != "stopped":
return True
return False
def status():
return _run(["audtool", "playback-status"])
# Get current song
def get_current_song():
return _run(["audtool", "current-song"])
# Skip to next song
def next():
_run(["audtool", "playlist-advance"])
_run(["audtool", "playback-play"])
def prev():
_run(["audtool", "playlist-reverse"])
_run(["audtool", "playback-play"])
def volume(amount):
_run(["audtool", "set-volume", amount])
def playpause():
_run(["audtool", "playback-playpause"])
# Display all songs in current playlist
def display_songs():
lines = _run(["audtool", "playlist-display"]).splitlines()
    lines.pop() # Removes last item, we don't need that
lines.pop(0) # We also don't need the first item
songs = []
for line in lines:
[pos, name, length] = line.split(" | ")
pos = pos.lstrip()
name = name.rstrip()
songs.append({"name": name, "pos": pos, "length": length})
return songs
def jump(pos):
_run(["audtool", "playlist-jump", pos]) | UTF-8 | Python | false | false | 1,473 | py | 7 | __init__.py | 4 | 0.684997 | 0.68296 | 0 | 55 | 25.8 | 91 |
Ruitongliu224/590-CODES | 14,302,241,100,897 | 2046c75646b8815c5bd9312b415cca94765e5116 | 834b826a2dda410e43e7e16315508ee26775dbed | /LECTURE-CODES/WEEK8/WIKI/02-wiki-topic-search.py | 8081e5ca4c44cee1f77d2dccc061f9d8a0cc10e6 | []
| no_license | https://github.com/Ruitongliu224/590-CODES | 92249ed7ad995bc3b8ccdc07167bc3ba776ce7e6 | 43f581664888efcaa8495c27e6d150da5abfe33e | refs/heads/main | 2023-08-28T04:17:53.995551 | 2021-11-06T18:09:36 | 2021-11-06T18:09:36 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# conda install -c conda-forge wikipedia
# conda install -c conda-forge wordcloud
# pip install wikipedia_sections
import wikipedia
# see https://meta.wikimedia.org/wiki/List_of_Wikipedias
# for language prefixes
# wikipedia.set_lang('es') #es=spanish en=english
#--------------------------
# USER INPUTS
#--------------------------
max_num_pages=2 #max num pages returned by wiki search
verbose=False
#------------------------
#WORD CLOUD PLOT
#------------------------
def generate_word_cloud(my_text):
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
# exit()
# Import package
# Define a function to plot word cloud
def plot_cloud(wordcloud):
# Set figure size
plt.figure(figsize=(40, 30))
# Display image
plt.imshow(wordcloud)
# No axis details
plt.axis("off");
# Generate word cloud
wordcloud = WordCloud(
width = 3000,
height = 2000,
random_state=1,
background_color='salmon',
colormap='Pastel1',
collocations=False,
stopwords = None).generate(my_text)
plot_cloud(wordcloud)
plt.show()
#------------------------
#QUERY WIKI
#------------------------
country_list=['japan','mexico']
# stop_words=['mexi', 'spani', 'japan','food','references','china','chinese','external', 'see','citation', 'links', 'works','cited',]
stop_words=['']
for country in country_list:
# topic='food in '+country
topic='food '+country
#--------------------------
#SEARCH FOR RELEVANT PAGES
#--------------------------
titles=wikipedia.search(topic,results=max_num_pages)
print("TITLES=",titles)
#FUNCTION TO PRINT BASIC ABOUT WIKI PAGE
def print_info(wiki_page):
print("-------------------------")
print(wiki_page.title)
print(wiki_page.url)
# print(wiki_page.sections)
if(verbose):
print(wiki_page.sections)
print(wiki_page.categories)
print(wiki_page.html)
print(wiki_page.images)
print(wiki_page.content)
print(wikipedia.summary(wiki_page.title, auto_suggest=False))
print(wiki_page.references)
print(wiki_page.links[0],len(page.links))
#--------------------------
#LOOP OVER TITLES
#--------------------------
num_files=0
sections=[]
for title in titles:
try:
page = wikipedia.page(title, auto_suggest=False)
#print_info(page)
sections=sections+page.sections
num_files+=1
except:
print("SOMETHING WENT WRONG:", title);
#CONVERT TO ONE LONG STRING
text=''
for string in sections:
words=string.lower().split()
for word in words:
if(word not in stop_words):
text=text+word+' '
# # print(string)
print(text);
generate_word_cloud(text)
#exit() | UTF-8 | Python | false | false | 2,714 | py | 45 | 02-wiki-topic-search.py | 38 | 0.595431 | 0.588799 | 0 | 112 | 23.232143 | 133 |
jxylon/BCD2017 | 5,437,428,631,162 | 672b61e4e3c15e47ba7a6072ce17c0eda683d6a6 | e9d3c8966aa8414d4103e5d9a9e54494d4d59bd3 | /read.py | ab7c70108da8b42ddd31a103bca093de3994bcaa | []
| no_license | https://github.com/jxylon/BCD2017 | 5561f1def4314275805beb6be31c0ee35e2a1931 | af5a278d3247ab423857e2dc4a2fe57d6cf32abb | refs/heads/master | 2021-05-16T07:33:10.797171 | 2019-07-25T12:27:44 | 2019-07-25T12:27:44 | 103,833,773 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 读取文件内容
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from itertools import chain
def read_file(i):
    # File names
fn = ['dsjtzs_txfz_training_sample', 'dsjtzs_txfz_test_sample', 'dsjtzs_txfz_training', 'dsjtzs_txfz_test1']
index_name = ['a1', 'a2', 'a3', 'a4']
    # Read the file
df = pd.read_csv(fn[i] + '.txt', sep=' ', header=None)
    # Custom column index names
    # Return the DataFrame
return df
def x_y_t(df):
    # Extract x, y, t, target, label
x, y, t, label, target, squ = [], [], [], [], [], []
    # Iterate over the DataFrame rows
for l in range(len(df)):
        # Column 0: sequence number
squ.append(df[0][l])
        # Column 1: coordinate trajectory
line = df[1][l].split(';')
        # Column 2: target coordinates
target1=(df[2][l])
target1=target1.split(',')
target.append([target1[0],target1[1]])
        # Column 3: label
label.append(df[3][l])
        # One trajectory
x1, y1, t1= [], [], []
        # Split the coordinate trajectory
for i in range(len(line) - 1):
line1 = line[i].split(',')
# x
x1.append(line1[0])
# y
y1.append(line1[1])
# time
t1.append(line1[2])
x.append(x1)
y.append(y1)
t.append(t1)
return x, y, t, label, target, squ
def draw(x, y, t, label, squ):
count = 0
for i in chain(range(30,60), range(2940, 2970)):
plt.subplot(6, 10, count + 1)
plt.scatter(x[i], y[i], c=t[i], s=20)
plt.xticks([])
plt.yticks([])
plt.title(str(squ[i]) + ' ' + str(label[i]))
count += 1
plt.show()
if __name__ == '__main__':
df = read_file(2)
x, y, t, label, target, squ = x_y_t(df)
draw(x, y, t, label, squ) | UTF-8 | Python | false | false | 1,859 | py | 4 | read.py | 3 | 0.466935 | 0.433583 | 0 | 67 | 23.985075 | 112 |
nordic-institute/X-Road-tests | 10,402,410,834,385 | ad7832dc33fcc15f59f201346fdd1f3e027da101 | c01a58ecd6614128e3c29a70e3e768b220a2a4a2 | /common/xrd-ui-tests-python/tests/xroad_member_access_229/xroad_member_access.py | c35f0be1594007630296e01e9291ccc1a3b05a4d | [
"MIT"
]
| permissive | https://github.com/nordic-institute/X-Road-tests | 772a6d7485606c1f10b61a1260b8fb66111bf0be | e030661a0ad8ceab74dd8122b751e88025a3474a | refs/heads/develop | 2021-06-03T01:38:20.542859 | 2019-03-18T12:16:18 | 2019-03-18T12:16:18 | 125,643,677 | 2 | 3 | MIT | false | 2018-06-14T15:09:21 | 2018-03-17T15:36:32 | 2018-06-12T17:53:11 | 2018-06-14T15:09:21 | 11,233 | 0 | 0 | 0 | Python | false | null | # coding=utf-8
from view_models import clients_table_vm, popups
from helpers import xroad, soaptestclient
from tests.xroad_add_to_acl_218 import add_to_acl
# These faults are checked when we need the result to be unsuccessful. Otherwise the checking function returns True.
faults_unsuccessful = ['Server.ServerProxy.AccessDenied']
# These faults are checked when we need the result to be successful. Otherwise the checking function returns False.
faults_successful = ['Server.ServerProxy.AccessDenied', 'Server.ServerProxy.UnknownService',
'Server.ServerProxy.ServiceDisabled', 'Server.ClientProxy.*', 'Client.*']
def test_xroad_member_access(case, client=None, client_id=None, requester=None, wsdl_index=None, wsdl_url=None,
service_name=None):
'''
MainController test function. Tests XRoad member access.
:return:
'''
self = case
client_id = xroad.get_xroad_subsystem(client)
requester_id = xroad.get_xroad_subsystem(requester)
query_url = self.config.get('ss2.service_path')
query_filename = self.config.get('services.request_template_filename')
query = self.get_xml_query(query_filename)
sync_retry = 0
sync_max_seconds = 0
testclient_params = {
'xroadProtocolVersion': self.config.get('services.xroad_protocol'),
'xroadIssue': self.config.get('services.xroad_issue'),
'xroadUserId': self.config.get('services.xroad_userid'),
'serviceMemberInstance': client['instance'],
'serviceMemberClass': client['class'],
'serviceMemberCode': client['code'],
'serviceSubsystemCode': client['subsystem'],
'serviceCode': xroad.get_service_name(service_name),
'serviceVersion': xroad.get_service_version(service_name),
'memberInstance': requester['instance'],
'memberClass': requester['class'],
'memberCode': requester['code'],
'subsystemCode': requester['subsystem'],
'requestBody': self.config.get('services.testservice_2_request_body')
}
testclient = soaptestclient.SoapTestClient(url=query_url,
body=query,
retry_interval=sync_retry, fail_timeout=sync_max_seconds,
faults_successful=faults_successful,
faults_unsuccessful=faults_unsuccessful, params=testclient_params)
def xroad_member_access():
"""
:param self: MainController class object
:return: None
"""
self.log('*** SERVICE_17 / SERVICE_18')
# UC SERVICE_17 / SERVICE_18 Giving and removing access to XRoad member
# UC SERVICE_17/SERVICE_18 test query (1) from SS2 client subsystem to service bodyMassIndex. Query should fail.
self.log('SERVICE_17/SERVICE_18 test query (1) {0} to service bodyMassIndex. Query should fail.'.format(
query_filename))
case.is_true(testclient.check_fail(), msg='2.2.9-1 test query (1) succeeded')
# UC SERVICE_17/SERVICE_18 set bodyMassIndex address and ACL (give access to SS2 client subsystem)
self.log('SERVICE_17/SERVICE_18 set bodyMassIndex address and ACL (give access to {0}'.format(requester_id))
add_acl = add_to_acl.test_add_subjects(self, client=client, wsdl_url=wsdl_url,
service_name=service_name, service_subjects=[requester_id],
remove_data=False,
allow_remove_all=False)
try:
# Try to add subject to ACL
add_acl()
# UC SERVICE_17/SERVICE_18 test query (2) from SS2 client subsystem to service bodyMassIndex. Query should succeed.
self.log('SERVICE_17/SERVICE_18 test query (2) {0} to service bodyMassIndex. Query should succeed.'.format(
query_filename))
case.is_true(testclient.check_success(), msg='SERVICE_17/SERVICE_18 test query (2) failed')
finally:
# Always try to remove access
# UC SERVICE_17/SERVICE_18 Remove added subject from test service ACL
self.log('SERVICE_17/SERVICE_18 Remove added subject from test service ACL.')
# Open client popup using shortcut button to open it directly at Services tab.
clients_table_vm.open_client_popup_services(self, client_id=client_id)
# Find the table that lists all WSDL files and services
services_table = self.by_id(popups.CLIENT_DETAILS_POPUP_SERVICES_TABLE_ID)
# Wait until that table is visible (opened in a popup)
self.wait_until_visible(services_table)
# Find the WSDL, expand it and select service
clients_table_vm.client_services_popup_open_wsdl_acl(self, services_table=services_table,
service_name=service_name,
wsdl_index=wsdl_index, wsdl_url=wsdl_url)
add_to_acl.remove_subjects_from_acl(self, [requester_id], select_duplicate=True)
# Check if removal was successful - create a test query that should fail.
# UC SERVICE_17/SERVICE_18 test query (3) from SS2 client subsystem to service bodyMassIndex. Query should fail.
self.log('SERVICE_17/SERVICE_18 test query (3) {0} to service bodyMassIndex. Query should fail.'.format(
query_filename))
case.is_true(testclient.check_fail(), msg='SERVICE_17/SERVICE_18 test query (3) succeeded')
return xroad_member_access
| UTF-8 | Python | false | false | 5,722 | py | 383 | xroad_member_access.py | 329 | 0.62618 | 0.611325 | 0 | 114 | 49.192982 | 127 |
DrCrow89/meine_python_uebungen | 9,311,489,105,618 | bc2a47c851ea5a8923e2c7a3a4fec708c79b05d5 | 564154d8bf9899495f8e92727f7279906894382c | /EinstiegInPython/u_modul.py | a759aaf8a9a83e9b1125e21ef589d3862e2297ae | []
| no_license | https://github.com/DrCrow89/meine_python_uebungen | 2d690fe5862ba83466e8e8f81ca63248f81bb50d | 0a07585451ecd3ab3d9798912cae62d8fac21639 | refs/heads/master | 2020-04-17T09:44:58.400529 | 2019-05-21T21:56:22 | 2019-05-21T21:56:22 | 166,471,684 | 0 | 0 | null | false | 2019-05-21T21:56:23 | 2019-01-18T20:58:29 | 2019-05-19T08:15:45 | 2019-05-21T21:56:22 | 758 | 0 | 0 | 0 | Python | false | false | import u_modul_finanz
# Output
print("Es ergibt sich ein Steuerbetrag von", u_modul_finanz.steuer(1800), "Euro")
print("Es ergibt sich ein Steuerbetrag von", u_modul_finanz.steuer(2200), "Euro")
print("Es ergibt sich ein Steuerbetrag von", u_modul_finanz.steuer(2500), "Euro")
print("Es ergibt sich ein Steuerbetrag von", u_modul_finanz.steuer(2900), "Euro")
| UTF-8 | Python | false | false | 360 | py | 42 | u_modul.py | 37 | 0.747222 | 0.702778 | 0 | 6 | 59 | 81 |
Cookinne/Mario | 6,004,364,316,507 | c6ce4873f37862ccf294668725d097b5020ded4a | 27c2fe12518a9f487b7fd5495439709a3c14507a | /Mario.py | 664a7eadece3078f51c0397bdf2be9cf8791d301 | []
| no_license | https://github.com/Cookinne/Mario | 75f522ad249cbedc28d5bbea84850bc3d3c8b63b | 89f614a31443cbfdb682f9f2a32f2f0528d56946 | refs/heads/master | 2022-03-27T08:46:44.159792 | 2019-12-23T05:25:58 | 2019-12-23T05:25:58 | 229,912,230 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import sys
import api.analyze
from api.ip import ipAnalysis
from lib.data import config
from core.webserver import webserver
def check_env():
config['ip'] = ipAnalysis.get_local_ip()
def start():
if get_mod() == "python":
results = api.analyze.analyze_suricata(
"files/suricata/eve.json", data="xy", language="en")
print(results)
elif get_mod() == "web":
webserver()
def get_mod():
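    # A single extra command-line argument switches Mario into web-server mode;
    # otherwise the analysis is run directly in Python.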
if len(sys.argv) == 2:
return "web"
else:
return "python"
if __name__ == "__main__":
check_env()
start()
| UTF-8 | Python | false | false | 585 | py | 10 | Mario.py | 6 | 0.591453 | 0.589744 | 0 | 31 | 17.870968 | 64 |
Grap3fruit22/ChessEngine | 876,173,360,772 | 5475a0a73bd18568e540dcab76fabe540ed77f1c | 2bf3fff514709e93c2491989c2babc31f537655e | /BFTestBasic.py | 09385ac313a8244e47c677ad27e60d0b6154ee8a | []
| no_license | https://github.com/Grap3fruit22/ChessEngine | ff1132b34f188b9bf8e910d97ebb8f93bfbc29f4 | bc60792a153d4a9cdcf860824e539e67de8f510a | refs/heads/master | 2020-05-05T03:25:35.791516 | 2019-05-07T17:07:55 | 2019-05-07T17:07:55 | 179,671,641 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 15 17:38:15 2019
@author: 44775
"""
import chess
import chess.syzygy
import chess.polyglot
#import pdb; pdb.set_trace()
global totalNodes
totalNodes = 0
from ChessCore import calcMinimaxMoveBF
def CalcBranchFact(Quantity):
"""Calculates the average branching fact per move, on average
for a game, and then takes this avg over X number of games."""
alpha = float("-inf")
beta = float("inf")
depth = 1
depthmax = 4
arr = [0] * 781
TT = {}
GameBF = []
EpochBF = []
for num in range(0,Quantity):
"""Loops through to play X games."""
board = chess.Board()
totalNodes = 0
MoveBF = []
GameBF = []
while (not board.is_game_over(claim_draw=False)):
depth = 1
totalNodes = 0
if (board.turn):
"""Plays for W"""
moveval, move, totalNodes, null = calcMinimaxMoveBF(board,depth,board.turn,alpha,beta,0,[])
depth += 1
while(depth<depthmax):
moveval, move, totalNodes, MoveBF = calcMinimaxMoveBF(board,depth,board.turn,alpha,beta,0,[])
                    print([moveval, move])  # show the intermediate search result
depth += 1
GameBF.append(sum(MoveBF)/len(MoveBF))
board.push_uci(move.uci())
else:
"""Plays for B"""
moveval, move, totalNodes, null = calcMinimaxMoveBF(board,depth,board.turn,alpha,beta,0,[])
depth += 1
while(depth<depthmax):
moveval, move, totalNodes, MoveBF = calcMinimaxMoveBF(board,depth,board.turn,alpha,beta,0,[])
depth += 1
GameBF.append(sum(MoveBF)/len(MoveBF))
board.push_uci(move.uci())
"""Keeps track of each game BF as its recorded"""
EpochBF.append(sum(GameBF)/len(GameBF))
return EpochBF, sum(EpochBF)/Quantity
X, Y = CalcBranchFact(1)
print(X)
print(Y) | UTF-8 | Python | false | false | 2,172 | py | 16 | BFTestBasic.py | 15 | 0.515654 | 0.498158 | 0 | 73 | 27.780822 | 113 |
madclumsil33t/atat | 12,343,736,016,828 | f62445c6588936f817e170307cd931948ad0f2bb | 9ba33919959f8cdb722682296e94c7ddad8e9410 | /script/reset_database.py | dd815d095498a37ceb922dcca1ba7a7f08b8c981 | [
"MIT"
]
| permissive | https://github.com/madclumsil33t/atat | 59657549a9fa9561ec640e64035987d0c6c21a5c | 290b4adc58791e95dac73ad17ec6645f55307609 | refs/heads/main | 2023-04-02T16:37:03.567866 | 2020-12-18T21:26:58 | 2020-12-18T21:26:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# Add root application dir to the python path
import os
import sys
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.append(parent_dir)
import sqlalchemy
from alembic import config as alembic_config
from seed_roles import seed_roles
from atat.database import db
from atat.app import make_config, make_app
def reset_database():
conn = db.engine.connect()
meta = sqlalchemy.MetaData(bind=conn)
meta.reflect()
trans = conn.begin()
# drop all tables
meta.drop_all()
trans.commit()
# rerun the migrations
alembic_config.main(argv=["upgrade", "head"])
# seed the permission sets
seed_roles()
if __name__ == "__main__":
config = make_config({"default": {"DEBUG": False}})
app = make_app(config)
print(f"Creating extension {app}")
with app.app_context():
reset_database()
| UTF-8 | Python | false | false | 901 | py | 479 | reset_database.py | 298 | 0.664817 | 0.664817 | 0 | 41 | 20.97561 | 75 |
Anirudh-Muthukumar/Leetcode-Solutions | 13,194,139,559,015 | ab7d55d939a1a28df1d60479e365a353f38c6d8f | e5afe031dcf0e7d11f95d3c39b4d750b7a8cc8de | /More Problems/First Unique Number.py | 28da1f3fe7e0cb200fab020b43199905c7ba588c | []
| no_license | https://github.com/Anirudh-Muthukumar/Leetcode-Solutions | f172695297ac72a94d0a0313e8df08511e3991ba | 5904949fcfab9a842ba2269f68f25d7bd0149a55 | refs/heads/August | 2023-04-01T08:32:40.682661 | 2021-01-12T13:44:22 | 2021-01-12T13:44:22 | 247,761,302 | 0 | 0 | null | false | 2021-01-12T13:55:49 | 2020-03-16T16:15:48 | 2021-01-12T13:44:33 | 2021-01-12T13:55:49 | 1,184 | 0 | 0 | 0 | Python | false | false | class Node:
def __init__(self, val):
self.value = val
self.prev = None
self.next = None
class FirstUnique:
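    """Tracks the first unique number in a stream in O(1) per operation.

    Currently-unique values live in a doubly linked list (between head/tail
    sentinels), `cache` maps value -> node, and `queue` is the set of every
    value seen so far.
    """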
def __init__(self, A):
self.queue = set()
self.cache = {} # mapping {value: Node}
self.size = 0
self.head = Node(float('-inf')) # pointer to first ndoe
self.tail = Node(float('inf')) # pointer to last node
# Connect head and tail
self.head.next = self.tail
self.tail.prev = self.head
for value in A:
if value not in self.queue: # unique number
new_node = Node(value)
self.appendNode(new_node)
self.cache[value] = new_node
self.size += 1
elif value in self.cache: # not unique number
not_unique_node = self.cache[value]
self.removeNode(not_unique_node) # remove node from linked list
del self.cache[value] # remove value from cache
self.size -= 1
self.queue.add(value)
def appendNode(self, node):
''' Adds node to the tail of the linked list '''
node.prev = self.tail.prev
self.tail.prev.next = node
self.tail.prev = node
node.next = self.tail
def removeNode(self, node):
''' Removes node from the linked list '''
prev_node, next_node = node.prev, node.next
prev_node.next = next_node
next_node.prev = prev_node
def showFirstUnique(self):
''' Displays the node pointed by head'''
if self.size==0:
return -1
first_unique_node = self.head.next
return first_unique_node.value
def add(self, value):
''' Check occurence of new value and add/delete'''
if value not in self.queue: # unique number
new_node = Node(value)
self.appendNode(new_node)
self.cache[value] = new_node
self.size += 1
elif value in self.cache: # not unique number
not_unique_node = self.cache[value]
self.removeNode(not_unique_node) # remove node from linked list
del self.cache[value] # remove value from cache
self.size -= 1
self.queue.add(value) | UTF-8 | Python | false | false | 2,486 | py | 504 | First Unique Number.py | 502 | 0.501609 | 0.498793 | 0 | 87 | 27.586207 | 79 |
drmckinney75/SDEV140 | 8,761,733,318,212 | 87c23c4d8c498f18f0c33760584dc21b200c4282 | 297b5da85847639cb827a2e8e4cac1518e660d1f | /M02_Assn1_Ex12_Mckinney_David.py | 1b7245c6e4b4d2934866525d03e9587b8f39a958 | []
| no_license | https://github.com/drmckinney75/SDEV140 | 6067389eb9ec66598e0bc3b604af53e41e49264d | 109a14e6da003f31e3318e8e9c7f292fa0976f18 | refs/heads/main | 2023-03-18T17:02:32.055366 | 2021-03-17T04:53:05 | 2021-03-17T04:53:05 | 348,164,807 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # David Ryan McKinney
# SDEV140 M02_Assn1 Ex12
# 1/26/2021 11am
# V1.0
# Software Volume Discount Calculator
# quantityOrdered (int) quantity of software ordered
# discountDec (float) discount percent as decimal
# discountAmt (float) discount amount in $USD
# softwareCost (int) cost of software package
# totalDiscount (float) total savings
# finalPrice (float) price after discounts
# Welcome message for user
print("Hello consumer. Thank you very kindly for choosing DRM Enterprises for your software needs.")
print("We have a sliding scale discount, depending on the quantity ordered.")
quantityOrdered = int(input("How many packages would you like to order? "))
# assigning software package cost
softwareCost = 99
#calculating subtotal
subtotal = (softwareCost * quantityOrdered)
# calculating discount percentage
if quantityOrdered < 10:
discountDec = 0.0
elif quantityOrdered < 20:
discountDec = 0.10
elif quantityOrdered < 50:
discountDec = 0.20
elif quantityOrdered < 100:
discountDec = 0.30
elif quantityOrdered >= 100:
discountDec = 0.40
#calculating total discount
totalDiscount = (subtotal * discountDec)
#calculating finalPrice
finalPrice = (subtotal - totalDiscount)
#User display screen of total calculations
print("Quantity of Software Purchases: ", quantityOrdered)
print("Pre-Discount Total: $", subtotal)
print("Discount Percentage: ", (discountDec * 100), "%" )
print("Discount :", totalDiscount)
print("Final Sale Price: $", finalPrice)
| UTF-8 | Python | false | false | 1,533 | py | 26 | M02_Assn1_Ex12_Mckinney_David.py | 24 | 0.737769 | 0.705153 | 0 | 45 | 32.066667 | 100 |
snakemake/snakemake | 19,335,942,806,812 | d89d662c5fe6a95a1e82f90ac9e6f91097b464ca | dacdebab897f9287f37a2e85c5705a926ddd36aa | /tests/test_groupid_expand/Snakefile | 0d30974de9b2ba40e45b2c62421c02a0116b85b9 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | https://github.com/snakemake/snakemake | 5d4528193d87786d7b372ca7653ece302ff46965 | 27b224ed12448df8aebc7d1ff8f25e3bf7622232 | refs/heads/main | 2023-09-02T08:37:04.323976 | 2023-08-11T10:02:34 | 2023-08-11T10:02:34 | 212,840,200 | 1,941 | 536 | MIT | false | 2023-09-11T09:51:44 | 2019-10-04T14:58:11 | 2023-09-09T18:40:58 | 2023-09-11T09:51:43 | 92,176 | 1,914 | 467 | 879 | HTML | false | false | shell.executable("bash")
rule all:
input:
expand("bar{i}.txt", i=range(3)),
rule grouplocal:
output:
"foo.{groupid}.txt",
group:
"foo"
shell:
"echo {wildcards.groupid} > {output}"
def get_input(wildcards, groupid):
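    # An input function that accepts a `groupid` argument is resolved with the id of
    # the group job, so each consumer in group "foo" reads the foo.<groupid>.txt
    # written by the grouplocal rule above.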
return f"foo.{groupid}.txt"
rule consumer:
input:
get_input,
output:
"bar{i}.txt",
group:
"foo"
shell:
"cp {input} {output}"
| UTF-8 | Python | false | false | 448 | 675 | Snakefile | 474 | 0.522321 | 0.520089 | 0 | 30 | 13.933333 | 45 |
|
orangedeer/Airbnb | 14,843,407,013,760 | d8540b4f9bd3ecc8d462dfdc04d13fe8aa5668d6 | 357bad603e85d3b22d7d7b996e640803b6fa7595 | /3-crawler/listing/listing/spiders/listing.py | 821f44513f024123c8a864b4171018dd2e3d6312 | []
| no_license | https://github.com/orangedeer/Airbnb | 2750c83aac6ed6b92146c629b5874ee5af1e3196 | 6c34497785ab59c2945bd6ba4e499d6eff0c203a | refs/heads/master | 2018-12-20T14:29:41.629522 | 2018-03-05T02:34:43 | 2018-03-05T02:34:43 | 84,370,170 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date :
# @Author :
# @Usage :
from lxml import etree
import scrapy
import urllib2
import json
import re
import time
import requests
import os
import sys
reload(sys)
sys.setdefaultencoding('utf8')
class Listing(scrapy.Spider):
name = 'listing'
path = '/Users/CJW/Desktop/thu/科研/工作/论文第三弹/data/NYC/'
start_urls = ['https://www.airbnb.com/rooms/' + line.strip() for line in open('/Users/CJW/Desktop/thu/科研/工作/论文第三弹/data/NYC/merge/listings_undone_merge.txt', 'r').readlines()]
handle_httpstatus_list = [404]
def start_requests(self):
file = open(self.path + 'output/listing_info.txt', 'w')
file.close()
file = open(self.path + 'output/listing_similar.txt', 'w')
file.close()
for url in self.start_urls:
yield scrapy.Request(url = url, callback = self.parse)
def parse(self, response):
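        # Parse one listing page: pull listing, host, review and pricing details out of
        # the embedded JSON blobs, append a row to listing_info.txt, then queue the
        # similar-listings request for this listing id.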
url = response.url
pattern_listing = re.compile(r'\D+rooms/(\d+)\D*')
listing_id = pattern_listing.sub(r'\1', url)
if not listing_id.isdigit():
return
content = response.body
page = etree.HTML(content)
about_this_listing = page.xpath(".//span[text()='About this listing']")
about = ''
if len(about_this_listing) == 1:
about_this_listing = about_this_listing[0].getparent().getnext()
if about_this_listing is not None:
for about_text in about_this_listing.getchildren():
about_text_lines = about_text.getchildren()
for about_text_line in about_text_lines:
if about_text_line.text is not None:
about += re.sub(r'[\t\r\n]+', ' ', about_text_line.text)
the_space = page.xpath(".//span[text()='The space']")
if len(the_space) == 0:
the_space = []
if the_space is not None:
slideshow_info = page.find(".//script[@data-hypernova-key='p3hero_and_slideshowbundlejs']").text
slideshow_info = json.loads(slideshow_info[4:len(slideshow_info)-3])
if about_this_listing is None:
about = slideshow_info['slideshowProps']['listing']['summary']
for row in slideshow_info['slideshowProps']['listing']['space_interface']:
key = row['label']
value = row['value']
if value is None:
value = ''
the_space.append(key + value)
else:
for i in range(len(the_space)):
columns = the_space[i].getparent().getparent().getnext().getchildren()[0].getchildren()
if len(columns) == 2:
break
the_space = []
for column in columns:
rows = column.findall(".//div")
for row in rows:
row_children = row.getchildren()
if len(row_children) >= 5:
key = row_children[0].tail
value = row_children[4].text
if value is None:
value = ''
the_space.append(key + value)
else:
row_children = row_children[0].getchildren()
key = row_children[0].tail
if key is not None:
value = row_children[4].text
if value is None:
value = ''
the_space.append(key + value)
the_space = ';'.join(the_space)
listing_info = page.find(".//script[@data-hypernova-key='listingbundlejs']").text
listing_info = json.loads(listing_info[4:len(listing_info)-3])
host_id = str(listing_info['listing']['user']['id'])
photo_count = len(listing_info['listing']['photos'])
amenities = []
for amenity in listing_info['listing']['listing_amenities']:
if amenity['is_present']:
amenities.append(amenity['tag'] + ':1')
else:
amenities.append(amenity['tag'] + ':0')
listing_amenities = ';'.join(amenities)
prices = []
for key in listing_info['listing']['price_interface'].keys():
if listing_info['listing']['price_interface'][key] is None:
prices.append(key + ':None')
else:
prices.append(key + ':' + listing_info['listing']['price_interface'][key]['value'])
price_interface = ';'.join(prices)
description = ''
if listing_info['listing'].has_key('description'):
description = re.sub(r'[\t\r\n]+', ' ', listing_info['listing']['description'])
elif listing_info['listing'].has_key('sectioned_description'):
if listing_info['listing']['sectioned_description'] is not None:
for section in listing_info['listing']['sectioned_description'].keys():
if listing_info['listing']['sectioned_description'][section] is not None:
description += section + ':' + re.sub(r'[\t\r\n]+', ' ', listing_info['listing']['sectioned_description'][section])
description = description.replace('@@', ' ')
structured_house_rules = ';'.join(listing_info['listing']['guest_controls']['structured_house_rules'])
pattern = re.compile(r'\D+(\d+)\D*')
localized_minimum_nights_description = pattern.sub(r'\1', listing_info['listing']['localized_minimum_nights_description'])
review_details_interface = listing_info['listing']['review_details_interface']
review_summary = []
if len(review_details_interface) > 0:
for dimension in review_details_interface['review_summary']:
review_summary.append(dimension['category'] + ':' + str(dimension['value']))
review_summary = ';'.join(review_summary)
review_count = review_details_interface['review_count']
host_other_property_review_count = review_details_interface['host_other_property_review_count']
review_score = review_details_interface['review_score']
else:
review_summary = ''
review_count = 0
host_other_property_review_count = 0
review_score = 0
host_details = listing_info['aboutTheHost']['host_details']
response_rate = host_details['response_rate']
if response_rate is None:
response_rate = 'None'
else:
response_rate = response_rate['rate']
response_time = host_details['response_time']
if response_time is None:
response_time = 'None'
is_superhost = 0
if host_details['show_superhost_badge']:
is_superhost = 1
slideshow_info = page.find(".//script[@data-hypernova-key='p3hero_and_slideshowbundlejs']").text
slideshow_info = json.loads(slideshow_info[4:len(slideshow_info)-3])
if slideshow_info['heroProps'].has_key('pricing_quote'):
pricing_quote = slideshow_info['heroProps']['pricing_quote']
rate = pricing_quote['rate']['amount']
rate_type = pricing_quote['rate_type']
cleaning_fee_as_guest = pricing_quote['cleaning_fee_as_guest']
can_instant_book = 0
if pricing_quote['can_instant_book']:
can_instant_book = 1
else:
cleaning_fee_as_guest = 0
if slideshow_info['slideshowProps']['listing']['price_interface']['cleaning_fee'] is not None:
cleaning_fee_as_guest = slideshow_info['slideshowProps']['listing']['price_interface']['cleaning_fee']['value']
can_instant_book = 0
if slideshow_info['slideshowProps']['listing']['instant_bookable']:
can_instant_book = 1
slideshow_info = json.loads(page.find(".//meta[@id='_bootstrap-room_options']").get('content'))
rate = slideshow_info['nightly_price']
rate_type = 'nightly'
if slideshow_info['isMonthly']:
rate_type = 'monthly'
latitude = page.find(".//meta[@property='airbedandbreakfast:location:latitude']").get('content')
longitude = page.find(".//meta[@property='airbedandbreakfast:location:longitude']").get('content')
wishlist_count = page.find(".//span[@class='wishlist-button-subtitle-text']")
if wishlist_count is None:
wishlist_count = 0
else:
pattern = re.compile(r'\D+(\d+)\D*')
wishlist_count = pattern.sub(r'\1', wishlist_count.text)
file = open(self.path + 'output/listing_info.txt', 'a')
result = [listing_id, host_id, str(photo_count), about, the_space, listing_amenities, price_interface, description, structured_house_rules, localized_minimum_nights_description, review_summary, str(review_count), str(host_other_property_review_count), str(review_score), response_rate, response_time, str(is_superhost), str(rate), rate_type, str(cleaning_fee_as_guest), str(can_instant_book), latitude, longitude, wishlist_count]
file.write(('%s\n') % ('@@'.join(result)))
file.close()
url = 'https://www.airbnb.com/rooms/' + listing_id + '/similar_listings'
yield scrapy.Request(url = url, callback = self.parse_similar_api)
def parse_similar_api(self, response):
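        # The /rooms/<id>/similar_listings endpoint sometimes returns 404; in that case
        # fall back to the api/v2/similar_listings endpoint handled below.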
url = response.url
pattern = re.compile(r'\D+(\d+)\D+')
listing_id = pattern.sub(r'\1', url)
if response.status == 404:
url = 'https://www.airbnb.com/api/v2/similar_listings?key=d306zoyjsyarp7ifhu67rjxn52tv0t20¤cy=USD&locale=en&_format=for_listing_card&listing_id=' + listing_id
yield scrapy.Request(url = url, callback = self.parse_similar_api_v2)
return
content = response.body
content = json.loads(content)
file = open(self.path + 'output/listing_similar.txt', 'a')
if content.has_key('properties'):
for similar_listing in content['properties']:
similar_listing_id = similar_listing['id']
host_id = similar_listing['user']['id']
room_type = similar_listing['room_type']
price_to_display = similar_listing['price_to_display']
instant_book = 0
if similar_listing['instant_book']:
instant_book = 1
review_count = similar_listing['review_count']
picture_count = similar_listing['picture_count']
raw_distance = similar_listing['raw_distance']
result = [listing_id, str(similar_listing_id), str(host_id), room_type, str(price_to_display), str(instant_book), str(review_count), str(picture_count), raw_distance]
file.write(('%s\n') % (','.join(result)))
file.close()
def parse_similar_api_v2(self, response):
url = response.url
pattern = re.compile(r'.+listing_id=(\d+)\D*')
listing_id = pattern.sub(r'\1', url)
content = response.body
content = json.loads(content)
file = open(self.path + 'output/listing_similar.txt', 'a')
if content.has_key('similar_listings'):
for similar_listing in content['similar_listings']:
similar_listing_id = similar_listing['listing']['id']
host_id = similar_listing['listing']['user_id']
room_type = similar_listing['listing']['room_type']
price_to_display = similar_listing['pricing_quote']['rate']['amount']
instant_book = 0
if similar_listing['listing']['instant_bookable']:
instant_book = 1
review_count = similar_listing['listing']['reviews_count']
picture_count = similar_listing['listing']['picture_count']
raw_distance = similar_listing['distance']
result = [listing_id, str(similar_listing_id), str(host_id), room_type, str(price_to_display), str(instant_book), str(review_count), str(picture_count), raw_distance]
file.write(('%s\n') % (','.join(result)))
file.close()
| UTF-8 | Python | false | false | 10,392 | py | 44 | listing.py | 44 | 0.672943 | 0.666667 | 0 | 232 | 43.637931 | 431 |
Michael-py/coding_challenges | 18,794,776,915,149 | 14eed64d024b4cf6bd077676d5e07057600da0b3 | 74db2f30b62f6524c716fc3bc301b4177ec96876 | /occur.py | be2b9cb8f3c85bfa22fad698568ce1e86aa6b2d2 | []
| no_license | https://github.com/Michael-py/coding_challenges | a2e8015f2e936b2e356b8ff0f3e089fbff549140 | fe5b343552fc510ef2eb4511f295767a64f455a2 | refs/heads/main | 2023-05-03T17:58:06.087414 | 2021-05-17T09:29:54 | 2021-05-17T09:29:54 | 368,127,866 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | counts = 0
def occur(s, t, count=0):
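    # Counts how many characters of s equal t; on every mismatch it also increments
    # the global `counts` and recurses on the remaining suffix (the recursive return
    # value is discarded).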
global counts
if len(s) == 1 and s[0] == t:
count += 1
for i in range(len(s)):
if s[i] == t:
count+=1
else:
counts += 1
occur(s[i+1:], t, count)
return count
print(occur("propylleucineglycogen", "i"))
print(counts) | UTF-8 | Python | false | false | 279 | py | 24 | occur.py | 23 | 0.569892 | 0.541219 | 0 | 23 | 11.173913 | 42 |
infoaed/ckanext-harvest | 8,813,272,922,447 | 7ffc8fdd6dd5cda11727227c4c20bb467a53f595 | 474b190ecb13b3385544925e67fe7b219ac265d0 | /ckanext/harvest/plugin.py | 8521bb1848235fc18b016cf5843bdfb05a2afd52 | []
| no_license | https://github.com/infoaed/ckanext-harvest | e959c27879b14d6e884aec9591b1b643dc8168b6 | 52c9fc67a53c0ef27af0f386448a8954a61dbe61 | refs/heads/look_feel_est | 2021-01-12T07:18:19.031748 | 2017-01-06T11:35:06 | 2017-01-06T11:35:06 | 76,940,794 | 0 | 0 | null | true | 2016-12-20T09:04:24 | 2016-12-20T09:04:24 | 2015-02-25T20:26:35 | 2015-02-25T20:49:30 | 1,086 | 0 | 0 | 0 | null | null | null | import os
from logging import getLogger
from pylons import config
from genshi.input import HTML
from genshi.filters import Transformer
import ckan.lib.helpers as h
from ckan.plugins import implements, SingletonPlugin
from ckan.plugins import IRoutes, IConfigurer
from ckan.plugins import IConfigurable, IActions, IAuthFunctions
from ckanext.harvest.model import setup as model_setup
log = getLogger(__name__)
assert not log.disabled
class Harvest(SingletonPlugin):
implements(IConfigurable)
implements(IRoutes, inherit=True)
implements(IConfigurer, inherit=True)
implements(IActions)
implements(IAuthFunctions)
def configure(self, config):
# Setup harvest model
model_setup()
def before_map(self, map):
controller = 'ckanext.harvest.controllers.view:ViewController'
map.redirect('/harvest/', '/harvest') # because there are relative links
map.connect('harvest', '/harvest',controller=controller,action='index')
map.connect('/harvest/new', controller=controller, action='new')
map.connect('/harvest/edit/:id', controller=controller, action='edit')
map.connect('/harvest/delete/:id',controller=controller, action='delete')
map.connect('/harvest/:id', controller=controller, action='read')
map.connect('harvesting_job_create', '/harvest/refresh/:id',controller=controller,
action='create_harvesting_job')
map.connect('/harvest/object/:id', controller=controller, action='show_object')
return map
def update_config(self, config):
here = os.path.dirname(__file__)
template_dir = os.path.join(here, 'templates')
public_dir = os.path.join(here, 'public')
if config.get('extra_template_paths'):
config['extra_template_paths'] += ',' + template_dir
else:
config['extra_template_paths'] = template_dir
if config.get('extra_public_paths'):
config['extra_public_paths'] += ',' + public_dir
else:
config['extra_public_paths'] = public_dir
## IActions
def get_actions(self):
module_root = 'ckanext.harvest.logic.action'
action_functions = _get_logic_functions(module_root)
return action_functions
## IAuthFunctions
def get_auth_functions(self):
module_root = 'ckanext.harvest.logic.auth'
auth_functions = _get_logic_functions(module_root)
return auth_functions
def _get_logic_functions(module_root, logic_functions={}):
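    # Imports the get/create/update/delete submodules under module_root and collects
    # their public callables. Note that the mutable default dict is shared across
    # calls, so successive calls accumulate into the same dictionary.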
for module_name in ['get', 'create', 'update', 'delete']:
module_path = '%s.%s' % (module_root, module_name,)
module = __import__(module_path)
for part in module_path.split('.')[1:]:
module = getattr(module, part)
for key, value in module.__dict__.items():
if not key.startswith('_') and (hasattr(value, '__call__')
and (value.__module__ == module_path)):
logic_functions[key] = value
return logic_functions
| UTF-8 | Python | false | false | 3,060 | py | 28 | plugin.py | 20 | 0.647059 | 0.646732 | 0 | 95 | 31.210526 | 90 |
unhyperbolic/SnapRepr | 9,019,431,357,823 | 5d40e311b7fa34ef5771b38606585d5bc8d7ca17 | 48b062ce35ab2917f2e23fb62df5e91107ad3a3e | /src/bin/SnapReprMagmaSl3NeumannZagier.py | 604675b8e1bcc303d0f4fca37c7062322a6815cb | []
| no_license | https://github.com/unhyperbolic/SnapRepr | 8e7deedd293e4b8f2835058221400cb395c6ec8c | 27cd01f86244c76a1c2881feeda14a480715f2e6 | refs/heads/master | 2020-12-25T17:28:19.781929 | 2016-08-16T06:09:23 | 2016-08-16T06:09:23 | 2,908,257 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
import os
import sys
this_path, this_file = os.path.split(sys.argv[0])
abs_path = os.path.abspath(this_path)
base_path, this_dir = os.path.split(abs_path)
sys.path.append(base_path)
try:
from manifold import sl3NeumannZagierType
from manifold.triangulation import read_triangulation_from_file
import algebra.magma
except ImportError as e:
print e
print
print "This program was called as :", sys.argv[0]
print "Absolute path to this program is :", abs_path
print "Base path is :", base_path
sys.exit(1)
def get_term_order(polys, pre_vars = [], post_vars = []):
all_vars = sum([p.variables() for p in polys], [])
sort_vars = set(all_vars) - set(pre_vars) - set(post_vars)
sort_vars = list(sort_vars)
sort_vars.sort()
return pre_vars + sort_vars + post_vars
def produce_magma_out(trig):
eqns = sl3NeumannZagierType.produce_all_equations_non_degenerate(trig)
term_order = get_term_order(eqns, pre_vars = ['t'])
return algebra.magma.primary_decomposition(eqns, term_order = term_order)
def main():
trig_filename = sys.argv[1]
if trig_filename[-5:] == '.trig':
base_filename = trig_filename[:-5]
else:
base_filename = trig_filename
trig = read_triangulation_from_file(trig_filename)
open(base_filename+'_sl3NeumannZagier.magma','w').write(
produce_magma_out(trig))
main()
| UTF-8 | Python | false | false | 1,440 | py | 50 | SnapReprMagmaSl3NeumannZagier.py | 47 | 0.655556 | 0.649306 | 0 | 52 | 26.692308 | 77 |
MaxSac/cubic_interpolation | 7,172,595,416,070 | df528fe1aa05e58fed98bef9bbdca88d0d76d249 | 1d9a7cc16d67c3e3e166ed26907c71e276ed70f9 | /conanfile.py | 63258a512b9c36dcf76b8c13bd3621617c44f818 | [
"MIT"
]
| permissive | https://github.com/MaxSac/cubic_interpolation | 4c2d58394be5e96c66872bfe73f7199ab8d8240e | d8ba7a19f06afa010747750bdb1c34fd9f811ba5 | refs/heads/main | 2023-08-22T14:45:31.505342 | 2022-12-01T02:55:25 | 2022-12-01T02:55:25 | 310,883,494 | 2 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import os
required_conan_version = ">=1.32.0"
class CubicInterpolationConan(ConanFile):
name = "cubicinterpolation"
homepage = "https://github.com/MaxSac/cubic_interpolation"
license = "MIT"
url = "https://github.com/conan-io/conan-center-index"
description = "Leightweight interpolation library based on boost and eigen."
topics = ("interpolation", "splines", "cubic", "bicubic", "boost", "eigen3")
settings = "os", "compiler", "build_type", "arch"
options = {
"shared": [True, False],
"fPIC": [True, False],
}
default_options = {
"shared": True,
"fPIC": True
}
exports_sources = "*"
generators = "cmake_find_package", "cmake_paths"
_cmake = None
# def config_options(self):
# if self.settings.os == "Windows":
# del self.options.fPIC
# def configure(self):
# if self.options.shared:
# del self.options.fPIC
def requirements(self):
self.requires("boost/1.72.0")
self.requires("eigen/3.3.9")
@property
def _minimum_compilers_version(self):
return {
"Visual Studio": "16",
"gcc": "5",
"clang": "5",
"apple-clang": "5.1",
}
@property
def _required_boost_components(self):
return ["filesystem", "math", "serialization"]
def validate(self):
miss_boost_required_comp = any(getattr(self.options["boost"], "without_{}".format(boost_comp), True) for boost_comp in self._required_boost_components)
if self.options["boost"].header_only or miss_boost_required_comp:
raise ConanInvalidConfiguration("{0} requires non header-only boost with these components: {1}".format(self.name, ", ".join(self._required_boost_components)))
if self.settings.compiler.cppstd:
tools.check_min_cppstd(self, "14")
minimum_version = self._minimum_compilers_version.get(
str(self.settings.compiler), False
)
if not minimum_version:
self.output.warn(
"CubicInterpolation requires C++14. Your compiler is unknown. Assuming it supports C++14."
)
elif tools.Version(self.settings.compiler.version) < minimum_version:
raise ConanInvalidConfiguration(
"CubicInterpolation requires C++14, which your compiler does not support."
)
if self.settings.compiler == "Visual Studio" and self.options.shared:
raise ConanInvalidConfiguration("cubicinterpolation shared is not supported with Visual Studio")
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["BUILD_EXAMPLE"] = False
self._cmake.definitions["BUILD_DOCUMENTATION"] = False
self._cmake.configure()
return self._cmake
def build(self):
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy("LICENSE", dst="licenses" )
cmake = self._configure_cmake()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
def package_info(self):
self.cpp_info.names["cmake_find_package"] = "CubicInterpolation"
self.cpp_info.names["cmake_find_package_multi"] = "CubicInterpolation"
self.cpp_info.libs = ["CubicInterpolation"]
self.cpp_info.requires = ["boost::headers", "boost::filesystem", "boost::math", "boost::serialization", "eigen::eigen"]
| UTF-8 | Python | false | false | 3,657 | py | 7 | conanfile.py | 1 | 0.618266 | 0.61061 | 0 | 101 | 35.207921 | 170 |
goggle/aur | 19,602,230,772,238 | e8d4af1700381168fa497877531a1d841f23ab06 | d823c59cf74fef1d3f3f7be338e7a1c3084c958b | /scripts/generate_readme.py | be914fb22be2840f58c3da30771a43b333aa4da9 | [
"MIT"
]
| permissive | https://github.com/goggle/aur | 134966dd0f097d8ff3948418fd6820c10336c08a | c6f13640d64032094ed1143a81b63fd1a20a61d3 | refs/heads/master | 2023-08-30T20:02:40.130499 | 2023-08-30T01:39:34 | 2023-08-30T01:39:34 | 112,211,050 | 0 | 0 | MIT | false | 2023-09-04T13:13:38 | 2017-11-27T15:07:41 | 2022-04-12T00:35:49 | 2023-09-04T13:13:36 | 146 | 0 | 0 | 1 | Shell | false | false | #!/usr/bin/env python
import argparse
import io
import os
import re
import sys
AUR_USER = 'aexl'
# Prefer to show links to the development repository instead
# to e.g. PyPI
PROJECT_LINKS = {
'kodi-addon-checker': 'https://github.com/xbmc/addon-check',
'python-kodistubs': 'https://github.com/romanvm/Kodistubs',
'python-tableone': 'https://github.com/tompollard/tableone',
}
def main():
parser = argparse.ArgumentParser(
description='Generate README.md for github')
parser.add_argument(
'--check', '-c', action='store_true',
help='Check if the current README.md matches the output of the script')
args = parser.parse_args()
if args.check:
if check():
print('No update of README.md needed.')
else:
print('README.md needs to be updated.')
sys.exit(1)
else:
print(generate_readme(), end='')
def get_dir_names():
return sorted([d for d in os.listdir('..') if os.path.isdir(
os.path.join('..', d)) and '.SRCINFO' in os.listdir(
os.path.join('..', d))])
def parse_package_info(pkgname):
srcinfo = os.path.join('..', pkgname, '.SRCINFO')
d = {
'name': pkgname,
}
regex_template = r'%s\s*=\s*(.+)'
with open(srcinfo, 'r') as f:
content = f.read()
for k in ('pkgdesc', 'pkgver', 'url', 'license'):
reg = regex_template % k
value = re.search(reg, content).group(1)
d.update({k: value})
return d
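# For reference, the regex above expects .SRCINFO lines of this shape
# (values invented):
#   pkgdesc = Example package description
#   pkgver = 1.0.0
#   url = https://example.org
#   license = MIT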
def generate_readme():
title = '# AUR (Arch User Repository) Packages'
badges = [('')]
description = ('My [aur](https://aur.archlinux.org/packages/'
'?K=%s&SeB=m) packages.' % AUR_USER)
packages = [parse_package_info(name) for name in get_dir_names()]
sio = io.StringIO('')
sio.writelines([title, '\n\n'])
for badge in badges:
sio.writelines(badge)
sio.writelines('\n')
sio.writelines(['\n', description, '\n\n## Packages\n\n'])
sio.writelines(
'| Name | Description | License | Project page | AUR page |')
sio.writelines('\n')
sio.writelines('|---|---|---|:---:|:---:|')
sio.writelines('\n')
pline_template = ('| **%s** | %s | %s | [:heavy_check_mark:](%s) '
'| [:heavy_check_mark:](%s) |')
for p in packages:
url = PROJECT_LINKS.get(p['name'])
url = url if url else p['url']
line = pline_template % (
p['name'], p['pkgdesc'], p['license'],
url, 'https://aur.archlinux.org/packages/%s/' % p['name'])
sio.writelines(line)
sio.writelines('\n')
sio.seek(0)
output = sio.read()
sio.close()
return output
def check():
with open(os.path.join('..', 'README.md'), 'r') as f:
current = f.read()
return current == generate_readme()
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 3,012 | py | 10 | generate_readme.py | 6 | 0.554781 | 0.552457 | 0 | 103 | 28.242718 | 79 |
Lazy-LZY/droidlet | 12,884,901,903,076 | 353bde026a5e38a461dcb0ad0aa13365afe181f5 | 7c3742e2aa0f97b7f9e9250e8fdf852aa153ea38 | /droidlet/tools/hitl/utils/hitl_logging.py | 85a7bdd8ff4d4e625d829b5a588a568b3acf70e2 | [
"MIT"
]
| permissive | https://github.com/Lazy-LZY/droidlet | c811d37a053f79f88e6d214ff1fc74687c40d353 | 0a01a7fa7a7c65b2f9a3aebf5e79040940daf9d2 | refs/heads/main | 2023-07-08T17:37:43.190410 | 2023-02-01T19:34:23 | 2023-02-01T19:34:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Copyright (c) Facebook, Inc. and its affiliates.
The hitl_logging.py file provides a HitlLogging class for logging in the HiTL module.
"""
from datetime import datetime, timezone
import logging
import os
import inspect
HITL_TMP_DIR = (
os.environ["HITL_TMP_DIR"] if os.getenv("HITL_TMP_DIR") else f"{os.path.expanduser('~')}/.hitl"
)
DEFAULT_LOG_FORMATTER = logging.Formatter(
"%(asctime)s [%(filename)s:%(lineno)s - %(funcName)s() %(levelname)s]: %(message)s"
)
class HitlLogging:
"""
    The HitlLogging class is a wrapper around Python's built-in logging module that
    allows the caller class to register a logger name and log into a separate file.
    The logger generated by this class provides the same APIs as the python logging library.
    The log is written both to the console and to a log file; the log file is located under
    the HiTL temporary directory and follows the format below:
{Hitl Tmp Dir}/{Batch Id}/{Logger Name}{Timestamp}.log
Parameters:
- batch_id: required - batch_id of the hitl jobs
- logger_name: optional, default is set to caller class name
- formatter: optional, default is DEFAULT_LOG_FORMATTER
- level: optional, default is logging.WARNING (same as python logging module)
"""
def __init__(
self,
batch_id: int,
logger_name=None,
formatter=DEFAULT_LOG_FORMATTER,
level=logging.WARNING,
):
# Get caller class to use as logger name if logger name is not specified
if logger_name is None:
logger_name = inspect.stack()[1][0].f_locals["self"].__class__.__name__
# get timestamp to differentiate different instance
timestamp = datetime.now(timezone.utc).isoformat()
logger_name = f"{logger_name}{timestamp}"
log_dir = os.path.join(HITL_TMP_DIR, f"{batch_id}/pipeline_logs")
os.makedirs(log_dir, exist_ok=True)
log_file = f"{log_dir}/{logger_name}.log"
fh = logging.FileHandler(log_file)
fh.setFormatter(formatter)
sh = logging.StreamHandler()
sh.setFormatter(formatter)
logger = logging.getLogger(logger_name)
logger.setLevel(level)
logger.addHandler(fh)
logger.addHandler(sh)
self._logger = logger
self._log_file = log_file
def get_logger(self):
return self._logger
def get_log_file(self):
return self._log_file
def shutdown(self):
for handler in self._logger.handlers:
self._logger.removeHandler(handler)
handler.close()
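# Usage sketch (batch id, name and message are placeholders):
#   hl = HitlLogging(batch_id=20220101, logger_name="InteractionJob")
#   hl.get_logger().warning("something worth recording")
#   print(hl.get_log_file())
#   hl.shutdown()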
| UTF-8 | Python | false | false | 2,596 | py | 1,005 | hitl_logging.py | 752 | 0.644453 | 0.643683 | 0 | 83 | 30.277108 | 118 |
karafede/WRF_Chem | 6,313,601,930,717 | 4f129425a08e7c57ba685fc9aff96c64f30de14a | 1c5840724994dcb2451eb6a4cc9632e4a658da20 | /WRFChem_new_postproc/sendMail.py | f63e0068b9539d8c0d5a64ad086756a292b3f4b1 | []
| no_license | https://github.com/karafede/WRF_Chem | a4714fa276ffd060c9732bc2de638b2d9455dd7d | 2e51daaa5bbf30672e99938b3cad623292e6fb47 | refs/heads/master | 2020-12-14T10:34:47.803226 | 2020-01-12T07:56:16 | 2020-01-12T07:56:16 | 95,371,002 | 12 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
import os ; import sys ; import smtplib ; import mimetypes ;
from smtplib import SMTP
from smtplib import SMTPException
from email.mime.multipart import MIMEMultipart ;
from email import encoders
from email.message import Message ;
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
#_from = "fog.masdar.ac.ae" ;
_to = ["fkaragulian@masdar.ac.ae","vkvalappil@masdar.ac.ae","mtemimi@masdar.ac.ae","mjweston@masdar.ac.ae"] ;
_sub = "WRF Chem Run"
_content = str(sys.argv[1])
_text_subtype = "plain"
_to=','.join(_to)
mail=MIMEMultipart('alternative')
mail["Subject"] = _sub
#mail["From"] = _from
mail["To"] = _to
mail.attach(MIMEText(_content, _text_subtype ))
try:
_from = "fog@masdar.ac.ae" ;
smtpObj = smtplib.SMTP('mail.masdar.ac.ae',587)
smtpObj.ehlo()
smtpObj.starttls()
smtpObj.login('fog', 'P@ssword321')
smtpObj.sendmail(_from, _to, mail.as_string())
smtpObj.close()
print 'Success'
except:
try:
_from = "fog.masdar@gmail.com" ;
smtpObj = SMTP('smtp.gmail.com',587)
#Identify yourself to GMAIL ESMTP server.
smtpObj.ehlo()
#Put SMTP connection in TLS mode and call ehlo again.
smtpObj.starttls()
smtpObj.ehlo()
#Login to service
smtpObj.login(user='fog.masdar@gmail.com', password='fog@masdar123')
#Send email
smtpObj.sendmail(_from, _to, mail.as_string())
#close connection and session.
smtpObj.quit()
except SMTPException as error:
print "Error: unable to send email : {err}".format(err=error)
quit()
| UTF-8 | Python | false | false | 1,817 | py | 45 | sendMail.py | 40 | 0.585581 | 0.578426 | 0 | 52 | 32.942308 | 118 |
CamiloAguilar/openpose-tda-action-recognition | 12,850,542,197,742 | 3c5f70e5c4607e4013e60cba53f7be330bba6db2 | 40d081db87258dc9c7dd2452f9c665df48f4aab0 | /live_prediction.py | 324eda953a7253ae9368a00808ea9c089f86e303 | []
| no_license | https://github.com/CamiloAguilar/openpose-tda-action-recognition | dc1d62503d650f7beb5083596dfaae1dc6471df6 | 0e95ca353335acdb2f9e15958c59a1074d705441 | refs/heads/master | 2020-06-23T02:06:14.042757 | 2018-08-31T11:59:25 | 2018-08-31T11:59:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import argparse
from sklearn.externals import joblib
import copy
from time import time
import numpy as np
import logging
import os
from shutil import copyfile
from action_recognition.tracker import Tracker, TrackVisualiser
from action_recognition.detector import CaffeOpenpose
from action_recognition.analysis import PostProcessor, ChunkVisualiser
from action_recognition import transforms
def main(args):
os.makedirs(args.out_directory, exist_ok=True)
_, video_ending = os.path.splitext(args.video)
# Copy video file so we can create multiple different videos
# with it as base simultaneously.
tmp_video_file = "output/tmp" + video_ending
copyfile(args.video, tmp_video_file)
classifier = joblib.load(args.classifier)
detector = CaffeOpenpose(args.model_path)
tracker = Tracker(detector, out_dir=args.out_directory)
logging.info("Classes: {}".format(classifier.classes_))
valid_predictions = []
track_people_start = time()
for tracks, img, current_frame in tracker.video_generator(args.video, args.draw_frames):
# Don't predict every frame, not enough has changed for it to be valuable.
if current_frame % 20 != 0 or len(tracks) <= 0:
write_predictions(valid_predictions, img)
continue
# We only care about recently updated tracks.
tracks = [track for track in tracks
if track.recently_updated(current_frame)]
track_people_time = time() - track_people_start
logging.debug("Number of tracks: {}".format(len(tracks)))
predict_people_start = time()
valid_predictions = predict(tracks, classifier, current_frame, args.confidence_threshold)
predict_people_time = time() - predict_people_start
write_predictions(valid_predictions, img)
save_predictions(valid_predictions, args.video, tmp_video_file, args.out_directory)
logging.info("Predict time: {:.3f}, Track time: {:.3f}".format(
predict_people_time, track_people_time))
track_people_start = time()
def predict(tracks, classifier, current_frame, confidence_threshold):
# Extract the latest frames, as we don't want to copy
# too much data here, and we've already predicted for the rest
processor = PostProcessor()
processor.tracks = [copy.deepcopy(t.copy(-50)) for t in tracks]
processor.post_process_tracks()
predictions = [predict_per_track(t, classifier) for t in processor.tracks]
valid_predictions = filter_bad_predictions(
predictions, confidence_threshold, classifier.classes_)
save_predictions_to_track(predictions, classifier.classes_, tracks, current_frame)
no_stop_predictions = [predict_no_stop(track, confidence_threshold)
for track in tracks]
for t in [t for p, t in no_stop_predictions if p]:
valid_predictions.append(t)
log_predictions(predictions, no_stop_predictions, classifier.classes_)
return valid_predictions
def predict_per_track(track, classifier):
all_chunks = []
all_frames = []
divisions = [(50, 0), (30, 10), (25, 0), (20, 5)]
for frames_per_chunk, overlap in divisions:
chunks, chunk_frames = track.divide_into_chunks(frames_per_chunk, overlap)
if len(chunks) > 0:
all_chunks.append(chunks[-1])
all_frames.append(chunk_frames[-1])
if len(all_chunks) > 0:
predictions = classifier.predict_proba(all_chunks)
average_prediction = np.amax(predictions, axis=0)
return all_chunks[0], all_frames[0], average_prediction
else:
return None, None, [0] * len(classifier.classes_)
def write_predictions(valid_predictions, img):
for label, confidence, position, _, _ in valid_predictions:
TrackVisualiser().draw_text(img, "{}: {:.3f}".format(label, confidence), position)
def save_predictions(valid_predictions, video_name, video, out_directory):
for i, (label, _, _, chunk, frames) in enumerate(valid_predictions):
write_chunk_to_file(video_name, video, frames, chunk, label, out_directory, i)
def filter_bad_predictions(predictions, threshold, classes):
valid_predictions = []
for chunk, frames, prediction in predictions:
label, confidence = get_best_pred(prediction, classes)
if confidence > threshold:
position = tuple(chunk[-1, 0, :2].astype(np.int))
prediction_tuple = (label, confidence, position, chunk, frames)
valid_predictions.append(prediction_tuple)
return valid_predictions
def save_predictions_to_track(predictions, classes, tracks, current_frame):
for t, (_, _, prediction) in zip(tracks, predictions):
label, confidence = get_best_pred(prediction, classes)
t.add_prediction(label, confidence, current_frame)
def get_best_pred(prediction, classes):
best_pred_i = np.argmax(prediction)
confidence = prediction[best_pred_i]
label = classes[best_pred_i]
return label, confidence
def write_chunk_to_file(video_name, video, frames, chunk, label, out_dir, i):
_, video_name = os.path.split(video_name)
video_name, _ = os.path.splitext(video_name)
file_name = "{}-{}-{}-{}.avi".format(video_name, frames[-1], i, label)
out_file = os.path.join(out_dir, file_name)
ChunkVisualiser().chunk_to_video_scene(video, chunk, out_file, frames, label)
def predict_no_stop(track, confidence_threshold):
if len(track) < 50:
return False, ()
classifier_prediction = classifier_predict_no_stop(track, confidence_threshold)
# Copy last 200 frames to chunk for visusalisation.
track = track.copy(-200)
chunks, chunk_frames = track.divide_into_chunks(len(track) - 1, 0)
position = tuple(chunks[0, -1, 1, :2].astype(np.int))
prediction_tuple = ("Has not stopped", classifier_prediction,
position, chunks[0], chunk_frames[0])
return classifier_prediction > confidence_threshold, prediction_tuple
def classifier_predict_no_stop(track, confidence_threshold):
# If there haven't been that many predictions, we can't say anything.
if len(track.predictions) < 5:
return 0
number_moving = sum(prediction['label'] == 'moving' and
prediction['confidence'] > confidence_threshold
for prediction in list(track.predictions.values())[-20:])
return number_moving / len(track.predictions)
def log_predictions(predictions, no_stop_predictions, classes):
prints = []
for _, _, prediction in predictions:
prints.append(get_best_pred(prediction, classes))
if no_stop_predictions:
for label, confidence, _, _, _ in [t for p, t in no_stop_predictions if p]:
prints.append((label, confidence))
logging.info("Predictions: " + ", ".join(
["{}: {:.3f}".format(*t)
for t in prints]))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=('Generates action predictions live given a video and a pre-trained classifier. '
'Uses Tracker.tracker.video_generator which yields every track every frame, '
'from which it predicts the class of action using the pre-trained classifier. '
'To get a better prediction, it takes the latest 50, 30, 25, and 20 frames '
'as chunks and selects the likliest prediction among the five * n_classes. '
'It also predicts if a person has not stopped moving (e.g. if they are moving '
'through a self-checkout area without scanning anything) by checking if '
'a proportion of the latest identified actions for a track/person is moving.'))
parser.add_argument('--classifier', type=str,
help='Path to a .pkl file with a pre-trained action recognition classifier.')
parser.add_argument('--video', type=str,
help='Path to video file to predict actions for.')
parser.add_argument('--model-path', type=str, default='../openpose/models/',
help='The model path for OpenPose.')
parser.add_argument('--confidence-threshold', type=float, default=0.6,
help='Threshold for how confident the model should be in each prediction.')
parser.add_argument('--draw-frames', action='store_true',
help='Flag for if the frames with identified frames should be drawn or not.')
parser.add_argument('--out-directory', type=str, default='output/prediction',
help=('Output directory to where the processed video and identified '
'chunks are saved.'))
logging.basicConfig(level=logging.INFO)
args = parser.parse_args()
main(args)
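    # Example invocation (file paths are placeholders, not shipped with the repo):
    #   python live_prediction.py --classifier models/action.pkl \
    #       --video data/checkout.mp4 --out-directory output/prediction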
| UTF-8 | Python | false | false | 8,845 | py | 67 | live_prediction.py | 52 | 0.658112 | 0.650763 | 0 | 217 | 39.760369 | 101 |
dr-dos-ok/Code_Jam_Webscraper | 9,285,719,302,576 | 0d8d360946f0197ea13043df01396035d73f0870 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_145/611.py | 3a95e997fc1e04e0f164fe490791c54600cc2dce | []
| no_license | https://github.com/dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = 'j0hnny'
if __name__ == '__main__':
results = []
with open('A-small-attempt2.in', 'r') as input:
cases = int(input.readline())
for case in range(cases):
(P, Q) = input.readline().split('/')
P = int(P)
Q = int(Q)
print P, Q
div = 3
while div <= P:
if P % div == 0 and Q % div == 0:
P /= div
Q /= div
div += 1
g = 0
while Q > 1:
if Q % 2 != 0:
g = None
break
else:
if P < Q:
g += 1
Q /= 2
if g is None:
results.append('impossible')
else:
results.append(g)
with open('output', 'w') as output:
for case in range(cases):
res = results[case]
s = 'Case #%d: %s\n' % (case+1, res)
print s
output.write(s) | UTF-8 | Python | false | false | 1,101 | py | 60,747 | 611.py | 60,742 | 0.321526 | 0.309718 | 0 | 42 | 24.261905 | 51 |
niemasd/tools | 6,004,364,300,037 | ea2b3138b2c2189abee29ad6c1752fac8f8b071b | bb8ea165bfdbe0f79c89c3e0ca6d3f9c66c9c247 | /hamming.py | 3b94b16d38e0dec182efc0d0b9ffe8c65ec5c455 | []
| no_license | https://github.com/niemasd/tools | 3ace96922893131db77063ce960bec133c94ada6 | 6c411987c810a28cbddb9e6bf37ed87c87e235b4 | refs/heads/master | 2023-07-10T18:56:32.538450 | 2023-06-27T21:24:56 | 2023-06-27T21:24:56 | 71,021,185 | 14 | 8 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! /usr/bin/env python3
'''
Niema Moshiri 2017
Compute all pairwise Hamming distances from a given multiple sequence alignment
'''
import argparse
from sys import stdin,stdout
from common import hamming,readFASTA
# parse arguments
def parseArgs():
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--input', required=False, type=argparse.FileType('r'), default=stdin, help="Input FASTA")
parser.add_argument('-o', '--output', required=False, type=argparse.FileType('w'), default=stdout, help="Output")
parser.add_argument('-p', '--proportion', action='store_true', help="Hamming Distance as proportion of length (instead of count)")
args = parser.parse_args()
return args.input, args.output, args.proportion
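# Example invocation (file names are placeholders):
#   python hamming.py -i aligned.fasta -p > distances.txt
# With no -i/-o the script reads FASTA from stdin and writes distances to stdout.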
# main code execution
infile, outfile, prop = parseArgs()
seqs = readFASTA(infile)
infile.close()
keys = list(seqs.keys())
L = None
for k in keys:
if L is None:
L = len(seqs[k])
assert L == len(seqs[k]), "All sequences must be of equal length"
for i in range(len(keys)-1):
for j in range(i+1,len(keys)):
if prop:
outfile.write('%f\n'%hamming(seqs[keys[i]],seqs[keys[j]],prop=True))
else:
outfile.write('%d\n'%hamming(seqs[keys[i]],seqs[keys[j]],prop=False))
| UTF-8 | Python | false | false | 1,340 | py | 50 | hamming.py | 47 | 0.686567 | 0.681343 | 0 | 35 | 37.285714 | 134 |
PauloVitorRocha/TPPE-Trab1 | 16,140,487,131,022 | bb1ec2ddb7f9c3329e497227b20062e2bfda9268 | 4ac845992c77391a97fe024dafac9a1323787fa1 | /src/decision_node.py | 9da36461eb6ac7b41a2751d488b0c461c856cb49 | []
| no_license | https://github.com/PauloVitorRocha/TPPE-Trab1 | d2fb7b41bba87ea0686fa363dc5b4dabdd7e41d1 | c239f96a54bac4ef7519506d1c10a39e77d3648d | refs/heads/main | 2023-03-31T01:35:19.980234 | 2021-04-04T22:34:51 | 2021-04-04T22:34:51 | 352,805,525 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from transitions import ActivityTransitions
class DecisionStream():
def __init__(self):
self.transitions = []
        # lists, so they can be indexed by m_count / f_count in decision_stream_to_xml
        self.merge_node = []
        self.final_node = []
self.activity_node = []
self.elements = []
def read_activity(self):
act_name = input("Nome da atividade: ")
self.activity_node.append(act_name)
self.elements.append(0)
def read_transition(self):
transition_name = input("Nome da transicao: ")
transition_prob = input("Probabilidade da transicao: ")
transition = ActivityTransitions(transition_name, transition_prob)
self.transitions.append(transition)
def read_final(self):
final_name = input("Nome do no final: ")
        self.final_node.append(final_name)
self.elements.append(1)
def read_merge(self):
merge_name = input("Nome do no de merge: ")
        self.merge_node.append(merge_name)
self.elements.append(2)
def decision_stream_to_xml(self, f, k):
f.write("\t\t\t<DecisionStream count=\"{}\">\n".format(k+1))
a_count = 0
m_count = 0
f_count = 0
for i in self.elements:
if i == 0:
f.write("\t\t\t\t<Activity name=\"{}\"/>\n".format(self.activity_node[a_count]))
a_count += 1
elif i == 1:
f.write("\t\t\t\t<FinalNode name=\"{}\"/>\n".format(self.final_node[f_count]))
f_count += 1
elif i == 2:
f.write("\t\t\t\t<MergeNode name=\"{}\"/>\n".format(self.merge_node[m_count]))
m_count += 1
        f.write("\t\t\t\t<DecisionStreamTransitions>\n")
for transition in self.transitions:
transition.transition_to_xml(f, True)
f.write("\t\t\t\t</DecisionStreamTransitions>\n")
f.write("\t\t\t</DecisionStream>\n")
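# Output sketch (tag names come from the code above; the values are invented):
#   <DecisionStream count="1">
#       <Activity name="Atividade1"/>
#       <FinalNode name="Fim"/>
#       <DecisionStreamTransitions>
#           ...
#       </DecisionStreamTransitions>
#   </DecisionStream>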
| UTF-8 | Python | false | false | 1,936 | py | 8 | decision_node.py | 5 | 0.542872 | 0.53564 | 0 | 54 | 34.537037 | 96 |
Paccy10/ampersand_app_api | 2,534,030,743,499 | f751ca7c76142bd084c4708abfc74786c6d31a31 | 24f505c2617b766c20244b38883a5e9784b7e250 | /api/models/station.py | 03e7d587fe718caefec2b4ac4639e2e628744569 | []
| no_license | https://github.com/Paccy10/ampersand_app_api | 5fce3edf4b0b137e8b6d1e46bacf2e40d677cc5b | 7945e0f92408a9754de43cc398b587018df81c6c | refs/heads/develop | 2022-12-19T04:58:43.027383 | 2020-10-01T11:19:13 | 2020-10-01T11:19:13 | 299,681,483 | 0 | 1 | null | false | 2020-10-01T11:19:14 | 2020-09-29T16:54:52 | 2020-09-30T11:05:26 | 2020-10-01T11:19:14 | 50 | 0 | 0 | 0 | Python | false | false | """ Module for Station Model """
from config.db import db
from .base import BaseModel
class Station(BaseModel):
""" Station Model class """
__tablename__ = 'stations'
location = db.Column(db.String(250), nullable=False, unique=True)
number_of_batteries = db.Column(
db.Integer, nullable=False, unique=True)
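# Usage sketch (assumes the usual Flask-SQLAlchemy session flow via the imported db):
#   station = Station(location='Main Depot', number_of_batteries=12)
#   db.session.add(station)
#   db.session.commit()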
| UTF-8 | Python | false | false | 336 | py | 25 | station.py | 22 | 0.672619 | 0.66369 | 0 | 14 | 23 | 69 |
tkcroat/SC | 15,101,105,035,591 | a5ffce0017924344df4e4cb12bee447017a99896 | 6e8bb755c0ea46670a7fb8f5dda8c10d9c308d4c | /SC_messaging_main.py | 6220ca6ef305f9c77ed51646e8d2088705dd91bd | [
"MIT"
]
| permissive | https://github.com/tkcroat/SC | 2e3c120f000fdea249b185b127734f4c8a0b577a | 4c2c7663298cbd454ff7aba535b689b44b48a7d1 | refs/heads/master | 2021-07-17T17:03:25.214498 | 2020-08-25T16:36:12 | 2020-08-25T16:36:12 | 204,846,677 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 1 17:41:41 2017
@author: tkc
"""
import pandas as pd
import os, sys
if 'C:\\Users\\tkc\\Documents\\Python_Scripts\\SC' not in sys.path:
sys.path.append('C:\\Users\\tkc\\Documents\\Python_Scripts\\SC')
print ('SC folder added')
import pkg.SC_signup_functions as SC
import pkg.SC_messaging_functions as SCmess
import pkg.SC_config as cnf # specifies input/output file directories
#%%
from importlib import reload
reload(SCmess)
#%%
os.chdir('C:\\Users\\tkc\\Documents\\Python_Scripts\\SC')
signupfile='Winter2017_signups.xlsx'
signupfile='Spring2017_signups.xlsx'
signupfile='Fall2018_signups.xlsx'
# Load signups,player and family contact info; format names/numbers, eliminate duplicates
players, famcontact, SCsignup, season, year = SC.loadprocessfiles(signupfile)
teams=pd.read_csv(cnf._INPUT_DIR +'\\Teams_2019.csv', encoding='cp437')
coaches=pd.read_csv(cnf._INPUT_DIR +'\\coaches.csv', encoding='cp437') # common excel file encoding
#teams=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheetname='Teams')
#teams=pd.read_csv(cnf._INPUT_DIR+'\\Teams_2019.csv', encoding='cp437')
#coaches=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheetname='Coaches') # load coach info
Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
# Teams folder under each season?
gdrivedict={
'$GDRIVEWINTER':'https://drive.google.com/drive/u/0/folders/1oQQUiIKneC36P7mvJrVQNfC5M70NFrDW',
'$GDRIVEFALL':'https://drive.google.com/open?id=1DU-6x6wqOkiiAh5OvlzKAsombspgYAnq',
'$GDRIVE_SCHEDULING':'https://docs.google.com/forms/d/e/1FAIpQLSf_f7d1eHXn8Kfm75sqM0Wvv3CKPUemI-GWRWddSkIAqdd_6Q/viewform'
}
#%%
''' Messages to parents: 1) team assignment 2) Recruit missing players 3) missing unis
4) send schedule 5) other message 6) all parent message
'''
SCmess.emailparent_tk(teams, season, year)
# testing ssl connections/ troubleshooting
from urllib.request import urlopen
res = urlopen('https://www.howsmyssl.com/a/check').read() # tls version is 1.2
#%% Messages to coaches
# 1) missing uniforms (coach summary) 2) send team contact lists 3) send bill summary
# 4) other/generic
# missing unis will auto-load old teams
# TODO add sendschedule option
SCmess.emailcoach_tk(teams, coaches, gdrivedict)
# Testing
notifyfamilies(teams, Mastersignups, year, famcontact, emailtitle, blankmess, **kwargs)
teams=teams.drop_duplicates('Team')
mtype='recruit'
mtype='teamassign' # notification of team assignment and CYC card status
#%% Messages to recruits (load after editing)
Recruits=pd.read_excel(signupfile, sheetname='Recruits')
emailtitle='Cabrini-Soulard sports for $FIRST this fall?'
messagefile='messages\\player_recruiting.txt'
SCmess.emailrecruits(Recruits, emailtitle, messagefile)
#%% Messages to all sports parents (typically last 3 seasons)
# Return email list for all players this season and up to prior year of same season
emaillist=SCmess.makeemaillist(Mastersignups, famcontact, season, year, SMS=False)
emailstr=', \r\n'.join(emaillist)
emaillist.to_csv('email_list_3Oct18.csv')
#%% Messages to coaches
SCmess.emailcoach_tk(teams, coaches, gdrivedict)
# Send team billing summary to (head) coaches: team bill summary, contact list,
mtype='bills'; mtype='contacts'; mtype='unis'; # choose message type
kwargs={}
# needed for billing
emailtitle='Fees still owed by your Cabrini team'
messagefile='messages\\coach_email_outstanding_bills.txt'
kwargs.update({'asst':False}) # Optional send to asst. coaches if set to True
billlist=pd.read_csv('Billlist_18Jan17.csv', encoding='cp437') # pruned bill list current season only balances owed
Mastersignups = pd.read_csv('master_signups.csv', encoding='cp437')
kwargs.update({'bills':billlist, 'SUs':Mastersignups})
# needed for team contacts (mtype contacts)
emailtitle='Contact list for your Cabrini team'
messagefile='messages\\coach_email_contacts.txt'
gdrive='https://drive.google.com/open?id=0B9k6lJXBTjfiVDJ3cU9DRkxEMVU' # Sharable link for this season
kwargs.update({'asst':True}) # Optional send to asst. coaches if set to True
kwargs.update({'SUs':Mastersignups,'players':players,'famcontact':famcontact})
kwargs.update({'gdrive':gdrive}) # google drive link for this season
# Needed for outstanding uniform return
kwargs={}
mtype='unis'
missing=pd.read_csv('missingunilist_27Apr17.csv', encoding='cp437')
oldteams=pd.read_excel('Teams_coaches.xlsx', sheetname='Oldteams') # loads all old teams in list
kwargs.update({'mformat':'txt'}) # html or string/text message format (testing only)
kwargs.update({'oldteams':oldteams,'missing':missing})
kwargs.update({'asst':False}) # Optional send to asst. coaches if set to True
messagefile='messages\\coach_email_outstanding_unis.txt'
emailtitle='Return of uniforms for your Cabrini team'
messagefile='coach_email_log_29Apr17.html' # test send
# Write batch e-mails to coaches into html log file
SCbill.testcoachemail(teams, coaches, mtype, emailtitle, messagefile, **kwargs)
SCbill.emailcoaches(teams, coaches, mtype, emailtitle, messagefile, **kwargs)
| UTF-8 | Python | false | false | 5,097 | py | 27 | SC_messaging_main.py | 25 | 0.757504 | 0.736512 | 0 | 113 | 44.097345 | 126 |
cristina-cojocaru/syntax-project-with-SpaCy | 9,526,237,468,881 | cb3a647891db1321bd39f1c8998238e220437a52 | fce81535e93d68ab6a07880df3611c682fc65b4a | /Syntax_work.py | 49fecde55a93bacdb15300911e22d346a5b38593 | []
| no_license | https://github.com/cristina-cojocaru/syntax-project-with-SpaCy | cee00732a5e95c65b15869fe3c449f93695fdb66 | 3abea86d5f89399b4723869311a7944960a05952 | refs/heads/master | 2022-11-27T17:18:07.951962 | 2020-08-12T12:56:09 | 2020-08-12T12:56:09 | 286,997,264 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import spacy
import sys
from collections import Counter
nlp_fr = spacy.load('fr')
class Textfile:
def __init__(self, name, encoding="utf-8"):
self._name = name
self._encoding = encoding
self._content = ""
def read(self):
try:
f = open(self._name, encoding=self._encoding, mode='r')
self._content = f.read()
f.close()
except OSError as err:
print("OS error: {0}".format(err))
def calcul(self, outputfile):
try:
f = open(outputfile, encoding=self._encoding, mode='w')
verbs={}
doc=nlp_fr(self._content)
counts=Counter()
            # count the occurrences of each verb
for sent in doc.sents:
for tok in sent:
# skip spaces
if tok.pos_ == 'SPACE': continue
if tok.pos_ == "VERB":
counts[tok.lemma_]+=1
            # build an ordered list of all verbs (ordered by number of occurrences)
verbes = sorted(counts, key=counts.get, reverse=True)
            # build a dictionary keyed by each verb, whose values are the complement percentages
            # look up the complements for each verbal lemma
            # dictionary holding all the complements
for verb in verbes:
complements={'obj':0,'obl':0,'iobj':0,'ccomp':0,'xcomp':0}
for sent in doc.sents:
for tok in sent:
# skip spaces
if tok.pos_ == 'SPACE': continue
                        # if the word depends on this verb
if tok.head.text == verb:
for key in complements.keys():
if tok.dep_==key:
complements[key]+=1
for key in complements.keys():
                    # compute the percentages
complements[key] = int(complements[key]*100/counts[verb])
                # for each verb, write the number of occurrences to the file
f.write(str(counts[verb]))
f.write(" ")
                # the verbal lemma
f.write(verb)
f.write(" ")
                # and the percentages
f.write(str(complements))
f.write("\n")
f.close()
except OSError as err:
print("OS error: {0}".format(err))
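# Each line written by calcul() looks like this (verb and figures invented):
#   12 manger {'obj': 41, 'obl': 8, 'iobj': 0, 'ccomp': 0, 'xcomp': 16}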
def main():
    # make sure the filename argument was given
if sys.argv is None or len(sys.argv) <2:
print("you need to insert the name of the file")
exit()
    filename = sys.argv[1] # the program takes the file name as a parameter
    tf=Textfile(filename) # instantiate the Textfile class
    tf.read() # call read() to load the file
    tf.calcul("verbes.txt") # call the calcul function
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 3,148 | py | 3 | Syntax_work.py | 1 | 0.495697 | 0.490915 | 0 | 89 | 33.224719 | 109 |
stwobe/dropboxcopy01 | 15,650,860,844,275 | 2a6d485821ab8b84ac77d71dca9bb8e7bfdce660 | 0f46ff5c2cc972dc6636fc3a3b90dd42e732bced | /1Python/Python3/11_Scripts_2016/fishies.py | baeb3455ccfc30617a930cf74f1c8b4086b8a32c | []
| no_license | https://github.com/stwobe/dropboxcopy01 | a01b93397d38c42545f7d5b62385ddf16c9645dd | 12eab06f1004d00198536ac15c3d112bd3b5da7d | refs/heads/master | 2021-01-13T13:56:45.594145 | 2019-01-15T23:21:11 | 2019-01-15T23:21:11 | 72,943,249 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #title: fishies.py
#author: Steve Roberts
#date: Sunday February 21st 2016
#usage: Run this script and it will print stuff, calling fish() below with a value of 10
#alternative usage - import this file and call fish(x) yourself (see the comment at the bottom)
import time
def fish(x):
time.sleep(0.7)
print("\n" * 600)
time.sleep(0.9)
print ("Fishy wishers!!!!!\n" * x)
print("")
time.sleep(0.6)
print(x)
time.sleep(0.7)
print (x ** x)
print("")
time.sleep(0.8)
print ("""Blooop
Blooooooop
Bloooooooooop
Bloooooooooooooooooop
Bloooooooooooooooooooooooooooop
Bloooooooooooooooooooooooooooooooooooooooop
Bloooooooooooooooooooooooooooooooooooooooooooooooop
Blooooooooooooooooooooooooooooooooooooooooooooooooooooooooop""" * x)
time.sleep(0.8)
print("Yayyy!")
time.sleep(1.2)
fish(10) #comment this line out if you want to use this as a function to be imported.
#and call it with fishies.fish(3), for example
| UTF-8 | Python | false | false | 859 | py | 139 | fishies.py | 129 | 0.718277 | 0.688009 | 0 | 31 | 26.548387 | 81 |
declangallen/AWS_admin | 2,731,599,222,630 | 1c6756cbd34dc0754253222c3f27744cfc0b2d07 | 35f47325babc9b267fc5999aabd1ab614eb2baf2 | /upload_to_S3.py | 180c6c7f21250c4b2bf35712a3b1a329fec07fa5 | []
| no_license | https://github.com/declangallen/AWS_admin | 1e183d507780c0ceab4e8e9ad001c4e4259b721a | 00db2c125b65590e6e2c711fad1ff78b7952b266 | refs/heads/master | 2020-08-14T13:47:50.337219 | 2019-10-15T01:36:59 | 2019-10-15T01:36:59 | 215,179,639 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import boto3
import os
import uuid
s3_client = boto3.client('s3')
s3_resource = boto3.resource('s3')
# s3_resource.create_bucket(Bucket='dgallens3testpython1',
# CreateBucketConfiguration={
# 'LocationConstraint': 'eu-west-1'})
def create_bucket_name(bucket_prefix):
# The generated bucket name must be between 3 and 63 chars long
return ''.join([bucket_prefix, str(uuid.uuid4())])
def create_bucket(bucket_prefix, s3_connection):
session = boto3.session.Session()
current_region = session.region_name
bucket_name = create_bucket_name(bucket_prefix)
bucket_response = s3_connection.create_bucket(
Bucket=bucket_name,
CreateBucketConfiguration={
'LocationConstraint': current_region})
print(bucket_name, current_region)
return bucket_name, bucket_response
# first_bucket_name, first_response = create_bucket(
# bucket_prefix='dgtest123',
# s3_connection=s3_resource.meta.client)
first_bucket = s3_resource.Bucket(name='dgallens3testpython')
first_object = s3_resource.Object(
bucket_name='dgallens3testpython', key='table.txt')
first_object.upload_file('table.txt')
def copy_to_bucket(bucket_from_name, bucket_to_name, file_name):
copy_source = {
'Bucket': bucket_from_name,
'Key': file_name
}
s3_resource.Object(bucket_to_name, file_name).copy(copy_source)
copy_to_bucket('dgallens3testpython', 'dgallens3testpython1', 'table.txt') | UTF-8 | Python | false | false | 1,492 | py | 3 | upload_to_S3.py | 2 | 0.688338 | 0.66756 | 0 | 44 | 32.931818 | 74 |
Nebula1084/crowd | 4,526,895,540,850 | 6b624821f67b16f9921675970717af972dae474d | 0aad0d91502da270b40224fcfaa96d8fbc972736 | /loader/word2vec.py | f5470bd326d6f8df58dc8f8c5432416184b83147 | []
| no_license | https://github.com/Nebula1084/crowd | 5836b247db0d4821e06940befddbeaa1010dd1fb | 12c52c2edbc92b009f30c3011bccc9b67f899b4b | refs/heads/master | 2021-05-01T10:03:23.797342 | 2018-03-12T04:34:15 | 2018-03-12T04:34:15 | 121,073,744 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import gc
import os
import pickle
import gensim
import numpy as np
class Word2Vec(object):
def __init__(self, path, embed_size):
self.path = path
self.word2vec = None
self.embeddings = None
self.vocabulary_word2index = None
self.vocabulary_index2word = None
self.embed_size = embed_size
self.vocab_size = 0
def load(self):
print("Start to load model from %s" % self.path)
self.word2vec = gensim.models.KeyedVectors.load_word2vec_format(self.path, binary=True)
self.vocabulary_word2index, self.vocabulary_index2word = self.create_vocabulary()
self.vocab_size = len(self.vocabulary_index2word)
self.embeddings = self.create_embeddings()
def create_vocabulary(self, name_scope='word2vec'):
cache_path = './data/' + name_scope + "_word_vocabulary.pik"
print('Cache_path:', cache_path, 'file_exists:', os.path.exists(cache_path))
if os.path.exists(cache_path):
print('Use exist vocabulary cache')
with open(cache_path, 'rb') as data_f:
vocabulary_word2index, vocabulary_index2word = pickle.load(data_f)
return vocabulary_word2index, vocabulary_index2word
else:
print('Create new vocabulary')
vocabulary_word2index = {'PAD_ID': 0, 'EOS': 1}
vocabulary_index2word = {0: 'PAD_ID', 1: 'EOS'}
special_index = 1
for i, vocab in enumerate(self.word2vec.vocab):
vocabulary_word2index[vocab] = i + 1 + special_index
vocabulary_index2word[i + 1 + special_index] = vocab
with open(cache_path, 'wb') as data_f:
pickle.dump((vocabulary_word2index, vocabulary_index2word), data_f)
return vocabulary_word2index, vocabulary_index2word
def create_indices(self, text):
print('Create new indices')
indices = []
for i, sentence in enumerate(text):
index = [self.vocabulary_word2index.get(word, 0) for word in sentence]
indices.append(index)
return np.array(indices)
def create_embeddings(self):
print('Start to create embeddings')
count_exist = 0
count_not_exist = 0
word_embedding = [[]] * self.vocab_size # create an empty word_embedding list.
word_embedding[0] = np.zeros(self.embed_size) # assign empty for first word:'PAD'
bound = np.sqrt(6.0) / np.sqrt(self.vocab_size) # bound for random variables.
for i in range(1, self.vocab_size): # loop each word
word = self.vocabulary_index2word[i] # get a word
# noinspection PyBroadException
try:
embedding = self.word2vec[word] # try to get vector:it is an array.
except Exception:
embedding = None
if embedding is not None: # the 'word' exist a embedding
word_embedding[i] = embedding
count_exist = count_exist + 1 # assign array to this word.
else: # no embedding for this word
word_embedding[i] = np.random.uniform(-bound, bound, self.embed_size)
count_not_exist = count_not_exist + 1 # init a random value for the word.
del self.word2vec
gc.collect()
word_embedding_final = np.array(word_embedding) # covert to 2d array.
return word_embedding_final
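# Usage sketch (the binary path and sentence are placeholders):
#   w2v = Word2Vec('GoogleNews-vectors-negative300.bin', embed_size=300)
#   w2v.load()
#   indices = w2v.create_indices([['hello', 'world']])
#   vectors = w2v.embeddings[indices[0]]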
| UTF-8 | Python | false | false | 3,444 | py | 21 | word2vec.py | 15 | 0.606562 | 0.593496 | 0 | 81 | 41.518519 | 95 |
rymate1234/Gnome-IRC | 14,774,687,507,077 | df8da40fc07b40d9762dfc906f9530f649ebaf2c | eedce303b54ba9c7db482b36b9da7aa4312a799a | /gnomeirc/MainWindow.py | aa291b909e8ba18a4e2bb80e17127d7c46cd04f1 | [
"MIT"
]
| permissive | https://github.com/rymate1234/Gnome-IRC | a9b65cfb39435fcdf58aafe050ca6e6fcc99e51f | 5f6269b5cd414f8a6e122099f4496949d183f219 | refs/heads/master | 2021-01-17T16:59:49.332087 | 2015-06-21T11:10:06 | 2015-06-21T11:10:06 | 30,494,811 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python2
from twisted.internet import gtk3reactor
from gnomeirc import Utils
from gnomeirc.ChannelDialog import ChannelDialog
from gnomeirc.GtkChannelListBoxItem import GtkChannelListBoxItem, GtkChannelCloseButton
from twisted.internet import defer
from gnomeirc.TabCompletionEntry import TabCompletionEntry
from gnomeirc.UserList import UserList
gtk3reactor.install()
from twisted.internet import reactor
from gi.repository import Gtk, Gio, Gdk
import time, os
from gnomeirc.ConnectDialog import ConnectDialog
# twisted imports
from twisted.words.protocols import irc
from twisted.internet import protocol
if os.path.dirname(os.path.realpath(__file__)).startswith("/usr/local/"):
DATADIR = "/usr/local/share/gnome-irc/"
elif os.path.dirname(os.path.realpath(__file__)).startswith("/usr/"):
DATADIR = "/usr/share/gnome-irc/"
else:
DATADIR = ""
css = """
#toolbar-gnomeirc {
border-radius: 0;
}
"""
class Client(irc.IRCClient):
def __init__(self, *args, **kwargs):
self._namescallback = {}
self._whoiscallback = {}
self.channels = {}
self.channel_users = {}
self.chan_list_items = {}
self.selected = ""
def _get_nickname(self):
return self.factory.username
def _get_password(self):
return self.factory.password
nickname = property(_get_nickname)
password = property(_get_password)
versionName = "GnomeIRC Alpha"
def connectionMade(self):
irc.IRCClient.connectionMade(self)
builder = Gtk.Builder()
builder.add_from_file(DATADIR + "data/main_view.glade")
self.message_entry_container = builder.get_object("message_entry_container")
self.messages_view = builder.get_object("messages")
self.messages_scroll = builder.get_object("messages_scroll")
self.ircview = builder.get_object("ircviewpane")
self.chan_list = builder.get_object("channel_list")
self.message_entry = TabCompletionEntry(self.update_completion)
self.message_entry_container.add(self.message_entry)
# get some stuff
self.parent = self.factory.parent
self.parent.addTab(self.ircview, self.factory.server_name, self)
self.addChannel(self.factory.server_name)
        self.log("[Connection established at %s]" %
time.asctime(time.localtime(time.time())), self.factory.server_name)
    def signedOn(self):
        """Called when the client has successfully signed on to server."""
        self.log("Successfully connected!", self.factory.server_name)
self.message_entry.connect("key-press-event", self.keypress)
self.chan_list.connect("row-selected", self.channel_selected)
self.messages_view.connect('size-allocate', self.on_new_line)
self.join(self.factory.channel)
def receivedMOTD(self, motd):
"""Called when the client gets the motd"""
self.log("Server MOTD is: ", self.factory.server_name)
self.log("\n".join(motd), self.factory.server_name)
def show_users(self):
users = self.channel_users[self.selected]
users.get_users().sort()
self.users_popover = Gtk.Popover().new(self.parent.users_button)
self.users_popover.set_border_width(6);
self.users_popover.set_position(Gtk.PositionType.TOP)
self.users_popover.set_modal(True)
self.users_popover.set_vexpand(False)
self.users_popover.connect("closed", self.users_list_closed)
self.users_popover.set_size_request(160,300)
self.populate_users_menu(users)
self.users_popover.add(self.users_list_container)
self.users_popover.show_all()
def populate_users_menu(self, users):
self.users_list_add("Operators", True)
ops = [user for user in users if user.startswith("@")]
for s in ops:
self.users_list_add(s)
self.users_list_add("Voiced", True)
voiced = [user for user in users if user.startswith("+")]
for s in voiced:
self.users_list_add(s)
self.users_list_add("Users", True)
users = [user for user in users if not(user.startswith("+") or user.startswith("@"))]
for s in users:
self.users_list_add(s)
def users_list_add(self, user, bold=False):
row = Gtk.ListBoxRow()
hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)
row.add(hbox)
vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
hbox.pack_start(vbox, True, True, 0)
if bold:
label1 = Gtk.Label()
label1.set_markup("<b>" + user + "</b>")
else:
label1 = Gtk.Label(user, xalign=0)
vbox.pack_start(label1, True, True, 0)
row.show_all()
self.users_list.add(row)
def users_list_closed(self, *args):
self.users_popover.remove(self.users_list)
self.users_list.destroy()
del self.users_list
self.users_popover.destroy()
del self.users_popover
def dialog_response_join(self, dialog, response):
if response == Gtk.ResponseType.OK:
channel = dialog.channel.get_text()
dialog.destroy()
self.join(channel)
elif response == Gtk.ResponseType.CANCEL:
dialog.destroy()
def connectionLost(self, reason):
irc.IRCClient.connectionLost(self, reason)
self.log("[Disconnected at %s]" %
time.asctime(time.localtime(time.time())), self.factory.server_name)
# callbacks for events
def keypress(self, widget, event):
adj = self.messages_scroll.get_vadjustment()
adj.set_value(adj.get_upper() - adj.get_page_size())
if event.keyval == Gdk.KEY_Return:
self.handle_message(widget.get_text())
widget.set_text("")
return True
if event.keyval == Gdk.KEY_Tab:
return True
return False
def handle_message(self, message):
if message.startswith("/"):
cmd_args = message.split(" ")
if cmd_args[0] == "/me":
message = message.replace("/me ", "")
self.describe(self.selected, message)
self.log("* %s %s" % (self.nickname, message), self.selected)
elif cmd_args[0] == "/join":
channel = message.replace("/join ", "")
self.join(channel)
else:
self.msg(self.selected, message)
self.log("<%s> %s" % (self.nickname, message), self.selected)
def channel_selected(self, widget, selected):
self.selected = selected.channel
self.messages_view.set_buffer(self.channels[selected.channel])
def update_completion(self, prefix):
user_store = Gtk.ListStore(str)
if self.selected == "":
user_store.append([""])
return user_store
for user in self.channel_users[self.selected].get_raw_users():
if user.startswith(prefix):
user_store.append([user])
return user_store
def joined(self, channel):
self.addChannel(channel)
self.selected = channel
self.channel_users[channel] = UserList()
self.log("[You have joined %s]" % channel, channel)
def on_new_line(self, widget, event, data=None):
adj = self.messages_scroll.get_vadjustment()
adj.set_value(adj.get_upper() - adj.get_page_size())
def privmsg(self, user, channel, msg):
"""This will get called when the bot receives a message."""
if not any(channel in s for s in self.channels):
self.addChannel(channel) # multiple messages_scrollchannels for znc
if channel == self.selected:
adj = self.messages_scroll.get_vadjustment()
adj.set_value(adj.get_upper() - adj.get_page_size())
user = user.split('!', 1)[0]
self.log("<%s> %s" % (user, msg), channel)
def action(self, user, channel, msg):
"""This will get called when the bot sees someone do an action."""
user = user.split('!', 1)[0]
self.log("* %s %s" % (user, msg), channel)
# irc callbacks
def irc_NICK(self, prefix, params):
"""Called when an IRC user changes their nickname."""
old_nick = prefix.split('!')[0]
new_nick = params[0]
for channel, users in self.channel_users.iteritems():
if users.has_user(old_nick):
self.log("%s is now known as %s" % (old_nick, new_nick), channel)
users.change_user(old_nick, new_nick)
# For fun, override the method that determines how a nickname is changed on
# collisions. The default method appends an underscore.
def alterCollidedNick(self, nickname):
"""
Generate an altered version of a nickname that caused a collision in an
effort to create an unused related name for subsequent registration.
"""
return nickname + '_'
def log(self, message, channel):
end_iter = self.channels[channel].get_end_iter()
timestamp = time.strftime("[%H:%M:%S]", time.localtime(time.time()))
self.channels[channel].insert(end_iter, '%s %s\n' % (timestamp, message))
def addChannel(self, channel):
row = GtkChannelListBoxItem(channel)
hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)
row.add(hbox)
vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
hbox.pack_start(vbox, True, True, 0)
label1 = Gtk.Label(channel, xalign=0)
vbox.pack_start(label1, True, False, 0)
button = GtkChannelCloseButton(channel)
button.props.valign = Gtk.Align.CENTER
button.connect("clicked", self.on_close_clicked)
hbox.pack_start(button, False, False, 0)
row.show_all()
self.chan_list.add(row)
self.channels[channel] = Gtk.TextBuffer.new(None)
self.chan_list_items[channel] = row
self.chan_list.select_row(row)
def on_close_clicked(self, widget):
chan_list_item = self.chan_list_items[widget.channel]
prev_chan_list_item = self.chan_list.get_row_at_index(chan_list_item.get_index() - 1)
self.chan_list.remove(chan_list_item)
self.part(widget.channel)
self.chan_list.show_all()
self.selected = ""
self.chan_list.select_row(prev_chan_list_item)
# Names command - used for the users list
def names(self, channel):
channel = channel.lower()
d = defer.Deferred()
if channel not in self._namescallback:
self._namescallback[channel] = ([], [])
self._namescallback[channel][0].append(d)
self.sendLine("NAMES %s" % channel)
return d
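    # Usage sketch: names() returns a Deferred, e.g.
    #   self.names('#channel').addCallback(lambda nicklist: ...)
    # (the commented-out call near the bottom of this file follows the same pattern)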
def irc_RPL_NAMREPLY(self, prefix, params):
channel = params[2]
nicklist = params[3].split(' ')
if channel not in self._namescallback:
self.channel_users[channel].add_users(nicklist)
return
n = self._namescallback[channel][1]
n += nicklist
def irc_RPL_ENDOFNAMES(self, prefix, params):
channel = params[1]
if channel not in self._namescallback:
return
callbacks, namelist = self._namescallback[channel]
for cb in callbacks:
cb.callback(namelist)
del self._namescallback[channel]
# handling for the WHOIS command
def performWhois(self, username):
username = username.lower()
d = defer.Deferred()
if username not in self._whoiscallback:
self._whoiscallback[username] = ([], [])
self._whoiscallback[username][0].append(d)
self.whois(username)
return d
def irc_RPL_WHOISCHANNELS(self, prefix, params):
nickname = params[1].lower()
callbacks, namelist = self._whoiscallback[nickname]
n = self._whoiscallback[nickname][1]
n += params
for cb in callbacks:
cb.callback(namelist)
del self._whoiscallback[nickname]
class IRCFactory(protocol.ClientFactory):
"""A factory for Clients.
A new protocol instance will be created each time we connect to the server.
"""
# the class of the protocol to build when new connection is made
protocol = Client
def __init__(self, username, channel, password, server_name, parent):
self.channel = channel
self.username = username
self.password = password
self.server_name = server_name
self.parent = parent
def clientConnectionLost(self, connector, reason):
"""If we get disconnected, show an error."""
self.showError('Connection lost! Reason: %s\n' % (reason))
# connector.connect()
def clientConnectionFailed(self, connector, reason):
self.showError('Connection failed! Reason: %s\n' % (reason))
# reactor.stop()
def showError(self, error):
        dialog = Gtk.MessageDialog(self.parent, 0, Gtk.MessageType.ERROR,
Gtk.ButtonsType.OK, "Error with connection")
dialog.format_secondary_text(
error)
dialog.show()
class MainWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="Gnome IRC")
self.clients = {}
self.set_default_size(1024, 600)
style_provider = Gtk.CssProvider()
style_provider.load_from_data(css)
Gtk.StyleContext.add_provider_for_screen(
Gdk.Screen.get_default(),
style_provider,
Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
)
if Utils.isGnome():
# we're in gnome, so use the gnome UI
self.hb = Gtk.HeaderBar()
self.hb.set_show_close_button(True)
self.hb.props.title = "Gnome IRC"
self.set_titlebar(self.hb)
self.server_tabs = Gtk.Notebook.new()
self.add(self.server_tabs)
else:
# not gnome, use the header bar as a toolbar
layout = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
self.add(layout)
self.hb = Gtk.HeaderBar()
self.hb.set_name("toolbar-gnomeirc")
layout.pack_start(self.hb, False, True, 0)
self.server_tabs = Gtk.Notebook.new()
layout.pack_start(self.server_tabs, True, True, 0)
# add the buttons to the toolbar
self.connect_button = Gtk.Button("Quick Connect")
self.connect_button.connect("clicked", self.on_connect_clicked)
self.hb.pack_start(self.connect_button)
# Join Channel Button
button = Gtk.Button()
icon = Gio.ThemedIcon(name="list-add")
image = Gtk.Image.new_from_gicon(icon, Gtk.IconSize.BUTTON)
button.add(image)
button.connect("clicked", self.on_join_clicked)
# Users list button
button2 = Gtk.Button()
icon = Gio.ThemedIcon(name="avatar-default-symbolic")
image = Gtk.Image.new_from_gicon(icon, Gtk.IconSize.BUTTON)
button2.add(image)
self.users_button = button2
button2.connect("clicked", self.on_users_clicked)
self.hb.pack_end(button)
self.hb.pack_end(button2)
self.show_all()
self.connect("delete_event", self.on_quit)
def on_connect_clicked(self, widget):
dialog = ConnectDialog(self)
dialog.connect('response', self.dialog_response_cb)
dialog.show()
def dialog_response_cb(self, dialog, response):
if response == Gtk.ResponseType.OK:
server = dialog.address_entry.get_text()
port = int(dialog.port_entry.get_text())
nickname = dialog.nick_entry.get_text()
password = dialog.password.get_text()
channel = dialog.channel.get_text()
server_name = dialog.server_name.get_text()
dialog.destroy()
factory = IRCFactory(nickname, channel, password, server_name, self)
# connect factory to this host and port
reactor.connectTCP(server, port, factory)
# disable the button once connected, at least until we have a proper multiple server implementation
# self.connect_button.set_sensitive(False);
# self.connect_button.set_label("Connected to " + server);
win.show_all()
elif response == Gtk.ResponseType.CANCEL:
dialog.destroy()
def addTab(self, widget, server, client):
self.server_tabs.append_page(widget, Gtk.Label(server))
self.clients[server] = client
self.show_all()
def on_join_clicked(self, widget):
if not self.clients:
return
current_client = self.clients[self.get_current_page()]
dialog = ChannelDialog(self)
dialog.connect('response', current_client.dialog_response_join)
dialog.show()
def on_users_clicked(self, widget):
if not self.clients:
return
current_client = self.clients[self.get_current_page()]
if not hasattr(current_client, "users_popover"):
builder = Gtk.Builder()
builder.add_from_file(DATADIR + "data/users_list.glade")
current_client.users_list = builder.get_object("users_list")
current_client.users_list_container = builder.get_object("users_list_container")
#current_client.names(current_client.selected).addCallback(current_client.got_users)
current_client.show_users()
def get_current_page(self):
page_num = self.server_tabs.get_current_page()
page_widget = self.server_tabs.get_nth_page(page_num)
page_name = self.server_tabs.get_tab_label_text(page_widget)
return page_name
def on_quit(self, *args):
#Gtk.main_quit()
reactor.stop()
win = MainWindow()
win.set_wmclass ("Gnome IRC", "Gnome IRC")
win.set_title ("Gnome IRC")
win.show_all()
reactor.run() | UTF-8 | Python | false | false | 18,482 | py | 13 | MainWindow.py | 8 | 0.598474 | 0.595174 | 0 | 529 | 32.941399 | 111 |
fahad-gcet/tasks_api | 11,836,929,877,769 | dc7fc2d7ab494cc363b1b403763ada06635dc8ba | 324a59acbd10605e5200f1e858c5aa699843ce66 | /api/views.py | d48aa4aa65f8cd764e4269c1850e81e4c325dc03 | []
| no_license | https://github.com/fahad-gcet/tasks_api | 0a0c7abcdb9a4c407515eeb66d771232c72e46d7 | 307b2548f014d002b4c7c574d35ba8f403b10073 | refs/heads/master | 2021-06-27T03:52:48.401495 | 2017-09-19T11:09:51 | 2017-09-19T11:09:51 | 104,043,206 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView
from api.models import Task
from api.serializers import TaskSerializer
from api.permissions import IsOwnerOrReadOnly
class TaskMixin:
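    # Shared configuration for both task endpoints: one queryset, serializer and
    # owner-based permission class; the owner is filled in from the authenticated
    # request user when a task is created.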
queryset = Task.objects.all()
serializer_class = TaskSerializer
permission_classes = (IsOwnerOrReadOnly,)
def perform_create(self, serializer):
serializer.save(owner=self.request.user)
class TaskList(TaskMixin, ListCreateAPIView):
pass
class TaskDetail(TaskMixin, RetrieveUpdateDestroyAPIView):
pass | UTF-8 | Python | false | false | 558 | py | 2 | views.py | 2 | 0.78853 | 0.78853 | 0 | 19 | 28.421053 | 83 |
bryanm92s/Python | 17,995,912,985,600 | 2011b2ccb19860c71cc006909b41aab8831e7fd3 | 19d63b7769f02d3f94cb8b8c3e77361bdbc518e9 | /Ejercicios en python 1/Tiemposolmar_5.py | ee3278b34890f7279d43437bb50f87a8e6def841 | []
| no_license | https://github.com/bryanm92s/Python | ff964de7deb535ee34387900835f1b3d7bc935d3 | 8c8aee7399a448e6b38e704ddc19160a7b252de5 | refs/heads/master | 2022-04-14T07:23:59.015588 | 2020-04-12T20:21:52 | 2020-04-12T20:21:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
CALCULAR CANTIDAD DE SEGUNDOS QUE LE TOMA A LA LUZ VIAJAR DEL SOL A MARTE
'''
print ("CALCULAR CANTIDAD DE SEGUNDOS QUE LE TOMA A LA LUZ VIAJAR DEL SOL A MARTE")
veloc_luz= 300000
dist_marte=227940000
# dist_tierra= 150000000
segun_minutos=60
#tiempo_s =dist_marte/veloc_luz
tiempo_s =dist_marte/veloc_luz
tiempo_m =tiempo_s/segun_minutos
print ("The number of seconds it takes light to travel from the SUN to MARS is : ", float(tiempo_s), "Seconds")
print ("The number of minutes it takes light to travel from the SUN to MARS is : ", float(tiempo_m), "Minutes") | UTF-8 | Python | false | false | 600 | py | 24 | Tiemposolmar_5.py | 24 | 0.7 | 0.656667 | 0 | 19 | 30.631579 | 112
MasoniteFramework/core | 7,662,221,692,642 | 8a2d5c6274d4222e18d0b5fae952f03dcf3b2c3b | ae645d5694ac7dbb49d4c6dfc6dd98ef82b1fafa | /masonite/testing/TestCase.py | 8c9f32a912eda447cfec00a3565d1e816c93a187 | [
"MIT"
]
| permissive | https://github.com/MasoniteFramework/core | 39d541286aa707423c4f7ddc464092b4d530c6f7 | 235ee98a20f7359b0201aa4d2b2e7cf7b6c36f8b | refs/heads/2.2 | 2021-05-13T13:38:20.286037 | 2019-11-29T14:28:32 | 2019-11-29T14:28:32 | 116,712,367 | 97 | 101 | MIT | false | 2023-08-02T01:36:52 | 2018-01-08T18:23:39 | 2023-05-23T19:12:38 | 2020-03-03T07:25:33 | 4,545 | 84 | 55 | 6 | Python | false | false | import io
import json
import sys
import unittest
from contextlib import contextmanager
from urllib.parse import urlencode
from masonite import env
from masonite.exceptions import RouteNotFoundException
from masonite.helpers.migrations import Migrations
from masonite.helpers.routes import create_matchurl, flatten_routes
from masonite.testsuite import generate_wsgi
from orator.orm import Factory
from masonite.app import App
from .MockRoute import MockRoute
class TestCase(unittest.TestCase):
sqlite = True
transactions = True
refreshes_database = False
_transaction = False
_with_subdomains = False
def setUp(self):
from wsgi import container
self.container = container
self.acting_user = False
self.factory = Factory()
self.withoutExceptionHandling()
self.withoutCsrf()
if not self._transaction:
self.startTransaction()
if hasattr(self, 'setUpFactories'):
self.setUpFactories()
if self.sqlite and env('DB_CONNECTION') != 'sqlite':
raise Exception("Cannot run tests without using the 'sqlite' database.")
if not self.transactions and self.refreshes_database:
self.refreshDatabase()
self.route_middleware = False
self.http_middleware = False
self.headers = {}
def buildOwnContainer(self):
self.container = self.create_container()
return self
@classmethod
def setUpClass(cls):
cls.staticSetUpDatabase()
@classmethod
def tearDownClass(cls):
if not cls.refreshes_database and cls.transactions:
cls.staticStopTransaction()
else:
cls.staticTearDownDatabase()
def refreshDatabase(self):
if not self.refreshes_database and self.transactions:
self.stopTransaction()
self.startTransaction()
if hasattr(self, 'setUpFactories'):
self.setUpFactories()
else:
self.tearDownDatabase()
self.setUpDatabase()
def startTransaction(self):
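        # Each test run is wrapped in a database transaction; stopTransaction()
        # rolls it back, so records created by setUpFactories never persist.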
from config.database import DB
DB.begin_transaction()
self.__class__._transaction = True
def stopTransaction(self):
from config.database import DB
DB.rollback()
self.__class__._transaction = False
@classmethod
def staticStopTransaction(cls):
from config.database import DB
DB.rollback()
cls._transaction = False
def make(self, model, factory, amount=50):
self.registerFactory(model, factory)
self.makeFactory(model, amount)
def makeFactory(self, model, amount):
return self.factory(model, amount).create()
def registerFactory(self, model, callable_factory):
self.factory.register(model, callable_factory)
def setUpDatabase(self):
self.tearDownDatabase()
Migrations().run()
if hasattr(self, 'setUpFactories'):
self.setUpFactories()
def tearDownDatabase(self):
Migrations().reset()
@staticmethod
def staticSetUpDatabase():
Migrations().run()
@staticmethod
def staticTearDownDatabase():
Migrations().reset()
def tearDown(self):
if not self.transactions and self.refreshes_database:
self.tearDownDatabase()
if self.container.has('Request'):
self.container.make('Request').get_and_reset_headers()
def call(self, method, url, params, wsgi={}):
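        # Build a fake WSGI environ for this request. When CSRF is disabled for
        # the test, a matching '__token'/'csrf_token' pair is injected so the
        # CSRF middleware still accepts the request.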
custom_wsgi = {
'PATH_INFO': url,
'REQUEST_METHOD': method
}
custom_wsgi.update(wsgi)
if not self._with_csrf:
params.update({'__token': 'tok'})
custom_wsgi.update({
'HTTP_COOKIE': 'csrf_token=tok',
'CONTENT_LENGTH': len(str(json.dumps(params))),
'wsgi.input': io.BytesIO(bytes(json.dumps(params), 'utf-8')),
})
custom_wsgi.update({
'QUERY_STRING': urlencode(params),
})
self.run_container(custom_wsgi)
self.container.make('Request').request_variables = params
return self.route(url, method)
def get(self, url, params={}, wsgi={}):
return self.call('GET', url, params, wsgi=wsgi)
def withSubdomains(self):
self._with_subdomains = True
return self
def json(self, method, url, params={}):
return self.call(method, url, params, wsgi={
'CONTENT_TYPE': 'application/json',
'CONTENT_LENGTH': len(str(json.dumps(params))),
'wsgi.input': io.BytesIO(bytes(json.dumps(params), 'utf-8')),
})
def post(self, url, params={}):
return self.call('POST', url, params)
def put(self, url, params={}):
return self.json('PUT', url, params)
def patch(self, url, params={}):
return self.json('PATCH', url, params)
def delete(self, url, params={}):
return self.json('DELETE', url, params)
def actingAs(self, user):
if not user:
raise TypeError("Cannot act as a user of type: {}".format(type(user)))
self.acting_user = user
return self
def route(self, url, method=False):
for route in self.container.make('WebRoutes'):
matchurl = create_matchurl(url, route)
if self.container.make('Request').has_subdomain():
                # Check if the subdomain matches the route's required domain
if not route.has_required_domain():
continue
if matchurl.match(url) and method in route.method_type:
return MockRoute(route, self.container)
raise RouteNotFoundException("Could not find a route based on the url '{}'".format(url))
def routes(self, routes=[], only=False):
if only:
self.container.bind('WebRoutes', flatten_routes(only))
return
self.container.bind('WebRoutes', flatten_routes(self.container.make('WebRoutes') + routes))
@contextmanager
def captureOutput(self):
new_out, new_err = io.StringIO(), io.StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout
finally:
sys.stdout, sys.stderr = old_out, old_err
def run_container(self, wsgi_values={}):
wsgi = generate_wsgi()
wsgi.update(wsgi_values)
self.container.bind('Environ', wsgi)
self.container.make('Request')._test_user = self.acting_user
self.container.make('Request').load_app(self.container).load_environ(wsgi)
if self._with_subdomains:
self.container.make('Request').activate_subdomains()
if self.headers:
self.container.make('Request').header(self.headers)
if self.route_middleware is not False:
self.container.bind('RouteMiddleware', self.route_middleware)
if self.http_middleware is not False:
self.container.bind('HttpMiddleware', self.http_middleware)
try:
for provider in self.container.make('WSGIProviders'):
self.container.resolve(provider.boot)
except Exception as e:
if self._exception_handling:
self.container.make('ExceptionHandler').load_exception(e)
else:
raise e
def withExceptionHandling(self):
self._exception_handling = True
def withoutExceptionHandling(self):
self._exception_handling = False
def withCsrf(self):
self._with_csrf = True
return self
def withoutCsrf(self):
self._with_csrf = False
return self
def assertDatabaseHas(self, schema, value):
from config.database import DB
table = schema.split('.')[0]
column = schema.split('.')[1]
self.assertTrue(DB.table(table).where(column, value).first())
def assertDatabaseNotHas(self, schema, value):
from config.database import DB
table = schema.split('.')[0]
column = schema.split('.')[1]
self.assertFalse(DB.table(table).where(column, value).first())
def on_bind(self, obj, method):
self.container.on_bind(obj, method)
return self
def withRouteMiddleware(self, middleware):
self.route_middleware = middleware
return self
def withHttpMiddleware(self, middleware):
self.http_middleware = middleware
return self
def withHeaders(self, headers={}):
self.headers = headers
return self
def withoutHttpMiddleware(self):
self.http_middleware = []
return self
def create_container(self):
container = App()
from config import application
from config import providers
container.bind('WSGI', generate_wsgi())
container.bind('Application', application)
container.bind('Container', container)
container.bind('ProvidersConfig', providers)
container.bind('Providers', [])
        container.bind('WSGIProviders', [])
        """Bind all service providers.
        Let's register everything into the Service Container. Once everything is
        in the container we can run through all the boot methods. Some providers
        don't need to execute with every request and should only run once when
        the server is started. A provider is run only once if its wsgi attribute
        is False.
        """
for provider in container.make('ProvidersConfig').PROVIDERS:
located_provider = provider()
located_provider.load_app(container).register()
if located_provider.wsgi:
container.make('WSGIProviders').append(located_provider)
else:
container.make('Providers').append(located_provider)
for provider in container.make('Providers'):
            container.resolve(provider.boot)
        """Get the application from the container.
        Some providers may change the WSGI server, for example by wrapping it in
        a Whitenoise container. Let's get a WSGI instance from the container and
        pass it to the application variable. This allows WSGI servers to pick it
        up from the command line.
        """
return container
| UTF-8 | Python | false | false | 10,408 | py | 274 | TestCase.py | 238 | 0.620388 | 0.61962 | 0 | 326 | 30.92638 | 99 |
ibell/achp | 12,317,966,249,364 | 669da02540b90f70dac527ae3fa4d0ce22096608 | 3859ee7a1694f30c69e4cb4ee392f3e197b23aaa | /src/Compressor.py | 1ea0a6e5a2d98b1772525ca57663885f8434916e | []
| no_license | https://github.com/ibell/achp | 71467905986ae5f0c7dcab0b2ca98bfd0aa30977 | 1003d16c651447d0068173e6d3186ebae9672bb1 | refs/heads/master | 2016-08-02T21:40:56.971781 | 2013-10-26T23:33:45 | 2013-10-26T23:33:45 | 12,282,085 | 8 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.11
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_Compressor', [dirname(__file__)])
except ImportError:
import _Compressor
return _Compressor
if fp is not None:
try:
_mod = imp.load_module('_Compressor', fp, pathname, description)
finally:
fp.close()
return _mod
_Compressor = swig_import_helper()
del swig_import_helper
else:
import _Compressor
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
class SwigPyIterator(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _Compressor.delete_SwigPyIterator
__del__ = lambda self : None;
def value(self): return _Compressor.SwigPyIterator_value(self)
def incr(self, n=1): return _Compressor.SwigPyIterator_incr(self, n)
def decr(self, n=1): return _Compressor.SwigPyIterator_decr(self, n)
def distance(self, *args): return _Compressor.SwigPyIterator_distance(self, *args)
def equal(self, *args): return _Compressor.SwigPyIterator_equal(self, *args)
def copy(self): return _Compressor.SwigPyIterator_copy(self)
def next(self): return _Compressor.SwigPyIterator_next(self)
def __next__(self): return _Compressor.SwigPyIterator___next__(self)
def previous(self): return _Compressor.SwigPyIterator_previous(self)
def advance(self, *args): return _Compressor.SwigPyIterator_advance(self, *args)
def __eq__(self, *args): return _Compressor.SwigPyIterator___eq__(self, *args)
def __ne__(self, *args): return _Compressor.SwigPyIterator___ne__(self, *args)
def __iadd__(self, *args): return _Compressor.SwigPyIterator___iadd__(self, *args)
def __isub__(self, *args): return _Compressor.SwigPyIterator___isub__(self, *args)
def __add__(self, *args): return _Compressor.SwigPyIterator___add__(self, *args)
def __sub__(self, *args): return _Compressor.SwigPyIterator___sub__(self, *args)
def __iter__(self): return self
SwigPyIterator_swigregister = _Compressor.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
class vectord(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, vectord, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, vectord, name)
__repr__ = _swig_repr
def iterator(self): return _Compressor.vectord_iterator(self)
def __iter__(self): return self.iterator()
def __nonzero__(self): return _Compressor.vectord___nonzero__(self)
def __bool__(self): return _Compressor.vectord___bool__(self)
def __len__(self): return _Compressor.vectord___len__(self)
def pop(self): return _Compressor.vectord_pop(self)
def __getslice__(self, *args): return _Compressor.vectord___getslice__(self, *args)
def __setslice__(self, *args): return _Compressor.vectord___setslice__(self, *args)
def __delslice__(self, *args): return _Compressor.vectord___delslice__(self, *args)
def __delitem__(self, *args): return _Compressor.vectord___delitem__(self, *args)
def __getitem__(self, *args): return _Compressor.vectord___getitem__(self, *args)
def __setitem__(self, *args): return _Compressor.vectord___setitem__(self, *args)
def append(self, *args): return _Compressor.vectord_append(self, *args)
def empty(self): return _Compressor.vectord_empty(self)
def size(self): return _Compressor.vectord_size(self)
def clear(self): return _Compressor.vectord_clear(self)
def swap(self, *args): return _Compressor.vectord_swap(self, *args)
def get_allocator(self): return _Compressor.vectord_get_allocator(self)
def begin(self): return _Compressor.vectord_begin(self)
def end(self): return _Compressor.vectord_end(self)
def rbegin(self): return _Compressor.vectord_rbegin(self)
def rend(self): return _Compressor.vectord_rend(self)
def pop_back(self): return _Compressor.vectord_pop_back(self)
def erase(self, *args): return _Compressor.vectord_erase(self, *args)
def __init__(self, *args):
this = _Compressor.new_vectord(*args)
try: self.this.append(this)
except: self.this = this
def push_back(self, *args): return _Compressor.vectord_push_back(self, *args)
def front(self): return _Compressor.vectord_front(self)
def back(self): return _Compressor.vectord_back(self)
def assign(self, *args): return _Compressor.vectord_assign(self, *args)
def resize(self, *args): return _Compressor.vectord_resize(self, *args)
def insert(self, *args): return _Compressor.vectord_insert(self, *args)
def reserve(self, *args): return _Compressor.vectord_reserve(self, *args)
def capacity(self): return _Compressor.vectord_capacity(self)
__swig_destroy__ = _Compressor.delete_vectord
__del__ = lambda self : None;
vectord_swigregister = _Compressor.vectord_swigregister
vectord_swigregister(vectord)
class OutputEntryClass(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, OutputEntryClass, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, OutputEntryClass, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _Compressor.new_OutputEntryClass(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _Compressor.delete_OutputEntryClass
__del__ = lambda self : None;
OutputEntryClass_swigregister = _Compressor.OutputEntryClass_swigregister
OutputEntryClass_swigregister(OutputEntryClass)
class ACHPComponentClass(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ACHPComponentClass, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ACHPComponentClass, name)
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _Compressor.delete_ACHPComponentClass
__del__ = lambda self : None;
ACHPComponentClass_swigregister = _Compressor.ACHPComponentClass_swigregister
ACHPComponentClass_swigregister(ACHPComponentClass)
class CompressorClass(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, CompressorClass, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, CompressorClass, name)
__repr__ = _swig_repr
def __init__(self):
this = _Compressor.new_CompressorClass()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _Compressor.delete_CompressorClass
__del__ = lambda self : None;
__swig_setmethods__["Tsat_s_K"] = _Compressor.CompressorClass_Tsat_s_K_set
__swig_getmethods__["Tsat_s_K"] = _Compressor.CompressorClass_Tsat_s_K_get
if _newclass:Tsat_s_K = _swig_property(_Compressor.CompressorClass_Tsat_s_K_get, _Compressor.CompressorClass_Tsat_s_K_set)
__swig_setmethods__["Tsat_d_K"] = _Compressor.CompressorClass_Tsat_d_K_set
__swig_getmethods__["Tsat_d_K"] = _Compressor.CompressorClass_Tsat_d_K_get
if _newclass:Tsat_d_K = _swig_property(_Compressor.CompressorClass_Tsat_d_K_get, _Compressor.CompressorClass_Tsat_d_K_set)
__swig_setmethods__["DT_sh_K"] = _Compressor.CompressorClass_DT_sh_K_set
__swig_getmethods__["DT_sh_K"] = _Compressor.CompressorClass_DT_sh_K_get
if _newclass:DT_sh_K = _swig_property(_Compressor.CompressorClass_DT_sh_K_get, _Compressor.CompressorClass_DT_sh_K_set)
__swig_setmethods__["Tsat_s"] = _Compressor.CompressorClass_Tsat_s_set
__swig_getmethods__["Tsat_s"] = _Compressor.CompressorClass_Tsat_s_get
if _newclass:Tsat_s = _swig_property(_Compressor.CompressorClass_Tsat_s_get, _Compressor.CompressorClass_Tsat_s_set)
__swig_setmethods__["Tsat_d"] = _Compressor.CompressorClass_Tsat_d_set
__swig_getmethods__["Tsat_d"] = _Compressor.CompressorClass_Tsat_d_get
if _newclass:Tsat_d = _swig_property(_Compressor.CompressorClass_Tsat_d_get, _Compressor.CompressorClass_Tsat_d_set)
__swig_setmethods__["power_map"] = _Compressor.CompressorClass_power_map_set
__swig_getmethods__["power_map"] = _Compressor.CompressorClass_power_map_get
if _newclass:power_map = _swig_property(_Compressor.CompressorClass_power_map_get, _Compressor.CompressorClass_power_map_set)
__swig_setmethods__["Vdot_ratio"] = _Compressor.CompressorClass_Vdot_ratio_set
__swig_getmethods__["Vdot_ratio"] = _Compressor.CompressorClass_Vdot_ratio_get
if _newclass:Vdot_ratio = _swig_property(_Compressor.CompressorClass_Vdot_ratio_get, _Compressor.CompressorClass_Vdot_ratio_set)
__swig_setmethods__["P1"] = _Compressor.CompressorClass_P1_set
__swig_getmethods__["P1"] = _Compressor.CompressorClass_P1_get
if _newclass:P1 = _swig_property(_Compressor.CompressorClass_P1_get, _Compressor.CompressorClass_P1_set)
__swig_setmethods__["P2"] = _Compressor.CompressorClass_P2_set
__swig_getmethods__["P2"] = _Compressor.CompressorClass_P2_get
if _newclass:P2 = _swig_property(_Compressor.CompressorClass_P2_get, _Compressor.CompressorClass_P2_set)
__swig_setmethods__["F"] = _Compressor.CompressorClass_F_set
__swig_getmethods__["F"] = _Compressor.CompressorClass_F_get
if _newclass:F = _swig_property(_Compressor.CompressorClass_F_get, _Compressor.CompressorClass_F_set)
__swig_setmethods__["T1_actual"] = _Compressor.CompressorClass_T1_actual_set
__swig_getmethods__["T1_actual"] = _Compressor.CompressorClass_T1_actual_get
if _newclass:T1_actual = _swig_property(_Compressor.CompressorClass_T1_actual_get, _Compressor.CompressorClass_T1_actual_set)
__swig_setmethods__["v_map"] = _Compressor.CompressorClass_v_map_set
__swig_getmethods__["v_map"] = _Compressor.CompressorClass_v_map_get
if _newclass:v_map = _swig_property(_Compressor.CompressorClass_v_map_get, _Compressor.CompressorClass_v_map_set)
__swig_setmethods__["v_actual"] = _Compressor.CompressorClass_v_actual_set
__swig_getmethods__["v_actual"] = _Compressor.CompressorClass_v_actual_get
if _newclass:v_actual = _swig_property(_Compressor.CompressorClass_v_actual_get, _Compressor.CompressorClass_v_actual_set)
__swig_setmethods__["mdot"] = _Compressor.CompressorClass_mdot_set
__swig_getmethods__["mdot"] = _Compressor.CompressorClass_mdot_get
if _newclass:mdot = _swig_property(_Compressor.CompressorClass_mdot_get, _Compressor.CompressorClass_mdot_set)
__swig_setmethods__["fp"] = _Compressor.CompressorClass_fp_set
__swig_getmethods__["fp"] = _Compressor.CompressorClass_fp_get
if _newclass:fp = _swig_property(_Compressor.CompressorClass_fp_get, _Compressor.CompressorClass_fp_set)
__swig_setmethods__["eta_oi"] = _Compressor.CompressorClass_eta_oi_set
__swig_getmethods__["eta_oi"] = _Compressor.CompressorClass_eta_oi_get
if _newclass:eta_oi = _swig_property(_Compressor.CompressorClass_eta_oi_get, _Compressor.CompressorClass_eta_oi_set)
__swig_setmethods__["Wdot"] = _Compressor.CompressorClass_Wdot_set
__swig_getmethods__["Wdot"] = _Compressor.CompressorClass_Wdot_get
if _newclass:Wdot = _swig_property(_Compressor.CompressorClass_Wdot_get, _Compressor.CompressorClass_Wdot_set)
__swig_setmethods__["CycleEnergyIn"] = _Compressor.CompressorClass_CycleEnergyIn_set
__swig_getmethods__["CycleEnergyIn"] = _Compressor.CompressorClass_CycleEnergyIn_get
if _newclass:CycleEnergyIn = _swig_property(_Compressor.CompressorClass_CycleEnergyIn_get, _Compressor.CompressorClass_CycleEnergyIn_set)
__swig_setmethods__["Vdot_pumped"] = _Compressor.CompressorClass_Vdot_pumped_set
__swig_getmethods__["Vdot_pumped"] = _Compressor.CompressorClass_Vdot_pumped_get
if _newclass:Vdot_pumped = _swig_property(_Compressor.CompressorClass_Vdot_pumped_get, _Compressor.CompressorClass_Vdot_pumped_set)
__swig_setmethods__["P"] = _Compressor.CompressorClass_P_set
__swig_getmethods__["P"] = _Compressor.CompressorClass_P_get
if _newclass:P = _swig_property(_Compressor.CompressorClass_P_get, _Compressor.CompressorClass_P_set)
__swig_setmethods__["M"] = _Compressor.CompressorClass_M_set
__swig_getmethods__["M"] = _Compressor.CompressorClass_M_get
if _newclass:M = _swig_property(_Compressor.CompressorClass_M_get, _Compressor.CompressorClass_M_set)
__swig_setmethods__["inlet_state"] = _Compressor.CompressorClass_inlet_state_set
__swig_getmethods__["inlet_state"] = _Compressor.CompressorClass_inlet_state_get
if _newclass:inlet_state = _swig_property(_Compressor.CompressorClass_inlet_state_get, _Compressor.CompressorClass_inlet_state_set)
__swig_setmethods__["outlet_state"] = _Compressor.CompressorClass_outlet_state_set
__swig_getmethods__["outlet_state"] = _Compressor.CompressorClass_outlet_state_get
if _newclass:outlet_state = _swig_property(_Compressor.CompressorClass_outlet_state_get, _Compressor.CompressorClass_outlet_state_set)
def set_P(self, *args): return _Compressor.CompressorClass_set_P(self, *args)
def speed_test(self, *args): return _Compressor.CompressorClass_speed_test(self, *args)
def calculate(self): return _Compressor.CompressorClass_calculate(self)
def test(self): return _Compressor.CompressorClass_test(self)
def OutputList(self): return _Compressor.CompressorClass_OutputList(self)
CompressorClass_swigregister = _Compressor.CompressorClass_swigregister
CompressorClass_swigregister(CompressorClass)
# This file is compatible with both classic and new-style classes.
| UTF-8 | Python | false | false | 15,784 | py | 13 | Compressor.py | 9 | 0.687532 | 0.685251 | 0 | 264 | 57.772727 | 141 |
ox-it/moxie | 14,723,147,899,793 | dede27f101dbe8ce7c3d49dc852628f03ab85cd1 | 6f3bf2bb9c8cb90e32a2514765fae3ee06bde405 | /moxie/tests/test_authentication_hmac.py | 0651a23f2e4e6932f187e370717c0efa8932ba2c | [
"Apache-2.0"
]
| permissive | https://github.com/ox-it/moxie | cf298ed6d4107ae1e6ab96f41655fa48e7ce97c1 | cc234a4170358c62b86d9fdb7760949b33a81937 | refs/heads/master | 2020-03-03T00:18:37.056387 | 2019-06-14T14:30:15 | 2019-06-14T14:30:15 | 5,481,115 | 2 | 2 | Apache-2.0 | false | 2020-07-01T07:41:30 | 2012-08-20T12:49:13 | 2020-06-30T12:38:12 | 2020-07-01T07:41:29 | 1,735 | 13 | 2 | 1 | Python | false | false | import unittest
from moxie import create_app
from moxie.core.views import accepts
from moxie.authentication import HMACView
class DummyUser(object):
def __init__(self, secret_key):
self.secret_key = secret_key
self.name = 'Dave'
class TestAuthenticatedView(HMACView):
def handle_request(self):
one_user = DummyUser('mysupersecretkey')
if self.check_auth(one_user.secret_key):
return {'name': one_user.name}
@accepts('foo/bar')
def basic_response(self, response):
return 'Hello %s!' % response['name'], 200
class HMACAuthenticationTestCase(unittest.TestCase):
def setUp(self):
self.user = DummyUser('mysupersecretkey')
self.app = create_app()
self.app.add_url_rule('/test', 'test', TestAuthenticatedView.as_view('test'))
def test_successful_hmac(self):
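        # The hard-coded Authorization value is the signature expected for this
        # request under the dummy user's secret key, so check_auth() should
        # accept it and the view should answer with a 200.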
headers = [
('Accept', 'foo/bar'),
('Date', 'Wednesday'),
('X-HMAC-Nonce', 'foobarbaz'),
('Authorization', '668db85d1dff6718d778454fc8c1d368a906f675'),
]
with self.app.test_client() as c:
rv = c.get('/test', headers=headers)
self.assertEqual(rv.status_code, 200)
def test_hmac_signature_mismatch(self):
headers = [
('Accept', 'foo/bar'),
('Date', 'Wednesday'),
('X-HMAC-Nonce', 'foobarbaz'),
('Authorization', 'wrong-wrong'),
]
with self.app.test_client() as c:
rv = c.get('/test', headers=headers)
self.assertEqual(rv.status_code, 401)
def test_missing_header(self):
headers = [
('Accept', 'foo/bar'),
('Date', 'Wednesday'),
('Authorization', 'wrong-wrong'),
]
with self.app.test_client() as c:
rv = c.get('/test', headers=headers)
self.assertEqual(rv.status_code, 401)
self.assertIn("missing header", rv.headers['WWW-Authenticate'])
| UTF-8 | Python | false | false | 1,995 | py | 139 | test_authentication_hmac.py | 74 | 0.574937 | 0.555388 | 0 | 63 | 30.666667 | 85 |
alvinr/data-modeling | 6,786,048,366,191 | aa9136c95fa51d7f7d56658425e758d2077d6739 | a8738df3536e25ebbbe134df19d74ec69c323769 | /redis/faceting/all.py | 3cab02d1ad2f908f6d0eaf7e667e6e4a64a82fdf | []
| no_license | https://github.com/alvinr/data-modeling | 3c53ef8b15d540d8bd5866f0acdbdf6986916f39 | 54e7a39f73393e9f75acaa6df7af94d4fafda618 | refs/heads/master | 2021-09-08T03:52:36.916432 | 2021-08-25T17:35:52 | 2021-08-25T17:35:52 | 64,255,246 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from redis import StrictRedis
import os
import hashlib
import json
redis = StrictRedis(host=os.environ.get("REDIS_HOST", "localhost"),
port=os.environ.get("REDIS_PORT", 6379),
db=0)
redis.flushall()
def create_event(product):
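    # Index the product under one Set per facet value (reserve_seating,
    # medal_event, venue) and store the full record in a Hash keyed by SKU.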
p = redis.pipeline()
p.sadd("reserve_seating:" + str(product['reserve_seating']), product['sku'])
p.sadd("medal_event:" + str(product['medal_event']), product['sku'])
p.sadd("venue:" + str(product['venue']), product['sku'])
p.hmset("products:" + product['sku'], product)
p.execute()
def create_events():
m100m_final = { 'sku': "123-ABC-723",
'name': "Men's 100m Final",
'reserve_seating': True,
'medal_event': True,
'venue': "Olympic Stadium",
'category': ["Track & Field", "Mens"]
}
w4x100_heat = { 'sku': "737-DEF-911",
'name': "Women's 4x100m Heats",
'reserve_seating': True,
'medal_event': False,
'venue': "Olympic Stadium",
'category': ["Track & Field", "Womens"]
}
wjudo_qual = { 'sku': "320-GHI-921",
'name': "Womens Judo Qualifying",
'reserve_seating': False,
'medal_event': False,
'venue': "Nippon Budokan",
'category': ["Martial Arts", "Womens"]
}
create_event(m100m_final)
create_event(w4x100_heat)
create_event(wjudo_qual)
def match(*keys):
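    # Faceted search: intersect the facet Sets so only SKUs matching every
    # criterion remain, then fetch the full product Hash for each match.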
m = []
matches = redis.sinter(keys)
for sku in matches:
record = redis.hgetall("products:" + sku)
m.append(record)
return m
# Find matches based on two criteria
create_events()
# Find the match
matches = match("reserve_seating:True", "medal_event:False")
for m in matches:
print m
matches = match("reserve_seating:True", "medal_event:False", "venue:Olympic Stadium")
for m in matches:
print m
def create_hashed_lookups(lookup_key, products):
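    # Precomputed variant: hash the whole criteria dict with RIPEMD-160 and store
    # the matching SKUs in a single Set, so a query becomes one SMEMBERS call
    # instead of a set intersection at query time.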
h = hashlib.new("ripemd160")
h.update(str(lookup_key))
for sku in products:
redis.sadd("lookups:" + h.hexdigest(), sku)
def match_hashed(lookup_key):
m = []
h = hashlib.new("ripemd160")
h.update(str(lookup_key))
matches = redis.smembers("lookups:" + h.hexdigest())
for sku in matches:
record = redis.hgetall("products:" + sku)
m.append(record)
return m
# Find matches based on hashed criteria
lookup_key={'reserve_seating': True, 'medal_event': True}
create_hashed_lookups(lookup_key, ["123-ABC-723"] )
# Find the match
matches = match_hashed(lookup_key)
for m in matches:
print m
| UTF-8 | Python | false | false | 2,651 | py | 29 | all.py | 13 | 0.582045 | 0.56092 | 0 | 87 | 29.45977 | 85 |
Anonymous20XXcodes/KS-GNN | 3,487,513,461,134 | ddbe55c91c52fe384bb193173b62f03c96c37e28 | 4dafc6f337728f711480c6609d32d64bbb06aa8c | /utils.py | b473690b4c043f699ac2b5790c989b29f08b70fa | []
| no_license | https://github.com/Anonymous20XXcodes/KS-GNN | de1c1d03b628c96e6bbeac3e5a8fcbf10c68baaf | b608b304551f1640cefe6e98dd9a5111f9b2dbe8 | refs/heads/main | 2023-02-28T17:25:21.381610 | 2021-02-09T09:26:50 | 2021-02-09T09:26:50 | 336,988,084 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import scipy.sparse as sp
from scipy.sparse import lil_matrix
import torch
from torch_geometric.nn import MessagePassing
from torch_geometric.utils import add_self_loops, degree, to_undirected
import os
import pandas as pd
args_list = ['dataset','alpha','beta','gamma','sigma','kw','topk','he','hw','lr', 'hid_d', 'd', 'layer_num', 'conv_num'] #data_size
def to_str(s):
return "'"+str(s)+"'"
def get_saved_model_path(args):
return args.model_dir+f'/{args.dataset}{args.comment}.model'
def load_data(args):
dataset_dir = f"./data/{args.dataset}/"
if args.hw and args.hw!='0':
spX_path = dataset_dir+str(args.hw)+"%hidden_spX.npz"
print('Loading hidden attributed graph')
else:
spX_path = dataset_dir+"spX.npz"
if args.he:
edges_path = dataset_dir+f'edge_{args.he}_edge_index.npz'
else:
edges_path = dataset_dir+'edge_index.npz'
edge_index = load_edge_index(edges_path)
coo_X = load_spX(spX_path)
return coo_X, edge_index
def load_spX(path):
return sp.load_npz(path)
def load_edge_index(path):
return np.load(path)['arr_0']
def str2int(s):
return list(map(int,s.strip().split()))
def queries2tensor(qs, attr_num):
q_num = len(qs)
t = torch.zeros(q_num, attr_num)
for i in range(q_num):
t[i,qs[i]] = 1
return t
def eval_Z(Z,q_emb,ans, k=100, verbose=False):
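    # Rank every node by similarity to each query embedding and report hit@k:
    # the fraction of the top-k ranked nodes that belong to that query's
    # ground-truth answer set.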
scores = q_emb @ Z.t()
rank = scores.sort(dim=-1, descending=True)[1]
hits = []
nodes_num = Z.shape[0]
for i in range(len(q_emb)):
mark = torch.zeros(nodes_num)
mark[ans[i]]=1
tmp_hit = mark[rank[i,:k]].sum()/k
# print(f'Q_{i} hit@{k}:{tmp_hit:.4f}')
hits.append(tmp_hit)
hits = torch.stack(hits)
if verbose:
print(hits)
res = hits.sort(descending=True)
print('Top 30:', res[1][:30])
return hits.mean().item()
# eval_PCA
def eval_PCA(X, qX, ans, k=100, verbose=False):
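    # Baseline: project the attribute matrix onto its top-64 right singular
    # vectors and rank nodes in the reduced space with the same hit@k metric.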
u,s,v = torch.svd(X)
SVD_res = eval_Z(X@v[:,:64],qX@v[:,:64],ans,k, verbose=verbose)
return SVD_res
def coo2torch(coo):
values = coo.data
indices = np.vstack((coo.row, coo.col))
i = torch.LongTensor(indices)
v = torch.FloatTensor(values)
shape = coo.shape
return torch.sparse.FloatTensor(i, v, torch.Size(shape))
def load_gt(path): # gt is a dictionary {query_str:[nodes_int]}
groud_truth = {}
with open(path,'r') as f:
for line in f.readlines():
query,ans = line.strip().split('\t')
groud_truth[query] = list(map(int,ans.split()))
return groud_truth
| UTF-8 | Python | false | false | 2,503 | py | 134 | utils.py | 5 | 0.63324 | 0.622853 | 0 | 103 | 22.271845 | 131 |
adrian13579/CoolInterpreter | 3,959,959,855,950 | 2c9cf81e228ca7bffbc98eecbdd1457db52b681a | f47383f90e794416e12d34d4c15b354a0cc4d271 | /cmp/parsers/shift_reduce_parser.py | 42fd5a8403dc21350f69d05ce2b64c418a0806cd | []
| no_license | https://github.com/adrian13579/CoolInterpreter | ecff721c7c92e0e5d9cc5f7f2bf4855abcc54d36 | 154bd734a9111a1510e5591ed9d79844c72496a5 | refs/heads/master | 2023-03-07T02:00:18.532393 | 2021-02-18T23:09:10 | 2021-02-18T23:09:10 | 262,991,104 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class ShiftReduceParser:
SHIFT = 'SHIFT'
REDUCE = 'REDUCE'
OK = 'OK'
def __init__(self, G, verbose=False):
self.G = G
self.verbose = verbose
self.action = {}
self.goto = {}
self._build_parsing_table()
def _build_parsing_table(self):
raise NotImplementedError()
def __call__(self, w, get_shift_reduce=False):
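        # LR driver loop: look up ACTION[state, lookahead]; SHIFT pushes the next
        # state, REDUCE pops |rhs| states and pushes GOTO[state, lhs] while
        # recording the production, and OK accepts the input.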
stack = [0]
cursor = 0
output = []
operations = []
while True:
state = stack[-1]
lookahead = w[cursor]
if self.verbose: print(stack, w[cursor:])
try:
action, tag = self.action[state, lookahead.token_type.Name][0]
if action == ShiftReduceParser.SHIFT:
operations.append(self.SHIFT)
stack.append(tag)
cursor += 1
elif action == ShiftReduceParser.REDUCE:
operations.append(self.REDUCE)
for _ in range(len(tag.Right)): stack.pop()
stack.append(self.goto[stack[-1], tag.Left.Name][0])
output.append(tag)
elif action == ShiftReduceParser.OK:
return output if not get_shift_reduce else (output, operations)
else:
assert False, 'Must be something wrong!'
except KeyError:
raise ParsingException(
f'Syntax error near token {lookahead.lex}')
class ParsingException(Exception):
@property
def text(self) -> str:
return self.args[0]
| UTF-8 | Python | false | false | 1,618 | py | 28 | shift_reduce_parser.py | 19 | 0.509889 | 0.504944 | 0 | 49 | 32.020408 | 83 |