| Column | Type | Values |
|---|---|---|
| repo_name | string | lengths 7 to 111 |
| `__id__` | int64 | 16.6k to 19,705B |
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | lengths 5 to 151 |
| content_id | string | length 40 |
| detected_licenses | list | |
| license_type | string | 2 classes |
| repo_url | string | lengths 26 to 130 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | lengths 4 to 42 |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 14.6k to 687M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 12 classes |
| gha_fork | bool | 2 classes |
| gha_event_created_at | timestamp[ns] | |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_size | int64 | 0 to 10.2M, nullable |
| gha_stargazers_count | int32 | 0 to 178k, nullable |
| gha_forks_count | int32 | 0 to 88.9k, nullable |
| gha_open_issues_count | int32 | 0 to 2.72k, nullable |
| gha_language | string | lengths 1 to 16, nullable |
| gha_archived | bool | 1 class |
| gha_disabled | bool | 1 class |
| content | string | lengths 10 to 2.95M |
| src_encoding | string | 5 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 10 to 2.95M |
| extension | string | 19 classes |
| num_repo_files | int64 | 1 to 202k |
| filename | string | lengths 4 to 112 |
| num_lang_files | int64 | 1 to 202k |
| alphanum_fraction | float64 | 0.26 to 0.89 |
| alpha_fraction | float64 | 0.2 to 0.89 |
| hex_fraction | float64 | 0 to 0.09 |
| num_lines | int32 | 1 to 93.6k |
| avg_line_length | float64 | 4.57 to 103 |
| max_line_length | int64 | 7 to 931 |

repo_name | __id__ | blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_url | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_fork | gha_event_created_at | gha_created_at | gha_updated_at | gha_pushed_at | gha_size | gha_stargazers_count | gha_forks_count | gha_open_issues_count | gha_language | gha_archived | gha_disabled | content | src_encoding | language | is_vendor | is_generated | length_bytes | extension | num_repo_files | filename | num_lang_files | alphanum_fraction | alpha_fraction | hex_fraction | num_lines | avg_line_length | max_line_length
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
imccready/forms | 5,540,507,855,662 | e06ea81a863c953f7cd3c7e9655277ac9a9f115d | 90f67240a9b818267a1c1b30b1735797d143a2a0 | /src/model/stats.py | 70eee44d14e9fadc2535249c3f4fe02f018d1bbc | []
| no_license | https://github.com/imccready/forms | 47a1f36017d2b354db5fe8bb463a58e516e5483f | e92ddb93a32024df4f9d81a3f2adab7017178e78 | refs/heads/master | 2020-04-05T14:40:26.647918 | 2018-11-21T05:16:52 | 2018-11-21T05:16:52 | 156,933,855 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Stats(object):
def __init__(self):
self.total = 0
self.wins = 0
self.place = 0
self.formfields = {}
self.runnerStats = [] | UTF-8 | Python | false | false | 171 | py | 19 | stats.py | 17 | 0.497076 | 0.479532 | 0 | 7 | 23.571429 | 29 |
sesas/Lunch-Roulette | 9,792,525,452,980 | b5a7d246cbb95ed1265f5b1d36eb052b91c36526 | eddf04186915715de0feef755580a238d5060e61 | /py/get_data.py | ebefcda520c6f451bb9efe280aa5112f675a1f95 | []
| no_license | https://github.com/sesas/Lunch-Roulette | 4c2bcd2ba59b494fea68887ce99c7314c3cc3406 | e59ca9cba806e9f80af3aeb5c830d1a7550ce676 | refs/heads/master | 2021-01-16T20:24:22.427836 | 2011-09-26T18:13:08 | 2011-09-26T18:13:08 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
#
# Get data from daily Google lunch roulette spreadsheet and update corresponding MySQL People table
__author__ = 'mario@betaworks.com (Mario Menti)'
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import gdata.spreadsheet.service
import gdata.service
import atom.service
import gdata.spreadsheet
import atom
import getopt
import sys
import string
import MySQLdb
import datetime
class SimpleCRUD:
def __init__(self, email, password, db_user, db_pw):
self.gd_client = gdata.spreadsheet.service.SpreadsheetsService()
self.gd_client.email = email
self.gd_client.password = password
self.gd_client.source = 'betaworks lunch roulette'
self.gd_client.ProgrammaticLogin()
self.db_user = db_user
    self.db_password = db_pw
def _ProcessData(self, feed):
db = MySQLdb.connect("localhost","lunchy", self.db_user, self.db_password)
cursor = db.cursor()
for i, entry in enumerate(feed.entry):
rowdata = map(lambda e: (e[1].text), entry.custom.items())
l_updated = datetime.datetime.strptime(rowdata[0],"%m/%d/%Y %H:%M:%S")
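      # map the lunch time slots picked in the spreadsheet onto the four availability flags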
use_av_1 = 0
use_av_2 = 0
use_av_3 = 0
use_av_4 = 0
if rowdata[1].find('12:00') != -1: use_av_1 = 1
if rowdata[1].find('12:30') != -1: use_av_2 = 1
if rowdata[1].find('1:00') != -1: use_av_3 = 1
if rowdata[1].find('1:30') != -1: use_av_4 = 1
mysql_update_qry = "update People set last_updated = '%s', avail_1 = %s, avail_2 = %s, avail_3 = %s, avail_4 = %s where email = '%s'" % (l_updated, use_av_1, use_av_2, use_av_3, use_av_4, rowdata[2])
cursor.execute(mysql_update_qry)
db.close()
def Run(self):
feed = self.gd_client.GetSpreadsheetsFeed()
# assume doc index=0
id_parts = feed.entry[0].id.text.split('/')
doc_key = id_parts[len(id_parts) - 1]
feed = self.gd_client.GetWorksheetsFeed(doc_key)
# assume worksheet index=0
id_parts = feed.entry[0].id.text.split('/')
ws_key = id_parts[len(id_parts) - 1]
# Get the list feed
feed = self.gd_client.GetListFeed(doc_key, ws_key)
self._ProcessData(feed)
def main():
# parse command line options
try:
    opts, args = getopt.getopt(sys.argv[1:], "", ["goog_user=", "goog_pw=", "db_user=", "db_pw="])
except getopt.error, msg:
#python get_data.py --user=lunchroul@gmail.com --pw=betawork$
print 'python get_data.py --goog_user [username] --goog_pw [password] --db_user= [username] --db_pw [password]'
sys.exit(2)
goog_user = ''
goog_pw = ''
db_user=''
db_pw=''
key = ''
# Process options
for o, a in opts:
if o == "--goog_user":
goog_user = a
elif o == "--goog_pw":
goog_pw = a
if o == "--db_user":
db_user = a
elif o == "--db_pw":
db_pw = a
  if goog_user == '' or goog_pw == '':
print 'python get_data.py --goog_user [username] --goog_pw [password] --db_user= [username] --db_pw [password]'
sys.exit(2)
sample = SimpleCRUD(goog_user, goog_pw, db_user, db_pw)
sample.Run()
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 3,081 | py | 11 | get_data.py | 7 | 0.617007 | 0.598182 | 0 | 104 | 28.625 | 205 |
AlhassanAly/Thesis2020 | 9,328,668,993,264 | 8e19a1ceac5c3cc19b56643188b10a67766f59c9 | a7b1b82defea0123e6fc1c91c572f37154cddb9c | /Iterations.py | 4d024a7ee4ac832d9d21801cfac74dacd98327c6 | [
"MIT"
]
| permissive | https://github.com/AlhassanAly/Thesis2020 | 38617757a5c41ac2e83f49ecd1c1d669f30a3446 | 1a6901924b76fbe5df4e357ff9b50de18a2791b6 | refs/heads/master | 2022-04-17T09:00:02.372051 | 2020-04-19T07:49:03 | 2020-04-19T07:49:03 | 252,303,885 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import os
import csv
import collections
import statistics
from Parameters import Edge_devices
d_num = (Edge_devices * 2 )+ 24
def getFileAverage():
file_path = 'C:/Users/Hassan/Documents/MIRI/Final_Thesis/NetworkX/outputs/'
list_of_files = os.listdir(file_path)
methods_list = ['single fog', 'Cloud through fog', 'Cloud direct', 'Inrange cluster', 'Neighbor cluuster']
average = {'single fog':[],'Cloud through fog':[], 'Cloud direct':[], 'Inrange cluster':[],'Neighbor cluuster':[]}
for file in list_of_files:
if file.endswith("." + "csv"):
filename = file_path + file
with open(filename, 'rU') as f:
reader = csv.reader(f)
data = pd.read_csv(filename, skiprows = d_num)
for col in data.columns:
for m in methods_list:
if m == col:
average[m].append(data.at[0,col])
return average
def getAverage(avg_dict):
final_avg = {}
res_time_deviation = {}
for method,resp_times in avg_dict.items():
if resp_times != []:
final_avg[method]= statistics.mean(resp_times)
res_time_deviation[method] = statistics.stdev(resp_times)
return final_avg, res_time_deviation
if __name__ == '__main__':
average = getFileAverage()
final_avg = getAverage(average)
print (final_avg)
| UTF-8 | Python | false | false | 1,444 | py | 6 | Iterations.py | 6 | 0.583102 | 0.580332 | 0 | 43 | 32.488372 | 118 |
Rabia-Redbrick/Flask-app | 5,231,270,210,028 | e6997a6ca15327286ff29b9ef2876d379ebcffe1 | 90af36d9389ecd2b973af2f0e907a376b665c157 | /app/adddata.py | 3ac62f484da153e8e9c882a6b0894cbbd4403a5b | []
| no_license | https://github.com/Rabia-Redbrick/Flask-app | 3e5021cf24421cd549be831e8ae075f779a2cb52 | 7765bc6eee24493d8da3aa4304f294f10efef131 | refs/heads/master | 2021-03-12T23:44:19.080998 | 2014-11-26T06:58:03 | 2014-11-26T06:58:03 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import app from app
import db,models from app
u = models.User(nickname='john', email='john@email.com')
db.session.add(u)
db.session.commit() | UTF-8 | Python | false | false | 150 | py | 5 | adddata.py | 3 | 0.7 | 0.7 | 0 | 7 | 19.428571 | 56 |
ElikBelik77/Splinter | 1,846,835,987,303 | ba612cd60fa0b360ab61c93069e26f01b1681ca2 | 5f3d79d5eb7c76f41a4c0464517d4b24b04b5e7f | /Model/Selenium/WhatsAppWriter.py | 61ec475ff90a16e9c589b00ccb4f785f3b751aa4 | []
| no_license | https://github.com/ElikBelik77/Splinter | fe936fd2674acb1d3ad1d43ff517cc1654953a8a | 99cc6c57cdd8bb72919391d18fb97522ccb7b622 | refs/heads/master | 2020-05-24T17:42:23.216699 | 2019-05-21T18:06:55 | 2019-05-21T18:06:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from bs4 import BeautifulSoup as bs
import time
import re
from Model import Message
class WhatsAppWriter(object):
def __init__(self,driver_path):
self.driver_path = driver_path
def read_contact(self):
url = self.driver.page_source
soup = bs(url, "lxml")
soupp = list(soup.strings)
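        # pattern1 matches phone-number contact names (+xxx xx-xxx-xxxx); pattern2 matches HH:MM timestamps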
pattern1 = re.compile("[+]\d\d\d \d\d-\d\d\d-\d\d\d\d")
pattern2 = re.compile("\d\d:\d\d")
clients = []
prev = ""
for s in soupp:
if (prev != ""):
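                # 'אתמול' is Hebrew for "yesterday", which WhatsApp shows in place of a time for older chats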
if (pattern2.match(s) or s == 'אתמול'):
if (pattern1.search(prev)):
prev = prev[1:-1]
if (prev not in clients):
clients.append(prev)
prev = s
dict = {}
dict['users'] = []
dict['groups'] = []
for c in clients:
if (pattern1.match(c)):
dict['users'].append(c)
else:
dict['groups'].append(c)
self.clients = dict
def open_WhatsApp(self):
self.driver = webdriver.Chrome(executable_path=self.driver_path)
self.driver.get("https://web.whatsapp.com/")
input("click after connected")
def get_contact(self):
self.driver.get("http://www.google.com/")
# open tab
self.driver.find_element_by_tag_name('body').send_keys(Keys.COMMAND + 't')
# You can use (Keys.CONTROL + 't') on other OSs
# Load a page
self.driver.get("https://web.whatsapp.com/")
# Make the tests...
# self.read_contact()
self.driver.find_element_by_tag_name('body').send_keys(Keys.COMMAND + 'w')
time.sleep(6)
self.read_contact()
return self.clients
def write(self, msg, name):
user = self.driver.find_element_by_xpath('//span[@title = "%s"]' % name)
user.click()
inp_xpath = "//div[@contenteditable='true']"
input_box = self.driver.find_element_by_xpath(inp_xpath)
input_box.send_keys(msg)
input_box.send_keys(Keys.ENTER)
def compareTimes(self, one, two):
one_l = one.split(':')
two_l = two.split(':')
if (int(one_l[0]) == int(two_l[0]) and int(one_l[1]) == int(two_l[1])):
return 0
if (int(one_l[0]) > int(two_l[0])):
return 1
if (int(two_l[0]) > int(one_l[0])):
return 2
if (int(one_l[1]) > int(two_l[1])):
return 1
return 2
def read(self, since, name):
user = self.driver.find_element_by_xpath('//span[@title = "%s"]' % name)
user.click()
url = self.driver.page_source
soup = bs(url, "lxml")
soupp = list(soup.strings)
pattern1 = re.compile("\d\d:\d\d")
pattern2 = re.compile("[+]\d\d\d \d\d-\d\d\d-\d\d\d\d")
next = 0
user = []
msg = []
for s in reversed(soupp):
if (s == name):
break
if (next):
user.append(s)
next = 0
if (pattern1.match(s)):
if (self.compareTimes(s, since) == 0 or self.compareTimes(s, since) == 1):
user.append(s)
next = 1
if (pattern2.match(s)):
if (user != []):
user.append(s)
msg.append(user)
user = []
non_empty_messages = [x for x in msg if len(x)!=0]
for x in non_empty_messages:
x.reverse()
return self.format_messages(non_empty_messages, name)
def format_messages(self, messages, group_name):
formatted_messages = []
for i in range(0,len(messages)):
j = 1
while j < len(messages[i]):
formatted_messages.append(Message.Message(messages[i][0],messages[i][j],messages[i][j+1],group_name,None))
j += 2
return formatted_messages
if __name__ == "__main__":
bot = WhatsAppWriter(r"../chromedriver")
bot.open_WhatsApp()
# print(bot.clients)
l = bot.get_contact()
print(l)
# bot.write("hi")
#l = bot.read("15:55", "hack")
#print(l)
| UTF-8 | Python | false | false | 4,416 | py | 18 | WhatsAppWriter.py | 11 | 0.512129 | 0.50238 | 0 | 149 | 28.604027 | 122 |
cp-helsinge/eksempler | 420,906,805,309 | 0c5b9d38ef3187109dc85d70812e9736cadc2552 | 9e7f7101912840955b372e594911a787e29a4ae5 | /turtle_spiral.py | d22d246666758f8fc7bacdcdf4be1f565ce3a15c | [
"MIT"
]
| permissive | https://github.com/cp-helsinge/eksempler | 65487285307c38ad9d50de9d632fcf6de1c8369b | 3adeb2357a3c2a747ad0d6a313fe87ac5c371873 | refs/heads/master | 2022-05-13T21:40:03.348035 | 2022-05-10T19:00:46 | 2022-05-10T19:00:46 | 235,204,737 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import turtle
###################################################
# range = number of times
# right or left = degrees to the right or left
# forward or backward = length of the line + direction forward/backward
# can several be drawn at the same time? how
###################################################
for n in range(1,80):
    turtle.left(80)
    turtle.forward(n*2)
    turtle.right(40)
    turtle.backward(n*2)
for n in range(1,50):
    turtle.right(80)
    turtle.forward(n*1.5)
# alternative spiral from the other branch of the unresolved merge:
# for n in range(1,32):
#     turtle.left(45)
#     turtle.forward(n*7)
input()
| UTF-8 | Python | false | false | 628 | py | 54 | turtle_spiral.py | 51 | 0.5568 | 0.48 | 0 | 24 | 25.041667 | 64 |
KAIST-AILab/End-to-End-Enc-Dec-DSTC9 | 19,533,511,286,929 | 9324eb91e3b6d070115f1f08f6f95a8189797a84 | 49bd24ab4eac9bf5d2d7a92e4cb9b5b961552624 | /t5/generate.py | 2b105ad77132b8d47b39fc32fa97ed7f84797c17 | [
"Apache-2.0"
]
| permissive | https://github.com/KAIST-AILab/End-to-End-Enc-Dec-DSTC9 | 34c45253728d974cf0124599b3f7389fb059f387 | 43882de251866c3793009293c7d190c0a3714797 | refs/heads/main | 2023-06-13T23:02:38.439475 | 2021-07-31T08:57:00 | 2021-07-31T08:57:00 | 322,506,553 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from transformers import T5ForConditionalGeneration
from baseline.generate import main as _main
from baseline.utils.evaluator import GenerationSampleEvaluator
from .dataset import MyResponseGenerationEvalDataset, MySelectionGenerationEvalDataset
from .models import T5DoubleHeadsModel, T5DoubleHeadsMultiLabelModel
from .utils.model import my_run_batch_generation_sample
def _get_classes(args):
run_batch_sample = my_run_batch_generation_sample
evaluator = GenerationSampleEvaluator
if args.task.lower() == 'detection-selection-generation':
# Eval detection & selection & generation model on generation
args.task = "generation"
dataset = MySelectionGenerationEvalDataset
return dataset, T5DoubleHeadsMultiLabelModel, run_batch_sample, evaluator
elif args.task.lower() == 'selection-generation':
# Eval selection & generation model on generation
return MySelectionGenerationEvalDataset, T5DoubleHeadsModel, run_batch_sample, evaluator
elif args.task.lower() == "generation":
# Eval generation-only model
return MyResponseGenerationEvalDataset, T5ForConditionalGeneration, run_batch_sample, evaluator
else:
raise ValueError(
"args.task not in ['selection-generation', 'generation'], got %s" % args.task)
def main():
return _main(_get_classes)
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 1,398 | py | 32 | generate.py | 28 | 0.742489 | 0.738197 | 0 | 34 | 40.117647 | 103 |
GeovaniSTS/PythonCode | 10,651,518,902,496 | 9d143047d922780d972498c48c3ff14c9c168342 | afc9ebbd7024ddeb0261802eff5c0cc0f66e3447 | /477 - Escolhendo_Vocação.py | 3bf2592a7f8df36fd4cf5cab105281777fce6dd7 | []
| no_license | https://github.com/GeovaniSTS/PythonCode | 26fbd9255f6f2caa3eee985239626b46a094b324 | 9b7dd9cb08f154ad2c121256b90c0e68353a1518 | refs/heads/main | 2023-08-06T10:55:06.857782 | 2021-09-21T15:04:38 | 2021-09-21T15:04:38 | 408,847,150 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | forca = int(input())
inteligencia = int(input())
destreza = int(input())
furtividade = int(input())
peso = int(input())
if forca < 5:
print('Mage')
elif destreza < 5:
print('Orc')
elif peso < 5:
print('Paladin')
else:
print('Knight') | UTF-8 | Python | false | false | 255 | py | 60 | 477 - Escolhendo_Vocação.py | 60 | 0.603922 | 0.592157 | 0 | 14 | 16.357143 | 27 |
Herve2iko/Ishuri | 6,012,954,228,794 | 2eccbc86047bde6f45f1c71f4dd4aa114590f75b | f55b764b2905e3df3a4aecf672449120b9c4df42 | /Student/forms.py | c527a7ed03e725f12fa9bf602cf8ba4e38a31f47 | []
| no_license | https://github.com/Herve2iko/Ishuri | 955cc7e6ff05243ef0c7ef66bf6733344f73ca2f | 52519d4627514df349cf3df48759f759af2a9c51 | refs/heads/master | 2022-12-09T13:02:43.155528 | 2020-09-04T13:51:10 | 2020-09-04T13:51:10 | 292,855,304 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django import forms
from .models import *
class SchoolForm(forms.ModelForm):
class Meta:
model = School
fields = '__all__'
class ClasseForm(forms.ModelForm):
class Meta:
model = Classe
fields = '__all__'
class RegisterStudentForm(forms.ModelForm):
class Meta:
model = RegisterStudent
fields = '__all__'
class ProfileForm(forms.Form):
    #____________user fields___________________#
username=forms.CharField(max_length=20)
password=forms.CharField(max_length=20, widget=forms.PasswordInput)
password1=forms.CharField(max_length=20, widget=forms.PasswordInput)
nom=forms.CharField(max_length=20)
prenom=forms.CharField(max_length=20)
    #_________________profile fields_______________#
age =forms.IntegerField()
matricule=forms.CharField(max_length=20)
photo = forms.ImageField()
class ConnexionForm(forms.Form):
username= forms.CharField(label="Nom d'utilisateur :", max_length=20)
password = forms.CharField(label="mot de passe :", widget= forms.PasswordInput)
| UTF-8 | Python | false | false | 1,012 | py | 9 | forms.py | 6 | 0.689723 | 0.674901 | 0 | 39 | 24.846154 | 80 |
RuanXavierSantos/Exercicios_Python | 19,670,950,225,128 | c8f49c6981e9361a34266a8e5f456523c9b5c2ea | 35898a6ca658ef7364a0078680c88f7397ffdd8d | /aula06_grupo31.py | 799913f675a7051013a5865cde2f56af36c6e7c7 | []
| no_license | https://github.com/RuanXavierSantos/Exercicios_Python | 74c3683ecf7c763956a0a02ee7578cdf3d7b4c5e | f4fbc3a9014ffa2cb58ffc897da6c8daa6fc836d | refs/heads/main | 2023-08-25T06:16:54.591413 | 2021-10-20T13:22:03 | 2021-10-20T13:22:03 | 419,338,327 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 1 11:11:25 2020
@author: ruan_
"""
#Exercise 10
"""Receives the number of students that will be processed in the code"""
n=int(input("Informe o numero de alunos: "))
diario={}
i=1
"""Laço que recebe os valores do diário"""
while i<=n:
d={}
print("Informe nome, rga, p1, p2, pf, med e freq")
    #Prof, I did this part to make typing the code easier (17-23),
    #because most likely there will be several different codes with different
    #ways of typing; if you prefer, remove lines 12 to 40 and, when
    #inserting, type for example:
#diario["aluno1"]["rga"]=2334243345
    #or else, remove lines 26 to 32 and type:
#d["nome"]="jhhjdjhd"
#...
#d["freq"]=100
#d["nome"]="jhhjdjhd" ai começa tudo de novo, ele para quando i==n
nome=input("Informe o nome: ")
rga=int(input("Informe o rga: "))
p1=float(input("Informe a p1: "))
p2=float(input("Informe a p2: "))
pf=float(input("Informe a pf: "))
med=float(input("Informe a média: "))
freq=float(input("Informe a frequencia: "))
d["nome"]=nome
d["rga"]=rga
d["p1"]=p1
d["p2"]=p2
d["pf"]=pf
d["med"]=med
d["freq"]=freq
diario["aluno{}".format(i)]=d
i=i+1
"""Função que organiza os dados printados"""
print(diario)
print("\n\n\nPARA MODIFICAR, DIGITE : diario['alunoN']['rga']=2, onde N é um valor para o aluno, rga é uma variavel exemplo e 2 é um valor qualquer\n\n\n")
#Exercise 11
def pdiario(diario):
i=1
for x in diario:
print("=================",x.upper(),"=================")
print("||||Nome: ",diario["aluno{}".format(i)]["nome"])
print("||||RGA: ",diario["aluno{}".format(i)]["rga"])
print("||||Prova 1: ",diario["aluno{}".format(i)]["p1"])
print("||||Prova 2: ",diario["aluno{}".format(i)]["p2"])
print("||||Prova Final: ",diario["aluno{}".format(i)]["pf"])
print("||||Média: ",diario["aluno{}".format(i)]["med"])
print("||||Frequência: ",diario["aluno{}".format(i)]["freq"])
print("===========================================")
i=i+1
"""Função que busca um RGA entre os outros"""
#Exercício 12
def sRGA(diario):
i=1
a=1
rg_a=int(input("Informe um rga: "))
for x in diario:
if rg_a==diario["aluno{}".format(i)]["rga"]:
print("RGA ENCONTRADO: ",diario["aluno{}".format(i)]["nome"]
,"de rga", diario["aluno{}".format(i)]["rga"])
a=0
i=i+1
if a!=0:
print("Nenhum resultado encontrado!")
"""Função que busca a maior média entre as outras"""
#Exercício 13
def media(diario):
i=1
lista=[]
for x in diario:
"""Mecanismo que adiciona as médias em uma lista"""
lista.append(diario["aluno{}".format(i)]["med"])
i=i+1
i=1
j=0
"""Aqui a lista é ordenada"""
lista.sort()
lista.reverse()
while len(lista)>0:
if lista[j]==diario["aluno{}".format(i)]["med"]:
print("================= ALUNO",i,"=================")
print("||||Nome: ",diario["aluno{}".format(i)]["nome"])
print("||||RGA: ",diario["aluno{}".format(i)]["rga"])
print("||||Prova 1: ",diario["aluno{}".format(i)]["p1"])
print("||||Prova 2: ",diario["aluno{}".format(i)]["p2"])
print("||||Prova Final: ",diario["aluno{}".format(i)]["pf"])
print("||||Média: ",diario["aluno{}".format(i)]["med"])
print("||||Frequência: ",diario["aluno{}".format(i)]["freq"])
print("===========================================")
i=1
del lista[j]
#j=j+1
else:
i=i+1
| UTF-8 | Python | false | false | 3,890 | py | 13 | aula06_grupo31.py | 13 | 0.504278 | 0.482759 | 0 | 102 | 35.470588 | 155 |
AyanUpadhaya/LibraryManagerApp | 8,924,942,077,722 | 999e19d1e6324ccab483b2a49c626131ca047747 | 85f4e74aa7a2ae812dee1132edd1c5fd0d16a061 | /datedata.py | e5a271797b52c700afe3ac2d22518bbc7a6fecdd | []
| no_license | https://github.com/AyanUpadhaya/LibraryManagerApp | b69f4c03142dee0156284756152b166c0bd0a226 | 50aecf73017f26366436cf3bc84b349aeb2da67f | refs/heads/master | 2023-05-10T04:15:31.694759 | 2021-06-18T15:22:26 | 2021-06-18T15:22:26 | 374,447,365 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from datetime import datetime
class Date:
def __init__(self):
self.current=datetime.date(datetime.now())
self.data=str(self.current).split('-')
self.year=self.data[0]
self.date=self.data[2]
self.month=self.data[1]
self.date_int=int(self.data[2])
self.submission_days=self.date_int+6 #6 days for submission
self.submission_date=str(self.submission_days)+'/'+self.month+'/'+self.year
def current_date(self):
currentDate=self.date+'/'+self.month+'/'+self.year
return currentDate | UTF-8 | Python | false | false | 500 | py | 9 | datedata.py | 7 | 0.714 | 0.702 | 0 | 15 | 32.4 | 77 |
sfu-natlang/graphprop-smt | 12,008,728,583,787 | 244813233d865e455f1f32d9ee6e1a7bb0e563e4 | a9ce145e46819a74abe22c96037fbbd1aeefc3e1 | /dp/ppdb_processor.py | 00540841045f0480507e51e731b6b832aa5400e6 | []
| no_license | https://github.com/sfu-natlang/graphprop-smt | 31f64abd9d3558f70e2151469efbb9576ed7b9b2 | 8dd687c2f5839b683179bc054c0f0742b4d94799 | refs/heads/master | 2021-01-01T16:13:54.929737 | 2015-01-24T23:02:15 | 2015-01-24T23:02:15 | 20,464,633 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# input size(l,xl,xxl) type(lexical)
# output phrase_to_id and id_to_phrase object file, basic_graph (junto format)
import sys
import random
import gzip
import pickle
import codecs
input_file = "ppdb-1.0-l-2gram"
output_file = "ppdb-1.0-l-2gram-dp"
dic={}
weight_type="SCORE"
#weight_type="EgivenF"
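# "EgivenF" weights a paraphrase by p(e|f) alone; "SCORE" combines several PPDB features into a single weight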
with open(input_file) as myinput:
#with codecs.open(input_file,encoding='utf-8') as myinput:
#with codecs.open(output_file,encoding='utf-8',mode="w") as myoutput:
with open(output_file,"w") as myoutput:
for line in myinput:
#print line
list = line.split(" ||| ")
source=list[1]
target=list[2]
features=list[3]
if weight_type == "EgivenF":
indx = features.find("p(e|f)=")
temp = features[indx+9:].split()[0]
score = float(temp)
elif weight_type == "SCORE":
d = {}
for item in features.split():
ind = item.split("=")[0]
val = item.split("=")[1]
if ind != "Alignment":
#print ind,val
d[ind] = float(val)
score = d["p(e|f)"]+d["p(f|e)"]+d["p(f|e,LHS)"]+d["p(e|f,LHS)"]+ 100*d["RarityPenalty"]+ 0.3*d["p(LHS|e)"]+0.3*d["p(LHS|f)"]
#print source,'\t',target,'\t',score
if not source in dic:
dic[source]=[(target,score)]
else:
if not target in [item[0] for item in dic[source]]:
#TODO maximum value
dic[source].append((target,score))
for item in dic.keys():
trans = dic[item]
myoutput.write(item+'\t'+str(trans)+'\n')
| UTF-8 | Python | false | false | 1,773 | py | 81 | ppdb_processor.py | 68 | 0.496898 | 0.483926 | 0 | 52 | 33.019231 | 140 |
csgn/google-meet-auto-admiter | 19,344,532,708,654 | a3c7702fcde93b304bda6cf0e5e86af1cdf9b39a | f03e7260990c9bf004dee921ba23da1c825d54c7 | /admiter/admiter.py | 09f05609aa993c3c7e9ed4211d76dd08aa368fbc | [
"MIT"
]
| permissive | https://github.com/csgn/google-meet-auto-admiter | d58c353428e0bc20ea6d8ce85504655101cdc1da | d3c0c1d22854f9a595d2a872e7b02ab46de724c7 | refs/heads/main | 2023-02-04T15:33:57.587297 | 2020-12-22T07:42:44 | 2020-12-22T07:42:44 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import re
import sys
import time
from datetime import datetime
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, WebDriverException
class Admiter:
def __init__(self, driver):
self.driver = driver
self.__connection = False
@property
def connection(self):
return self.__connection
@connection.setter
def connection(self, value):
self.__connection = value
def connect(self):
link = "https://apps.google.com/intl/en-US/meet/"
print("🌐 waiting {}" .format(link))
try:
self.driver.get(link)
print("✅ Connection Success")
self.connection = True
except:
print("❌ Connection Error")
self.connection = False
def parse_url(self, url):
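        # matches Google Meet room URLs of the form https://meet.google.com/abc-defg-hij (the domain part is optional in the pattern)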
regex = r"^((http|https)\:\/\/(meet\.google\.com\/)?[a-z]{0,}\-[a-z]{0,}\-[a-z]{0,})"
matches = re.finditer(regex, url)
for match_num, match in enumerate(matches, start=1):
if match.group():
return True
return False
def is_admit(self):
admit_button = "//body/div[@id='yDmH0d']/div[3]/div[1]/div[2]/div[3]/div[3]/span[1]/span[1]"
try:
element = self.driver.find_element(By.XPATH, admit_button)
element.click()
print("🥳 Incoming request accepted")
except:
pass
def listen(self):
while self.connection:
if self.driver:
try:
current_url = self.driver.current_url
print("🟢 {}" .format(current_url))
if self.parse_url(current_url):
self.is_admit()
pass
except ConnectionRefusedError:
print("🔴 {}" .format("connection refused. reconnecting"))
continue
except WebDriverException:
print("💀 Driver shutdown. ( {} )" .format(datetime.now()))
self.connection = False
finally:
time.sleep(0.5)
else:
print("🔎 Driver not found")
sys.exit()
| UTF-8 | Python | false | false | 2,455 | py | 3 | admiter.py | 2 | 0.531443 | 0.525688 | 0 | 79 | 29.772152 | 100 |
zopefoundation/z3c.schema | 9,663,676,430,046 | 54b4878bc2ecfcd13898a4167277e680b896abb9 | 05c85e8044eab807589dee640c2fd6e46acc7055 | /src/z3c/schema/regex/field.py | 56141586563dd8d464492da03e21a7cbb885da79 | [
"ZPL-2.1"
]
| permissive | https://github.com/zopefoundation/z3c.schema | 9b3b7bb9474c438becfc0d7ec9be6d2d49e70655 | de43fe694cee7939947898fc6ff8df65a63091f8 | refs/heads/master | 2023-08-16T17:17:49.794935 | 2023-07-19T06:33:40 | 2023-07-19T06:33:40 | 8,458,491 | 0 | 1 | NOASSERTION | false | 2023-07-19T06:33:09 | 2013-02-27T15:06:22 | 2022-09-16T09:03:40 | 2023-07-19T06:33:08 | 165 | 0 | 2 | 0 | Python | false | false | ##############################################################################
#
# Copyright (c) 2008 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
"""
__docformat__ = "reStructuredText"
import re
import zope.interface
import zope.schema
from z3c.schema.regex import interfaces
@zope.interface.implementer(interfaces.IRegex)
class Regex(zope.schema.ASCIILine):
"""Regex schema field.
Must be a compilable regular expression
"""
def _validate(self, value):
super()._validate(value)
try:
re.compile(value)
except re.error as e:
raise interfaces.InvalidRegex('{!r}, {}'.format(value, e))
def fromUnicode(self, value):
v = str(value.strip())
self.validate(v)
return v
| UTF-8 | Python | false | false | 1,256 | py | 39 | field.py | 16 | 0.601911 | 0.596338 | 0 | 43 | 28.209302 | 78 |
jaychsu/algorithm | 11,269,994,233,034 | ce6f9f674ec0091ed8795fa2a048c666291e8455 | ebd9c249d446d809abc9a0f3e4593f34922a1b93 | /lintcode/131_building_outline.py | 8e15b3dfc2be70b8f4bb56fecfd39d8d56b2f92a | []
| no_license | https://github.com/jaychsu/algorithm | ac7a9dc7366f58c635a68bc46bf1640d2f5ff16d | 91892fd64281d96b8a9d5c0d57b938c314ae71be | refs/heads/master | 2023-05-11T00:40:39.237813 | 2022-09-14T07:43:12 | 2022-09-14T07:43:12 | 106,277,156 | 143 | 39 | null | false | 2022-09-14T07:43:13 | 2017-10-09T11:51:48 | 2022-09-14T07:40:58 | 2022-09-14T07:43:12 | 679 | 127 | 34 | 0 | Python | false | false | """
this problem is similar to `leetcode/218_the_skyline_problem.py`
but with a different output format
"""
import heapq
class HashHeapq:
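    # a min-heap with lazy deletion: removed values are counted in 'deleted' and skipped once they surface at the top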
def __init__(self):
self.heap = []
self.deleted = {}
def push(self, val):
heapq.heappush(self.heap, val)
def pop(self):
if self.is_empty():
return -1
return heapq.heappop(self.heap)
def remove(self, val):
if self.is_empty():
return
if val not in self.deleted:
self.deleted[val] = 0
self.deleted[val] += 1
def top(self):
if self.is_empty():
return -1
return self.heap[0]
def is_empty(self):
while self.heap and self.deleted.get(self.heap[0]):
val = heapq.heappop(self.heap)
self.deleted[val] -= 1
return not self.heap
class Solution:
def buildingOutline(self, buildings):
"""
:type buildings: List[List[int]]
:rtype: List[List[int]]
"""
ans = []
if not buildings:
return ans
time = []
for x, _x, height in buildings:
time.append((x, height, True))
time.append((_x, height, False))
time.sort()
heap = HashHeapq()
tmp = []
for x, height, is_start in time:
if is_start:
heap.push(-height)
else:
heap.remove(-height)
max_h = -heap.top() if not heap.is_empty() else 0
if tmp and tmp[-1][0] == x:
tmp.pop()
if tmp and tmp[-1][1] == max_h:
continue
tmp.append([x, max_h])
_x = pre_h = 0
for x, height in tmp:
if pre_h > 0:
ans.append([_x, x, pre_h])
_x = x
pre_h = height
return ans
| UTF-8 | Python | false | false | 1,864 | py | 549 | 131_building_outline.py | 528 | 0.474249 | 0.465129 | 0 | 88 | 20.181818 | 64 |
jpesperidiao/SustentabilityIndicators | 3,384,434,230,909 | ff0bcd12540fc513bd993669020f0380f34771ad | ca38173090cf03f408afc5d97a70fa5ecb2e3143 | /Core/processManager.py | 9eb52373876d92166e3eebe341e87c01a3ebc2ad | []
| no_license | https://github.com/jpesperidiao/SustentabilityIndicators | 5e5b978b7813866e344244fdf59ce60d501e87c5 | 83bd640f4100677c4994b6f6989a469508f1c6fb | refs/heads/master | 2018-09-23T21:26:07.661654 | 2018-07-27T11:57:05 | 2018-07-27T11:57:05 | 134,781,375 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
/***************************************************************************
SustentabilityIndicators
A QGIS plugin
.
-------------------
begin : 2018-05-30
copyright : (C) 2018 by João P. Esperidião
email : joao.p2709@gmail.com
git sha : $Format:%H$
***************************************************************************/
"""
# python libs
import os
# qgis/Qt libs
from qgis.PyQt.QtCore import QObject
# own libs
from SustentabilityIndicators.Gui.slope_map import SlopeAndInfiltrationMap
from SustentabilityIndicators.Gui.landUse_map import LandUseMap
from SustentabilityIndicators.Gui.cn_map import CnMap
from SustentabilityIndicators.Gui.waterRetainingMaxCapacity_map import WaterRetainingMaxCapacityMap
from SustentabilityIndicators.Gui.hydricPotential_map import HydricPotentialMap
class ProcessManager(QObject):
def __init__(self, iface, processName=None, parent=None):
"""
Class constructor.
:param processName: (str) process name ALIAS to be started.
:param parent: (QObject) object parent to ProcessManager.
"""
super(ProcessManager, self).__init__(parent)
# process gui object initialization dictionary
self.processDict = {
self.tr("Slope and Infiltration Rate Maps Export") : SlopeAndInfiltrationMap(iface),
self.tr("Land Use Map Export") : LandUseMap(iface),
self.tr("Curve-Number Map Export") : CnMap(iface),
self.tr("Maximum Water Retaining Capacity Map Export") : WaterRetainingMaxCapacityMap(iface),
self.tr("Hydric Potential Map Export") : HydricPotentialMap(iface),
self.tr("Aquifer Recharge Potential"): None
}
if processName:
self.dlg = self.getDialogFromName(processName=processName)
else:
self.dlg = None
def getAvailableProcesses(self):
"""
Looks for all available processes inside Processes directory.
:return: (list-of-str) list of all processes available (process name)
"""
dir_ = os.path.join(os.path.dirname(__file__), "Processes/")
l = []
for filename in os.listdir(dir_):
if filename == "__init__.py" or filename[-3:].lower() != ".py":
continue
elif ".py" in filename:
l.append(filename.replace(".py", ""))
return l
def getDialogFromName(self, processName):
"""
Gets the dialog from process name.
:param :
"""
return self.processDict[processName]
| UTF-8 | Python | false | false | 2,725 | py | 20 | processManager.py | 17 | 0.573632 | 0.567022 | 0 | 67 | 39.641791 | 105 |
aasa11/pychallenge | 4,518,305,612,999 | 5f9a64bbd544a9c6fa5ebb1d8ddc4a03a1a6da60 | a2312fe43596c308627982719537b6c552ad912d | /p7.py | 5f78b8893576ed928368bc3953efe270d122e061 | []
| no_license | https://github.com/aasa11/pychallenge | 19417e58a3420755174917826b51b433c37ebaa7 | 3a490c05826f8a387f05067b28662c5e042df72f | refs/heads/master | 2021-01-15T14:46:37.302484 | 2013-08-21T07:39:54 | 2013-08-21T07:39:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/
#coding=gbk
'''
Created on 2013/08/06
@summary:
@author: huxiufeng
'''
import Image
def isalpha(ch):
if ord(ch) <= ord('z') and ord(ch) >= ord('a') :
return True
elif ord(ch) <= ord('Z') and ord(ch) >= ord('A') :
return True
elif ord(ch) <= ord('9') and ord(ch) >= ord('0') :
return True
elif ch ==' ' or ch == ',' or ch == '. ' or ch ==':':
return True
return False
def getdata(lst, i):
ch = chr(lst[i])
if isalpha(ch):
return ch
return None
def openimg(imgfile):
im = Image.open(imgfile,'r')
for j in xrange(im.size[1]):
des = ''
for i in xrange(im.size[0]):
ch = getdata(im.getpixel((i,j)), 0)
if ch is not None:
des += ch
print des
#----------------------It is a split line--------------------------------------
def main():
imgfile = r'G:\down\ChrDw\oxygen.png'
openimg(imgfile)
des = ''
for i in [105, 110, 116,101, 103,114,105, 116, 121] :
des +=chr(i)
print des
#----------------------It is a split line--------------------------------------
if __name__ == "__main__":
main()
print "It's ok" | UTF-8 | Python | false | false | 1,215 | py | 17 | p7.py | 16 | 0.450206 | 0.417284 | 0 | 56 | 20.714286 | 79 |
Moldokulovvv/course_shop_api | 1,288,490,236,124 | 73e75ad8ce9d0cd1936284db323224d9d15fe786 | 012b87ebf8edb0c6bdfe47e6f7a56066fc402d9e | /cart/serializer.py | c618b2fdcec2b13a8c0d23419ee8c865c239a40a | []
| no_license | https://github.com/Moldokulovvv/course_shop_api | bd9637d367adadb39eba59a58cbf59e07f931af2 | 2bcf6f3b368f769d6bbcb699cb12a04230d99539 | refs/heads/master | 2023-03-31T22:16:52.933941 | 2021-03-31T13:02:07 | 2021-03-31T13:02:07 | 347,642,117 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from rest_framework import serializers
from account.models import MyUser
from cart.models import Cart
from main.models import Course
from order.models import Order
class CartSerializer(serializers.ModelSerializer):
class Meta:
model = Cart
fields = ('course', 'id' )
def create(self, validated_data):
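        # create an Order for the user on their first add-to-cart, otherwise reuse the existing one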
request = self.context.get('request')
customer_id = request.user.id
# if request.user.order:
# print('123')
validated_data['customer_id'] = customer_id
b = request.data
# print(b.get('course'))
course = Course.objects.get(id=int(b.get('course')))
user = MyUser.objects.get(id=request.user.id)
if not Order.objects.filter(email=request.user.id):
m = Order.objects.create(email=request.user, address='stop')
m.save()
r = Order.objects.get(email=request.user.id)
print(type(r.id))
validated_data['order_id'] = r.id
cart = Cart.objects.create(**validated_data)
return cart
else:
r = Order.objects.get(email=request.user.id)
r.city += "111."
r.save()
print(r.id)
cart = Cart.objects.create(order_id=r.id, **validated_data)
return cart
| UTF-8 | Python | false | false | 1,326 | py | 17 | serializer.py | 16 | 0.57994 | 0.575415 | 0 | 47 | 27 | 72 |
tzmfreedom/fake2any | 11,416,023,101,433 | 032557a68b5274cba9bc91cad54d9f7884452082 | 8965786d259d2b84146f7bc75b67861d3b80dc27 | /fake2any/salesforce.py | 2d2f6fee96e97b7dce2f44d364a73015c556c71c | [
"MIT"
]
| permissive | https://github.com/tzmfreedom/fake2any | ac0b8ee0b2d0334926495bd3aae7918b3ac91964 | 648fef0fb52d612b32ffb35a56ba05f5fd626ec5 | refs/heads/master | 2021-01-10T02:01:45.405521 | 2015-10-12T08:53:38 | 2015-10-12T08:55:49 | 44,094,317 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding:utf-8 -*-
from plugin import Plugin
from faker import Factory
from pyforce import pyforce
class Salesforce(Plugin):
"""
"""
def load(self):
svc = pyforce.Client()
svc.login(self._config["username"], self._config["password"])
records = []
for i in range(0, int(self._config["args"].rows)):
record = {}
record["type"] = self._config["sobj_type"]
for column in self._config["columns"]:
record[column["name"]] = self.getFakeColumnValue(column)
records.append(record)
#print(records)
result = svc.create(records)
print(result) | UTF-8 | Python | false | false | 670 | py | 5 | salesforce.py | 5 | 0.567164 | 0.564179 | 0 | 20 | 32.55 | 72 |
markbarbet/simulations | 13,511,967,146,681 | c1d31ae7c2b78bad82caed0035e9c96e9ad1d737 | ec47b276871182eb6f42842a3f750a3ee94961ec | /finalSens.py | b04ed455adab2af0a0e2ca01afeb40725bc6466f | []
| no_license | https://github.com/markbarbet/simulations | 69ea572b99441ff139a67968c69119017d8e5dd3 | dda1dba06ec74f8a6027b012b0ea06fab1ab9159 | refs/heads/master | 2020-03-22T18:51:17.717074 | 2018-07-10T21:28:11 | 2018-07-10T21:28:11 | 140,487,176 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 06 10:19:27 2017
@author: Skoron
"""
import numpy as np
import pandas as pd
import os
import copy
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a-b) <=rel_tol
mechs=['appendedinput_beta001_forward_FFCM1.cti']
philist=[0.6,1.0,5.0]
philist=[1.0,0.6,5.0]
num_oldrxns=0
width=0.3
step=0
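# 'results' is assumed to be provided by the surrounding session; it is never defined in this script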
for i in results:
if i.mechanism.split('\\')[-1]=='FFCM1.cti':
num_oldrxns=len(i.Index[1])
fig,ax=plt.subplots(figsize=(6,2))
axis_font = {'fontname':'Arial', 'size':'14'}
subresults=[]
for k in results:
for j in np.arange(len(philist)):
if k.mechanism.split('\\')[-1] in mechs and isclose(philist[j],k.phi):
#print('hi')
subresults.append(k)
phicond=['$\phi=0.6$','$\phi=1.0$','$\phi=5.0$']
phicond=['$\phi=1.0$','$\phi=0.6$','$\phi=5.0$']
colors=['r','w','b']
fsens=subresults[2].flamespeed_sens[num_oldrxns:]['Su']
rxns=subresults[2].Index[1][num_oldrxns:]
absVal=np.abs(fsens)
temp=pd.DataFrame(columns=['sens','abs','rxn'])
temp['sens']=fsens
temp['abs']=absVal
temp['rxn']=rxns
sortedvals=temp.sort_values(by=['abs'],ascending=False)
topsens=sortedvals['sens'][0:3].tolist()
toprxns=sortedvals['rxn'][0:3].tolist()
indexing=np.arange(len(toprxns))
ax.barh(indexing+step-0.4,topsens[::-1],width,color=colors[2],label=phicond[2],edgecolor='k', linewidth=1)
labels=[]
for l in toprxns:
labels.append(l.split('=')[0].rstrip('<')+'='+l.split('=')[1].lstrip('>'))
maxlen=0
#for l in np.arange(len(labels)):
# if len(labels[l])>maxlen:
#
# maxlen=len(labels[l])
#for l in np.arange(len(labels)):
# if len(labels[l])<maxlen:
# padding=maxlen-len(labels[l])
# labels[l]=labels[l]+' '*padding
#labels=['CO+2H=$\mathrm{H}_2$','2H+$\mathrm{O}_2(+\mathrm{M})=\mathrm{H}_2+\mathrm{O}_2(+\mathrm{M})$',]
ax.set_yticks(np.arange(len(toprxns)))
labelList = ['H+CO+H=CO+H$_2$','H+O$_2$+H(+M)=H$_2$+O$_2$(+M)','H+O$_2$+H(+M)=2OH(+M)','H+H$_2$+O=H+H$_2$O','CO+H+OH=CO+H$_2$O' ]
labelList = labelList[0:3]
#ax.set_yticklabels(labels[::-1],position=(0.05,0),horizontalalignment='left')
ax.set_yticklabels(labelList[::-1],position=(0.05,0),horizontalalignment='left')
hatches=['xxx','////']
for i in np.arange(len(philist)):
if i<2:
step=step+width
fsens1=subresults[i].flamespeed_sens[num_oldrxns:]['Su']
rxns1=subresults[i].Index[1][num_oldrxns:]
absVal1=np.abs(fsens1)
temp1=pd.DataFrame(columns=['sens','abs','rxn'])
temp1['sens']=fsens1
temp1['abs']=absVal1
temp1['rxn']=rxns1
sortedvals1=temp1.sort_values(by=['abs'],ascending=False)
topsens1=sortedvals1['sens'][0:3]
toprxns1=sortedvals1['rxn'][0:3]
indexing1=np.arange(len(toprxns1))
senstoplot=sortedvals1[sortedvals1['rxn'].isin(toprxns)]
possiblesens=senstoplot['sens'].tolist()
possiblerxns=senstoplot['rxn'].tolist()
matched=[False]*10
matched2=[False]*len(senstoplot['rxn'])
finalsensitivities=[]
for j in np.arange(len(toprxns)):
for k in np.arange(len(senstoplot['rxn'])):
if toprxns[j]==senstoplot['rxn'][k] and matched[j]==False and matched2[k]==False:
matched[j]=True
matched2[k]=True
finalsensitivities.append(senstoplot['sens'][k])
ax.barh(indexing+step-0.35,finalsensitivities[::-1],width,color=colors[i],label=phicond[i],hatch=hatches[i],edgecolor='k', linewidth=1)
ax.tick_params(axis='y',direction='in')
ax.tick_params(axis='x',direction='in')
ax.annotate('$\mathrm{H}_2/\mathrm{CO}$:50/50',xy=(-0.055,-0.3),**axis_font)
ax.set_xlabel(r'Sensitivity coefficient, $\frac{\partial\mathrm{ln}S_u^0}{\partial\mathrm{ln}k}$',**axis_font)
handles,labels = ax.get_legend_handles_labels()
handles=[handles[2],handles[1],handles[0]]
labels=[labels[2],labels[1],labels[0]]
ax.legend(handles,labels,loc=1,bbox_to_anchor=(1.005,1.007))
#ax.legend(loc=1,bbox_to_anchor=(1.005,1.007))
#ax.add_line(Line2D([0.5, 0.5], [0, 1], transform=ax.transAxes,
# linewidth=1, color='k'))
ax.axvline(x=0, color='k',linewidth=1.0)
plt.savefig(os.getcwd()+'\\figures\\symposiumFigs\\'+'CO_H2_flamespeed_sens.pdf',dpi=1200,bbox_inches='tight')
# ax.barh(indexing+step,topsens,width,color=colors[0])
# step=step+0.2
#
# ax.set_yticks(np.arange(len(toprxns)))
# ax.set_yticklabels(toprxns)
#fsens2=subresults[1].flamespeed_sens[num_oldrxns:]['Su']
#rxns2=subresults[1].Index[1][num_oldrxns:]
#absVal2=np.abs(fsens2)
#temp2=pd.DataFrame(columns=['sens','abs','rxn'])
#temp2['sens']=fsens2
#temp2['abs']=absVal2
#temp2['rxn']=rxns2
#sortedvals2=temp2.sort_values(by=['abs'],ascending=False)
#topsens2=sortedvals2['sens'][0:10]
#toprxns2=sortedvals2['rxn'][0:10]
#
#fsens3=subresults[2].flamespeed_sens[num_oldrxns:]['Su']
#rxns3=subresults[2].Index[1][num_oldrxns:]
#absVal3=np.abs(fsens3)
#temp3=pd.DataFrame(columns=['sens','abs','rxn'])
#temp3['sens']=fsens3
#temp3['abs']=absVal3
#temp3['rxn']=rxns3
#sortedvals3=temp3.sort_values(by=['abs'],ascending=False)
#topsens3=sortedvals3['sens'][0:10]
#toprxns3=sortedvals3['rxn'][0:10]
| UTF-8 | Python | false | false | 5,278 | py | 57 | finalSens.py | 56 | 0.637742 | 0.591133 | 0 | 140 | 36.664286 | 143 |
pedroalpacheco/100DaysOfCode | 5,626,407,179,280 | a950f0007e304bc8c4fed05c1dac3ca13709ca90 | a7be1cb64c2feef6147fe2073c5d6e50f0cd456d | /051/uri1036.py | 067c28cdc277db9502ef29a9d89fe19e84be9152 | []
| no_license | https://github.com/pedroalpacheco/100DaysOfCode | 85d2e0f0d95b78a74fd9dabda6a1862a7fea28d1 | a4aab06d61b8b45a420e304b744f3211473bbe1a | refs/heads/master | 2021-01-01T18:43:58.232224 | 2017-10-06T13:27:07 | 2017-10-06T13:27:07 | 98,415,867 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
numeros = input().split(' ')
a, b, c = numeros
x = (float(b)**2)-(4*float(a)*float(c))
if x < 0:
print('Impossivel calcular')
else:
try:
x = math.sqrt(x)
r1 = (-float(b) + float(x)) / (2 * float(a))
r2 = (-float(b) - float(x)) / (2 * float(a))
print('R1 = {:.5f}'.format(r1))
print('R2 = {:.5f}'.format(r2))
except ZeroDivisionError:
print("Impossivel calcular");
| UTF-8 | Python | false | false | 443 | py | 66 | uri1036.py | 50 | 0.505643 | 0.476298 | 0 | 21 | 20.047619 | 52 |
mkbeh/rin-bitshares-arbitry-bot | 10,591,389,382,008 | f43752e8690b52ae20d28d3a7f81d8078c622da6 | afe1f756446d89459cd1679bbbfe390fa5d4702e | /src/aiopybitshares/grambitshares.py | b9a16bd4535fb6bc067ad69bed21779ff80c68f3 | [
"MIT"
]
| permissive | https://github.com/mkbeh/rin-bitshares-arbitry-bot | 244329147dc6fd1e3c9c8b59897975e77216e0c6 | 7ef49e593b0a6d3c5c6561749bb36bc0c6bdee7e | refs/heads/master | 2023-08-15T02:12:02.885675 | 2021-11-23T16:14:10 | 2021-11-23T16:14:10 | 160,044,380 | 9 | 7 | MIT | false | 2023-07-20T15:09:52 | 2018-12-02T12:21:37 | 2023-07-06T12:18:49 | 2023-07-20T15:09:52 | 222 | 9 | 6 | 5 | Python | false | false | # -*- coding: utf-8 -*-
import ujson
import aiohttp
from aiohttp.client_exceptions import ClientConnectionError
from src.extra.baserin import BaseRin
default_node = 'wss://bitshares.openledger.info/ws'
class GramBitshares:
def __init__(self, node=default_node):
self._node = node
self._ws = None
self._session = None
async def ws_connect(self, node=default_node):
session = aiohttp.ClientSession()
try:
self._ws = await session.ws_connect(node)
except ClientConnectionError:
await session.close()
raise
else:
return session
async def connect(self, ws_node=default_node):
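        # open a websocket session and, when connecting to the wallet node, unlock the wallet before use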
gram = GramBitshares(ws_node)
self._session = await gram.ws_connect(ws_node)
self._ws = gram._ws
if ws_node == BaseRin.wallet_uri and await self.is_wallet_locked():
await self.unlock_wallet()
return gram
async def call_method(self, method, *args):
await self._ws.send_str(
ujson.dumps(
{'id': 0, 'method': '{}'.format(method), 'params': args}
)
)
return await self._ws.receive_json()
async def is_wallet_locked(self):
return (
await self.call_method('is_locked')
)['result']
async def unlock_wallet(self):
await self.call_method('unlock', BaseRin.wallet_pwd)
async def close(self):
await self._session.close()
| UTF-8 | Python | false | false | 1,489 | py | 21 | grambitshares.py | 20 | 0.588986 | 0.587643 | 0 | 58 | 24.672414 | 75 |
hyeongu95/Algorithm_2021 | 14,370,960,597,714 | eeb89481b4b3cc023d503d8ae3701b3bcc9ad2ee | 8e5054504e992198ed6d56616c92bffcae9ab1da | /16917_양념반후라이드반.py | f20dbdbe1c556a9b469bc1436615b5b8ae6b41c7 | []
| no_license | https://github.com/hyeongu95/Algorithm_2021 | 5085c8965f358a47a6e9aeb0182452da0ece4149 | 02dd15334c23da61aae95186e4fabfa117fc5aba | refs/heads/master | 2023-07-12T18:23:28.154988 | 2021-08-19T08:25:35 | 2021-08-19T08:25:35 | 374,666,740 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 양념반 후라이드반 - 16917
a, b, c, x, y = map(int, input().split())
cnt = max(x * 2, y * 2)
ans = []
for i in range(cnt):
q = a * (x-i)
w = b * (y-i)
e = c * i * 2
if q < 0:
q = 0
if w < 0:
w = 0
ans.append(q + w + e)
print(min(ans)) | UTF-8 | Python | false | false | 284 | py | 11 | 16917_양념반후라이드반.py | 11 | 0.402985 | 0.358209 | 0 | 15 | 16.933333 | 41 |
surister/tracker.noheaven.net-backend | 12,498,354,880,667 | 4d2b23853b0affe4fc5ff46dd53cb5929471c98f | d4104e2e4a8aadaef67aafd526be42c77db8f7c3 | /apps/authentication/urls.py | b0b82e71b236812899a60ff980bb85e792f0c723 | []
| no_license | https://github.com/surister/tracker.noheaven.net-backend | 5fb72cbad7a0c8922063b22ed039eeae9380ceb8 | ea086e2fa0fd4791c275a93ab34918026d2673dd | refs/heads/master | 2023-07-14T21:30:49.948619 | 2021-09-04T18:10:55 | 2021-09-04T18:10:55 | 399,734,466 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.urls import path
from apps.authentication.views import LoginView
from django.contrib.auth.views import LoginView as Lg
urlpatterns = [
path('login/', LoginView.as_view()),
path('login2/', Lg.as_view())
]
| UTF-8 | Python | false | false | 226 | py | 12 | urls.py | 12 | 0.725664 | 0.721239 | 0 | 9 | 24.111111 | 53 |
GabbyBarajasBroussard/anomaly-detection-exercises | 7,988,639,220,153 | b57ca7483e649dedc99d764781a6821412854149 | 32f22b2c9e6b2f189dd6038e9e0773a3596e0ca2 | /TSA-exercises.py | 292db33f3161fc3d7fcb0965b07bffc87360d69c | []
| no_license | https://github.com/GabbyBarajasBroussard/anomaly-detection-exercises | 7211b76ceeb789deec83b80a0470771dda398826 | 8fea912abbe1532ac694c1b63819980c77c681b3 | refs/heads/main | 2023-04-15T10:12:48.189743 | 2021-04-28T15:03:16 | 2021-04-28T15:03:16 | 360,649,527 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[1]:
from __future__ import division
import itertools
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from numpy import linspace, loadtxt, ones, convolve
from sklearn.ensemble import IsolationForest
import numpy as np
import pandas as pd
import collections
import math
from sklearn import metrics
from random import randint
from matplotlib import style
import seaborn as sns
# style.use('fivethirtyeight')
get_ipython().run_line_magic('matplotlib', 'inline')
pd.plotting.register_matplotlib_converters()
# In[4]:
def make_log_data():
'''This function reads in the names of the columns and the csv holding the anonymized curriculum data to make a dataframe.'''
colnames = ['date', 'endpoint', 'user_id', 'cohort_id', 'source_ip']
df = pd.read_csv("anonymized-curriculum-access.txt",
sep="\s",
header=None,
names = colnames,
usecols=[0, 2, 3, 4, 5])
return df
df= make_log_data()
df.head()
# In[6]:
def prep_log_data(user, span, weight):
''' This function uses the dataframe created previously, allows the user to be specified, converts the date column to a date/time column, makes the index column the date,
and returns a pd.series called pages which shows the total pages accessed by the user.'''
df=make_log_data()
df = df[df.user_id == user]
df.date = pd.to_datetime(df.date)
df = df.set_index(df.date)
pages = df['endpoint'].resample('d').count()
return df, pages
# In[ ]:
def compute_bollinger(pages, span, weight, user):
''' This function calculates the lower, mid, and upper bands and the standard deviation.
The function then concats the bands and the pages to the dataframe. Finally a new dataframe with
the pages and bands is returned.
'''
midband = pages.ewm(span=span).mean()
stdev = pages.ewm(span=span).std()
ub = midband + stdev*weight
lb = midband - stdev*weight
bb = pd.concat([ub, lb], axis=1)
bol_df = pd.concat([pages, midband, bb], axis=1)
bol_df.columns = ['pages', 'midband', 'ub', 'lb']
bol_df['pct_b'] = (bol_df['pages'] - bol_df['lb'])/(bol_df['ub'] - bol_df['lb'])
bol_df['user_id'] = user
return bol_df
# In[ ]:
def plt_bands(b_df, user):
''' This functions will plot the upper, mid, and lower bands and original count of page accessess for every user.'''
fig, ax = plt.subplots(figsize=(12,8))
ax.plot(b_df.index, b_df.pages, label='Number of Pages, User: '+str(user))
ax.plot(b_df.index, b_df.midband, label = 'Middle band')
ax.plot(b_df.index, b_df.ub, label = 'Upper Band')
ax.plot(b_df.index, b_df.lb, label = 'Lower Band')
ax.set_ylabel('Number of Pages Accessed')
return plt.show()
| UTF-8 | Python | false | false | 2,889 | py | 8 | TSA-exercises.py | 2 | 0.654552 | 0.649706 | 0 | 96 | 29.03125 | 174 |
hakubaa/ormparser | 5,841,155,561,177 | a916f262f74ee826d5697cf05ccd99061aafc2fe | f74a3fb2ba21d6c03eda92a84f4dbdae1c6a12f4 | /parsers/vento/starygosciniec.py | 3298c4e1265cdcf428c9011e72179079dbc4fec8 | [
"MIT"
]
| permissive | https://github.com/hakubaa/ormparser | fe0fa501b8c564cf2a38ae80982583f2e42a41a8 | 98a421649aa4317378d0ab26abcf3df11db6e582 | refs/heads/master | 2018-10-14T01:36:42.069387 | 2018-09-19T20:48:25 | 2018-09-19T20:48:25 | 137,261,736 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from ..core import DataUnit
from ..vparsers import *
from . import VentoParser
class StaryGosciniecParser(VentoParser):
url = "http://starygosciniec.pl/mieszkania/"
schema = [
DataUnit(label="Numer", parser=DOMTextExtractor(), id="number"),
DataUnit(label="Pow", parser=AreaParser(DOMTextExtractor()), id="area"),
DataUnit(label="Status", parser=StatusParser(DOMTextExtractor()), id="status"),
DataUnit(label="Plan", parser=LinkParser(DOMElementExtractor("a")), id="plan")
]
| UTF-8 | Python | false | false | 538 | py | 1,282 | starygosciniec.py | 1,266 | 0.665428 | 0.665428 | 0 | 15 | 33.866667 | 87 |
gschen/sctu-ds-2020 | 14,388,140,483,764 | d28c99a5c26a56cd0fe46ceae59facf2dbc124df | bfd75153048a243b763614cf01f29f5c43f7e8c9 | /1906101033-唐超/Day0225/小练习.py | 662fbb078ec4731b29fed3d7534ca63068389fda | []
| no_license | https://github.com/gschen/sctu-ds-2020 | d2c75c78f620c9246d35df262529aa4258ef5787 | e1fd0226b856537ec653c468c0fbfc46f43980bf | refs/heads/master | 2021-01-01T11:06:06.170475 | 2020-07-16T03:12:13 | 2020-07-16T03:12:13 | 239,245,834 | 17 | 10 | null | false | 2020-04-18T13:46:24 | 2020-02-09T04:22:05 | 2020-04-18T13:44:30 | 2020-04-18T13:46:14 | 13,550 | 7 | 9 | 2 | Python | false | false | # 1.给定一个列表,找列表中最大的元素
# Input: [1,2,3,2]
# Output: 3
list = [1,2,3,2]
a = list[0]
for i in list:
if i > a:
a=i
print(a)
# 2. Compute the sum of the numbers in the list
# Input: [1,2,3,"a",1,'b',21]
# Output: 7
list1 = [1,2,3,4,'a',1,'b',21]
sum = 0
for i in list1:
if isinstance(i,int):
sum = sum + i
print(sum)
# 3. Given n, count how many primes there are between 2 and n
# (a prime is a natural number greater than 1, not including 1,
# that has no divisors other than 1 and itself.)
# Input: 5
# Output: 3
# Reason: between 1 and 5 the primes are 2, 3, 5
x = int(input('请输入一个数'))
for i in range(2,x):
if x % i == 0:
print('不是素数')
break
else:
print('是素数')
x = 10
# for i in range(2,x+1):
# for j in range(2,i):
# if i % j == 0:
# break
# else:
# sum = sum + 1
# print(sum)
| UTF-8 | Python | false | false | 874 | py | 1,830 | 小练习.py | 1,824 | 0.469118 | 0.395588 | 0 | 52 | 12.076923 | 30 |
TinoHaocheng/Machine-Learning-Hw | 2,267,742,736,015 | 49160b6da6f1d040026639dc4c6fac967fa4f434 | edfa948087a218d58f99822e4c230fc72650a6db | /ML_HW2/code/deng_2.py | 25f3815e3f629e8d342256f2df0f651137908ce2 | []
| no_license | https://github.com/TinoHaocheng/Machine-Learning-Hw | db4127460b317507211e35d209ed4623bee2c59d | 02209a1ca5d86bbcb3b9f08ffc3abdb7a9a6a1c7 | refs/heads/master | 2022-08-01T09:12:39.266490 | 2020-05-25T09:29:27 | 2020-05-25T09:29:27 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
import numpy as np
import random
import math
import matplotlib.pyplot as plt
import os.path as osp
data_path = osp.join('..','dataset','Faces')
train_size = 5
class_num = 5
class_sam = 10
target = []
target_test = []
itera_num = 50
step_size = 0.001
thres = 10**(-3)
# step_size = 0.001
# thres = 10**(-6)
img_test_raw = []
Test_result = []
def load_img(dir_path):
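    # randomly picks train_size images per subject for training; the remaining images become the test set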
global img_test_raw
global Test
img=[]
img_test = []
for i in range(class_num):
train_random_list = random.sample(range(0,10),train_size)
# train_random_list.sort()
test_random_list = [ i for i in range(10) if i not in train_random_list ]
print('test is',test_random_list)
print(train_random_list)
for k in range(class_sam):
find = False
for j in train_random_list:
if (j == k):
# path = dir_path + '/s' + str(i+1) +'/'+str(j+1)+'.pgm'
path = osp.join(dir_path,'s'+str(i+1),str(j+1)+'.pgm')
print(path)
a = cv2.imread(path,0)
print('before',np.max(a))
a = a.flatten() / 255.0
print(np.max(a))
print(a.shape, type(a))
img.append(a)
find = True
break
if not find:
path = osp.join(dir_path,'s'+str(i+1),str(k+1)+'.pgm')
# print(path)
a = cv2.imread(path,0)
img_test_raw.append(a)
print('before',np.max(a))
a = a.flatten() / 255.0
print(np.max(a))
print(a.shape, type(a))
img_test.append(a)
print('----------------')
# print(img_test_raw[-1].shape)
return img, img_test
def cal_softmax(x,w):
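    # Compute the class scores w.dot(x.T) and normalise every column with the
    # softmax function, giving a (num_classes x num_samples) probability matrix.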
a = np.dot(w,x.T)
# print(a)
# print(a.shape)
# print('score\n',a)
prob_unnorm = np.exp(a)
prob_sum = np.array( np.sum(prob_unnorm,axis=0) )
# print('The sum of',prob_sum)
# print(prob_sum.shape)
c_n, N = a.shape
for i in range(c_n):
for j in range(N):
a[i][j] = prob_unnorm[i][j] / prob_sum[j]
# print(i,j,a[i][j])
print(a)
return a
def cal_accuracy(w, t, x, test=False):
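    # Predict each sample's class as the one with the largest softmax output and
    # return the fraction of predictions that match the 1-based labels in t.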
global Test_result
correct = 0
total_N = x.shape[0]
a = cal_softmax(x,w)
# print(a)
result = np.where(a == np.amax(a, axis=0))
if (test):
Test_result = result[0]
for index, c in enumerate(result[0]):
if( t[index] == (c+1) ):
correct+=1
accuracy = correct / total_N
return accuracy
def plot(error, accu, counter):
fig = plt.figure('E(w)')
fig.suptitle('E(w) curve',fontsize=25)
plt.xlabel('iteration',fontdict={'fontsize':18})
plt.ylabel('E(w)',fontdict={'fontsize':18})
plt.rcParams['grid.alpha'] = 0.2
plt.rcParams['figure.titlesize'] = 30
plt.xticks(fontsize = 15)
plt.yticks(fontsize = 15)
plt.grid(True)
plt.plot(range(counter),error, linestyle='-', zorder = 1, color = 'red', linewidth=1)
plt.plot(range(counter),error,'ro', ms=4)
fig2 = plt.figure('Accuracy')
fig2.suptitle('Accuracy curve',fontsize=25)
plt.xlabel('iteration',fontdict={'fontsize':18})
plt.ylabel('Accu',fontdict={'fontsize':18})
plt.xticks(fontsize = 15)
plt.yticks(fontsize = 15)
plt.rcParams['grid.alpha'] = 0.2
plt.rcParams['figure.titlesize'] = 30
plt.grid(True)
plt.plot(range(counter),accu, linestyle='-', zorder = 1, color = 'blue', linewidth=1)
plt.plot(range(counter),accu,'bo', ms=4)
print('The error is:\n',error)
print('The accuracy is:\n',accu)
plt.show()
def do_gradient_descent(imgs, target, train = True):
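    # Fit the softmax (multiclass logistic regression) weights by batch gradient
    # descent on the cross-entropy error E(w); stop once the relative change in
    # E(w) falls below thres or after itera_num iterations.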
total_N = np.array(imgs).shape[0]
w = np.zeros((class_num,len(imgs[0])))
counter = 0
error_record = []
accu_record = []
last_E = 0
while (True):
E = 0
Gradient = np.zeros(w.shape)
a = cal_softmax(np.array(imgs),w)
print(np.max(imgs),np.max(w))
for c in range(class_num):
grad_e = 0
for n in range(total_N):
print('at counter',counter,c,n)
print(a[c][n])
if (math.isnan(a[c][n])):
print('NAN OCCURRED')
print('The learning rate is ',step_size)
print('The error is:\n',error_record)
print('The accuracy is\n',accu_record)
exit(-1)
t = ( target[n] == (c+1) )
error = t * np.log(a[c][n])
grad_e += (a[c][n]-t)* imgs[n]
E -= error
Gradient[c] = grad_e
error_record.append(E)
w = w - step_size*Gradient
# print('w is\n',w)
print('Error is\n',E)
counter += 1
accuracy = cal_accuracy(w,target,np.array(imgs))
accu_record.append(accuracy)
# print('last_E is:',last_E)
stop = ( math.fabs( ((error_record[-1]-last_E)/(last_E+0.0001))) < thres )
# if ( stop or counter >= itera_num or counter == 16):
if ( stop or counter >= itera_num ):
print(last_E, E)
# print(math.fabs((error_record[-1]-last_E)/(last_E+0.0001)))
print(counter)
plot(error_record, accu_record, counter)
return w
last_E = E
def show_result(target_test,accu_test):
global img_test_raw
print('In show')
img_cut =[img_test_raw[i:i+5] for i in range(0,len(img_test_raw),5)]
target = [Test_result[i:i+5] for i in range(0,len(Test_result), 5)]
h,w = img_test_raw[0].shape
p = 50
indent = 5
H = 5*h + indent*4 + p*2 + 20
W = 5*w + indent*4 + p*2
img=np.zeros((H,W),np.uint8)
img.fill(100)
# print(img_cut[-1])
accu = 'Accuracy: '+str(accu_test)
cv2.putText(img,accu, (int(W/2)-60,30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)
for index,row in enumerate(img_cut):
for i in range(len(row)):
# img_t = cv2.resize( img_cut[index][i], (h,w) )
img_t = img_cut[index][i]
# print(img_t.shape)
cv2.putText(img_t, 'c:%d'%(target[index][i]+1), (60,15), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)
img[20+p+h*index+indent*index:20+p+h*index+indent*index+h,(i*w+i*5)+50:((i+1)*w+i*5)+50] = img_t
cv2.imwrite('result.jpg',img)
cv2.imshow('test result',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
def do_Newton(imgs, target, d):
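    # Same softmax model, but each class's weight vector is updated with a
    # Newton step built from its own d x d Hessian on the PCA-reduced features:
    # w_k <- w_k - grad_k . pinv(H_k)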
# imgs = imgs_arr.tolist()
# print(len(imgs))
print('In New, type of imgs is:',type(imgs))
print(type(imgs[0]),imgs[0].shape)
total_N = np.array(imgs).shape[0]
w = np.zeros((class_num,len(imgs[0])))
counter = 0
error_record = []
accu_record = []
w_record = [] ######
last_E = 0
#calculate for gradient (class x data#)
while (True):
E = 0
Gradient = np.zeros(w.shape)
a = cal_softmax(np.array(imgs),w)
print(np.max(imgs),np.max(w))
for c in range(class_num):
grad_e = 0
for n in range(total_N):
print('at counter',counter,c,n)
print(a[c][n])
if (math.isnan(a[c][n])):
print('NAN OCCURRED')
exit(-1)
t = ( target[n] == (c+1) )
error = t * np.log(a[c][n])
grad_e += (a[c][n]-t)* imgs[n]
E -= error
Gradient[c] = grad_e
error_record.append(E)
w_record.append(w)#####
##cal hessian
I = np.identity(d)
Hj = []
for k in range(class_num):
Hessian = np.zeros((d,d))
for n in range(total_N):
out_product = np.dot( np.reshape( imgs[n],(len(imgs[n]),1) ) ,np.reshape( imgs[n],(len(imgs[n]),1) ).T)
v = a[k][n]*(1-a[k][n])
Hessian += (v * out_product)
print('Outproduct for %d is:\n'%(n))
# print(out_product)
Hj.append(Hessian)
for k in range(class_num):
w[k] = w[k] - np.dot( Gradient[k] ,np.linalg.pinv(Hj[k]) )
# print(Hessian, Hessian.shape)
print('Our Hessian for different para',Hj)
# # print('w is\n',w)
print('Error is\n',E)
print(last_E, E)
counter += 1
accuracy = cal_accuracy(w,target,np.array(imgs))
accu_record.append(accuracy)
# if ( stop or counter >= itera_num ):'
if ( counter >= itera_num ):
print(last_E, E)
# print(math.fabs((error_record[-1]-last_E)/(last_E+0.0001)))
print('Stop @ ',counter)
plot(error_record, accu_record, counter)
return w
last_E = E
# input whole_data (np.array(dxN)) output dimension-reduced data y (np.array(kxN))
def do_pca(whole_data, k):
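    # Centre the data, build the d x d scatter matrix and project every sample
    # onto the k eigenvectors with the largest eigenvalues.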
# whole_data = np.array(imgs + imgs_test).T
# print(whole_data.shape)
N = whole_data.shape[1] #num of date
d = whole_data.shape[0] #num of parameter
mean = np.zeros((N,1))
scatter = np.zeros((d,d))
mean = np.sum( whole_data, axis=1 )/N #10304x1
# print(mean, mean.shape)
# print(whole_data[:,0].shape)
for i in range(N):
c = np.dot( (whole_data[:,i].reshape(d,1)-mean),(whole_data[:,i].reshape(d,1)-mean).T )
print(c.shape)
scatter += c
print(scatter, scatter.shape)
eig_val_sc, eig_vec_sc = np.linalg.eig(scatter)
# Make a list of (eigenvalue, eigenvector) tuples
eig_pairs = [(np.abs(eig_val_sc[i]), eig_vec_sc[:,i]) for i in range(len(eig_val_sc))]
# Sort the (eigenvalue, eigenvector) tuples from high to low
eig_pairs.sort(key=lambda x: x[0], reverse=True)
## get first k eigenvetor to form transform matrix
transform_w = np.zeros((d,k))
for i in range(k):
transform_w[:,i] = eig_pairs[i][1]
y = transform_w.T.dot(whole_data)
return y
if __name__ == '__main__':
imgs,imgs_test = load_img(data_path)
# print(type(np.array(imgs)),np.array(imgs).shape)
# print(type(np.array(imgs_test)),np.array(imgs_test).shape)
#########################Gradient descent###############################
# seting target value
for i in range(class_num):
for j in range(train_size):
target.append(i+1)
target_test.append(i+1)
w = do_gradient_descent(imgs, target, True)
accu_test = cal_accuracy(w, target_test, np.array(imgs_test), test=True)
print('The accuracy of test data %f'%(accu_test))
show_result(target_test,accu_test)
######################################################################
exit(0)
#########################Newton######################################### would cal up to 10 mins for pca <3
#pca
k = 5
whole_data = np.array(imgs + imgs_test).T
N = whole_data.shape[1]
####This is what you want
y = do_pca(whole_data,k)
print(y,y.shape)
y_train = y[:,:25]
y_test = y[:,25:N]
print('after pca')
print('train is',y_train.shape,y_train)
print('test is',y_test.shape,y_test)
for i in range(class_num):
for j in range(train_size):
target.append(i+1)
target_test.append(i+1)
print(target)
print(target_test)
#convet to list of array
y_train = (y_train.T).tolist()
y_test = (y_test.T).tolist()
for i in range(len(y_train)):
y_train[i] = np.array(y_train[i])
y_test[i] = np.array(y_test[i])
print(type(y_train))
w = do_Newton(y_train, target, k)
accu_test = cal_accuracy(w, target_test, np.array(y_test), test=True)
print('The accuracy of test data %f'%(accu_test))
show_result(target_test,accu_test)
| UTF-8 | Python | false | false | 12,077 | py | 28 | deng_2.py | 14 | 0.509232 | 0.489774 | 0 | 415 | 28.098795 | 119 |
SanjoSolutions/anno_1602 | 18,399,639,917,979 | b03b0709791054bfad247d89fd50cca56445f5df | 5d3853db441f3763908dd32f77d410ad021d46ba | /load_savegame.py | 5d811aa99e3f91d719a9aa5a438939d5f78cd1af | []
| no_license | https://github.com/SanjoSolutions/anno_1602 | 57ce76956a36c1654b40fe159bc21cd010dacd85 | c7eecb0041ed99568486006d9fe98a5333d41ca6 | refs/heads/main | 2023-06-26T12:56:28.477904 | 2021-07-09T17:45:24 | 2021-07-09T17:45:24 | 377,757,624 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
from ctypes import *
from io import SEEK_CUR
def main():
with open(r'C:\Anno_1602\SAVEGAME\game01.gam', 'rb') as file:
while file.read(1) != b'':
file.seek(-1, SEEK_CUR)
read_block(file)
index = 0
def read_block(file):
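    # A block starts with a 16-byte zero-terminated id followed by a 4-byte
    # little-endian payload length. 'INSELHAUS' payloads are decoded into
    # IslandField structures; any other payload is printed raw.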
global index
id = decode_zero_terminated_string(file.read(16))
index += 16
print(id)
length = int.from_bytes(file.read(4), byteorder='little', signed=False)
index += 4
data_block_start = index
print(length)
data = file.read(length)
index += length
if id == 'INSELHAUS':
island_fields_count = math.floor(length / sizeof(IslandField))
island_fields_array = IslandField * island_fields_count
island_fields = island_fields_array()
memmove(addressof(island_fields), data, island_fields_count * sizeof(IslandField))
island_fields = list(island_fields)
        buildings_set = set(island_field.building for island_field in island_fields)
buildings = list(buildings_set)
        house_island_fields = [island_field for island_field in island_fields if island_field.building == 605]
for house_island_field in house_island_fields:
print('data_block_start', hex(data_block_start))
print('house', house_island_field.x, house_island_field.y)
else:
print(data)
# class IslandHouse(Structure):
# _fields_ = [
# ('id', c_char * 16),
# ('length', c_uint32),
# ()
# ]
class IslandField(Structure):
_fields_ = [
('building', c_uint16),
('x', c_uint8),
('y', c_uint8),
('rotation', c_uint32),
('unknown_1', c_uint32),
('unknown_2', c_uint32),
('status', c_uint32),
('random', c_uint32),
('player', c_uint32),
('empty', c_uint32)
]
def decode_zero_terminated_string(string):
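    # Cut the byte string at the first NUL byte and decode it as UTF-8.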
index = string.find(0x00)
string = string[:index]
string = string.decode('utf-8')
return string
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 2,035 | py | 13 | load_savegame.py | 8 | 0.584767 | 0.562162 | 0 | 76 | 25.776316 | 106 |
rudilol/PHAServer | 15,874,199,157,705 | cb520225ff9ed087d35f9d7da3f396c04cd15449 | ada2a5bb66c62f0230ae252d97a644e2d9339172 | /Phaserveur/modules/default_s.py | 17a4c2ac13a95a45b99d8c7364abdf776993cb59 | [
"Apache-2.0"
]
| permissive | https://github.com/rudilol/PHAServer | 9013d90131bf9d4d5072f68f722af41896413f38 | e372ce53962eae517e12de4458540cbb553a905d | refs/heads/master | 2022-07-10T23:46:27.628790 | 2020-05-08T01:11:39 | 2020-05-08T01:11:39 | 262,193,903 | 0 | 0 | Apache-2.0 | true | 2020-05-08T01:11:40 | 2020-05-08T01:07:09 | 2020-05-08T01:07:11 | 2020-05-08T01:11:40 | 6 | 0 | 0 | 0 | null | false | false | import sys
import os
from time import sleep
from core.system import *
from modules.php import *
from modules.pyweb import *
def default_s():
if os.path.exists(bpath+"php"):
php()
else:
pyweb() | UTF-8 | Python | false | false | 205 | py | 3 | default_s.py | 2 | 0.697561 | 0.697561 | 0 | 12 | 16.166667 | 33 |
kidosoft/Morelia | 395,137,007,184 | 51907e7093c4ff09a422d6230ef852d496e5ff77 | 004e3b19b0498d79dad4b32c4040766f8fc15206 | /tests/test_setupteardown.py | 9531b254751482071e4195a44eb2a25347af751e | [
"MIT"
]
| permissive | https://github.com/kidosoft/Morelia | d981f85295395c81c2d6051dab11f827f2cc409a | 92a324bc7ba47bc8ab77b90101c11d67a6b2ade2 | refs/heads/master | 2023-07-10T01:24:03.428533 | 2023-06-24T20:26:38 | 2023-06-24T20:26:38 | 27,031,276 | 18 | 8 | MIT | true | 2022-04-05T19:22:42 | 2014-11-23T12:12:10 | 2021-10-13T10:04:32 | 2022-04-05T19:21:02 | 730 | 16 | 5 | 9 | Python | false | false | # -*- coding: utf-8 -*-
from unittest import TestCase
from morelia import verify
from morelia.decorators import tags
@tags(["acceptance"])
class SetUpTearDownTest(TestCase):
def setUp(self):
self.executed = []
def test_executes_setup_teardown_pairs(self):
source = """
Feature: setUp/tearDown pairs
Scenario: passing scenario
When step passes
And next step passes
Scenario: failing scenario
When step fails
"""
try:
verify(source, self)
except AssertionError:
pass
expected_sequence = [
"setUpFeature",
"setUpScenario",
"setUpStep",
"step passes",
"tearDownStep",
"setUpStep",
"next step passes",
"tearDownStep",
"tearDownScenario",
"setUpScenario",
"setUpStep",
"step fails",
"tearDownStep",
"tearDownScenario",
"tearDownFeature",
]
assert expected_sequence == self.executed
def setUpFeature(self):
self.executed.append("setUpFeature")
def tearDownFeature(self):
self.executed.append("tearDownFeature")
def setUpScenario(self):
self.executed.append("setUpScenario")
def tearDownScenario(self):
self.executed.append("tearDownScenario")
def setUpStep(self):
self.executed.append("setUpStep")
def tearDownStep(self):
self.executed.append("tearDownStep")
def step_step_passes(self):
self.executed.append("step passes")
assert True
def step_next_step_passes(self):
self.executed.append("next step passes")
assert True
def step_step_fails(self):
self.executed.append("step fails")
assert False
| UTF-8 | Python | false | false | 1,914 | py | 63 | test_setupteardown.py | 36 | 0.563218 | 0.562696 | 0 | 73 | 25.219178 | 49 |
tgiachi/avalon | 4,346,506,938,477 | a02aff578a4a6710faf7faa39bc7664a4ee21e32 | 97ce6b001079679ff21602f56f25567fb0911864 | /main.py | 62095a96f8aea86d65cc682b1fcb3c02e4337e3f | []
| no_license | https://github.com/tgiachi/avalon | 7be44fd8dae45520b747fe405c990c4953109cad | f6cb71e02c2bff49caa0ae2b9727b51021e48637 | refs/heads/master | 2015-09-24T23:53:25.916158 | 2015-07-13T15:44:57 | 2015-07-13T15:44:57 | 38,966,222 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
__author__ = 'Tommaso Giachi (squid@stormwind.it)'
__version__ = '1.0'
# Copyright (C) 2015 Tommaso Giachi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
import sys
import atexit
import signal
from AvalonServerModule import AvalonServerModule
def print_header():
print(" _____ .__")
print(" / _ \___ _______ | | ____ ____")
print(" / /_\ \ \/ /\__ \ | | / _ \ / \\")
print("/ | \ / / __ \| |_( <_> ) | \\")
print("\____|__ /\_/ (____ /____/\____/|___| /")
print(" \/ \/ \/")
print("Avalon v%s" % __version__)
print(__author__)
def signal_handler(signal, frame):
main.main_bus.publish('exit')
print('You pressed Ctrl+C!')
sys.exit(0)
print_header()
signal.signal(signal.SIGINT, signal_handler)
main = AvalonServerModule.AvalonServerModule(sys.argv)
main.start()
@atexit.register
def handle_exit():
"""
Catch exit and stop threads
:return:
"""
main.main_bus.publish('exit')
| UTF-8 | Python | false | false | 1,678 | py | 6 | main.py | 5 | 0.585221 | 0.580453 | 0 | 55 | 29.381818 | 73 |
dr-dos-ok/Code_Jam_Webscraper | 6,949,257,115,394 | 118b0075daa947257d50318143c27dba6de9c6a3 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_200/3195.py | dcb505030a4646ee4c5fef42f391b6442562e963 | []
| no_license | https://github.com/dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: kirodh
#
# Created: 08/04/2017
# Copyright: (c) kirodh 2017
# Licence: <your licence>
#-------------------------------------------------------------------------------
from __future__ import print_function
def main():
pass
if __name__ == '__main__':
main()
nameoffile = "B-large.in"
outputfile = "output1.txt"
infile = open(nameoffile,"r")
outfile = open(outputfile,"w")
numcases = int(infile.readline())
for i in range(1,numcases+1):
number = list(infile.readline()) # get rid of the newline char
if number[len(number)-1] == "\n": number = number[0:-1]
if len(number) == 1:
print("Case #"+str(i)+": "+str(number[0]),file=outfile)
else:
constructednumber = ''
tempcount = 1
tempchar = ''
for j in range(1,len(number)):
if number[j-1] == number[j]:
tempchar = number[j-1]
tempcount += 1
elif number[j-1] > number[j]:
#subtract 1
#print(tempchar)
newchar = str(int(number[j-1])-1)
#print(newchar)
constructednumber = constructednumber + newchar
constructednumber = constructednumber +"9"*(tempcount-1 + len(number[j:len(number)]))
print("Case #"+str(i)+": "+str(int(constructednumber)),file=outfile)
break
else:
constructednumber = constructednumber + tempcount*number[j-1]
tempcount=1
tempchar = ''
if j==len(number)-1:
constructednumber = constructednumber + tempcount*number[j]
print("Case #"+str(i)+": "+str(int(constructednumber)),file=outfile)
infile.close()
outfile.close()
| UTF-8 | Python | false | false | 1,939 | py | 60,747 | 3195.py | 60,742 | 0.479629 | 0.461578 | 0 | 57 | 31.947368 | 101 |
lprsd/baba | 15,083,925,145,643 | be9c0e36d7ef14a66453dabb95d8fc426473a288 | 48a9a8a0761ca76ae4865b8682ee09e7a202a9dd | /categories/forms.py | 82834e2f252596812128eff6f720da5e52b074d6 | []
| no_license | https://github.com/lprsd/baba | adb456a8f48259af73b9f855bebe7097da4c5070 | 610c438ba356167168709021edb96790bc628ad4 | refs/heads/master | 2021-06-30T22:03:06.224232 | 2011-10-21T09:26:54 | 2011-10-21T09:26:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from models import Category, UserInfo, UserCategory
from django import forms
from django.contrib.auth.forms import UserCreationForm
class UserForm(forms.ModelForm):
def clean_email_id(self):
try:
ui = UserInfo.objects.get(email_id=self.cleaned_data['email_id'])
ui.send_edit_link()
raise forms.ValidationError('This email is already registered. Check email for a link to edit preferences.')
except UserInfo.DoesNotExist:
return self.cleaned_data['email_id']
class Meta:
model = UserInfo
exclude = ['user','userhash']
class UserDispForm(forms.ModelForm):
class Meta:
model = UserInfo
exclude = ['user','userhash','email_id']
class EmailForm(forms.Form):
email_id = forms.EmailField()
def clean_email_id(self):
try:
ui = UserInfo.objects.get(email_id=self.cleaned_data['email_id'])
ui.send_edit_link()
return self.cleaned_data['email_id']
except UserInfo.DoesNotExist:
raise forms.ValidationError('This email is not registered')
class CategoryForm(forms.Form):
def __init__(self,user_instance=None,*args,**kwargs):
super(CategoryForm,self).__init__(*args,**kwargs)
cat = Category.objects.all()
for el in cat:
self.fields[el.name] = forms.BooleanField(required=False)
if user_instance:
selected_cats = UserCategory.objects.filter(user=user_instance).values_list('category__name',flat=True)
for el in selected_cats:
self.initial[el] = True
def save(self,user_obj):
# embed()
to_add = [el for el in self.cleaned_data if self.cleaned_data[el]]
# to_save = [Category.objects.get(name=el) for el in self.cleaned_data if self.cleaned_data[el]]
to_delete = [el for el in self.changed_data if not el in to_add]
# embed()
for el in to_add:
UserCategory.objects.get_or_create(user=user_obj,
category=Category.objects.get(name=el))
for el in to_delete:
UserCategory.objects.filter(user=user_obj,category__name=el).delete()
| UTF-8 | Python | false | false | 1,942 | py | 7 | forms.py | 5 | 0.709063 | 0.709063 | 0 | 60 | 31.35 | 111 |
MichalJagiello/web_shop | 14,139,032,341,735 | 00788384c6c893adece52d4d453f41c008b51227 | 0622f03e658b2414292b986856071a73f87efd3d | /projects/admin.py | b2b179ac1f116d1a29fb627e37f61314af8e5dc0 | []
| no_license | https://github.com/MichalJagiello/web_shop | 38b6902d229ce06b19f311bcad71aa6a447a35e3 | e4b1a76210d97c6690fb3fb886be9a10db5cc1d0 | refs/heads/master | 2021-01-18T21:57:55.697407 | 2016-06-05T13:42:25 | 2016-06-05T13:42:25 | 49,776,638 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import re
from django import forms
from django.conf import settings
from django.contrib import admin
from autoryzacja.models import PipesUser
from .models import Project, Prefabricate
# Register your models here.
class ProjectCreationForm(forms.ModelForm):
name = forms.CharField(max_length=255, label='Nazwa projektu')
user = forms.ModelChoiceField(queryset=PipesUser.objects.all())
city = forms.CharField(max_length=128, label='Miasto')
street = forms.CharField(max_length=128, label='Ulica')
postcode = forms.CharField(max_length=6, label='Kod pocztowy')
number = forms.CharField(max_length=24, label='Numer')
class Meta:
model = Project
fields = ('name', 'user', 'street', 'number', 'postcode', 'city')
def clean_name(self):
name = self.cleaned_data.get('name')
try:
Project.objects.get(name=name)
except Project.DoesNotExist:
return name
raise forms.ValidationError("Istnieje projekt o podanej nazwie")
def clean_postcode(self):
postcode = self.cleaned_data.get('postcode')
if not re.match(settings.POSTCODE_PATTERN, postcode):
raise forms.ValidationError("Podaj kod pocztowy we właściwym formacie, np. 00-000")
return postcode
def save(self, commit=True):
project = super(ProjectCreationForm, self).save(commit=False)
project.saved = True
if commit:
project.save()
return project
class PrefabricateAdminInline(admin.TabularInline):
model = Prefabricate
fields = ('project', 'prefabricate_mark', 'pipe_diameter', 'pipe_color', 'pipe_type')
ordering = ('project', 'index')
can_delete = False
readonly_fields = ('project', 'prefabricate_mark', 'pipe_diameter', 'pipe_color', 'pipe_type')
def get_max_num(self, request, obj=None, **kwargs):
return len(Prefabricate.objects.filter(project=obj))
class ProjectsAdmin(admin.ModelAdmin):
model = Project
list_display = ('name', 'user', 'created', 'edited')
search_fields = ('name', 'user')
fieldsets = (
('Nazwa', {
'fields': ('name',),
}),
('Użytkownik', {
'fields': ('user',),
}),
('Adres', {
'fields': ('street', 'number', 'postcode', 'city')
}),
('Plik', {
'fields': ('pdf',)
}),
)
inlines = [
PrefabricateAdminInline,
]
def save_model(self, request, obj, form, change):
obj.saved = True
obj.save()
admin.site.register(Project, ProjectsAdmin)
| UTF-8 | Python | false | false | 2,656 | py | 39 | admin.py | 20 | 0.617037 | 0.610253 | 0 | 95 | 26.926316 | 98 |
RoelvanDooren/VisCogSearchRATAUT_Study1 | 11,879,879,546,392 | efe978b80191a772c42e99b5b1ac1f387361efcc | bab4f25cc4be9e27a097cff2f6281295311217c8 | /scrabble_practice.py | 638a4dc7b17aa3cebf1ed596474af1cfa1b9a3c7 | []
| no_license | https://github.com/RoelvanDooren/VisCogSearchRATAUT_Study1 | 6c8430dbbf19aa83429af50492c4e45ec96ad694 | 01659a3bd29a734832405e5cfc20bc285fb87b2c | refs/heads/master | 2021-01-23T08:03:34.920575 | 2017-07-11T06:02:19 | 2017-07-11T06:02:19 | 80,521,655 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! /usr/bin/env python
import pygame
import string
import os
import sys
import random
from timeit import default_timer as timer
# Constants
color_font = (30, 30, 30)
background_color = (255, 255, 255)
box_edge_color = (0, 0, 0)
box_back_color = (240, 240, 240)
box_text_color = (100, 100, 100)
button_edge_color = (0, 0, 0)
button_back_color = (200, 200, 200)
button_text_color = (40, 40, 40)
incorrect_color = (255, 0, 40)
correct_color = (50, 255, 0)
counter_text_color = (50, 50, 50)
box_width = 250
box_height = 60
button_width = 150
button_height = 35
screen_w = 640
screen_h = 500
fps = 60
screen = pygame.display.set_mode((screen_w, screen_h), pygame.HWSURFACE |
pygame.DOUBLEBUF | pygame.FULLSCREEN)
# Variables
stimulus_set = []
correct_words = []
class Stimulus:
def __init__(self, surface):
self.surface = surface
self.x = surface.get_width() / 2
self.y = surface.get_height() / 2
self.font = pygame.font.Font(None, 60)
def draw_letterset(self, letters):
self.surface.fill(background_color)
text = self.font.render(letters, 1, color_font)
self.surface.blit(text, (self.x - 110, self.y - 150))
class Input:
"""This class takes care of user input"""
def __init__(self, surface):
self.surface = surface
self.x = surface.get_width() / 2
self.y = surface.get_height() / 2
self.current_string = []
self.previous_string = ""
self.past_correct_words = []
self.past_incorrect_words = []
self.n_correct_words = 0
self.n_incorrect_words = 0
self.prev_n_correct = 0
self.prev_n_incorrect = 0
self.total_repeat_words = 0
self.total_correct_words = 0
self.total_incorrect_words = 0
self.n_repeat_words = 0
self.font = pygame.font.Font(None, 50)
self.font_text = pygame.font.Font(None, 20)
self.font_feedback = pygame.font.Font(None, 40)
self.font_counter = pygame.font.Font(None, 40)
def draw_text_box(self, message):
pygame.draw.rect(self.surface, box_back_color,
((self.x - (box_width / 2)), self.y,
box_width, box_height), 0)
pygame.draw.rect(self.surface, box_edge_color,
((self.x - (box_width / 2)), self.y,
box_width, box_height), 1)
if len(message) != 0:
self.surface.blit(self.font.render(message, 1, box_text_color),
(self.x - 100, self.y + 10))
self.draw_counter()
pygame.display.flip()
def draw_counter(self):
pygame.draw.rect(self.surface, (255, 255, 255),
((self.x - 320), self.y + 155,
640, 100), 0)
countertext_1 = "Probeer de taak door bijvoorbeeld, 'erwt', 'drie' en 'wiedt' te typen."
text = self.font_text.render(countertext_1, 1, counter_text_color)
self.surface.blit(text, (self.x - 220, self.y + 155))
countertext_2 = "Probeer ook een verkeerd woord zoals 'markt' of een dubbel woord (nog een keer 'erwt')."
text = self.font_text.render(countertext_2, 1, counter_text_color)
self.surface.blit(text, (self.x - 280, self.y + 180))
countertext_3 = "Druk op enter om het woord in te voeren"
text = self.font_text.render(countertext_3, 1, counter_text_color)
self.surface.blit(text, (self.x - 125, self.y + 205))
def draw_input(self, correct):
self.text = self.font_counter.render("Score: " + str(self.total_correct_words), 1, color_font)
self.textpos = self.text.get_rect(topleft=(10, 10))
self.surface.blit(self.text, self.textpos)
while True:
event = pygame.event.poll()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_BACKSPACE:
self.current_string = self.current_string[:-1]
elif event.key == pygame.K_RETURN:
self.checker(string.join(self.current_string, ""),
correct)
if self.n_correct_words >= 5:
break
self.current_string = []
elif event.key <= 127:
self.current_string.append(chr(event.key))
elif event.type == pygame.MOUSEBUTTONUP:
self.prev_n_correct = self.n_correct_words
self.prev_n_incorrect = self.n_incorrect_words
self.n_correct_words = 0
self.n_incorrect_words = 0
if self.x - (button_width / 2) <= event.pos[0] <= \
self.x + (button_width / 2) and \
self.y + 100 <= event.pos[1] <= self.y + 100 + \
button_height:
break
self.draw_text_box(string.join(self.current_string, ""))
def checker(self, word, cor_words):
correct = cor_words
word = word
# check whether this word has been entered earlier
if word in self.past_correct_words:
self.surface.blit(self.font_feedback.render("Dit woord heb je "
"al gehad",
1, incorrect_color),
(self.x - 160, self.y - 70))
self.n_repeat_words += 1
self.total_repeat_words += 1
pygame.display.flip()
pygame.time.delay(800)
pygame.draw.rect(self.surface, (255, 255, 255),
((self.x - 200), self.y - 80,
400, 60), 0)
# check whether this word is in the correct words list
elif word in correct:
self.surface.blit(self.font_feedback.render("Correct!",
1, correct_color),
(self.x - 55, self.y - 70))
self.past_correct_words.append(word)
self.n_correct_words += 1
self.total_correct_words += 1
pygame.display.flip()
pygame.time.delay(800)
pygame.draw.rect(self.surface, (255, 255, 255), (5, 5, 140, 30), 0)
pygame.draw.rect(self.surface, (255, 255, 255), ((self.x - 200), self.y - 80,400, 60), 0)
self.text = self.font_counter.render("Score: " + str(self.total_correct_words), 1, color_font)
self.surface.blit(self.text, self.textpos)
# else this is not a correct word
else:
self.surface.blit(self.font_feedback.render("Incorrect",
1, incorrect_color),
(self.x - 60, self.y - 70))
self.past_incorrect_words.append(word)
self.n_incorrect_words += 1
self.total_incorrect_words += 1
pygame.display.flip()
pygame.time.delay(800)
pygame.draw.rect(self.surface, (255, 255, 255),
((self.x - 200), self.y - 80,
400, 60), 0)
class Wait:
def __init__(self, surface):
self.surface = surface
self.x = surface.get_width() / 2
self.y = surface.get_height() / 2
self.font = pygame.font.Font(None, 25)
def intro(self, image):
self.surface.fill(background_color)
self.surface.blit(image, (50, 50))
pygame.display.flip()
while True:
event = pygame.event.poll()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
return
def waiter(self, time=15000):
self.surface.fill(background_color)
text = self.font.render("Wacht op de volgende letterset", 1,
color_font)
self.surface.blit(text, (self.x - 140, self.y))
pygame.display.flip()
pygame.time.delay(time)
self.surface.fill(background_color)
def outro(self):
self.surface.fill(background_color)
text = self.font.render("Dat was de oefening voor de scrabbletaak!",
1, color_font)
self.surface.blit(text, (self.x - 180, self.y - 50))
text = self.font.render("In het volgende scherm begint de eerste scrabbletaak.",
1, color_font)
self.surface.blit(text, (self.x - 225, self.y - 15))
text = self.font.render("Druk op de spatiebalk om verder te gaan",
1, color_font)
self.surface.blit(text, (self.x - 170, self.y + 50))
pygame.display.flip()
while True:
event = pygame.event.poll()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
return
class Button:
def __init__(self, surface):
self.surface = surface
self.x = surface.get_width() / 2
self.y = surface.get_height() / 2
self.font = pygame.font.Font(None, 30)
def stop_practice(self):
pygame.draw.rect(self.surface, button_back_color,
((self.x - (button_width / 2)), self.y + 100,
button_width, button_height), 0)
pygame.draw.rect(self.surface, button_edge_color,
((self.x - (button_width / 2)), self.y + 100,
button_width, button_height), 1)
text = self.font.render("stop oefenen", 1, color_font)
screen.blit(text, (self.x - button_width / 2 + 12, self.y + 105))
pygame.display.flip()
class Main:
def __init__(self, letters, words):
# Init task
pygame.init()
self.surface = screen
self.surface.fill(background_color)
self.letters = letters
self.word = words
# Init objects
self.stimulus = Stimulus(screen)
self.user_input = Input(screen)
self.wait = Wait(screen)
self.button = Button(screen)
def main(self):
# Main loop
self.start = timer()
clock = pygame.time.Clock()
image_general_intro = pygame.image.load(os.path.join("images", "general_intro_experiment.png")).convert()
image_intro01 = pygame.image.load(os.path.join("images", "intro_scrabble_practice01.png")).convert()
image_intro02 = pygame.image.load(os.path.join("images", "intro_scrabble_practice02.png")).convert()
image_intro03 = pygame.image.load(os.path.join("images", "intro_scrabble_practice03.png")).convert()
self.begin = timer()
self.wait.intro(image_general_intro)
self.wait.intro(image_intro01)
self.wait.intro(image_intro02)
self.wait.intro(image_intro03)
self.stimulus.draw_letterset(self.letters)
self.button.stop_practice()
self.user_input.draw_input(self.word)
self.time = (timer() - self.begin)
self.end = timer()
screen.fill(background_color)
clock.tick(fps)
if __name__ == '__main__':
stimulus_set = "I E D T W R"
correct_words = ["weid", "wedt", "werd", "wied", "wiet", "erwt", "wier",
"tred", "tier", "rite", "drie", "redt", "riet", "ried",
"dier","weidt","wiedt"]
run = Main(stimulus_set, correct_words)
run.main()
| UTF-8 | Python | false | false | 11,405 | py | 20 | scrabble_practice.py | 12 | 0.5395 | 0.505831 | 0 | 291 | 38.19244 | 113 |
hotwire-django/django-turbo-allauth | 2,336,462,211,151 | 3841370b97048fd9c84900d617d92ad54809791d | 72c59dbca3196a44e47ea98675056a5dbe2fcb54 | /setup.py | 2f46564a968121369770e9bbd1a13204487b024d | [
"MIT"
]
| permissive | https://github.com/hotwire-django/django-turbo-allauth | dd19603e7f1ec1767085ea0c61862b052e742afe | ce45ae042d7a5e8915aae35fd812d886b584c9ff | refs/heads/main | 2023-04-26T15:53:47.208830 | 2021-05-12T09:51:14 | 2021-05-12T09:51:14 | 326,223,548 | 7 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
from setuptools import find_packages, setup
VERSION = "0.0.10"
setup(
name="django-turbo-allauth",
version=VERSION,
author="Dan Jacob",
author_email="danjac2018@gmail.com",
url="https://github.com/hotwire-django/django-turbo-allauth",
description="Hotwired/Turbo subclasses for allauth views",
long_description=open("README.md").read() + "\n\n" + open("CHANGELOG.md").read(),
long_description_content_type="text/markdown",
license="MIT",
python_requires=">=3.8",
install_requires=[
"django (>=3.0)",
"django-allauth",
"django-turbo-response (>=0.0.49)",
],
packages=find_packages(where="src"),
package_dir={"": "src"},
classifiers=[
"Development Status :: 4 - Beta",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| UTF-8 | Python | false | false | 1,128 | py | 14 | setup.py | 8 | 0.609043 | 0.590426 | 0 | 34 | 32.176471 | 85 |
IgorEduYa/foodgram-project | 7,206,955,136,087 | e1040eea73cc6e3181efe72efa3a4ac666d76b32 | 31da4180d93f64ba4b7f04d94a3b5d4321f92f4d | /recipes/views.py | dbc86da6d92abe632642709e13f375319d042f4a | []
| no_license | https://github.com/IgorEduYa/foodgram-project | 98643e49e494cbe1ceb3dff4cddfc1df313e7c7f | 9435ab6bf4d44504a6922d9f674989c074fd1d11 | refs/heads/master | 2023-06-08T11:21:09.165222 | 2021-06-23T14:19:00 | 2021-06-23T14:19:00 | 372,381,729 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.shortcuts import get_object_or_404, redirect, render
from excel_response import ExcelResponse
from .forms import RecipeForm
from .models import Component, Recipe, Unit, User
from .utils import get_ingredient
def get_recipes(request, username=None):
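    # Collect the recipes to show: an author's recipes, the current user's
    # favourites, or all recipes, optionally filtered by tag, then paginate.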
tag = request.GET.get('tag')
if username:
author = get_object_or_404(User, username=username)
if tag:
recipes = author.recipes.filter(tag__name=tag)
else:
recipes = author.recipes.all()
elif request.resolver_match.url_name == 'favorites':
author = None
user = request.user
if tag:
favors = user.favoriters.filter(recipe__tag__name=tag)
else:
favors = user.favoriters.all()
recipes = Recipe.objects.filter(id__in=favors.values('recipe_id'))
else:
author = None
if tag:
recipes = Recipe.objects.filter(tag__name=tag)
else:
recipes = Recipe.objects.all()
paginator = Paginator(recipes, settings.OBJECTS_PER_PAGE)
page_number = request.GET.get('page')
page = paginator.get_page(page_number)
return page, paginator, tag, author
def index(request):
page, paginator, tag, author = get_recipes(request)
return render(
request,
'index.html',
{
'page': page,
'paginator': paginator,
'tag': tag,
}
)
def profile(request, username):
page, paginator, tag, author = get_recipes(request, username)
return render(
request,
'profile.html',
{
'page': page,
'paginator': paginator,
'author': author,
'tag': tag,
}
)
def recipe_view(request, id):
recipe = get_object_or_404(Recipe, id=id)
return render(request, 'recipe_page.html', {'recipe': recipe})
def form_saving(request, form):
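    # Build Component rows from the submitted ingredients and save the recipe;
    # returns True (signalling a form error) when no ingredients were given.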
recipe = form.save(commit=False)
recipe.author = request.user
ingredients = get_ingredient(request)
components = []
Component.objects.filter(recipe=recipe).delete()
for title, value in ingredients.items():
unit = get_object_or_404(Unit, title=title)
value = value[1:] if value[0] == '-' else value
comp = Component(
recipe=recipe,
unit=unit,
value=value,
)
components.append(comp)
if len(components) == 0:
form.add_error(None, 'Нет ингредиентов!')
return True
recipe.save()
Component.objects.bulk_create(components)
form.save_m2m()
@login_required
def new_recipe(request):
if request.method != "POST":
form = RecipeForm()
return render(request, "new_recipe.html", {"form": form})
form = RecipeForm(request.POST or None, files=request.FILES or None)
print(request.POST.getlist('tags'))
if form.is_valid():
result = form_saving(request, form)
if result == True:
return render(request, "new_recipe.html", {"form": form})
return redirect('index')
return render(request, "new_recipe.html", {"form": form})
@login_required
def recipe_edit(request, id):
recipe = get_object_or_404(Recipe, id=id)
if recipe.author != request.user:
return redirect('recipe', id=id)
form = RecipeForm(
request.POST or None,
files=request.FILES or None,
instance=recipe,
)
if form.is_valid():
result = form_saving(request, form)
if result == True:
return render(
request,
"new_recipe.html",
{"form": form, 'edit': True, 'recipe': recipe}
)
return redirect('recipe', id=id)
return render(
request,
"new_recipe.html",
{"form": form, 'edit': True, 'recipe': recipe}
)
@login_required
def recipe_delete(request, id):
recipe = get_object_or_404(Recipe, id=id)
if recipe.author != request.user:
return redirect('recipe', id=id)
recipe.delete()
return redirect('index')
@login_required
def subscribe(request):
user = request.user
all_param = request.GET.get('all')
all = int(all_param) if all_param else None
subscriptions = user.subscriber.all()
paginator = Paginator(subscriptions, 3)
page_number = request.GET.get('subscriptions')
subscriptions = paginator.get_page(page_number)
return render(
request,
'subscribes.html',
{
'subscriptions': subscriptions,
'paginator': paginator,
'all': all,
}
)
@login_required
def favorites(request):
page, paginator, tag, author = get_recipes(request)
return render(
request,
'favorites.html',
{
'page': page,
'paginator': paginator,
'tag': tag,
}
)
@login_required
def purchases(request):
user = request.user
purchases = user.buyers.all()
recipes = Recipe.objects.filter(id__in=purchases.values('recipe_id'))
return render(
request,
'purchases.html',
{'recipes': recipes}
)
@login_required
def shoplist_download(request):
user = request.user
purchases = user.buyers.all()
recipes = Recipe.objects.filter(id__in=purchases.values('recipe_id'))
components = Component.objects.filter(recipe__in=recipes)
titles = []
dimensions = []
values = []
for comp in components:
if comp.unit.title in titles:
index = titles.index(comp.unit.title)
values[index] += comp.value
else:
titles.append(comp.unit.title)
dimensions.append(comp.unit.dimension)
values.append(comp.value)
data = [[titles[i], dimensions[i], values[i]] for i in range(len(titles))]
return ExcelResponse(data)
def page_not_found(request, exception):
return render(
request,
'misc/404.html',
{'path': request.path},
status=404
)
def server_error(request):
return render(request, "misc/500.html", status=500)
| UTF-8 | Python | false | false | 6,263 | py | 26 | views.py | 14 | 0.596031 | 0.590429 | 0 | 233 | 25.815451 | 78 |
mathLab/ITHACA-SEM | 7,172,595,433,767 | 460b91f84e33af19805cbdd088010a3d58ccc408 | 676ffceabdfe022b6381807def2ea401302430ac | /library/Demos/Python/FieldUtils/compositeid.py | 1134fd490d907d00a629a062794f0d635b4d1c32 | [
"MIT"
]
| permissive | https://github.com/mathLab/ITHACA-SEM | 3adf7a49567040398d758f4ee258276fee80065e | 065a269e3f18f2fc9d9f4abd9d47abba14d0933b | refs/heads/master | 2022-07-06T23:42:51.869689 | 2022-06-21T13:27:18 | 2022-06-21T13:27:18 | 136,485,665 | 10 | 5 | MIT | false | 2019-05-15T08:31:40 | 2018-06-07T14:01:54 | 2019-05-13T11:08:03 | 2019-05-15T08:31:40 | 153,228 | 3 | 4 | 0 | Makefile | false | false | # -f -e -m addcompositeid compositeid.xml compositeid.fld
import sys
from NekPy.FieldUtils import *
field = Field(sys.argv, forceoutput=True, error=True)
InputModule.Create("xml", field, "compositeid.xml").Run()
ProcessModule.Create("addcompositeid", field).Run()
OutputModule.Create("fld", field, "compositeid.fld").Run()
| UTF-8 | Python | false | false | 326 | py | 2,195 | compositeid.py | 1,039 | 0.754601 | 0.754601 | 0 | 9 | 35.222222 | 58 |
fs714/ipyexample | 10,995,116,306,437 | dcf519d9ed1195c929121a5f712426ffe55a1b24 | 34e58d7b034268650bddf88ef4a63e4c62563997 | /asynchronous/py36/asyncio/async_test.py | 6bbbaee6e0a06de57cd8afaf6b74f73ebe6712f6 | [
"Apache-2.0"
]
| permissive | https://github.com/fs714/ipyexample | 98d79c4f0bce8c2ced641c6042dd36d64d41c584 | fbff041804b9c46fb7f21ebbae22acff745c7b0c | refs/heads/master | 2023-01-25T03:43:53.506276 | 2020-12-09T03:59:44 | 2020-12-09T04:00:35 | 40,577,116 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # import asyncio
#
# async def compute(x, y):
# print("Compute %s + %s ..." % (x, y))
# await asyncio.sleep(1.0)
# return x + y
#
# async def print_sum(x, y):
# for i in range(10):
# result = await compute(x, y)
# print("%s + %s = %s" % (x, y, result))
#
# loop = asyncio.get_event_loop()
# loop.run_until_complete(print_sum(1,2))
# asyncio.ensure_future(print_sum(1, 2))
# asyncio.ensure_future(print_sum(3, 4))
# asyncio.ensure_future(print_sum(5, 6))
# loop.run_forever()
import asyncio
async def display_date(who, num):
i = 0
while True:
if i > num:
return
print('{}: Before loop {}'.format(who, i))
await asyncio.sleep(1)
i += 1
loop = asyncio.get_event_loop()
asyncio.ensure_future(display_date('AAA', 4))
asyncio.ensure_future(display_date('BBB', 6))
loop.run_forever()
| UTF-8 | Python | false | false | 868 | py | 52 | async_test.py | 31 | 0.581797 | 0.562212 | 0 | 38 | 21.842105 | 50 |
edwinreg011/Coding-Dojo | 4,767,413,724,298 | 329c7df952922cdd39175e5c8c0fd35ac32bdc4b | d261119814862d1119ffa0a7bf64f2cfa956afac | /python_stack/django/django_intro/django_example/apps/first_app/views.py | aee2bbe2214874420e6ea7ead3cd82613edeff36 | []
| no_license | https://github.com/edwinreg011/Coding-Dojo | 3280a2290dc4d8fb709c1ff4b4906b078925a705 | 14bb5844d741a2be05f995987a434f335c90e6c9 | refs/heads/master | 2022-12-24T14:23:20.342451 | 2019-12-17T19:24:54 | 2019-12-17T19:24:54 | 228,685,995 | 0 | 0 | null | false | 2022-12-11T17:34:49 | 2019-12-17T19:21:41 | 2019-12-17T19:24:57 | 2022-12-11T17:34:49 | 24,187 | 0 | 0 | 23 | Python | false | false | from django.shortcuts import render, HttpResponse
def index(request):
return HttpResponse("this is the equivalent of app.route('/') !")
def temp(request):
return render (request, "first_app/index.html")
def passData(request):
context = {
"name": "Edwin",
"fav_color": "Black",
"pets": ["Luna", "Tuna", "Jona"]
}
return render (request, "first_app/index.html", context) | UTF-8 | Python | false | false | 393 | py | 272 | views.py | 81 | 0.664122 | 0.664122 | 0 | 15 | 25.266667 | 67 |
JissuPark/binary-diffing-tool-1 | 5,540,507,818,227 | bffda21ea2e3473e15254392d5975dde1d1d8b40 | 8d4a1762c6cb702b0cbc87db4dfd7fcd28ecef0d | /Main_engine/migrations/0015_auto_20191112_1606.py | fd8020c20077138aa107af8c9329966fcaa85c74 | []
| no_license | https://github.com/JissuPark/binary-diffing-tool-1 | f8433159603a78983e3be5710994e57adb713f2c | 46fd8e966b315dae93b69fce814a469e7d248610 | refs/heads/master | 2023-01-25T01:59:14.539810 | 2019-12-21T06:52:16 | 2019-12-21T06:52:16 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.2.7 on 2019-11-12 07:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Main_engine', '0014_pe_info_imphash'),
]
operations = [
migrations.AddField(
model_name='pe_info',
name='ContainedSections',
field=models.BigIntegerField(null=True),
),
migrations.AddField(
model_name='pe_info',
name='EntryPoint',
field=models.BigIntegerField(null=True),
),
migrations.AddField(
model_name='pe_info',
name='Targetmachine',
field=models.TextField(null=True),
),
]
| UTF-8 | Python | false | false | 712 | py | 109 | 0015_auto_20191112_1606.py | 79 | 0.557584 | 0.530899 | 0 | 28 | 24.428571 | 52 |
StevenJimenez18/CodingDojo | 6,734,508,753,708 | 0efd352a2fed74c0d84775d89f06666836bbcb49 | 50642560d2a03b4708c268331efdcda1c4443967 | /Python/_python/python_fundamentals/playground_flask/playground.py | 88c60686b20cfab5435b9b1f0a156be458dff027 | []
| no_license | https://github.com/StevenJimenez18/CodingDojo | 5071c8aabb2f99a5fc1aed47804fa988e6027442 | 25ab5591c2806ee34bf21de3ed631e3e6fd38a81 | refs/heads/main | 2023-04-23T17:25:01.980859 | 2021-04-27T21:48:05 | 2021-04-27T21:48:05 | 362,242,244 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask
from flask.templating import render_template
app = Flask(__name__)
@app.route('/play')
def index():
return render_template('index.html', phrase="Hello World!", box="",times=3,level1or2= "True")
@app.route('/play/<amount>')
def amount(amount):
return render_template('index.html', phrase="Hello World!", box="", times=int(amount),level1or2 ="True")
@app.route('/play/<amount>/<color>')
def color(amount,color):
return render_template('index.html', phrase="Hello World!", box="", times=int(amount), textcolor=color, level3="True")
if __name__=="__main__":
app.run(debug=True) | UTF-8 | Python | false | false | 637 | py | 141 | playground.py | 68 | 0.657771 | 0.648352 | 0 | 19 | 31.578947 | 122 |
kvoitiuk/codingbat-solutions | 5,025,111,777,518 | 2c02be06d006822752090e0edd696d26d8364a84 | 07ff318f80d28bc437c3c1070b4726cd27df2444 | /python/list-1.py | 1317de5615f13cf4fc42d5057fdae2e2764f880a | []
| no_license | https://github.com/kvoitiuk/codingbat-solutions | 04b3905752147d4a4257073edc6ff77f3a451bd2 | ff6c463a5bb3bf92ce7450362c17e1112ea50216 | refs/heads/master | 2021-01-17T11:55:55.172968 | 2017-03-25T19:29:54 | 2017-03-25T19:29:54 | 59,352,083 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# ------------- List-1: -----------------
#first_last6
#Given an array of ints, return True if 6 appears as either the first or last element in the array. The array will be length 1
#or more.
def first_last6(nums):
return nums[len(nums)-1] ==6 or nums[0] == 6
#same_first_last
#Given an array of ints, return True if the array is length 1 or more, and the first element and the last element are equal.
def same_first_last(nums):
return len(nums)>=1 and nums[0] == nums[-1]
#make_pi
#Return an int array length 3 containing the first 3 digits of pi, {3, 1, 4}.
def make_pi():
return [3, 1, 4]
#common_end
#Given 2 arrays of ints, a and b, return True if they have the same first element or they have the same last element.
#Both arrays will be length 1 or more.
def common_end(a, b):
return a[0] == b[0] or a[-1] == b[-1]
#sum3
#Given an array of ints length 3, return the sum of all the elements.
def sum3(nums):
sum = 0
for int in nums:
sum += int
return sum
#rotate_left3
#Given an array of ints length 3, return an array with the elements "rotated left" so {1, 2, 3} yields {2, 3, 1}.
def rotate_left3(nums):
return nums[1:len(nums)] + nums[0:1]
#reverse3
#Given an array of ints length 3, return a new array with the elements in reverse order, so {1, 2, 3} becomes {3, 2, 1}.
def reverse3(nums):
return [nums[2], nums[1], nums[0]]
#max_end3
#Given an array of ints length 3, figure out which is larger between the first and last elements in the array, and set all the
#other elements to be that value. Return the changed array.
def max_end3(nums):
result = max(nums[0], nums[2])
return [result]*len(nums)
#sum2
#Given an array of ints, return the sum of the first 2 elements in the array. If the array length is less than 2, just sum up
#the elements that exist, returning 0 if the array is length 0.
def sum2(nums):
result = 0
for index in range(min(2, len(nums))):
result += nums[index]
return result
#middle_way
#Given an array of ints, return the sum of the first 2 elements in the array. If the array length is less than 2, just sum up
#the elements that exist, returning 0 if the array is length 0.
def middle_way(a, b):
return [a[len(a)/2], b[len(b)/2]]
#make_ends
#Given an array of ints, return a new array length 2 containing the first and last elements from the original array. The
#original array will be length 1 or more.
def make_ends(nums):
return [nums[0], nums[-1]]
#has23
#Given an int array length 2, return True if it contains a 2 or a 3.
def has23(nums):
return 2 in nums or 3 in nums
| UTF-8 | Python | false | false | 2,577 | py | 18 | list-1.py | 17 | 0.690338 | 0.656577 | 0 | 81 | 30.753086 | 126 |
rodrigoTcarmo/RegistronAPI | 15,719,580,348,680 | 7ba4673f5c07984d6c08f5bfcf9670f94d3ae676 | 93ba943a777e9d23204e0477cd98aec5d7b6bbe9 | /registronAUTH/forms.py | 46889108f6038ec10a21abb4167ba5cca230f3ba | []
| no_license | https://github.com/rodrigoTcarmo/RegistronAPI | a24ebe8dad5373f0537bdc284f87696be7449779 | 9f99a8e09a62b2a14fa9aa74a160bd37d002bc9e | refs/heads/master | 2023-03-04T13:28:53.927281 | 2021-02-18T12:59:34 | 2021-02-18T12:59:34 | 340,050,199 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import PasswordChangeForm
class PasswordChgForm(PasswordChangeForm):
old_password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'form-control', 'type': 'password'}))
new_password1 = forms.CharField(max_length=100, widget=forms.PasswordInput(attrs={'class': 'form-control', 'type': 'password'}))
new_password2 = forms.CharField(max_length=100, widget=forms.PasswordInput(attrs={'class': 'form-control', 'type': 'password'}))
class Meta:
model = User
fields = ('old_password', 'new_password1', 'new_password2')
| UTF-8 | Python | false | false | 659 | py | 40 | forms.py | 24 | 0.723824 | 0.708649 | 0 | 13 | 49.692308 | 132 |
threeq/wifi_locating | 9,706,626,101,328 | 4ff2fa9d2bd16984c73c65319c5ba7e7c1356ead | 0935ab161ed3ca1b17020e65e10ea501f7077736 | /locatingserver.py | 161b0d5c21c36656208addb32f53c21c351d3da4 | []
| no_license | https://github.com/threeq/wifi_locating | 28039134307ff2b07a33e27c3ad6d8d7b53787f5 | 431a373f082a1c6cbe17a051d652414eee932231 | refs/heads/master | 2021-01-02T22:50:34.022822 | 2013-12-04T10:23:11 | 2013-12-04T10:23:11 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
#####################################
## Read the incoming data and format it
##
##
#####################################
import os;
import sys;
import string;
from numpy import *;
import operator;
import time;
def load_data(path='data/'):
data_file_lst = os.listdir(path);
attrlist = [];
attrType = {};
for data_file in data_file_lst:
position = data_file.split('.')[0];
print position;
for line in open(path+data_file):
if line==None or line=='' or line=='\n': continue;
posattr = {};
apList = line.split('$');
for ap in apList:
if ap!=None and ap!='' and ap!='\n':
ap_attr_list = ap.split('|');
posattr[ap_attr_list[1]] = -1*string.atoi(ap_attr_list[3]);
posattr['position']=position;
attrType[ap_attr_list[1]] = True;
attrlist.append(posattr);
posdata = [];
d = {};
for attr in attrlist:
onedata = [];
for ap in attrType:
try:
onedata.append(attr[ap]);
except:
onedata.append(0);
onedata.append(attr['position']);
        # filter out duplicate records
k = ','.join([str(i) for i in onedata]);
d.setdefault(k, 0)
d[k]+=1
if d[k]==1:
posdata.append(onedata);
attrlist = attrType.keys();
dataset = [ p[0:-1] for p in posdata];
labels = [p[-1:] for p in posdata];
return dataset, labels, attrlist;
# KNN (k-nearest-neighbour) classification
def classify_pos(inX, dataSet, labels, k):
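    # k-nearest-neighbour vote: compute the Euclidean distance from inX to every
    # training sample, take the k closest and return the most frequent label.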
dataSetSize = dataSet.shape[0];
diffMat = tile(inX, (dataSetSize,1)) - dataSet;
sqDiffMat = diffMat**2;
sqDistances = sqDiffMat.sum(axis=1);
distances = sqDistances**0.5
sortedDistIndicies = distances.argsort();
classCount = {};
for i in range(k):
voteIlabel = labels[sortedDistIndicies[i]][0]
classCount[voteIlabel] = classCount.get(voteIlabel,0)+1;
sortedClassCount = sorted(classCount.iteritems(), key=operator.itemgetter(1), reverse=True);
# print sortedClassCount;
return sortedClassCount[0][0]
print "加载分类数据========================"
dataset, labels, attrList = load_data();
print len(attrList);
dataset = array(dataset);
print dataset.shape;
#----------------------------------------------------------------------
def do_locating(dataset_fl, attrList_fl):
    """Classify the given fingerprints against the training data and return the majority-vote position."""
    # rearrange the query data to match the training attribute order
dataset_yz_f = [];
for dt in dataset_fl:
dt_f = [];
for attr in attrList:
index = -1;
for i in range(len(attrList_fl)):
if attr==attrList_fl[i]:
index = i;
            if index!=-1: dt_f.append(dt[index])
else: dt_f.append(0);
dataset_yz_f.append(dt_f);
    # start classifying the samples
k = 50;
countLab = {};
for i in range(len(dataset_yz_f)):
data = dataset_yz_f[i];
s = time.clock();
c = classify_pos(data, dataset, labels, k);
e = time.clock();
print '第',i+1 ,'次使用时间:', e-s, "ms"
countLab.setdefault(c, 0);
countLab[c] += 1;
sortedLab = sorted(countLab.iteritems(), key=operator.itemgetter(1), reverse=True)
print sortedLab[0];
print "分类结果:%s" %(sortedLab[0][0]);
return sortedLab[0][0];
# start the web server
import web
render = web.template.render('templates/loacting/');
urls = (
"", "test",
"/server.html", "locating",
"/collect.do", "collector",
"/", "test"
)
class relocating:
def GET(self): raise web.seeother('/')
# data collector
class collector:
#----------------------------------------------------------------------
def GET(self):
""""""
reqArgs = web.input(location=None);
# locating service
class locating:
def GET(self):
reqArgs = web.input(location=None);
loc = reqArgs.location;
if None==loc or loc=='':
return "数据输入有误"
attrlist = [];
attrType = {};
for line in loc.split('@'):
if line==None or line=='' or line=='\n': continue;
posattr = {};
apList = line.split('$');
for ap in apList:
if ap!=None and ap!='' and ap!='\n':
ap_attr_list = ap.split('|');
posattr[ap_attr_list[1]] = -1*string.atoi(ap_attr_list[3]);
attrType[ap_attr_list[1]] = True;
attrlist.append(posattr);
posdata = [];
d = {};
for attr in attrlist:
onedata = [];
for ap in attrType:
try:
onedata.append(attr[ap]);
except:
onedata.append(0);
            # filter out duplicate records
k = ','.join([str(i) for i in onedata]);
d.setdefault(k, 0)
d[k]+=1
if d[k]==1:
posdata.append(onedata);
attrlist = attrType.keys();
position = do_locating(posdata, attrlist);
print position;
return position;
class test:
def GET(self):
return render.test();
app_locating = web.application(urls, locals()) | UTF-8 | Python | false | false | 4,928 | py | 13 | locatingserver.py | 4 | 0.526823 | 0.516974 | 0 | 191 | 23.984293 | 96 |
khasherr/SummerOfPython | 12,730,283,086,427 | 5bcfbe420183ecca0aeca77adda3ae95a7a41808 | e2649ed541e9bfab6687d6c1ab3e1bac330e2bdd | /NaturalNumbers.py | 407fd3988c499dbaaed047aa44586bfb25ac1c31 | []
| no_license | https://github.com/khasherr/SummerOfPython | d9c0077a0b21f138dfc2998d6d9dcc3884d9eb80 | 7b9a746c79946a10a616c88639723313b38cda68 | refs/heads/master | 2020-05-18T14:32:18.157604 | 2019-09-26T02:04:28 | 2019-09-26T02:04:28 | 184,474,327 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #This program prints prints first n natural numbers
userInput = int(input()) #takes userinput
counter = 1 #initialized counter to 1
while(counter <= userInput): #condition to check counter is less than or equal to userinput
print(counter) #prints out counter
counter+=1 #increments counter by 1 to avoid infinite loop | UTF-8 | Python | false | false | 351 | py | 90 | NaturalNumbers.py | 89 | 0.712251 | 0.700855 | 0 | 8 | 43 | 92 |
Aasthaengg/IBMdataset | 14,345,190,813,041 | 48d6114981ba87a2803ffdf02405b2e2a6cf92f7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03251/s960945580.py | ca487d308833f3570e3c9a9427dd28bd120b7acc | []
| no_license | https://github.com/Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | N,M,X,Y = list(map(int,input().split()))
x = list(map(int,input().split()))
y = list(map(int,input().split()))
x.append(X)
y.append(Y)
maxX = max(x)
minY = min(y)
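# "No War" iff every value on the x side (including X) is strictly below every value on the y side (including Y).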
if maxX < minY:
print("No War")
else:
print("War") | UTF-8 | Python | false | false | 221 | py | 202,060 | s960945580.py | 202,055 | 0.588235 | 0.588235 | 0 | 11 | 19.181818 | 40 |
gwk/coven | 1,838,246,027,957 | d1fa763d90312183650f16b5ac15ccef7a661d4a | c6101cf5b4fe902d63d5ce244df67e61aaa6d718 | /test/while_simple_%s.py | 6dc9211b29cf9d0eff7c8bd7c6682c00eed01a57 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
]
| permissive | https://github.com/gwk/coven | 977fe00375db69556c2c3fe021ffe6c205d892b1 | 0cdfba60c75b325f64fc33afa72659061333a1a6 | refs/heads/main | 2021-11-28T06:22:09.543072 | 2021-09-16T01:22:28 | 2021-09-16T01:22:28 | 65,837,644 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from sys import argv
def while_simple(i):
'''
Python 3.10 duplicates the `while` test at the beginning and end of the loop.
Complete coverage requires that both branches of both tests are exercised.
Thus i=0 and i=1 are not enough for complete code covereage, but i=0 and i=2 are.
'''
while i:
in_while_simple()
i -= 1
def in_while_simple(): pass
for c in argv[1]:
while_simple(int(c))
| UTF-8 | Python | false | false | 411 | py | 42 | while_simple_%s.py | 37 | 0.686131 | 0.664234 | 0 | 18 | 21.833333 | 83 |
moChen0607/prmaya | 15,410,342,679,526 | 01e7e25a9dd0e6a2c0767782558b61a1aaa38217 | 756ad94623cfd007dee99edfcab7ad48bfdf9254 | /test/plugins/test_prMotionPath.py | 2831c5e6967aa8e86f5cb76de8b48387e0e2e1c0 | [
"MIT"
]
| permissive | https://github.com/moChen0607/prmaya | 0761a2cb57ba66feec4d2cd786a7f7cdabad2933 | 1b48f2b72b458d106752fee768846e1c172dd59a | refs/heads/master | 2020-12-11T09:56:44.650138 | 2019-12-31T19:18:49 | 2019-12-31T19:18:49 | 148,767,315 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
import sys
sys.path.append(r'C:\Users\paz\Documents\git\prmaya\test\plugins')
import test_prMotionPath
reload(test_prMotionPath)
test_prMotionPath.SETTINGS['plugin_path'] = r'C:\Users\paz\Documents\git\prmaya\prmaya\plugins\prMotionPath.py'
test_prMotionPath.SETTINGS['file'] = r'C:\Users\paz\Documents\git\prmaya\test\plugins\test_prMotionPath.ma'
test_prMotionPath.run()
import sys
sys.path.append('/home/prthlein/private/code/prmaya/test/plugins')
import test_prMotionPath
reload(test_prMotionPath)
test_prMotionPath.SETTINGS['plugin_path'] = r'/home/prthlein/private/code/prmaya/prmaya/plugins/prMotionPath.py'
test_prMotionPath.SETTINGS['file'] = r'/home/prthlein/private/code/prmaya/test/plugins/test_prMotionPath.ma'
test_prMotionPath.run()
"""
import maya.cmds as mc
from prmaya.plugins import prMotionPath
reload(prMotionPath)
SETTINGS = {'plugin_name': 'prMotionPath.py',
            'plugin_path': r'C:\Users\paz\Documents\git\prmaya\prmaya\plugins\prMotionPath.py',
            'file': r'C:\Users\paz\Documents\git\prmaya\test\plugins\test_prMotionPath.ma',
}
def createTempFile():
"""create and reopen TEMP scene"""
TEMP_FILE = mc.file(q=True, sceneName=True).replace('.ma', 'TEMP.ma')
mc.file(rename=TEMP_FILE)
mc.file(save=True, force=True)
mc.file(new=True, force=True)
mc.file(TEMP_FILE, open=True, force=True)
mc.file(renameToSave=True)
def run():
mc.file(newFile=True, force=True)
mc.unloadPlugin(SETTINGS['plugin_name'])
mc.loadPlugin(SETTINGS['plugin_path'])
mc.file(SETTINGS['file'], open=True, force=True)
prNode = mc.createNode('prMotionPath', name='prnode_output_prMotionPath')
mc.connectAttr('curve1.worldSpace', prNode+'.inputCurve', force=True)
mc.connectAttr('uValue_locator.fractionMode', prNode+'.fractionMode', force=True)
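    # Wire eleven samples along the curve: feed each uValue attribute in and route each outTranslate to its prnode_output_<x> transform.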
for x in range(11):
mc.connectAttr('uValue_locator.uValue'+str(x), '{}.uValue[{}]'.format(prNode, x), force=True)
mc.connectAttr('{0}.output[{1}].outTranslate'.format(prNode, x), 'prnode_output_{}.t'.format(x), force=True)
createTempFile()
| UTF-8 | Python | false | false | 2,101 | py | 3 | test_prMotionPath.py | 3 | 0.716802 | 0.714422 | 0 | 55 | 37.2 | 116 |
nawidsayed/greedy_CNN | 8,950,711,851,137 | 6786d9723f3c01e74976c5281d3f2a4a492eb652 | 731b99eb77ace46a7b9b40f6bf61d061c225944b | /greedy_convnet/sub_nets/__init__.py | f537666fcc81d4f1fe07e1047e9f78fdf0a8a2ed | []
| no_license | https://github.com/nawidsayed/greedy_CNN | 9ef5b53a49a80a44ac7f6743183012b3223ffafa | 5deaa2a4f61f8717441288865762685fe56f0608 | refs/heads/master | 2019-01-07T02:24:25.826303 | 2016-11-28T15:05:09 | 2016-11-28T15:05:09 | 74,978,107 | 0 | 0 | null | true | 2016-11-28T13:59:01 | 2016-11-28T13:59:00 | 2016-08-15T13:44:51 | 2016-11-11T14:36:26 | 337 | 0 | 0 | 0 | null | null | null | # from greedyLayer import greedyLayer
from greedyLayer_reloaded import greedyLayer_reload
# from greedyLayer_ReLU import greedyLayer_ReLU
from boostedPerceptron import boostedPerceptron
# from boostedNode_ReLU import boostedNode_ReLU
| UTF-8 | Python | false | false | 234 | py | 3 | __init__.py | 2 | 0.863248 | 0.863248 | 0 | 5 | 45.8 | 51 |
filipbartek6417/cviceniaMaturitySEN | 16,252,156,250,280 | e829bce5b2ccc376b3f187be78fa3515afa636b9 | 8bf3706b166a780b1257190c7c04b3c715ed9bb9 | /ucastnici.py | 21f69d5d518f9a32c86b7e0f3d653484aba4531e | []
| no_license | https://github.com/filipbartek6417/cviceniaMaturitySEN | 8ae1a38b7ecff4b4ed9801fa11881540fb6d6427 | e99b428b4ae8f7ef9bce4725cf604817cfef5758 | refs/heads/master | 2023-03-29T20:55:53.675486 | 2021-04-04T07:21:48 | 2021-04-04T07:21:48 | 351,505,308 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from re import split
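# Reads participant names and finishing positions from two parallel files, reports the queried participant's place and lists the worst-placed finishers (prompts and output are in Slovak).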
with open('ucastniciMena.txt', 'r') as mena, open('ucastniciPoradie.txt') as poradie:
m = [line.rstrip() for line in mena]
p = [int(line.rstrip()) for line in poradie]
dlzka = len(m)
print('Pocet ucastnikov:',dlzka)
a = input('Zadajte meno účastníka ')
najh = ''
print('\nNajhorsi ucastnici:')
for i in range(dlzka):
if m[i] == a:
najh = p[i]
if p[i] > (dlzka-4):
print(m[i])
print()
if najh != '':
print(a+" skoncil na "+str(najh)+". mieste")
else:
print("Taky ucastnik nie je!") | UTF-8 | Python | false | false | 605 | py | 49 | ucastnici.py | 29 | 0.548173 | 0.546512 | 0 | 20 | 29.15 | 85 |
ayushjjwala/QUARNA | 10,728,828,340,398 | 8a86092e683444adffee7d3452695338657392f0 | c9a3d55a18c8ddfcf2fbcf45cdf32d7f45ccb8b2 | /portal/quartet_finder/semicyclic_nomenclature.py | 061b72d283d3de3bfc85c13193a4d9d4ac7908cb | []
| no_license | https://github.com/ayushjjwala/QUARNA | 54e9aebac8f749d52d8d065f8c34b97f4240ec1c | fa3686e1460d037a516d3aaa15441494037e8019 | refs/heads/master | 2021-01-25T00:57:12.764341 | 2017-06-18T19:15:30 | 2017-06-18T19:15:30 | 56,061,081 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
store = {} # Graph of the linear quartet
def norm(num):
if len(num)==1:
return '000'+num
elif len(num)==2:
return '00'+num
elif len(num)==3:
return '0'+num
else:
return num
def create_graph(q,t):
key1 = norm(q[0])+q[1]+q[2]
key2 = norm(q[3])+q[4]+q[5]
key3 = norm(q[7])+q[8]+q[9]
key4 = norm(q[11])+q[12]+q[13]
key5 = norm(t[0])+t[1]+t[2]
key6 = norm(t[3])+t[4]+t[5]
key7 = norm(t[7])+t[8]+t[9]
store[key1]={}
store[key5]={}
store[key1] = {key2:{'edge':q[6][0]+q[6][2],'ori':q[6][3]},key3:{'edge':q[10][0]+q[10][2],'ori':q[10][3]},key4:{'edge':q[14][0]+q[14][2],'ori':q[14][3]}}
if(key6!=key1):
store[key6]={}
store[key5][key6] = {'edge':t[6][0]+t[6][2],'ori':t[6][3]}
store[key6][key5] = {'edge':t[6][2]+t[6][0],'ori':t[6][3]}
link = [key5,key6]
elif(key7!=key1):
store[key7]={}
store[key5][key7] = {'edge':t[10][0]+t[10][2],'ori':t[10][3]}
store[key7][key5] = {'edge':t[10][2]+t[10][0],'ori':t[10][3]}
link = [key5,key7]
quad = [key1,key2,key3,key4]
for l in link:
quad.remove(l)
quad = quad + link
return quad
def nomenclature(quad,trip):
quad_list = quad.strip().split('\t')
quad_list = filter(None,quad_list)
trip_list = trip.strip().split('\t')
trip_list = filter(None,trip_list)
# print "here"
# print trip
# print trip_list
q = create_graph(quad_list,trip_list)
p1 = q[0][4]+q[2][4]+store[q[0]][q[2]]['edge']+store[q[0]][q[2]]['ori']+q[0][0:4]+q[2][0:4]+q[0][5:]+q[2][5:]
p2 = q[0][4]+q[3][4]+store[q[0]][q[3]]['edge']+store[q[0]][q[3]]['ori']+q[0][0:4]+q[3][0:4]+q[0][5:]+q[3][5:]
if(p1>p2):
q[2],q[3] = q[3],q[2]
p1,p2 = p2,p1
var = q[0][0:4].lstrip('0')+q[0][4]+'('+q[0][5]+')[<'+q[2][0:4].lstrip('0')+q[2][4]+'('+q[2][5]+') '+q[3][0:4].lstrip('0')+q[3][4]+'('+q[3][5]+')>'+q[1][0:4].lstrip('0')+q[1][4]+'('+q[1][5]+')]'
edge = store[q[0]][q[2]]['edge']+store[q[0]][q[2]]['ori']+'/'+store[q[0]][q[3]]['edge']+store[q[0]][q[3]]['ori']+'/'+store[q[0]][q[1]]['edge']+store[q[0]][q[1]]['ori']+','+store[q[2]][q[3]]['edge']+store[q[2]][q[3]]['ori']
return var + ' ' + edge
| UTF-8 | Python | false | false | 2,123 | py | 28 | semicyclic_nomenclature.py | 14 | 0.521432 | 0.418747 | 0 | 64 | 32.171875 | 223 |
jn8366ew/Postingboard | 7,284,264,582,497 | a9830d600735393b2107a13ea6fd48559a8b3ef5 | 91142eaf386fa64f950124dd092eca6bc30bb66e | /user_profile/urls.py | 3376f3aa37e4ab22e435b788db501b2742e4b49e | []
| no_license | https://github.com/jn8366ew/Postingboard | 127b0210b9ecb8cb0c2498a2e131c6005c815cd8 | 08279cb743ad4e9a95546b43ebaf6d64b9cd0ebe | refs/heads/master | 2023-08-14T08:26:25.244661 | 2021-09-30T04:23:51 | 2021-09-30T04:23:51 | 381,254,348 | 0 | 0 | null | false | 2021-06-29T06:29:06 | 2021-06-29T05:53:36 | 2021-06-29T06:10:49 | 2021-06-29T06:29:06 | 0 | 0 | 0 | 1 | Python | false | false | from django.urls import path, re_path
from .views import ShowProfilePageView, CreateProfilePageView, \
EditProfilePageView
app_name = 'user_profile'
urlpatterns = [
re_path(r'^(?P<pk>\d+)/profile/$', ShowProfilePageView.as_view(), name='show_profile'),
path('create_profile/', CreateProfilePageView.as_view(), name='create_profile'),
re_path(r'^(?P<pk>\d+)/edit_profile/$', EditProfilePageView.as_view(), name='edit_profile'),
] | UTF-8 | Python | false | false | 463 | py | 31 | urls.py | 16 | 0.678186 | 0.678186 | 0 | 12 | 37.666667 | 96 |
uridr/RDF-TextGeneration | 9,388,798,511,314 | 5bbb83d3b7ed0456d2236fabdac861a5822a6663 | afa8bfda3164e08459e9af30f71a05e84ba228f0 | /preprocessing/camelCase.py | fc8482707af06b499ec4f4c9493012b10aa18f5b | [
"MIT"
]
| permissive | https://github.com/uridr/RDF-TextGeneration | 9427f0741fdf9940a8d9b98762a6a5166e81c0bc | 2f57b1c288d8270b7fd73671597db36904d20310 | refs/heads/master | 2023-02-09T22:20:20.199694 | 2020-12-29T13:16:22 | 2020-12-29T13:16:22 | 251,291,582 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import re
name = 'birth date'
splitted = re.sub('([A-Z][a-z]+)', r' \1', re.sub('([A-Z]+)', r' \1', name)).split()
print(" ".join(str(x) for x in splitted)) | UTF-8 | Python | false | false | 160 | py | 224 | camelCase.py | 29 | 0.53125 | 0.51875 | 0 | 8 | 19.125 | 84 |
sebasmazo/CasinoGames | 3,762,391,381,660 | f3f0b7a8e4781a4d32f94bddf3e84c8fe15c1db6 | bc393ee852ad1f47b437756f1bfd2924169f06a8 | /prueba.py | 71d2b0202ea412ef9463d1cf478d381ae439ba9e | []
| no_license | https://github.com/sebasmazo/CasinoGames | 4691b8c341ff8b039fd5c40c1f4f28fc3d26157e | cf6b4657f15ba2bbae3505ede0725af83c5c33cd | refs/heads/master | 2023-08-27T18:09:41.082405 | 2021-10-06T18:33:58 | 2021-10-06T18:33:58 | 300,323,638 | 0 | 0 | null | false | 2020-10-08T19:30:45 | 2020-10-01T15:14:13 | 2020-10-08T19:28:44 | 2020-10-08T19:30:44 | 2,183 | 0 | 0 | 0 | Python | false | false | import main_ruleta as ruleta
ruleta.Juego() | UTF-8 | Python | false | false | 44 | py | 5 | prueba.py | 4 | 0.795455 | 0.795455 | 0 | 3 | 14 | 28 |
saum7800/GL32_Meraki | 4,526,895,555,235 | f1f1097fac4b27b810d09c7dcf6c8244555c9bcc | afa6249950e70d116328567a06a59cec98e2e008 | /src/Database.py | 38325e7f2cbcd7afae8bcd866f0e54095f4284cf | []
| no_license | https://github.com/saum7800/GL32_Meraki | 7be7519a989bb9f9a6a5625d3bb001e78576e217 | 2cbf1ffab0860125b43fe547b49871015a11816a | refs/heads/master | 2023-08-25T09:54:47.926468 | 2021-10-09T12:08:56 | 2021-10-09T12:08:56 | 284,310,259 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from pyasn1.compat.octets import null
from pyasn1.type.univ import Null
import Levenshtein
from datetime import datetime
from statistics import mean
def init_db():
import pyrebase
config = {
"apiKey": "bccdb0a053ff6210f4944c55f87fc6d241e199b0",
"authDomain": "data-5eef0.firebaseapp.com",
"databaseURL": "https://data-5eef0.firebaseio.com/",
"storageBucket": "data-5eef0.appspot.com",
}
firebase = pyrebase.initialize_app(config)
db = firebase.database()
return db
class DataBase:
def __init__(self) -> None:
# self.teacher_name = teacher_name
self.db = init_db()
self.db.remove()
self.scoreDict = {}
self.NameList = []
self.id = str(datetime.now().strftime("%d-%m-%Y-%H:%M:%S"))
self.db.child("online").set(self.id)
def checkName(self, name: str):
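        """Fuzzily match a name against names seen so far (Levenshtein distance) so near-duplicates map to one canonical entry."""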
parts = name.split()
for n in self.NameList:
n_split = n.split()
if Levenshtein.distance(n, name) < 5 or Levenshtein.distance(parts[0], n_split[0]) < 2:
return n
self.NameList.append(name)
return name
def insert_data(self, name, category, score):
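        """Record a student's category, queue the score, and push the class mean to Firebase once every student has a pending score."""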
name = self.checkName(name)
student_data = {name : category}
self.db.child("Teacher").update(student_data)
try:
self.scoreDict[name].append(score)
except KeyError:
self.scoreDict[name] = [score]
push = True
avg = 0
for ques in self.scoreDict.values():
if len(ques) == 0:
push = False
break
else:
avg += ques.pop(0)
if push:
self.db.child("means_score").set(avg/len(self.scoreDict))
def end_ses(self):
self.db.child("online").set("null")
"""import random
import time
db = DataBase()
stlist = ["Surbhi", "Sauyma", "Aayush", "Praneeth", "Kanishka", "surbha", "sauwma", "ayush"]
for i in range(200):
for s in stlist:
score = random.randrange(50, 100)
category = random.randrange(0, 3)
if random.random() < 0.4:
score -= 50
db.insert_data(s, category, score)
time.sleep(5)
#db.end_ses()
"""
| UTF-8 | Python | false | false | 2,245 | py | 37 | Database.py | 4 | 0.567483 | 0.54343 | 0 | 82 | 26.378049 | 99 |
ritviksahajpal/MLforCropYieldForecasting | 6,966,436,999,522 | f4a45478c8b1d6a14ddbd52b1b6cf872953a491b | a386b253ad65cd0c41da1c8042cc10befacb1177 | /cypml/run_workflow/run_data_preprocessing.py | 81c150f8682b362a8393f7e04e96dc54d7f47196 | []
| no_license | https://github.com/ritviksahajpal/MLforCropYieldForecasting | 255497bfcbbe625bb6ede9a46bf361d2afff7e29 | 1dc2718afdd5a6e3022944cb39ddce49b7c137c4 | refs/heads/main | 2023-04-18T05:47:06.360531 | 2021-05-03T10:09:16 | 2021-05-03T10:09:16 | 418,221,203 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def printPreprocessingInformation(df, data_source, order_cols, crop_season=None):
"""Print preprocessed data and additional debug information"""
df_regions = [reg[0] for reg in df.select('IDREGION').distinct().collect()]
print(data_source , 'data available for', len(df_regions), 'region(s)')
if (crop_season is not None):
print('Season end information')
crop_season.orderBy(['IDREGION', 'FYEAR']).show(10)
print(data_source, 'data')
df.orderBy(order_cols).show(10)
def preprocessData(cyp_config, cyp_preprocessor, data_dfs):
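  """Filter by crop and preprocess the WOFOST, meteo, remote sensing, centroid, soil and yield data frames; returns the updated dict."""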
crop_id = cyp_config.getCropID()
nuts_level = cyp_config.getNUTSLevel()
season_crosses_calyear = cyp_config.seasonCrossesCalendarYear()
use_centroids = cyp_config.useCentroids()
use_remote_sensing = cyp_config.useRemoteSensing()
debug_level = cyp_config.getDebugLevel()
order_cols = ['IDREGION', 'CAMPAIGN_YEAR', 'CAMPAIGN_DEKAD']
# wofost data
wofost_df = data_dfs['WOFOST']
wofost_df = wofost_df.filter(wofost_df['CROP_ID'] == crop_id).drop('CROP_ID')
crop_season = cyp_preprocessor.getCropSeasonInformation(wofost_df, season_crosses_calyear)
wofost_df = cyp_preprocessor.preprocessWofost(wofost_df, crop_season, season_crosses_calyear)
wofost_regions = [reg[0] for reg in wofost_df.select('IDREGION').distinct().collect()]
data_dfs['WOFOST'] = wofost_df
if (debug_level > 1):
printPreprocessingInformation(wofost_df, 'WOFOST', order_cols, crop_season)
# meteo data
meteo_df = data_dfs['METEO']
meteo_df = cyp_preprocessor.preprocessMeteo(meteo_df, crop_season, season_crosses_calyear)
assert (meteo_df is not None)
data_dfs['METEO'] = meteo_df
if (debug_level > 1):
printPreprocessingInformation(meteo_df, 'METEO', order_cols)
# remote sensing data
rs_df = None
if (use_remote_sensing):
rs_df = data_dfs['REMOTE_SENSING']
rs_df = rs_df.drop('IDCOVER')
# if other data is at NUTS3, convert rs_df to NUTS3 using parent region data
if (nuts_level == 'NUTS3'):
rs_df = cyp_preprocessor.remoteSensingNUTS2ToNUTS3(rs_df, wofost_regions)
rs_df = cyp_preprocessor.preprocessRemoteSensing(rs_df, crop_season, season_crosses_calyear)
assert (rs_df is not None)
data_dfs['REMOTE_SENSING'] = rs_df
if (debug_level > 1):
printPreprocessingInformation(rs_df, 'REMOTE_SENSING', order_cols)
order_cols = ['IDREGION']
# centroids and distance to coast
centroids_df = None
if (use_centroids):
centroids_df = data_dfs['CENTROIDS']
centroids_df = cyp_preprocessor.preprocessCentroids(centroids_df)
data_dfs['CENTROIDS'] = centroids_df
if (debug_level > 1):
printPreprocessingInformation(centroids_df, 'CENTROIDS', order_cols)
# soil data
soil_df = data_dfs['SOIL']
soil_df = cyp_preprocessor.preprocessSoil(soil_df)
data_dfs['SOIL'] = soil_df
if (debug_level > 1):
printPreprocessingInformation(soil_df, 'SOIL', order_cols)
order_cols = ['IDREGION', 'FYEAR']
# yield_data
yield_df = data_dfs['YIELD']
if (debug_level > 1):
print('Yield before preprocessing')
yield_df.show(10)
yield_df = cyp_preprocessor.preprocessYield(yield_df, crop_id)
assert (yield_df is not None)
data_dfs['YIELD'] = yield_df
if (debug_level > 1):
print('Yield after preprocessing')
yield_df.show(10)
return data_dfs
| UTF-8 | Python | false | false | 3,309 | py | 35 | run_data_preprocessing.py | 31 | 0.700816 | 0.694167 | 0 | 86 | 37.476744 | 96 |
SShayashi/ABC | 16,114,717,309,010 | f7135d4c8adb16ddd46651047da6262d753857b6 | da7a165522daea7c346693c5f32850017c482967 | /abc101-150/abc129/c.py | b5e1f50e5666fff3cd27843556cc1eef4be26f07 | []
| no_license | https://github.com/SShayashi/ABC | 19f8750919208c5ff8935638dbaab941c255f914 | 3cbfee0c5251c1bb0df6306166d8d4b33bf7bb2c | refs/heads/master | 2021-05-04T21:06:10.720367 | 2020-07-11T13:59:16 | 2020-07-11T13:59:29 | 119,886,572 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def m():
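    # Staircase DP: count ways to reach step N with moves of 1 or 2 while never landing on the M steps listed in A, modulo 1e9+7.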
MOD = 10 ** 9 + 7
N, M = map(int, input().split())
A = [int(input()) for _ in range(M)]
dp = [0] * (N + 1)
for a in A: dp[a] = -1
if N == 1:
if dp[1] != -1:
return 1
else:
return 0
if dp[1] != -1 and dp[2] != -1:
dp[1] = 1
dp[2] = 2
elif dp[1] != -1 and dp[2] == -1:
dp[1] = 1
elif dp[1] == -1 and dp[2] != -1:
dp[2] = 1
else:
return 0
for i in range(3, N + 1):
if dp[i] == -1:
continue
l = dp[i - 2] if dp[i - 2] != -1 else 0
r = dp[i - 1] if dp[i - 1] != -1 else 0
dp[i] = (l + r) % MOD
return dp[N] % MOD
print(m())
| UTF-8 | Python | false | false | 703 | py | 288 | c.py | 285 | 0.357041 | 0.294452 | 0 | 31 | 21.677419 | 47 |
nidhi76/PPL20 | 5,497,558,170,004 | 996293afa5afa3898e379e4901aa8536ff0bf529 | 8bb92d36910f958e0af184d3a86e8f4be54e837b | /assign3/shapes/penta.py | 057fdeefc3b37afe54adb767395c9a74cb7eba2c | []
| no_license | https://github.com/nidhi76/PPL20 | 08b98789bf9983a38a7888078b5089fbae202c9a | 464c17f6bf7d54c816dad84dfa7fa26602f77c8b | refs/heads/master | 2023-04-18T13:48:02.651357 | 2020-05-30T05:39:29 | 2020-05-30T05:39:29 | 267,807,457 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import turtle
tk=turtle.Turtle()
class Penta:
def __init__(self):
self.length=85
self.sides=5
self.angle=360/5
def draw(self):
for i in range(self.sides):
tk.left(self.angle)
tk.forward(self.length)
turtle.done()
penta=Penta()
penta.draw()
| UTF-8 | Python | false | false | 264 | py | 58 | penta.py | 47 | 0.666667 | 0.640152 | 0 | 20 | 12.2 | 29 |
Linfeng-Tang/VIF-Benchmark | 16,922,171,171,551 | df18b7f977a6d85c55bd02d2c27621e43db8c5ed | 57de9a859ace880a30290a140e0f35f18ed3238a | /RFN-Nest/train_fusionnet.py | 6942f97539cdeea3040dcd0995c7f246989b6872 | [
"MIT"
]
| permissive | https://github.com/Linfeng-Tang/VIF-Benchmark | cf7609f836d8f6b6666e9c795c79367eb2e8c5ce | ae368ea39b4049afb4b54cb3447d26107c3a8ab1 | refs/heads/main | 2023-05-22T23:42:45.953544 | 2023-02-28T12:40:57 | 2023-02-28T12:40:57 | 607,610,052 | 29 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Training a NestFuse network
# auto-encoder
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import sys
import time
from tqdm import tqdm, trange
import scipy.io as scio
import random
import torch
from torch.optim import Adam
from torch.autograd import Variable
import utils
from net import NestFuse_light2_nodense, Fusion_network
from args_fusion import args
import pytorch_msssim
EPSILON = 1e-5
def main():
original_imgs_path, _ = utils.list_images(args.dataset_ir)
train_num = 80000
original_imgs_path = original_imgs_path[:train_num]
random.shuffle(original_imgs_path)
# True - RGB , False - gray
img_flag = False
alpha_list = [700]
w_all_list = [[6.0, 3.0]]
for w_w in w_all_list:
w1, w2 = w_w
for alpha in alpha_list:
train(original_imgs_path, img_flag, alpha, w1, w2)
def train(original_imgs_path, img_flag, alpha, w1, w2):
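    """Train the fusion network: SSIM loss against the visible image (scaled by alpha) plus a feature loss pulling fused features towards w1*IR + w2*visible encoder features."""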
batch_size = args.batch_size
# load network model
nc = 1
input_nc = nc
output_nc = nc
nb_filter = [64, 112, 160, 208, 256]
f_type = 'res'
with torch.no_grad():
deepsupervision = False
nest_model = NestFuse_light2_nodense(nb_filter, input_nc, output_nc, deepsupervision)
model_path = args.resume_nestfuse
# load auto-encoder network
print('Resuming, initializing auto-encoder using weight from {}.'.format(model_path))
nest_model.load_state_dict(torch.load(model_path))
nest_model.eval()
# fusion network
fusion_model = Fusion_network(nb_filter, f_type)
if args.resume_fusion_model is not None:
print('Resuming, initializing fusion net using weight from {}.'.format(args.resume_fusion_model))
fusion_model.load_state_dict(torch.load(args.resume_fusion_model))
optimizer = Adam(fusion_model.parameters(), args.lr)
mse_loss = torch.nn.MSELoss()
ssim_loss = pytorch_msssim.msssim
if args.cuda:
nest_model.cuda()
fusion_model.cuda()
tbar = trange(args.epochs)
print('Start training.....')
# creating save path
temp_path_model = os.path.join(args.save_fusion_model)
temp_path_loss = os.path.join(args.save_loss_dir)
if os.path.exists(temp_path_model) is False:
os.mkdir(temp_path_model)
if os.path.exists(temp_path_loss) is False:
os.mkdir(temp_path_loss)
temp_path_model_w = os.path.join(args.save_fusion_model, str(w1))
temp_path_loss_w = os.path.join(args.save_loss_dir, str(w1))
if os.path.exists(temp_path_model_w) is False:
os.mkdir(temp_path_model_w)
if os.path.exists(temp_path_loss_w) is False:
os.mkdir(temp_path_loss_w)
Loss_feature = []
Loss_ssim = []
Loss_all = []
count_loss = 0
all_ssim_loss = 0.
all_fea_loss = 0.
for e in tbar:
print('Epoch %d.....' % e)
# load training database
image_set_ir, batches = utils.load_dataset(original_imgs_path, batch_size)
fusion_model.train()
count = 0
for batch in range(batches):
image_paths_ir = image_set_ir[batch * batch_size:(batch * batch_size + batch_size)]
img_ir = utils.get_train_images(image_paths_ir, height=args.HEIGHT, width=args.WIDTH, flag=img_flag)
image_paths_vi = [x.replace('lwir', 'visible') for x in image_paths_ir]
img_vi = utils.get_train_images(image_paths_vi, height=args.HEIGHT, width=args.WIDTH, flag=img_flag)
count += 1
optimizer.zero_grad()
img_ir = Variable(img_ir, requires_grad=False)
img_vi = Variable(img_vi, requires_grad=False)
if args.cuda:
img_ir = img_ir.cuda()
img_vi = img_vi.cuda()
# get fusion image
# encoder
en_ir = nest_model.encoder(img_ir)
en_vi = nest_model.encoder(img_vi)
# fusion
f = fusion_model(en_ir, en_vi)
# decoder
outputs = nest_model.decoder_eval(f)
# resolution loss: between fusion image and visible image
x_ir = Variable(img_ir.data.clone(), requires_grad=False)
x_vi = Variable(img_vi.data.clone(), requires_grad=False)
######################### LOSS FUNCTION #########################
loss1_value = 0.
loss2_value = 0.
for output in outputs:
output = (output - torch.min(output)) / (torch.max(output) - torch.min(output) + EPSILON)
output = output * 255
# ---------------------- LOSS IMAGES ------------------------------------
# detail loss
# ssim_loss_temp1 = ssim_loss(output, x_ir, normalize=True)
ssim_loss_temp2 = ssim_loss(output, x_vi, normalize=True)
loss1_value += alpha * (1 - ssim_loss_temp2)
# feature loss
g2_ir_fea = en_ir
g2_vi_fea = en_vi
g2_fuse_fea = f
# w_ir = [3.5, 3.5, 3.5, 3.5]
w_ir = [w1, w1, w1, w1]
w_vi = [w2, w2, w2, w2]
w_fea = [1, 10, 100, 1000]
for ii in range(4):
g2_ir_temp = g2_ir_fea[ii]
g2_vi_temp = g2_vi_fea[ii]
g2_fuse_temp = g2_fuse_fea[ii]
(bt, cht, ht, wt) = g2_ir_temp.size()
loss2_value += w_fea[ii]*mse_loss(g2_fuse_temp, w_ir[ii]*g2_ir_temp + w_vi[ii]*g2_vi_temp)
loss1_value /= len(outputs)
loss2_value /= len(outputs)
# total loss
total_loss = loss1_value + loss2_value
total_loss.backward()
optimizer.step()
all_fea_loss += loss2_value.item() #
all_ssim_loss += loss1_value.item() #
if (batch + 1) % args.log_interval == 0:
mesg = "{}\t Alpha: {} \tW-IR: {}\tEpoch {}:\t[{}/{}]\t ssim loss: {:.6f}\t fea loss: {:.6f}\t total: {:.6f}".format(
time.ctime(), alpha, w1, e + 1, count, batches,
all_ssim_loss / args.log_interval,
all_fea_loss / args.log_interval,
(all_fea_loss + all_ssim_loss) / args.log_interval
)
tbar.set_description(mesg)
Loss_ssim.append( all_ssim_loss / args.log_interval)
Loss_feature.append(all_fea_loss / args.log_interval)
Loss_all.append((all_fea_loss + all_ssim_loss) / args.log_interval)
count_loss = count_loss + 1
all_ssim_loss = 0.
all_fea_loss = 0.
if (batch + 1) % (200 * args.log_interval) == 0:
# save model
fusion_model.eval()
fusion_model.cpu()
save_model_filename = "Epoch_" + str(e) + "_iters_" + str(count) + "_alpha_" + str(alpha) + "_wir_" + str(w1) + "_wvi_" + str(w2) + ".model"
save_model_path = os.path.join(temp_path_model, save_model_filename)
torch.save(fusion_model.state_dict(), save_model_path)
# save loss data
# pixel loss
loss_data_ssim = Loss_ssim
loss_filename_path = temp_path_loss_w + "/loss_ssim_epoch_" + str(args.epochs) + "_iters_" + str(count) + "_alpha_" + str(alpha) + "_wir_" + str(w1) + "_wvi_" + str(w2) + ".mat"
scio.savemat(loss_filename_path, {'loss_ssim': loss_data_ssim})
# SSIM loss
loss_data_fea = Loss_feature
loss_filename_path = temp_path_loss_w + "/loss_fea_epoch_" + str(args.epochs) + "_iters_" + str(count) + "_alpha_" + str(alpha) + "_wir_" + str(w1) + "_wvi_" + str(w2) + ".mat"
scio.savemat(loss_filename_path, {'loss_fea': loss_data_fea})
# all loss
loss_data = Loss_all
loss_filename_path = temp_path_loss_w + "/loss_all_epoch_" + str(args.epochs) + "_iters_" + str(count) + "_alpha_" + str(alpha) + "_wir_" + str(w1) + "_wvi_" + str(w2) + ".mat"
scio.savemat(loss_filename_path, {'loss_all': loss_data})
fusion_model.train()
fusion_model.cuda()
                tbar.set_description("Checkpoint, trained model saved at {}".format(save_model_path))
# ssim loss
loss_data_ssim = Loss_ssim
loss_filename_path = temp_path_loss_w + "/Final_loss_ssim_epoch_" + str(
args.epochs) + "_alpha_" + str(alpha) + "_wir_" + str(w1) + "_wvi_" + str(w2) + ".mat"
scio.savemat(loss_filename_path, {'final_loss_ssim': loss_data_ssim})
loss_data_fea = Loss_feature
loss_filename_path = temp_path_loss_w + "/Final_loss_2_epoch_" + str(
args.epochs) + "_alpha_" + str(alpha) + "_wir_" + str(w1) + "_wvi_" + str(w2) + ".mat"
scio.savemat(loss_filename_path, {'final_loss_fea': loss_data_fea})
# SSIM loss
loss_data = Loss_all
loss_filename_path = temp_path_loss_w + "/Final_loss_all_epoch_" + str(
args.epochs) + "_alpha_" + str(alpha) + "_wir_" + str(w1) + "_wvi_" + str(w2) + ".mat"
scio.savemat(loss_filename_path, {'final_loss_all': loss_data})
# save model
fusion_model.eval()
fusion_model.cpu()
save_model_filename = "Final_epoch_" + str(args.epochs) + "_alpha_" + str(alpha) + "_wir_" + str(
w1) + "_wvi_" + str(w2) + ".model"
save_model_path = os.path.join(temp_path_model_w, save_model_filename)
torch.save(fusion_model.state_dict(), save_model_path)
print("\nDone, trained model saved at", save_model_path)
def check_paths(args):
try:
if not os.path.exists(args.vgg_model_dir):
os.makedirs(args.vgg_model_dir)
if not os.path.exists(args.save_model_dir):
os.makedirs(args.save_model_dir)
except OSError as e:
print(e)
sys.exit(1)
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 8,564 | py | 149 | train_fusionnet.py | 132 | 0.635918 | 0.619921 | 0 | 247 | 33.672065 | 181 |
memobijou/erpghost | 7,799,660,637,199 | 52c145c4bd2471a12eb2cbb45a3d4efe58dc6bf6 | c838c53ec5de94af57696f11db08f332ff2a65d8 | /sku/migrations/0004_auto_20180516_0211.py | 71caebf476483002f7a527e271bb3ffe134bf82a | []
| no_license | https://github.com/memobijou/erpghost | 4a9af80b3c948a4d7bb20d26e5afb01b40efbab5 | c0ee90718778bc2b771b8078d9c08e038ae59284 | refs/heads/master | 2022-12-11T14:47:59.048889 | 2019-01-28T02:30:40 | 2019-01-28T02:30:40 | 113,774,918 | 1 | 1 | null | false | 2022-11-22T02:02:41 | 2017-12-10T18:53:41 | 2019-04-30T10:55:58 | 2022-11-22T02:02:38 | 8,174 | 1 | 1 | 14 | Python | false | false | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-05-16 00:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('sku', '0003_auto_20180516_0209'),
]
operations = [
migrations.RemoveField(
model_name='sku',
name='bruttopreis',
),
migrations.RemoveField(
model_name='sku',
name='menge',
),
]
| UTF-8 | Python | false | false | 490 | py | 529 | 0004_auto_20180516_0211.py | 376 | 0.555102 | 0.487755 | 0 | 23 | 20.304348 | 48 |
jingxuxie/SpectrumAnalysis | 12,472,585,054,694 | 2a3695885ac011ab2cf36418d0ea17b9f398b2e4 | f0723bc19cf15022fd3dd373a503de19b8708961 | /test1.py | 64c83f50fd18b538a7451f499de78b10ca49ab65 | []
| no_license | https://github.com/jingxuxie/SpectrumAnalysis | d476bbee0b8dfbedfd3a53a466481f2c404fe286 | 1bf8d08783221861744914d0255a41ee5a0d15c6 | refs/heads/master | 2022-11-10T16:27:40.664067 | 2020-06-19T00:51:06 | 2020-06-19T00:51:06 | 272,861,432 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 12 16:27:02 2019
@author: HP
"""
import multiprocessing
import time
start=time.time()
x=range(10)
def test_for_multiprocessing(l):
k=0
for i in range(10000):
for j in range(1000):
if i%2==0 and j%2==0:
k+=1
if i%2==1 and j%2==1:
k-=1
return k
def worker(num):
"""Returns the string of interest"""
return "worker %d" % num
def main(x):
cores = multiprocessing.cpu_count()
pool = multiprocessing.Pool(processes=cores)
results = pool.map(test_for_multiprocessing,x)
pool.close()
pool.join()
#for result in results:
# prints the result string in the main process
# print(result)
if __name__ == '__main__':
for i in x:
k=test_for_multiprocessing(1)
time1=time.time()
time2=time.time()
T1=round(time1-start,3)
T2=round(time2-time1,3)
print(T1,T2)
| UTF-8 | Python | false | false | 977 | py | 7 | test1.py | 6 | 0.560901 | 0.512794 | 0 | 47 | 19.468085 | 54 |
um-computacion-manana/2019-parcial-2 | 8,735,963,514,012 | 01d9fc0abeabe84be42ba3d4e75ac2cb9451d199 | 00406258c330e9585f577f5b04458e2967fa04d8 | /56002-Barrio-Alberto/test_impresora.py | 4ee488016b46f2bca84ccf8b4ca94be10111626b | []
| no_license | https://github.com/um-computacion-manana/2019-parcial-2 | 3bee026fbc31b5426afbcd02a5c470d893a8711a | 5cc067b466f9417e73e702b309afb9a6606477cd | refs/heads/master | 2020-05-31T07:51:55.929207 | 2019-06-05T17:01:52 | 2019-06-05T17:01:52 | 190,175,421 | 0 | 27 | null | false | 2019-06-05T17:00:26 | 2019-06-04T09:57:58 | 2019-06-04T15:02:16 | 2019-06-05T17:00:25 | 119 | 0 | 27 | 0 | Python | false | false | import unittest
from impresora import Printer
class TestPrinter(unittest.TestCase):
def setUp(self):
self.printer=Printer()
def test_nada_para_imprimir(self):
self.printer.print_job()
self.assertTrue(self.printer.error_flag)
self.assertEqual(self.printer.error_description,'nothing to print')
def test_impresora_no_disponible(self):
self.printer.add_print_job("Juan")
self.printer.add_print_job("Pepe")
self.printer.add_print_job("Paco")
self.printer.print_job()
self.assertFalse(self.printer.printer_available())
def test_imprime_con_normalidad(self):
self.assertTrue(self.printer.printer_available())
self.printer.add_print_job("Estocolmo")
self.printer.print_job()
self.assertTrue(self.printer.printing)
self.printer.reset_printer()
self.assertTrue(self.printer.printer_available())
if __name__ == '__main__':
unittest.main() | UTF-8 | Python | false | false | 979 | py | 47 | test_impresora.py | 46 | 0.663943 | 0.663943 | 0 | 28 | 34 | 75 |
JuliaHolodiy/my-site | 16,243,566,335,875 | 40de470264effc80dd53781f8489e997ea1065ae | a2841220ced03d15b9ba51bc259e5ad5bd2bf379 | /registr/views.py | 58290d880510b50b2374dce6f33eea0eafdbb5e8 | []
| no_license | https://github.com/JuliaHolodiy/my-site | dac7148166ff53943f37b95704bc2a36eeaf7204 | ed1c30f7a53887991fea8474cf1b7803eb6d4321 | refs/heads/master | 2021-01-21T18:33:30.187013 | 2017-06-09T09:56:33 | 2017-06-09T09:56:33 | 92,055,743 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render
# Create your views here.
from django.views.generic.edit import FormView
from django.contrib.auth.forms import UserCreationForm
class RegisterFormView(FormView):
form_class = UserCreationForm
# Ссылка, на которую будет перенаправляться пользователь в случае успешной регистрации.
# В данном случае указана ссылка на страницу входа для зарегистрированных пользователей.
# success_url = '/login/'
# Шаблон, который будет использоваться при отображении представления.
template_name = '../registr/template/registr/registr.html'
def form_valid(self, form):
# Создаём пользователя, если данные в форму были введены корректно.
form.save()
# Вызываем метод базового класса
return super(RegisterFormView, self).form_valid(form)
# # Опять же, спасибо django за готовую форму аутентификации.
# from django.contrib.auth.forms import AuthenticationForm
#
# # Функция для установки сессионного ключа.
# # По нему django будет определять, выполнил ли вход пользователь.
# from django.contrib.auth import login
#
# class LoginFormView(FormView):
# form_class = AuthenticationForm
#
# # Аналогично регистрации, только используем шаблон аутентификации.
# template_name = "../templates/registr/template/registr/login.html"
#
# # В случае успеха перенаправим на главную.
# success_url = "/"
#
# def form_valid(self, form):
# # Получаем объект пользователя на основе введённых в форму данных.
# self.user = form.get_user()
#
# # Выполняем аутентификацию пользователя.
# login(self.request, self.user)
# return super(LoginFormView, self).form_valid(form)
#
# from django.http import HttpResponseRedirect
# from django.views.generic.base import View
# from django.contrib.auth import logout
#
# class LogoutView(View):
# def get(self, request):
# # Выполняем выход для пользователя, запросившего данное представление.
# logout(request)
#
# # После чего, перенаправляем пользователя на главную страницу.
# return HttpResponseRedirect("/")
#
#
#
# from django.shortcuts import render_to_response
# from django.template import RequestContext
# from news.views import Post
#
#
# # можно переписать как в news/views.py
# def home(request):
# vars = dict (
# posts=Post.objects.all().order_by('-timestamp')[:10],
# )
#
# return render_to_response('index.html', vars, context_instance=RequestContext(request)) | UTF-8 | Python | false | false | 3,244 | py | 7 | views.py | 5 | 0.710882 | 0.710087 | 0 | 73 | 33.506849 | 93 |
madeleine789/sius-eventboard | 14,980,845,938,560 | 09f0d56ded6545e7b95cbdec6ad193e18f6b7dbf | 27b04ee475b68db4be62f8726b100adf656d9630 | /libs/tweets.py | 62f0d2d850de4a9fbbce60fd7048d9b5638053cf | []
| no_license | https://github.com/madeleine789/sius-eventboard | 3d3e383988cdf604e5d00d4c18f0fe6e890c7366 | 3bd4f196a08bd16e0b05744311c1a805ef7d15df | refs/heads/master | 2021-01-17T12:44:04.083669 | 2016-05-17T17:47:28 | 2016-05-17T17:47:28 | 57,375,528 | 0 | 0 | null | false | 2016-05-17T17:47:29 | 2016-04-29T10:04:40 | 2016-04-29T10:25:03 | 2016-05-17T17:47:28 | 207 | 0 | 0 | 0 | JavaScript | null | null | __author__ = 'mms'
from TwitterSearch import *
from app import app
import tweepy
def search(query='cheeky nandos ledge banter', max=5):
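    """Search Twitter for the given keywords and return up to `max` matching tweet IDs."""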
keywords = query.split()
try:
tso = TwitterSearchOrder()
tso.set_keywords(keywords)
# tso.set_language('en')
# tso.set_include_entities(False)
ts = TwitterSearch(
consumer_key=app.config['TWITTER_CONSUMER_KEY'],
consumer_secret=app.config['TWITTER_CONSUMER_SECRET'],
access_token=app.config['TWITTER_ACCESS_TOKEN'],
access_token_secret=app.config['TWITTER_TOKEN_SECRET']
)
results = []
for tweet in ts.search_tweets_iterable(tso):
results.append(tweet['id'])
# print( '@%s tweeted: %s' % ( tweet['user']['screen_name'], tweet['text'] ) )
max -= 1
if not max: break
# print results
return results
except TwitterSearchException as e: # take care of all those ugly errors if there are some
print(e)
def post(status='New status'):
auth = tweepy.OAuthHandler(app.config['TWITTER_CONSUMER_KEY'], app.config['TWITTER_CONSUMER_SECRET'])
auth.set_access_token(app.config['TWITTER_ACCESS_TOKEN'], app.config['TWITTER_TOKEN_SECRET'])
twitter = tweepy.API(auth)
twitter.update_status(status=status)
| UTF-8 | Python | false | false | 1,343 | py | 24 | tweets.py | 13 | 0.621742 | 0.620253 | 0 | 39 | 33.435897 | 105 |
snake0203cheng/Projects | 10,273,561,787,863 | 488baf6b06ad0ccef737bfcada5888444f7900fd | f7ee9995056170b9bdadc85de60f01b373d80d5e | /youtube_audio_downloader_and_password_generator_and_manager_.py | 0a79645a2a27c5a5d65f5e040365e8428de58a20 | []
| no_license | https://github.com/snake0203cheng/Projects | fa6981bfa0113473ad5e9a8d039cf09f946a9565 | 530e69ef5a06b133fda3d76cc23f67370ede6b5a | refs/heads/main | 2023-06-30T13:01:27.182025 | 2021-08-07T09:41:19 | 2021-08-07T09:41:19 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""YouTube Audio Downloader and Password Generator and Manager .ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1YFECDh3pxcikc37bZvTcXRDUN88GL_Dx
**TASK-2(Part-1)**
YouTube Downloader[Audio of any video]
"""
# pip install youtube_dl  (shell command from the original notebook; run it before executing this script)
from youtube_dl import YoutubeDL
Audio_Downloder = YoutubeDL({'format':'bestaudio'})
# added my favourite song link, you can add yours :D
Link = 'https://youtu.be/MU0Yp0qmYEs'
Audio_Downloder.extract_info(Link)
"""TASK-2[Part-2]
PASSWORD GENERATOR AND MANAGER
"""
import random
Alpha = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ123456789!@€$#%({)}],/<\.>[*^&"
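# Interactive generator: repeatedly ask for a length and a count, then print that many random passwords drawn from the pool above.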
while 1:
    password_len = int(input("Length of password? :"))
count = int(input("Number of passwords you need? "))
for x in range(0, count):
password = ""
for x in range(0,password_len):
password_Alpha = random.choice(Alpha)
password = password + password_Alpha
print("There you go!! : ",password)
import os
username = "nikk" #@param {type:"string"}
password = "Heyyy![[[[]]]" #@param {type:"string"}
print("Creating User and Setting it up")
# Creation of user
os.system(f"useradd -m {username}")
# Add user to sudo group
os.system(f"adduser {username} sudo")
# Set password of user to 'root'
os.system(f"echo '{username}:{password}' | sudo chpasswd")
# Change default shell from sh to bash
os.system("sed -i 's/\/bin\/sh/\/bin\/bash/g' /etc/passwd")
print("User Created and Configured")
| UTF-8 | Python | false | false | 1,587 | py | 1 | youtube_audio_downloader_and_password_generator_and_manager_.py | 1 | 0.688328 | 0.672555 | 0 | 58 | 26.310345 | 92 |
rangermeier/subkoord | 8,821,862,874,397 | 66392c7f00aa2e23c2da1e0c8bd6323a0756cc38 | d7c685b4a33b1043173eefa20d05147c98d8e229 | /django/apps/event/templatetags/add_linefeed.py | 6a7d55ac5c250e56622fa6b480425e2dd7dea063 | []
| no_license | https://github.com/rangermeier/subkoord | 5e2261ccb2700bd00ebbcd6d3d5c59a680b6b315 | 998210ac54bef3d4498a67df90abbcbbc5a3bf41 | refs/heads/master | 2020-05-30T00:04:42.689538 | 2014-02-19T20:58:26 | 2014-02-19T20:58:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django import template
from django.utils.safestring import mark_safe
import re
def add_linefeed_filter(value, num=1):
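    # Insert `num` spaces after every newline so continuation lines are indented; mark_safe keeps Django from escaping the result.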
f = ' '*num
p = re.compile("\n")
value = p.sub(r'\n'+f, value)
return mark_safe(value)
register = template.Library()
register.filter('add_linefeed', add_linefeed_filter)
| UTF-8 | Python | false | false | 312 | py | 79 | add_linefeed.py | 35 | 0.692308 | 0.689103 | 0 | 12 | 25 | 52 |
gmkou/FikaNote | 2,800,318,702,130 | f4dacd63cb66a31cdc28d8e0335bef7537894367 | 8c8061124f9d6deb3ef4c2594c765f3fb421117c | /app/agendaform.py | 4791b782ed8c13c72fd8ed4619410f1f1bd22040 | [
"MIT"
]
| permissive | https://github.com/gmkou/FikaNote | 61ca3d3d239b45a7c314d74c0add6ee3d5265d85 | 94d488cac3cf1930109b97584dc9e7f20451163c | refs/heads/master | 2016-09-15T15:28:45.429190 | 2016-03-18T16:51:25 | 2016-03-18T16:51:25 | 29,051,564 | 2 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django import forms
class AgendaForm(forms.Form):
url = forms.URLField()
| UTF-8 | Python | false | false | 84 | py | 21 | agendaform.py | 12 | 0.72619 | 0.72619 | 0 | 5 | 15.8 | 29 |
devi-prasad/ghx | 9,543,417,361,401 | a316c26f4b61cded513c9458aa10577ab1849981 | 68fb152a9068d65dafa593f6651c7fb614c733df | /users/malli/test_api_simple.py | 68fbcb4beb14eda5004bd5a1b4f7c95952f27b11 | []
| no_license | https://github.com/devi-prasad/ghx | e792938a56bb159868aca5fccfd063942adbe064 | 10a341969b3a6a9ac75de0a520ae14bcba952217 | refs/heads/master | 2021-05-31T00:27:20.669856 | 2016-02-13T07:41:12 | 2016-02-13T07:41:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
from getpass import getpass
import json
# Note that credentials will be transmitted over a secure SSL connection
#url = 'https://api.github.com/users'
#url = 'https://api.github.com/users/satyamalli'
#url = 'https://api.github.com/users/defunkt'
url = 'https://api.github.com/users/sois'
response = requests.get(url)
#print(json.dumps(response.json()[1], indent=1))
print(json.dumps(response.json(), indent=1))
print()
print()
#for index in range(len(response.json()[1])):
# print(response.json()[index])
for key in response.json():
print(key)
print()
print()
print(len(response.json()))
print()
print()
print(response.json()['location'])
print()
| UTF-8 | Python | false | false | 675 | py | 24 | test_api_simple.py | 21 | 0.70963 | 0.703704 | 0 | 29 | 22.275862 | 72 |
ZHANGRENJIE1992/wechatRobot | 5,196,910,459,009 | 5b5cde47acbe0ac6b6f502c1eb62a0d94040013e | e69fb3518d80a8184cc471a471a5813cd55e735a | /model.py | e213a84bee410f5b4c4996ea98e9b7177c1b0e15 | []
| no_license | https://github.com/ZHANGRENJIE1992/wechatRobot | 022dc59fd6351b76af5c30a25f14d1721889afde | e76090b929a1c0ae00fc95da7359d827b8fcc916 | refs/heads/master | 2022-03-24T21:08:44.639459 | 2020-01-04T13:19:35 | 2020-01-04T13:19:35 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pymysql
# Open the database connection
import shortuuid
def createuuid():
return shortuuid.uuid()
class db(object):
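    """Small pymysql helper for the WeChat bot: stores chat messages in CHAT_CONTENT and queries them per day."""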
def __init__(self,keyinfo):
self.get_conn(keyinfo)
    # Open the database connection
def get_conn(self,keyinfo):
try:
self.a = 1
self.conn = pymysql.connect(
#host='127.0.0.1',
#port=3306,
#user='root',
#password='ZRJ19920708',
#charset='utf8',
#database='itchat'
host=keyinfo[0],
port=int(keyinfo[1]),
user=keyinfo[2],
password=keyinfo[3],
charset='utf8',
database=keyinfo[4]
#,
#cursor=pymysql.cursors.DictCursor
)
        except pymysql.Error as e:
            print(e, "connection failed")
    # Close the database connection
def close_conn(self):
try:
if self.conn:
self.conn.close()
except pymysql.Error as e:
print(e,"close failed")
    # Insert a chat record
def create_data(self,wechatid,content,time,groupid,groupsize):
try:
            # build the SQL statement
sql = ("INSERT INTO `CHAT_CONTENT`(`uuid`,`WechatID`,`Content`,`Time`,`GroupName`,`GroupSize`) VALUES"
"(%s,%s,%s,%s,%s,%s);")
            # get a cursor
cursor = self.conn.cursor()
            # execute the SQL
contentid = createuuid()
cursor.execute(sql,(contentid,wechatid,content,time,groupid,groupsize))
            # commit the transaction
self.conn.commit()
except pymysql.Error as e:
            print(e, "insert failed")
            #print('error')
            self.conn.commit() # if the commit above raised an error, still commit whatever statements succeeded
        # close the connection
cursor.close()
self.close_conn()
def get_dailyinfo(self,year,month,day):
        # build the SQL statement
sql = ('SELECT ct.* FROM CHAT_CONTENT AS ct WHERE YEAR(ct.Time) = %s AND MONTH(ct.Time) = %s AND DAY(ct.Time) = %s ORDER BY ct.uuid DESC ;')
        # get a dictionary cursor
cursor = self.conn.cursor(cursor=pymysql.cursors.DictCursor)
        # execute the SQL
cursor.execute(sql,[year,month,day])
a = cursor.fetchall()
        # close the connection
cursor.close()
self.close_conn()
return a
#def main():
# sql = db()
# sql.get_more(3,2)
#sql.create_data(contentid=1,wechatid='111',content="第一句话",time="20191220000000",groupid=111,groupsize=4)
#sql.create_data(2,'M112',"第一句话","20191221100000",1111,6)
# sql.get_dailyinfo()
#if __name__ == '__main__':
# main()
# use the cursor() method to create a cursor object
#cursor = db.cursor()
# create the table with a prepared statement
#sql = """
#CREATE TABLE CHAT_CONTENT(
#id INT auto_increment PRIMARY KEY ,
#WechatID CHAR(30) NOT NULL,
#Content TEXT,
#Time TIMESTAMP,
#GroupName INT,
#GroupSize INT
#)ENGINE=innodb DEFAULT CHARSET=utf8;"""
# use execute() to run the SQL; if the table already exists, drop it first
#cursor.execute(sql)
# close the database connection
#db.close() | UTF-8 | Python | false | false | 2,669 | py | 7 | model.py | 5 | 0.645601 | 0.614209 | 0 | 117 | 19.700855 | 142 |
RL-OtherApps/website-addons | 16,217,796,515,622 | 734b140ffba7c04e338b9c122259fde35b06c4c9 | e344e321ec7a728510843008761f84029178bb9d | /website_multi_company/models/ir_actions.py | 751edb5397a59d199d784be1c04c91de6073f416 | [
"MIT"
]
| permissive | https://github.com/RL-OtherApps/website-addons | 004b8c870af40d25b10589203f3e7675d22987ae | b0903daefa492c298084542de2c99f1ab13cd4b4 | refs/heads/12.0 | 2023-05-15T11:28:55.210348 | 2020-10-31T19:15:02 | 2020-10-31T19:15:02 | 262,059,849 | 0 | 0 | MIT | true | 2021-06-09T05:50:43 | 2020-05-07T13:36:17 | 2020-10-31T19:15:10 | 2021-06-09T05:50:42 | 45,657 | 0 | 0 | 0 | JavaScript | false | false | # Copyright 2018 Ivan Yelizariev <https://it-projects.info/team/yelizariev>
# License MIT (https://opensource.org/licenses/MIT).
from odoo import models
class IrActionsActUrl(models.Model):
_inherit = "ir.actions.act_url"
def read(self, fields=None, load="_classic_read"):
res = super(IrActionsActUrl, self).read(fields=fields, load=load)
for data in res:
if data["xml_id"] == "website.action_website":
url = self._action_website_url(data)
if url:
data["url"] = url
return res
def _action_website_url(self, data):
website = self.env.user.backend_website_id
if not website:
website = self.env["website"].search(
[("company_id", "=", self.env.user.company_id.id)]
)
if len(website) != 1:
return False
if website.domain in ["localhost", "0.0.0.0"] or website.domain.endswith(
".example"
):
return False
if (
self.env["ir.config_parameter"]
.sudo()
.get_param("web.base.url", "")
.startswith("https://")
):
scheme = "https"
else:
scheme = "http"
url = "{}://{}/".format(scheme, website.domain)
return url
| UTF-8 | Python | false | false | 1,343 | py | 361 | ir_actions.py | 147 | 0.5242 | 0.517498 | 0 | 42 | 30.97619 | 81 |
victor0801/DjangoProjekt | 13,142,599,942,195 | b8dd7eccbddb3712959952e72c785edf809ea110 | 1f7672ffd066d829dabf6f1df632836707ebcdd9 | /core/admin.py | 9e11da9cec6e95d633c1942a54401295222716d3 | [
"Apache-2.0"
]
| permissive | https://github.com/victor0801/DjangoProjekt | dac38686f0394d2f0ac4f51264e6d08d36f3cf40 | 2efec16f9159a33457172f390301255709c33066 | refs/heads/master | 2021-07-24T08:30:42.726915 | 2017-10-31T00:10:10 | 2017-10-31T00:10:10 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from core.models import Curso
# Register your models here.
admin.site.register(Curso)
| UTF-8 | Python | false | false | 121 | py | 7 | admin.py | 6 | 0.801653 | 0.801653 | 0 | 6 | 19.166667 | 32 |
jedikarix/TASS-tatra-trips | 12,189,117,232,941 | cb393722455a746a2969a2ae906cb12c0aa4d791 | 7da5209258f0daea85a120fff69421b6741c3b75 | /authors_graph.py | f6eb1902d7ca318d832b57d9792bb9b2855f2edc | []
| no_license | https://github.com/jedikarix/TASS-tatra-trips | 220ab6f639445bf84cad908b8da70be5d1ed2d7e | e66494a474a1fccd32d7bd57ffa2b2115f8c6de8 | refs/heads/master | 2020-12-14T16:52:25.257446 | 2020-01-22T22:51:36 | 2020-01-22T22:51:36 | 234,814,788 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import collections
import math
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from threads_analysis import all_threads
from user import User
def build_users_graph():
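    # One node per forum user; a directed edge replier -> thread author, weighted by how many replies that replier made to the author's threads.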
threads = all_threads('resources/threads/')
user_graph = nx.DiGraph()
for thread in threads:
thread_info = thread['thread_info']
author_name = thread_info['author']
answers_num = thread_info['answers_num']
title = thread_info['title']
user = next((user_graph.nodes[x]['dict_attr'] for x in user_graph.nodes if x == author_name), None)
if user is None:
user = User(author_name)
user_graph.add_node(author_name, dict_attr=user)
user.create_thread(title, answers_num)
for answer in thread['answers']:
replier_name = answer['author']['name']
replier = next((user_graph.nodes[x]['dict_attr'] for x in user_graph.nodes if x == replier_name), None)
if replier is None:
replier = User(replier_name)
user_graph.add_node(replier_name, dict_attr=replier)
replier.create_post()
if author_name != replier_name:
if not user_graph.has_edge(replier_name, author_name):
user_graph.add_edge(replier_name, author_name, weight=1)
else:
user_graph.edges[replier_name, author_name]['weight'] += 1
return user_graph
def draw_graph(graph, with_labels=False):
pos = nx.spring_layout(graph)
nx.draw_networkx(graph, node_size=40, alpha=0.8, with_labels=False, font_size=8, pos=pos)
if with_labels:
labels = dict((n, n) for n, d in graph.nodes(data=True))
pos_higher = {}
y_off = 0.05 # offset on the y axis
x_off = -0.05 # offset on the x axis
for k, v in pos.items():
pos_higher[k] = (v[0] + x_off, v[1] + y_off)
nx.draw_networkx_labels(graph, pos_higher, labels, font_size=8)
plt.show()
def draw_degree_histogram(graph):
degree_sequence = sorted([d for n, d in graph.degree()], reverse=True)
    title = 'Histogram of node degrees'
    y_label = 'Number of nodes with a given degree'
    x_label = 'Node degree'
plot_histogram(degree_sequence, 2, title, x_label, y_label)
def draw_in_degree_histogram(graph):
degree_sequence = sorted([d for n, d in graph.in_degree()], reverse=True)
    title = 'Histogram of node in-degrees'
    y_label = 'Number of nodes with a given in-degree'
    x_label = 'Node in-degree'
plot_histogram(degree_sequence, 2, title, x_label, y_label)
def draw_out_degree_histogram(graph):
degree_sequence = sorted([d for n, d in graph.out_degree()], reverse=True)
    title = 'Histogram of node out-degrees'
    y_label = 'Number of nodes with a given out-degree'
    x_label = 'Node out-degree'
plot_histogram(degree_sequence, 2, title, x_label, y_label)
def draw_edges_weights_histogram(graph):
weights_sequence = sorted([graph.get_edge_data(*edge)['weight'] for edge in graph.edges], reverse=True)
    title = 'Histogram of link (edge) weights between authors'
    y_label = 'Number of edges with a given weight'
    x_label = 'Edge weight'
plot_histogram(weights_sequence, 20, title, x_label, y_label)
def show_activity_of_users_with_no_threads(graph):
not_commented_users = [(node, graph.nodes[node[0]]['dict_attr'].threads_number)
for node in graph.in_degree if node[1] == 0]
users_with_no_threads = [user[0][0] for user in not_commented_users if user[1] == 0]
degree_sequence = sorted([node[1] for node in graph.out_degree if node[0] in users_with_no_threads], reverse=True)
    title = 'Activity histogram for users who never created a thread of their own'
    y_label = 'Number of nodes with a given degree'
    x_label = 'Number of distinct users whose threads they replied to'
plot_histogram(degree_sequence, 2, title, x_label, y_label)
def plot_histogram(sequence, y_step, title, x_label, y_label):
sequence_count = collections.Counter(sequence)
item_value, cnt = zip(*sequence_count.items())
fig, ax = plt.subplots(figsize=(15, 5))
ax.set_xticks([d for d in item_value if (d - 1) not in item_value or d % 2 != 0]) # if two labels in a row should
# be visible, remove the second one if even
plt.yticks(np.arange(1, max(cnt), y_step))
plt.bar(item_value, cnt, width=1.0, color='b')
plt.title(title)
plt.ylabel(y_label)
plt.xlabel(x_label)
plt.show()
# returns the users whose threads drew comments from the largest number of distinct other users
def get_most_commented_users(graph):
max_in_degrees = sorted(set([node[1] for node in graph.in_degree]), reverse=True)
popular_users = dict()
popular_users['top1'] = [(node, graph.nodes[node[0]]['dict_attr'].threads_number)
for node in graph.in_degree if node[1] == max_in_degrees[0]]
popular_users['top2'] = [(node, graph.nodes[node[0]]['dict_attr'].threads_number)
for node in graph.in_degree if node[1] == max_in_degrees[1]]
popular_users['top3'] = [(node, graph.nodes[node[0]]['dict_attr'].threads_number)
for node in graph.in_degree if node[1] == max_in_degrees[2]]
return popular_users
# returns the edges with the three highest weights as ((commenter, author), weight) tuples, grouped by weight rank
def get_most_important_edges(graph):
max_edge_weights = sorted([graph.get_edge_data(*edge)['weight'] for edge in graph.edges], reverse=True)
popular_edges = dict()
popular_edges['top1'] = [(edge, graph.get_edge_data(*edge)['weight'])
for edge in graph.edges if graph.get_edge_data(*edge)['weight'] == max_edge_weights[0]]
popular_edges['top2'] = [(edge, graph.get_edge_data(*edge)['weight'])
for edge in graph.edges if graph.get_edge_data(*edge)['weight'] == max_edge_weights[1]]
popular_edges['top3'] = [(edge, graph.get_edge_data(*edge)['weight'])
for edge in graph.edges if graph.get_edge_data(*edge)['weight'] == max_edge_weights[2]]
return popular_edges
# returns the users who commented on the threads of the largest number of distinct other users
def get_most_commenting_users(graph):
max_out_degrees = sorted(set([node[1] for node in graph.out_degree]), reverse=True)
popular_users = dict()
popular_users['top1'] = [node for node in graph.out_degree if node[1] == max_out_degrees[0]]
popular_users['top2'] = [node for node in graph.out_degree if node[1] == max_out_degrees[1]]
popular_users['top3'] = [node for node in graph.out_degree if node[1] == max_out_degrees[2]]
return popular_users
# returns isolated users: no one commented on their threads and they commented on no one else's
def get_lonely_users(graph):
lonely_users = [(node, graph.nodes[node[0]]['dict_attr'].threads_number)
for node in graph.degree if node[1] == 0]
return lonely_users
# calculates user (v) authority as A(v) = ln[Σ w(E_u1,v)] + ln[Σ w(E_v,u2)]. If an argument of a logarithm equals 0 it
# is replaced with 1, as ln(1) = 0. Σ w(E_u1,v) is the sum of weights of all edges that end in v and start in some u1
# (the total number of replies the user's threads received). Σ w(E_v,u2) is the sum of weights of all edges that start
# in v and end in some u2 (the total number of replies the user made on other users' threads).
def get_user_authority(graph, name):
in_edges_weights = sum([graph.get_edge_data(*edge)['weight'] for edge in graph.in_edges if edge[1] == name])
    out_edges_weights = sum([graph.get_edge_data(*edge)['weight'] for edge in graph.out_edges if edge[0] == name])
in_edges_weights = in_edges_weights if in_edges_weights != 0 else 1
out_edges_weights = out_edges_weights if out_edges_weights != 0 else 1
return math.log(in_edges_weights) + math.log(out_edges_weights)
| UTF-8 | Python | false | false | 8,046 | py | 16 | authors_graph.py | 11 | 0.650875 | 0.63975 | 0 | 181 | 43.198895 | 120 |
pytorch/benchmark | 14,869,176,794,723 | 90ccec98daf0bb316494ca35e95bd25ed26b454e | c168fe819b446640957e5e310ef89fcfe28662b3 | /torchbenchmark/models/nvidia_deeprecommender/reco_encoder/model/__init__.py | bad43256895b741cc3b00755947ffda0cc779a5d | [
"MIT",
"BSD-3-Clause"
]
| permissive | https://github.com/pytorch/benchmark | 7b55e8d714de2ea873e03df43811aab3848485dd | df4da9bdff11a2f948d5bd4ac83da7922e6f44f4 | refs/heads/main | 2023-08-29T13:06:09.671728 | 2023-08-28T16:51:55 | 2023-08-28T16:51:55 | 92,541,759 | 685 | 220 | BSD-3-Clause | false | 2023-09-14T18:10:18 | 2017-05-26T19:21:12 | 2023-09-12T15:10:01 | 2023-09-14T18:10:14 | 248,981 | 663 | 218 | 100 | Python | false | false | # Copyright (c) 2017 NVIDIA Corporation
| UTF-8 | Python | false | false | 40 | py | 601 | __init__.py | 453 | 0.775 | 0.675 | 0 | 1 | 39 | 39 |
edelooff/sqlalchemy-hybrid-utils | 3,994,319,614,803 | d4af8b7402453b0cf4d4c463a62388176f9e0d3d | c1bcb9aa4121b48a7d0bbbf862f4868a7d3aea89 | /tests/test_multi_column_flag.py | 0b33ccff32ddfa27e3e3ff46a131be8ed73df9ca | [
"BSD-2-Clause"
]
| permissive | https://github.com/edelooff/sqlalchemy-hybrid-utils | b1e7713342ed6c588e553808b708a84e6f6af203 | 69723729c61843998c9840f5b561e117c1c51298 | refs/heads/master | 2022-12-20T08:43:02.697956 | 2020-09-28T21:53:02 | 2020-09-28T21:53:02 | 263,436,848 | 0 | 0 | BSD-2-Clause | false | 2020-09-28T21:53:03 | 2020-05-12T19:48:53 | 2020-09-27T12:33:52 | 2020-09-28T21:53:02 | 96 | 0 | 0 | 1 | Python | false | false | from datetime import datetime
import pytest
from sqlalchemy_hybrid_utils import column_flag
@pytest.mark.parametrize(
"sent_at, delivered_at, expected",
[
pytest.param(None, None, False, id="not shipped"),
pytest.param(datetime(2020, 1, 1), None, True, id="in transit"),
pytest.param(None, datetime(2020, 1, 1), False, id="magic arrival"),
],
)
def test_flag_initial_value(Message, sent_at, delivered_at, expected):
message = Message(sent_at=sent_at, delivered_at=delivered_at)
assert message.in_transit == expected
def test_flag_runtime_evaluation(Message):
message = Message(content="Spam")
message.sent_at = datetime(2020, 1, 1)
assert message.in_transit
message.delivered_at = datetime(2020, 1, 2)
assert not message.in_transit
def test_flag_after_database_read(Message, session):
msg = Message(sent_at=datetime(2020, 1, 1))
session.add(msg)
session.commit()
assert msg.in_transit
def test_flag_select_expr(Message, session):
monday = datetime(2020, 6, 1)
tuesday = datetime(2020, 6, 2)
session.add(Message(sent_at=monday))
session.add(Message(sent_at=monday, delivered_at=tuesday))
session.add(Message(sent_at=tuesday))
assert session.query(Message).filter(Message.in_transit).count() == 2
def test_multi_column_no_default(Message):
col_one = Message.__table__.c.content
col_two = Message.__table__.c.sent_at
with pytest.raises(TypeError, match="default for multi-column expression"):
column_flag(col_one & col_two, default="baz")
| UTF-8 | Python | false | false | 1,574 | py | 18 | test_multi_column_flag.py | 14 | 0.686785 | 0.659466 | 0 | 51 | 29.862745 | 79 |
CXY-YSL/MGZDTS | 7,164,005,492,677 | a23c1a725085aeb8da7f801530afc618a49a564c | d623da68918649a6fe6510939475988c0f3bff7f | /Python/PythonCode/Chapter04/find函数的使用.py | 0e11125b59a018a7d6933af9ea7e936558bd2e16 | [
"MIT"
]
| permissive | https://github.com/CXY-YSL/MGZDTS | a5e36501a935450697d34cdf798dc33eea2acc08 | 28279fad2ff76a2d22da5c82654d5ab0f7b6d461 | refs/heads/master | 2021-05-25T13:31:39.145299 | 2020-04-17T04:48:54 | 2020-04-17T04:48:54 | 253,772,958 | 3 | 0 | MIT | false | 2020-04-16T18:11:37 | 2020-04-07T11:27:14 | 2020-04-15T15:09:27 | 2020-04-16T18:11:18 | 67,648 | 0 | 0 | 4 | HTML | false | false | mystr ='hello world itheima and itheimaApp'
index=mystr.find("itheima")
print(index)  # 12, the index of the first occurrence of "itheima"
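# A short extra sketch (added for illustration): find() returns -1 when the substring is missing,
# and accepts an optional start index for locating later occurrences.
print(mystr.find("missing"))      # -1
print(mystr.find("itheima", 13))  # 24, the second occurrence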
| UTF-8 | Python | false | false | 85 | py | 754 | find函数的使用.py | 393 | 0.776471 | 0.776471 | 0 | 3 | 27.333333 | 43 |
anias9/comic-book-app | 3,934,190,074,607 | 5beb47c880e16b4851254aad0c32e6b63015f7cc | 042547b9322aff331fa3d7d8eebab47add8faabc | /komiksy/migrations/0012_auto_20180402_1822.py | 1789abb2060e292447d7d7fe387e7221c63820e3 | []
| no_license | https://github.com/anias9/comic-book-app | 0dd88ebb84fb465c19903f4bad9cc3b387fec6a8 | 8e09fb751607d224115047cb784d34cac5c4bf29 | refs/heads/master | 2023-02-20T02:54:39.180600 | 2021-01-22T20:03:12 | 2021-01-22T20:03:12 | 128,771,631 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.0.3 on 2018-04-02 16:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('komiksy', '0011_auto_20180402_1741'),
]
operations = [
migrations.AlterField(
model_name='elements',
name='comics',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='komiksy.Comic'),
),
]
| UTF-8 | Python | false | false | 472 | py | 57 | 0012_auto_20180402_1822.py | 34 | 0.629237 | 0.563559 | 0 | 19 | 23.842105 | 101 |
aarya22/app-AFQ-seg | 3,633,542,367,283 | 2b0077fb62da7c18672123b00e239a7077716ca1 | 21c9eb19a9e8d1465596feb24f7d4d90b8321b4d | /main.py | a87a7307487f0f6e0f19e941cc6612f7fe0467ef | []
| no_license | https://github.com/aarya22/app-AFQ-seg | 5961b2a5d92d75739f7500cf3c69622ef33a1077 | 36cb5907a94a3899293476efc41eee29251de8e4 | refs/heads/master | 2020-12-10T03:18:20.280544 | 2017-07-11T17:56:33 | 2017-07-11T17:56:33 | 95,477,305 | 1 | 2 | null | false | 2017-07-11T18:09:43 | 2017-06-26T18:33:06 | 2017-06-29T20:33:35 | 2017-07-11T17:56:34 | 27 | 0 | 3 | 1 | Python | null | null | import os.path as op
import numpy as np
import nibabel as nib
import dipy.data as dpd
import json
from dipy.data import fetcher
from dipy.io.gradients import read_bvals_bvecs
from dipy.core.gradients import gradient_table
from nibabel.streamlines import Tractogram, save
import AFQ.utils.streamlines as aus
import AFQ.data as afd
import AFQ.tractography as aft
import AFQ.registration as reg
import AFQ.dti as dti
import AFQ.segmentation as seg
import os
def main():
with open('config.json') as config_json:
config = json.load(config_json)
data_file = str(config['data_file'])
data_bval = str(config['data_bval'])
data_bvec = str(config['data_bvec'])
tck_data = str(config['tck_data'])
img = nib.load(data_file)
print("Calculating DTI...")
if not op.exists('./dti_FA.nii.gz'):
dti_params = dti.fit_dti(data_file, data_bval, data_bvec, out_dir='.')
else:
dti_params = {'FA': './dti_FA.nii.gz',
'params': './dti_params.nii.gz'}
#Use this one eventually
tg = nib.streamlines.load(tck_data)
#tg = nib.streamlines.load('track.tck').tractogram
streamlines = tg.tractogram.apply_affine(img.affine).streamlines
streamlines = streamlines[::100]
templates = afd.read_templates()
bundle_names = ["CST", "ILF"]
tract_names = {}
for name in bundle_names:
for hemi in ['_R', '_L']:
tract_names[name + hemi] = {'ROIs': [templates[name + '_roi1' + hemi],
templates[name + '_roi1' + hemi]],
'rules': [True, True]}
print("Registering to template...")
MNI_T2_img = dpd.read_mni_template()
bvals, bvecs = read_bvals_bvecs(data_bval, data_bvec)
gtab = gradient_table(bvals, bvecs, b0_threshold=100)
mapping = reg.syn_register_dwi(data_file, gtab)
reg.write_mapping(mapping, './mapping.nii.gz')
print("Segmenting fiber groups...")
tract_anatomy = seg.segment(data_file,
data_bval,
data_bvec,
streamlines,
tract_names,
reg_template=MNI_T2_img,
mapping=mapping,
as_generator=False,
affine=img.affine)
path = os.getcwd() + '/tract/'
if not os.path.exists(path):
os.makedirs(path)
for fg in tract_anatomy:
streamlines = tract_anatomy[fg]
fname = fg + ".tck"
#aus.write_trk(fname, streamlines)
trg = nib.streamlines.Tractogram(streamlines, affine_to_rasmm=img.affine)
nib.streamlines.save(trg, path+'/'+fname)
if __name__ == "__main__":
    main()
| UTF-8 | Python | false | false | 3,116 | py | 7 | main.py | 6 | 0.512837 | 0.509307 | 0 | 84 | 34.309524 | 86 |
Nativeatom/Dianping | 1,855,425,878,694 | 8d23b8547c53ae18295326fc99c5b9a4c4b1e27f | 97378d9d094832e9a99710a16c26b2c9280eeca2 | /data_and_preprocess/GetReview.py | 2beaa625e0869d2cb4f96225868b448d20121251 | []
| no_license | https://github.com/Nativeatom/Dianping | 9da8417705d5afd51b365dc12ca23534ae259ec6 | dbcb75bd89acae0d2384f30dcf7eaf5e85a5e5f8 | refs/heads/master | 2021-01-22T21:28:06.189612 | 2017-04-13T00:16:18 | 2017-04-13T00:16:18 | 85,435,194 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 12 12:20:42 2017
@author: Y40
"""
import json
from bs4 import BeautifulSoup
import threading
from requests import Session
import urllib2
import time
import random
import chardet
#import string
import codecs
#from sklearn import feature_extraction
#from sklearn.feature_extraction.text import TfidfTransformer
#from sklearn.feature_extraction.text import CountVectorizer
class dazp_bj:
def __init__(self,category):
self.baseUrl='http://www.dianping.com'
self.bgurl=category[0][0]+'/review_more'
self.typename=category[0][1]
self.comment = category[0][2]
self.status = category[1]
self.page= 176 # 43 for 18
self.pagenum = 20
self.headers={
"Host":"www.dianping.com",
"User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
"Referer":category[0][0]+'/review_all',
}
# def PageNum(self,bgurl):
# html=self.s.post(bgurl,headers=self.headers).text
# soup=BeautifulSoup(html,'lxml')
# pagelist = soup.find('div',class_='comment mode').find('div',class_='Pages').find('div',class_='Pages').find_all('a')
# return len(pagelist)
def start(self):
        self.s=Session() # create a Session() object
print self.typename, self.bgurl
self.typename = self.typename.decode('utf-8')
html=self.s.post(self.bgurl,headers=self.headers).text
soup_page=BeautifulSoup(html,'lxml')
CommentTotal = soup_page.find('div',class_='comment-tab').find('div',class_='tabs').find('span',class_='active').em.get_text()
Comment_T = CommentTotal.replace("(",'').replace(')','')
        if Comment_T.isdigit():
self.comment = int(Comment_T)
print "评论update: %d 条\n"%(self.comment)
        # get the number of review pages
page = []
for a in soup_page.find('div',class_='comment-mode').find('div',class_='Pages').find('div',class_='Pages').find_all('a'):
now_page = a.get_text().encode('utf-8')
page.append(now_page)
#time.sleep(random.randint(0,20))
time.sleep(random.randint(0,12))
self.pagenum = int(page[-2])
print "评论共有", self.pagenum, "页 ", self.comment, "条"
#
comment_count = 0
        dazp_bj.__parseHtml(self,self.bgurl,comment_count) # call the __parseHtml method
print self.typename, "comment getting finished\n"
try:
f2 = open('"//Dianping_data//comment_index.txt','a')
#code = chardet.detect(self.typename)['encoding']
f2.write(self.typename.encode('utf-8')+'.txt' + "\r\n")
except UnicodeEncodeError:
print self.typename," 写入失败\n"
finally:
pass
        print 'get data of %s done'%(self.status['Restaurant'])
html=self.s.post(self.bgurl,headers=self.headers).text
# soup=BeautifulSoup(html,'lxml')
# self.pagenum=int(soup.find('div',class_='comment mode').find('div',class_='Pages').find('div',class_='Pages').find_all('a')[-2].get_text().encode('utf-8')) #设置最大页面数目
# maxpage = dazp_bj.PageNum(self, self.bgurl)
# print "Review has ", maxpage, "pages\n"
def ch_en_utfenode(self, text):
pass
def __parseHtml(self,preurl, comment_count):
comment=dict()
html=self.s.post(preurl,headers=self.headers).text
soup=BeautifulSoup(html,'lxml')
        name2 = ['商家','用户名','总评','口味','环境','服务','评论','日期','人均','赞','喜欢的菜'] # merchant, username, overall, taste, environment, service, review, date, spend per person, likes, recommended dishes
comment[name2[0]] = self.typename.encode('utf-8')
fail = 0
        # how the review page elements are queried
for li in soup.find('div',class_='comment-list').ul.find_all('li'):
try:
comment[name2[1]] = li.find('div',class_='pic').find('p',class_='name').a.get_text().encode('utf-8')
                print comment[name2[1]] # username
except:
continue
Comment = li.find('div',class_="content")
comment[name2[2]] = Comment.find('div',class_='user-info').span['title'].encode('utf-8')
            print comment[name2[2]] # overall rating
comment[name2[3]] = Comment.find('div',class_='user-info').find('div',class_='comment-rst').find_all('span')[0].get_text().encode('utf-8')
#            print comment[name2[3]] # taste
comment[name2[4]] = Comment.find('div',class_='user-info').find('div',class_='comment-rst').find_all('span')[1].get_text().encode('utf-8')
#            print comment[name2[4]] # environment
comment[name2[5]] = Comment.find('div',class_='user-info').find('div',class_='comment-rst').find_all('span')[2].get_text().encode('utf-8')
#            print comment[name2[5]] # service
# for br in Comment.find('div',class_='comment-txt').find('div',class_='J_brief-cont').find_all('br'):
# comment[name2[6]] + = Comment.find('div',class_='comment-txt').find('div',class_='J_brief-cont').find('br').get_text
# except:
comment[name2[6]] = Comment.find('div',class_='comment-txt').find('div',class_='J_brief-cont').get_text().encode('utf-8').lstrip().rstrip()
print comment[name2[6]]
comment[name2[7]] = Comment.find('div', attrs={'class':"misc-info"}).find('span',class_='time').get_text().encode('utf-8')
#            print comment[name2[7]] # date
try:
comment[name2[8]] = Comment.find('div',class_='user-info').find('span',class_="comm-per").get_text().encode('utf-8')[4:]
print comment[name2[8]]
except:
                comment[name2[8]]=0   # no average spend per person given
comment[name2[9]] = int(Comment.find('div', attrs={'class':"misc-info"}).find('span',class_='col-right').find('span',class_='countWrapper').a['data-count'])
print comment[name2[9]]
try:
comment[name2[10]] = Comment.find('div',class_='comment-recommend').find_all('a').get_text().encode('utf-8')
except:
                comment[name2[10]]='无' # '无' means none (no recommended dishes)
# print comment[name2[10]]
self.status['comment']+=1
comment_count+=1
#time.sleep(random.randint(0,45))
time.sleep(random.randint(0,35))
#
##Addition over
#
try:
with open(self.typename+'.json','a') as outfile:
json.dump(comment,outfile,ensure_ascii=False)
with open(self.typename+'.json','a') as outfile:
outfile.write(',\n')
print "Comment: #", comment_count,"/",self.comment,"\n"
except AttributeError:
fail+=1
print "评论写入失败: ", fail
pass
except UnicodeEncodeError:
try:
self.typename = self.typename.encode('utf-8')
#decode(chardet.detect(self.typename)['encoding'])
except UnicodeEncodeError:
try:
comment_list = list(comment)
fileObject = open(self.typename + '.txt', 'w')
for ip in comment_list:
fileObject.write(ip)
fileObject.write('\n')
                        fileObject.close()
                        print comment_count," review failed to write\n"
                        comment_count+=1
except UnicodeEncodeError:
print comment_count," 评论写入失败\n"
comment_count+=1
pass
except UnicodeEncodeError:
print comment_count," 评论写入失败\n"
comment_count+=1
pass
try:
with open(self.typename+'.json','a') as outfile:
json.dump(comment,outfile,ensure_ascii=False)
# except IOError:
# file=codecs.open(self.typename+'.txt','w','utf-8')
# for key in comment:
# #for j in comment_get[i]:
# # j = j + ' '
# file.write(str(key) + ': ')
# file.write(comment[key])
# file.write(',')
# file.write('\r\n')
except UnicodeEncodeError:
continue
try:
with open("//Dianping_data//Comment//" + self.typename+'.json','a') as outfile:
outfile.write(',\n')
except IOError:
outfile.write(',\n')
pass
print "Comment: #", comment_count,"/",self.comment,"\n"
self.status['user-name'] = comment[name2[1]]
self.status['comment'] = comment_count
# except IOError:
# fail+=1
# print"写入IOError ", fail
# pass
print "评论第", self.page,"页结束\n"
self.page+=1
self.status['page'] = self.page
if self.page<self.pagenum:
            self.nexturl=self.bgurl+soup.find('div',class_='comment-mode').find('div',class_='Pages').find('div',class_='Pages').find('a',class_='NextPage')['href'] # get the link of the next page
time.sleep(random.randint(0,10))
print "评论第", self.page,"/",self.pagenum,"页"
if self.page == self.pagenum:
return 1
try:
dazp_bj.__parseHtml(self,self.nexturl, comment_count)
except AttributeError:
fail+=1
print "Error: ",fail
pass
except IOError:
fail+=1
print "出现IOError ",fail
pass
except urllib2.URLError:
print "出现网络连接错误\n"
local_time = time.strftime('%Y-%m-%d %H:%M:%S %Z',time.localtime(time.time()))
self.status['time'] = local_time
                print self.status # show the breakpoint info
f3 = open('comment_breakpoint','a')
for key in self.status:
f3.write(key + ':' + self.status[key] + ' ')
f3.write('\r\n')
time.sleep(600)
pass
#New function
if __name__=='__main__':
name = []
link = []
comNum = []
content = []
finish_num = [0,1,2,5,6,26]
f = open('Restaurant_Nanjing.txt','r')
for line in f.readlines():
content.append(line)
for line in content:
split = line.split('"')
resname = split[3]
name.append(resname)
linkage = split[-6]
link.append(linkage)
commentNumber = split[-1].lstrip(": ").replace("},",'')
comNum.append(int(commentNumber))
if len(link)!=len(name):
print "get link error for number unmatch\n"
else:
print len(link) ,"restaurants included\n","Example:",name[0],": ",link[0],"Comment: ",comNum[0],"\n"
f.close()
start_num = 28
    status = dict() # record the crawler's current state
    for i in range(start_num,len(link)-1):# to restart, change start_num; 3 4 11 12 13 not done yet, i=5 has few reviews and is handy for testing, 18 unfinished
#       19 stopped at page 29 (578/714); 20 21 24 25 27 not written yet
linkex = link[i]
nameex = name[i]#.decode('utf-8')
commex = comNum[i]
status = {'Restaurant':name[i], 'link': link[i], 'page':1, 'comment':0, 'user-name':'爬壁神偷'}
cat=[(linkex,nameex,commex),status]
obj=list()
obj.append(dazp_bj(cat))
        [threading.Thread(target=foo.start,args=()).start() for foo in obj] # run the tasks in the obj list on separate threads
| UTF-8 | Python | false | false | 11,902 | py | 4 | GetReview.py | 2 | 0.52328 | 0.503735 | 0 | 260 | 43.273077 | 175 |
sandeshprabhu02/EmoContext-Microsoft-Challenge | 8,203,387,561,997 | 9479dc631f325371f02565772aafc564c28a64df | 3812d23a4b6b3107ad9bd96b82b1da121084be16 | /Library/Sem_Clean_Start.py | 9fe84079fdfb86164992583d7d365c7a3df59e18 | []
| no_license | https://github.com/sandeshprabhu02/EmoContext-Microsoft-Challenge | 3d3390bb07793dc22bc21c5f2d7847f9d91ebcdd | 055a15428e56253d008d6de1129fc25e54beadce | refs/heads/master | 2022-04-17T17:43:00.966397 | 2020-04-19T08:36:26 | 2020-04-19T08:36:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
sys.path.append("C:\\Anaconda3\\envs\\tensorflow\\python35.zip")
sys.path.append("C:\\Anaconda3\\envs\\tensorflow\\DLLs")
sys.path.append("C:\\Anaconda3\\envs\\tensorflow\\lib")
sys.path.append("C:\\Anaconda3\\envs\\tensorflow")
sys.path.append("C:\\Anaconda3\\envs\\tensorflow\\lib\\site-packages")
sys.path.append("C:\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\setuptools-27.2.0-py3.5.egg")
print(sys.path)
import numpy as np
import pandas as pd
import emoji
import re
#Read training data
data = pd.read_table('../Data/train.txt')
#Regex
special_regex = re.compile('[-"@_+=#$%^&*()<>/\|}{~:;,]+')
question_regex = re.compile('[?+]')
ex_regex = re.compile('[!]+')
dot_regex = re.compile('[.]+')
num_regex = re.compile('[^[0-9]+$]')
apho_rege = re.compile('[\']')
alphanum_regex = [r'\w+']
#Unknown chars
unk_pattern = re.compile("["
"\\U0001F9D0"
"\\U000FE339"
"\\U000023EA"
"\\U00000E3F"
"\\U000020B9"
"\\U00002211"
"\\U00002267"
"\\U00002207"
"\\U00002248"
"\\U00002284"
"\\U000000BF"
"\\U000000AC"
"\\U000000B0 - \\U000000B6"
"\\U00002022"
"\\U00000296"
"\\U000003C9"
"\\U000000D7"
"]+", flags=re.UNICODE)
#Replace Special chars
for i, row in data.iterrows():
data.set_value(i, 'turn1', special_regex.sub(r' SPLCHAR ', row['turn1']))
data.set_value(i, 'turn2', special_regex.sub(r' SPLCHAR ', row['turn2']))
data.set_value(i, 'turn3', special_regex.sub(r' SPLCHAR ', row['turn3']))
#Replace Question chars
for i, row in data.iterrows():
data.set_value(i, 'turn1', question_regex.sub(r' QCHAR ', row['turn1']))
data.set_value(i, 'turn2', question_regex.sub(r' QCHAR ', row['turn2']))
data.set_value(i, 'turn3', question_regex.sub(r' QCHAR ', row['turn3']))
#Replace Exclamatory chars
for i, row in data.iterrows():
data.set_value(i, 'turn1', ex_regex.sub(r' EXCHAR ', row['turn1']))
data.set_value(i, 'turn2', ex_regex.sub(r' EXCHAR ', row['turn2']))
data.set_value(i, 'turn3', ex_regex.sub(r' EXCHAR ', row['turn3']))
#Replace Dots
for i, row in data.iterrows():
data.set_value(i, 'turn1', dot_regex.sub(r' DCHAR ', row['turn1']))
data.set_value(i, 'turn2', dot_regex.sub(r' DCHAR ', row['turn2']))
data.set_value(i, 'turn3', dot_regex.sub(r' DCHAR ', row['turn3']))
#Emoji Classification
happy = pd.read_csv('../Data/Emoji/happy.csv')
sad = pd.read_csv('../Data/Emoji/sad.csv')
angry = pd.read_csv('../Data/Emoji/angry.csv')
other = pd.read_csv('../Data/Emoji/other.csv')
#Alpha-numeric methods
def hasdigit(word):
return any(c for c in word if c.isdigit())
def hasalpha(word):
return any(c for c in word if c.isalpha())
def hasalnum(word):
return hasdigit(word) and hasalpha(word)
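# Hedged examples of the helpers above (added for clarity):
# hasalnum("a1b2") -> True, hasalpha("abc") -> True, hasdigit("2019") -> True,
# hasalnum("2019") -> False because it contains digits only.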
happylst = []
sadlst = []
angrylst = []
otherlst = []
for i, row in happy.iterrows():
happylst.append(row[0])
for i, row in sad.iterrows():
sadlst.append(row[0])
for i, row in angry.iterrows():
angrylst.append(row[0])
for i, row in other.iterrows():
otherlst.append(row[0])
#Apply emoji classification on Turn 1 data
for i, row in data.iterrows():
tempStr = []
for s in row.turn1.split():
for word in s.split():
tempWord = []
for char in word:
if char in emoji.UNICODE_EMOJI:
char = 'U+{:X}'.format(ord(char))
if char in happylst:
char = 'HAPPY'
elif char in sadlst:
char = 'SAD'
elif char in angrylst:
char = 'ANGRY'
elif char in otherlst:
char = 'OTHER'
tempWord.append(' ')
tempWord.append(char)
tempWord.append(' ')
else:
tempWord.append(char)
strWord = ''.join(i for i in tempWord)
tempStr.append(strWord)
strFinal = ' '.join(w for w in tempStr)
data.set_value(i, 'turn1', strFinal)
#Apply emoji classification on Turn 2 data
for i, row in data.iterrows():
tempStr = []
for s in row.turn2.split():
for word in s.split():
tempWord = []
for char in word:
if char in emoji.UNICODE_EMOJI:
char = 'U+{:X}'.format(ord(char))
if char in happylst:
char = 'HAPPY'
elif char in sadlst:
char = 'SAD'
elif char in angrylst:
char = 'ANGRY'
elif char in otherlst:
char = 'OTHER'
tempWord.append(' ')
tempWord.append(char)
tempWord.append(' ')
else:
tempWord.append(char)
strWord = ''.join(i for i in tempWord)
tempStr.append(strWord)
strFinal = ' '.join(w for w in tempStr)
data.set_value(i, 'turn2', strFinal)
#Apply emoji classification on Turn 3 data
for i, row in data.iterrows():
tempStr = []
for s in row.turn3.split():
for word in s.split():
tempWord = []
for char in word:
if char in emoji.UNICODE_EMOJI:
char = 'U+{:X}'.format(ord(char))
if char in happylst:
char = 'HAPPY'
elif char in sadlst:
char = 'SAD'
elif char in angrylst:
char = 'ANGRY'
elif char in otherlst:
char = 'OTHER'
tempWord.append(' ')
tempWord.append(char)
tempWord.append(' ')
else:
tempWord.append(char)
strWord = ''.join(i for i in tempWord)
tempStr.append(strWord)
strFinal = ' '.join(w for w in tempStr)
data.set_value(i, 'turn3', strFinal)
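# The three loops above repeat the same emoji-mapping logic for turn1/turn2/turn3. A hedged
# refactor sketch (an illustration, not part of the original pipeline) that factors the shared
# logic into one helper which could be applied column by column:
def map_emoji_tokens(text):
    # Replace every emoji character with a coarse sentiment token, using the
    # happylst/sadlst/angrylst/otherlst lookup lists defined earlier.
    out_words = []
    for word in text.split():
        chars = []
        for char in word:
            if char in emoji.UNICODE_EMOJI:
                code = 'U+{:X}'.format(ord(char))
                if code in happylst:
                    code = 'HAPPY'
                elif code in sadlst:
                    code = 'SAD'
                elif code in angrylst:
                    code = 'ANGRY'
                elif code in otherlst:
                    code = 'OTHER'
                chars.append(' ' + code + ' ')
            else:
                chars.append(char)
        out_words.append(''.join(chars))
    return ' '.join(out_words)
# Hypothetical usage: data['turn1'] = data['turn1'].apply(map_emoji_tokens)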
#Replace Numbers from text
for i, row in data.iterrows():
temp1 = []
temp2 = []
temp3 = []
for s in row.turn1.split():
if hasalnum(s) or hasalpha(s):
t = s
else:
t = re.sub('\d+', 'NUM', s)
temp1.append(str(t))
strturn1 = ' '.join(word for word in temp1)
data.set_value(i, 'turn1', strturn1)
for s in row.turn2.split():
if hasalnum(s) or hasalpha(s):
t = s
else:
t = re.sub('\d+', 'NUM', s)
temp2.append(str(t))
strturn2 = ' '.join(word for word in temp2)
data.set_value(i, 'turn2', strturn2)
for s in row.turn3.split():
if hasalnum(s) or hasalpha(s):
t = s
else:
t = re.sub('\d+', 'NUM', s)
temp3.append(str(t))
strturn3 = ' '.join(word for word in temp3)
data.set_value(i, 'turn3', strturn3)
#Some more data cleaning
for i, row in data.iterrows():
temp1 = []
temp2 = []
temp3 = []
for s in row.turn1.split():
t = re.sub('[\\\\]', '', s)
u = re.sub('[\[\]]', '', t)
v = re.sub('[.]', '', u)
x = re.sub('[-]', '', v)
temp1.append(str(x))
strturn1 = ' '.join(word for word in temp1)
data.set_value(i, 'turn1', strturn1)
for s in row.turn2.split():
t = re.sub('[\\\\]', '', s)
u = re.sub('[\[\]]', '', t)
v = re.sub('[.]', '', u)
x = re.sub('[-]', '', v)
temp2.append(str(x))
strturn2 = ' '.join(word for word in temp2)
data.set_value(i, 'turn2', strturn2)
for s in row.turn3.split():
t = re.sub('[\\\\]', '', s)
u = re.sub('[\[\]]', '', t)
v = re.sub('[.]', '', u)
x = re.sub('[-]', '', v)
temp3.append(str(x))
strturn3 = ' '.join(word for word in temp3)
data.set_value(i, 'turn3', strturn3)
#remove unknowns
for i, row in data.iterrows():
temp1 = []
temp2 = []
temp3 = []
for s in row.turn1.split():
x = unk_pattern.sub(r'', s)
temp1.append(str(x))
strturn1 = ' '.join(word for word in temp1)
data.set_value(i, 'turn1', strturn1)
for s in row.turn2.split():
x = unk_pattern.sub(r'', s)
temp2.append(str(x))
strturn2 = ' '.join(word for word in temp2)
data.set_value(i, 'turn2', strturn2)
for s in row.turn3.split():
x = unk_pattern.sub(r'', s)
temp3.append(str(x))
strturn3 = ' '.join(word for word in temp3)
data.set_value(i, 'turn3', strturn3)
#Remove apostrophe
for i, row in data.iterrows():
temp1 = []
temp2 = []
temp3 = []
for s in row.turn1.split():
x = apho_rege.sub(r'', s)
temp1.append(str(x))
strturn1 = ' '.join(word for word in temp1)
data.set_value(i, 'turn1', strturn1)
for s in row.turn2.split():
x = apho_rege.sub(r'', s)
temp2.append(str(x))
strturn2 = ' '.join(word for word in temp2)
data.set_value(i, 'turn2', strturn2)
for s in row.turn3.split():
x = apho_rege.sub(r'', s)
temp3.append(str(x))
strturn3 = ' '.join(word for word in temp3)
data.set_value(i, 'turn3', strturn3)
#Save to CSV/Text file
turn1_data = data['turn1']
turn2_data = data['turn2']
turn3_data = data['turn3']
turn1_data.to_csv('./Clean/T1_v3.csv', sep=',', encoding='utf-8')
turn2_data.to_csv('./Clean/T2_v3.csv', sep=',', encoding='utf-8')
turn3_data.to_csv('./Clean/T3_v3.csv', sep=',', encoding='utf-8') | UTF-8 | Python | false | false | 9,580 | py | 25 | Sem_Clean_Start.py | 4 | 0.522129 | 0.492693 | 0 | 306 | 30.310458 | 99 |
apollolj/MIT-OpenCourseWare | 2,765,958,946,141 | 2bd30912e5e6ebbe90226e7d30df369df92c1118 | bda901a1850e9427156aad894bcca40f569e36ba | /CS 6.00/ProblemSet8/P08.py | 05244a44e711646da1d631994ff14faad4c041cb | []
| no_license | https://github.com/apollolj/MIT-OpenCourseWare | 435960df0eba0f530d8b4a0f2d6a5d336768ec46 | 536134a30a09573c3ff4b5ad660a40e432ad6eab | refs/heads/master | 2020-12-02T08:42:45.473713 | 2011-08-30T05:42:05 | 2011-08-30T05:42:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # MIT OpenCourseWare - Online Education
# CS 6.00 - Intro to Computer Science
# Problem set 8 - Intelligent Course Advisor
# Student: May Pongpitpitak
# July 30, 2011
import time
SUBJECT_FILENAME = "subjects.txt"
VALUE, WORK = 0, 1
#
# Problem 1: Building A Subject Dictionary
#
def loadSubjects(filename):
"""
Returns a dictionary mapping subject name to (value, work), where the name
is a string and the value and work are integers. The subject information is
read from the file named by the string filename. Each line of the file
contains a string of the form "name,value,work".
returns: dictionary mapping subject name to (value, work)
"""
# The following sample code reads lines from the specified file and prints
# each one.
# TODO: Instead of printing each line, modify the above to parse the name,
# value, and work of each subject and create a dictionary mapping the name
# to the (value, work).
inputFile = open(filename,'r', 1)
subjects = {}
for line in inputFile:
nextSubject = line.strip().split(',') # First remove the extra chars then split them
# subjectName = nextSubject[0]
# subjectValue = int(nextSubject[1])
# subjectWork = int(nextSubject[2])
subjects[nextSubject[0]] = {VALUE : int(nextSubject[1]), WORK : int(nextSubject[2])}
return(subjects)
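# Hedged example of the mapping built above (assuming a subjects.txt line reading "6.00,10,1"):
# loadSubjects would then contain subjects["6.00"] == {VALUE: 10, WORK: 1}.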
def printSubjects(subjects):
"""
Prints a string containing name, value, and work of each subject in
the dictionary of subjects and total value and work of all subjects
"""
totalVal, totalWork = 0,0
if len(subjects) == 0:
return 'Empty SubjectList'
res = 'Course\tValue\tWork\n======\t====\t=====\n'
subNames = list(subjects.keys()) # Added list() to the code to force conversion from keys to list. object.keys() doesn't return a list-type and can't be sorted.
subNames.sort()
for s in subNames:
val = subjects[s][VALUE]
work = subjects[s][WORK]
res = res + s + '\t' + str(val) + '\t' + str(work) + '\n'
totalVal += val
totalWork += work
res = res + '\nTotal Value:\t' + str(totalVal) +'\n'
res = res + 'Total Work:\t' + str(totalWork) + '\n'
print(res)
def cmpValue(subInfo1, subInfo2):
"""
Returns True if value in (value, work) tuple subInfo1 is GREATER than
value in (value, work) tuple in subInfo2
"""
val1 = subInfo1[VALUE]
val2 = subInfo2[VALUE]
return val1 > val2
def cmpWork(subInfo1, subInfo2):
"""
    Returns True if work in (value, work) tuple subInfo1 is LESS than the work
in (value, work) tuple in subInfo2
"""
work1 = subInfo1[WORK]
work2 = subInfo2[WORK]
return work1 < work2
def cmpRatio(subInfo1, subInfo2):
"""
Returns True if value/work in (value, work) tuple subInfo1 is
GREATER than value/work in (value, work) tuple in subInfo2
"""
val1 = subInfo1[VALUE]
val2 = subInfo2[VALUE]
work1 = subInfo1[WORK]
work2 = subInfo2[WORK]
return float(val1) / work1 > float(val2) / work2
#
# Problem 2: Subject Selection By Greedy Optimization
#
def greedyAdvisor(subjects, maxWork, comparator):
"""
Returns a dictionary mapping subject name to (value, work) which includes
subjects selected by the algorithm, such that the total work of subjects in
the dictionary is not greater than maxWork. The subjects are chosen using
a greedy algorithm. The subjects dictionary should not be mutated.
subjects: dictionary mapping subject name to (value, work)
maxWork: int >= 0
comparator: function taking two tuples and returning a boolean
returns: dictionary mapping subject name to (value, work)
"""
# TODO...
    assert comparator in ('cmpRatio', 'cmpValue', 'cmpWork'), "Invalid comparator"
assert maxWork > 0 , "Maximum work limit is not enough"
bestCourse = list(subjects.keys())[0]
greedyCourseList = {}
totalWork = 0
# Select course by best value/work ratio
    if comparator == 'cmpRatio':
while totalWork < maxWork:
# Selecting the best course available in the list
for course in subjects.keys():
if cmpRatio(subjects[course], subjects[bestCourse]) is True \
and course not in greedyCourseList.keys():
bestCourse = course
# If not exceeding maxWork, add the selected course to the list
totalWork += subjects[bestCourse][WORK]
if totalWork <= maxWork:
greedyCourseList[bestCourse] = subjects[bestCourse]
# Select course by best value
    elif comparator == 'cmpValue':
while totalWork < maxWork:
# Selecting the best course available in the list
for course in subjects.keys():
if cmpValue(subjects[course], subjects[bestCourse]) is True \
and course not in greedyCourseList.keys():
bestCourse = course
# If not exceeding maxWork, add the selected course to the list
totalWork += subjects[bestCourse][WORK]
if totalWork <= maxWork:
greedyCourseList[bestCourse] = subjects[bestCourse]
# Select course by best work load
    elif comparator == 'cmpWork':
while totalWork < maxWork:
# Selecting the best course available in the list
for course in subjects.keys():
if cmpWork(subjects[course], subjects[bestCourse]) is True \
and course not in greedyCourseList.keys():
bestCourse = course
# If not exceeding maxWork, add the selected course to the list
totalWork += subjects[bestCourse][WORK]
if totalWork <= maxWork:
greedyCourseList[bestCourse] = subjects[bestCourse]
return(greedyCourseList)
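# A hedged refactor sketch (illustration only, not part of the assignment solution): passing the
# comparison functions themselves (cmpValue / cmpWork / cmpRatio) instead of their names would
# collapse the three near-identical branches above into a single loop.
def greedyAdvisorWithFunction(subjects, maxWork, better):
    chosen = {}
    totalWork = 0
    while True:
        best = None
        for course, info in subjects.items():
            if course in chosen or totalWork + info[WORK] > maxWork:
                continue
            if best is None or better(info, subjects[best]):
                best = course
        if best is None:
            break
        chosen[best] = subjects[best]
        totalWork += subjects[best][WORK]
    return chosen
# Hypothetical usage: greedyAdvisorWithFunction(loadSubjects(SUBJECT_FILENAME), 15, cmpValue)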
def bruteForceAdvisor(subjects, maxWork):
"""
Returns a dictionary mapping subject name to (value, work), which
represents the globally optimal selection of subjects using a brute force
algorithm.
subjects: dictionary mapping subject name to (value, work)
maxWork: int >= 0
returns: dictionary mapping subject name to (value, work)
"""
nameList = list(subjects.keys())
tupleList = list(subjects.values())
bestSubset, bestSubsetValue = \
bruteForceAdvisorHelper(tupleList, maxWork, 0, None, None, [], 0, 0)
outputSubjects = {}
for i in bestSubset:
outputSubjects[nameList[i]] = tupleList[i]
return outputSubjects
def bruteForceAdvisorHelper(subjects, maxWork, i, bestSubset, bestSubsetValue, \
subset, subsetValue, subsetWork):
# Hit the end of the list.
if i >= len(subjects):
if bestSubset == None or subsetValue > bestSubsetValue:
# Found a new best.
return subset[:], subsetValue
else:
# Keep the current best.
return bestSubset, bestSubsetValue
else:
s = subjects[i]
# Try including subjects[i] in the current working subset.
if subsetWork + s[WORK] <= maxWork:
subset.append(i)
bestSubset, bestSubsetValue = bruteForceAdvisorHelper(subjects, \
maxWork, i+1, bestSubset, bestSubsetValue, subset, \
subsetValue + s[VALUE], subsetWork + s[WORK])
subset.pop()
bestSubset, bestSubsetValue = bruteForceAdvisorHelper(subjects, \
maxWork, i+1, bestSubset, bestSubsetValue, subset, \
subsetValue, subsetWork)
return bestSubset, bestSubsetValue
#
# Problem 3: Subject Selection By Brute Force
#
def bruteForceTime(subjects, maxWork):
"""
Runs tests on bruteForceAdvisor and measures the time required to compute
an answer.
"""
# TODO...
print("Measuring Brute Force method's performance.")
startTimer = time.time()
bruteForceCourseList = bruteForceAdvisor(subjects, maxWork)
endTimer = time.time()
performanceTime = endTimer-startTimer
print("Total time to completion:", performanceTime)
return(performanceTime, bruteForceCourseList)
# Problem 3 Observations
# ======================
#
# TODO: write here your observations regarding bruteForceTime's performance
#
# Problem 4: Subject Selection By Dynamic Programming
#
def dpAdvisor(subjects, maxWork):
"""
Returns a dictionary mapping subject name to (value, work) that contains a
set of subjects that provides the maximum value without exceeding maxWork.
subjects: dictionary mapping subject name to (value, work)
maxWork: int >= 0
returns: dictionary mapping subject name to (value, work)
"""
# TODO...
dpCourseList = {}
subjectsKeysList = list(subjects.keys())
subjectsValuesList = list(subjects.values())
# Get the best course list using recursive method
courseListTree, courseListTreeValue = dpAdvisorHelperTree(subjectsValuesList, maxWork, \
len(subjectsValuesList)-1, 0, {})
for courses in courseListTree:
dpCourseList[subjectsKeysList[courses]] = subjectsValuesList[courses]
# Get the best course list using for-loop method
# courseListTable = dpAdvisorHelperTable(subjects, maxWork)
return(dpCourseList)
def dpAdvisorHelperTree(subjectsValuesList, maxWork, i, trackedWork, courseMemoTree):
"""
DESC: the best course list is determined by going down a decision tree between
taking or not taking each course (binary)
NOTE: This function might be better if the dictionary of all intermediate steps
were made Global and doesn't need to be pass around
subjectsValuesList: a list of all courses offer created from keys
maxWork: limit amount of work load impose
i: index of courses for tracking location on the subject list
trackedWork: the total amount of work accumulated so far. This variable is also used
as a second key in the courseMemoTree dictionary as it gives insight to
the depth of tree
courseMemoTree: a dictionary mapping index number and list of courses(keys)
to the total value and work of the list for references
(i, trackedWork) : (totalValue, totalWork)
return: the better list of courses and its corresponding values
as two separate items
"""
# Check if the element has already been computed and stored in the reference
try: return(courseMemoTree[i, trackedWork])
# If not, proceed
except KeyError:
# Check if at the first/bottom possible element in the list.
if i == 0:
courseList = []
courseListValue = 0
courseMemoTree[i, trackedWork] = (courseList, courseListValue)
return(courseList, courseListValue)
else:
# SCENARIO 1 - If not taking this course
courseNotTaken, courseNotTakenValue = dpAdvisorHelperTree(subjectsValuesList, maxWork, i-1,
trackedWork, courseMemoTree)
# SCENARIO 2 - Take this course
# If there is no space
if trackedWork >= maxWork:
courseMemoTree[i, trackedWork] = (courseNotTaken, courseNotTakenValue)
return(courseNotTaken, courseNotTakenValue)
# If there is space
elif (trackedWork + subjectsValuesList[i][WORK]) <= maxWork:
# Get the beginning of the list. Remember we are working backward in this method.
courseTaken, courseTakenValue = dpAdvisorHelperTree(subjectsValuesList, maxWork, i-1,
trackedWork + subjectsValuesList[i][WORK], courseMemoTree)
courseTaken.append(i)
courseTakenValue += subjectsValuesList[i][VALUE]
# Compare the results from the two scenarios
courseListValue = max(courseTakenValue, courseNotTakenValue)
if courseListValue == courseTakenValue: courseList = courseTaken
else: courseList = courseNotTaken
# Store for reference
courseMemoTree[i, trackedWork] = (courseList, courseListValue)
return(courseList, courseListValue)
##def dpAdvisorHelperTable(subjects, maxWork):
## """
## DESC: solving the problem using for-loop instead of decision tree
##
## subjects: a dictionary mapping of all courses offer to their work load and value
## maxWork: limit amount of work load impose
##
## return: a dictionary mapping the best courses and their wprk load and value
## """
##
## courseMemoTable = {0:([], 0)} # a dictionary for storing the references
## courseList = {} # the final result list of best courses
##
## # Check through all the possible total work load value even at less than max
## for trackedWork in range(maxWork):
## bestCourseCombo = ([], 0) # set/re-set the best choice (list, total value)
##
## #
## for course in subjects:
##
## # Check if we're not going over work load limit and doesn't already exist in the reference
## if subjects[course][WORK] < trackedWork and course not in courseMemoTable[trackedWork][0]:
## currentCourseComboValue = courseMemoTable[trackedWork][1] + subjects[course][VALUE]
##
## #
## if currentCourseComboValue > bestCourseCombo[1]:
## bestCourseCombo = (courseMemoTable[trackedWork][0].append(course), currentCourseComboValue)
##
## courseMemoTable[trackedWork] = bestCourseCombo
##
## #
## for course in courseMemoTable[max(courseMemoTable)][0]:
## courseList[course] = subjects[course]
##
## return(courseList)
#
# Problem 5: Performance Comparison
#
def dpTime(subjects, maxWork):
"""
DESC: Runs tests on dpAdvisor and measures the time required to compute an
answer.
return: the time value and the dictionary of selected courses from dpAdvisor
"""
# TODO...
print("Measuring Dynamic Programming method's performance")
startTimer = time.time()
dpCourseList = dpAdvisor(subjects, maxWork)
endTimer = time.time()
performanceTime = endTimer-startTimer
print("Total time to completion:", performanceTime)
    return(performanceTime, dpCourseList)
# Problem 5 Observations
# ======================
#
# TODO: write here your observations regarding dpAdvisor's performance and
# how its performance compares to that of bruteForceAdvisor.
| UTF-8 | Python | false | false | 15,410 | py | 12 | P08.py | 12 | 0.617716 | 0.611746 | 0 | 393 | 38.211196 | 179 |
cfobel/scons_make | 10,514,079,943,253 | a139f52acc2cb82a16d60139a85d5fbcda7f9f51 | fe584b9beca82729bb7e38d3e9b4fce8eabbd8ff | /SConstruct | 0c4d29613d7cf832456403e60ec177ac26e2bce2 | []
| no_license | https://github.com/cfobel/scons_make | c7d278fc0dea05408e2fa5df665aea4e3d8c14ae | 95f4a7be2bb6231b195b6bb6e508ba0cef2ce968 | refs/heads/master | 2021-01-17T05:27:16.011098 | 2013-06-03T18:16:05 | 2013-06-03T18:16:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | env = Environment()
foo = env.Command('foo', 'Makefile', "make -f $SOURCE")
env.Default(foo)
| UTF-8 | Python | false | false | 95 | 2 | SConstruct | 1 | 0.652632 | 0.652632 | 0 | 5 | 18 | 55 |
|
jasonqiu95/jason_amy_translator | 9,070,970,976,150 | d29854fca35c0b8fa49a370111f0e4dc616edde1 | 90e9687d99f63778970c61b3d9a7f19b9e14d6d3 | /test.py | 4b4bd705b84e718714fe84d89794177502716095 | []
| no_license | https://github.com/jasonqiu95/jason_amy_translator | 66c201dcd65d45a884bcc0d08953fed56e683efc | bb14a39e8afa1504e7179682ca5a0f87111cdaa1 | refs/heads/master | 2021-01-11T03:09:12.279801 | 2016-10-17T00:38:34 | 2016-10-17T00:38:34 | 71,086,097 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
from flask import Flask, request, Response
KEY = 'EAAQFNW0yLFYBAFh4ZAJr1aIwIRcJxv4WQOv1iz64GP4m6rhZCQqI4p8ZCrIYSb7Rgvoxo2qmU0KM2zL1oG5d9bZAfN1VqN16ragMHKM0eql7f1J6qrT520F3ktBspDfdvXtMuxdM8houekYouTm1hTTOxJZAxeMhLYlXCu0SNaAZDZD'
app = Flask(__name__)
@app.route('/dubhacks', methods=['GET', 'POST'])
def getMessage():
print "begin"
myJson = request.json
print myJson
try:
userId = myJson['entry'][0]['messaging'][0]['sender']['id']
print userId
message = myJson['entry'][0]['messaging'][0]['message']['text']
print message
except KeyError:
return Response(status=200)
sendTextMessage(userId, message)
return Response(status=200)
def sendTextMessage(recipientId, messageText):
import json
from translate import Translator
translation = messageText
try:
translator= Translator(to_lang="zh")
translation = translator.translate(messageText)
except KeyError:
pass
message = json.dumps({"recipient": {"id": recipientId}, "message": {"text": translation}})
callSendAPI(message, recipientId)
def callSendAPI(messageData, recipientId):
parameters = {"access_token": KEY, "recipient": recipientId}
headers = {'Content-type': 'application/json'}
return requests.post("https://graph.facebook.com/v2.6/me/messages", params=parameters, data=messageData, headers=headers).json()
| UTF-8 | Python | false | false | 1,322 | py | 1 | test.py | 1 | 0.760212 | 0.726929 | 0 | 41 | 31.243902 | 184 |
t0z/tuby | 7,413,113,601,458 | aa00938b10c820fc80ca1fd9125b8350c286bd9d | 3884b00d40043d7538791e5cf20148676ff54671 | /module/put.py | 7be202f87d3aeed5880e91e13215098ed0dc038d | []
| no_license | https://github.com/t0z/tuby | 149ff276d832163851f0a784b1d2f372d21598ef | ddb2c47052bac7538e0be67929a3f832cfdf5205 | refs/heads/master | 2021-01-10T03:14:53.213192 | 2016-02-16T09:00:26 | 2016-02-16T09:00:26 | 51,375,437 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | try:
TUBY = TUBY # @UndefinedVariable
except Exception:
from tuby.core import TubyStream
TUBY = TubyStream()
import urllib2
import sys
data = None
urls = ''
if len(sys.argv) > 3:
urls = sys.argv[3]
else:
urls = u''.join([l.decode('utf8') for l in TUBY['stdin']]).strip()
if len(sys.argv) > 4:
    data = u' '.join(a.decode('utf8') for a in sys.argv[4:])
urls = urls.split('\n')
for url in urls:
# rh = urllib.urlopen(url, data)
    rh = urllib2.urlopen(url, data)  # open the url from the loop
# h.request('POST', '/inout-tracker/index.php', data, {})
for line in rh:
TUBY.stdout.write(line.encode('utf8', errors="ignore"))
| UTF-8 | Python | false | false | 655 | py | 49 | put.py | 37 | 0.622901 | 0.60916 | 0 | 25 | 25.2 | 70 |
Cielako/Python_exercises | 9,715,216,048,457 | c791e4d8dd90e6d8ff39a7a790de84e9a7fca224 | 3e602925f1983062c5f928a843efa8c1d315b451 | /lista7/zad1.py | 8ca849705847c9d08bdd693007acabe57cfabd26 | []
| no_license | https://github.com/Cielako/Python_exercises | 7ee210e5be70e74b7e715a12512c13ee2e47cdec | 35df631ab4c0b1feea27c2992388fd1ccd45e564 | refs/heads/master | 2022-04-10T00:39:30.995776 | 2020-01-17T13:24:03 | 2020-01-17T13:24:03 | 219,190,707 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
n = int(input("do którego elementu ciągu fibonnaciego mam wypisać zaczynając od 0: "))
def iteracyjna(n):
czas = time.time()
a, b = 0, 1
print("1 :", a)
print("2 :", b)
for i in range(1, n - 1):
        # result = a + b
a, b = b, a + b
print( str(i + 2)+" :", b)
print("Czas mojej aplikacji zajął:",time.time()-czas)
print("-----------")
def rekurencyjna(n):
czas = time.time()
def obliczenia(n):
if n == 1:
return 0
elif n == 2:
return 1
elif n > 2 :
return obliczenia(n-1) + obliczenia(n-2)
for i in range(1,n+1):
print (i, ':', obliczenia(i))
print("Czas mojej aplikacji zajął:",time.time()-czas)
iteracyjna(n)
rekurencyjna(n)
# as you can see, the iterative Fibonacci version runs much faster than the recursive one (e.g. for n = 35)
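# A hedged extra sketch (added to illustrate the point above, not part of the original exercise):
# memoizing the recursive version removes the repeated subcalls, making it roughly as fast as the
# iterative one while keeping the same indexing (1 -> 0, 2 -> 1).
def fib_memo(n, memo={1: 0, 2: 1}):
    # the shared default dict acts as a cache across calls
    if n not in memo:
        memo[n] = fib_memo(n - 1) + fib_memo(n - 2)
    return memo[n]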
| UTF-8 | Python | false | false | 902 | py | 82 | zad1.py | 73 | 0.553672 | 0.532203 | 0 | 30 | 28.5 | 107 |
hgmehta/CloudCompiler | 3,564,822,904,630 | 6fb97a27b5001c0592a030381bb3558f375edf47 | 6add0eeb3ec5ec0be41d396241d05c12be390791 | /code/src/monitorService/monitorService.py | 3b14c0cf47b0685898776fe60848e4089b3fe7da | []
| no_license | https://github.com/hgmehta/CloudCompiler | 14d0928722f8dca6df84693afaf3a8d5e80dab44 | 8ffe24ee467b292dc6cc83834871f764aea826c1 | refs/heads/master | 2021-08-23T14:04:49.445937 | 2017-12-05T05:29:05 | 2017-12-05T05:29:05 | 106,504,573 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
from flask import Flask, request
from monitor import monitorStatus
app = Flask(__name__)
@app.route('/monitor',methods = ['POST'])
def monitor():
return str(monitorStatus())
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5007, debug=True, threaded=True)
| UTF-8 | Python | false | false | 286 | py | 33 | monitorService.py | 14 | 0.664336 | 0.636364 | 0 | 12 | 22.833333 | 65 |
Luminous20/PythonCourse | 14,130,442,414,602 | 3071b973b4eefd1b33a5d8c8a0b6b43d17f538a9 | e2243ee4e850be5f31b8c7e232ffa1fb14ea1d11 | /homework1.py | e042fae9c362442e4bbe2a3a7277329649361b3e | []
| no_license | https://github.com/Luminous20/PythonCourse | b5c58ab3ee9946095ab764e78a11477983bb4d75 | 0c1e919174d09bcbb911bee87715b1ae32868939 | refs/heads/master | 2020-12-27T12:21:36.065032 | 2020-06-03T13:01:27 | 2020-06-03T13:01:27 | 237,901,600 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Homework by Clara Schlosser KU 0073323
# For INTL 308
import random #Import the package random to afterwards get
# a random value to define stock prices
market = {} # create list market (global variable )
class Portfolio(object): # defining what is a portfolio it is an object
def __init__(self):
self.cash = 0 # At the beginning there is no money in the portfolio
self.stocklst = {} # list of the stocks
self.fundlst = {} # list of the funds
self.historylst = ['new Portfolio'] # making a list to afterwards put in the history
def addCash(self, value): # defining the addCash function:
self.value = value #value = the amount added
if (value < 0): # if function only positive amount can be added
self.historylst.append("Warn - cash must be a positive value")
return bool(False)
self.cash = self.cash + value # adding the amount = the new cash amount
self.historylst.append(str("{:.2f}".format(value)) + " cash added") # add the amount added to the history
return bool(True)
def withdrawCash(self, value): # define the Withdraw function:
self.value = value #value = the amount were going to withdraw
if (value < 0): # if function only positive amount can be withdrawn
self.historylst.append("Warn - cash must be a positive value")
return bool(False)
        if (self.cash < value): # warns if the withdrawal would put you in debt
self.historylst.append("Warn - not enough money")
return bool(False)
self.cash = self.cash - value # withdraws the amount = the new cash amount
self.historylst.append(str("{:.2f}".format(value)) + " cash withdrawn") # add the amount withdrawn to the history
return bool(True)
def buyStock(self, shares, symbol):# define the buyStock function
self.shares = shares # define the shares
self.symbol = symbol #define the symbols of the share
        price = market[symbol.symbol] # looks for the price in the market list for the specific symbol
val = shares*price # the value is calculated by multiplying the amount of shares with the prices
        if self.cash < val: # warns if the purchase would put you in debt
self.historylst.append("Warn - not enough money")
return bool(False)
self.historylst.append(str(shares) + " Stock "+str(self.symbol.symbol) +" purchased at " + str("{:.2f}".format(price)) + " per share")# adds the transaction to the history
self.withdrawCash(val) # uses the withdraw function to withdraw the value
if self.symbol.symbol in self.stocklst:# updates the stocks you have in the Portfolio
            self.stocklst[self.symbol.symbol] = self.stocklst[self.symbol.symbol] + shares # applies if you have already shares from that firm
else:
self.stocklst[self.symbol.symbol] = shares # applies if you have not already shares from that firm
return bool(True)
def sellStock(self, symbol, shares): # define the sellStock function
self.shares = shares # define the shares
self.symbol = symbol #define the symbols of the share
fac = random.uniform(0.5, 1.5) #determines a random factor between (0.5, 1.5)
price = market[symbol] # reads the price in the market list
price = price * fac # determines the sell price by multiplying the share price with the factor
price = round(price, 2) # rounds the price
self.historylst.append(str(shares) + " Stock "+str(self.symbol) +" sold at " + str("{:.2f}".format(price)) + " per share") # adds the transaction to the history
self.addCash(shares*price) # uses the addCash function to add the price times the amount of shares
self.stocklst[self.symbol] = self.stocklst[self.symbol] - shares
    def buyMutualFund(self, shares, symbol): # define the buyMutualFund function
self.shares = shares # define the shares
self.symbol = symbol #define the symbols of the share
price = 1 # price is set at 1
val = shares*price # value is calculated shares times price
        if self.cash < val: # warns if the purchase would put you in debt
self.historylst.append("Warn - not enough money")
return bool(False)
self.withdrawCash(shares*price) # uses the withdraw function to withdraw the price times the shares
        if self.symbol.symbol in self.fundlst: # updates the fundlist you have in the Portfolio
            self.fundlst[self.symbol.symbol] = self.fundlst[self.symbol.symbol] + shares
        else:
            self.fundlst[self.symbol.symbol] = shares
self.historylst.append(str(shares) + " Mutual Fund " +str(self.symbol.symbol) + " purchased at " + str("{:.2f}".format(price)) + " per share") # adds the transaction to the history
return bool(True)
    def sellMutualFund(self, symbol, shares): # define the sellMutualFund function
self.shares = shares # define the shares
self.symbol = symbol #define the symbols of the share
price = round(random.uniform(0.9, 1.2),2) # determines a random price between (0.9, 1.2)and rounds
self.addCash(shares*price) # uses the addCash function to add the price times the amount of shares
self.fundlst[symbol] = self.fundlst[symbol] - shares # updates the fundlist you have in the Portfolio
self.historylst.append(str(shares) + " Mutual Fund "+str(self.symbol) +" sold at " + str("{:.2f}".format(price)) + " per share") # adds the transaction to the history
def history(self): # define the history function
print ("")
print ("History")
print(*self.historylst,sep="\n")
def __str__(self): # printing the portfolio
return str(self.print_portfolio())
def print_portfolio(self): # defines the history function
print("Portfolio")
print("cash: ",str("{:.2f}".format(self.cash)))
print("stock: ",str(self.stocklst))
print("nfund: ",str(self.fundlst))
class MutualFund(object): # defines the class Mutualfund
def __init__(self, symbol):
self.symbol = symbol
class Stock(object): # defines the class stock
def __init__(self, price, symbol):
self.symbol = symbol
market[symbol] = price # Has a list in which the price is determined and specified by the symbol
if __name__ == '__main__': #Add this if you want to run the test with this script.
portfolio = Portfolio() # create a new portfolio
portfolio.addCash(300.50) # add cash to the portfolio
s = Stock(price=20, symbol="HFH") # create a stock HFH
portfolio.buyStock(5, s) # buy 5 shares of stock s
mf1 = MutualFund("BRT") # create MF BRT
mf2 = MutualFund("GHT") # create MF GHT
portfolio.buyMutualFund(10.3, mf1) # Buy 10.3 shares of BRT
portfolio.buyMutualFund(2, mf2) # Buy 2 shares of GHT
portfolio.print_portfolio() # print portfolio
portfolio.sellMutualFund("BRT", 3) # Sell 3 shares of BRT
portfolio.sellStock("HFH", 1) # Sell 1 share of HFH
portfolio.withdrawCash(50) # Withdraw 50
portfolio.addCash(19.80) #add cash add cash to the portfolio
portfolio.history() # Show a transaction history ordered by time
| UTF-8 | Python | false | false | 7,457 | py | 9 | homework1.py | 3 | 0.653346 | 0.643556 | 0 | 128 | 57.25 | 188 |
tanakatsu/padock_photo_classifier | 1,056,561,978,412 | ad4e0c4e039d664a5057300cec52511395b143ee | 505870b83e9d3abb2488d45ae04fd1a2d521e830 | /utils/generate_dataset.py | 6f66bc22b4aaf9e76d2ac402ffc0ecc413d83b25 | []
| no_license | https://github.com/tanakatsu/padock_photo_classifier | 8f2ebe792a4d2096640b674dd97e192aff551732 | fbe6b6ebd44769a21482d229925e3d3549c507a6 | refs/heads/master | 2021-01-19T22:01:47.296532 | 2017-06-25T10:34:34 | 2017-06-25T10:34:34 | 88,742,083 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pickle
import argparse
import os
import re
def load_file(filename):
# http://qiita.com/Kodaira_/items/91207a7e092f491fca43
with open(filename, 'rb') as f:
data = pickle.load(f)
return data
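# Hedged usage note (illustration): load_file simply returns whatever object was pickled;
# here score_data is expected to be a dict mapping a horse name to its aptitude score,
# e.g. load_file('scores.pkl')['SomeHorse'] -> 0.87 (hypothetical file name and value).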
parser = argparse.ArgumentParser()
parser.add_argument('photo_file', help='padock photo urls pkl')
parser.add_argument('score_file', help='distance aptitude score pkl')
parser.add_argument('data_directory', help='image data directory')
parser.add_argument('--output', '-o', action='store', type=str, default='dataset.txt')
args = parser.parse_args()
padock_photo_data = load_file(args.photo_file)
score_data = load_file(args.score_file)
filenames = os.listdir(args.data_directory)
filenames = [file for file in filenames if not re.match('^\.', file)]
data_dict = []
for data in padock_photo_data:
url = data['padock_photo_url']
params = url.replace('http://', '').split('/')
filename = '%s_%s' % (params[2], params[4])
filename = filename.replace('.jpg', '')
name = data['name']
if name in score_data:
score = score_data[name]
else:
score = None
if score:
data_dict.append({'name': name, 'score': score, 'filename': filename})
sorted_data_dict = sorted(data_dict, key=lambda x: -x['score'])
# print(sorted_data_dict)
if args.output:
with open(args.output, 'w') as f:
for filename in filenames:
print(filename)
filename_prefix = filename.split('.')[0]
try:
data_item = next((item for item in sorted_data_dict if item["filename"] == filename_prefix))
print(data_item)
print("%s %f\n" % (filename, data_item['score']))
f.write("%s %f\n" % (filename, data_item['score']))
except Exception:
pass
else:
for filename in filenames:
print(filename)
filename_prefix = filename.split('.')[0]
try:
data_item = next((item for item in sorted_data_dict if item["filename"] == filename_prefix))
print(data_item)
print("%s %f\n" % (filename, data_item['score']))
except Exception:
pass
| UTF-8 | Python | false | false | 2,176 | py | 30 | generate_dataset.py | 29 | 0.607077 | 0.598805 | 0 | 67 | 31.477612 | 108 |
Nihilnia/reset | 2,765,958,945,001 | 7a480e4317fc24bb80de1efba5215945534ba5ff | de64b33ae5d08c920eae6d9414e2e0a008cf4966 | /Day 14 - Prime Number Questioning.py | 118f2b4d33b1567ab54132657423ea39763f0f6a | []
| no_license | https://github.com/Nihilnia/reset | dd446c6a83ea0edac69a56c1612dc4e0b09bc258 | c70dcc9611a4b962a31add8a291b27be9d20014e | refs/heads/master | 2020-03-27T09:45:17.222802 | 2018-09-14T01:27:23 | 2018-09-14T01:27:23 | 146,369,551 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Prime Number Questioning!
while True:
userInput = int(input("Give me a number bitch: "))
key = 0
for summs in range(2, userInput):
if userInput % summs == 0:
key += 1
if key != 0:
print("It's not a Prime Number!")
else:
print("It's a prime Number babe!")
####################################################
# WITH DEFINE A FUNCTION
def PrimeNumber():
userInput = int(input("Give me a number: "))
key = 0
for summs in range(2, userInput):
if userInput % summs == 0:
key += 1
if key != 0:
print("It's not a Prime Number!")
else:
print("It's a Prime number Babe!")
while True:
PrimeNumber() | UTF-8 | Python | false | false | 740 | py | 42 | Day 14 - Prime Number Questioning.py | 41 | 0.490541 | 0.477027 | 0 | 30 | 22.733333 | 54 |
callumherries/edward | 3,169,685,883,254 | 3672c46f1de198bd41b140318042c10baaad531c | b0a30690394678946c753581d0d7c2580ee1fda6 | /tests/test-util/test_get_control_variate_coef.py | f95df568d42f41027e8b6bb2fc9626c456d09d18 | [
"Apache-2.0"
]
| permissive | https://github.com/callumherries/edward | 59d143d2886ad8ba7d9b88ddad37fdefc9c3991f | 4395319a8442bbfbeb1da1f2c7c25d879a09be7f | refs/heads/master | 2021-01-22T21:44:53.538020 | 2017-03-19T07:18:34 | 2017-03-19T07:18:34 | 85,466,371 | 1 | 0 | null | true | 2017-03-19T10:05:45 | 2017-03-19T10:05:45 | 2017-03-19T10:05:28 | 2017-03-19T09:47:08 | 34,913 | 0 | 0 | 0 | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from edward.util.tensorflow import get_control_variate_coef
class test_get_control_variate_coef(tf.test.TestCase):
def test_calculate_correct_coefficient(self):
with self.test_session():
f = tf.constant([1.0, 2.0, 3.0, 4.0])
h = tf.constant([2.0, 3.0, 8.0, 1.0])
self.assertAllClose(get_control_variate_coef(f, h).eval(),
0.03448276)
if __name__ == '__main__':
tf.test.main()
| UTF-8 | Python | false | false | 568 | py | 3 | test_get_control_variate_coef.py | 3 | 0.646127 | 0.602113 | 0 | 21 | 26.047619 | 64 |
mosh-shu/big-data-processing | 4,604,204,944,215 | 02e16780d43a20ceea59d474fab716f0263e9ab9 | 994a8a0aceea9612c12c4c1e34294fa923d3e9af | /homework/gpx/denoise.py | 515dbee3ba752bf5d2918e831c28cdaa5c689bd2 | []
| no_license | https://github.com/mosh-shu/big-data-processing | be4b2e969d46d83e331401e3a3a9421821e004ea | 7aea90dbfb25f2af2cd93a00aef88b316894e72d | refs/heads/master | 2020-05-05T12:47:13.612952 | 2019-07-13T13:18:52 | 2019-07-13T13:18:52 | 180,044,885 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import csv
import sys
path = sys.argv[1]
file = path + "all.csv"
newfile = path + "all_denoise.csv"
outs = list()
# read every point and keep only those inside the bounding box
# (row[0] is the latitude, row[1] is the longitude)
with open(file, "r", encoding="utf_8") as csv_file:
    f = csv.reader(csv_file, delimiter=",", lineterminator="\r\n", quotechar='"',
                   skipinitialspace=True)
    for row in f:
        if not 136.4 < float(row[1]) < 137.5:
            continue
        if not 34.6 < float(row[0]) < 35.6:
            continue
        outs.append(row)
with open(newfile, 'w') as ff:
    writer = csv.writer(ff, lineterminator='\n') # set the line terminator (\n) explicitly
    writer.writerows(outs) # writerows writes the whole 2-D list in one call
| UTF-8 | Python | false | false | 635 | py | 29 | denoise.py | 9 | 0.606474 | 0.574106 | 0 | 24 | 23.458333 | 77 |
mgebhardt/joomla-componentBuilder | 15,393,162,822,117 | a646c6fcb557f1bd04d5165e18de59b516c9c14c | 50429883430403351e0e81acdb5e9329923d5151 | /build.py | 1b92009e4573a8b8d763101752392bef9cbcbf6a | []
| no_license | https://github.com/mgebhardt/joomla-componentBuilder | c67204f345efb8130383e5d3f248ab3d41fe4396 | 3365785eec34f39e90f92985e5f9843c1bea986e | refs/heads/master | 2020-06-06T11:18:30.952795 | 2012-11-16T14:24:52 | 2012-11-16T14:24:52 | 6,689,564 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #===============================================================================
# @version: 1.1.0
# @summary: This python script creates install archives for Joomla components
# @copyright: (C) 2012 Mathias Gebhardt
# @license: GNU General Public License version 2 or later; see LICENSE.txt
#===============================================================================
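# Usage note (inferred from the code below, not from the original documentation):
# run the script from a directory directly inside the component root; it changes
# to the parent directory and expects a 'releases' folder to exist there for the
# generated zip archive and update XML.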
from xml.dom.minidom import parse
import zipfile
import os
import sys
import urllib2
# Change to components root directory
os.chdir('..')
#===============================================================================
# Search manifest and get some parameters
#===============================================================================
found = False
# search manifest file
for file in os.listdir('.'):
    # must be an xml file
if '.xml' != file[-4:]:
continue
    # open the file and look into it
# create XML dom
manifest = parse(file)
# root node must be extension
if 'extension' == manifest.firstChild.nodeName:
found = True
        break
if not found:
print 'Manifest not found!'
sys.exit(-1)
# get name
name = manifest.getElementsByTagName('name')[0].firstChild.data.lower()
# get version
version = manifest.getElementsByTagName('version')[0].firstChild.data
# get updateServer
if len(manifest.getElementsByTagName('updateservers')) > 0:
# if update section exsits
updateServer = manifest.getElementsByTagName('server')[0].firstChild.data
else:
updateServer = False
# get folders
folders = []
# get frontend and backend folder
for folder in manifest.getElementsByTagName('files'):
folders.append(folder.attributes['folder'].value)
# get folder for media
folders.append(manifest.getElementsByTagName('media')[0].attributes['folder'].value)
# get folder for installer language
folders.append('language')
#===============================================================================
# Create zip archive
#===============================================================================
# create file name for archive
zipFileName = os.path.join('releases', '{}-{}.zip'.format(name, version))
#print zipFileName
# open archive
installer = zipfile.ZipFile(zipFileName, mode = 'w')
# try to add all files to archive
try:
# at first add the whole folder for frontend, backend, media and language
for rootdir in folders:
for root, subFolders, files in os.walk(rootdir):
for file in files:
installer.write(os.path.join(root, file))
# add the content of component's root folder
for file in os.listdir('.'):
# ignore all hidden files
if file[0] == '.':
continue
# also ignore all folders
if os.path.isdir(file):
continue
installer.write(file)
finally:
# every thing added, than close
installer.close()
print 'successfully created {}'.format(zipFileName)
#===============================================================================
# Get update server file and add new update section
#===============================================================================
if updateServer == False:
# no update server found
print 'Skipping update server'
sys.exit(0)
file = urllib2.urlopen(updateServer)
dom = parse(file)
# get first update and copy it
# first and last child are \n
# so you have to search for update
# TODO: search latest update
lastUpdate = dom.getElementsByTagName('update')[0]
update = lastUpdate.cloneNode(True)
# update version
update.getElementsByTagName('version')[0].firstChild.data = version
# update download url
update.getElementsByTagName('downloadurl')[0].firstChild.data = \
update.getElementsByTagName('downloadurl')[0].firstChild.data.rsplit \
('/', 1)[0] + '/' + '{}-{}.zip'.format(name, version)
# add new tag at first position
dom.getElementsByTagName('updates')[0].insertBefore(update, lastUpdate)
# get file name
updateXML = os.path.join('releases', updateServer.rsplit('/', 1)[1])
# Write dom to file
xmlFile = open(updateXML, 'w')
dom.writexml(xmlFile)
xmlFile.close()
print 'successfully created {}'.format(updateXML) | UTF-8 | Python | false | false | 4,020 | py | 6 | build.py | 5 | 0.612438 | 0.605473 | 0 | 137 | 28.350365 | 84 |
Yackpott/public | 17,411,797,437,563 | a255d657f063f4490486788dcd1d57a7bf26a750 | 6216a93ebd10aafc86580c09b412ae490b2ce256 | /Yackpott-repo/Actividades/AC20/main.py | f8efd2127f5f8b254bb4a344e03aca445bc2fbb1 | []
| no_license | https://github.com/Yackpott/public | 16a3bdbe0922e1c20a7fed4904071a47d282b50b | 6d4f310df73d653a841676d6be525feb2f7c03b9 | refs/heads/master | 2021-01-10T14:35:18.785300 | 2017-02-25T20:16:34 | 2017-02-25T20:16:34 | 51,626,232 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from PyQt4 import QtGui, uic
from calc_financiero import calcular_jub
form = uic.loadUiType("hexa.ui")
class MainWindow(form[0], form[1]):
def __init__(self):
super().__init__()
self.setupUi(self)
pix_1 = QtGui.QPixmap("logo_argentum.png")
pix_3 = QtGui.QPixmap("logo_hexa.png")
self.label_1.setPixmap(pix_1)
self.label_3.setPixmap(pix_3)
self.lineEdit_1.textChanged.connect(self.calcular)
self.lineEdit_2.textChanged.connect(self.calcular)
self.lineEdit_3.textChanged.connect(self.calcular)
self.lineEdit_4.textChanged.connect(self.calcular)
self.lineEdit_5.textChanged.connect(self.calcular)
self.comboBox.currentIndexChanged.connect(self.calcular)
        # Complete the creation of the interface #
def calcular(self):
        seguir = True
        # compare the widgets' current text, not the widget objects themselves
        if self.lineEdit_1.text() == "":
            seguir = False
        elif self.lineEdit_2.text() == "":
            seguir = False
        elif self.lineEdit_3.text() == "":
            seguir = False
        elif self.lineEdit_4.text() == "":
            seguir = False
        elif self.lineEdit_5.text() == "":
            seguir = False
        elif self.comboBox.currentText() == "":
            seguir = False
if seguir:
try:
self.aporte = int(self.lineEdit_1.text())*int(self.lineEdit_2.text())/100
self.label_aporte.setText(str(self.aporte))
self.anos = float(self.lineEdit_5.text())-float(self.lineEdit_4.text())
self.label_pension.setText(str(self.anos))
self.ingreso = float(self.lineEdit_1.text())
self.cotiza = float(self.lineEdit_2.text())
self.edad = float(self.lineEdit_3.text())
self.edad_j = float(self.lineEdit_4.text())
self.esp_vida = float(self.lineEdit_5.text())
self.fondo_elegido = self.comboBox.itemText(self.comboBox.currentIndex())
rango = calcular_jub(self.ingreso,self.cotiza, self.edad, self.edad_j, self.esp_vida, self.fondo_elegido)
self.label_rango.setText(rango)
print("termino")
except Exception as err:
print(err)
if __name__ == '__main__':
app = QtGui.QApplication([])
form = MainWindow()
form.show()
app.exec_()
| UTF-8 | Python | false | false | 2,335 | py | 140 | main.py | 90 | 0.57455 | 0.561268 | 0 | 61 | 37.262295 | 121 |
3lizabethhh/volume-calculator | 4,870,492,950,755 | 1936712eb8eaf30ba2aa9db10272f25319849127 | 88160a6a18fb78040f29e21b65cac539d9fb3962 | /VolumeCalculator/main.py | adaf944044e3a8e9283cd819fb1a91129c7f7242 | []
| no_license | https://github.com/3lizabethhh/volume-calculator | 9a9ac5e264a3fe6e7605c9f863e3cdc70e94f1fb | fb1ddab3b6134a674d76e789f7af3095835a9227 | refs/heads/master | 2020-05-29T17:52:59.564747 | 2019-05-29T20:37:54 | 2019-05-29T20:37:54 | 189,287,560 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Dongzheng (Elizabeth) Xu
# This program handles prompting, input, and output for different shapes.
# Last edited: October 23 2018
from VolumeCalculator import volumes # imports volume calculations
quitCalculating = False # sentinel value to quit/end session
# lists volumes calculated of different shapes
cubes = []
pyramids = []
ellipsoids = []
# function that prints out shape categories in a certain format
def print_lists(chosen_shape, shape_list):
print("{}: ".format(chosen_shape), end="") # print shape category
if len(shape_list) < 1: # no shapes entered
print("No shapes entered.")
else: # shapes entered
shape_list.sort() # sort calculated volumes from lowest to highest
for x in range(len(shape_list)):
print("{:0.1f}".format(shape_list[x]), end="") # print each value in that category
if x < (len(shape_list) - 1): # print comma after each element except last element
print(", ", end="")
else:
print("") # starts new line for next shape
# prompts for shape and calculates volume
while not quitCalculating:
print(
"____________________________________________________") # Line to separate each shape calculation for easy reading
shape = input("Enter shape: ").lower() # prompt user to enter shape
# calculate the volume of selected shape using functions from volumes.py
if shape == "cube" or shape == "c":
shape = "cube"
sideLength = int(input("Please input length of cube:"))
calculatedVolume = volumes.cube(sideLength) # calculate volume
print("The volume of a cube with sides {:0.1f} is: {:0.1f}. ".format(sideLength, calculatedVolume))
cubes.append(calculatedVolume) # record calculated volume to cube list
elif shape == "p" or shape == "pyramid":
shape = "pyramid"
base = int(input("Please input base of pyramid: "))
height = int(input("Please input height of pyramid: "))
calculatedVolume = volumes.pyramid(base, height) # calculate volume
print("The volume of a pyramid with base {:0.1f} and height {:0.1f} is: {:0.1f}. ".format(base, height,calculatedVolume))
pyramids.append(calculatedVolume) # record calculated volume to pyramid list
elif shape == "e" or shape == "ellipsoid":
shape = "ellipsoid"
r1 = int(input("Please input 1st radius:"))
        r2 = int(input("Please input 2nd radius:"))
        r3 = int(input("Please input 3rd radius:"))
calculatedVolume = volumes.ellipsoid(r1, r2, r3) # calculate volume
print("The volume of an ellipsoid with radii {:0.1f},{:0.1f} and {:0.1f} is: {:.1f}. ".format(r1, r2, r3,calculatedVolume))
ellipsoids.append(calculatedVolume) # record calculated volume to ellipsoid list
# if user quits, print out all the calculated volumes for each shape category in sorted order
elif shape == "q" or shape == "quit":
print("You have reached the end of your session.")
if (len(cubes) == 0 and len(ellipsoids) == 0 and len(pyramids) == 0): # message if user didn't calculate any volumes
print("You did not perform any volume calculations.")
else:
print("The volumes calculated for each shape are:")
# prints each shape list
print_lists("Cube", cubes)
print_lists("Pyramid", pyramids)
print_lists("Ellipsoid", ellipsoids)
quitCalculating = True # exits while loop
else:
print("ERROR: Invalid shape input.") # Error message if invalid shape input
| UTF-8 | Python | false | false | 3,695 | py | 3 | main.py | 2 | 0.623275 | 0.611908 | 0 | 74 | 47.932432 | 131 |
mstroehle/PyFlow | 9,552,007,298,848 | 23b0cad266ca715a676d4c346a7c85a41433c062 | 2473a471d2699142d5ac6cde399e785ed97d3e59 | /PyFlow/Packages/PyflowBase/Nodes/pythonNode.py | cb57e459ae9525b8949f013c18ff307da2fc3f70 | [
"MIT"
]
| permissive | https://github.com/mstroehle/PyFlow | a65caa344bf53256afa031a19d7a17103ca327f9 | 905e13c503816962c6502159b749bdec2654522c | refs/heads/master | 2020-09-02T13:41:14.590659 | 2019-06-06T14:27:09 | 2019-06-06T14:27:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import weakref
import uuid
from types import MethodType
from collections import OrderedDict
from Qt import QtGui
from Qt.QtWidgets import QPushButton
from Qt.QtWidgets import QGraphicsProxyWidget
from Qt.QtWidgets import QMenu
from PyFlow.Core.Common import *
from PyFlow.UI.Utils.Settings import *
from PyFlow.Core.NodeBase import NodeBase
from PyFlow import getPinDefaultValueByType
from PyFlow.Core.PyCodeCompiler import Py3CodeCompiler
class pythonNode(NodeBase):
def __init__(self, name):
super(pythonNode, self).__init__(name)
self._nodeData = ''
self.bCacheEnabled = False
@property
def nodeData(self):
return self._nodeData
@nodeData.setter
def nodeData(self, codeString):
try:
self._nodeData = codeString
# compile and get symbols
mem = Py3CodeCompiler().compile(codeString)
# clear node pins
for i in list(self.inputs.values()):
i.kill()
for o in list(self.outputs.values()):
o.kill()
# define pins
pinsDefinitionFunction = mem["prepareNode"]
pinsDefinitionFunction(self)
self.autoAffectPins()
# assign compute code
computeFunction = mem["compute"]
def nodeCompute(*args, **kwargs):
computeFunction(self)
self.compute = MethodType(nodeCompute, self)
self.bCallable = self.isCallable()
except Exception as e:
print(e)
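    # The script assigned to `nodeData` must define two module-level functions,
    # `prepareNode(node)` and `compute(node)`, which are looked up by name above.
    # A minimal sketch of such a script (the pin-helper calls below are
    # illustrative assumptions, not taken from this file):
    #
    #     def prepareNode(node):
    #         node.inp = node.createInputPin('inp', 'IntPin')
    #         node.out = node.createOutputPin('out', 'IntPin')
    #
    #     def compute(node):
    #         node.out.setData(node.inp.getData() * 2)
    #
    # prepareNode runs immediately to (re)build the pins, and compute is bound
    # as the node's compute method.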
@staticmethod
def pinTypeHints():
return {'inputs': [], 'outputs': []}
def serialize(self):
default = super(pythonNode, self).serialize()
default['nodeData'] = self.nodeData
return default
def postCreate(self, jsonTemplate=None):
super(pythonNode, self).postCreate(jsonTemplate)
if jsonTemplate is None:
return
if 'nodeData' in jsonTemplate:
self.nodeData = jsonTemplate['nodeData']
for inpJson in jsonTemplate['inputs']:
pin = self.getPin(inpJson["name"])
pin.deserialize(inpJson)
for outJson in jsonTemplate['outputs']:
pin = self.getPin(outJson["name"])
pin.deserialize(outJson)
self.autoAffectPins()
@staticmethod
def category():
return 'Common'
@staticmethod
def keywords():
return ['Code', 'Expression', 'py']
@staticmethod
def description():
return 'Python script node'
| UTF-8 | Python | false | false | 2,550 | py | 13 | pythonNode.py | 12 | 0.613725 | 0.612941 | 0 | 95 | 25.842105 | 56 |
n-hutton/ubuntu-setup | 1,709,397,002,997 | 695216169a5e62818c533f38a19db776a9a10712 | 7e799e7b4b0f4a73107a91e7f0e631686cfc9ee3 | /scripts/jenkins-destroy.py | 7db96b643c53a44783ffdfc7eb3e3e9dde425383 | []
| no_license | https://github.com/n-hutton/ubuntu-setup | 9831e61c053d2ee67c35a3262972fb0ee3d85a0d | 7ce6d7c71f621f3099ac833d55a8e6cd56e2b135 | refs/heads/master | 2023-08-10T00:22:49.458353 | 2023-07-21T11:13:09 | 2023-07-21T11:13:09 | 25,432,488 | 0 | 0 | null | false | 2022-10-13T15:01:46 | 2014-10-19T16:21:16 | 2022-05-27T14:51:08 | 2022-10-13T15:01:45 | 3,590 | 0 | 0 | 0 | Vim Snippet | false | false | #!/usr/bin/env python3
import requests
import sys
import os
def to_json(r):
if 200 <= r.status_code < 300:
return r.json()
print(r)
print(r.status_code)
print(r.text)
sys.exit(1)
s = requests.session()
auth = ('ejfitzgerald', os.environ['JENKINS_TOKEN'])
data = to_json(s.get('https://jenkins.economicagents.com/queue/api/json', auth=auth))
queued_items = set()
for item in data['items']:
queued_items.add(item['id'])
for item in queued_items:
#print(f'Cancelling {item}...')
params = {
'id': item
}
r = s.post('https://jenkins.economicagents.com/queue/cancelItem', params=params, auth=auth)
print(r)
| UTF-8 | Python | false | false | 623 | py | 23 | jenkins-destroy.py | 16 | 0.678973 | 0.666132 | 0 | 31 | 19.096774 | 92 |
shafferm/SCNIC | 14,645,838,500,411 | 88621c86d7b5d89f1f932ff9e8cbfc7223fb9c78 | 682c17bff855b7c1c9897cdc5d0571a1bddab136 | /SCNIC/between_correls.py | 971699159da54b7e050fefe1acbe5aebc9470caa | [
"BSD-3-Clause"
]
| permissive | https://github.com/shafferm/SCNIC | ccacde538f5cc83c491b3382c8567395a441a208 | 0407b8725288adb99ea531def442eea0c14ddebe | refs/heads/master | 2023-03-06T06:46:47.835211 | 2021-07-22T14:45:14 | 2021-07-22T14:45:14 | 34,074,242 | 18 | 16 | BSD-3-Clause | false | 2021-05-31T20:51:32 | 2015-04-16T18:49:00 | 2021-05-27T08:27:38 | 2021-05-31T20:51:31 | 287 | 12 | 12 | 11 | Python | false | false | """
Workflow script for finding correlations between the observations of two biom tables and building a network from
the filtered correlations.
"""
import os
from os import path
from biom import load_table
from scipy.stats import spearmanr, pearsonr
import networkx as nx
import numpy as np
import shutil
from SCNIC import general
from SCNIC import correlation_analysis as ca
__author__ = 'shafferm'
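# Example (illustrative only; the file names and parameter values below are
# assumptions, not taken from this package): the function defined in this module
# can be called directly, e.g.
#
#     between_correls('bacteria.biom', 'metabolites.biom', 'between_output',
#                     min_r=0.35, correl_method='spearman', procs=4)
#
# which writes correls.txt and crossnet.gml into the output directory.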
# TODO: output heat map with clusters
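# Thin wrapper that pins scipy's spearmanr to an explicit two-argument (x, y)
# call so it presents the same signature as pearsonr.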
_spearmanr = spearmanr
def spearmanr(x, y):
return _spearmanr(x, y)
def between_correls(table1, table2, output_loc, max_p=None, min_r=None, correl_method='spearman', sparcc_filter=False,
min_sample=None, p_adjust='fdr_bh', procs=1, force=False):
"""TABLES MUST SORT SO THAT SAMPLES ARE IN THE SAME ORDER """
logger = general.Logger(path.join(output_loc, "SCNIC_log.txt"))
logger["SCNIC analysis type"] = "between"
# correlation and p-value adjustment methods
correl_methods = {'spearman': spearmanr, 'pearson': pearsonr}
correl_method = correl_methods[correl_method]
# load tables
table1 = load_table(table1)
table2 = load_table(table2)
    logger["input table 1"] = table1
    logger["input table 2"] = table2
table1 = table1.sort()
table2 = table2.sort()
# make new output directory and change to it
if force and output_loc is not None:
shutil.rmtree(output_loc, ignore_errors=True)
if output_loc is not None:
os.makedirs(output_loc)
logger["output directory"] = output_loc
# filter tables
if sparcc_filter is True:
table1 = general.sparcc_paper_filter(table1)
table2 = general.sparcc_paper_filter(table2)
print("Table 1 filtered: %s observations" % str(table1.shape[0]))
print("Table 2 filtered: %s observations" % str(table2.shape[0]))
logger["sparcc paper filter"] = True
logger["number of observations present in table 1 after filter"] = table1.shape[0]
logger["number of observations present in table 2 after filter"] = table2.shape[0]
if min_sample is not None:
table1 = general.filter_table(table1, min_sample)
table2 = general.filter_table(table2, min_sample)
if not np.array_equal(table1.ids(), table2.ids()):
raise ValueError("Tables have different sets of samples present")
metadata = general.get_metadata_from_table(table1)
metadata.update(general.get_metadata_from_table(table2))
# make correlations
logger["correlation metric"] = correl_method
logger["p adjustment method"] = p_adjust
correls = ca.between_correls_from_tables(table1, table2, correl_method, nprocs=procs)
correls.sort_values(correls.columns[-1], inplace=True)
correls['p_adj'] = general.p_adjust(correls['p'], method=p_adjust)
correls.to_csv(open(path.join(output_loc, 'correls.txt'), 'w'), sep='\t', index=True)
# make network
correls_filt = general.filter_correls(correls, min_r=min_r)
net = general.correls_to_net(correls_filt, metadata=metadata)
logger["number of nodes"] = net.number_of_nodes()
logger["number of edges"] = net.number_of_edges()
nx.write_gml(net, path.join(output_loc, 'crossnet.gml'))
logger.output_log()
| UTF-8 | Python | false | false | 3,217 | py | 28 | between_correls.py | 22 | 0.68853 | 0.675474 | 0 | 87 | 35.977011 | 118 |
ThibaultDataDev/SmartTweet | 2,156,073,620,030 | 29113d59ab57673f3767229e7e66907a454a2eaa | 3a67db861cf3c5c744c30b78154186516a38b54b | /app/apipart/azure_analyze.py | 76b0ac26362c313063fe9628c605a0a61ca32a2a | []
| no_license | https://github.com/ThibaultDataDev/SmartTweet | ce00e3431a53403cd54b0aaf222215c74acb7c10 | 9507cc1add1e892d6f85e7c7e02cb49ce6fc1f7f | refs/heads/master | 2022-12-10T06:14:10.525958 | 2020-09-17T19:34:19 | 2020-09-17T19:34:34 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
import requests
from pprint import pprint
#import tweepy_test
import twitter_credentials as tc
import azure
from azure.ai.textanalytics import TextAnalyticsClient
from azure.core.credentials import AzureKeyCredential
keys = tc.Settings
endpoint = keys.AZURE_ENDPOINT
api_version = '?api-version=2020-06-30'
headers = {'Content-Type': 'application/json',
'api-key': keys.API_AZURE_KEY}
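# NOTE: api_version and headers above are not used below; they would only be
# needed if the Text Analytics REST endpoint were called directly with
# `requests` instead of the azure-ai-textanalytics SDK client.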
def authenticate_client():
ta_credential = AzureKeyCredential(keys.API_AZURE_KEY)
text_analytics_client = TextAnalyticsClient(
endpoint=endpoint, credential=ta_credential)
return text_analytics_client
client = authenticate_client()
def sentiment_analysis_example(client):
with open('tweets.json') as json_data:
data = json.load(json_data)
for tweet in data:
tweetlist = [tweet['Texte']]
response = client.analyze_sentiment(documents = tweetlist)[0]
print("Document Sentiment: {}".format(response.sentiment))
print("Overall scores: positive={0:.2f}; neutral={1:.2f}; negative={2:.2f} \n".format(
response.confidence_scores.positive,
response.confidence_scores.neutral,
response.confidence_scores.negative,
))
for idx, sentence in enumerate(response.sentences):
print("Sentence: {}".format(sentence.text))
print("Sentence {} sentiment: {}".format(idx+1, sentence.sentiment))
print("Sentence score:\nPositive={0:.2f}\nNeutral={1:.2f}\nNegative={2:.2f}\n".format(
sentence.confidence_scores.positive,
sentence.confidence_scores.neutral,
sentence.confidence_scores.negative,
))
sentiment_analysis_example(client) | UTF-8 | Python | false | false | 1,817 | py | 25 | azure_analyze.py | 18 | 0.647771 | 0.635663 | 0 | 49 | 36.102041 | 102 |
Viola8/Python-NLP-Libraries | 5,205,500,403,923 | bb360462b78de7abf7298fe7ffe10a51523b7d86 | 735c3039f60c0dc02c10e773ef3994c181771f27 | /numpy8_9.py | 9b7494306c7bc3564615c80d468bc168070ec14d | []
| no_license | https://github.com/Viola8/Python-NLP-Libraries | 1733059d8b8fe492312d148717e2247d9b2dec0d | caa3d1e568a21b0e59d588295040805a28a74b80 | refs/heads/main | 2023-05-01T05:43:35.711720 | 2021-04-23T04:46:28 | 2021-04-23T04:46:28 | 308,936,854 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 8 Write a NumPy program to create a 2d array with 1 on the border and 0 inside.
import numpy as np
a = np.ones((5,7))
a[1:-1,1:-1] = 0
print(a)
# 9 Write a NumPy program to add a border (filled with 0's) around an existing array.
import numpy as np
a = np.ones((3,3))
# numpy.pad(array, pad_width, mode='constant', **kwargs)
print(np.pad(a, pad_width=1, mode='constant', constant_values=0))
# constant_values : sequence or scalar, optional
# Used in ‘constant’. The values to set the padded values for each axis.
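# Note (addition, not part of the original exercise text): np.pad can also solve
# exercise 8 in one call, since padding a zero array with ones gives the same
# "ones border, zeros inside" result:
# print(np.pad(np.zeros((3, 5)), pad_width=1, mode='constant', constant_values=1))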
| UTF-8 | Python | false | false | 537 | py | 35 | numpy8_9.py | 34 | 0.681051 | 0.649156 | 0 | 16 | 31.3125 | 86 |