repo_name stringlengths 7..111 | __id__ int64 16.6k..19,705B | blob_id stringlengths 40..40 | directory_id stringlengths 40..40 | path stringlengths 5..151 | content_id stringlengths 40..40 | detected_licenses list | license_type stringclasses 2 values | repo_url stringlengths 26..130 | snapshot_id stringlengths 40..40 | revision_id stringlengths 40..40 | branch_name stringlengths 4..42 | visit_date timestamp[ns] | revision_date timestamp[ns] | committer_date timestamp[ns] | github_id int64 14.6k..687M ⌀ | star_events_count int64 0..209k | fork_events_count int64 0..110k | gha_license_id stringclasses 12 values | gha_fork bool 2 classes | gha_event_created_at timestamp[ns] | gha_created_at timestamp[ns] | gha_updated_at timestamp[ns] | gha_pushed_at timestamp[ns] | gha_size int64 0..10.2M ⌀ | gha_stargazers_count int32 0..178k ⌀ | gha_forks_count int32 0..88.9k ⌀ | gha_open_issues_count int32 0..2.72k ⌀ | gha_language stringlengths 1..16 ⌀ | gha_archived bool 1 class | gha_disabled bool 1 class | content stringlengths 10..2.95M | src_encoding stringclasses 5 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 10..2.95M | extension stringclasses 19 values | num_repo_files int64 1..202k | filename stringlengths 4..112 | num_lang_files int64 1..202k | alphanum_fraction float64 0.26..0.89 | alpha_fraction float64 0.2..0.89 | hex_fraction float64 0..0.09 | num_lines int32 1..93.6k | avg_line_length float64 4.57..103 | max_line_length int64 7..931 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
sunilvarma9697/Code-Basics | 12,068,858,147,000 | 8a67307028cafd54ce24f7d65a9e71565d831190 | 3e62a4bad85222dcf9eefa393500e4eba6d963d2 | /Fibonacci.py | d57f6f427cb6b79ae2b0860c71ae8cd0d001c481 | []
| no_license | https://github.com/sunilvarma9697/Code-Basics | 48c8f7085b9d85e638bf6f40567f97f052bdcaef | 45595ea3de1b938dd5682575d25b2a6b76b2e1c4 | refs/heads/main | 2023-06-19T23:12:14.883542 | 2021-07-21T15:56:00 | 2021-07-21T15:56:00 | 388,168,707 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Fibonacci - the Fibonacci sequence is a series of numbers that starts with zero and one.
def fib(n):
    a = 0
    b = 1
    if n == 1:
        print(a)
        return
    print(a)
    print(b)
    for i in range(2, n):
        c = a + b
        a = b
        b = c
        print(c)
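# Added check (not in the original): fib(10) below prints the first ten
# Fibonacci numbers: 0 1 1 2 3 5 8 13 21 34.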
fib(10) | UTF-8 | Python | false | false | 232 | py | 38 | Fibonacci.py | 37 | 0.530172 | 0.50431 | 0 | 16 | 12.625 | 75 |
poppindouble/AlgoFun | 9,251,359,594,508 | 7d478f397e6c09c366e7a474add3a2829ceeb832 | 79732fa5d9c93e1362cbad8cfcb5f5d31fae596c | /repeat_substring_pattern.py | a58647a496bd0fbcb5d5aef8a088c65b23129af6 | []
| no_license | https://github.com/poppindouble/AlgoFun | c4bb78ab9338b8b685ae8b27b55de6a62862495c | f35c61b715cd436c99037c4e6ed824dfcba3ec15 | refs/heads/master | 2021-09-10T05:52:08.666243 | 2018-03-21T09:38:55 | 2018-03-21T09:38:55 | 112,266,753 | 1 | 0 | null | false | 2018-02-21T22:28:42 | 2017-11-28T00:54:16 | 2017-11-28T00:56:29 | 2018-02-21T22:28:42 | 182 | 0 | 0 | 0 | Python | false | null | class Solution:
    def repeatedSubstringPattern(self, s):
        if len(s) < 2:
            return False
        # Build the KMP failure table: next[j] is the length of the longest
        # proper prefix of s[:j+1] that is also its suffix.
        next = [0] * len(s)
        i, j = 0, 1
        while j < len(s):
            if s[j] == s[i]:
                next[j] = i + 1
                i += 1
                j += 1
            else:
                if i == 0:
                    next[j] = 0
                    j += 1
                else:
                    i = next[i - 1]
        # s is built from a repeated substring iff the shortest period
        # (len(s) - next[-1]) divides the total length evenly.
        return next[-1] > 0 and len(s) % (len(s) - next[-1]) == 0
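# Added worked example (not in the original): for "abaababaab" the failure
# table ends with next[-1] == 5, so the shortest period is 10 - 5 = 5 and
# 10 % 5 == 0; the string is "abaab" repeated, and the call below prints True.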
def main():
    print(Solution().repeatedSubstringPattern("abaababaab"))

if __name__ == "__main__":
    main() | UTF-8 | Python | false | false | 457 | py | 190 | repeat_substring_pattern.py | 190 | 0.496718 | 0.463895 | 0 | 24 | 19.083333 | 59 |
Diarukia/metaheuristic_regression | 6,150,393,190,996 | 7ec8d67f0e59ae599982daf639b27495998505c7 | e4c37f6a9bf6c27298a389e1243051669042da40 | /regression_framework/functions/hartman_family_function_1.py | 53729b0a841aeac11beef74185bf1351f838bcc7 | []
| no_license | https://github.com/Diarukia/metaheuristic_regression | 47f2a909bca4103522948af4828e791993085b80 | 9933e3a955ca453414206a6019ff57a30a6df7c4 | refs/heads/main | 2023-08-15T01:38:47.331380 | 2021-09-25T19:48:17 | 2021-09-25T19:48:17 | 344,648,766 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from regression_framework.functions.base_function import Base_function
import numpy as np
import random
import math
class Hartman_family_function_1(Base_function):
    def __init__(self, name='Hartman_family_function_1', lower_bound=0, upper_bound=1, dimension=30):
        super().__init__(name, lower_bound, upper_bound, dimension)

    def get_fitness(self, value):
        x = value
        a = [[3, 10, 30],
             [0.1, 10, 35],
             [3, 10, 30],
             [0.1, 10, 30]]
        c = [1, 1.2, 3, 3.2]
        p = [[0.3689, 0.1170, 0.2673],
             [0.4699, 0.4387, 0.7470],
             [0.1091, 0.8732, 0.5547],
             [0.03815, 0.5743, 0.8828]]
        # Renamed from `sum`/`sumint` to avoid shadowing the builtin sum().
        total = 0
        for i in range(4):
            inner = 0
            for j in range(3):
                inner += a[i][j] * ((x[j] - p[i][j]) ** 2)
            total += c[i] * math.exp(-inner)
        return -total
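    # Added reference (assumed form of the Hartman 3-D objective above):
    #   f(x) = -sum_{i=1..4} c_i * exp(-sum_{j=1..3} a_ij * (x_j - p_ij)**2)
    # so values near the global minimum are negative, which is why
    # get_fitness returns the negated total.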
    def random_solution(self):
        # Uniform sample within [lower_bound, upper_bound]; the original
        # expression random()*(2*upper_bound) - lower_bound could leave the bounds.
        return self.lower_bound + random.random() * (self.upper_bound - self.lower_bound)
| UTF-8 | Python | false | false | 1,051 | py | 36 | hartman_family_function_1.py | 36 | 0.528069 | 0.43197 | 0 | 32 | 30.90625 | 105 |
La0bALanG/Spider_Codes | 12,481,174,965,162 | bda5194215209e2f13fb7a24c122ed7dac070c07 | 069e82d0400913682bf5ff1ffa60a62422610b83 | /demo11_GetGovementAdministrativeArea_OOP.py | 7063a7647143b384a3b3cae3c24e5426be9676f2 | []
| no_license | https://github.com/La0bALanG/Spider_Codes | ae9aa6a97c15d0aeeb72702f6a6b3b8caa7b107d | 72ce5b72b0baf1de7d8fecac4805d4711dc6409f | refs/heads/master | 2022-12-30T13:06:13.937923 | 2020-10-13T09:18:54 | 2020-10-13T09:18:54 | 298,497,349 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding:utf-8 _*-
"""
@version:
author:安伟超
@time: 2020/09/14
@file: demo11_GetGovementAdministrativeArea_OOP.py.py
@environment:virtualenv
@email:awc19930818@outlook.com
@github:https://github.com/La0bALanG
@requirement:
"""
import requests
import threading
from lxml import etree
import re
from fake_useragent import UserAgent
"""
目标
1.目标url:http://www.mca.gov.cn/ - 民政数据 - 行政区划代码
即: http://www.mca.gov.cn/article/sj/xzqh/2019/
2.目标数据: 抓取最新中华人民共和国县以上行政区划代码
实现思路:
1.分析页面及url
1.请求民政部官网,进入-民政数据-行政区划代码 选项
2.F12开启控制台,抓手工具抓到:
2020年7月份县以上行政区划代码
2020-09-08
2020年7月份县以下区划变更情况
2020-09-08
2020年6月份县以上行政区划代码
2020-08-31
2020年6月份县以下区划变更情况
2020-08-31
2020年5月份县以上行政区划代码
3.查看elements:
<a class="artitlelist" href="/article/sj/xzqh/2020/202009/20200900029334.shtml" target="_blank" title="2020年7月份县以上行政区划代码">2020年7月份县以上行政区划代码</a>
目标链接在上述a标签的href属性
但!这是假链接!
测试:
现在向该链接(假链接)直接请求行政区划代码详情页,会直接跳转为一个.html页面(真链接)的响应内容,在控制台的response中确实能看到所需数据,但此为.html页面的相应内容,使用python代码直接请求假页面得到的response却根本没有所需数据!
但是,在假页面的响应内容中,存在如下一段js代码:
<script>
window.location.href="http://www.mca.gov.cn//article/sj/xzqh/2020/2020/20200908007001.html";
</script>
我们真正想要的真链接其实在这里!
没错,这也是常见的一种反爬手段,通过伪造url隐藏真实请求的url。
但,又能奈我何!请求假页面,提取js中的真链接不就行了?
2.拿到真链接。
3.请求真链接的response。
4.解析,持久化存储
实现步骤:
1.请求行政区划代码页面,解析html,得到假url;
2.请求假链接页面response,解析js脚本,获取真实链接;
3.向真实链接请求最终数据
4.解析,持久化存储
"""
class GetGovementAdministrativeAreaSpider(object):
'''
面向对象思路:
1.拆分功能细节。整体程序可拆分为:
1.发请求获得页面
2.解析页面
3.持久化存储(写入文件保存)
2.结合开闭原则,封装功能方法为私有方法,对外提供统一公共接口
3.采用单例模式:假设本爬虫程序在多个时间、不同情况下多次使用,单例模式实现只创建一个对象,提升性能避免内存占用过高。
'''
_instance_lock = threading.Lock()
# 单例模式实现:__new__方法
def __new__(cls, *args, **kwargs):
if not hasattr(GetGovementAdministrativeAreaSpider, '_instance'):
with GetGovementAdministrativeAreaSpider._instance_lock:
if not hasattr(GetGovementAdministrativeAreaSpider, '_instance'):
GetGovementAdministrativeAreaSpider._instance = object.__new__(cls)
return GetGovementAdministrativeAreaSpider._instance
def __init__(self):
self.__url = 'http://www.mca.gov.cn/article/sj/xzqh/2020/'
self.__headers = {
'User-Agent':UserAgent().random
}
@property
def url(self):
return self.__url
@property
def headers(self):
return self.__headers
def __get_html(self,url,headers):
return requests.get(url=url,headers=headers)
#请求一级页面,解析、构造假链接
def __get_false_url(self):
html = self.__get_html(self.__url,self.__headers).text
dom = etree.HTML(html)
#只需要最新的,第一个肯定是最新的
a = dom.xpath('//a[@class="artitlelist"]')[0]
#获取节点对象的title属性
title = a.get('title')
#解析所有以“代码”结尾的title
if title.endswith('代码'):
return 'http://www.mca.gov.cn' + a.get('href')#获取节点对象的href属性
def __get_true_url(self,false_url):
# 先获取假链接的响应,然后根据响应获取真链接
html = self.__get_html(url=false_url,headers=self.__headers).text
# 利用正则提取真实链接
return re.findall(r'window.location.href="(.*?)"',html)[0]
#请求真实页面,解析数据
def __parse_html(self,true_url):
html = self.__get_html(url=true_url,headers=self.__headers).text
dom = etree.HTML(html)
tr_lists = dom.xpath('//tr[@height="19"]')
for tr in tr_lists:
code = tr.xpath('./td[2]/text()')
name = tr.xpath('./td[3]/text()')
if len(code) and len(name) != 0:
self.__save_html(code[0].strip(),name[0].strip())
else:
pass
def __save_html(self,code,name):
print('开始写入行政区划代码...')
with open('Area_code.txt','a') as f:
print('写入%s...'%name)
f.write(code + ':' + name + '\n')
print('写入完毕.')
def display(self):
self.__parse_html(self.__get_true_url(self.__get_false_url()))
def test():
GetGovementAdministrativeAreaSpider().display()
if __name__ == '__main__':
test() | UTF-8 | Python | false | false | 5,786 | py | 34 | demo11_GetGovementAdministrativeArea_OOP.py | 31 | 0.597831 | 0.558376 | 0 | 168 | 24.803571 | 147 |
davidlrnt/civiccheckin | 1,623,497,639,753 | 1644f6a695c75b0c39145fb14093b01ab63c638d | dd914c37550adba54791e4d72bc70f4273ab7061 | /run.py | 45ea4b7ff79f9f39eacdcc1abe1c1ba334cdf591 | []
| no_license | https://github.com/davidlrnt/civiccheckin | 3a34033bc2d737196ff48678d7de98703f5175ac | ec990c00512ee7cd46bf8c5ee9761d5b5b0b3482 | refs/heads/master | 2020-12-24T17:44:50.995435 | 2015-08-27T18:58:57 | 2015-08-27T18:58:57 | 41,328,890 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
from app import app
from flask_cors import CORS  # flask.ext.* import paths were removed in modern Flask
cors = CORS(app)
if __name__ == "__main__":
    app.run(host='0.0.0.0', debug=True)
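# Added note: flask-cors' CORS(app) above enables cross-origin requests on
# all routes by default; passing resources={r"/api/*": {"origins": "..."}}
# (a hypothetical pattern) would narrow it to specific endpoints.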
| UTF-8 | Python | false | false | 155 | py | 2 | run.py | 1 | 0.651613 | 0.625806 | 0 | 8 | 18.375 | 36 |
DavidBarts/opstools | 8,048,768,718,233 | 415c5cfea4e8dda861280b4a1ff710fed3ce38b3 | 08eeceee31b0a76b2acfbf01ccacff40101d7cbe | /stopgap-alarm/lib/lazy-crawler/format-alert-f | 5d3ecd4195867893444664c1a533ab80a10fd1d9 | []
| no_license | https://github.com/DavidBarts/opstools | b14ac6ae51a11e87695f8268b8c795c33804bb2c | 3cf65b764371b66eaa139f648e481cacba355480 | refs/heads/master | 2015-07-13T04:19:03 | 2015-06-25T15:30:10 | 2015-06-25T15:30:10 | 7,896,303 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# Format a lazy-crawler alert, penalty box (and file-based) version.
# I m p o r t s
import os, sys
sys.path.append(os.path.join(os.environ['HOME'], 'kloshe-nanitch'))
from lazy_crawler_check import CLUSTERS
# F u n c t i o n s
def getdict(fn):
ret = {}
with open(fn, 'r') as fp:
while True:
line = fp.readline()
if line == '':
break
v, k = line.split()[:2]
ret[k] = v
return ret
# M a i n P r o g r a m
# Parse arguments
cluster = sys.argv[1]
past = getdict(sys.argv[2])
curr = getdict(sys.argv[3])
# Print a header
print cluster.upper(), "LAZY CRAWLERS:"
# Report changes
for i in range(1, CLUSTERS[cluster] + 1):
key = "%s-crawler-%02d" % (cluster, i)
p = past.get(key, "OK")
c = curr.get(key, "OK")
if c != p:
print "%s is now %s (was %s)" % (key, c, p)
elif c != "OK":
print "%s is still %s" % (key, c)
| UTF-8 | Python | false | false | 988 | 80 | format-alert-f | 74 | 0.543522 | 0.532389 | 0 | 43 | 21.976744 | 68 |
|
LITianpei999/HKU-MLB | 2,370,821,986,490 | dfba2859f516561f174fb2e881e2b03cc99fd92f | ee1e0af1398e1087d002fa3447728dcf1c4a4bfc | /MLB-GroupWork 12/Text data Extractor/path_txt.py | b098305ddffd585fdaef358e86a477e63dd63d56 | []
| no_license | https://github.com/LITianpei999/HKU-MLB | 0bddd92d64a53ba7cf884910d7afd472bfa7616e | 2211631891b55dc5ffef3232f159945739c7843e | refs/heads/main | 2023-01-24T02:12:44.679325 | 2020-11-29T11:57:24 | 2020-11-29T11:57:24 | 316,936,141 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import traceback
class PathError(Exception):
    def __init__(self, message, code):
        self.message = "PathError: " + message
        self.code = code
def check_path(path):
"""
Check path.
:param path: <str> Input path.
:return: <str> path
"""
if not os.path.exists(path):
raise PathError("directory path url %s is not exist." % path, 500)
if not os.path.isdir(path):
raise PathError("path url %s is not a directory." % path, 500)
if path[-1] == "\\":
path = path.strip("\\")
path = "\\" + path
return path
def cd(path):
"""
Traverse the directory and add the file path to the list.
:param path: <str> Valid path.
:return: <list> file_list
"""
cd_list = os.listdir(path)
file_list = []
for ele in cd_list:
temp_path = path + "\\" + ele
if os.path.isfile(temp_path):
file_list.append(temp_path)
else:
pre_list = cd(temp_path)
file_list.extend(pre_list)
return file_list
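# Added note: the recursive cd() above reimplements what the standard
# library already provides; an equivalent sketch using os.walk would be
#   file_list = [os.path.join(root, f)
#                for root, _, names in os.walk(path) for f in names]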
def print_files(files):
"""
Write path to txt file.
:param files: <list> file list.
:return: <None>
"""
open("txt_path.txt", "w").write("")
if len(files) == 0:
open("txt_path.txt", "w",encoding='utf-8').write("None")
print("write success.")
return
with open("txt_path.txt", "w",encoding='utf-8') as txt_files:
for file in files:
if file[-4:]==".txt":
txt_files.write(file + "\n")
txt_files.close()
print("write success.")
return
# main method
path = r"C:\Users\24508\Desktop\Resume&Job_Description\Original_Resumes"
try:
    path = check_path(path)
    files = cd(path)
    print_files(files)
except PathError as e:
    print(e.message + " errcode " + str(e.code))
    print("errmsg: \n%s" % traceback.format_exc()) | UTF-8 | Python | false | false | 1,969 | py | 10 | path_txt.py | 6 | 0.535805 | 0.527679 | 0 | 77 | 23.597403 | 75 |
DataDog/dd-agent | 15,616,501,110,935 | 2769e93a05bbde0e73ffdc3d85c20736d267ca10 | 8d585fa3b2419d9b993be2f2652e448cfeedc8b2 | /tests/core/test_watchdog.py | 1777d637e2302de282ba97cc27e7263f45855e40 | [
"BSD-3-Clause",
"BSD-2-Clause"
]
| permissive | https://github.com/DataDog/dd-agent | bd4ef0edb234293b51d30894a529ce94b37060f8 | 16fa4ec9ae11ca0adfffbd260c5b4899dc73509f | refs/heads/master | 2023-08-16T09:52:21.816487 | 2023-07-11T15:37:34 | 2023-07-11T15:37:34 | 1,210,071 | 1,227 | 991 | NOASSERTION | false | 2023-06-28T12:20:19 | 2010-12-31T03:02:47 | 2023-06-26T00:38:26 | 2023-06-28T12:20:19 | 44,713 | 1,287 | 854 | 191 | Python | false | false | # stdlib
from contextlib import contextmanager
from random import random, randrange
import os
import subprocess
import sys
import time
import unittest
import urllib as url
# 3p
from mock import patch
from nose.plugins.attrib import attr
# project
# needed because of the subprocess calls
sys.path.append(os.getcwd())
from ddagent import Application
from utils.watchdog import WatchdogPosix as Watchdog
class WatchdogKill(Exception):
"""
The watchdog attempted to kill the process.
"""
pass
@attr('unix')
@attr(requires='core_integration')
class TestWatchdog(unittest.TestCase):
"""
Test watchdog in various conditions
"""
JITTER_FACTOR = 2
@contextmanager
def set_time(self, time):
"""
Helper, a context manager to set the current time value.
"""
# Set the current time within `util` module
mock_time = patch("utils.timer.time.time")
mock_time.start().return_value = time
# Yield
yield
# Unset the time mock
mock_time.stop()
@patch.object(Watchdog, 'self_destruct', side_effect=WatchdogKill)
def test_watchdog_frenesy_detection(self, mock_restarted):
"""
Watchdog restarts the process on suspicious high activity.
"""
# Limit the restart timeframe for test purpose
Watchdog._RESTART_TIMEFRAME = 1
# Create a watchdog with a low activity tolerancy
process_watchdog = Watchdog(10, max_resets=3)
ping_watchdog = process_watchdog.reset
with self.set_time(1):
# Can be reset 3 times within the watchdog timeframe
for x in xrange(0, 3):
ping_watchdog()
# On the 4th attempt, the watchdog detects a suspiciously high activity
self.assertRaises(WatchdogKill, ping_watchdog)
with self.set_time(3):
# Gets back to normal when the activity timeframe expires.
ping_watchdog()
def test_watchdog(self):
"""
Verify that watchdog kills ourselves even when spinning
Verify that watchdog kills ourselves when hanging
"""
start = time.time()
try:
subprocess.check_call(["python", __file__, "busy"], stderr=subprocess.STDOUT)
raise Exception("Should have died with an error")
except subprocess.CalledProcessError:
duration = int(time.time() - start)
self.assertTrue(duration < self.JITTER_FACTOR * 5)
# Start pseudo web server
subprocess.Popen(["nc", "-l", "31834"])
start = time.time()
try:
subprocess.check_call(["python", __file__, "net"])
raise Exception("Should have died with an error")
except subprocess.CalledProcessError:
duration = int(time.time() - start)
self.assertTrue(duration < self.JITTER_FACTOR * 5)
# Normal loop, should run 5 times
start = time.time()
try:
subprocess.check_call(["python", __file__, "normal"])
duration = int(time.time() - start)
self.assertTrue(duration < self.JITTER_FACTOR * 5)
except subprocess.CalledProcessError:
self.fail("Watchdog killed normal process after %s seconds" % int(time.time() - start))
# Fast tornado, not killed
start = time.time()
p = subprocess.Popen(["python", __file__, "fast"])
p.wait()
duration = int(time.time() - start)
# should die as soon as flush_trs has been called
self.assertTrue(duration < self.JITTER_FACTOR * 10)
# Slow tornado, killed by the Watchdog
start = time.time()
p = subprocess.Popen(["python", __file__, "slow"])
p.wait()
duration = int(time.time() - start)
self.assertTrue(duration < self.JITTER_FACTOR * 4)
class MockTxManager(object):
    def flush(self):
        "Pretend to flush for a long time"
        time.sleep(5)
        sys.exit(0)
class MemoryHogTxManager(object):
    def __init__(self, watchdog):
        self._watchdog = watchdog

    def flush(self):
        rand_data = []
        while True:
            rand_data.append('%030x' % randrange(256**15))
            self._watchdog.reset()
class PseudoAgent(object):
"""Same logic as the agent, simplified"""
AGENT_CONFIG = {
"bind_host": "localhost",
'endpoints': {
'https://app.datadoghq.com': ['api_key']
},
'forwarder_timeout': 5
}
def busy_run(self):
w = Watchdog(5)
w.reset()
while True:
random()
def hanging_net(self):
w = Watchdog(5)
w.reset()
x = url.urlopen("http://localhost:31834")
print "ERROR Net call returned", x
return True
def normal_run(self):
w = Watchdog(2)
w.reset()
for i in range(5):
time.sleep(1)
w.reset()
def slow_tornado(self):
a = Application(12345, self.AGENT_CONFIG)
a._watchdog = Watchdog(4)
a._tr_manager = MockTxManager()
a.run()
def fast_tornado(self):
a = Application(12345, self.AGENT_CONFIG)
a._watchdog = Watchdog(6)
a._tr_manager = MockTxManager()
a.run()
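    def use_lots_of_memory(self):
        # Added sketch (assumed): no such method survived in this copy, but
        # the "memory" branch below calls it; MemoryHogTxManager exercises
        # the memory watchdog by allocating while resetting it.
        w = Watchdog(5)
        MemoryHogTxManager(w).flush()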
if __name__ == "__main__":
if sys.argv[1] == "busy":
a = PseudoAgent()
a.busy_run()
elif sys.argv[1] == "net":
a = PseudoAgent()
a.hanging_net()
elif sys.argv[1] == "normal":
a = PseudoAgent()
a.normal_run()
elif sys.argv[1] == "slow":
a = PseudoAgent()
a.slow_tornado()
elif sys.argv[1] == "fast":
a = PseudoAgent()
a.fast_tornado()
elif sys.argv[1] == "test":
t = TestWatchdog()
t.runTest()
elif sys.argv[1] == "memory":
a = PseudoAgent()
a.use_lots_of_memory()
| UTF-8 | Python | false | false | 5,903 | py | 185 | test_watchdog.py | 145 | 0.579705 | 0.568863 | 0 | 206 | 27.65534 | 99 |
MonoS/MonoS-VS-Func | 13,700,945,710,204 | 67fe5f1ca9feb5e945223e0a365ad403c1ecf87f | c51222c37dd1a7fc46f6f6d6cb63fd0b27623dc6 | /MFunc.py | 0b6a4e8658f3914a52702ab31ff8740e8e444a9f | []
| no_license | https://github.com/MonoS/MonoS-VS-Func | db9b8059dfb69f9728a92d479cd3d23fae4dc97d | 6472770a84e0e7d37b27eb529fbaf9d3b9cdf4cf | refs/heads/master | 2021-01-17T07:24:33.280890 | 2016-06-20T17:36:51 | 2016-06-20T17:36:51 | 31,260,763 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import vapoursynth as vs
import havsfunc as has
import nnedi3_resample as res
import os.path
def WriteVecs(vecs, prefix):
    core = vs.get_core()
    w = vecs[0].get_frame(0).width
    v = core.std.StackVertical([core.std.CropAbs(vec, width=w, height=1) for vec in vecs])
    log = open(prefix + ".len", "w")
    log.write(repr(w))
    log.close()
    return v
def ReadVecs(index, prefix, h):
    core = vs.get_core()
    f = open(prefix + ".len", "r")
    w = int(f.read())
    f.close()
    vecs = core.raws.Source(prefix + ".vec", w, h, src_fmt="Y8")
    v = core.std.CropAbs(vecs, y=index, height=1, width=w)
    return v
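# Added note: WriteVecs/ReadVecs cache MVTools analysis vectors on disk by
# stacking each vector clip into rows of one raw Y8 clip. A hypothetical
# first run ending in WriteVecs([bvec1, bvec2, fvec1, fvec2], "ep01") pairs
# with ReadVecs(0..3, "ep01", 4) on later runs, as Denoise2 does below.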
def Denoise2(src, denoise=400, blur=None, lsb=True, truemotion=True, chroma=True, fast=False, blksize=None, prefix=None, recalculate=None, thSAD=None):
    core = vs.get_core()

    if fast:
        if blksize is None:
            blksize = 32
        overlap = int(blksize/4)
    else:
        if blksize is None:
            blksize = 8
        overlap = int(blksize/2)

    if recalculate is None:
        recalculate = blksize
    if thSAD is None:
        thSAD = int(denoise * 1.25)

    pad = blksize + overlap
    src = core.fmtc.resample(src, src.width+pad, src.height+pad, sw=src.width+pad, sh=src.height+pad, kernel="point")
    src16 = Up16(src, lsb)
    super = core.mv.Super(src16, chroma=chroma)

    if prefix is not None:
        exist = os.path.exists(prefix + ".vec") and os.path.exists(prefix + ".len")
        create = not exist
    else:
        exist = False
        create = False

    if not exist or (blksize > recalculate):
        if blur is not None:
            blurred = core.generic.GBlur(src, blur)
            blurred = Up16(blurred, lsb)
        else:
            blurred = src16
        rep = has.DitherLumaRebuild(blurred, s0=1, chroma=chroma)
        superRep = core.mv.Super(rep, chroma=chroma)

    if not exist:
        bvec2 = core.mv.Analyse(superRep, isb = True, delta = 2, blksize=blksize, overlap=overlap, truemotion=truemotion, chroma=chroma)
        bvec1 = core.mv.Analyse(superRep, isb = True, delta = 1, blksize=blksize, overlap=overlap, truemotion=truemotion, chroma=chroma)
        fvec1 = core.mv.Analyse(superRep, isb = False, delta = 1, blksize=blksize, overlap=overlap, truemotion=truemotion, chroma=chroma)
        fvec2 = core.mv.Analyse(superRep, isb = False, delta = 2, blksize=blksize, overlap=overlap, truemotion=truemotion, chroma=chroma)
        if create:
            return WriteVecs([bvec1, bvec2, fvec1, fvec2], prefix)
    else:
        bvec1 = ReadVecs(0, prefix, 4)
        bvec2 = ReadVecs(1, prefix, 4)
        fvec1 = ReadVecs(2, prefix, 4)
        fvec2 = ReadVecs(3, prefix, 4)

    if blksize > recalculate and exist:
        bvec1 = core.std.Splice([core.std.BlankClip(bvec1, width=1, length=1), bvec1], mismatch=True).std.Trim(1)
        bvec2 = core.std.Splice([core.std.BlankClip(bvec2, width=1, length=1), bvec2], mismatch=True).std.Trim(1)
        fvec1 = core.std.Splice([core.std.BlankClip(fvec1, width=1, length=1), fvec1], mismatch=True).std.Trim(1)
        fvec2 = core.std.Splice([core.std.BlankClip(fvec2, width=1, length=1), fvec2], mismatch=True).std.Trim(1)

    while blksize > recalculate:
        blksize = int(blksize / 2)
        if fast:
            overlap = int(overlap / 4)
        else:
            overlap = int(overlap / 2)
        bvec1 = core.mv.Recalculate(superRep, bvec1, thSAD, blksize=blksize, chroma=chroma, truemotion=truemotion, overlap=overlap)
        bvec2 = core.mv.Recalculate(superRep, bvec2, thSAD, blksize=blksize, chroma=chroma, truemotion=truemotion, overlap=overlap)
        fvec1 = core.mv.Recalculate(superRep, fvec1, thSAD, blksize=blksize, chroma=chroma, truemotion=truemotion, overlap=overlap)
        fvec2 = core.mv.Recalculate(superRep, fvec2, thSAD, blksize=blksize, chroma=chroma, truemotion=truemotion, overlap=overlap)

    fin = core.mv.Degrain2(src16, super, bvec1,fvec1,bvec2,fvec2, denoise, plane = 4 if chroma else 0)
    fin = core.std.CropRel(fin, 0, pad, 0, pad)
    return fin
def GCResizer(src, w, h, Ykernel=None, UVkernel=None, Yinvks=False, UVinvks=None, Yinvkstaps=3, UVinvkstaps=None, Ytaps=4, UVtaps=None, css="420", sigmoid=True, curve="709", mat="709", scaleThr=1.0):
    core = vs.get_core()
    src16 = Up16(src)
    csp = vs.YUV444P16 if css == "444" else None

    if Ykernel is None:
        if Yinvks:
            Ykernel = "bilinear"
        else:
            Ykernel = "spline64"

    UVinvks = UVinvks if UVinvks is not None else Yinvks

    if UVkernel is None:
        if UVinvks:
            UVkernel = "bicubic"
        else:
            UVkernel = Ykernel

    UVinvkstaps = UVinvkstaps if UVinvkstaps is not None else Yinvkstaps
    UVtaps = UVtaps if UVtaps is not None else Ytaps

    resized = res.nnedi3_resample(src16, w, h, kernel=Ykernel, chromak_down=UVkernel, invks=Yinvks, chromak_down_invks=UVinvks, invkstaps=Yinvkstaps, chromak_down_invkstaps=UVinvkstaps, taps=Ytaps, chromak_down_taps=UVtaps, mats=mat, fulls=False, curves=curve, sigmoid=sigmoid, scale_thr=scaleThr, csp=csp)
    return resized
def MQTGMC(src, EZDenoise=None, lsb=None, TFF=True, half=False, fast=False):
    core = vs.get_core()
    if lsb is None:
        if fast is True:
            lsb = False
        else:
            lsb = True

    src16 = Up16(src, lsb)
    FPSDivisor = 2 if half else 1
    # Check MatchEnhance and/or Sharpness, since with SourceMatch the result seems less sharp
    # has.QTGMC(src16, Preset="Very Slow", SourceMatch=3, MatchPreset="Slow", MatchPreset2="Ultra Fast", Lossless=2, NoisePreset="Slow", TFF=TFF, EZDenoise=EZDenoise, FPSDivisor=FPSDivisor)
    # has.QTGMC(src16, Preset="Medium", SourceMatch=3, MatchPreset="Fast", MatchPreset2="Ultra Fast", Lossless=2, NoisePreset="Medium", TFF=TFF, EZDenoise=EZDenoise, FPSDivisor=FPSDivisor)
    if fast:
        result = has.QTGMC(src16, Preset="Medium", SourceMatch=3, MatchPreset="Fast", Lossless=2, NoisePreset="Medium", TFF=TFF, EZDenoise=EZDenoise, FPSDivisor=FPSDivisor)
    else:
        result = has.QTGMC(src16, Preset="Very Slow", SourceMatch=3, MatchPreset2="Slow", Lossless=2, NoisePreset="Slow", TFF=TFF, EZDenoise=EZDenoise, FPSDivisor=FPSDivisor)
    return result
def Up16(src, lsb=True):
    core = vs.get_core()
    src16 = src
    if (lsb is True) and (src.format.bits_per_sample < 16):
        src16 = core.fmtc.bitdepth(src, bits=16)
    return src16
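# Added usage sketch: Up16 is the bit-depth promotion helper used throughout
# this module, e.g. clip16 = Up16(clip) converts an 8-bit clip to 16-bit via
# fmtc.bitdepth, while Up16(clip, lsb=False) returns the clip unchanged.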
def ToRGB(src, mats="709"):
    core = vs.get_core()
    src16 = Up16(src)
    ris = res.nnedi3_resample(src16, src16.width, src16.height, mats=mats, curves="709", fulls=False, csp=vs.RGB48)
    return ris
def imwriSave(src, path, format="PNG24", first=0, dmode=7, lsb=False):
    core = vs.get_core()
    srcDown = src  # fixed: srcDown was undefined when no bit-depth reduction was needed
    if (lsb is False) and (src.format.bits_per_sample > 8):
        srcDown = core.fmtc.bitdepth(src, bits=8, dmode=dmode)
    core.std.LoadPlugin("C:/Program Files (x86)/VapourSynth/plugins64/imwri/imwri.dll")
    return core.imwri.Write(srcDown, format, path, firstnum=first)
def Source(file, lsb=False):
    core = vs.get_core()
    if file.startswith("file:///"):
        file = file[8::]

    if file.endswith(".d2v"):
        src = core.d2v.Source(file, nocrop=True)
    else:
        src = core.lsmas.LWLibavSource(file)

    src16 = Up16(src, lsb)
    return src16
| UTF-8 | Python | false | false | 6,812 | py | 1 | MFunc.py | 1 | 0.703758 | 0.673664 | 0 | 209 | 31.593301 | 303 |
markphuong/phylogenetics.targetcapture.pilot | 5,961,414,649,563 | 6368b4d3cbd3be60adb56375a11de3a22dd99ce9 | c26b3faec3e4fc89cc74e7f255491bbc5d402d1d | /1.5capsAssemble/mapping.py | 0f69a5a7d5e2e7268d35d1af4e8bbb835c9fd3fa | []
| no_license | https://github.com/markphuong/phylogenetics.targetcapture.pilot | 2518586f99f90e1f023798b3837f30dd025ae333 | 30b42c0fcb067407d50ee6e0d44c175e27525bdf | refs/heads/master | 2021-07-19T06:38:52.613520 | 2017-10-27T20:14:16 | 2017-10-27T20:14:16 | 108,591,118 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
#REQUIRES: bowtie2, samtools, picard and bcftools
#REQUIRES: a map file, with first column as sample ID, and second column as which fasta it goes to. The reason you have different fastas for different samples is because of divergent mtDNA genomes
#elements in the map file are separated by a tab
#This script aligns your paired and unpaired reads to a reference using bowtie2, and makes a pileup/VCF using samtools and bcftools
import os
import sys
import argparse
import multiprocessing
#this is a wrapper around bowtie2 and samtools where each sample identifier was "index#" where # was a number between 1 - 50
def get_args(): #arguments needed to give to this script
    parser = argparse.ArgumentParser(description="run bowtie2 alignment and samtools/bcftools variant calling")
    #forces required argument to let it run
    required = parser.add_argument_group("required arguments")
    required.add_argument("--map", help="textfile with samples to run and what fasta file to match it to", required=True) #A map file with the sample ID and the fasta file it goes to
    return parser.parse_args()
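# Added example (hypothetical names): each line of the --map file is expected
# to look like "sample01<TAB>reference.fasta"; note that main() below passes
# the whole stripped line to align() rather than splitting out the columns.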
def align(element):
    ID = element
    r1name = '_final1.fq' #extension of front reads
    r2name = '_final2.fq' #extension of back reads
    uname = '_finalunpaired.fq'
    variables = dict(
        sample = ID,
        read1 = '/nfs/LabShared/MarkPhuong/exonCapturePilot/1spadesAssemble/' + ID + r1name,
        read2 = '/nfs/LabShared/MarkPhuong/exonCapturePilot/1spadesAssemble/' + ID + r2name,
        unpaired = '/nfs/LabShared/MarkPhuong/exonCapturePilot/1spadesAssemble/' + ID + uname,
        out_paired = ID + '_out_paired',
        out_unpaired = ID + '_out_unpaired',
        outfile = ID + '_sorted'
    ) #name your output
commands = """
python makesomethingNotInterleaved.py {sample}_contigs.fasta {sample}_contigs.fasta.NI
bowtie2-build {sample}_contigs.fasta.NI {sample}_contigs
bowtie2 -x {sample}_contigs -1 {read1} -2 {read2} --local --very-sensitive-local --no-discordant -p 20 -S {out_paired}.sam > {sample}_paired.out 2> {sample}_paired.stderr
bowtie2 -x {sample}_contigs -U {unpaired} --local --very-sensitive-local -p 20 -S {out_unpaired}.sam > {sample}_unpaired.out 2> {sample}_unpaired.stderr
/home/analysis/bin/samtools-1.2/samtools view -bS -@ 20 {out_paired}.sam > {out_paired}.bam
/home/analysis/bin/samtools-1.2/samtools view -bS -@ 20 {out_unpaired}.sam > {out_unpaired}.bam
/home/analysis/bin/samtools-1.2/samtools merge -f {sample}.raw.bam {out_paired}.bam {out_unpaired}.bam
/home/analysis/bin/samtools-1.2/samtools sort -@ 20 {sample}.raw.bam {outfile}
/home/analysis/bin/samtools-1.2/samtools index {outfile}.bam
java -jar /home/analysis/Downloads/picard-tools-1.138/picard.jar MarkDuplicates I={sample}_sorted.bam O={sample}_md.bam REMOVE_DUPLICATES=FALSE ASSUME_SORTED=TRUE METRICS_FILE={sample}_md.metrics
/home/analysis/bin/samtools-1.2/samtools mpileup -d 1000000 -u -I -D -S -B -f {sample}_contigs.fasta.NI {sample}_md.bam | /home/analysis/bin/bcftools-1.2/bcftools call -c - > {sample}.vcf
""".format(**variables)
cmd_list = commands.split("\n")
for cmd in cmd_list:
os.system(cmd)
mylist = []
def main():
    args = get_args()
    #Make a list of lists, each list within the list will have the first and second elements of the map file that are separated by a tab
    with open(args.map) as rfile:
        for line in rfile:
            line = line.strip()
            align(line)
            # mylist.append(line.split("\t"))
    # pool = multiprocessing.Pool()
    # pool.map(align, mylist) #run the function with the arguments
if __name__ == "__main__": #run main over multiple processors
main()
| UTF-8 | Python | false | false | 3,514 | py | 25 | mapping.py | 25 | 0.730222 | 0.713717 | 0 | 77 | 44.545455 | 196 |
pombredanne/zope | 10,101,763,089,238 | 5c4777ec71ce1d93237d3cd6271b367942e0a765 | 153ecce57c94724d2fb16712c216fb15adef0bc4 | /zope.kgs/tags/1.0.1/src/zope/kgs/template.py | abc7af7049e18154c7d5a9c6cfbd4eb30aeef003 | []
| no_license | https://github.com/pombredanne/zope | 10572830ba01cbfbad08b4e31451acc9c0653b39 | c53f5dc4321d5a392ede428ed8d4ecf090aab8d2 | refs/heads/master | 2018-03-12T10:53:50.618672 | 2012-11-20T21:47:22 | 2012-11-20T21:47:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ##############################################################################
#
# Copyright (c) 2007 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Helper components for the Web site generation.
"""
import os
import shutil
import copy
import zope.pagetemplate.pagetemplatefile
class Template(zope.pagetemplate.pagetemplatefile.PageTemplateFile):
    def __init__(self, path, data, templates):
        super(Template, self).__init__(path)
        self.templates = templates
        self.data = data

    def pt_getContext(self, args=(), options=None, **ignore):
        rval = self.data.copy()
        rval.update(
            {'args': args,
             'nothing': None,
             'self': self,
             'templates': self.templates,
             })
        rval.update(self.pt_getEngine().getBaseNames())
        return rval
class DirectoryContext(object):
    def __init__(self, path, data, root=None):
        self.path = path
        self.data = data
        self.root = root or self

    def __getitem__(self, name):
        path = os.path.join(self.path, name)
        if os.path.exists(path):
            return Template(path, self.data, self.root)
        return None
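# Added note: DirectoryContext.__getitem__ is what lets the page templates
# resolve names like templates['navigation.pt'] (a hypothetical file) during
# rendering; missing files resolve to None rather than raising.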
def generateSite(src, dst, data, templates=None):
    if templates is None:
        templates = DirectoryContext(src, data)
    for filename in os.listdir(src):
        srcPath = os.path.join(src, filename)
        dstPath = os.path.join(dst, filename)
        if filename.startswith('.'):
            continue
        elif srcPath.endswith('.pt'):
            continue
        elif srcPath.endswith('.html'):
            html = Template(srcPath, data, templates)()
            open(dstPath, 'w').write(html)
        elif filename == 'VERSION':
            for version in data['versions']:
                versionDir = os.path.join(dst, version['name'])
                newData = copy.deepcopy(data)
                newData['version'] = version
                newData['siteRoot'] = '../%s' % newData['siteRoot']
                generateSite(srcPath, versionDir, newData, templates)
        elif os.path.isdir(srcPath):
            if not os.path.exists(dstPath):
                os.mkdir(dstPath)
            newData = copy.deepcopy(data)
            newData['siteRoot'] = '../%s' % newData['siteRoot']
            generateSite(srcPath, dstPath, newData, templates)
        else:
            shutil.copyfile(srcPath, dstPath)
| UTF-8 | Python | false | false | 2,908 | py | 13,605 | template.py | 6,145 | 0.575309 | 0.573246 | 0 | 81 | 34.901235 | 78 |
camlee/power-meter | 1,984,274,894,045 | f145d39be8b958be73968e7f40e4fcab9db99f55 | 78b961adcc7f2cc8c1e95a88a46220c94c1de85d | /esp32-micropython/server/sensor.py | 04cb9ea2d5385a88456e76c2e53e6d4a98481303 | []
| no_license | https://github.com/camlee/power-meter | 3b685ca7df26c774c8e946271a89b82d75c0b145 | 4954198e863de30d32af927da2cec6767e3681f9 | refs/heads/master | 2022-12-24T23:32:20.400866 | 2021-07-12T21:41:57 | 2021-07-12T21:41:57 | 144,101,312 | 4 | 1 | null | false | 2022-12-10T13:59:48 | 2018-08-09T04:37:09 | 2022-12-03T20:16:05 | 2022-12-10T13:59:48 | 9,811 | 3 | 0 | 17 | Python | false | false | import os
import time
import json
import machine
from _thread import start_new_thread
from ads1x15 import ADS1115
from util import epoch_time
from logger import log_exception
def average_excluding_outliers(values):
    values.sort()
    values = values[2:-2]
    return sum(values) / len(values)

def median(values):
    return values[len(values)//2]
class CircularBuffer:
    def __init__(self, size):
        self._size = size
        self._buffer = [None] * size
        self._index = -1
        self._filled = False

    def push(self, value):
        self._index += 1
        if self._index >= self._size:
            self._index = 0
            self._filled = True
        self._buffer[self._index] = value

    def _filled_buffer(self):
        if self._filled:
            return self._buffer
        else:
            return self._buffer[:self._index+1]

    def average(self):
        values = self._filled_buffer()
        return sum(values) / len(values)

    def max(self):
        return max(self._filled_buffer())

    def min(self):
        return min(self._filled_buffer())  # fixed: the original returned max() here

    def latest(self):
        return self._buffer[self._index]

    def multiply_then_average(self, other_buffer):
        values_pairs = list(zip(self._filled_buffer(), other_buffer._filled_buffer()))
        return sum(a * b for a, b in values_pairs) / len(values_pairs)

    def multiply_then_max(self, other_buffer):
        values_pairs = list(zip(self._filled_buffer(), other_buffer._filled_buffer()))
        return max(a * b for a, b in values_pairs)
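# Added usage sketch (not in the original): with a CircularBuffer(3),
# pushing 1, 2, 3, 4 overwrites the oldest slot, so average() == 3.0
# ((2 + 3 + 4) / 3) and latest() == 4.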
class BaseADC:
    def __init__(self, settings={}, **kwargs):
        raise NotImplementedError()

    def read(self):
        raise NotImplementedError()
class BuiltInADC(BaseADC):
    bits = 12
    max_voltage = 1  # The ESP ADC reads values between 0-1V despite operating at 3.3V.
    max_value = 2 ** bits
    factor = max_voltage / max_value

    def __init__(self, pin, settings={}):
        self.adc = machine.ADC(machine.Pin(pin))

    def read(self):
        return self.adc.read() * self.factor
i2c = None
class ADS1115ADC(BaseADC):
    def __init__(self, address, channels, settings={}):
        global i2c
        if i2c is None:
            print("Initializing I2C")
            i2c_settings = settings["i2c"]
            i2c = machine.I2C(
                settings["i2c"].get("id", -1),
                scl=machine.Pin(i2c_settings["scl"]),
                sda=machine.Pin(i2c_settings["sda"]),
                freq=i2c_settings.get("freq", None),
                )

        # Voltage is max readable voltage (full-scale input voltage range: FSR).
        # Doesn't let you read above power supply voltage (VCC) of 3.3V or 5V.
        # So for maximum resolution, pick 1x for 5V and 2x for 3.3V and make sure
        # input voltages are less than 4.096V and 2.048V respectively
        # gain_index = 0  # 2/3x 6.144V
        # gain_index = 1  # 1x   4.096V
        gain_index = 2  # 2x   2.048V
        # gain_index = 3  # 4x   1.024V
        # gain_index = 4  # 8x   0.512V
        # gain_index = 5  # 16x  0.256V
        self.adc = ADS1115(i2c, address, gain_index)
        self.channels = channels

        # rate_index is an index into how many samples can be taken per second.
        # More samples per second means faster reads but noisier results.
        # self.rate_index = 0  # 8 samples per second
        # self.rate_index = 1  # 16 samples per second
        # self.rate_index = 2  # 32 samples per second
        # self.rate_index = 3  # 64 samples per second
        # self.rate_index = 4  # 128 samples per second
        # self.rate_index = 5  # 250 samples per second
        # self.rate_index = 6  # 475 samples per second
        self.rate_index = 7  # 860 samples per second

    def read(self):
        try:
            raw_value = self.adc.read(self.rate_index, *self.channels)
            voltage = self.adc.raw_to_v(raw_value)
            # print("%s,%s" % (raw_value, voltage))
            return voltage
        except OSError:
            return 0  # TODO: update callers to handle None
class Sensor:
    # _average_x_reads = 5

    def __init__(self, voltage_adc, current_adc, voltage_factor, current_zero, current_factor,
                 nominal_voltage=None, nominal_current=None, buffer_size=10, settings={}):
        self.last_read = None
        self.cumulative_energy = 0
        self.available_cumulative_energy = 0

        sensor_type = settings["sensor_type"]
        types = {
            "ADS1115": ADS1115ADC,
            "BuiltInADC": BuiltInADC,
        }
        ADC = types.get(sensor_type, None)
        if ADC is None:
            raise Exception("Unrecognized sensor type: %s. Pick one of: %s." % (sensor_type, ", ".join(types.keys())))

        self.voltage_buffer = CircularBuffer(buffer_size)
        if voltage_adc is not None:
            self.voltage_adc = ADC(settings=settings, **voltage_adc)
        elif nominal_voltage is not None:
            self.voltage_adc = None
            self.voltage_buffer.push(nominal_voltage)
        else:
            raise ValueError("Must specify either voltage_adc or nominal_voltage")

        self.current_buffer = CircularBuffer(buffer_size)
        if current_adc is not None:
            self.current_adc = ADC(settings=settings, **current_adc)
        elif nominal_current is not None:
            self.current_adc = None
            self.current_buffer.push(nominal_current)
        else:
            raise ValueError("Must specify either current_adc or nominal_current")

        self.voltage_factor = voltage_factor
        self.current_zero = current_zero
        self.current_factor = current_factor

    def read(self):
        this_read = time.ticks_ms()
        if self.voltage_adc:
            self.voltage_buffer.push(self.voltage_adc.read() * self.voltage_factor)
        if self.current_adc:
            # val = self.current_adc.read()
            # print(val)
            # val = val * self.factor
            # print(val)
            # val = val - self.current_zero
            # print(val)
            # val = val * self.current_factor
            # print(val)
            # print("")
            # reads = [None] * self._average_x_reads
            # for i in range(len(reads)):
            #     # time.sleep_us(1)
            #     reads[i] = self.current_adc.read()
            # value = average_excluding_outliers(reads)
            value = self.current_adc.read()
            # print("%.3f: %s -> %s" % (((value * self.factor) - self.current_zero) * self.current_factor, sorted(reads), value))
            measured_current = (value - self.current_zero) * self.current_factor
            # previous_current = self.current_buffer.latest()
            # if previous_current is not None:
            #     difference = measured_current - previous_current
            #     if abs(difference) > 0.1:
            #         measured_current = previous_current + difference * 0.1
            self.current_buffer.push(measured_current)

        if self.last_read is not None:
            power = self.voltage_buffer.latest() * self.current_buffer.latest()
            duration = time.ticks_diff(this_read, self.last_read) / 1000
            available_power = self.voltage_buffer.multiply_then_max(self.current_buffer)
            # print("power: %.1f, available: %.1f" % (power, available_power))
            available_power = max(power, available_power * 0.9)  # Assuming only 90% of the available
                                                                 # can be used. To allow for outlier readings,
                                                                 # < 100% efficiency, etc...
            self.cumulative_energy += power * duration
            self.available_cumulative_energy += available_power * duration

        self.last_read = this_read

    @property
    def voltage(self):
        return self.voltage_buffer.average()

    @property
    def current(self):
        return self.current_buffer.average()

    @property
    def power(self):
        return self.voltage_buffer.multiply_then_average(self.current_buffer)

    @property
    def available_power(self):
        available_power = self.voltage_buffer.multiply_then_max(self.current_buffer)
        return max(self.power, available_power * 0.9)  # Assuming only 90% of the available
                                                       # can be used. To allow for outlier readings,
                                                       # < 100% efficiency, etc...

    @property
    def duty(self):
        return self.power / self.available_power

    def pop_energy(self):
        value = self.cumulative_energy
        self.cumulative_energy = 0
        return value

    def pop_available_energy(self):
        value = self.available_cumulative_energy
        self.available_cumulative_energy = 0
        return value
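# Added note: read() accumulates power * duration with duration in seconds
# (ticks_diff / 1000), so with volts and amps coming in, cumulative_energy
# totals are in joules (watt-seconds) until the logger pops them.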
class SensorLogger:
    _read_every_ms = 10
    _average_over_reads = 20
    _log_every_x_reads = 1000
    _rotate_period = 3600
    _max_files = 170

    def __init__(self, log_directory, sensor_config, settings={}):
        self.started = False
        self._stop_now = True

        # Setting up sensors:
        self.sensors = {}
        for name, config in sensor_config.items():
            self.sensors[name] = Sensor(**config, buffer_size=self._average_over_reads, settings=settings)

        self.last_read = time.ticks_ms()
        self.last_rotate = None

        # Setting up logging:
        self.reads_since_last_log = 0
        self.log_directory = log_directory.rstrip("/")
        self.meta_file = "%s/%s" % (self.log_directory, "meta.csv")
        self.tmp_meta = "%s/%s" % (self.log_directory, "tmp_meta.csv")
        self.start_time_offset = None
        self.our_logs_without_start_time = []
        self.log_index = None
        self.data_file = None

        try:
            os.mkdir(self.log_directory)
        except Exception:
            pass  # Directory already exists

        self.load_meta()
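    # Added note: each row of meta.csv is "index,active,start_time,start_time_offset",
    # e.g. a hypothetical "3,1,123456,None" marks 3.csv as the active log file,
    # started at tick 123456 before the wall-clock offset was known.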
    def rotate(self):
        # print("rotating from %s" % self.log_index)
        self.last_rotate = time.ticks_ms()
        if self.data_file:
            self.data_file.close()

        if self.log_index >= self._max_files:
            self.log_index = 0
        else:
            self.log_index += 1

        self.data_file_path = "%s/%s.csv" % (self.log_directory, self.log_index)

        with open(self.meta_file, "r") as f:
            with open(self.tmp_meta, "w") as f2:
                # print("rotate():")
                str_log_index = str(self.log_index)
                updated_line = False
                for line in f.readlines():
                    # print("<%s>" % line.strip())
                    index, active, start_time, start_time_offset = line.strip().split(",")
                    if index == str_log_index:
                        updated_line = True
                        active = "1"
                        start_time = str(time.ticks_ms())
                        start_time_offset = str(self.start_time_offset)
                    else:
                        active = "0"
                    line2 = ",".join([index, active, start_time, start_time_offset])
                    # print("[%s]" % line2)
                    f2.write(line2)
                    f2.write("\n")
                if updated_line is False:
                    line2 = ",".join([str_log_index, "1", str(time.ticks_ms()), str(self.start_time_offset)])
                    # print("[%s]" % line2)
                    f2.write(line2)
                    f2.write("\n")

        os.rename(self.tmp_meta, self.meta_file)
        # print("rotated to %s" % self.log_index)
        self.data_file = open(self.data_file_path, "w")
    def load_meta(self):
        try:
            with open(self.meta_file, "r") as f:
                with open(self.tmp_meta, "w") as f2:
                    # print("load_meta():")
                    # Looping over all entries in meta.csv.
                    # For each, setting start_time_offset from None to Unkn.
                    # Also, extracting the current index.
                    for line in f.readlines():
                        # print("<%s>" % line.strip())
                        index, active, start_time, start_time_offset = line.strip().split(",")
                        if active == "1":
                            self.log_index = int(index)
                            active = "0"
                        if start_time_offset == "None":
                            start_time_offset = "Unknown"
                        line2 = ",".join([index, active, start_time, start_time_offset])
                        # print("[%s]" % line2)
                        f2.write(line2)
                        f2.write("\n")
            os.rename(self.tmp_meta, self.meta_file)
        except OSError as e:
            err = "Failed to load meta.csv: %s\n" % e
            print(err)
            log_exception(err)
            # Initializing an empty file:
            with open(self.meta_file, "w") as f:
                pass

        if self.log_index is None:
            self.log_index = self._max_files
    def start(self, threaded=False):
        if not self.started:
            self._stop_now = False
            if threaded:
                start_new_thread(self.run_forever, ())
            else:
                self.run_forever()

    def stop(self):
        if self.started:
            self._stop_now = True

    def refresh(self):
        # Seeing if it's time to do work yet:
        next_read = time.ticks_add(self.last_read, self._read_every_ms)
        time_till_next_work = time.ticks_diff(next_read, time.ticks_ms())
        # print("time_till_next_work: %s" % time_till_next_work)
        if time_till_next_work <= 0:
            # Log rotation:
            if self.last_rotate is None or (time.ticks_diff(time.ticks_ms(), self.last_rotate) / 1000) > self._rotate_period:
                self.rotate()

            # Reading:
            self.read_all()
            # print("%.4f,%.4f,%.4f,%.4f" % (
            #     self.sensors["in"].voltage_buffer.latest(),
            #     self.sensors["out"].voltage_buffer.latest(),
            #     self.sensors["in"].current_buffer.latest(),
            #     self.sensors["out"].current_buffer.latest(),
            #     ))

            # Logging:
            self.reads_since_last_log += 1
            if self.reads_since_last_log >= self._log_every_x_reads:
                self.log_all()
                self.reads_since_last_log = 0

        return time_till_next_work

    def run_forever(self):
        self.started = True
        while not self._stop_now:
            sleep_ms = self.refresh()
            if sleep_ms > 0:
                # Sleep until next scheduled read:
                time.sleep_ms(sleep_ms)
    def read_all(self):
        self.last_read = time.ticks_ms()
        for sensor_name, sensor in self.sensors.items():
            sensor.read()
        # print("Read in %.3f ms" % time.ticks_diff(time.ticks_ms(), self.last_read))

    def get_voltage(self, sensor_name):
        return self.sensors[sensor_name].voltage

    def get_current(self, sensor_name):
        return self.sensors[sensor_name].current

    def get_power(self, sensor_name):
        return self.sensors[sensor_name].power

    def get_available_power(self, sensor_name):
        return self.sensors[sensor_name].available_power

    def get_duty(self, sensor_name):
        return self.sensors[sensor_name].duty
    def log_all(self):
        now = time.ticks_ms()
        # print("Logging at %s" % now)
        line = "%s,%.1f,%.1f,%.1f" % (
            now,
            self.sensors["in"].pop_energy(),
            self.sensors["out"].pop_energy(),
            self.sensors["in"].pop_available_energy(),
        )
        self.data_file.write(line)
        self.data_file.write("\n")
        self.data_file.flush()
        # print(line)
    def time_updated(self):
        """
        Should be called once the system time is set correctly (ex. from NTP
        or a browser client).
        Updates the metadata now that we know the real time of the data we've been
        collecting.
        """
        self.start_time_offset = epoch_time() * 1000 - time.ticks_ms()

        for log in self.our_logs_without_start_time:
            log["start_time_offset"] = self.start_time_offset
        self.our_logs_without_start_time = []

        with open(self.meta_file, "r") as f:
            with open(self.tmp_meta, "w") as f2:
                # print("time_updated():")
                for line in f.readlines():
                    # print("<%s>" % line.strip())
                    index, active, start_time, start_time_offset = line.strip().split(",")
                    if start_time_offset == "None":
                        start_time_offset = str(self.start_time_offset)
                    line2 = ",".join([index, active, start_time, start_time_offset])
                    # print("[%s]" % line2)
                    f2.write(line2)
                    f2.write("\n")
        os.rename(self.tmp_meta, self.meta_file) | UTF-8 | Python | false | false | 17,649 | py | 30 | sensor.py | 13 | 0.524959 | 0.511757 | 0 | 470 | 35.555319 | 129 |
hungs/tslearn | 369,367,230,921 | 6fcc5e0614ffd317778869825de4924e82185069 | ad67801ee787f58d83d71de03d0fc5ea9b05d64a | /setup.py | 4218fc9b00379aa60fd2adabbc8ae6c44590bc14 | []
| no_license | https://github.com/hungs/tslearn | a844df97c6aa23c149cfb0072c876f3b89a1c7b2 | 061082e00614fb763f49992d434d3d77069c6982 | refs/heads/master | 2021-01-23T12:26:37.915209 | 2017-05-24T14:28:40 | 2017-05-24T14:28:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from setuptools import setup
from Cython.Build import cythonize
import numpy
setup(
name="tslearn",
description="A machine learning toolkit dedicated to time-series data",
ext_modules=cythonize(["tslearn/cydtw.pyx", "tslearn/cylrdtw.pyx", "tslearn/cygak.pyx"]),
include_dirs=[numpy.get_include()],
install_requires=['Cython', 'numpy', 'scipy', 'scikit-learn'],
version="0.0.1",
url="https://github.com/rtavenar/tslearn",
author="Romain Tavenard",
author_email="romain.tavenard@univ-rennes2.fr"
) | UTF-8 | Python | false | false | 532 | py | 15 | setup.py | 7 | 0.704887 | 0.697368 | 0 | 15 | 34.533333 | 93 |
kushalwaghmare/DataModel | 8,933,532,003,647 | 80e981793315d7e7b01ae120d435f5639338ce6d | 26f47e2ab24219d5cc1791ca39bc76fe921ace99 | /Regression2.py | 0e0b1fed6ea6ed67e76b5f7b62673ffb360b931c | []
| no_license | https://github.com/kushalwaghmare/DataModel | 25b921041edb25a1a973178f6b4356757df3d7f7 | 4c4db880fb9847196d90bd6e329312fc6fd4dd1d | refs/heads/master | 2022-04-25T14:16:13.638283 | 2020-04-28T11:40:21 | 2020-04-28T11:40:21 | 259,618,892 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
import statsmodels.api as sm
data = pd.read_csv(r'\data\Sample_Book1.csv', encoding = "utf8")
#Print Data Head
print (data.head())
#Visualize data
plt.figure(figsize=(16, 8))
plt.scatter(
    data['AGE'],
    data['UNMARRIED'],
    c='black'
)
plt.xlabel("Age")
plt.ylabel("Unmarried")
plt.show()
# Linear Approximation of the Data
IndVar = data['AGE'].values.reshape(-1,1)
DepVar = data['UNMARRIED'].values.reshape(-1,1)
reg = LinearRegression()
reg.fit(IndVar,DepVar)
# The coefficients
print('Intercept: \n', reg.intercept_[0])
print('Coefficient: \n', reg.coef_[0][0])
#Visualize the regression line
"""
predictions = reg.predict(IndVar)
plt.figure(figsize = (16,8))
plt.scatter(
data['AGE'],
data['UNMARRIED'],
c = 'green'
)
plt.plot(
data['AGE'],
predictions,
c = 'red'
)
plt.xlabel("Age")
plt.ylabel("Unmarried")
plt.show()
"""
x = data['AGE']
y = data['UNMARRIED']
print ("X: ")
print (x)
x2 = sm.add_constant(x)
print("X2")
print(x2)
est = sm.OLS(y, x2)
est2 = est.fit()
print(est2.summary())
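# Added note: the const and AGE rows of est2.summary() should match sklearn's
# reg.intercept_ and reg.coef_ above, since both fit ordinary least squares;
# the statsmodels summary additionally reports R^2 and p-values.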
print ("Done.")
| UTF-8 | Python | false | false | 1,264 | py | 2 | Regression2.py | 1 | 0.630538 | 0.613133 | 0 | 69 | 16.318841 | 64 |
Kyle1668/Data-Structures-and-Algorithms | 5,007,931,867,972 | 7e378272fc986132e2152556cc65b9ec54fe8c87 | 1bfe574843d6ca9fdda4db3822e49407e49e5d95 | /data structures/stack/test_stack.py | 6fbe726e2b3c8eac3d2be5466a168cf1d8382f7d | []
| no_license | https://github.com/Kyle1668/Data-Structures-and-Algorithms | 360cc2b4b7a15039ac5df66d09b0e54c9c4fc21e | a406f806898f369160d61b5aaf849a374fe4a38f | refs/heads/master | 2021-01-21T14:48:45.225086 | 2018-11-29T22:05:16 | 2018-11-29T22:05:16 | 95,336,302 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .stack import Stack
def test_push():
    test_stack = Stack()
    test_stack.push(1668)

    assert test_stack.top.data == 1668
    assert test_stack.length == 1

def test_pop():
    test_stack = Stack()
    test_stack.push(1668)
    test_stack.pop()

    assert test_stack.length == 0
    assert test_stack.top == None

def test_get():
    test_stack = Stack()
    test_stack.push(1668)
    test_stack.push(3000)
    test_stack.push(5432)

    assert test_stack.get(1).data == 3000
    assert test_stack.get(5) == None
| UTF-8 | Python | false | false | 529 | py | 18 | test_stack.py | 17 | 0.627599 | 0.567108 | 0 | 28 | 17.892857 | 41 |
AdamZhouSE/pythonHomework | 13,872,744,383,629 | 6ff54b6eb66ff3afec4eabbce4560753736f2ea9 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2417/48025/310916.py | 04cb29026dfb9c46119d565118584c2d7a2e44c0 | []
| no_license | https://github.com/AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | try:
    s = ''
    while True:
        s += input()
except EOFError:
    pass
if s == '12,5,7,23' or s == '1,3,5,7,9' or s == '29,6,10':
    print(True)
elif s == '5,4,3,2,1':
    print(True)
elif s == '3,6':
    print(False)
else:
    print(s) | UTF-8 | Python | false | false | 241 | py | 45,079 | 310916.py | 43,489 | 0.485477 | 0.390041 | 0 | 15 | 15.133333 | 52 |
estanislaogoya/tp-final-itba | 1,202,590,883,911 | 36c4fc5c9fa52e241d7317957af2947fbb8ecac0 | 23e3b54f55fe2c20e7497caac535ee9e8354d78e | /src/features/featEng.py | 4760aa3271e16dfd7fa68d5e1bfe6a5ec924f208 | []
| no_license | https://github.com/estanislaogoya/tp-final-itba | 2520b270d95ad1088d427c8e98c0e66768b5507f | 43b65f5ba5da5b7c05247b2e551be8e59fbd021b | refs/heads/master | 2023-06-18T22:16:27.269091 | 2021-07-22T22:32:40 | 2021-07-22T22:32:40 | 263,178,122 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
QUOTE_DF = []
# As per Stock price predictions input variables for backpropagation
"""
- current stock price
- the absolute variation of the price in relation to previous day.
- direction of variation,
- direction of variation from two days previously,
- major variations in relation to the previous day
- the prices of the last 10 days (for Backpropagation)
"""
LOOK_WINDOW = -365
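# Added note: with LOOK_WINDOW = -365, shift(LOOK_WINDOW) pulls 'Close' from
# 365 rows ahead, making 'future_price' a one-year-ahead target; the final
# abs(LOOK_WINDOW) rows then have no target and are dropped.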
def pricePredictionFeatureEng40(df):
    # assuming we receive a 2 column df, date and price
    df = df.sort_values(by='Date', ascending=True)  # sort_values returns a copy, so assign it back
    #df = df.rename(columns={1: 'Close'})
    df['1d_abs'] = df['Close'].diff(1)
    df['1d_dir'] = np.where((df['1d_abs'] <= 0), 0, 1)
    df['2d_abs'] = df['Close'].diff(2)
    df['1d_cls'] = df['Close'].shift(1)
    df['2d_cls'] = df['Close'].shift(2)
    df['3d_cls'] = df['Close'].shift(3)
    df['4d_cls'] = df['Close'].shift(4)
    df['5d_cls'] = df['Close'].shift(5)
    df['6d_cls'] = df['Close'].shift(6)
    df['7d_cls'] = df['Close'].shift(7)
    df['8d_cls'] = df['Close'].shift(8)
    df['9d_cls'] = df['Close'].shift(9)
    df['10d_cls'] = df['Close'].shift(10)

    # moving average indicators
    df['50d_ma'] = df['Close'].rolling(window=50).mean()
    df['100d_ma'] = df['Close'].rolling(window=100).mean()
    df['200d_ma'] = df['Close'].rolling(window=200).mean()

    #df['Volume'] = df['Volume'] / 100
    df = df.iloc[200:]
    df['future_price'] = df.loc[:, 'Close'].shift(LOOK_WINDOW)
    # Drop the trailing rows, as their future price is empty
    df.drop(df.tail(abs(LOOK_WINDOW)).index, inplace=True)
    return df
def pricePredictionFeatureEng_B(df):
    # assuming we receive a 2 column df, date and price
    df = df.sort_values(by='Date', ascending=True)  # sort_values returns a copy, so assign it back
    #df = df.rename(columns={1: 'Close'})
    df['1d_abs'] = df['Close'].diff(1)
    df['1d_dir'] = np.where((df['1d_abs'] <= 0), 0, 1)
    df['2d_abs'] = df['Close'].diff(2)
    df['50d_cls'] = df['Close'].shift(50)
    df['100d_cls'] = df['Close'].shift(100)
    df['150d_cls'] = df['Close'].shift(150)
    df['200d_cls'] = df['Close'].shift(200)
    df['250d_cls'] = df['Close'].shift(250)
    df['300d_cls'] = df['Close'].shift(300)
    df['50d_dir'] = np.where((df['Close'] >= (df['50d_cls'])), 1, 0)
    df['100d_dir'] = np.where((df['Close'] >= (df['100d_cls'])), 1, 0)
    df['150d_dir'] = np.where((df['Close'] >= (df['150d_cls'])), 1, 0)
    df['200d_dir'] = np.where((df['Close'] >= (df['200d_cls'])), 1, 0)
    df['250d_dir'] = np.where((df['Close'] >= (df['250d_cls'])), 1, 0)
    df['300d_dir'] = np.where((df['Close'] >= (df['300d_cls'])), 1, 0)

    # moving average indicators
    df['50d_ma'] = df['Close'].rolling(window=50).mean()
    df['100d_ma'] = df['Close'].rolling(window=100).mean()
    df['200d_ma'] = df['Close'].rolling(window=200).mean()
    df['300d_ma'] = df['Close'].rolling(window=300).mean()

    #df['Volume'] = df['Volume'] / 100
    df = df.iloc[300:]
    df['future_price'] = df.loc[:, 'Close'].shift(LOOK_WINDOW)
    # Drop the trailing rows, as their future price is empty
    df.drop(df.tail(abs(LOOK_WINDOW)).index, inplace=True)
    return df
def splittingForTraining(df):
    target = df.pop('future_price')
    return train_test_split(df,
                            target,
                            test_size=0.33,
                            random_state=42)
| UTF-8 | Python | false | false | 3,394 | py | 15 | featEng.py | 7 | 0.583629 | 0.528664 | 0 | 87 | 37.896552 | 70 |
aguinaldolorandi/Python_exercicios_oficial | 18,021,682,781,217 | 5448d61dd9ed85b1c6bb689c398e75a4bc04b181 | b6feb419b2c6c454cd1f0502a801861fc5363187 | /Lista de Exercícios nº 02 Python Oficial/Ex.12-lista2.py | a8f7b2b2eb23598ef6c45d06bd6413c4eac06b59 | []
| no_license | https://github.com/aguinaldolorandi/Python_exercicios_oficial | 1ad05c380e08a12c8e0d3f5650cff6bef6e881b4 | 3a024631ebb70a6fea022cffd8a566a717cdbd9d | refs/heads/master | 2020-06-03T05:56:50.335561 | 2019-06-13T13:32:20 | 2019-06-13T13:32:20 | 191,469,833 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #EXERCISE Nº 12 - LIST 02 - DECISION STRUCTURES (translated from Portuguese)
print('Folha de pagamento')
print('##################')
valor_hora=float(input('Insira o valor da hora trabalhada: R$ '))
horas_trabalhada=float(input('Insira a quantidade de horas trabalhadas no mês: '))
salário_bruto=valor_hora*horas_trabalhada
# Income tax (IR) bracket table
if salário_bruto<=900:
IR=0
elif salário_bruto <=1500:
IR=0.05
elif salário_bruto <=2500:
IR=0.10
else:
    IR=0.20
if IR >0:
IR1=IR*100
else:
IR1='Isento'
INSS=0.10
FGTS=0.11
# Net salary, gross salary and deductions:
if IR>0:
salário_liquído=salário_bruto-(salário_bruto*IR+salário_bruto*INSS)
desconto_IR=salário_bruto*IR1/100
total_descontos=(salário_bruto*IR+salário_bruto*INSS)
else:
salário_liquído = salário_bruto - (salário_bruto * INSS)
desconto_IR=0.00
total_descontos = salário_bruto * INSS
print('⃝ SALÁRIO BRUTO: (',valor_hora,'*',horas_trabalhada,') : R$ ',salário_bruto )
print('⃝ (-) IR (',IR1,'%) : R$ ',desconto_IR )
print('⃝ (-) INSS (',INSS*100,'%) : R$ ',salário_bruto*INSS)
print('⃝ FGTS (',FGTS*100,'%) : R$ ',salário_bruto*FGTS)
print('⃝ TOTAL DE DESCONTOS : R$ ',total_descontos)
print('⃝ SALÁRIO LÍQUIDO : R$ ',salário_liquído)
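# Worked example: R$ 10.00/hour * 100 hours = R$ 1000.00 gross, which falls in
# the 5% IR bracket: IR = R$ 50.00, INSS (10%) = R$ 100.00, so the net salary
# is 1000.00 - (50.00 + 100.00) = R$ 850.00.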
| UTF-8 | Python | false | false | 1,380 | py | 162 | Ex.12-lista2.py | 159 | 0.610322 | 0.571429 | 0 | 42 | 29.833333 | 84 |
watson6/cod | 10,290,741,666,731 | fa72493c9753c6cb24082d8623edd82590994553 | 233435731051af0cd89816df865120552f4a25c5 | /project/views.py | 7d65ad9f46fe2735c43d35d196df365a9940d677 | []
| no_license | https://github.com/watson6/cod | 49708ae31d50e4a7ea917d7cb4fd29025972d80a | b57bd3c1da2f4d3ea6d6b8940552dd31e236d9fb | refs/heads/master | 2022-12-10T18:23:08.521489 | 2020-09-14T08:35:55 | 2020-09-14T08:35:55 | 295,577,165 | 1 | 1 | null | true | 2020-09-15T01:05:08 | 2020-09-15T01:05:07 | 2020-09-15T00:59:29 | 2020-09-14T08:36:03 | 59 | 0 | 0 | 0 | null | false | false | from rest_framework.viewsets import ModelViewSet
from project.serializers import ProjectSerializer, Project
# Create your views here.
class ProjectViewSet(ModelViewSet):
queryset = Project.objects.filter(parent=None)
serializer_class = ProjectSerializer
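# Hypothetical wiring sketch (would live in a urls.py, not in this views module):
# from rest_framework.routers import DefaultRouter
# router = DefaultRouter()
# router.register(r'projects', ProjectViewSet)
# urlpatterns = router.urls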
| UTF-8 | Python | false | false | 266 | py | 70 | views.py | 66 | 0.808271 | 0.808271 | 0 | 10 | 25.6 | 58 |
chamhoo/FM-FFM | 9,655,086,518,267 | d2d5d74a5e0658832fbfb69d1971bfef44211748 | c177659694205f18df8bf63eda35856a7f131a85 | /auto_tuning.py | e35c6a2b7f604eba2e87d32393e74e0239160e11 | [
"MIT"
]
| permissive | https://github.com/chamhoo/FM-FFM | 475e40efe9745779deed86070babebb75fd41e2c | adae51569d1647332896b9bb321fb7b5967980db | refs/heads/master | 2021-10-24T23:44:08.134230 | 2019-03-30T05:13:03 | 2019-03-30T05:13:03 | 167,799,136 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
import numpy as np
import matplotlib.pyplot as plt
from CTR import *
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
import warnings
warnings.filterwarnings('ignore')
class AutoTuning(CTR):
def cv_score(self, params):
param = {}
for key, value in self.model_params.items():
param[key] = value
for key, value in params.items():
param[key] = value
self.model(**param)
return self.cv(**self.cv_params)
def f(self, params):
self.eval += 1
score = self.cv_score(params)['mean_score']
self.fmin_recorder['score'].append(score)
for key, value in params.items():
self.fmin_recorder['param'][key].append(value)
if self.fmin_verbose == 1:
if self.best_score > score:
self.best_score = score
print(f'new best, eval {self.eval}, score {self.best_score}, param {params}')
if self.fmin_verbose == 2:
print(f'eval {self.eval}, score {score}, param {params}')
if self.fmin_verbose == 3:
num_params = len(params)
            # use ints for add_subplot and ceil the row count so row*col >= num_params
            col = int(np.ceil(np.sqrt(num_params)))
            row = int(np.ceil(num_params / col))
for i, [key, value] in enumerate(self.fmin_recorder['param'].items()):
self.ax[key] = self.fig.add_subplot(row, col, i+1)
self.ax[key].cla()
self.ax[key].scatter(value, self.fmin_recorder['score'])
plt.pause(0.01)
else:
pass
return {'loss': score, 'status': STATUS_OK}
def fmin(self, model, space_dict, model_params, cv_params, verbose=0, max_evals=100):
"""
        We use Hyperopt to automate the tuning.
you can use [pip install hyperopt] command to install this package.
- https://github.com/hyperopt/hyperopt
:param model: <class 'method'>, a model function
:param space_dict: dict, record parameter search space
        :param model_params: dict, the fixed model parameters that are not searched via space_dict.
        :param verbose: [0, 1, 2, 3] (default=0), 0 = almost silent, only one line of progress bar.
1 = Update only when better parameters appear, 2 = Update every time,
3 = Update and plot every time.
:param cv_params: dict, The cv parameter.
        :param max_evals: int, the maximum number of parameter evaluations you can afford;
            more evaluations make finding better parameters more likely, but take longer.
:return: best training param
"""
self.best_score = 2**32
self.eval = 0
self.model = model
self.cv_params = cv_params
self.fmin_verbose = verbose
self.model_params = model_params
self.fmin_recorder = dict()
self.fmin_recorder['score'] = []
self.fmin_recorder['param'] = dict(zip(space_dict.keys(), [[] for i in range(len(space_dict))]))
if self.fmin_verbose == 3:
self.ax = {}
self.fig = plt.figure()
trials = Trials()
best = fmin(self.f, space_dict, algo=tpe.suggest, max_evals=max_evals, trials=trials)
plt.close()
return best
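# Usage sketch (hypothetical; assumes the CTR base class supplies a model
# method such as `self.FM` plus the data handling that cv() relies on):
# from hyperopt import hp
# tuner = AutoTuning(...)
# space = {'k': hp.quniform('k', 4, 64, 4)}
# best = tuner.fmin(tuner.FM, space, model_params={}, cv_params={}, max_evals=50)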
| UTF-8 | Python | false | false | 3,231 | py | 10 | auto_tuning.py | 7 | 0.58372 | 0.575673 | 0 | 82 | 38.402439 | 104 |
mdqarshad/arshadWeb.github.io | 9,792,525,482,064 | fd8ed0ec3dcab8df71c4d2dddb677b9d75974b2f | 8202bf99aa3cc504e667996c935aae4dcd2d8e50 | /date.py | de3e8f998b9b159d15e22cbd52a16b5c60b9a36b | []
| no_license | https://github.com/mdqarshad/arshadWeb.github.io | 2be65f48103bbc988b2d7462f02df3848dd931da | e8822d076a386a688d717f29de1bfeb6db87d8e5 | refs/heads/master | 2021-07-11T05:47:07.215719 | 2021-03-08T13:55:54 | 2021-03-08T13:55:54 | 236,685,288 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
day=int(input("enter day"))
mon=int(input("enter month"))
year=int(input("enter year"))
print("the examination will start in ",day,"/",mon,"/",year) | UTF-8 | Python | false | false | 156 | py | 8 | date.py | 6 | 0.628205 | 0.628205 | 0 | 7 | 20.285714 | 60 |
noorulameenkm/DataStructuresAlgorithms | 14,542,759,314,319 | 8ad9740c72cbfff2dbd65f2198d65fadcf178b72 | e5504d8c4880993b82d5583a11c5cc4623e0eac2 | /LinkedList/fractionalNode.py | e9f8717b8deba79e8a9a4e978cffe195a10a708c | []
| no_license | https://github.com/noorulameenkm/DataStructuresAlgorithms | e5f87f426fc444d18f830e48569d2a7a50f5d7e0 | 7c3bb89326d2898f9e98590ceb8ee5fd7b3196f0 | refs/heads/master | 2023-06-08T19:29:42.507761 | 2023-05-28T16:20:19 | 2023-05-28T16:20:19 | 219,270,731 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Way of solving this problem:-
* i = 0
* i % k == 0 (means jumped k nodes, that's why it is starting from i = 0)
* in the above case assign fractional node to head
    * The 2nd condition is true for i = 0 as well, which means at least
      one node exists, so we have to assign it to fractionalNode
    * The next case is i = k, i.e. the (k + 1)th node: move fractionalNode
      to the next node, because more than k nodes exist and, since we are
      taking the ceil, we can advance it by one
    * The basic idea is to jump k nodes at a time and then move
      fractionalNode to the next node; repeat until the whole list is parsed
"""
class Node:
def __init__(self, val):
self.val = val
self.next = None
def findFractionalNode(head, k):
if k <= 0:
return None
fractionalNode = None
temp = head
i = 0
while temp:
if i % k == 0:
if not fractionalNode:
fractionalNode = head
else:
fractionalNode = fractionalNode.next
temp = temp.next
i += 1
return fractionalNode
def Main(head):
k = int(input())
node = findFractionalNode(head, k)
if node:
print(node.val)
else:
print('No Fractional Node')
if __name__ == '__main__':
head = Node(1)
head.next = Node(2)
head.next.next = Node(3)
head.next.next.next = Node(4)
head.next.next.next.next = Node(5)
head.next.next.next.next.next = Node(6)
Main(head)
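    # e.g. entering k = 2 for the 6-node list above selects the
    # ceil(6/2) = 3rd node, so the program prints 3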
| UTF-8 | Python | false | false | 1,525 | py | 683 | fractionalNode.py | 679 | 0.584918 | 0.574426 | 0 | 57 | 25.754386 | 77 |
biobakery/hmp2_workflows | 3,908,420,243,256 | 546d753a249c9d0411b94d004911178815816478 | 13e07d77985c093b4f39a482320ecf4366f66d1d | /hmp2_workflows/scripts/depth_first_dcc_delete.py | b0706199e89493f8b4e4b11423c1ba8e7e2fdb1b | []
| no_license | https://github.com/biobakery/hmp2_workflows | c1bf715cfc92873df38c84a8e450e27f43072e80 | ef27c580456632ac2d334dded5653f24b0f04569 | refs/heads/master | 2022-06-24T12:40:45.709962 | 2019-01-23T00:14:54 | 2019-01-23T00:14:54 | 229,788,243 | 0 | 0 | null | false | 2022-06-21T23:58:04 | 2019-12-23T16:35:17 | 2019-12-23T16:38:57 | 2022-06-21T23:58:02 | 441 | 0 | 0 | 2 | Python | false | false | # -*- coding: utf-8 -*-
"""
depth_first_dcc_delete.py
~~~~~~~~~~~~~~~~~~~~~~~~~
Does a depth-first traversal/delete of the document tree out of the OSDF
based off an OQL query to provide the starting point for the delete.
Example OQL queries are:
- '"abundance_matrix"[node_type] && "wgs_community"[meta.matrix_type]'
- '"visit"[node_type]'
Extreme caution should be taken when using the script as if a specific enough
OQL query is not provided a large amount of documents could be deleted.
Copyright (c) 2017 Harvard School of Public Health
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import argparse
import importlib
import itertools
import json
import types
import anytree
import cutlass
def parse_cli_arguments():
"""Parses any command-line arguments passed into the workflow.
Args:
None
Requires:
None
Returns:
argparse.ArgumentParser: Object containing arguments passed in by user.
"""
    parser = argparse.ArgumentParser(description='Deletes a cache of OSDF documents '
                                                 'in a depth-first manner using a '
                                                 'specific OQL query as the basis.')
parser.add_argument('-u', '--username', required=True,
help='DCC username.')
parser.add_argument('-p', '--password', required=True,
help='DCC password.')
parser.add_argument('-s', '--study-id', required=True,
default='52d8c92f2d3660b9add954d544a0216e',
help='The study ID from which to cascade delete down.')
parser.add_argument('-t', '--node-type-filter',
help='OPTIONAL. Filter nodes to delete by a node type.')
parser.add_argument('-q', '--oql-query',
help='OQL query to establish the basis from which '
'to do a deletion.')
parser.add_argument('-d', '--dry-run', action='store_true', default=False,
help='Perform a dry-run deletion and list which '
'nodes will be delete.')
parser.add_argument('--delete-root', action='store_true', default=False,
help='Delete the root node when this flag is specified.')
return parser.parse_args()
def build_osdf_tree(study_id):
"""
Builds a tree structure to contain all our targeted objects from the OSDF.
Args:
study_id (string): The study ID to act as the root node from which
all children nodes are retrieved.
Requires:
None
Returns:
anytree.Node: The root tree node of our constructed tree structure.
"""
## We want a hash lookup map so that we can map OSDF ID -> OSDF object so
    ## we can quickly build our tree without messy looping.
osdf_lookup_map = {}
def _update_osdf_tree(osdf_obj):
"""
Updates the tree containing OSDF nodes and makes use of lookup table
set the proper parent node.
Args:
osdf_obj (cutlass.*): One of the cutlass objects associted with
the provided study.
Requires:
None
Returns:
None
"""
## We should only ever have one parent for each of our objects so
        ## this is naive but ok.
parent_id = osdf_obj.links.values()[0][0]
parent_node = osdf_lookup_map.get(parent_id)
if not parent_node:
print "WARNING: Could not find parent node for following object:", osdf_obj.to_json()
else:
osdf_node = anytree.Node(osdf_obj.id, osdf=osdf_obj, type=json.loads(osdf_obj.to_json()).get('node_type'), parent=parent_node)
return osdf_node
study_obj = cutlass.Study.load(study_id)
study_node = anytree.Node("root", osdf=study_obj, type='study')
subjects = list(study_obj.subjects())
subject_nodes = [anytree.Node(s.id, osdf=s, parent=study_node, type='subject') for s in subjects]
osdf_lookup_map.update({s.name: s for s in subject_nodes})
subject_attrs = [list(s.attributes()) for s in subjects]
subject_attrs = list(itertools.chain.from_iterable(subject_attrs))
subject_attr_nodes = map(_update_osdf_tree, subject_attrs)
osdf_lookup_map.update({sa.name: sa for sa in subject_attr_nodes})
visits = [list(s.visits()) for s in subjects]
visits = list(itertools.chain.from_iterable(visits))
visit_nodes = map(_update_osdf_tree, visits)
osdf_lookup_map.update({v.name: v for v in visit_nodes})
visit_attrs = [list(v.visit_attributes()) for v in visits]
visit_attrs = list(itertools.chain.from_iterable(visit_attrs))
visit_attr_nodes = map(_update_osdf_tree, visit_attrs)
osdf_lookup_map.update({va.name: va for va in visit_attr_nodes})
samples = [list(v.samples()) for v in visits]
samples = list(itertools.chain.from_iterable(samples))
sample_nodes = map(_update_osdf_tree, samples)
osdf_lookup_map.update({sp.name: sp for sp in sample_nodes})
sample_attrs = [list(s.sampleAttributes()) for s in samples]
sample_attrs = list(itertools.chain.from_iterable(sample_attrs))
sample_attr_nodes = map(_update_osdf_tree, sample_attrs)
osdf_lookup_map.update({sa.name: sa for sa in sample_attr_nodes})
preps = [list(s.preps()) for s in samples]
preps = list(itertools.chain.from_iterable(preps))
prep_nodes = map(_update_osdf_tree, preps)
osdf_lookup_map.update({p.name: p for p in prep_nodes})
seq_sets = [list(c) for p in preps for c in p.children() if p.children()]
seq_sets = list(itertools.chain.from_iterable(seq_sets))
seq_sets = [ss for ss in seq_sets if not isinstance(ss, types.GeneratorType)]
seq_set_nodes = map(_update_osdf_tree, seq_sets)
osdf_lookup_map.update({ss.name: ss for ss in seq_set_nodes})
products = [list(c) for ss in seq_sets for c in ss.children() if ss.children()]
products = list(itertools.chain.from_iterable(products))
products = [p for p in products if not isinstance(p, types.GeneratorType)]
product_nodes = map(_update_osdf_tree, products)
osdf_lookup_map.update({po.name: po for po in product_nodes})
## Sometimes we have another round of products we need to account for here...
products2 = [list(c) for p in products for c in p.children() if p.children()]
products2 = list(itertools.chain.from_iterable(products2))
products2 = [p for p in products2 if not isinstance(p, types.GeneratorType)]
product_nodes2 = map(_update_osdf_tree, products2)
osdf_lookup_map.update({po.name: po for po in product_nodes2})
return study_node
def filter_osdf_tree(root_node, node_type):
"""Filters an existing OSDF tree by a specific node_type.
Args:
root_node (anytree.Node): The root node of the tree to filter upon
        node_type (string): The type of node to filter the tree down by.
"""
    filtered_nodes = anytree.search.findall(root_node,
                                            filter_=lambda node: node.type in
                                            node_type)
    filtered_root_node = anytree.Node('root', osdf=root_node.osdf, type='study')
for filtered_node in filtered_nodes:
path = filtered_node.path[1:]
parent_node = filtered_root_node
for node in path:
new_node = anytree.Node(node.osdf.id, osdf=node.osdf, type=node.type, parent=parent_node)
parent_node = new_node
return filtered_root_node
def delete_nodes(root_node, dry_run, delete_root, stop_node="root"):
"""
Cascade deletes OSDF nodes in a depth-first manner.
Args:
root_node (anytree.Node): The root node of the tree to delete from.
dry_run (boolean): True/False to delete or print out the nodes to
be deleted.
stop_node (string): Name of the node to stop deletion on. Defaults to
the root node but can be any OSDF ID to stop on.
delete_root (boolean): If the stop_node parameter is set to 'root'
this parameter can be passed to indicate we want to delete the
root node as well.
Requires:
None
Returns:
None
"""
    failed_delete = []
    for node in anytree.PostOrderIter(root_node):
        if dry_run:
            if not node.name == "root":
                print "DELETING NODE:", node
        else:
            osdf_obj = node.osdf
            if not node.name == "root":
                print "DELETING NODE:", node
                # only delete non-root nodes here; the root is handled below
                res = osdf_obj.delete()
                if not res:
                    print "FAILED TO DELETE NODE:", node
                    failed_delete.append(osdf_obj)
if failed_delete:
print "WARNING: The following OSDF nodes were not deleted:" + "\n".join(failed_delete)
if delete_root and stop_node == "root":
print "DELETING ROOT NODE:", root_node
if not dry_run:
root_node.osdf.delete()
def main(args):
session = cutlass.iHMPSession(args.username, args.password, ssl=False)
osdf = session.get_osdf()
root_node = build_osdf_tree(args.study_id)
if args.node_type_filter:
root_node = filter_osdf_tree(root_node, args.node_type_filter)
delete_nodes(root_node, args.dry_run, args.delete_root)
if __name__ == "__main__":
main(parse_cli_arguments())
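# Example invocation (hypothetical credentials; the study ID shown is the default above):
#   python depth_first_dcc_delete.py -u USER -p PASS -s 52d8c92f2d3660b9add954d544a0216e --dry-run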
| UTF-8 | Python | false | false | 10,450 | py | 45 | depth_first_dcc_delete.py | 40 | 0.636364 | 0.632823 | 0 | 274 | 37.138686 | 138 |
Infiziert90/discord_feedbot | 14,096,082,704,729 | be2f6c9c9135166f0daa05e0c03eb694d7e8fbd3 | 676d5c1c4fc098e05a1981ae839d9cacb55604e7 | /feed2discord.py | 6987cb4fbe8ecf3ee123af6a033f49d54cc6f992 | [
"MIT"
]
| permissive | https://github.com/Infiziert90/discord_feedbot | d6a9d473babc371e7ae421bed3cdcdd8abef1f91 | 6233781706a9a6452e468df79e158bf8b8e5d8ab | refs/heads/master | 2023-08-20T20:37:32.797440 | 2019-09-19T08:46:13 | 2019-09-19T08:46:13 | 128,564,044 | 2 | 0 | NOASSERTION | true | 2020-06-20T04:06:21 | 2018-04-07T20:07:58 | 2020-04-24T08:38:18 | 2019-09-19T08:46:23 | 317 | 2 | 0 | 1 | Python | false | false | #!/usr/bin/env python3
# Copyright (c) 2016-2017 Eric Eisenhart
# This software is released under an MIT-style license.
# See LICENSE.md for full details.
import asyncio
import logging
import os
import random
import re
import sqlite3
import sys
import time
import warnings
import aiohttp
import discord
import feedparser
import logging.handlers
from configparser import ConfigParser
from datetime import datetime
from importlib import reload
from urllib.parse import urljoin
from aiohttp.web_exceptions import HTTPForbidden, HTTPNotModified
from dateutil.parser import parse as parse_datetime
from html2text import HTML2Text
__version__ = "3.2.0r"
PROG_NAME = "feedbot"
USER_AGENT = f"{PROG_NAME}{__version__}"
SQL_CREATE_FEED_INFO_TBL = """
CREATE TABLE IF NOT EXISTS feed_info (
feed text PRIMARY KEY,
url text UNIQUE,
lastmodified text,
etag text
)
"""
SQL_CREATE_FEED_ITEMS_TBL = """
CREATE TABLE IF NOT EXISTS feed_items (
id text PRIMARY KEY,
published text,
title text,
url text,
reposted text
)
"""
SQL_CLEAN_OLD_ITEMS = """
DELETE FROM feed_items WHERE (julianday() - julianday(published)) > 365
"""
if not sys.version_info[:2] >= (3, 6):
print("Error: requires python 3.6 or newer")
exit(1)
def get_config():
ini_config = ConfigParser()
ini_config.read(["feed2discord.ini"])
debug = ini_config["MAIN"].getint("debug", 0)
if debug:
os.environ["PYTHONASYNCIODEBUG"] = "1"
# The AIO modules need to be reloaded because of the new env var
reload(asyncio)
reload(aiohttp)
reload(discord)
if debug >= 3:
log_level = logging.DEBUG
elif debug >= 2:
log_level = logging.INFO
else:
log_level = logging.WARNING
logging.basicConfig(level=log_level)
log = logging.getLogger(__name__)
# suppress poll infos from asyncio
logging.getLogger('asyncio').setLevel(logging.WARNING)
log.addHandler(logging.handlers.TimedRotatingFileHandler("output.log", when='W0', backupCount=3))
log.setLevel(log_level)
warnings.resetwarnings()
return ini_config, log
def get_timezone(ini_config):
import pytz
tzstr = ini_config["MAIN"].get("timezone", "utc")
# This has to work on both windows and unix
try:
timezone = pytz.timezone(tzstr)
except pytz.UnknownTimeZoneError:
timezone = pytz.utc
return timezone
def get_feeds_config(ini_config):
feeds = list(ini_config.sections())
# remove non-feed sections
feeds.remove("MAIN")
feeds.remove("CHANNELS")
return feeds
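# Minimal feed2discord.ini sketch (illustrative values; keys match those read below):
# [MAIN]
# debug = 0
# login_token = YOUR_BOT_TOKEN
# [CHANNELS]
# news = 123456789012345678
# [examplefeed]
# feed_url = https://example.com/rss.xml
# channels = news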
def get_sqlite_connection(ini_config):
db_path = ini_config["MAIN"].get("db_path", "feed2discord.db")
conn = sqlite3.connect(db_path)
# If our two tables don't exist, create them.
conn.execute(SQL_CREATE_FEED_INFO_TBL)
conn.execute(SQL_CREATE_FEED_ITEMS_TBL)
# Clean out *some* entries that are over 1 year old...
# Doing this cleanup at start time because some feeds
# do contain very old items and we don't want to keep
# re-evaluating them.
conn.execute(SQL_CLEAN_OLD_ITEMS)
return conn
# Make main, timezone, logger, config global, since used everywhere/anywhere
config, logger = get_config()
MAIN = config['MAIN']
TIMEZONE = get_timezone(config)
# Crazy workaround for a bug with parsing that doesn't apply on all
# pythons:
feedparser.PREFERRED_XML_PARSERS.remove('drv_libxml2')
# global discord client object
client = discord.Client()
def extract_best_item_date(item, tzinfo):
"""
This function loops through all the common date fields for an item in a feed,
and extracts the "best" one. Falls back to "now" if nothing is found.
"""
fields = ("published", "pubDate", "date", "created", "updated")
for date_field in fields:
if date_field in item and len(item[date_field]) > 0:
try:
date_obj = parse_datetime(item[date_field])
if date_obj.tzinfo is None:
tzinfo.localize(date_obj)
return date_obj
except Exception:
pass
# No potentials found, default to current timezone's "now"
return tzinfo.localize(datetime.now())
def process_field(field, item, feed, channel):
"""
This looks at the field from the config, and returns the processed string
naked item in fields: return that field from the feed item
*, **, _, ~, `, ```: markup the field and return it from the feed item
" around the field: string literal
Added new @, turns each comma separated tag into a group mention
"""
logger.debug(f"{feed}:process_field:{field}: started")
item_url_base = feed.get('item_url_base', None)
if field == 'guid' and item_url_base is not None:
if 'guid' in item:
return item_url_base + item['guid']
else:
logger.error('process_field:guid:no such field; try show_sample_entry.py on feed')
return ""
logger.debug(f"{feed}:process_field:{field}: checking regexes")
stringmatch = re.match('^"(.+?)"$', field)
highlightmatch = re.match('^([*_~<]+)(.+?)([*_~>]+)$', field)
bigcodematch = re.match('^```(.+)```$', field)
codematch = re.match('^`(.+)`$', field)
tagmatch = re.match('^@(.+)$', field) # new tag regex
if stringmatch is not None:
# Return an actual string literal from config:
logger.debug(f"{feed}:process_field:{field}:isString")
return stringmatch.group(1) # string from config
elif highlightmatch is not None:
logger.debug(f"{feed}:process_field:{field}:isHighlight")
# If there's any markdown on the field, return field with that
# markup on it:
begin, field, end = highlightmatch.groups()
if field in item:
if field == "link":
url = urljoin(feed.get("feed-url"), item[field])
return begin + url + end
else:
return begin + item[field] + end
else:
logger.error(f"process_field:{field}:no such field")
return ""
elif bigcodematch is not None:
logger.debug(f"{feed}:process_field:{field}:isCodeBlock")
# Code blocks are a bit different, with a newline and stuff:
field = bigcodematch.group(1)
if field in item:
return "```\n{item[field]}\n```"
else:
logger.error(f"process_field:{field}:no such field")
return ""
elif codematch is not None:
logger.debug(f"{feed}:process_field:{field}:isCode")
# Since code chunk can't have other highlights, also do them
# separately:
field = codematch.group(1)
if field in item:
return f"`{item[field]}`"
else:
logger.error(f"process_field:{field}:no such field")
return ""
elif tagmatch is not None:
logger.debug(f"{feed}:process_field:{field}:isTag")
field = tagmatch.group(1)
if field in item:
# Assuming tags are ', ' separated
taglist = item[field].split(', ')
# Iterate through channel roles, see if a role is mentionable and
# then substitute the role for its id
            for role in client.get_channel(channel['id']).guild.roles:
rn = str(role.name)
taglist = [
f"<@&{role.id}>" if rn == str(i) else i for i in taglist
]
return ", ".join(taglist)
else:
logger.error(f"process_field:{field}:no such field")
return ""
else:
logger.debug(f"{feed}:process_field:{field}:isPlain")
# Just asking for plain field:
if field in item:
# If field is special field "link",
# then use urljoin to turn relative URLs into absolute URLs
if field == 'link':
return urljoin(feed.get('feed_url'), item[field])
# Else assume it's a "summary" or "content" or whatever field
# and turn HTML into markdown and don't add any markup:
else:
htmlfixer = HTML2Text()
logger.debug(htmlfixer)
htmlfixer.ignore_links = True
htmlfixer.ignore_images = True
htmlfixer.ignore_emphasis = False
htmlfixer.body_width = 1000
htmlfixer.unicode_snob = True
htmlfixer.ul_item_mark = '-' # Default of "*" likely
# to bold things, etc...
markdownfield = htmlfixer.handle(item[field])
# Try to strip any remaining HTML out. Not "safe", but
# simple and should catch most stuff:
markdownfield = re.sub('<[^<]+?>', '', markdownfield)
return markdownfield
else:
logger.error(f"process_field:{field}:no such field")
return ""
def build_message(feed, item, channel):
"""
This builds a message.
Pulls the fields (trying for channel_name.fields in FEED, then fields in
FEED, then fields in DEFAULT, then "id,description".
    fields in config is a comma-separated string, so split it into a list.
then just adds things, separated by newlines.
truncates if too long.
"""
message = ''
fieldlist = feed.get(channel['name'] + '.fields', feed.get('fields', 'id,description')).split(',')
# Extract fields in order
for field in fieldlist:
logger.debug(f"feed:item:build_message:{field}:added to message")
message += process_field(field, item, feed, channel) + "\n"
# Naked spaces are terrible:
message = re.sub(' +\n', '\n', message)
message = re.sub('\n +', '\n', message)
# squash newlines down to single ones, and do that last...
message = re.sub("(\n)+", "\n", message)
if len(message) > 1800:
message = message[:1800] + "\n... post truncated ..."
return message
async def send_message_wrapper(async_loop, feed, channel, message):
""" This schedules an 'actually_send_message' coroutine to run """
async_loop.create_task(actually_send_message(channel, message, feed))
logger.debug(f"{feed}:{channel['name']}:message scheduled")
async def actually_send_message(channel, message, feed):
logger.debug(f"{feed}:{channel['name']}:actually sending message")
await channel["channel_obj"].send(message)
logger.debug(f"{feed}:{channel['name']}:message sent: {message!r}")
async def background_check_feed(conn, feed, async_loop):
"""
The main work loop
One of these is run for each feed.
It's an asyncio thing. "await" (sleep or I/O) returns to main loop
and gives other feeds a chance to run.
"""
logger.info(f'{feed}: Starting up background_check_feed')
# Try to wait until Discord client has connected, etc:
await client.wait_until_ready()
# make sure debug output has this check run in the right order...
await asyncio.sleep(1)
user_agent = config["MAIN"].get("user_agent", USER_AGENT)
# just a bit easier to use...
_feed = config[feed]
# pull config for this feed out:
feed_url = _feed.get('feed_url')
rss_refresh_time = _feed.getint('rss_refresh_time', 3600)
start_skew = _feed.getint('start_skew', rss_refresh_time)
start_skew_min = _feed.getint('start_skew_min', 1)
max_age = _feed.getint('max_age', 86400)
# loop through all the channels this feed is configured to send to
channels = []
for key in _feed.get('channels').split(','):
logger.debug(feed + ': adding channel ' + key)
# stick a dict in the channels array so we have more to work with
channels.append(
{
'channel_obj': client.get_channel(int(config['CHANNELS'][key])),
'name': key,
'id': int(config['CHANNELS'][key]),
}
)
if start_skew > 0:
sleep_time = random.uniform(start_skew_min, start_skew)
logger.info(f'{feed}:start_skew:sleeping for {str(sleep_time)}')
await asyncio.sleep(sleep_time)
# Basically run forever
while not client.is_closed():
# And tries to catch all the exceptions and just keep going
# (but see list of except/finally stuff below)
try:
logger.info(f'{feed}: processing feed')
http_headers = {"User-Agent": user_agent}
# Download the actual feed, if changed since last fetch
# pull data about history of this *feed* from DB:
cursor = conn.cursor()
cursor.execute("select lastmodified,etag from feed_info where feed=? OR url=?", [feed, feed_url])
data = cursor.fetchone()
# If we've handled this feed before,
# and we have etag from last run, add etag to headers.
# and if we have a last modified time from last run,
# add "If-Modified-Since" to headers.
if data is None: # never handled this feed before...
logger.info(f"{feed}:looks like updated version. saving info")
cursor.execute("REPLACE INTO feed_info (feed,url) VALUES (?,?)", [feed, feed_url])
conn.commit()
logger.debug(f"{feed}:feed info saved")
else:
logger.debug(f"{feed}:setting up extra headers for HTTP request.")
logger.debug(data)
lastmodified = data[0]
etag = data[1]
if lastmodified is not None and len(lastmodified):
logger.debug(f"{feed}:adding header If-Modified-Since: {lastmodified}")
http_headers['If-Modified-Since'] = lastmodified
else:
logger.debug(f"{feed}:no stored lastmodified")
if etag is not None and len(etag):
logger.debug(f"{feed}:adding header ETag: {etag}")
http_headers['ETag'] = etag
else:
logger.debug(f"{feed}:no stored ETag")
logger.debug(f"{feed}:sending http request for {feed_url}")
feed_data = None
# Send actual request.
async with aiohttp.ClientSession() as sess:
async with sess.get(feed_url, headers=http_headers) as http_response:
logger.debug(http_response)
# First check that we didn't get a "None" response, since that's
# some sort of internal error thing:
if http_response.status is None:
logger.error(f"{feed}:HTTP response code is NONE")
http_response.close()
# raise not HTTPError because this is giving me NoneType errors
raise HTTPForbidden()
# Some feeds are smart enough to use that if-modified-since or
# etag info, which gives us a 304 status. If that happens,
# assume no new items, fall through rest of this and try again
# later.
elif http_response.status == 304:
logger.debug(f"{feed}:data is old; moving on")
http_response.close()
raise HTTPNotModified()
# If we get anything but a 200, that's a problem and we don't
# have good data, so give up and try later.
# Mostly handled different than 304/not-modified to make logging
# clearer.
elif http_response.status != 200:
logger.debug(f"{feed}:HTTP error not 200")
http_response.close()
# raise not HTTPError because this is giving me NoneType errors
raise HTTPForbidden()
else:
logger.debug(f"{feed}:HTTP success")
# pull data out of the http response
logger.debug(f"{feed}:reading http response")
http_data = await http_response.read()
# parse the data from the http response with feedparser
logger.debug(f"{feed}:parsing http data")
feed_data = feedparser.parse(http_data)
logger.debug(f"{feed}:done fetching")
# If we got an ETAG back in headers, store that, so we can
# include on next fetch
if 'ETAG' in http_response.headers:
etag = http_response.headers['ETAG']
logger.debug(f"{feed}:saving etag: {etag}")
cursor.execute("UPDATE feed_info SET etag=? where feed=? or url=?", [etag, feed, feed_url])
conn.commit()
logger.debug(f"{feed}:etag saved")
else:
logger.debug(f"{feed}:no etag")
# If we got a Last-Modified header back, store that, so we can
# include on next fetch
if 'LAST-MODIFIED' in http_response.headers:
modified = http_response.headers['LAST-MODIFIED']
logger.debug(f"{feed}:saving lastmodified: {modified}")
cursor.execute("UPDATE feed_info SET lastmodified=? where feed=? or url=?",
[modified, feed, feed_url])
conn.commit()
logger.debug(f"{feed}:saved lastmodified")
else:
logger.debug(f"{feed}:no last modified date")
# Process all of the entries in the feed
# Use reversed to start with end, which is usually oldest
logger.debug(f"{feed}:processing entries")
if feed_data is None:
logger.error(f"{feed}:no data in feed_data")
# raise not HTTPError because this is giving me NoneType errors
raise HTTPForbidden()
for item in reversed(feed_data.entries):
logger.debug(f"{feed}:item:processing this entry:{item}")
# Pull out the unique id, or just give up on this item.
if 'id' in item:
uid = item.id
elif 'guid' in item:
uid = item.guid
elif 'link' in item:
uid = item.link
else:
logger.error(f"{feed}:item:no id, skipping")
continue
# Get our best date out, in both raw and parsed form
pubdate = extract_best_item_date(item, TIMEZONE)
pubdate_fmt = pubdate.strftime("%a %b %d %H:%M:%S %Z %Y")
logger.debug(f"{feed}:item:id:{uid}")
logger.debug(f"{feed}:item:checking database history for this item")
# Check DB for this item
cursor.execute("SELECT published,title,url,reposted FROM feed_items WHERE id=?", [uid])
data = cursor.fetchone()
# If we've never seen it before, then actually processing
# this:
if data is None:
logger.info(f"{feed}:item {uid} unseen, processing:")
# Store info about this item, so next time we skip it:
cursor.execute("INSERT INTO feed_items (id,published) VALUES (?,?)", [uid, pubdate_fmt])
conn.commit()
# Doing some crazy date math stuff...
# max_age is mostly so that first run doesn't spew too
# much stuff into a room, but is also a useful safety
# measure in case a feed suddenly reverts to something
# ancient or other weird problems...
time_since_published = TIMEZONE.localize(datetime.now()) - pubdate.astimezone(TIMEZONE)
if time_since_published.total_seconds() < max_age:
logger.info(f"{feed}:item:fresh and ready for parsing")
# Loop over all channels for this particular feed
# and process appropriately:
for channel in channels:
# just a bit easier to use...
_name = channel['name']
include = True
filter_field = _feed.get(f"{_name}.filter", _feed.get('filter_field', 'title'))
# Regex if channel exists
if f"{_name}.filter" in _feed or 'filter' in _feed:
logger.debug(f"{feed}:item:running filter for {_name}")
re_pat = _feed.get(f"{_name}.filter", _feed.get('filter', '^.*$'))
logger.debug(f"{feed}:item:using filter: {re_pat} on "
f"{item['title']} field {filter_field}")
re_match = re.search(re_pat, item[filter_field])
if re_match is None:
include = False
logger.info(f"{feed}:item:failed filter for {_name}")
elif f"{_name}.filter_exclude" in _feed or 'filter_exclude' in _feed:
logger.debug(f"{feed}:item:running exclude filter for{_name}")
re_pat = _feed.get(f"{_name}.filter_exclude", _feed.get('filter_exclude', '^.*$'))
logger.debug(f"{feed}:item:using filter_exclude: {re_pat} on "
f"{item['title']} field {filter_field}")
re_match = re.search(re_pat, item[filter_field])
if re_match is None:
include = True
logger.info(f"{feed}:item:passed exclude filter for {_name}")
else:
include = False
logger.info(f"{feed}:item:failed exclude filter for {_name}")
else:
include = True # redundant safety net
logger.debug(f"{feed}:item:no filter configured for {_name}")
if include is True:
logger.debug(f"{feed}:item:building message for {_name}")
message = build_message(_feed, item, channel)
logger.debug(f"{feed}:item:sending message (eventually) to {_name}")
await send_message_wrapper(async_loop, feed, channel, message)
else:
logger.info(f"{feed}:item:skipping item due to not passing filter for {_name}")
else:
# Logs of debugging info for date handling stuff...
logger.info(f"{feed}:too old, skipping")
logger.debug(f"{feed}:now:now:{time.time()}")
logger.debug(f"{feed}:now:gmtime:{time.gmtime()}")
logger.debug(f"{feed}:now:localtime:{time.localtime()}")
logger.debug(f"{feed}:pubDate:{pubdate}")
logger.debug(item)
# seen before, move on:
else:
logger.debug(f"{feed}:item: {uid} seen before, skipping")
# This is completely expected behavior for a well-behaved feed:
except HTTPNotModified:
logger.debug(f"{datetime.today()}:{feed}: Headers indicate feed unchanged since last time fetched:")
logger.debug(sys.exc_info())
# Many feeds have random periodic problems that shouldn't cause
# permanent death:
except HTTPForbidden:
logger.warning(f"{datetime.today()}:{feed}: Unexpected HTTPError:")
logger.warning(sys.exc_info())
logger.warning(f"{datetime.today()}:{feed}: Assuming error is transient and trying again later")
# sqlite3 errors are probably really bad and we should just totally
# give up on life
except sqlite3.Error as sqlerr:
logger.error(f"{datetime.today()}:{feed}: sqlite3 error: ")
logger.error(sys.exc_info())
logger.error(sqlerr)
raise
# Ideally we'd remove the specific channel or something...
# But I guess just throw an error into the log and try again later...
except discord.errors.Forbidden:
logger.error(f"{datetime.today()}:{feed}: discord.errors.Forbidden")
logger.error(sys.exc_info())
logger.error(f"{datetime.today()}:{feed}: Perhaps bot isn't allowed in one of the channels for this feed?")
# raise # or not? hmm...
except asyncio.TimeoutError:
logger.error(f"{datetime.today()}:{feed}: Timeout error")
except aiohttp.ClientConnectorError:
logger.error(f"{datetime.today()}:{feed}: Connection failed!")
except aiohttp.ClientOSError:
logger.error(f"{datetime.today()}:{feed}: Connection not responding!")
except aiohttp.ServerDisconnectedError:
logger.error(f"{datetime.today()}:{feed}: Socket closed by peer")
# unknown error: definitely give up and die and move on
except BaseException:
logger.exception(f"{datetime.today()}:{feed}: Unexpected error - giving up")
# raise # or not? hmm...
# No matter what goes wrong, wait same time and try again
finally:
logger.debug(f"{feed}:sleeping for {str(rss_refresh_time)} seconds")
await asyncio.sleep(rss_refresh_time)
@client.event
async def on_ready():
logger.info(f"Logged in as {client.user.name} ({client.user.id})")
def main():
"""
Set up the tasks for each feed and start the main event loop thing.
In this __main__ thing so can be used as library.
"""
loop = asyncio.get_event_loop()
feeds = get_feeds_config(config)
conn = get_sqlite_connection(config)
try:
for feed in feeds:
loop.create_task(background_check_feed(conn, feed, loop))
if "login_token" in MAIN:
loop.run_until_complete(client.login(MAIN.get("login_token")))
loop.run_until_complete(client.connect())
except BaseException:
loop.run_until_complete(client.close())
finally:
loop.close()
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 26,915 | py | 2 | feed2discord.py | 1 | 0.558053 | 0.554784 | 0 | 646 | 40.664087 | 119 |
jasonliuxyz/homework-python | 523,986,028,594 | fdd6a6cf08504c70a8b65d87f15f242e3f338514 | e8931042cf9a44d87df8505785a1d95250c961ec | /cookbook/charpter1/iterator02.py | aff0fc84b33f65de5348d387d248ad45b580d7aa | []
| no_license | https://github.com/jasonliuxyz/homework-python | df518eae576839a1560a907cd9770a0c95d18f2a | 3cde18e0cb3f6b7a6e3775492310dc298453106f | refs/heads/master | 2023-01-06T11:18:24.934855 | 2020-11-06T09:15:03 | 2020-11-06T09:15:03 | 273,636,458 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- encoding: utf-8 -*-
'''
Iterators:
    1. An iterable contains an iterator, so an iterator also implements the __iter__ method
    2. An iterator implements the __next__ method on top of what an iterable provides
    3. The iter() function turns an iterable object into an iterator object
Purpose of iterators:
    An iterator is a data structure that implements the iterator protocol; it hides the underlying complex logic
Reference: https://drivingc.com/p/5c4c210d4b0f2b793a5fe2e7
'''
from collections.abc import Iterable
from collections.abc import Iterator
from collections.abc import Generator
class IterObj:
def __init__(self):
self.a = [3, 5, 7, 11, 13, 17, 19]
self.n = len(self.a)
self.i = 0
def __iter__(self):
return iter(self.a)
def __next__(self):
while self.i < self.n:
v = self.a[self.i]
self.i += 1
return v
else:
self.i = 0
raise StopIteration()
it = IterObj()
print(isinstance(it, Iterable))
print(isinstance(it, Iterator))
print(isinstance(it, Generator))
print(hasattr(it, "__iter__"))
print(hasattr(it, "__next__"))
print(next(it))
print(next(it))
print(next(it))
with open('/data/code/test.py', 'r') as f:
print(isinstance(f, Iterator))
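# iter() on a plain list (a minimal sketch of point 3 in the docstring above)
nums = [1, 2, 3]
it2 = iter(nums)
print(isinstance(it2, Iterator))  # True
print(next(it2))  # 1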
| UTF-8 | Python | false | false | 1,131 | py | 55 | iterator02.py | 53 | 0.675297 | 0.640777 | 0 | 46 | 19.130435 | 50 |
mwarrior92/skylines | 16,252,156,268,502 | 9acc26c7e1658a1401d76103b7979243e373f9bd | 2aa7bf4925f2c859280a8bad48166e6d34f7e0af | /scripts/deprecated/dns_redirection_check.py | c97d1ceeed72d3e20105f235d7cbeb30123c5d96 | []
| no_license | https://github.com/mwarrior92/skylines | f36edacd16dafaa8af3fad668cd4540ff161abd9 | 350df2e41dfa55ac3acbaa387407a859a647a1ce | refs/heads/master | 2021-06-10T14:47:08.805631 | 2019-10-20T15:06:44 | 2019-10-20T15:06:44 | 113,765,840 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from easiest.mms import collector, dispatcher, mdo
from easiest import cdo
from easiest.helpers import format_dirpath, mydir
import csv
import json
from pymongo import MongoClient
import time
mclient = MongoClient()
db = mclient.skyline
coll = db.dns_redirection_check
topdir = format_dirpath(mydir()+"../")
label = 'dns_redirection_check'
platform = "ripe_atlas"
# TODO find a rational way to choose these countries
countries = ["JP", "US", "AU", "FR", "IT", "BR", "IN", "DE", "ZA", "KE"]
# load top sites from Umbrella
alldoms = list()
with open(topdir+'/support_files/top-1m.csv', 'r+') as f:
reader = csv.reader(f)
for line in reader:
alldoms.append(line[1])
if len(alldoms) > 1999:
break
size = 10
loops = len(alldoms) / size
# get clients
locs = list()
for c in countries:
locs.append(cdo.TargetLocation())
locs[-1].set_countries([c])
cgs = list()
for loc in locs:
tmp_tcg = cdo.TargetClientGroup(loc, target_quantity=2)
cgs.append(tmp_tcg.get_ClientGroup(platform))
cg = cdo.ClientGroup.merge(*cgs)
# run init test measurement to make sure nodes are working
doms = ["google.com"]
my_mdo = mdo.dns.DNS(label, query_domains=doms)
d = dispatcher.Dispatcher(my_mdo, platform, cg)
my_mro = d.dispatch()
my_mro.set('file_path',
format_dirpath(topdir+"data/"+label+"/")+"testing_probes.json")
c = collector.SpinningCollector(my_mro, timeout=60*5, spin_time=120)
collector.wait_on_collectors([c])
with open(my_mro.file_path, 'r+') as f:
data = json.load(f)
client_info = dict()
for client in cg.clients:
client_info[client.probe_id] = client.country_code
good_probes = set()
for r in data['results']:
if 'answers' in r:
if 'A' in r['answers']:
good_probes.add(r['prb_id'])
bad_probes = [z for z in cg.clients if z.probe_id not in good_probes]
cg.clients = [z for z in cg.clients if z.probe_id in good_probes]
locs = list()
for c in [z.country_code for z in bad_probes]:
locs.append(cdo.TargetLocation())
locs[-1].set_countries([c])
cgs = list()
for loc in locs:
tmp_tcg = cdo.TargetClientGroup(loc, target_quantity=1)
cgs.append(tmp_tcg.get_ClientGroup(platform))
cgs.append(cg)
cg = cdo.ClientGroup.merge(*cgs)
for ind in range(loops):
cg.save_json(file_path=format_dirpath(topdir+"experiment_records/"+label+"/")+"clients_"+str(ind))
print("ind is: " + str(ind) + "****************")
doms = alldoms[ind*size:(ind+1)*size]
# perform twice to check for fast churn
for idx in ["i0", "i1"]:
print(idx)
# setup measurement
my_mdo = mdo.dns.DNS(label, query_domains=doms)
my_mdo.save_json(file_path=format_dirpath(topdir+"experiment_records/"+label+"/")+"meas_"+str(ind)+idx)
# deploy measurement
d = dispatcher.Dispatcher(my_mdo, platform, cg)
my_mro = d.dispatch()
my_mro.set('file_path',
format_dirpath(topdir+"data/"+label+"/")+"loop_"+str(ind)+idx+".json")
# collect measurement results
c = collector.SpinningCollector(my_mro, timeout=60*5, spin_time=120)
#c.grabber_thread.join()
collector.wait_on_collectors([c])
with open(my_mro.file_path, 'r+') as f:
data = json.load(f)
client_info = dict()
pushed = 0
for client in cg.clients:
client_info[client.probe_id] = client.country_code
entries = list()
good_probes = set()
for r in data['results']:
if 'answers' in r:
if 'A' in r['answers']:
entries.append({
'probe_id': r['prb_id'],
'answers': r['answers'],
'country_code': client_info[r['prb_id']],
'domain': r['query_domain'],
'iteration': int(idx[1])
})
good_probes.add(r['prb_id'])
coll.insert_many(entries)
time.sleep(60)
# refresh client set
bad_probes = [z for z in cg.clients if z.probe_id not in good_probes]
cg.clients = [z for z in cg.clients if z.probe_id in good_probes]
locs = list()
for c in [z.country_code for z in bad_probes]:
locs.append(cdo.TargetLocation())
locs[-1].set_countries([c])
cgs = list()
for loc in locs:
tmp_tcg = cdo.TargetClientGroup(loc, target_quantity=1)
cgs.append(tmp_tcg.get_ClientGroup(platform))
cgs.append(cg)
cg = cdo.ClientGroup.merge(*cgs)
| UTF-8 | Python | false | false | 4,521 | py | 81 | dns_redirection_check.py | 40 | 0.607388 | 0.600088 | 0 | 137 | 31.992701 | 111 |
egandone/bitesofpy | 17,875,653,905,052 | 9dd6c4e0627e8d5ff31474ba3113f312841329b6 | faea233c3a52237731729563e03c5e90fc3c2dc2 | /229/best_programming_books.py | 38438f674f86fb9628ff2e31815b6ea413ebb927 | []
| no_license | https://github.com/egandone/bitesofpy | 088ef219e8db0de4eae066852575aa70e4a6ba5a | 6645f06acae6251db30122df8aa8c0ec5dab5af4 | refs/heads/master | 2022-12-12T10:01:01.239323 | 2020-11-21T22:22:26 | 2020-11-21T22:22:26 | 192,716,481 | 0 | 0 | null | false | 2022-12-08T03:14:11 | 2019-06-19T11:08:57 | 2020-11-21T22:22:34 | 2022-12-08T03:14:09 | 716 | 0 | 0 | 17 | Python | false | false | import os
import re
from pathlib import Path
from urllib.request import urlretrieve
import dataclasses
from bs4 import BeautifulSoup
url = ("https://bites-data.s3.us-east-2.amazonaws.com/"
"best-programming-books.html")
tmp = Path(os.getenv('TMP', "/tmp"))
html_file = tmp / "books.html"
if not html_file.exists():
urlretrieve(url, html_file)
@dataclasses.dataclass
class Book:
"""Book class should instatiate the following variables:
title - as it appears on the page
author - should be entered as lastname, firstname
year - four digit integer year that the book was published
rank - integer rank to be updated once the books have been sorted
rating - float as indicated on the page
"""
title: str
author: str
year: int
rank: int
rating: float
lastname: str = dataclasses.field(init=False)
firstname: str = dataclasses.field(init=False)
def __post_init__(self):
self.lastname, self.firstname = self.author.split(',')
def __setattr__(self, name, value):
super().__setattr__(name, value)
if name == 'author':
self.__post_init__()
def __repr__(self):
return f'[{self.rank:03d}] {self.title} ({self.year})\n {self.author} {float(self.rating)}'
def _get_soup(file):
return BeautifulSoup(file.read_text(encoding='utf-8'), "html.parser")
def display_books(books, limit=10, year=None):
"""Prints the specified books to the console
:param books: list of all the books
:param limit: integer that indicates how many books to return
:param year: integer indicating the oldest year to include
:return: None
"""
trimmed_list = [
book for book in books if not year or book.year >= year][:limit]
for book in trimmed_list:
print(book)
def load_data():
"""Loads the data from the html file
Creates the soup object and processes it to extract the information
required to create the Book class objects and returns a sorted list
of Book objects.
Books should be sorted by rating, year, title, and then by author's
last name. After the books have been sorted, the rank of each book
should be updated to indicate this new sorting order.The Book object
with the highest rating should be first and go down from there.
"""
soup = _get_soup(html_file)
python_books = []
titles = soup.find_all("h2", class_="main")
for title in titles:
if 'python' in title.getText().lower():
top = title.parent.parent
authors = top.find('h3', class_='authors')
date = top.find('span', class_='date')
rating = top.find('span', class_='our-rating')
if authors and date and rating:
name = authors.find('a')
*firstnames, lastname = name.getText().split(' ')
firstname = ' '.join(firstnames)
date = date.find(string=re.compile(r'^\d\d\d\d$'))
if name:
book = Book(title.getText(), author=f'{lastname}, {firstname}', year=int(
date), rank=0, rating=float(rating.getText()))
python_books.append(book)
python_books.sort(key=lambda book: (-book.rating,
book.year, book.title.lower(), book.lastname))
for i, book in enumerate(python_books, start=1):
book.rank = i
return python_books
def main():
books = load_data()
display_books(books, limit=5, year=2017)
"""If done correctly, the previous function call should display the
output below.
"""
if __name__ == "__main__":
main()
"""
[001] Python Tricks (2017)
Bader, Dan 4.74
[002] Mastering Deep Learning Fundamentals with Python (2019)
Wilson, Richard 4.7
[006] Python Programming (2019)
Fedden, Antony Mc 4.68
[007] Python Programming (2019)
Mining, Joseph 4.68
[009] A Smarter Way to Learn Python (2017)
Myers, Mark 4.66
"""
| UTF-8 | Python | false | false | 4,130 | py | 108 | best_programming_books.py | 100 | 0.605085 | 0.589104 | 0 | 125 | 31.04 | 104 |
hyliang96/Guided-Denoise | 10,909,216,937,562 | 00b3189897422e5dc32d796e48436636792354f9 | 4e1df0bb0769986b6ee1367c2cfd72430770f65b | /GD_train/print_utlis.py | 1a3d2e73516b5f21e413fcf7161634770fdd47cc | []
| no_license | https://github.com/hyliang96/Guided-Denoise | e75f6c0b982f6f0afc64b266d41e8997c1e92258 | c611dc7229261e1df9a4109cc6e013aace7d2fd6 | refs/heads/master | 2020-05-16T10:11:12.652450 | 2019-06-10T08:45:37 | 2019-06-10T08:45:37 | 182,975,021 | 0 | 0 | null | true | 2019-04-23T08:55:55 | 2019-04-23T08:55:54 | 2019-04-22T13:56:44 | 2018-03-27T11:39:28 | 87,639 | 0 | 0 | 0 | null | false | false | #! /usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import time
# Works when this file is used from both Python 2 and Python 3
class Log(object):
    # Usage
    #   After log = Log('test_data'), print outputs to both the file 'test_data'
    #   and the screen (dual output). This covers every print call:
    #       - print in this code
    #       - print inside called functions
    #       - print inside imported files
    #   flush() to both file and screen is supported
    #   '\r' is only supported on screen (in a file, vim shows '\r' as ^M),
    #   so progress bars can only be displayed on screen
    #   Only after log.close() does print go back to screen-only output
def __init__(self, filename='',mode='w',*args):
# filename =
# time.strftime("%m-%d_%H:%M:%S", time.localtime())+filename
self.f = open(filename, mode)
sys.stdout = self
print('====== log start ======',
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
              # formatted like 2016-03-20 11:45:39
'======')
def write(self, data):
self.f.write(data)
sys.__stdout__.write(data)
def flush(self):
        # Example
        #   print("xxxx", end='')
        #   log.flush()
        # makes "xxxx" appear on screen and in the file immediately;
        # without log.flush() it waits for a later newline-terminated print("xxxx")
self.f.flush()
sys.__stdout__.flush()
def close(self):
print('======= log end =======',
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
'======')
self.flush()
self.f.close()
sys.stdout=sys.__stdout__
def cursor_back(func):
    # Returns the cursor to the start of the line, printing to the screen only
    # On screen '\r' moves the cursor back to the line start (characters already
    # printed on the line are not erased), but in a file '\r' shows up as "^M"
    # or "<0x0d>" and cannot move the cursor back
def func_with_cursor_back(*args, **kw):
        # in case sys.stdout was replaced earlier, e.g. by the screen/file dual-output Log
original = sys.stdout
sys.stdout = sys.__stdout__
        # run the call, then move the cursor back to the start of the line
result = func(*args, **kw)
print("\r",end="")
sys.stdout.flush()
        # restore sys.stdout
sys.stdout.flush()
sys.stdout = original
return result
return func_with_cursor_back
def progess_bar(process_rate, precision=2,num_block=30,prefix="",suffix="", clean=True):
    # Prints to the screen only
    # process_rate: float in [0, 1], the fraction of progress completed
    # precision: number of decimal places for the percentage
    # num_block: number of '>' blocks in the bar
    # Looks like:
# 94.000 % | > > > > > > > > > > > > > > > > > > |
    # clean: how to finish when process_rate == 1
    #     True: clear the bar, no newline, cursor back to line start
    #     False: keep the bar and print a newline
line=""
line+=prefix
    # right-align the percentage
line+= ( ('%%%ds'%(precision+4)) % (('%%.%df'%precision)%(100*process_rate)) ) +' %'
line+=' |'
    # the bar itself
n = int(process_rate*num_block)
for i in range(n):
line+='>'
for i in range(num_block-n):
line+=' '
line+='| '
    # suffix
line+=suffix
cursor_back(print)(line, end='')
if process_rate==1:
if clean:
cursor_back(print)(' '*len(line), end='')
else:
print()
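# Usage sketch (screen-only, as noted above):
# for i in range(101):
#     progess_bar(i / 100.0)
#     time.sleep(0.02)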
| UTF-8 | Python | false | false | 3,432 | py | 7 | print_utlis.py | 3 | 0.507231 | 0.494577 | 0 | 110 | 24.136364 | 89 |
ipums/mendeley-python-sdk | 8,100,308,338,300 | b3fa5cb8e112a1992486bbfc56e216742983a270 | 2de5fd54fbf82ce012c6496b1495412b06b8516b | /mendeley/resources/base_documents.py | 99fa0111ff56f2c4e04609565b223ab3d78d3181 | [
"Apache-2.0"
]
| permissive | https://github.com/ipums/mendeley-python-sdk | 0bc69ab7182ca82645397b067beaef647b40f31a | 0df41fbdafd164d27c42a101f2ed0bba91f3bdfb | refs/heads/master | 2022-12-18T01:12:25.000719 | 2022-12-09T20:19:50 | 2022-12-09T20:19:50 | 94,386,874 | 1 | 1 | Apache-2.0 | true | 2022-12-09T20:19:57 | 2017-06-15T01:25:11 | 2022-11-08T23:45:53 | 2022-12-09T20:19:50 | 445 | 1 | 1 | 0 | Python | false | false | from mendeley.resources.base import ListResource, GetByIdResource
class DocumentsBase(GetByIdResource, ListResource):
def __init__(self, session, group_id):
self.session = session
self.group_id = group_id
def get(self, id, view=None):
return super(DocumentsBase, self).get(id, view=view)
def list(self, page_size=None, view=None, sort=None, order=None, modified_since=None, deleted_since=None, marker=None, folder_id=None, tag=None, page=None):
return super(DocumentsBase, self).list(page_size,
view=view,
sort=sort,
order=order,
modified_since=modified_since,
deleted_since=deleted_since,
marker=marker,
folder_id=folder_id,
tag=tag,
group_id=self.group_id,
page=page)
def iter(self, page_size=None, view=None, sort=None, order=None, modified_since=None, deleted_since=None, folder_id=None, tag=None):
return super(DocumentsBase, self).iter(page_size,
view=view,
sort=sort,
order=order,
modified_since=modified_since,
deleted_since=deleted_since,
folder_id=folder_id,
tag=tag,
group_id=self.group_id)
@property
def _session(self):
return self.session
def _obj_type(self, **kwargs):
return self.view_type(kwargs.get('view'))
@staticmethod
def view_type(view):
raise NotImplementedError
| UTF-8 | Python | false | false | 2,140 | py | 88 | base_documents.py | 80 | 0.428037 | 0.428037 | 0 | 45 | 46.555556 | 160 |
szeamer/leafy-colored-website | 6,124,623,387,515 | d8c208ed5878ecb0d50163c25ef9341d477ecc13 | f7dff8d8d5210aaabba9556a752e20a5b17c087a | /reading.py | 11ec6d7ab44069d6831f6e3a7051c40a89dc63ab | []
| no_license | https://github.com/szeamer/leafy-colored-website | f22afccc840eb56e1ce136db695d67305f1f40e9 | 39c930cb687c85f789220afe17dac06a8ac79141 | refs/heads/master | 2020-03-23T23:09:42.994451 | 2018-07-24T22:34:47 | 2018-07-24T22:34:47 | 142,093,144 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def readFile(file):
with open(file, 'r') as f:
lines = f.readlines()
        print(lines)
| UTF-8 | Python | false | false | 97 | py | 9 | reading.py | 1 | 0.57732 | 0.57732 | 0 | 4 | 23.25 | 30 |
DefinitelyNotBen/pyBlackjack | 12,369,505,831,320 | 3bfd182a87c7d5eb508daf605f0002553e08c8b5 | 43398e11e3f4bfab8e8147bd11cfd3452794b248 | /main.py | 1b874bf7c86046f0830b50b2588e7b566a5549fe | []
| no_license | https://github.com/DefinitelyNotBen/pyBlackjack | c65292d0b43e80565207bda36c3c598a6ee03f82 | 4bb670a797dd535c1f652a1b84d85854790cc44b | refs/heads/main | 2022-12-29T03:28:24.911204 | 2020-10-13T13:49:00 | 2020-10-13T13:49:00 | 303,716,591 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import deck
import player
import card
import board
import time
# the time package has been imported into this program and used to create short time delays
# these delays help avoid disorienting the player by running through steps faster than they can process them
def main():
    # first there is the greeting message
print("Hello and welcome to Blackjack")
# create variable for deck
play_deck = deck.Deck()
# user and dealer are declared then added to the gameBoard
user = player.Player("User")
dealer = player.Player("Dealer")
gameBoard = board.Board(dealer, user)
# create loop to start game
while True:
start = input("Would you like to play? Y / N\n")
if start.lower() == "y":
game_on = True
break
elif start.lower() == "n":
game_on = False
break
else:
print("Sorry, I didn't catch that\n")
# this is the game loop that will keep going until the user wants to stop playing
while game_on:
# create cards for play_deck and then shuffle them, this is done every game
play_deck.newDeck()
play_deck.shuffle()
        # reset the hands of the players on the gameboard
gameBoard.player.hand = []
gameBoard.dealer.hand = []
# deal the cards
gameBoard.dealer.hit(play_deck.deal())
gameBoard.dealer.hit(play_deck.deal())
gameBoard.player.hit(play_deck.deal())
gameBoard.player.hit(play_deck.deal())
# creating variable to store dealer card for later
hidden_card = gameBoard.dealer.hand[1]
gameBoard.dealer.hand[1] = "X"
print("\nYour current chips balance is " + str(gameBoard.player.chips))
print("\nUser will go first!\n")
betting_turn = True
# loop for user to bet chips on the game
while betting_turn:
try:
bet = input("\nWhat would you like to bet? (Type 0 to exit)\n")
bet_num = int(bet)
if bet_num == 0 :
break
elif bet_num > gameBoard.player.chips:
print("\nYou do not have enough chips to make this bet!\n")
else:
gameBoard.player.bet(bet_num)
print("Your bet of " + str(bet_num) + " has been placed")
betting_turn = False
except:
print("Oops! Something went wrong, please be sure to input a number!")
# had to create exit for loop otherwise would never let you not play
if bet_num == 0:
break
# booleans to keep track of blackjack status and player turn
player_blackjack = False
player_turn = True
# loop that will continue until the player is bust or ends their turn
while player_turn:
gameBoard.display()
if len(gameBoard.player.hand) == 2 and gameBoard.player.count() == 21:
print("Player has blackjack!")
player_turn = False
player_blackjack = True
break
print("\nCurrent hand value is " + str(gameBoard.player.count()))
move = input("\nWhat would you like to do? Hit / Stick \n")
# if player hits
if move.lower() == "hit":
print("\nPlayer hits\n")
# declare card variable to print and add to hand
draw = play_deck.deal()
print("You drew a " + str(draw))
gameBoard.player.hit(draw)
print("\nCurrent hand value is " + str(gameBoard.player.count()))
# check if player is bust
if gameBoard.player.count() > 21:
print("\nPlayer is bust\n")
print("Game over! Dealer wins!")
player_turn = False
# if player sticks then end turn
elif move.lower() == "stick":
print("\nPlayer sticks, end turn!\n")
player_turn = False
else:
print("\nSorry, I didn't catch that\n")
player_hand = gameBoard.player.count()
dealer_turn = False
# reveal hidden card of dealer
gameBoard.dealer.hand[1] = hidden_card
gameBoard.display()
time.sleep(1.0)
# if player has blackjack
if player_blackjack:
# if both player and dealer have blackjak then its a draw and bet is returned
if len(gameBoard.dealer.hand) == 2 and gameBoard.dealer.count() == 21:
print("\nBoth dealer and player have blackjack!\nPlayer bet is returned\n")
gameBoard.player.chips += bet_num
else:
# player wins so return winnings based on bet
gameBoard.player.chips = bet_num * 2 + gameBoard.player.chips
print("\nYou have won +" + str(bet_num) + " chips!")
# if only dealer has blackjack then dealer wins
elif len(gameBoard.dealer.hand) == 2 and gameBoard.dealer.count() == 21 and player_hand <= 21:
print("\nDealer has blackjack!\nDealer wins!")
# if nobody has blackjack and player is not bust then game continues
elif player_hand <= 21:
dealer_turn = True
print("\nIt is now the dealers turn")
print("\nDealer's hand value is " + str(gameBoard.dealer.count()) + "\n")
# loop dealers turn until game is resolved
while dealer_turn:
time.sleep(2.0)
# first check to see if dealer has gone bust
if(gameBoard.dealer.count() > 21):
print("\nDealer has gone bust, player wins!")
# player wins so return winnings based on bet
gameBoard.player.chips = bet_num * 2 + gameBoard.player.chips
print("\nYou have won +" + str(bet_num) + " chips!")
dealer_turn = False
break
# if dealers hand is same or more than player then dealer wins
elif player_hand <= gameBoard.dealer.count():
print("Dealer wins this round!")
dealer_turn = False
break
draw = play_deck.deal()
print("\nDealer deals a " + str(draw))
gameBoard.dealer.hit(draw)
# display game board
gameBoard.display()
print("\nDealer's hand value is " + str(gameBoard.dealer.count()) + "\n")
# final loop to see if player wants to play again
while True:
if gameBoard.player.chips == 0:
print("\nYou have run out of chips to bet with!\n")
game_on = False
break
replay = input("\nWould you like to play again? Y / N\n")
if replay.lower() == "y":
game_on = True
break
elif replay.lower() == "n":
game_on = False
break
else:
print("Sorry, I didn't catch that\n")
print("Thank you for playing!")
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 7,891 | py | 6 | main.py | 5 | 0.498289 | 0.494487 | 0 | 242 | 30.590909 | 118 |
edgar-code-repository/django_redis_caching | 1,099,511,644,291 | c6e067bb695b65f252f1d40ecad92900cad19d22 | 5b44cdefc88a8a224a61009ce956ffbc073bdb18 | /main_app/views_continents.py | 66ef278d7c360e62421139c6a93c67e2eec3b8d4 | []
| no_license | https://github.com/edgar-code-repository/django_redis_caching | 4c2a259c2a585be9dd4240aa0016e0490d8a4cec | e1d1097c6f408463bd51b4904f8703f502fb48cb | refs/heads/master | 2020-05-25T17:44:28.559042 | 2019-08-03T03:10:41 | 2019-08-03T03:10:41 | 187,914,587 | 0 | 0 | null | false | 2019-11-02T17:17:35 | 2019-05-21T21:15:07 | 2019-08-03T03:12:04 | 2019-11-02T17:17:34 | 911 | 0 | 0 | 1 | HTML | false | false | from django.core.cache import cache
from django.core.cache.backends.base import DEFAULT_TIMEOUT
from django.conf import settings
from django.shortcuts import render
from django.views.generic import ListView
from django.views.generic import CreateView
from django.views.generic import UpdateView
from django.views.generic import DeleteView
from .models import Continent
from datetime import datetime
import logging
logger = logging.getLogger(__name__)
CONTINENTS_KEY_CACHE = "GEO_APP_CONTINENT_KEY"
CACHE_TTL = getattr(settings, 'CACHE_TTL', DEFAULT_TIMEOUT)
class ContinentsView(ListView):
template_name = "main_app/continents.html"
context_object_name = "continents_list"
model = Continent
def get_queryset(self):
logger.info("[views_continents][ContinentsView][get_queryset][CACHE_TTL: " + str(CACHE_TTL) + "]")
if CONTINENTS_KEY_CACHE in cache:
logger.info("[views_continents][ContinentsView][get_queryset][retrieving continents from cache]")
continents = cache.get(CONTINENTS_KEY_CACHE)
return continents
else:
logger.info("[views_continents][ContinentsView][get_queryset][retrieving continents from db]")
continents = Continent.objects.all()
cache.set(CONTINENTS_KEY_CACHE, continents, timeout=CACHE_TTL)
return continents
class ContinentCreateView(CreateView):
model = Continent
fields = ['name']
success_url = "/continents"
def get_success_url(self):
logger.info("[views_continents][ContinentsView][get_success_url]")
if CONTINENTS_KEY_CACHE in cache:
logger.info("[views_continents][ContinentsView][get_success_url][CONTINENTS_KEY_CACHE exists in cache and will be deleted]")
cache.delete(CONTINENTS_KEY_CACHE)
return self.success_url
class ContinentUpdateView(UpdateView):
model = Continent
fields = ['name']
success_url = "/continents"
template_name = "main_app/continent_edit_form.html"
def get_success_url(self):
logger.info("[views_continents][ContinentUpdateView][get_success_url]")
if CONTINENTS_KEY_CACHE in cache:
logger.info("[views_continents][ContinentUpdateView][get_success_url][CONTINENTS_KEY_CACHE exists in cache and will be deleted]")
cache.delete(CONTINENTS_KEY_CACHE)
return self.success_url
class ContinentDeleteView(DeleteView):
model = Continent
success_url = "/continents"
def get_success_url(self):
logger.info("[views_continents][ContinentDeleteView][get_success_url]")
if CONTINENTS_KEY_CACHE in cache:
logger.info("[views_continents][ContinentDeleteView][get_success_url][CONTINENTS_KEY_CACHE exists in cache and will be deleted]")
cache.delete(CONTINENTS_KEY_CACHE)
return self.success_url
| UTF-8 | Python | false | false | 2,914 | py | 8 | views_continents.py | 2 | 0.69046 | 0.69046 | 0 | 82 | 34.536585 | 141 |
walfire/simuproject | 7,584,912,260,912 | 84086554543b19cc06280254fec94fbf966a29b8 | c3e88e4d8554c283b99b463fd05fb49eba4ca8c7 | /main.py | f1e892e60724acd25c886374c24d8669737aea2a | []
| no_license | https://github.com/walfire/simuproject | 35f25a446f67e9d4067b2d94a4be0a5c4400460a | f8c12eff787705018967e0dada315d005a4762ba | refs/heads/master | 2020-04-02T02:20:13.473469 | 2018-12-16T15:20:06 | 2018-12-16T15:20:06 | 153,904,050 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #### SIMU PROJECT ####
###Libraries###
import matplotlib.pyplot as plt
import numpy as np
import networkx as nx
import numpy.random as rng
import pandas as pd
import random
#### CLASSES ####
#### NETWORK STRUCTURE ####
n = 500
keys=["complex" , "friendly" , "meaning","polish" , "multi", "action", "difficulty", "abstract"]
def getscore(dic1,dic2):
#used in influencer assignment
sco=0
for a in keys:
sco+=abs(dic1[a]-dic2[a])
sco=(1-sco/len(keys))*100
return sco
#### NETWORK STRUCTURE ####
class Network(object):
def __init__(self, size=n):
self.size=size
self.mean=0
self.sd=0
self.watchers=[]
self.dist=0
self.type=0
self.agentsid=[]
self.agents=[]
self.inf=[]
self.gf=nx.Graph()
#not sure if dgraph is still needed. if just doesnt work, try graph witout di
self.ginf=nx.Graph()
self.infperag=1
self.numinf=10
self.infdic={}
self.infobj=[]
def generate(self,meanfriends=5, sdfriends=5, frienddist="uni",connectdist="watstro"):
#generates object and the f network
for a in range(self.size):
self.gf.add_node(a,obj=Agent(a))
if connectdist=="CStyle":
for a in range(self.size):
#
#self.gf.add_node(a,obj=______object_____)
#
tar=["r"]
friends=[]
for b in list(self.gf[a]):
friends.append(b)
for c in list(self.gf[b]):
tar.append(c)
if frienddist=="uni":
#numf=rng.uniform()
#numf=numf*meanfriends//1+5
numf=5
#numf=15-len(friends)
#if numf <0:
# numf=1
numf=int(numf)
if connectdist=="CStyle":
for aa in range(numf):
nex=rng.choice(tar)
if nex=="r" or int(nex) in friends+["r",a]:
while nex in friends+["r",a] or int(nex) in friends+["r",a]:
nex=int(rng.choice(range(self.size)))
nex=int(nex)
self.gf.add_edge(a,int(nex))
tar=tar+list(self.gf[nex])
friends.append(nex)
if len(self.gf[a])<5:
print(a)
print(friends)
print(self.gf[a])
print(" \n")
elif connectdist=="randomunif":
#notperfect
numf=10
connect={k:[] for k in range(self.size)}
li=[]
for a in range(self.size-1):
it=0
while len(connect[a])<numf and it<100:
it+=1
r=rng.choice(range(a+1,self.size))
if len(connect[r])<10 or r in connect[a]:
#print(a,r)
li.append([int(a),int(r)])
connect[a].append(r)
connect[r].append(a)
print(a,it,len(connect[a]))
print(len(li))
for b,c in li:
self.gf.add_edge(b,c)
#gnm_random_graph(n, m, seed=None, directed=False)
#connected_watts_strogatz_graph(n, k, p[, ...])
#if len(self.gf[a])<5:
# print(a)
# print(friends)
# print(self.gf[a])
#print(" \n")
elif connectdist=="randomunif2":
al=[]
numf=10
for a in range(numf):
al=al+list(range(self.size))
con=[]
al2=al
rng.shuffle(al2)
it=0
while it<10000:
it+=1
if len(al2)>=1:
cand=al2[:2]
if cand[0]!=cand[1] and cand not in con and cand[::-1] not in con:
con.append(frozenset(al2[:2]))
al2=al2[2:]
else:
rng.shuffle(al2)
else:
break
print(con,len(con),len(set(con)),it)
for b,c in con:
self.gf.add_edge(b,c)
elif connectdist=="prederd":
n=self.size
k=10/(n-1)
te=nx.gnp_random_graph(n,k)
self.gf.add_edges_from(te.edges)
elif connectdist=="watstro":
n=self.size
p=0.05
k=10
te=nx.connected_watts_strogatz_graph(n,k,p,100)
self.gf.add_edges_from(te.edges)
elif connectdist=="bara":
n=self.size
p=0.05
k=5
te=nx.barabasi_albert_graph(n,k)
self.gf.add_edges_from(te.edges)
elif connectdist=="pow":
n=self.size
#k=min(n/10,5)
k=4
p=0.05
te=nx.powerlaw_cluster_graph(n,k,p)
self.gf.add_edges_from(te.edges)
elif connectdist=="full":
n=self.size
e=[]
for a in range(n-1):
for b in range(a+1,n):
e.append(set([a,b]))
self.gf.add_edges_from(e)
#karate_club_graph()
elif connectdist=="star":
n=self.size
e=[]
for a in range(n):
e.append([a,(n+1)%n])
self.gf.add_edges_from(e)
elif connectdist=="circle":
n=self.size
e=[]
for a in range(n):
e.append([a,(a+1)%n])
self.gf.add_edges_from(e)
#karate_club_graph()
else:
raise Exception("ERROR: UNVALID GENERATE KEY")
self.agentsid=self.gf.nodes
for a in self.agentsid:
self.agents.append(self.getobj(a))
friendsinstances = []
for friend in self.friendsof(a):
temp = self.getobj(friend)
friendsinstances.append(temp)
self.getobj(a).define_friends(friendsinstances)
def setup(self, genway="random"):
#sets up tastes and assigns the inf stuff
pref={}
for a in keys:
pref[a]=0
for a in self.gf.nodes():
dic=pref
for b in keys:
dic[b]=rng.random()
self.getobj(a).define_preferences(dic)
ninf=5
inf=random.sample(self.gf.nodes,ninf)
for a in inf:
self.infobj.append(self.getobj(a))
watchers=self.agentsid
for a in range(self.size):
self.ginf.add_node(a,obj=self.getobj(a))
infdic={}
for a in inf:
infdic[a]=[]
if genway=="random":
for a in watchers:
b=random.choice(inf)
infdic[b].append(a)
self.ginf.add_edge(b,a)
if genway=="stricttaste":
for a in watchers:
pref=self.getobj(a).preferences
sco=-100000000000
nu=0
for b in range(ninf):
be=inf[b]
inpref=self.getobj(be).preferences
s=getscore(pref,inpref)
if sco<s:
nu=b
sco=s
infdic[inf[nu]].append(self.getobj(a)) #attention check
self.ginf.add_edge(a,inf[nu])
if genway=="unstricttaste":
for a in watchers:
pref=self.getobj(a).preferences
sco=[]
for b in range(ninf):
be=inf[b]
inpref=self.getobj(be).preferences
sco.append(getscore(pref,inpref))
nu=rng.choice(range(ninf),1,sco)
infdic[inf[nu]].append(a)
self.ginf.add_edge(a,inf[nu])
if genway=="double":
#might not work
for a in watchers:
b=random.choice(inf)
infdic[b].append(a)
self.ginf.add_edge(b,a)
for a in watchers:
pref=self.getobj(a).preferences
sco=-100000000000
nu=0
for b in range(ninf):
be=inf[b]
inpref=self.getobj(be).preferences
s=getscore(pref,inpref)
if sco<s:
nu=b
sco=s
infdic[inf[nu]].append(a)
self.ginf.add_edge(a,inf[nu])
#puts the inf stuff into a usable form
self.infdic=infdic
for a in self.infdic.keys():
self.getobj(a).define_followers(self.infdic[a])
for b in self.infdic[a]:
self.getobj(b).influencer=self.getobj(a)
def friendsof(self,personnr):
return(list(self.gf[personnr]))
def getobj(self,personnr):
return self.gf.nodes[personnr]["obj"]
def draw(self):
ax=plt.gca()
ax.clear()
fig = plt.gcf()
cols=[]
for a in list(self.gf.nodes):
cols.append(getnodecol(self.getobj(a).now_playing.game_id))
#fig.set_size_inches(13,20)# set dimension of window
nx.draw(self.gf,node_size=100,node_color=cols)
def drawi(self):
ax=plt.gca()
ax.clear()
fig = plt.gcf()
temp=nx.create_empty_copy(self.gf)
temp.add_edges_from(self.ginf.edges)
cols=[]
for a in list(self.gf.nodes):
cols.append(getnodecol(self.getobj(a).now_playing.game_id))
nx.draw(temp, node_colors=cols)
def addinf(self):
####sketch, can be erased
#choose numinf randomagents as infs
# generate score for each inf according to taste similarity
# chose infperag ones according to score
pass
#probably output a dic of ags per inf, but also add inf as a trait of ag
def niceplot(self):
ax=plt.gca()
ax.clear()
fig = plt.gcf()
toplot=nx.Graph()
for a in self.agents:
if a.node_num in self.inf:
eee="d"
aaa=100+10*a.time_playing
else:
eee="d"
aaa=30+5*a.time_playing
toplot.add_node(a.node_num,col=getnodecol(a.now_playing.game_id),size=1,shape=eee)
toplot.add_edges_from(self.gf.edges,col="k",wei=2)
#for aa in range(len(self.inf)):
# a=self.inf[aa]
# for b in list(self.ginf[a]):
# toplot.add_edge(a,b,col=getcol(aa),wei=5)
toplot.add_edges_from(self.ginf.edges,col="r",wei=1)
edges=toplot.edges
nodes=toplot.nodes
colors = [toplot[u][v]['col'] for u,v in edges]
wei = [toplot[u][v]['wei'] for u,v in edges]
coln=[toplot.nodes[u]["col"] for u in nodes]
size=[toplot.nodes[u]["size"] for u in nodes]
shape=[toplot.nodes[u]["shape"] for u in nodes]
#print(colors)
nx.draw_networkx(toplot, nodes=nodes, node_color= coln, node_size=size,
#node_shape=shape,
edges=edges, edge_color=colors,
width=wei
)
def getcol(a):
col=["g","b","y","m","r"]
return col[a]
def getnodecol(id):
trans=["k","r","b","y","m","g"]
#apply list to transltate ids into python colours here
id=trans[id]
return id
#### AGENTS ####
people_total = [] #list of person objects
games_total = [Game(0,real_game=False)] #list of game objects
games_dict = {str(games_total[0]):0} #dictionary of str(game objects)
friendship_prob = 0.3
influencer_prob = 0.3
advertising_power = 0.3
standard_decay = -0.3
decay_multiplier =0.2
comparison_budget = 1000
budget_range = 0.5
likes = ['singleplayer', 'multiplayer', 'casual', 'replayable', 'rpg']
genres = ['fps', 'puzzle', 'strategy', 'platformer', 'sim']
class Agent:
def __init__(self,node_num):
self.node_num = node_num #ID of agent
self.friends = []
self.followers = []
self.influencer = 0
self.knowngames = {}
self.preferences = {}
self.preferences_list=[]
self.now_playing = games_total[0]
self.time_playing = 0
self.influencer_status = False
people_total.append(self)
def __str__(self):
return self.node_num
def define_friends(self, friends_list):
self.friends = friends_list
def define_followers(self, followers_list):
self.followers.extend(followers_list)
self.influencer_status = True
def define_knowngames(self, games_dict):
self.knowngames = games_dict.copy()
# def set_preferences(self,likes:list):
# self.preferences_list=likes
def define_preferences(self, pref_dict ={}):
self.preferences = pref_dict
#else:
# if scores:
# for i in range(len(scores)):
# self.preferences[self.preferences_list[i]]=scores[i]
#for item in self.preferences_list:
# if item not in self.preferences:
# self.preferences[item]= 0
def get_friends(self):
return self.friends
def get_followers(self):
return self.followers
def get_knowngames(self):
return self.knowngames
def get_preferences(self):
return self.preferences
def influence_playing(self,key,prob):
self.knowngames[key] += prob
def decay_playing(self):
#if statt {0:0}
self.knowngames[str(self.now_playing)] += standard_decay
def recommend(self):
for i in self.friends:
i.influence_playing(self.now_playing,friendship_prob)
if self.followers:
for j in self.followers:
j.influence_playing(self.now_playing,influencer_prob)
def game_infection(self):
for game in sorted(self.knowngames, key=self.knowngames.get, reverse=True):
prob=self.knowngames[game]
if random.choice([0,1],[1-prob,prob]):
game_obj = games_total[game]
if game_obj.real_game:
self.now_playing = game_obj
self.time_playing +=1
#return True
break
# def decay_effect(self):
# if self.now_playing:
# disinterest =self.time_playing*self.now_playing
# self.influence_playing(self.now_playing, disinterest)
class Game:
# decay = 0
# popularity = 0.0
# budget = 0.0
# multiplayer = 0.0
# singleplayer = 0.0
# mainstream = 0.0
# target = 0.0 #niche - mainstream
# team = ["indie","blockbuster"] #optional?
game_num = 0
def __init__(self, budget, name = game_num, game_id= game_num, decay = 0, genre = 0, scores = [], real_game = True):
self.name = name
self.budget = budget
self.decay = decay
self.genre = genre
self.scores = scores
self.effect = advertising_power*self.budget/comparison_budget
self.game_id = game_id
self.real_game = real_game
if real_game:
games_total.append(self)
games_dict[str(self)]=0
Game.game_num += 1
def __str__(self):
return str(self.name)
def get_popularity(self, people=people_total):
players = 0
for i in people:
if self.name == i.now_playing:
players += 1
self.popularity = players/len(people)
return self.popularity
def get_totalplayers(self, people=people_total):
players = 0
for i in people:
if self.name == i.now_playing:
players += 1
return players
def run_add(self, people=people_total):
for i in people:
i.influence_playing(self.game, self.effect)
def define_scores(self, keys=likes, scores_list=[], scores_dic={}):
if scores_dic:
self.scores = scores_dic
elif scores_list:
for i in range(len(scores_list)):
self.scores[keys[i]]=scores_list[i]
for item in keys:
if item not in self.scores:
self.scores[item] = 0
# def find_agent_from_agent_ID(self, agentID):
# for agent in people_total:
# if agent
def set_decay(self, value=standard_decay):
self.decay = value
#### CONVERSION ALGORITHM ####
class Conversionalgo:
def __init__(self, step_num=0):
self.counter = step_num
self.currentstatus = {}
self.status_per_step = {}
# def implement_influence(self): #commented out because now in simumanager
# for item in games_total:
# item.run_add()
# for person in people_total:
# person.recommend() #ev in game class
# for person in people_total:
# person.game_infection()
def get_currentstatus(self):
for item in games_total:
self.currentstatus[str(item)] = item.get_totalplayers()
self.status_per_step[self.counter] = self.currentstatus
self.counter += 1
def get_deltas(self):
for item in games_total:
self.deltas[str(item)] = self.status_per_step[self.counter][str(item)] - self.status_per_step[self.counter-1][str(item)]
#more?
#### SIMULATION MANAGER ####
#https://www.tutorialspoint.com/python/python_classes_objects.htm
#https://likegeeks.com/python-gui-examples-tkinter-tutorial/
class Simumanager:
'class that manages the simulation & works with timestamps'
timestamp = 0 #accessable from in/outside the class
def __init__(self):
Simumanager.timestamp = 0 #init the timestamp to 0 for a new simulation
def loadsimu(self, timestamp, datafile):
Simumanager.timestamp = timestamp
def addgames(self,gamesnumber=5, budget="random"): #create n instances of games, which automatically get added in games_total list
if budget == "random":
budgetamount = comparison_budget*2*random.random()
for i in range(0,gamesnumber):
Game(budgetamount)
elif budget == "range":
budgetamount = random.randrange(comparison_budget*(1-budget_range), comparison_budget(1+budget_range))
for i in range(0,gamesnumber):
Game(budgetamount)
else: #open for extension for non random assignment of budget
raise Exception("ERROR: INVALID BUDGET PARAMETER INPUT")
def networkinit(self,agentsnumber=500,influassignment="random"): #creates n agents (500 as preset), assigns preferences,
net = Network(size=agentsnumber)
net.generate() # using watstro simulation as preset
net.setup(influassignment) #random, stricttaste, unstricttaste, double keys for influencer init and assignment
def drawnetwork(self,type="agents"):
if type == "agents":
self.net.draw()
if type == "influencers":
self.net.drawi()
if type == "agents_influencers":
self.net.niceplot()
else:
raise Exception("Error: INVALID TYPE PARAMETER INPUT")
def stateofknowngame(self): #1 Timestamp
pass
def adround(self):
for item in games_total:
if item is not "Null_Game": #there wont be an AD for a Non Game
item.run_add
def influfriendround(self):
for person in people_total:
person.recommend() #includes friend influence over other friends, and influencers influence
def conversion(self): #decides which game gets played
for person in people_total:
person.game_infection()
def exporttimestamp(self):
pass
def get_agents(self):
pass
def get_games(self):
pass
def decay(self):
pass
def nextstep(self): #increases timestamp of simulation by 1
self.timestamp +=1
##### DATA MANAGER ####
class Datamanager: #call it after the network creation, to instantiate a pandas matrix that has the information
#about all the agents at timestamp = 0
def __init__(self):
self.columns = ["timestamp", "agent ID", "isinfluencer", "current played game", "how long been playing current game", "# friends playing the same", "does influencer play the same"]
if games_total: #appends to the index list the names of the played games list
for game in games_total:
if game.real_game:
self.columns.append("game " + str(game.name) + " preference %")
self.listofagents = []
self.table = pd.DataFrame(data = self.listofagents, columns = self.columns)
def get_table(self):
print (self.table)
def update_table(self):
for person in people_total:
agent = []
agent.append(Simumanager.timestamp)
agent.append(person.node_num)
agent.append(person.influencer_status)
agent.append(person.now_playing)
agent.append(person.time_playing) # check on thisi
agent.append(0) #nr friends playing the same game
agent.append(0) #is influ playing the same?
for game in games_total: #TO BE CHECKED IF THE ORDER IS THE SAME OF THE ONE IN THE PANDA DATAFRAME
if game.real_game:
# agent.append(agent.preferences[game.name])
agent.append("placeholder")
self.listofagents.append(agent)
self.table = pd.DataFrame(data = self.listofagents, columns = self.columns)
def export_table(self):
writer = pd.ExcelWriter('Simulation.xlsx', engine='xlsxwriter')
self.table.to_excel(writer, sheet_name='Sim')
writer.save()
def createtable(self):
pass
def savecurrenttimestamp(self):
pass
#sim = Simumanager()
#sim.addgames()
#sim.networkinit()
#sim.drawnetwork()
#data = Datamanager()
#data.get_table()
#data.export_table()
def main():
print ("START OF SIMULATION \n")
sim = Simumanager()
sim.addgames()
net = Network()
net.generate()
net.setup()
data = Datamanager()
rounds = int(input("How many rounds of simulation? "))
for i in range(rounds):
print("Timestamp " + str(sim.timestamp))
sim.adround() #influence of ads, influencers and friends & conversion calculated
sim.influfriendround()
sim.conversion()
data.update_table() #export values in the table
data.get_table()
data.export_table()
net.draw() #draw plot
if i < (rounds-1):
input("Proceed with next step?: ")
sim.nextstep()
else:
print("Simulation finished.")
if __name__ == "__main__":
main()
##### PLOTTER ####
#
#class plotter:
# def setupplot():
# pass
# def drawnodes():
# pass
# def drawedges(node, depth): #node: person & influencer, depth: how many levels of friends of friends of frieds i.e.
# pass
# def update():
# pass
# def exportplot():
# pass | UTF-8 | Python | false | false | 24,063 | py | 2 | main.py | 1 | 0.516062 | 0.507044 | 0 | 720 | 32.422222 | 188 |
deepchem/deepchem | 6,674,379,206,795 | f19aeede65cd74500cefb8fc11411d97b4cbe79d | 39b021eabbb8e3be1734cf92fd641965a796b0eb | /examples/delaney/delaney_textcnn.py | 8d0d7740b45e0c586af53daa3bb6aa2aae09bd48 | [
"MIT"
]
| permissive | https://github.com/deepchem/deepchem | 066cbf42316b2f6bec0166727e0264a485d5266f | ee6e67ebcf7bf04259cf13aff6388e2b791fea3d | refs/heads/master | 2023-09-02T01:32:17.860111 | 2023-08-31T18:49:00 | 2023-08-31T18:49:00 | 43,098,215 | 4,876 | 1,905 | MIT | false | 2023-09-14T19:10:44 | 2015-09-24T23:20:28 | 2023-09-14T15:53:05 | 2023-09-14T19:10:43 | 517,986 | 4,582 | 1,517 | 508 | Python | false | false | """
Script that trains textCNN models on delaney dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.random.set_seed(123)
import deepchem as dc
# Load Delaney dataset
delaney_tasks, delaney_datasets, transformers = dc.molnet.load_delaney(
featurizer='Raw', split='index')
train_dataset, valid_dataset, test_dataset = delaney_datasets
# Fit models
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)
char_dict, length = dc.models.TextCNNModel.build_char_dict(train_dataset)
# Batch size of models
batch_size = 64
model = dc.models.TextCNNModel(
len(delaney_tasks),
char_dict,
seq_length=length,
mode='regression',
learning_rate=1e-3,
batch_size=batch_size,
use_queue=False)
# Fit trained model
model.fit(train_dataset, nb_epoch=100)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
| UTF-8 | Python | false | false | 1,177 | py | 794 | delaney_textcnn.py | 595 | 0.743415 | 0.731521 | 0 | 47 | 24.042553 | 73 |
jwestfromtheeast/CodingChallenges | 16,286,516,010,352 | a30750d7e50aeabebf1456ecb7f2abaf7f26bfd9 | 9c1def5ace798f136ca4c466e193516279aa514c | /python/medium/130SurroundedRegions.py | 3503c5e0354f900cec599a1c15abad3c33ef718e | []
| no_license | https://github.com/jwestfromtheeast/CodingChallenges | a6e5ab043cee2609b7ea4516972066cf3ab1a92a | e75e4e4cccf69368ec2d74785cc156084d7fc3cd | refs/heads/master | 2021-07-13T05:49:31.558977 | 2020-06-26T22:35:49 | 2020-06-26T22:35:49 | 155,331,558 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from collections import deque
class Solution:
# Time Complexity: O(mn) time, O(mn) space
# Logic: Here, we want to use a dfs or bfs for traversal. Since a recursive dfs will potentially stack overflow,
# we will use a bfs by using a queue. This problem becomes simple with that idea in mind, and when following the
# problem statement that any region on a border is not surrounded, and thus must remain as "O"s. So, let's go
# through the graph and mark any region of O's originating from a boundary (and thus not surrounded) with a new character (not X or O).
# Then, we can go back at the end and replace any region of our special character with O's, and the rest with X's, and this will satisfy the problem.
def solve(self, board: List[List[str]]) -> None:
"""
Do not return anything, modify board in-place instead.
"""
if not board or not board[0]:
return
m, n = len(board), len(board[0])
if m <= 2 or n <= 2:
return
# Place all of the boundary coordinates in the queue. Use a tuple x, y pair to store the coordinates.
q = deque()
for i in range(m):
q.append((i, 0))
q.append((i, n - 1))
for j in range(n):
q.append((0, j))
q.append((m - 1, j))
# For any boundary that is an O, we want to mark that region. so, mark it with any char and perform a bfs by adding its neighbors to the queue.
while q:
i, j = q.popleft()
if 0 <= i < m and 0 <= j < n and board[i][j] == "O":
board[i][j] = "Z"
q.append((i - 1, j))
q.append((i, j - 1))
q.append((i + 1, j))
q.append((i, j + 1))
# Go back and replace any marked regions with O's, and the rest with X's. This will fill in any surrounded regions.
for i in range(m):
for j in range(n):
if board[i][j] == "Z":
board[i][j] = "O"
else:
board[i][j] = "X"
| UTF-8 | Python | false | false | 2,100 | py | 95 | 130SurroundedRegions.py | 94 | 0.551429 | 0.544762 | 0 | 46 | 44.652174 | 153 |
chuheng001/FedScale | 13,640,816,134,186 | 370b0b458c16bcd4f2efb54d532564e5f1d2570f | ec64d4d7f638b107ca89f280ae405211ad4821cf | /core/utils/speech.py | fb5ac3dd51a1c214a671cf9126c813349198e113 | [
"Apache-2.0"
]
| permissive | https://github.com/chuheng001/FedScale | 5c9ecfeb147e616961d2bd68a0d6b25100348406 | 2fd30826fceab585250199c247c968b5808f5801 | refs/heads/master | 2023-08-07T11:09:40.606695 | 2021-09-25T20:06:39 | 2021-09-25T20:06:39 | 410,370,674 | 0 | 0 | Apache-2.0 | true | 2021-09-25T20:03:16 | 2021-09-25T20:03:16 | 2021-09-25T06:10:31 | 2021-09-25T06:10:28 | 56,897 | 0 | 0 | 0 | null | false | false | from __future__ import print_function
import warnings
import os
import numpy as np
import numba
import librosa
import csv
CLASSES = ['up', 'two', 'sheila', 'zero', 'yes', 'five', 'one', 'happy', 'marvin', 'no', 'go', 'seven', 'eight', 'tree', 'stop', 'down', 'forward', 'learn', 'house', 'three', 'six', 'backward', 'dog', 'cat', 'wow', 'left', 'off', 'on', 'four', 'visual', 'nine', 'bird', 'right', 'follow', 'bed']
class SPEECH():
"""
Args:
root (string): Root directory of dataset where ``MNIST/processed/training.pt``
and ``MNIST/processed/test.pt`` exist.
train (bool, optional): If True, creates dataset from ``training.pt``,
otherwise from ``test.pt``.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
"""
classes = []
@property
def train_labels(self):
warnings.warn("train_labels has been renamed targets")
return self.targets
@property
def test_labels(self):
warnings.warn("test_labels has been renamed targets")
return self.targets
@property
def train_data(self):
warnings.warn("train_data has been renamed data")
return self.data
@property
def test_data(self):
warnings.warn("test_data has been renamed data")
return self.data
def __init__(self, root, dataset='train', transform=None, target_transform=None, classes=CLASSES):
self.root = root
self.transform = transform
self.target_transform = target_transform
self.classMapping = {classes[i]: i for i in range(len(classes))}
self.data_file = dataset # 'train', 'test', 'validation'
self.path = os.path.join(self.processed_folder, self.data_file)
# load data and targets
self.data, self.targets = self.load_file(self.path)
self.data_dir = os.path.join(self.root, self.data_file)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
path, target = self.data[index], int(self.targets[index])
data = {'path': os.path.join(self.data_dir, path), 'target': target}
if self.transform is not None:
data = self.transform(data)
return data['input'], data['target']
def __len__(self):
return len(self.data)
@property
def raw_folder(self):
return self.root
@property
def processed_folder(self):
return self.root
@property
def class_to_idx(self):
return {_class: i for i, _class in enumerate(self.classes)}
def _check_exists(self):
return (os.path.exists(os.path.join(self.processed_folder,
self.data_file)))
def load_meta_data(self, path):
data_to_label = {}
with open(path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count != 0:
data_to_label[row[1]] = self.classMapping[row[-2]]
line_count += 1
return data_to_label
def load_file(self, path):
rawData, rawTags = [], []
# load meta file to get labels
classMapping = self.load_meta_data(os.path.join(self.processed_folder, 'client_data_mapping', self.data_file+'.csv'))
for imgFile in list(classMapping.keys()):
rawData.append(imgFile)
rawTags.append(classMapping[imgFile])
return rawData, rawTags
class BackgroundNoiseDataset():
"""Dataset for silence / background noise."""
def __init__(self, folder, transform=None, sample_rate=16000, sample_length=1):
audio_files = [d for d in os.listdir(folder) if d.endswith('.wav')]
samples = []
for f in audio_files:
path = os.path.join(folder, f)
s, sr = librosa.load(path, sample_rate)
samples.append(s)
samples = np.hstack(samples)
c = int(sample_rate * sample_length)
r = len(samples) // c
self.samples = samples[:r*c].reshape(-1, c)
self.sample_rate = sample_rate
self.classes = CLASSES
self.transform = transform
self.path = folder
def __len__(self):
return len(self.samples)
def __getitem__(self, index):
data = {'samples': self.samples[index], 'sample_rate': self.sample_rate, 'target': 1, 'path': self.path}
if self.transform is not None:
data = self.transform(data)
return data
| UTF-8 | Python | false | false | 5,078 | py | 36 | speech.py | 19 | 0.589602 | 0.587042 | 0 | 154 | 31.967532 | 295 |
horiid/netpro | 9,148,280,367,678 | fcb47b369fb21081ed476072868b360824d2d0fa | 9955c86a1435c6d0975355e41c65b0a3631b8d21 | /sniffer/sniff.py | baaa2687542f8e9044049010ce3aab5fd10adcf4 | []
| no_license | https://github.com/horiid/netpro | e3ada2045847c0d012be22de5fe23bf166d5f5f7 | cc2970a42553a2bd0a672a21e80d2705fb9d338b | refs/heads/master | 2022-03-22T13:30:33.891932 | 2019-11-22T05:19:34 | 2019-11-22T05:19:34 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import subprocess
import os
import re
def run_cmd(cmd):
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
while True:
line = proc.stdout.readline()
decoded = line.decode('utf-8')
if line:
if decoded == "successfully completed.\n":
break
yield decoded
def capture_from_packet(line):
# get source IP of packet sender
info = ""
if "ip_src" in line:
src = re.search(r'[a-z]+\s*=\s*(.+)', line)
# remove local/inside network IP
if not re.match(r'172', src.group(1)) and not re.match(r'192', src.group(1)):
info = "src: " + src.group(1)
else:
info = 'src: ' + src.group(1)
# get dst IP of packet receiver
if "ip_dst" in line:
dst = re.search(r'[a-z]+\s*=\s*(.+)', line)
# remove local/inside network IP
if not re.match(r'172', dst.group(1)) and not re.match(r'192', dst.group(1)):
info = "dst: " + dst.group(1)
else:
info = 'dst: ' + dst.group(1)
# get TCP
if "protocol" in line:
protocol = re.match(r'protocol:\s(.+)', line)
info += protocol.group(1)
# check Flags such as SYN, ACK, etc...
if "FLAGS" in line:
flags = re.match(r'FLAGS:\s(.+)', line)
return info + flags.group(1)
else:
return info
def sniff():
passwd = input("Enter your password:")
pwd = os.getcwd()
cmd = "echo " + passwd + " | sudo -S " + pwd + "/sniff"
print('Running command:', cmd + "...\n")
run_cmd_gen = run_cmd(cmd=cmd)
data = list()
result = dict()
for line in run_cmd_gen:
input_line = capture_from_packet(line)
if not input_line == None and not input_line == "":
data.append(input_line)
for i in range(len(data)):
#print(data[i], data[i+1], data[i+2])
if "src:" in data[i]:
src = data[i][5:-1]
dst = data[i+1][5:-1]
if data[i+2] == "UDP":
comm = data[i+2]+" "
else:
comm = data[i+2] + " " + data[i+3]
result[src] = [dst, comm]
return result
if __name__ == '__main__':
sniff()
| UTF-8 | Python | false | false | 2,248 | py | 9 | sniff.py | 3 | 0.511121 | 0.495996 | 0 | 79 | 27.455696 | 94 |
annareithmeir/PlagiarismCheck_HiWi_Bioinformatik_SS19 | 19,241,453,498,305 | 2d521cef38badad22135b7f772eae156f9a8820d | d7428a18fa5c5416c3d773df6529b1c562705fe0 | /codechecker/repos/1/collected_files/aa_props/ge82fuz.py | 1a2f74cc28f45497556cff2360548cfd69fa3230 | []
| no_license | https://github.com/annareithmeir/PlagiarismCheck_HiWi_Bioinformatik_SS19 | d0b05a47f8d898d6351bb3c097c238780a39a19f | db80ed523d1fc6b1e07ec52c91e60595eae9a995 | refs/heads/master | 2022-01-07T01:30:10.556284 | 2019-07-02T11:09:27 | 2019-07-02T11:09:27 | 192,517,827 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ##############
# Exercise 2.7
##############
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
switcher = {
'A': False,
'R': True,
'N': False,
'D': False,
'C': False,
'Q': False,
'E': False,
'G': False,
'H': True,
'I': False,
'L': False,
'K': True,
'M': False,
'F': False,
'P': False,
'S': False,
'T': False,
'W': False,
'Y': False,
'V': False,
}
return switcher.get(aa, False)
def isNegativelyCharged(aa):
switcher = {
'A': False,
'R': False,
'N': False,
'D': True,
'C': False,
'Q': False,
'E': True,
'G': False,
'H': False,
'I': False,
'L': False,
'K': False,
'M': False,
'F': False,
'P': False,
'S': False,
'T': False,
'W': False,
'Y': False,
'V': False,
}
return switcher.get(aa, False)
def isHydrophobic(aa):
switcher = {
'A': True,
'R': False,
'N': False,
'D': False,
'C': False,
'Q': False,
'E': False,
'G': False,
'H': False,
'I': True,
'L': True,
'K': False,
'M': True,
'F': True,
'P': False,
'S': False,
'T': False,
'W': True,
'Y': True,
'V': True,
}
return switcher.get(aa, False)
def isAromatic(aa):
switcher = {
'A': False,
'R': False,
'N': False,
'D': False,
'C': False,
'Q': False,
'E': False,
'G': False,
'H': True,
'I': False,
'L': False,
'K': False,
'M': False,
'F': True,
'P': False,
'S': False,
'T': False,
'W': True,
'Y': True,
'V': False,
}
return switcher.get(aa, False)
def isPolar(aa):
switcher = {
'A': False,
'R': True,
'N': True,
'D': True,
'C': False,
'Q': True,
'E': True,
'G': False,
'H': True,
'I': False,
'L': False,
'K': True,
'M': False,
'F': False,
'P': False,
'S': True,
'T': True,
'W': False,
'Y': True,
'V': False,
}
return switcher.get(aa, False)
def isProline(aa):
switcher = {
'A': False,
'R': False,
'N': False,
'D': False,
'C': False,
'Q': False,
'E': False,
'G': False,
'H': False,
'I': False,
'L': False,
'K': False,
'M': False,
'F': False,
'P': True,
'S': False,
'T': False,
'W': False,
'Y': False,
'V': False,
}
return switcher.get(aa, False)
def containsSulfur(aa):
switcher = {
'A': False,
'R': False,
'N': False,
'D': False,
'C': True,
'Q': False,
'E': False,
'G': False,
'H': False,
'I': False,
'L': False,
'K': False,
'M': True,
'F': False,
'P': False,
'S': False,
'T': False,
'W': False,
'Y': False,
'V': False,
}
return switcher.get(aa, False)
def isAcid(aa):
switcher = {
'A': False,
'R': False,
'N': False,
'D': True,
'C': False,
'Q': False,
'E': True,
'G': False,
'H': False,
'I': False,
'L': False,
'K': False,
'M': False,
'F': False,
'P': False,
'S': False,
'T': False,
'W': False,
'Y': False,
'V': False,
}
return switcher.get(aa, False)
def isBasic(aa):
switcher = {
'A': False,
'R': True,
'N': False,
'D': False,
'C': False,
'Q': False,
'E': False,
'G': False,
'H': True,
'I': False,
'L': False,
'K': True,
'M': False,
'F': False,
'P': False,
'S': False,
'T': False,
'W': False,
'Y': False,
'V': False,
}
return switcher.get(aa, False) | UTF-8 | Python | false | false | 4,418 | py | 442 | ge82fuz.py | 418 | 0.352196 | 0.351743 | 0 | 233 | 17.965665 | 61 |
MananSoni42/NEC-hackathon | 4,569,845,246,889 | f03b0ad36f416615b4d737cfa6be82ea2839fd1e | ccc7edd018a9a376da1572f4f97ec0c81e0ed5c7 | /helpers/route.py | 6632b8cf8a4845452f27467364a033abb2149826 | []
| no_license | https://github.com/MananSoni42/NEC-hackathon | 98fdc779c8c713a255952cbcf04b4b14c2fbd1b2 | 9488f9f07d5ce0b900210ab7ff4ea6b9ed4661f9 | refs/heads/master | 2020-07-06T04:07:03.971006 | 2019-09-06T15:08:23 | 2019-09-06T15:08:23 | 202,885,852 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
from pprint import pprint, pformat
from copy import deepcopy
get_bin = lambda x, n: format(x, "b").zfill(n)
def remove_duplicates(seq):
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
def get_nbrs(G, node, first=None, last=None):
nbrs = sorted(list(G.neighbors(0)), key=lambda n: G[0][n]["length"], reverse=True)
if first:
return nbrs[:first]
elif last:
return nbrs[-last:]
else:
return nbrs
class Route(object):
"""
Specifies a route for buses to run on
Attributes:
num(int): Number of buses running on this routes
num_bits(int): number of bits to use in the binary description of num
cap(int): capacity of this route
v(list): (ordered) list of vertices covered in this route
Methods:
mutate(mut_prob): Mutate the given route
crossover(route): Crossover current route with the specified route
"""
world = None
initialized = False
@staticmethod
def initialize_class(G):
Route.world = deepcopy(G)
Route.initialized = True
@property
def cum_len(self):
cum_sum = 0
for i in range(len(self.v_disabled) - 1):
cum_sum += Route.world[self.v_disabled[i]][self.v_disabled[i + 1]]["length"]
return cum_sum
@property
def v_disabled(self):
return remove_duplicates(self.v)
def __init__(self, cap, vertices, num=None):
self.num_bits = 5
if not num:
self.num = np.random.randint(1, 2 ** self.num_bits)
else:
self.num = num
self.v = vertices
self.cap = cap
def __str__(self):
return f"{self.num} | {len(self.v_disabled)} | {self.v_disabled}"
# To enable better printing
__repr__ = __str__
def mutate(self, mut_prob=0.05):
if Route.initialized:
G = Route.world
# Mutate the number of buses
bin_num = list(str(get_bin(self.num, self.num_bits)))
for i in range(len(bin_num)):
if np.random.rand() < mut_prob:
bin_num[i] = str(abs(int(bin_num[i]) - 1))
self.num = 4 * int(bin_num[0]) + 2 * int(bin_num[1]) + 1 * int(bin_num[2])
# Mutate the route
for i in range(len(self.v)):
if np.random.rand() < mut_prob:
nbrs = get_nbrs(G, self.v[i], first=len(self.v) + 1)
for n in nbrs[:]:
if n in self.v:
nbrs.remove(n)
probs = np.array([G[self.v[i]][n]["length"] for n in nbrs])
probs = probs / np.sum(probs)
self.v[i] = np.random.choice(nbrs, p=probs)
def crossover(self, other_route):
v1 = set(self.v[1:-1])
v2 = set(other_route.v[1:-1])
common = list(v1.intersection(v2))
if len(common) == 0:
return
if len(common) == 1:
ind_1 = self.v.index(common[0])
ind_2 = other_route.v.index(common[0])
temp_v = self.v
self.v = self.v[:ind_1] + other_route.v[ind_2:]
other_route.v = other_route.v[:ind_2] + temp_v[ind_1:]
else:
elem1, elem2 = np.random.choice(common, size=2, replace=False)
ind_1_l = min(self.v.index(elem1), self.v.index(elem2))
ind_1_u = max(self.v.index(elem1), self.v.index(elem2))
ind_2_l = min(other_route.v.index(elem1), other_route.v.index(elem2))
ind_2_u = max(other_route.v.index(elem1), other_route.v.index(elem2))
temp_v = self.v
self.v[ind_1_l + 1 : ind_1_u] = other_route.v[ind_2_l + 1 : ind_2_u]
other_route.v[ind_2_l + 1 : ind_2_u] = temp_v[ind_1_l + 1 : ind_1_u]
class Routes(object):
"""
Collection of routes to be used as a population for the final Genetic Algorithm
Attributes:
routes: list of bus routes (class Route)
num_routes: Number of such routes
"""
world = None
initialized = False
@staticmethod
def initialize_class(G):
Routes.world = deepcopy(G)
Routes.initialized = True
@property
def cap(self):
cum_cap = 0
for route in self.routes:
cum_cap += route.cap * route.num
return cum_cap
@property
def num_buses(self):
cum_num = 0
for route in self.routes:
cum_num += route.num
return cum_num
def __init__(self, list_routes):
self.routes = list_routes
self.num_routes = len(list_routes)
def __str__(self):
return pformat([self.num_routes] + [r for r in self.routes])
__repr__ = __str__
def mutate(self, mut_prob=0.05, cross_perc=0.3):
if Routes.initialized:
G = Routes.world
# Mutate individual routes
for route in self.routes:
if np.random.rand() < mut_prob:
route.mutate( mut_prob)
# internally Crossover some routes
num_cross = int(cross_perc * self.num_routes)
num_cross = num_cross if not num_cross % 2 else num_cross + 1
cross_routes = np.random.choice(self.routes, replace=False, size=num_cross)
for i in range(0, num_cross, 2):
cross_routes[i].crossover(cross_routes[i + 1])
def crossover(self, other_routes, cross_transfer=0.1, cross_perc=0.3):
num_cross = int(cross_perc * max(self.num_routes, other_routes.num_routes))
num_transfer = int(
cross_transfer * max(self.num_routes, other_routes.num_routes)
)
# Transfer some routes
ind_1 = np.random.choice(
range(self.num_routes), replace=False, size=num_transfer
)
ind_2 = np.random.choice(
range(other_routes.num_routes), replace=False, size=num_transfer
)
for i in range(num_transfer):
temp = self.routes[ind_1[i]]
self.routes[ind_1[i]] = other_routes.routes[ind_2[i]]
other_routes.routes[ind_2[i]] = temp
# Crossover some routes
cross_1 = np.random.choice(self.routes, replace=False, size=num_cross)
cross_2 = np.random.choice(other_routes.routes, replace=False, size=num_cross)
for i in range(num_cross):
cross_1[i].crossover(cross_2[i])
@property
def cum_len(self):
cum_sum = 0
for route in self.routes:
cum_sum += route.cum_len
return cum_sum
| UTF-8 | Python | false | false | 6,468 | py | 28 | route.py | 8 | 0.562925 | 0.548856 | 0 | 199 | 31.502513 | 88 |
yuriy-logosha/geolocations | 1,108,101,591,649 | 5ac1ac8ebce6c819a8d5fce4e84003ada6fcff13 | 56e385052dac79e4ca8a9c07b24abd3bcd1f9cb9 | /geolocations.py | f6279af84fde3a631da788d851e3295dc5acd357 | []
| no_license | https://github.com/yuriy-logosha/geolocations | bc0df914e15cd76728005e785624b9f3034934d6 | 78d637d4e2ea7bf0b21e17c1b6ef651f1a5537d5 | refs/heads/master | 2021-03-08T05:40:13.377785 | 2021-01-01T20:21:40 | 2021-01-01T20:21:40 | 246,321,980 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import logging
import time
from logging.handlers import RotatingFileHandler
import pymongo
from urllib3.exceptions import NewConnectionError
from utils import google_geocode, GoogleError
G_KEY = 'AIzaSyCasbDiMWMftbKcSnFrez-SF-YCechHSLA'
BETWEEN_ERRORS_TIMEOUT = 0.2
ITERATIONS_TIMEOUT = 60
BACKOFF_TIMEOUT = 30 * 60
CONN_ERR_BACKOFF_TIMEOUT = 10 * 60
FORMAT = '%(asctime)-15s %(levelname)s %(message)s'
formatter = logging.Formatter(FORMAT)
# Create handlers
c_handler = logging.StreamHandler()
f_handler = logging.handlers.RotatingFileHandler('latest.log', maxBytes=5 * 1024 * 1024, backupCount=10,
encoding=None, delay=0)
# Create formatters and add it to handlers
c_handler.setFormatter(formatter)
f_handler.setFormatter(formatter)
logging.basicConfig(format=FORMAT, level=20, handlers=[c_handler, f_handler])
logger = logging.getLogger('geolocations')
kind_ad = {'kind': 'ad'}
def get_addresses_to_process(db):
geo_address = list(db.geodata.distinct('address', {}))
total_address = list(db.ads.distinct("address_lv", kind_ad))
missed = list(set(total_address) - set(geo_address))
missed.sort()
return missed
logger.info("Starting Get Location Service.")
while True:
try:
myclient = pymongo.MongoClient("mongodb://192.168.1.61:27017/")
with myclient:
logger.info("Connected to DB.")
frm = "{0:>30} {1:7}"
addresses_to_process = get_addresses_to_process(myclient.ss_ads)
for a in addresses_to_process:
if not a or a.endswith('..'):
logger.info("Skip: %s %s/%s", a, addresses_to_process.index(a), len(addresses_to_process))
continue
logger.info("Processing: %s %s/%s", a, addresses_to_process.index(a), len(addresses_to_process))
done = False
start_time = time.time()
conn_err_start = 0
conn_err_count = 0
while not done:
if time.time() - start_time > BACKOFF_TIMEOUT or \
(conn_err_count > 0 and time.time() - conn_err_start > CONN_ERR_BACKOFF_TIMEOUT):
logger.warning("Skip %s as of timeout.", a)
conn_err_count = 0
break
try:
geocode_result = google_geocode(a, key=G_KEY)
exist = list(myclient.ss_ads.geodata.find({'address': a}))
if len(exist) > 0:
myclient.ss_ads.geodata.update_one({'_id': exist[0]['_id']},
{'$set': {'geodata': geocode_result}})
else:
myclient.ss_ads.geodata.insert_one({'address': a, 'geodata': geocode_result})
logger.info(list(myclient.ss_ads.geodata.find({'address': a})))
conn_err_count = 0
done = True
except GoogleError as e:
time.sleep(BETWEEN_ERRORS_TIMEOUT)
except NewConnectionError as e:
if conn_err_count == 0:
conn_err_start = time.time()
logger.error(e)
logger.info("Max retries exceeded for %s", a)
conn_err_count += 1
except Exception as e:
logger.error(e)
logger.info("Waiting: %s s.", ITERATIONS_TIMEOUT)
time.sleep(ITERATIONS_TIMEOUT)
addresses_to_process = get_addresses_to_process(myclient.ss_ads)
except Exception as e:
logger.error(e)
time.sleep(5)
| UTF-8 | Python | false | false | 3,825 | py | 2 | geolocations.py | 2 | 0.540915 | 0.525752 | 0 | 94 | 39.691489 | 112 |
Yoon-Haeng-Heo/Algorithm | 11,252,814,338,218 | 63be12efa420e15d211695093ad045494fe93e4a | 417455d4103c2147c27372778047513e9fa15ab7 | /greedy/greedy1.py | 419389fe8ac7b466e176d73a379cdd7f4c818629 | []
| no_license | https://github.com/Yoon-Haeng-Heo/Algorithm | ada16d83fc204e13b7e35ab02c3ffd95da196dab | 85d5bfbf55a5aaa5c36a65fe985ddf6baafa6f9f | refs/heads/master | 2022-12-14T00:44:40.850195 | 2020-09-20T04:10:22 | 2020-09-20T04:10:22 | 293,536,566 | 0 | 0 | null | false | 2020-09-13T13:26:55 | 2020-09-07T13:31:02 | 2020-09-13T13:20:26 | 2020-09-13T13:26:55 | 14 | 0 | 0 | 1 | Python | false | false | def solution():
n,m,k = map(int,input().split())
summ = 0
cnt= 0
arr = list(map(int,input().split()))
arr.sort()
a = arr[n-1]
b = arr[n-2]
#cnt는 큰 수가 더해지는 횟수
cnt = int(m/(k+1)) * k + (m % (k+1))
summ = cnt * a + (m-cnt) * b
return summ
print(solution()) | UTF-8 | Python | false | false | 334 | py | 33 | greedy1.py | 26 | 0.455414 | 0.436306 | 0 | 15 | 19.066667 | 40 |
mverzeletti/URI | 11,063,835,765,691 | 7069508394261bebeca8e506d9407aa117ba73d3 | b24cc0d497613d70959691275db60926187863c4 | /1012.py | 6655d9a9246eb150c15441fd33a4a1e46be89c09 | []
| no_license | https://github.com/mverzeletti/URI | e135acccbc94c84c0e820a6a00bad891350c9426 | 312452475e7dfb9a1ecb5fe81c4136d8aad79220 | refs/heads/master | 2023-04-29T11:49:15.802586 | 2021-05-12T00:25:16 | 2021-05-12T00:25:16 | 257,114,741 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
def main():
dados = str(input())
pi = 3.14159
dados = dados.split(' ')
a = round(float(dados[0]), 2)
b = round(float(dados[1]), 2)
c = round(float(dados[2]), 2)
triangulo = (a * c) / 2
circulo = pi * (c ** 2)
trapezio = ((a + b) * c) / 2
quadrado = b ** 2
retangulo = a * b
print('TRIANGULO: {0:.3f}'.format(triangulo))
print('CIRCULO: {0:.3f}'.format(circulo))
print('TRAPEZIO: {0:.3f}'.format(trapezio))
print('QUADRADO: {0:.3f}'.format(quadrado))
print('RETANGULO: {0:.3f}'.format(retangulo))
if __name__ == "__main__":
main() | UTF-8 | Python | false | false | 649 | py | 31 | 1012.py | 30 | 0.506934 | 0.465331 | 0 | 24 | 25.125 | 49 |
ableinc/polysecrets | 11,785,390,273,207 | 9ab7579a19fed5a7b9758c1ba56cb49375ab1429 | 516b36b6879971835d82f8f458b7161ba69dbcb8 | /polysecrets/cli.py | 52b67ad1e12f1f269f4b0fff1f50d117ec7fab10 | []
| no_license | https://github.com/ableinc/polysecrets | c5a651b3506b6773361d12a6a11f8301b55ecef2 | 2ea970c1d0f312be6fbe67015df08f3964b8a6e2 | refs/heads/master | 2021-06-15T07:09:04.968009 | 2021-06-01T20:56:06 | 2021-06-01T20:56:06 | 190,068,478 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import click, sys
from polysecrets.main import PolySecrets
from polysecrets.version import __version__
_def_sec = 'HOXubh876Gv66v845345FTfhmd'
def go(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo(f'Secret: {PolySecrets(config).manual()}')
ctx.exit()
@click.command()
@click.argument('go', nargs=1, type=click.UNPROCESSED)
@click.option('-s', '--secret', required=False, default=_def_sec, type=click.STRING, help='The secret string')
@click.option('-l', '--length', default=10, type=click.INT, help='Length of the secret. Secret has a minimum length '
'of 10')
@click.option('-i', '--interval', default=30, type=click.INT, help='How frequently should a new secret be generated '
'(in seconds)')
@click.option('-u', '--uuid', default='yes', type=click.STRING, help='Whether to use UUIDs or Alphanumeric characters for '
'secret generation - yes, no, both')
@click.option('-m', '--mixcase', default=False, type=click.BOOL, help='Decide whether or not to mix the case of alpha'
'characters in secret string')
@click.option('-p', '--persist', default=False, type=click.BOOL, help='Never get the same secret twice with '
'persistence from MongoDB. A .env file is required.')
@click.option('--symbols', default=False, type=click.BOOL, help='Whether or not to use special characters in secret. This will only increase the probability of appending a special character.')
@click.version_option(version=__version__)
def cli(go, secret, length, interval, uuid, mixcase, persist, symbols):
if not isinstance(uuid, str):
print(f'UUID must equal True, False or "Both". You have {uuid}, which is invalid.')
sys.exit()
config = dict(
secret=secret,
length=length,
interval=interval,
uuid=uuid,
mixcase=mixcase,
symbols=symbols,
persist=persist
)
click.echo(f'Secret: {PolySecrets(config).manual()}')
if __name__ == '__main__':
try:
cli()
except (AttributeError, AssertionError, TypeError) as excep:
print(f'Fatal error - {excep}')
sys.exit(1)
| UTF-8 | Python | false | false | 2,489 | py | 9 | cli.py | 6 | 0.574126 | 0.566493 | 0 | 50 | 48.78 | 250 |
zyp521/python | 18,949,395,732,661 | ffa7df227401b4b7abd176ecb9268e4384e17a0a | 99df93222e25d262b212fe30f98fe0d7ee37b807 | /数据分析/pandas_lx/分组练习.py | e8541ef9f3dc4e69a00db036558254e1d7939f37 | []
| no_license | https://github.com/zyp521/python | 7b1d22a574f53a56a46e8048af98c542a984bd31 | bba75d3dda33d50c6ec5b0de55be918591d42e02 | refs/heads/master | 2023-04-09T19:08:41.944840 | 2021-04-13T01:13:34 | 2021-04-13T01:13:34 | 285,146,480 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
pd.set_option('display.max_column', None)
out = pd.read_csv('./pandas_exercise/exercise_data/drinks.csv', sep=',')
print(out)
# 1.那个大陆(continent)平均消耗的啤酒(beer)更多
print('********************************************************')
# for i in out.groupby(by='continent'):
# print(i)
# print(out.groupby(by='continent').get_group('AF')) # 获取分组后指定组信息
print(out.groupby(by='continent')['beer_servings'].agg('mean').sort_values(ascending=False))
# 2.打印出每个大陆(continent)的红酒消耗(wine_servings)的描述性统计值
print(out.groupby(by='continent')['wine_servings'].describe())
# 3.打印出每个大陆每种酒类列的消耗平均值
print(out.groupby('continent')[['beer_servings', 'spirit_servings', 'wine_servings']].agg('mean'))
# 4. 打印出每个大陆每种酒列别的消耗中位数
print(out.groupby('continent')[['beer_servings', 'spirit_servings', 'wine_servings']].agg('median'))
# 5.打印出每个大陆对spirit饮品消耗的平均值,最大值和最小值
print(out.groupby('continent')[['beer_servings', 'spirit_servings', 'wine_servings']].agg(['mean', 'max', 'min']))
| UTF-8 | Python | false | false | 1,185 | py | 336 | 分组练习.py | 138 | 0.673176 | 0.668037 | 0 | 20 | 47.65 | 114 |
zhirsch/adventofcode | 10,788,957,891,400 | bef40db4a3a9befa29b9051a95423328f7208b3d | 507a22549148cf2711976c2a50419da7bb85dc61 | /2019/day06/day06.py | f73d6c1f3e87e5eeb33a5af8a08372cb707c9a74 | []
| no_license | https://github.com/zhirsch/adventofcode | 6427694687d6afac6fdee5aecfc61db0d576d3f9 | 9ea0a1556a2db031ea0e016347619b36c4f1af0b | refs/heads/master | 2020-11-23T21:59:27.161534 | 2020-01-02T07:27:09 | 2020-01-02T07:28:09 | 227,838,629 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3
import heapdict
def parse(lines):
edges = {}
for line in (x.strip() for x in lines if x.strip()):
a, b = line.split(')', 1)
edges.setdefault(a, []).append(b)
edges.setdefault(b, []).append(a)
return edges
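# Illustrative check (hypothetical two-orbit map, not the puzzle input):
# parse(["COM)B", "B)C"]) == {'COM': ['B'], 'B': ['COM', 'C'], 'C': ['B']}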
def transfers(edges, src, dst):
unvisited = heapdict.heapdict()
unvisited[src] = 0
distances = {src: 0}
while unvisited:
current, _ = unvisited.popitem()
for node in edges[current]:
alt = distances[current] + 1
if alt < distances.get(node, float('inf')):
distances[node] = alt
unvisited[node] = alt
return distances[dst]
def main():
with open('input.txt') as f:
edges = parse(f.readlines())
assert len(edges['YOU']) == 1
assert len(edges['SAN']) == 1
print(transfers(edges, edges['YOU'][0], edges['SAN'][0]))
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 935 | py | 28 | day06.py | 28 | 0.547594 | 0.537968 | 0 | 41 | 21.804878 | 61 |
BraedenHopper99/MachineLearning | 180,388,632,886 | d20a97a8e597bc61f84f367ff43bac928fe36826 | c0f386ac8f6f0e315441a02bf08185bd86e282f7 | /SearchandRanking/Programs/searchengine.py | 733105dcf4b1bafa58af7e26a1a58440fbc7439d | []
| no_license | https://github.com/BraedenHopper99/MachineLearning | cbae314bb56f99ef0f4940e815720b556d2e6700 | 78f8d3efcebce9a2413a3df46901b288fae4e32d | refs/heads/master | 2017-09-11T08:10:44.575658 | 2017-07-13T15:39:56 | 2017-07-13T15:39:56 | 96,161,365 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sqlite3
con = sqlite3.connect(":memory:")
con.isolation_level = None
cur = con.cursor()
class crawler:
  #Initialize the crawler with the name of the database
def __init__(self,dbname):
self.con=sqlite.connect(dbname)
def __del__(self):
self.con.close()
def dbcommit(self):
self.con.commit()
#Auxiliary function for getting an entry id and adding it
  #if it's not present
  def getentryid(self, table, field, value, createnew=True):
cur=self.con.execute(
"select rowid from %s where %s='%s' " % (table,field,value))
res=cur.fetchone()
if res==None:
      cur=self.con.execute(
        "insert into %s (%s) values ('%s')" % (table, field, value))
      return cur.lastrowid
else:
return res[0]
#Index an individual page
def addtoindex(self, url, soup):
if self.isindexed(url): return
print 'indexing %s' %url
#Get the individual words
text=self.gettextonly(soup)
words=self.separatewords(text)
#Get url id
urlid=self.getentryid('urllist', 'url', url)
#Link each word to the url
for i in range(len(words)):
word=words[i]
if word in ignorewords: continue
wordid=self.getentryid('wordlist','word',word)
self.con.execute("insert into wordlication(urlid,wordid, location) \
values (%d, %d, %d)" % (urlid, wordid, i)
  #Extract the text from an HTML page (returns one long, in-order string)
def gettextonly(self, soup):
v=soup.string
if (v==None):
c=soup.contents
resulttext=''
for t in c:
subtext=self.gettextonly(t)
resulttext+=subtext+'\n'
return resulttext
else:
return v.strip()
  #Split text into words on any non-alphanumeric character (consumes the string from gettextonly)
def separatewords(self,text):
splitter=re.compile('\\W*')
return [s.lower() for s in splitter.split(text) if s !='']
#Return true if this url is already indexed
def isindexed(self, url):
u=self.con.execute \
("select rowid from urllist where url='%s'" % url)/fetchone()
if (u!=None):
#Check if it has been crawled
v=self.con.execute(
'select * from wordlocation where urlid=%d' % u[0]).fetchone()
if (v!=None): return True
return False
#Add a link between two pages
def addlinkref(self, urlFrom, urlTo, linkText):
pass
  #Breadth-first crawl: index each page, collect its links, then move one level deeper
def crawl(self, pages, depth=2):
for i in range (depth):
newpages = set()
for page in pages:
try:
          c=urllib2.urlopen(page)
except:
print "Could not open %s" % page
continue
soup = BeautifulSoup(c.read())
self.addtoindex(page,soup)
links=soup('a')
for link in links:
if('href' in dict(link.attrs)):
            url=urljoin(page,link['href'])
if url.find("'")!=-1: continue
url=url.split('#')[0]
if url[0:4]=='http' and not self.isindexed(url):
newpages.add(url)
linkText=self.gettextonly(link)
self.addlinkref(page,url,linkText)
self.dbcommit()
pages=newpages
#Function to add all of the tables
def createindextables(self):
self.con.execute('create table urllist(url)')
self.con.execute('create table wordlist(word)')
    self.con.execute('create table wordlocation(urlid, wordid, location)')
self.con.execute('create table link(fromid integer, toid integer)')
self.con.execute('create table linkwords(wordid, linkid)')
self.con.execute('create index wordidx on wordlist(word)')
self.con.execute('create index urlidx on urllist(url)')
self.con.execute ('create index wordurlidx on wordlocation(wordid)')
self.con.execute('create index urltoidx on link(toid)')
self.con.execute('create index urlfromidx on link(fromid)')
self.dbcommit()
  #PAGE RANK ALGORITHM (stub: the iterative computation was never written in this version)
  def calculatepagerank(self, iterations=20):
    pass
#QUERYING
#This section defines the searching part of the engine
class searcher:
def __init__(self,dbname):
self.con=sqlite.connect(dbname)
def __del__(self):
self.con.close()
def getmatchrows(self, q):
#Strings to build query
    fieldlist='w0.urlid'
tablelist=''
clauselist=''
wordids=[]
#Split the words by spaces
words=q.split(' ')
tablenumber=0
for word in words:
#get word id
wordrow=self.con.execute("SELECT rowid FROM wordlist WHERE word='%s '" % word).fetchone()
if wordrow!=None:
wordid=wordrow[0]
wordids.append(wordid)
if tablenumber>0:
tablelist+=','
clauselist+=' and '
clauselist+='w%d.urlid=w%d.urlid and ' % (tablenumber-1, tablenumber)
fieldlist+=',w%d.location' % tablenumber
tablelist+='wordlocation w%d' % tablenumber
        clauselist+='w%d.wordid=%d' % (tablenumber, wordid)
tablenumber+=1
#Create Query from the separate parts
fullquery='SELECT %s FROM %s WHERE %s' % (fieldlist, tablelist, clauselist)
cur=self.con.execute(fullquery)
rows=[row for row in cur]
return rows,wordids
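  # For a two-word query the generated SQL has this shape (illustrative):
  #   SELECT w0.urlid, w0.location, w1.location
  #   FROM wordlocation w0,wordlocation w1
  #   WHERE w0.urlid=w1.urlid and w0.wordid=<id0> and w1.wordid=<id1>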
  #CONTENT BASED RANKING FUNCTIONS
def getscoredlist (self, rows, wordids):
    totalscores=dict([(row[0],0) for row in rows])
#This is where the scoring function weights will go:
#ie to activate frequency call this: weights=[(1.0,self.frequencyscore(rows))]
weights=[]
for (weight,scores) in weights:
for url in totalscores:
totalscores[url]+=weight*scores[url]
return totalscores
def geturlname(self,id):
    return self.con.execute("SELECT url FROM urllist WHERE rowid=%d" % id).fetchone()[0]
def query(self,q):
rows,wordids = self.getmatchrows(q)
scores=self.getscoredlist(rows,wordids)
    rankedscores=sorted([(score,url) for (url,score) in scores.items()],reverse=1)
for (score,urlid) in rankedscores[0:10]:
print ("%f\t%s" % (score,self.geturlname(urlid)))
#NORMALIZE SCORES TO 0 and 1
def normalizescores(self, scores, smallIsBetter=0):
vsmall=0.00001 #avoid div 0 errs
if smallIsBetter:
minscore=min(scores.values())
return dict([(u,float(minscore)/max(vsmall,l)) for (u,l) in scores.items()])
else:
maxscore=max(scores.values())
if maxscore==0: maxscore=vsmall
return dict([(u,float(c)/maxscore) for (u,c) in scores.items()])
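  # Worked example: normalizescores({'a': 2, 'b': 4}) -> {'a': 0.5, 'b': 1.0};
  # with smallIsBetter=1 the same input gives {'a': 1.0, 'b': 0.5}.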
#WORD FREQUENCY
def frequencyscore(self, rows):
    counts = dict([(row[0],0) for row in rows])
for row in rows: counts[row[0]]+=1
return self.normalizescores(counts)
#DOCUMENT LOCATION
def locationscore(self,rows):
locations=dict([(row[0],1000000) for row in rows])
for row in rows:
loc = sum(row[1:])
      if loc<locations[row[0]]: locations[row[0]]=loc
    return self.normalizescores(locations,smallIsBetter=1)
#WORD DISTANCE
def distancescore(self, rows):
    if len(rows[0])<=2: return dict([(row[0],1.0) for row in rows])
    mindistance = dict([(row[0],1000000) for row in rows])
for row in rows:
dist=sum([abs(row[i]-row[i-1]) for i in range(2,len(row))])
if dist<mindistance[row[0]]: mindistance[row[0]]=dist
return self.normalizescores(mindistance,smallIsBetter=1)
#USING INBOUND LINKS - SIMPLE COUNT - sort of like pageRank
def inboundlinkscore(self,rows):
    uniqueurls = set([row[0] for row in rows])
inboundcount=dict([(u,self.con.execute('SELECT count(*) FROM link WHERE toid=%d' % u).fetchone()[0]) for u in uniqueurls])
return self.normalizescores(inboundcount)
| UTF-8 | Python | false | false | 7,100 | py | 3 | searchengine.py | 2 | 0.672113 | 0.662254 | 0 | 263 | 25.86692 | 125 |
asphalt-framework/asphalt-filewatcher | 1,494,648,651,123 | c37dbe2b998300f131a04cb470d951d3b638e078 | 57f22cc7999c0aab8b58f7b59d6aba4cc3830b0e | /setup.py | ebf90fc970c424dc2ce133406d07eb63621f228f | [
"Apache-2.0"
]
| permissive | https://github.com/asphalt-framework/asphalt-filewatcher | a228b42aaf6d8e326a1d0ce5b5b7c60cb54bf43a | 137926356fb8e4cd0f369fc0bc787c4ecbd28b57 | refs/heads/master | 2021-07-05T03:14:42.394069 | 2016-12-24T08:52:49 | 2016-12-24T08:52:49 | 58,621,518 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import platform
from pathlib import Path
from setuptools import setup
cffi_modules = []
if platform.system() == 'Linux':
cffi_modules = ['asphalt/filewatcher/watchers/inotify_build.py:ffi']
elif platform.system() == 'Windows':
cffi_modules = ['asphalt/filewatcher/watchers/windows_build.py:ffi']
setup(
name='asphalt-filewatcher',
use_scm_version={
'version_scheme': 'post-release',
'local_scheme': 'dirty-tag'
},
description='File change notifier component for the Asphalt framework',
long_description=Path(__file__).with_name('README.rst').read_text('utf-8'),
author='Alex Grönholm',
author_email='alex.gronholm@nextday.fi',
url='https://github.com/asphalt-framework/asphalt-filewatcher',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
],
license='Apache License 2.0',
zip_safe=False,
packages=[
'asphalt.filewatcher',
'asphalt.filewatcher.watchers'
],
cffi_modules=cffi_modules,
setup_requires=[
'setuptools_scm >= 1.7.0',
'cffi >= 1.8.1; platform_system == "Linux" or platform_system == "Windows"'
],
install_requires=[
'asphalt ~= 2.0',
'cffi >= 1.8.1; platform_system == "Linux" or platform_system == "Windows"'
],
entry_points={
'asphalt.components': [
'filewatcher = asphalt.filewatcher.component:FileWatcherComponent'
],
'asphalt.watcher.watchers': [
'inotify = asphalt.filewatcher.watchers.inotify:INotifyFileWatcher',
'kqueue = asphalt.filewatcher.watchers.kqueue:KQueueFileWatcher',
'poll = asphalt.filewatcher.watchers.poll:PollingFileWatcher',
'windows = asphalt.filewatcher.watchers.windows:WindowsFileWatcher'
]
}
)
| UTF-8 | Python | false | false | 2,097 | py | 18 | setup.py | 11 | 0.632634 | 0.623092 | 0 | 58 | 35.137931 | 83 |
jandrejk/oeawai_challenge | 9,998,683,876,695 | 0a19abaee3205c83d0d506f4edb950790def18c8 | 2586c03ce61f1c1f5ae3083ab35f7ce19baa8a8c | /train_utils.py | d1f55668406bfc607573ff45f0cd979bab378d9c | []
| no_license | https://github.com/jandrejk/oeawai_challenge | 85b2cbe6f59e524b0e3c93127369d365a3f275f3 | 9fcf70fee45002295eeda8c82e0da2592dd47d1f | refs/heads/master | 2020-07-01T19:10:41.783349 | 2019-08-17T06:33:24 | 2019-08-17T06:33:24 | 201,268,244 | 0 | 0 | null | true | 2019-08-08T13:48:48 | 2019-08-08T13:48:48 | 2019-08-08T13:37:42 | 2019-08-08T13:37:41 | 5,333 | 0 | 0 | 0 | null | false | false | import torch
import numpy as np
import torch.nn.functional as F
import time
import csv
from sklearn.metrics import f1_score
import pickle
def output_to_class(output):
"""
    takes the output from a nn fed with a batch and returns the predicted classes
"""
classes = []
for sample in output:
classes.append(list(sample).index(max(sample)))
return classes
# This function trains the model for one epoch
def train(args, model, device, train_loader, optimizer, epoch, start_time):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tF1: {:.4f}\tRuntime: {:.1f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item(), f1_score(target.detach().cpu().numpy(), output_to_class(output), average='micro'), time.time() - start_time))
# This function evaluates the model on the test data
def test(args, model, device, test_loader, epoch, trainDataset, testDataset, path_save):
model.eval()
with open(path_save + 'NN-submission-' +str(epoch)+'.csv', 'w', newline='') as writeFile:
instruments = list(15*np.ones(len(testDataset)))
for samples, indices in test_loader:
out = model(samples)
prediction_batch = output_to_class(out)
for pred, index in zip(prediction_batch,indices):
instruments[int(index)] = trainDataset.transformInstrumentsFamilyToString([pred])
fieldnames = ['Id', 'Predicted']
writer = csv.DictWriter(writeFile, fieldnames=fieldnames, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
writer.writeheader()
for i in range(len(instruments)):
writer.writerow({'Id': i, 'Predicted': instruments[i][0]})
print('saved predictions')
def save_output(args, model, device, test_loader, which_net, trainDataset, testDataset, path_save):
model.eval()
with open(path_save + 'output-' +which_net+'.txt', 'wb') as writeFile:
outputs = np.ones([len(testDataset), 10])
for samples, indices in test_loader:
out = model(samples)
for pred, index in zip(out,indices):
outputs[int(index)] = pred.detach().cpu().numpy()
pickle.dump([outputs], writeFile)
print('saved outputs')
def save_geometric_mean_predictions(path_1D, path_2D, path_save, trainDataset, testDataset):
# get outs
instruments = []
with open(path_1D, 'rb') as readFile:
out_1D = pickle.load(readFile)[0]
with open(path_2D, 'rb') as readFile:
out_2D = pickle.load(readFile)[0]
# geometric mean
for pred1, pred2 in zip(out_1D, out_2D):
#print('out1D: ', out_1D)
#print('out2D: ', out_2D)
pred = np.log(np.sqrt(np.exp(pred1)*np.exp(pred2)))
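        # note: log(sqrt(exp(a)*exp(b))) simplifies to (a + b)/2 -- the geometric
        # mean of the probabilities is the arithmetic mean of the log-probabilities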
#print('pred: ', pred)
pred = output_to_class([pred])
pred = trainDataset.transformInstrumentsFamilyToString(pred)
instruments.append(pred)
# write submission file
with open(path_save + 'NN-submission-combined-model.csv', 'w', newline='') as writeFile:
fieldnames = ['Id', 'Predicted']
writer = csv.DictWriter(writeFile, fieldnames=fieldnames, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
writer.writeheader()
for i in range(len(instruments)):
writer.writerow({'Id': i, 'Predicted': instruments[i][0]})
print('saved predictions') | UTF-8 | Python | false | false | 4,024 | py | 14 | train_utils.py | 9 | 0.590457 | 0.581759 | 0 | 116 | 33.698276 | 176 |
yuyaction/SIRmodel | 17,918,603,590,141 | 79d65ee4544460f2c4ff63a2821a10b1f93566c6 | 32616a9edea490c7a685b4648d5e20a18557289b | /main.py | 3d46c72429df03d7ab6b992880c02878513a466c | []
| no_license | https://github.com/yuyaction/SIRmodel | 7fa8841bb146ea30b3788f001028edd1b641c95c | ac9dfc892b54c5a7b473d28fd4adfe63e2811e9a | refs/heads/master | 2022-07-17T14:10:27.359613 | 2020-05-10T10:09:20 | 2020-05-10T10:09:20 | 262,759,716 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #coding: UTF-8
import math
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
def eq_S(S,I):
return -beta*S*I
def eq_I(S,I):
return beta*S*I-gamma*I
def eq_R(I):
return gamma*I
def calc_k1(p,func):
    return func(p)
def calc_k2(p,q,func):
    return func(p,q)
def RungeKutta1(p,func,h):
    # classic RK4 step: the k_i are slope evaluations; h is applied once at the end
    # (the original multiplied by h both inside calc_k1 and here, giving an O(h**2) step)
    k_1 = calc_k1(p,func)
    k_2 = calc_k1(p+0.5*h*k_1,func)
    k_3 = calc_k1(p+0.5*h*k_2,func)
    k_4 = calc_k1(p+h*k_3,func)
    k=(k_1+2*k_2+2*k_3+k_4)/6
    return h*k
def RungeKutta2(p,q,func,h):
    # RK4 step in p for func(p,q); perturbing q with p's slope is an approximation --
    # a fully coupled RK4 would track separate slopes for S and I
    k_1 = calc_k2(p,q,func)
    k_2 = calc_k2(p+0.5*h*k_1,q+0.5*h*k_1,func)
    k_3 = calc_k2(p+0.5*h*k_2,q+0.5*h*k_2,func)
    k_4 = calc_k2(p+h*k_3,q+h*k_3,func)
    k=(k_1+2*k_2+2*k_3+k_4)/6
    return h*k
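# Sanity check of the RK4 step (illustrative, not part of the simulation):
# for dy/dt = y with y(0) = 1 and h = 0.1 the exact value is e**0.1 = 1.1051709...,
# and 1 + RungeKutta1(1.0, lambda y: y, 0.1) reproduces it to about 7 digits.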
#initial settings
total_population = 10000000
beta = 2e-14
gamma= beta/5
total_time = 1000000
h = 0.1
time_cycle = int(total_time/h)
S = np.zeros(time_cycle+1)
I = np.zeros(time_cycle+1)
R = np.zeros(time_cycle+1)
RR = np.zeros(time_cycle+1)
S[0] = 0.999999*total_population
I[0] = 0.000001*total_population
R[0] = total_population-S[0]-I[0]
for t in range(time_cycle):
S[t+1] = S[t]+RungeKutta2(S[t],I[t],eq_S,h)
I[t+1] = I[t]+RungeKutta2(S[t],I[t],eq_I,h)
R[t+1] = total_population-S[t+1]-I[t+1]
#figure
time = np.arange(time_cycle+1)
#plt.plot(time,S)
plt.plot(time,I)
plt.plot(time,R)
plt.legend(['I','R'])
plt.savefig('tokyo')
| UTF-8 | Python | false | false | 1,488 | py | 4 | main.py | 2 | 0.586022 | 0.512097 | 0 | 70 | 20.242857 | 53 |
emgasu/TareaNoel | 16,140,487,099,829 | 57dc2211c1f4fd94072525bab88c23c93233e540 | f6cb90d3e7d2cbaa919796cbe4ab7992b59a7657 | /EjercicioPuntoInflexion.py | 5fd8b540c75ec332c33c0abf97b113416f20e770 | []
| no_license | https://github.com/emgasu/TareaNoel | eadda842dda9778d18d1d8ea60f7463a509f293e | 9e9f2accff52eb5a3ea5284a4fcbb206c5bac5f6 | refs/heads/main | 2022-12-27T01:46:24.535865 | 2020-10-17T05:08:43 | 2020-10-17T05:08:43 | 304,801,542 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Ejercicio punto de inflexion
import numpy as np
import matplotlib.pyplot as plt
def funcion(x):
return ((x*x*x*x)/12)-((x*x*x)/2)+(x*x)+10
def derivada1(x):
    return ((x*x*x)/3)-((3*(x*x))/2)+(2*x)
def derivada2(x):
return (x*x) - 3*x +2
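# A compact, equivalent sign-change test (illustrative helper, not used below):
def has_inflection(x0, eps=0.1):
    # f has an inflection point at x0 if f''(x) changes sign across x0
    return derivada2(x0 - eps) * derivada2(x0 + eps) < 0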
#Step I. Set f"(x) equal to 0 to find the candidate positions:
# x*x - 3x + 2 = 0, i.e. (x - 1)(x - 2) = 0
#Therefore f"(x) = 0 when x = 1 and x = 2
#Step II. For x = 1, f"(x) is evaluated to the left and to the right of x = 1,
# at x = 0.9 and x = 1.1
x1=derivada2(0.9)
print(" Derivada2(0.9)= ",x1)
x2=derivada2(1.1)
print(" Derivada2(1.1)= ",x2)
if x1*x2 < 0:
    # f"(x) changes sign around x = 1 (the original nested ifs printed nothing
    # when both values had the same nonzero sign)
    print("An inflection point exists at x=1")
else:
    print("x=1 is NOT an inflection point")
# For x = 2, f"(x) is evaluated to the left and to the right of x = 2,
# at x = 1.9 and x = 2.1
x3=derivada2(1.9)
print(" derivada2(1.9)= ",x3)
x4=derivada2(2.1)
print(" derivada2(2.1)= ",x4)
if x3*x4 < 0:
    # f"(x) changes sign around x = 2
    print("An inflection point exists at x=2")
else:
    print("x=2 is NOT an inflection point")
# Plot
x = np.linspace(-2.5, 2.5, 10)
y = funcion(x)
y1=funcion(1)
y2=funcion(2)
plt.plot(x, y)
plt.plot(1,y1, marker="o", color="red")
plt.plot(2,y2, marker="o", color="red")
plt.show()
| UTF-8 | Python | false | false | 1,480 | py | 3 | EjercicioPuntoInflexion.py | 3 | 0.579838 | 0.510149 | 0 | 54 | 25.37037 | 77 |
jj-a/basicPython | 5,978,594,526,839 | e2a1d8aad1715cdcc625b36ad951a83280549455 | f4fe0733d433cb650869cc502b58a9a81d657306 | /section11/04-datainfo.py | 1eb492482683fba2360f4127ea74ea4537ed9f2c | []
| no_license | https://github.com/jj-a/basicPython | c14bd1331bba1e0b3f2eee40053cbf15aab83dd2 | 50758d6faa95409d6a1d3f3cc183a142cc1a6312 | refs/heads/master | 2020-04-26T16:30:59.322970 | 2019-03-13T09:14:10 | 2019-03-13T09:14:10 | 173,681,661 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # section11 / 04-datainfo.py
# Computing basic statistics
from pandas import DataFrame
from sample import grade_dic
df = DataFrame(grade_dic, index=["철수", "영희", "민철", "수현", "호영"])
print(df)
print(df.head()) # defaults to the first 5 rows when no argument is given
print(df.head(2)) # only the first 2 rows of the whole frame
print(df["영어"].head(2)) # first 2 rows of one column
print(df.tail(2)) # view only the last 2 rows
print(df["영어"].tail(2)) # last 2 rows of one column
# Looking up summary statistics individually (returns the count of non-NA values per column, or for one column)
print(df.count())
print(df["영어"].count())
print(df.min())
print(df["영어"].min())
print(df.max())
print(df["영어"].max())
print(df.mean()) # mean
print(df["영어"].mean())
print(df.std()) # standard deviation
print(df["영어"].std())
print(df.median()) # median (2nd quartile)
print(df["영어"].median())
print(df.quantile(q=0.5)) # quantile (median: the value at the 50% position)
print(df["영어"].quantile(q=0.5))
print(df.quantile(q=0.25)) # 1st quartile (the value at the 25% position)
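# Quantile illustration on a throwaway Series (pandas interpolates linearly):
# e.g. pandas.Series([1, 2, 3, 4]).quantile(q=0.25) evaluates to 1.75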
mcode36/Python-Challenge | 12,661,563,624,442 | 27492cedfe8506bce9f6573646f95e3fb7daa946 | 2cd8463e198a41efce70016edb1bd16e9af75325 | /P3/hey-ho.py | 1fe0bdc811b5ccfffe900d73b82df314ce5fbaf5 | []
| no_license | https://github.com/mcode36/Python-Challenge | d3ec9ff293c2a6594a232cf0010a75c0b5b1d3b3 | 6e0767c1fd120ac8c01bf94488fa61d13069aa23 | refs/heads/master | 2020-04-28T01:23:13.921668 | 2019-04-24T22:37:13 | 2019-04-24T22:37:13 | 174,854,014 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | for i in range (1, 101):
s = str(i)
if (i%6 == 0) :
s = "hey-ho"
else:
if (i%2 == 0) :
s = "hey"
if (i%3 == 0) :
s = "ho"
print(s)
| UTF-8 | Python | false | false | 205 | py | 15 | hey-ho.py | 7 | 0.282927 | 0.234146 | 0 | 10 | 18.5 | 24 |
rrabit42/Python-Programming | 16,999,480,580,098 | 87083ba36cefe21342ce6fe90b94b3420f6f3cbb | 7012c3609f4aa5712f17bfee199d856d36a968d2 | /Python프로그래밍및실습/ch12-TKinter/lab12-7-radiobuttion.py | 3226edeff51ecf42af0636f67b2f1ae6bc957d1c | []
| no_license | https://github.com/rrabit42/Python-Programming | 1688c2b21ab19f09d2491152ae2dd0ddb1910288 | 551efb6fe4ee3b92c5cb2ef61d2198d55966471a | refs/heads/master | 2021-07-23T01:27:42.657069 | 2020-07-28T17:23:40 | 2020-07-28T17:23:40 | 200,571,844 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from tkinter import *
def select():
    label['text']= str(var.get()) + " selected"
window = Tk()
var = IntVar() # var is a tkinter variable that stores an integer
R1 = Radiobutton(window, text="Option 1", variable=var, value=1, command=select)
R1.pack() # clicking R1 sets the integer variable var to 1
R2 = Radiobutton(window, text="Option 2", variable=var, value=2, command=select)
R2.pack() # clicking R2 sets the integer variable var to 2
R3 = Radiobutton(window, text="Option 3", variable=var, value=3, command=select)
R3.pack() # clicking R3 sets the integer variable var to 3
label = Label(window)
label.pack()
window.mainloop()
| UTF-8 | Python | false | false | 686 | py | 206 | lab12-7-radiobuttion.py | 205 | 0.60339 | 0.572881 | 0 | 20 | 27.5 | 74 |
tomatoy/cy | 2,551,210,609,217 | ff01518155f063b5f399a91546d3fdbfc4341a8f | 7b2d1dd1efb60c3a9dd4eb59355ca4041005b0ef | /setup.py | 7db98f90c06b22b439e2f302e2a3b48b79b5ed01 | []
| no_license | https://github.com/tomatoy/cy | eff89e3df084b8734a0bba277cb945f786c1c52c | 2e5f3232796f58825661e8fefcd636cb7942f1cd | refs/heads/master | 2021-05-05T16:28:15.473367 | 2018-01-13T15:08:59 | 2018-01-13T15:08:59 | 117,352,815 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
__version__ = '0.1.0'
dependencies = {}
for env in ('prod', 'dev'):
requires = []
with open('requirements/{}.txt'.format(env)) as f:
for line in f:
if not (line.startswith('#') or line.startswith('-r')):
requires.append(line)
dependencies[env] = requires
with open('README.rst') as f:
readme = f.read()
setup(
name='cy',
author='helms',
author_email='zhanghdcy@gmail.com',
url='https://github.com/helms/cy',
description='cy website',
long_description=readme,
version=__version__,
packages=find_packages(),
install_requires=dependencies['prod'],
tests_require=dependencies['dev'],
extras_require={
'dev': dependencies['dev'],
}
)
| UTF-8 | Python | false | false | 834 | py | 4 | setup.py | 2 | 0.597122 | 0.592326 | 0 | 36 | 22.166667 | 67 |
Toroi0610/unsupervised_heat_equation | 5,360,119,213,421 | 8ee841d0cf08d3c18bc3c961a13745525384598b | cbfb3c4886320d495f6149ba6071624114f650b5 | /.ipynb_checkpoints/utils-checkpoint.py | ce503a32f245044fc4432a07d5b8cb267384b684 | []
| no_license | https://github.com/Toroi0610/unsupervised_heat_equation | 9570769ac3eec9ece86136e7d60a2e1dbbae333f | 9bc24d1e07e18004b078dd3fc091c974c570405b | refs/heads/master | 2023-05-04T08:09:39.642561 | 2021-05-23T11:45:25 | 2021-05-23T11:45:25 | 367,778,313 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
def convert_array_to_tensor(arr):
return arr.reshape([1, arr.shape[0], arr.shape[1], 1])
def convert_tensor_to_array(tensor):
return tensor[0, :, :, 0]
def get2orderderivative(temp_field, config):
dx = config["simulation_params"]["dx"]
dy = config["simulation_params"]["dy"]
    # central second difference: u_{i+1} - 2*u_i + u_{i-1} (the -2* factor was missing)
    du2_dx2 = (temp_field[2:, 1:-1] - 2*temp_field[1:-1, 1:-1] + temp_field[:-2, 1:-1]) / (dx*dx)
    du2_dy2 = (temp_field[1:-1, 2:] - 2*temp_field[1:-1, 1:-1] + temp_field[1:-1, :-2]) / (dy*dy)
return du2_dx2, du2_dy2
def getflow(temp_field, config):
kappa = config["simulation_params"]["kappa"]
du2_dx2, du2_dy2 = get2orderderivative(temp_field, config)
return kappa * (du2_dx2 + du2_dy2) | UTF-8 | Python | false | false | 735 | py | 11 | utils-checkpoint.py | 7 | 0.608163 | 0.548299 | 0 | 19 | 36.789474 | 95 |
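# Illustrative check (assumes a config dict like {"simulation_params": {"dx": 1, "dy": 1, "kappa": 1}}):
# for u(x, y) = x**2 sampled on a uniform grid the central difference is exact,
# so du2_dx2 == 2 at every interior point and getflow reduces to kappa * 2.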
NeaX1X/hw_Kazak | 9,947,144,285,315 | 52c93e636526f4e85cb4c7ed846894bc9e347db7 | 1f4c4b5808e3c4812889d083875008519f687426 | /hw2/calc2.py | cdc615564eb06b4fd58f18feeb992c6579099ef0 | []
| no_license | https://github.com/NeaX1X/hw_Kazak | 2e6700fe2faeb5dbabbe7c4d24a0628c5398ef8b | 39b2035e67abfe9a6a67897185a9f2befc127ea4 | refs/heads/main | 2023-01-31T18:46:04.471025 | 2020-12-14T18:26:31 | 2020-12-14T18:26:31 | 308,412,170 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | number1 = input('Enter first number ')
number2 = input('Enter second number ')
operator = input('Enter operator ')
try:
number1, number2 = int(number1), int(number2)
except:
    print('This is not a number')
else:
if operator == '-':
print(number1 - number2)
elif operator == '+':
print(number1 + number2)
elif operator == '*':
print(number1 * number2)
elif operator == '//':
print(number1 // number2)
elif operator == '**':
print(number1 ** number2)
elif operator == '/':
if number2 == 0:
print('Error: zero division')
else:
print(number1 / number2)
else:
print('Invalid operator')
| UTF-8 | Python | false | false | 614 | py | 21 | calc2.py | 18 | 0.643322 | 0.610749 | 0 | 26 | 22.576923 | 46 |
sgeyer-tgm/WahlAnalyse | 14,989,435,894,696 | 46ec2b436ebec6e6ce7ae872ccbac960e640883e | e96c087f26b15461137649f612d683dffa2c832f | /model.py | 401561b507915ed203e26661c9109b3130d77c5c | []
| no_license | https://github.com/sgeyer-tgm/WahlAnalyse | 237caa0e3b496272144da8ac803185657607d9cd | c269b08697f533ddaa93e5e7d12b0e467ac15770 | refs/heads/master | 2018-01-10T21:51:21.780765 | 2016-04-13T23:25:25 | 2016-04-13T23:25:25 | 51,511,857 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from PySide.QtGui import QUndoStack
class WahlAnalyseModel(object):
"""
This class contains the any data that is not being persisted
"""
def __init__(self):
self.filename = None
self.undo_stack = QUndoStack()
| UTF-8 | Python | false | false | 243 | py | 12 | model.py | 10 | 0.650206 | 0.650206 | 0 | 10 | 23.3 | 64 |
egorncpk/webProject | 3,298,534,887,250 | 4bc9679afadfabc49f729b419f6254a935df36bc | c67197a2f656642d52105f9e17dfe154fa3db845 | /webProject/loginsys/urls.py | a2c3f172bd4391dcbcb321d7646364c62e7ea543 | []
| no_license | https://github.com/egorncpk/webProject | a96a5b85626273731eaf184b70aec0bf53e53994 | 8d82037f96fc7bc7fb5d388a93b56a8540e600af | refs/heads/master | 2019-06-17T01:57:13.001491 | 2017-01-16T10:21:19 | 2017-01-16T10:21:19 | 78,830,679 | 0 | 0 | null | false | 2017-01-16T10:15:28 | 2017-01-13T08:33:07 | 2017-01-16T10:11:16 | 2017-01-16T10:15:18 | 36 | 0 | 0 | 0 | CSS | null | null | from django.conf.urls import url
from django.contrib import admin
# urlpatterns must be a list, and '^$auth/' could never match (nothing follows '$')
urlpatterns = [
    url(r'^auth/$', 'loginsys.views.auth', name='auth'),
    # url(r'^logout/$', 'loginsys.views.index', name='logout'),
    # url(r'^log/$', 'blog.views.log', name='log'),
]
Computational-Biology-TUe/tissuespecific | 9,964,324,167,720 | 8e93bf17aa82c52c79a1c7ede498f076fd9376d2 | f6025b659614c29f653b005274e3d53cf525cf63 | /tissuespecific/sarcoModel/individual.py | 90a18e271beb725046e1b03a6adf5f7c5c3accb2 | []
| no_license | https://github.com/Computational-Biology-TUe/tissuespecific | 6d73db718f03bcec8574d6d0feaaab9546b183a3 | 16eb8ed81855c1b077e2f0d1e2172d76da2c5c8f | refs/heads/master | 2020-04-08T16:33:18.044671 | 2018-08-08T12:41:51 | 2018-08-08T12:41:51 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 15 15:01:39 2017
@author: acabbia
"""
class Individual(object):
'''
class to store input data about a single simulated individual
'''
proteinLeanFactor = 0.23 ## % proteins in the muscle (by weight)
ATPconversion = 20 # kcal / Mol ATP
def __init__(self, name , isMale , age , weight):
'''
name = Individual ID
isMale = bool
age = age in years
weight = weight in kg
returns an object of class Individual
'''
self.name = name
self.isMale = bool(isMale)
self.age = age
self.weight = weight
def set_fat_ratio(self, fat_ratio):
# sets fat lean ratio
self.fat_ratio = fat_ratio
def set_uptakeFactor(self, uptakeFactor):
# % of nutrients adsorbed from diet (function of age)
self.uptakeFactor = uptakeFactor
def set_leanMass_maintenance_cost(self, lean_maintenance):
# set lean mass maintenance cost (kcal/g)
self.leanMass_maintenance = lean_maintenance
def set_activityfactor(self, activityFactor): ## TO DO : accept input as category (i.e sedentary/ active...) or directly as factor
'''
Sedentary (little or no exercise, desk job).
BMR x 1.2
Lightly Active (light exercise/sports 3-5 days/week).
BMR x 1.3-1.4
Moderately Active (moderate exercise/sports 3-5 days/week).
BMR x 1.5-1.6
Very Active (hard exercise/sports 6-7 days per week).
BMR x 1.7-1.8
Extremely Active (very hard daily exercise/sports and physical job or 2/day training).
BMR x 1.9-2.0
'''
self.activityFactor = activityFactor
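        # Illustrative usage (hypothetical subject, values chosen for the example):
        #   p = Individual('subj01', isMale=True, age=70, weight=75)
        #   p.set_fat_ratio(0.3)
        #   p.set_activityfactor(1.2)  # sedentary, per the table above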
class Settings(object):
'''
class to store simulation settings
'''
def __init__ (self, model, simLength, timeStep):
'''
        model = a COBRA model structure
simLength = length of the sim in n(timesteps)
timeStep = simulation increments in days
proteins_MW = vector of molecular weights of the muscle proteins
'''
self.model = model
self.simLength = simLength
self.timeStep = timeStep
def proteins_MW(self,MW):
self.proteins_MW = sum(MW)
class Food(object):
'''
class to normalize and store data about nutrients uptake bounds
'''
def __init__(self, daily_bounds_dict, Settings, Individual):
'''
daily_bounds_dict = dict {EX_reaction_name : flux value}
Settings = Settings object
Individual = Individual object
returns:
normalized uptake bounds over the time step
'''
self.uptake_bounds = { k : Settings.timeStep * (v * (1 - Individual.fat_ratio)) for k , v in daily_bounds_dict.items()}
def load_bounds_file(path):
import pandas as pd
intake_bounds = pd.read_csv(path, index_col = 0)
daily_bounds_dict= dict(zip(intake_bounds.reaction, intake_bounds.fluxValue))
return daily_bounds_dict
| UTF-8 | Python | false | false | 3,347 | py | 43 | individual.py | 41 | 0.560502 | 0.546758 | 0 | 108 | 29.981481 | 135 |
Nji-Mariette/p2-25-coding-challenges-ds | 5,549,097,773,208 | d47da1cc092750cb322368b758b4a032f46f7631 | 4c8a532a691322786858aa840c736ac59ebc58fc | /Mariette/exercise_34.py | 583e8943402d12a728e79e016b970c94b07d79b9 | []
| no_license | https://github.com/Nji-Mariette/p2-25-coding-challenges-ds | 34e124703237c8722ce217f2674bd73b616c9326 | c995a13a0af48caec751c94357d9f2bcafd49ae8 | refs/heads/main | 2023-06-29T17:07:04.559192 | 2021-08-03T12:54:36 | 2021-08-03T12:54:36 | 387,260,628 | 0 | 0 | null | true | 2021-07-18T20:09:48 | 2021-07-18T20:09:47 | 2021-07-13T07:30:15 | 2021-07-13T07:30:12 | 0 | 0 | 0 | 0 | null | false | false | def array_of_words(text):
return text.split()
print(array_of_words("Returns each word as element of array")) | UTF-8 | Python | false | false | 113 | py | 8 | exercise_34.py | 8 | 0.725664 | 0.725664 | 0 | 4 | 27.5 | 62 |
Midnight1Knight/HSE-course | 7,215,545,101,971 | 5d11a6b52be30798e9e98ddf82267c713f12ab9e | 7e7e2c5d327a518a03b2307f7f3ece37517fa361 | /ThirdWeek/Task2.py | f2c06cfe0ee8ab88a4654960f5c5d7e5bdd95925 | []
| no_license | https://github.com/Midnight1Knight/HSE-course | 0fdd995f2e8bf98ecd5fc4ecbcd503e6ef2150ab | 9b79c359fc65d260e3d454de5464abd5c89be770 | refs/heads/master | 2022-09-24T14:36:29.472005 | 2020-06-04T17:11:37 | 2020-06-04T17:11:37 | 269,414,804 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | n = int(input())
i = 1
a = n
b = 0
while i != a + 1:
n = (1 / i**2)
b += n
i += 1
print(b)
| UTF-8 | Python | false | false | 103 | py | 118 | Task2.py | 116 | 0.359223 | 0.300971 | 0 | 9 | 10.444444 | 18 |
sergiokv13/advent-2020 | 5,385,889,027,168 | 6e8a60706f50ce30e8ff7aa1e07062d7fbcb8b82 | 50ebb2555c62f1aee5dfee3b683327685ada9b04 | /advent-2021/day_11/day_11.py | 77c149cdcbfe2c1431edb6d38d0ec1436ab9a407 | []
| no_license | https://github.com/sergiokv13/advent-2020 | 781aefc15208004df12bb2076d1ff131315d9aae | 6d4a3f2f0bf66003ccd384835dde9b838ff48c4c | refs/heads/master | 2023-02-05T00:43:12.787154 | 2022-12-25T07:29:47 | 2022-12-25T07:29:47 | 317,715,024 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import fileinput
from collections import defaultdict
def print_matrix(m):
for row in m:
print(row)
print()
def getp(matrix, i, j, default = None):
try:
if i < 0 or j < 0: return default
return matrix[i][j]
except:
return default
def increment_one(m):
to_propagate = []
for j in range(len(m)):
for i in range(len(m[j])):
m[i][j] += 1
if m[i][j] > 9: to_propagate.append((i,j))
return to_propagate
def propagate(m, to_prop):
    visited = defaultdict(bool)
for p in to_prop:
visited[p] = True
while(to_prop):
i, j = to_prop.pop()
# propagate to neighboors
for pos in [(i+1, j), (i-1, j), (i, j+1), (i, j-1), (i+1, j+1), (i-1, j-1), (i+1, j-1), (i-1, j+1)]:
if getp(m, pos[0], pos[1]) is not None:
m[pos[0]][pos[1]] += 1
if m[pos[0]][pos[1]] > 9 and not visited[pos]:
to_prop.append(pos)
visited[pos] = True
return m
def count_and_reset(m):
count = 0
for j in range(len(m)):
for i in range(len(m[j])):
if m[i][j] > 9:
count += 1
m[i][j] = 0
return count
def perform_step(m):
to_propagate = increment_one(m)
propagate(m, to_propagate)
count = count_and_reset(m)
return count, count == len(m) * len(m[0])
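# One step: +1 energy everywhere, cascade flashes across the 8-neighbourhood,
# then reset every octopus that exceeded 9; the bool flags a whole-grid flash.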
def perform_n_steps(m, steps):
count = 0
for _i in range(steps):
step_count, _f = perform_step(m)
count += step_count
return count
oct = []
for line in fileinput.input():
oct.append([int(el) for el in line.strip()])
# First Star
print(perform_n_steps(oct, 100))
# Second Star
finished = False
steps = 0
while(not finished):
steps+=1
_count, finished = perform_step(oct)
print(steps + 100)
| UTF-8 | Python | false | false | 1,671 | py | 103 | day_11.py | 98 | 0.5769 | 0.554159 | 0 | 78 | 20.410256 | 104 |
xtianmcd/diff_priv | 8,555,574,876,175 | 38b72765ad6db4a1caf34e84fb2da487f816f970 | 36d1215a3a746eb057e0d6087ccd8229d03a0515 | /logitstic_regression/DP-LogisticReg.py | 3ea78e36ee5f67ba658018e547cff4e20f865d55 | []
| no_license | https://github.com/xtianmcd/diff_priv | 26ec4c21e82d6a28130d50633ab07948d200e757 | 2a7c0a38970d972bda5a0678c436c4511fc74ccf | refs/heads/master | 2020-05-05T04:07:58.759236 | 2019-04-19T20:53:12 | 2019-04-19T20:53:12 | 179,698,691 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from joblib import Memory
from sklearn.datasets import load_svmlight_file
from scipy.sparse import hstack
import matplotlib.pyplot as plt
import math
import numpy as np
import copy
from sklearn.linear_model import LogisticRegression
mem = Memory("./mycache")
@mem.cache
def get_data():
#source: https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_svmlight_file.html
tr_data = load_svmlight_file("adult_train.txt")
te_data = load_svmlight_file("adult_test.t")
tr_dummy = np.ones((tr_data[0].shape[0],1))
te_dummy = np.ones((te_data[0].shape[0],1))
trn_data = hstack((tr_dummy,tr_data[0])).toarray()
tst_data = hstack((te_dummy,te_data[0],te_dummy)).toarray()
return trn_data, tr_data[1], tst_data, te_data[1]
def sigma(x):
#overflow avoidance source: https://stackoverflow.com/questions/23128401/overflow-error-in-neural-networks-implementation
signal = np.clip(x, -500, 500 )
sigmoid = 1/(1+np.exp(-signal))
return sigmoid
def ComputeGradient(b_vec,xy_pairs):
n = len(xy_pairs)
loss_grad = 1/n * \
np.sum([np.multiply(sigma(pair[1]*\
np.dot(np.transpose(b_vec),pair[0]))-1,
np.multiply(pair[1],pair[0])) for pair in xy_pairs],axis=0)
return loss_grad
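# i.e. grad = (1/n) * sum_i (sigma(y_i * b^T x_i) - 1) * y_i * x_i, the analytic
# gradient of the mean logistic loss that ApproxGrad cross-checks by finite differences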
def ApproxGrad(b_vec,xy_pairs,h=10**-5):
n = len(xy_pairs)
approx_grad=[]
# print(b_vec)
for i in range(len(b_vec)):
b_plus = copy.deepcopy(b_vec)
b_plus[i] = b_plus[i]+h
b_minus = copy.deepcopy(b_vec)
b_minus[i] = b_minus[i]-h
grad_i = (((-1/n) * \
sum([np.log(sigma(pair[1]*np.dot(np.transpose(b_plus),pair[0]))) \
for pair in xy_pairs])) - ((-1/n) * \
sum([np.log(sigma(pair[1]*np.dot(np.transpose(b_minus),pair[0]))) \
for pair in xy_pairs])))/(2*h)
approx_grad.append(grad_i)
return approx_grad
def dp_sensitivity(b_vec,xy_pairs):
# neighbors=[]
# for col in range(b_vec.shape[0]):
# neighb = copy.deepcopy(b_vec)
# neighb[col] = 0
# neighbors.append(neighb)
max_grad_dj = np.linalg.norm(1)*1
sens = np.max([max_grad_dj for dj in range(len(xy_pairs))])
print(f"Sensitivity Calculation for Differential Privacy: {sens}")
return sens
def NoisyGradient(b_vec, xy_pairs, sensitivity, ep, delta=10**-5):
    var = sensitivity/ep * math.sqrt(2*math.log(1.25/delta))  # the std dev sigma of the Gaussian mechanism
    # np.random.normal's scale parameter is a standard deviation, not a variance
    Y = np.random.normal(0, var, xy_pairs[0][0].shape[0])#*np.identity(xy_pairs[0][0].shape[1])
grad_tilda = np.reshape(np.add(np.transpose\
(ComputeGradient(b_vec,xy_pairs))[0],Y),(b_vec.shape[0],1))
return grad_tilda
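# Background: the Gaussian mechanism adds N(0, sigma**2 I) noise with
# sigma = sensitivity/epsilon * sqrt(2*ln(1.25/delta)), which yields
# (epsilon, delta)-differential privacy for epsilon in (0, 1).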
def ComputeLoss(b_vec, xy_pair):
# l_p = np.log(1+np.exp(-xy_pair[1]*np.dot(np.transpose(b_vec),xy_pair[0])))
l_p = np.log(sigma(xy_pair[1]*np.dot(np.transpose(b_vec),xy_pair[0])))
return l_p
def backtrack(b_vector,xy_pairs,grad,s=0.9,a=0.4):
n=1
while (-1/(len(xy_pairs)) * \
sum([ComputeLoss(np.subtract(b_vector,np.multiply(n,grad)),\
xy_pairs[i]) for i in range(len(xy_pairs))]))\
> (-1/(len(xy_pairs)) * \
sum([ComputeLoss(b_vector,xy_pairs[i])\
- (a*n)*np.linalg.norm(grad)**2 for i in range(len(xy_pairs))])):
n *= s
return n
def Pred(x_test_i, beta):
y_hat_i = round(1/(1+np.exp(-(np.dot(np.transpose(beta),x_test_i))))[0])
if y_hat_i==0: y_hat_i=-1
return y_hat_i
def logistic_reg_train(x,y,x_tst,y_tst,iters=100,epsilon=10**-10,
thresh=10**-6,glob_sens=None,dp=False,dp_ep=0.5):
if not dp:
with open('./gradient_diffs.txt','w') as gd:
gd.write('\n---------- Calculated/Approx. Gradient Differences ----------\n\n')
xy = [[np.reshape(x[i],(x.shape[1],1)),y[i]] for i in range(x.shape[0])]
xy_test = [[np.reshape(x_tst[i],(x_tst.shape[1],1)),y_tst[i]]\
for i in range(x_tst.shape[0])]
beta_t = np.reshape(np.zeros(x.shape[1]),(x.shape[1],1)) #np.reshape(np.random.normal(0,0.01,x.shape[1]),(x.shape[1],1))
if not glob_sens: glob_sens = dp_sensitivity(beta_t,xy)#0.011070701960557393#
if dp: g_t = NoisyGradient(beta_t,xy,glob_sens,dp_ep)
else: g_t = ComputeGradient(beta_t,xy)
alpha_t = backtrack(beta_t,xy,g_t)
l_new = -1/(len(xy))*sum([ComputeLoss(beta_t,xy[i])\
for i in range(x.shape[0])])
tr_ax=[]
te_ax=[]
objectives=[]
obj_test=[]
ex=0
for k in range(iters):
if dp: g_t = NoisyGradient(beta_t,xy,glob_sens,dp_ep)
else: g_t = ComputeGradient(beta_t,xy)
if k+1 in [2,20,200] and not dp:
ex+=1
approx_grad = ApproxGrad(beta_t,xy)
grad_diffs = np.mean(np.subtract(g_t,approx_grad))
print(f"Gradient Example {ex} - On iteration {k}, the average difference between the calculated gradient and the gradient approximation is {grad_diffs}.")
with open('./gradient_diffs.txt','a') as gd:
gd.write(f'++++ Gradient Example {ex}; Training Iteration {k} ++++\nMEAN DIFFERENCE: \t{grad_diffs}\n\nActual Differences: {np.subtract(g_t,approx_grad)}\nb-vector: \n{np.reshape(beta_t,(1,len(beta_t)))}\nCalc. Grad.: \n{np.reshape(g_t,(1,len(g_t)))}\nApprox. Grad.: \n{approx_grad}\n')
if np.linalg.norm(g_t) < epsilon: return {'beta':beta_t,'loss':l_new,
'iters':k,'step':alpha_t,
'exit':'1','train_acc':tr_ax,
'test_acc':te_ax,
'train_obj':objectives,
'test_obj':obj_test,
'sensitivity':glob_sens}
l_t = -1/(len(xy))*sum([ComputeLoss(beta_t,xy[i])\
for i in range(x.shape[0])])
alpha_t = backtrack(beta_t,xy,g_t)
print(f'step size on iter {k+1}: {alpha_t}')
beta_new = np.subtract(beta_t,np.multiply(alpha_t,g_t))
l_new = -1/(len(xy))*sum([ComputeLoss(beta_new,xy[i])\
for i in range(x.shape[0])])
if not dp:
if (l_t - l_new) <= thresh: return {'beta':beta_t,'loss':l_new,
'iters':k,'step':alpha_t,'exit':'2',
'train_acc':tr_ax,'test_acc':te_ax,
'train_obj':objectives,
'test_obj':obj_test,
'sensitivity':glob_sens}
beta_t = beta_new
tr_acc = sum([1 for predxn in range(x.shape[0])\
if Pred(x[predxn],beta_t) == y[predxn]]) / x.shape[0] * 100
te_acc = sum([1 for predxn in range(x_tst.shape[0])\
if Pred(x_tst[predxn],beta_t) == y_tst[predxn]])\
/ x_tst.shape[0] * 100
l_tst = -1/(len(xy))*sum([ComputeLoss(beta_new,xy[i])\
for i in range(x.shape[0])])
tr_ax.append(tr_acc)
te_ax.append(te_acc)
objectives.append(l_new[0][0])
obj_test.append(l_tst[0][0])
return {'beta':beta_t,'loss':l_new,'iters':k,'step':alpha_t,'exit':'3',
'train_acc':tr_ax,'test_acc':te_ax,'train_obj':objectives,
'test_obj':obj_test,'sensitivity':glob_sens}
def plot_training(trn_acc, tst_acc, obj_vals, obj_tv, dp=False,ep=0.5):
tr_maxiter = trn_acc.index(np.max(trn_acc))
te_maxiter = tst_acc.index(np.max(tst_acc))
f,axrow = plt.subplots(1,2)
axrow[0].plot(range(len(trn_acc)),trn_acc, label="Training Acc")
axrow[0].plot(range(len(tst_acc)),tst_acc, label="Testing Acc", color='g')
axrow[0].set(xlabel='Training Epochs',ylabel='Accuracy')
axrow[0].set_title("Accuracy")
axrow[0].set_ylim(0,100)
axrow[0].annotate(f'{tr_maxiter},{np.max(trn_acc):.2f}%', xy=(tr_maxiter,
np.max(trn_acc)),xytext=(tr_maxiter+0.04*len(trn_acc),
np.max(trn_acc)+7),arrowprops=dict(facecolor='black', shrink=0.05),
)
axrow[0].annotate(f'{te_maxiter},{np.max(tst_acc):.2f}%', xy=(te_maxiter,
np.max(tst_acc)),xytext=(te_maxiter+0.01*len(tst_acc),
np.max(tst_acc)+10),arrowprops=dict(facecolor='black', shrink=0.05),
)
#annotation source; https://matplotlib.org/users/annotations_intro.html
axrow[0].legend(loc='lower center', bbox_to_anchor=(0.5, 1.05),
ncol=2, fancybox=True, shadow=True)
axrow[1].plot(range(len(obj_vals)),obj_vals, label="Training Loss")
axrow[1].plot(range(len(obj_tv)),obj_tv, label="Testing Loss",color='g')
axrow[1].set(xlabel='Training Epochs',ylabel="Loss")
axrow[1].set_title('Objective Function')
axrow[1].legend(loc='upper right', bbox_to_anchor=(1, 1),
ncol=1, fancybox=True, shadow=True)
#legend source: https://pythonspot.com/matplotlib-legend/
if dp: plt.savefig(f'./dp_plot_{ep}ep.png')
else: plt.savefig('./nonpriv_plot.png')
return
def verify_performance(X_trn,y_trn,X_tst,y_tst):
logreg = LogisticRegression().fit(X_trn[:,1:], y_trn)
logr_pred = logreg.predict(X_tst[:,1:])
beta=logreg.coef_
b0 = logreg.intercept_
acc = 100 - (sum([1 for predxn in range(len(logr_pred))\
if logr_pred[predxn]!= y_tst[predxn]])/len(X_tst)*100)
return acc
if __name__ == "__main__":
X_tr, y_tr, X_te, y_te = get_data()
print(X_tr.shape,y_tr.shape,X_te.shape,y_te.shape)
logreg = logistic_reg_train(X_tr,y_tr,X_te,y_te,500)
print(f"\nFinal Loss: \t\t{logreg['loss']}\nNumber of Iterations: \t{logreg['iters']+1}\nFinal Step Size: \t{logreg['step']}\nReturned at Breakpoint {logreg['exit']}\n")
# print(f"max testing acc: {np.max(te_accuracy)}")
# print(f"training acc: {np.max(tr_accuracy)}")
# with open('accuracies.txt','a') as accs:
# accs.write(f"Testing\n{te_accuracy}\n")
# accs.write(f"Training\n{tr_accuracy}\n")
acc = sum([1 for predxn in range(X_te.shape[0])\
if Pred(X_te[predxn],logreg['beta']) == y_te[predxn]])\
/ X_te.shape[0] * 100
print ("+ **************************** +")
print(f"| FINAL TEST ACCURACY: {acc:.3f}% |")
print(f"| MAX TEST ACCURACY: {np.max(logreg['test_acc']):.3f}% |")
print ("+ **************************** +")
with open('logreg_performance.txt','w') as perf:
perf.write(f"\nFinal Loss: \t\t{logreg['loss']}\nNumber of Iterations: \t{logreg['iters']+1}\nFinal Step Size: \t{logreg['step']}\nReturned at Breakpoint {logreg['exit']}\nFinal Test Accuracy: {acc:.3f}%\n Max Test Accuracy: {np.max(logreg['test_acc']):.3f}%")
plot_training(logreg['train_acc'],logreg['test_acc'],
logreg['train_obj'],logreg['test_obj'])
skl_acc = verify_performance(X_tr,y_tr,X_te,y_te)
print(f"\nFor verification, Scikit-Learn Logistic Regression achieved {skl_acc:.3f}% accuracy.")
epsilons = [0.1,0.5,1.0,1.5]
for e in epsilons:
dp_logreg = logistic_reg_train(X_tr,y_tr,X_te,y_te,iters=15,glob_sens=logreg['sensitivity'],dp=True,dp_ep=e)
with open('logreg_performance.txt','a') as perf:
perf.write(f"\nDiff. Private Run, epsilon={e}\nSensitivity: \t{dp_logreg['sensitivity']}\nFinal Loss: \t\t{dp_logreg['loss']}\nNumber of Iterations: \t{dp_logreg['iters']+1}\nFinal Step Size: \t{dp_logreg['step']}\nReturned at Breakpoint \t{dp_logreg['exit']}\nFinal Test Accuracy: \t{acc:.3f}%\nMax Test Accuracy: \t{np.max(dp_logreg['test_acc']):.3f}%")
print(f"\n\nDiff. Private Run, epsilon={e}\nSensitivity: \t{dp_logreg['sensitivity']}\nFinal Loss: \t\t{dp_logreg['loss']}\nNumber of Iterations: \t{dp_logreg['iters']+1}\nFinal Step Size: \t{dp_logreg['step']}\nReturned at Breakpoint \t{dp_logreg['exit']}\nFinal Test Accuracy: \t{acc:.3f}%\nMax Test Accuracy: \t{np.max(dp_logreg['test_acc']):.3f}%")
plot_training(dp_logreg['train_acc'],dp_logreg['test_acc'],
dp_logreg['train_obj'],dp_logreg['test_obj'],dp=True,ep=e)
| UTF-8 | Python | false | false | 12,062 | py | 5 | DP-LogisticReg.py | 2 | 0.574863 | 0.553474 | 0 | 244 | 48.434426 | 367 |
ussenko2017/flask-server | 17,265,768,550,774 | c18043f722aef9be19c3d01cf1986e9b1477715c | 171582fb1a5740edc909734195afa7afcd7659f2 | /test.py | 2b47d5ec383991fc55734f6c6db0c0a2069ac5ba | []
| no_license | https://github.com/ussenko2017/flask-server | d2b26c5f4ac239ca9275b8d5767bc52947f0821d | b0b04aa67876ff5bf3e163b0e690f0d0ea777d8e | refs/heads/master | 2020-04-15T04:58:02.233713 | 2019-01-07T17:14:25 | 2019-01-07T17:14:25 | 164,403,842 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import requests,myFunc
for i in range(1000):
r = requests.get('http://127.0.0.1:5000/api/v1/add/',
params={'tablename':myFunc.OTDEL_TABLE,
myFunc.NAME_FIELD:myFunc.NAME_FIELD})
r1 = requests.get('http://127.0.0.1:5000/api/v1/add/',
params={'tablename':myFunc.STUDENT_TABLE,
myFunc.FIRSTNAME_FIELD:myFunc.FIRSTNAME_FIELD,
myFunc.LASTNAME_FIELD:myFunc.LASTNAME_FIELD,
myFunc.PATR_FIELD:myFunc.PATR_FIELD,
myFunc.NUMBER_FIELD: myFunc.NUMBER_FIELD,
myFunc.OTDEL_ID_FIELD:1
})
r2 = requests.get('http://127.0.0.1:5000/api/v1/add/',
params={'tablename':myFunc.PREDMET_TABLE,
myFunc.NAME_FIELD:myFunc.NAME_FIELD,
myFunc.KOLVO_CHASOV_FIELD:99,
})
r3 = requests.get('http://127.0.0.1:5000/api/v1/add/',
params={'tablename':myFunc.BALL_TABLE,
myFunc.BALL_FIELD:'1',
myFunc.PREDMET_ID_FIELD:'1',
myFunc.STUDENT_ID_FIELD:'1'})
messages = r.json()
messages1 = r1.json()
messages2 = r2.json()
messages3 = r3.json()
print(messages)
print(messages1)
print(messages2)
print(messages3)
| UTF-8 | Python | false | false | 1,410 | py | 7 | test.py | 6 | 0.508511 | 0.461702 | 0 | 31 | 44.451613 | 71 |
Jenniferrrrrr/skyportal | 11,321,533,816,036 | fb51bc7bac3cc63fa80251d42a416113e2012aee | 0608f8ab81403b724a33dc109b1a3cefdd2cd844 | /skyportal/tests/tools/test_offset_util.py | 07524e0df120ee8097a2fd4eb3a4bf2bc774d487 | [
"BSD-3-Clause"
]
| permissive | https://github.com/Jenniferrrrrr/skyportal | b6b9ab995bb64a59287e8b67bff1c44a316dee8a | 9c6ffe3b8387e46ac4400db546e116d85aef60af | refs/heads/master | 2022-09-16T17:56:23.917982 | 2020-05-05T07:26:44 | 2020-05-05T07:26:44 | 258,950,285 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pytest
import uuid
from skyportal.utils import (
get_nearby_offset_stars,
source_image_parameters, get_finding_chart, get_ztfref_url
)
def test_get_ztfref_url():
url = get_ztfref_url(123.0, 33.3, 2)
assert isinstance(url, str)
assert url.find("irsa") != -1
def test_get_nearby_offset_stars():
how_many = 3
rez = get_nearby_offset_stars(
123.0, 33.3, "testSource",
how_many=how_many,
radius_degrees=3 / 60.0
)
assert len(rez) == 4
assert isinstance(rez[0], list)
assert len(rez[0]) == how_many + 1
with pytest.raises(Exception):
rez = get_nearby_offset_stars(
123.0, 33.3, "testSource",
how_many=how_many,
radius_degrees=3 / 60.0,
allowed_queries=1,
queries_issued=2
)
def test_get_finding_chart():
rez = get_finding_chart(
123.0, 33.3, "testSource",
image_source='desi',
output_format='pdf'
)
assert isinstance(rez, dict)
assert rez["success"]
assert rez["name"].find("testSource") != -1
assert rez["data"].find(bytes("PDF", encoding='utf8')) != -1
rez = get_finding_chart(
123.0, 33.3, "testSource",
imsize=1.0
)
assert not rez["success"]
rez = get_finding_chart(
123.0, 33.3, "testSource",
image_source='zomg_telescope'
)
assert isinstance(rez, dict)
assert not rez["success"]
| UTF-8 | Python | false | false | 1,467 | py | 19 | test_offset_util.py | 17 | 0.572597 | 0.528971 | 0 | 65 | 21.553846 | 64 |
huayanqiaq/back | 11,897,059,435,077 | 858cc50d161f0b900b178d02f7dbbdbfc42d655f | aecd195ca7ff4594d4cec6b2019e6acc132a7baa | /ll.py | 57e9b06737d4ce9ad3d38de02305d12d1051af6f | []
| no_license | https://github.com/huayanqiaq/back | 023a57dafb12d897fc775e915ad79bd85cc9667a | 1eb1626ddc4b588e60f6eab8340f27e9c52f2d1e | refs/heads/master | 2021-01-18T19:42:48.965473 | 2017-04-01T11:24:12 | 2017-04-01T11:24:12 | 86,908,607 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from lxml import etree
import requests
file=open('1.txt','w+')
url="http://goods2goods.com/commonuser/getFormUsers.action"
req=requests.get(url=url,timeout=6)
content=req.content.decode('utf-8')
content_xpath=etree.HTML(content)
num=content_xpath.xpath('//tr/td[3]')
for i in num:
name=i.text.strip()
file.write(name+"\r\n")
file.close() | UTF-8 | Python | false | false | 360 | py | 28 | ll.py | 26 | 0.694444 | 0.680556 | 0 | 14 | 23.857143 | 59 |
ds-ga-1007/assignment8 | 11,536,282,203,868 | c7be2529412ad0e23cc3ad39be9bc1d21b9cb5a8 | 7974efcd3e1bad296d762e44153e8afaa8f6beef | /ps3336/test.py | 8f0858af08d567105d6dc426232bf57929fd08b1 | []
| no_license | https://github.com/ds-ga-1007/assignment8 | 2e455f9db7d11909f8aee45699504f3bf828058a | 5b904060e8bced7f91547ad7f7819773a7450a1e | refs/heads/master | 2020-07-02T07:54:18.029239 | 2017-01-02T19:46:54 | 2017-01-02T19:46:54 | 33,561,195 | 1 | 13 | null | false | 2015-05-06T20:30:48 | 2015-04-07T18:37:51 | 2015-05-06T20:30:48 | 2015-05-06T20:30:48 | 6,505 | 1 | 11 | 0 | Python | null | null | '''
Created on Nov 23, 2016
@author: peimengsui
@desc: test the investment class
'''
import unittest
from investment import investment
class Test(unittest.TestCase):
def test_constructor(self):
self.assertEqual(investment(10,10).num_positions,10)
self.assertEqual(investment(10,10).num_trials,10)
self.assertEqual(investment(10,10).position_value,100)
def test_simulate(self):
self.assertEqual(len(investment.simulate(investment(10,10))),10)
self.assertTrue(all(investment.simulate(investment(10,10))>=-1))
self.assertTrue(all(investment.simulate(investment(10,10))<=1))
if __name__ == "__main__":
unittest.main() | UTF-8 | Python | false | false | 684 | py | 266 | test.py | 208 | 0.690058 | 0.630117 | 0 | 21 | 31.619048 | 72 |
adreena/MyStudyCorner | 11,957,188,982,723 | 3658da32ea71a3031f4faf6f285857aa5af14be7 | 941c912f44beff33a072e086c1f561f6cdd64626 | /LeetCode/codes/67.py | 8c5757c6ccd0b7220efc5fc018021e9584048009 | []
| no_license | https://github.com/adreena/MyStudyCorner | 3a13a743769ed144965b767f547c16df4d0fa0dd | 355c0dbd32ad201800901f1bcc110550696bc96d | refs/heads/master | 2023-02-20T07:39:32.391421 | 2021-01-25T01:46:21 | 2021-01-25T01:46:21 | 255,104,133 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # time O(N)
# space O(N)
class Solution:
def addBinary(self, a: str, b: str) -> str:
i,j = len(a)-1, len(b)-1
carry = 0
output = ""
while i>=0 or j>=0:
v1, v2 = 0, 0
if i>=0:
v1 = int(a[i])
i-=1
if j>=0:
v2 = int(b[j])
j-=1
temp = v1+v2+carry
if temp ==2:
carry = 1
output = '0'+output
elif temp == 3:
carry = 1
output = '1'+output
else:
carry = 0
output = str(temp)+output
if carry==1:
output = str(carry)+output
return output
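# Example: Solution().addBinary("11", "1") returns "100"  (3 + 1 = 4 in binary)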
| UTF-8 | Python | false | false | 736 | py | 274 | 67.py | 252 | 0.347826 | 0.313859 | 0 | 28 | 25.285714 | 47 |
HernanFaustino/Flaskapp | 18,047,452,605,861 | 16e70fa2cc2b0703319b1fbdecf1ba1a9d9f4a8d | 122a3da328744f7c20619bc1fbcc55880f13d36e | /simpleapp.py | 58399988bb41aa0f1b09486f63230aef233e2922 | []
| no_license | https://github.com/HernanFaustino/Flaskapp | c29cfa725a24830b7a12247b9d71dd8ac9c7aa35 | 1beb7f2ea0f8ffc2ab7471e691a5581613847497 | refs/heads/master | 2020-04-05T13:15:34.205633 | 2018-11-09T17:29:34 | 2018-11-09T17:29:34 | 156,894,314 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask
import sys
import optparse
import time
app = Flask(__name__)
start = int(round(time.time()))
@app.route("/")
def hello_world():
return "Hello World from Distelli!"
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8000, debug=False)
| UTF-8 | Python | false | false | 276 | py | 1 | simpleapp.py | 1 | 0.641304 | 0.612319 | 0 | 16 | 16.25 | 51 |
motor-dev/Motor | 17,059,610,127,628 | 8be4728e9dce32669d5447bd23e492b4f3fbac77 | 68e2df11645278a9997eeae804a9a075585b59f2 | /mak/libs/pyxx/cxx/grammar/expression/primary/requires/general.py | 6c9a87fb4c8f0da51b582b9065598b4d3c86f409 | [
"BSD-3-Clause"
]
| permissive | https://github.com/motor-dev/Motor | df673aafcd4040a7ce7e6ef9301c38270982d544 | edd724bba99af63d938a0db165dec07403a40fb6 | refs/heads/master | 2023-07-22T10:19:26.028314 | 2023-07-07T14:01:20 | 2023-07-07T14:01:20 | 398,261,504 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
requires-expression:
requires requirement-parameter-list? requirement-body
requirement-parameter-list:
( parameter-declaration-clause )
requirement-body:
{ requirement-seq }
requirement-seq:
requirement
requirement-seq requirement
requirement:
simple-requirement
type-requirement
compound-requirement
nested-requirement
"""
import glrp
from typing import Any, List
from .....parse import CxxParser, cxx20, cxx20_merge
from ......ast.constraints import RequiresExpression, RequirementBody, AmbiguousRequirement
from ......ast.expressions import ErrorExpression
@glrp.rule('requires-expression : requires requirement-parameter-list? requirement-body')
@cxx20
def requires_expression_cxx20(self: CxxParser, p: glrp.Production) -> Any:
if p[1] is not None and p[2] is not None:
return RequiresExpression(p[1], p[2])
else:
return ErrorExpression()
@glrp.rule('requirement-parameter-list? : "(" parameter-declaration-clause ")"')
@cxx20
def requires_parameter_list_cxx20(self: CxxParser, p: glrp.Production) -> Any:
return p[1]
@glrp.rule('requirement-parameter-list? : "(" "#error" ")"')
@cxx20
def requires_parameter_list_error_cxx20(self: CxxParser, p: glrp.Production) -> Any:
return None
@glrp.rule('requirement-parameter-list? : ')
@cxx20
def requires_parameter_list_empty_cxx20(self: CxxParser, p: glrp.Production) -> Any:
return None
@glrp.rule('requirement-body : "{" requirement-seq "}"')
@cxx20
def requirement_body_cxx20(self: CxxParser, p: glrp.Production) -> Any:
return RequirementBody(p[1])
@glrp.rule('requirement-body : "{" begin-expression "#error" "}"')
@cxx20
def requirement_body_error_cxx20(self: CxxParser, p: glrp.Production) -> Any:
return None
@glrp.rule('requirement-seq : requirement')
@cxx20
def requirement_seq_end_cxx20(self: CxxParser, p: glrp.Production) -> Any:
return [p[0]]
@glrp.rule('requirement-seq : requirement-seq requirement')
@cxx20
def requirement_seq_cxx20(self: CxxParser, p: glrp.Production) -> Any:
result = p[0]
result.append(p[1])
return result
@glrp.rule('requirement : begin-expression simple-requirement')
@cxx20
def requirement_expression_cxx20(self: CxxParser, p: glrp.Production) -> Any:
return p[1]
@glrp.rule('requirement : begin-type-id type-requirement')
@cxx20
def requirement_type_cxx20(self: CxxParser, p: glrp.Production) -> Any:
return p[1]
@glrp.rule('requirement : compound-requirement')
@cxx20
def requirement_compound_cxx20(self: CxxParser, p: glrp.Production) -> Any:
return p[0]
@glrp.rule('requirement : nested-requirement')
@cxx20
def requirement_nested_cxx20(self: CxxParser, p: glrp.Production) -> Any:
return p[0]
@glrp.merge('requirement')
@cxx20_merge
def ambiguous_requirement(self: CxxParser, type_id: List[Any], expression: List[Any]) -> Any:
return AmbiguousRequirement(type_id + expression)
| UTF-8 | Python | false | false | 2,921 | py | 853 | general.py | 808 | 0.720301 | 0.697364 | 0 | 109 | 25.798165 | 93 |
ncrubin/reference-qvm | 2,688,649,570,088 | 3d793cfdd9632edc036ca89861e6d1c090edaa84 | eaeacb1f56a266f1c7551b36c61717cddb223c9e | /referenceqvm/tests/test_wavefunction.py | e563c4a52b40b7adc39f331b8765215ede3217eb | [
"Apache-2.0"
]
| permissive | https://github.com/ncrubin/reference-qvm | 4c625813d90a7e67853957c37d8f5825b5a418ad | 2ed67c0e26f433165de45dcda0bdb2dc2ea23489 | refs/heads/master | 2021-09-20T17:35:53.726897 | 2017-10-02T19:32:17 | 2017-10-02T19:32:17 | 105,916,404 | 1 | 0 | null | true | 2017-10-05T16:55:55 | 2017-10-05T16:55:55 | 2017-09-30T22:04:51 | 2017-10-02T19:32:18 | 233 | 0 | 0 | 0 | null | null | null | """
Testing the correctness of wavefunction() and unitary()
"""
from pyquil.quil import Program
from pyquil.gates import H as Hgate
from pyquil.gates import CNOT as CNOTgate
from pyquil.gates import X as Xgate
from pyquil.gates import I as Igate
from pyquil.gates import RX as RXgate
from pyquil.gates import RY as RYgate
from pyquil.gates import RZ as RZgate
from pyquil.gates import PHASE as PHASEgate
from pyquil.paulis import PauliTerm, exponentiate
import numpy as np
from referenceqvm.qvm_wavefunction import QVM_Wavefunction
from referenceqvm.qvm_unitary import QVM_Unitary
def test_initialize(qvm, qvm_unitary):
"""
can we initialize a qvm object
"""
assert isinstance(qvm, QVM_Wavefunction)
assert isinstance(qvm_unitary, QVM_Unitary)
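# Editor's note: `qvm` and `qvm_unitary` are pytest fixtures provided by a
# conftest.py that is not part of this file. A minimal sketch of what such a
# conftest could look like (the exact constructor arguments are an assumption):
#
#   import pytest
#   from referenceqvm.qvm_wavefunction import QVM_Wavefunction
#   from referenceqvm.qvm_unitary import QVM_Unitary
#
#   @pytest.fixture
#   def qvm():
#       return QVM_Wavefunction(...)   # arguments assumed
#
#   @pytest.fixture
#   def qvm_unitary():
#       return QVM_Unitary(...)        # arguments assumed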
def test_belltest(qvm):
"""
    Generate a Bell state on the reference QVM and compare its wavefunction against the exact amplitudes
"""
prog = Program().inst([Hgate(0), CNOTgate(0, 1)])
bellout, _ = qvm.wavefunction(prog)
bell = np.zeros((4, 1))
bell[0, 0] = bell[-1, 0] = 1.0 / np.sqrt(2)
assert np.allclose(bellout.amplitudes, bell)
def test_occupation_basis(qvm):
prog = Program().inst([Xgate(0), Xgate(1), Igate(2), Igate(3)])
state = np.zeros((2 ** 4, 1))
state[3, 0] = 1.0
meanfield_state, _ = qvm.wavefunction(prog)
assert np.allclose(meanfield_state.amplitudes, state)
def test_exp_circuit(qvm):
true_wf = np.array([[ 0.54030231-0.84147098j],
[ 0.00000000+0.j],
[ 0.00000000+0.j],
[ 0.00000000+0.j],
[ 0.00000000+0.j],
[ 0.00000000+0.j],
[ 0.00000000+0.j],
[ 0.00000000+0.j]])
create2kill1 = PauliTerm("X", 1, -0.25)*PauliTerm("Y", 2)
create2kill1 += PauliTerm("Y", 1, 0.25)*PauliTerm("Y", 2)
create2kill1 += PauliTerm("Y", 1, 0.25)*PauliTerm("X", 2)
create2kill1 += PauliTerm("X", 1, 0.25)*PauliTerm("X", 2)
create2kill1 += PauliTerm("I", 0, 1.0)
prog = Program()
for term in create2kill1.terms:
single_exp_prog = exponentiate(term)
prog += single_exp_prog
wf, _ = qvm.wavefunction(prog)
wf = np.reshape(wf.amplitudes, (-1, 1))
assert np.allclose(wf, true_wf)
def test_qaoa_circuit(qvm):
wf_true = [0.00167784 + 1.00210180e-05*1j, 0.50000000 - 4.99997185e-01*1j,
0.50000000 - 4.99997185e-01*1j, 0.00167784 + 1.00210180e-05*1j]
wf_true = np.reshape(np.array(wf_true), (4, 1))
prog = Program()
prog.inst([RYgate(np.pi/2)(0), RXgate(np.pi)(0),
RYgate(np.pi/2)(1), RXgate(np.pi)(1),
CNOTgate(0, 1), RXgate(-np.pi/2)(1), RYgate(4.71572463191)(1),
RXgate(np.pi/2)(1), CNOTgate(0, 1),
RXgate(-2*2.74973750579)(0), RXgate(-2*2.74973750579)(1)])
wf_test, _ = qvm.wavefunction(prog)
assert np.allclose(wf_test.amplitudes, wf_true)
def test_larger_qaoa_circuit(qvm):
square_qaoa_circuit = [Hgate(0), Hgate(1), Hgate(2), Hgate(3),
Xgate(0),
PHASEgate(0.3928244130249029)(0),
Xgate(0),
PHASEgate(0.3928244130249029)(0),
CNOTgate(0, 1),
RZgate(0.78564882604980579)(1),
CNOTgate(0, 1),
Xgate(0),
PHASEgate(0.3928244130249029)(0),
Xgate(0),
PHASEgate(0.3928244130249029)(0),
CNOTgate(0, 3),
RZgate(0.78564882604980579)(3),
CNOTgate(0, 3),
Xgate(0),
PHASEgate(0.3928244130249029)(0),
Xgate(0),
PHASEgate(0.3928244130249029)(0),
CNOTgate(1, 2),
RZgate(0.78564882604980579)(2),
CNOTgate(1, 2),
Xgate(0),
PHASEgate(0.3928244130249029)(0),
Xgate(0),
PHASEgate(0.3928244130249029)(0),
CNOTgate(2, 3),
RZgate(0.78564882604980579)(3),
CNOTgate(2, 3),
Hgate(0),
RZgate(-0.77868204192240842)(0),
Hgate(0),
Hgate(1),
RZgate(-0.77868204192240842)(1),
Hgate(1),
Hgate(2),
RZgate(-0.77868204192240842)(2),
Hgate(2),
Hgate(3),
RZgate(-0.77868204192240842)(3),
Hgate(3)]
prog = Program(square_qaoa_circuit)
wf_test, _ = qvm.wavefunction(prog)
wf_true = np.array([8.43771693e-05-0.1233845*1j, -1.24927731e-01+0.00329533*1j,
-1.24927731e-01+0.00329533*1j,
-2.50040954e-01+0.12661547*1j,
-1.24927731e-01+0.00329533*1j, -4.99915497e-01-0.12363516*1j,
-2.50040954e-01+0.12661547*1j, -1.24927731e-01+0.00329533*1j,
-1.24927731e-01+0.00329533*1j, -2.50040954e-01+0.12661547*1j,
-4.99915497e-01-0.12363516*1j, -1.24927731e-01+0.00329533*1j,
-2.50040954e-01+0.12661547*1j, -1.24927731e-01+0.00329533*1j,
-1.24927731e-01+0.00329533*1j,
8.43771693e-05-0.1233845*1j])
wf_true = np.reshape(wf_true, (2 ** 4, 1))
assert np.allclose(wf_test.amplitudes, wf_true)
| UTF-8 | Python | false | false | 5,862 | py | 14 | test_wavefunction.py | 12 | 0.502729 | 0.339304 | 0 | 143 | 39.993007 | 86 |
makjohansson/MySql_database | 4,776,003,654,617 | e83b93dceb8da519aea44d8c9f85d2902eccfa91 | 9d60c00cea2b7ca9a5876c6db0e91c3fbdfff99c | /view/resources/offers_handler_view.py | f47ef978e3bc6c1457059bcc349d8ff1b06db590 | []
| no_license | https://github.com/makjohansson/MySql_database | 42cf954a92cc6d030825147a6560e639ba2a8ccb | 05af9d209a1821c3f74d992da6f30f1598835e81 | refs/heads/master | 2023-02-26T18:57:07.402322 | 2021-02-04T15:05:58 | 2021-02-04T15:05:58 | 326,134,978 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from PyQt5 import QtCore
from PyQt5.QtWidgets import QFormLayout, QHBoxLayout, QLabel, QLineEdit, QMessageBox, QPushButton, QVBoxLayout, QWidget
class OffersHandler(QWidget):
"""QWidget to change or remove a offer related to a specific company
"""
def __init__(self, offer, city, offer_id, db_controller):
super().__init__()
self.setWindowModality(QtCore.Qt.ApplicationModal)
self.setGeometry(600, 330, 500, 150)
self.setWindowTitle("Change/Remove")
self.db_controller = db_controller
self.offer = offer
self.city = city
self.offer_id = offer_id
self.setup_gui()
def setup_gui(self):
"""QWidget layout created
"""
main_layout = QVBoxLayout()
edit_layout = QFormLayout()
btn_layout = QHBoxLayout()
self.edit_offer = QLineEdit()
self.edit_offer.setMinimumWidth(400)
self.edit_offer.setText(self.offer.text())
edit_layout.addRow(QLabel("Offer"), self.edit_offer)
main_layout.addLayout(edit_layout)
submit_btn = QPushButton("Submit")
submit_btn.clicked.connect(self.submit)
delete_btn = QPushButton("Delete")
delete_btn.clicked.connect(self.delete)
cancel_btn = QPushButton("Cancel")
cancel_btn.clicked.connect(self.cancel)
btn_layout.addWidget(cancel_btn)
btn_layout.addWidget(delete_btn)
btn_layout.addWidget(submit_btn)
main_layout.addLayout(btn_layout)
self.setLayout(main_layout)
def submit(self):
"""Update database with changes made
"""
self.offer.setText(self.edit_offer.text())
self.db_controller.update_offer(self.edit_offer.text(), self.offer_id)
self.close()
def delete(self):
"""Remove offer form the offer table
"""
check = QMessageBox.question(self, "Remove", f"Remove this offer?\n\n{self.offer.text()}")
if check == QMessageBox.Yes:
self.offer.setHidden(True)
self.db_controller.delete_offer(self.offer_id)
self.close()
def cancel(self):
"""Close this QWidget
"""
self.close()
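# Usage sketch (editor's addition; the concrete db_controller object is an
# assumption, it just has to offer update_offer()/delete_offer()):
#   handler = OffersHandler(offer_label, city, offer_id, db_controller)
#   handler.show()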
| UTF-8 | Python | false | false | 2,235 | py | 16 | offers_handler_view.py | 14 | 0.612528 | 0.604922 | 0 | 67 | 32.358209 | 119 |
omegafusion/Oxford-Hack-2019 | 18,580,028,538,120 | 00c31d3bfd8f0bd9b06896e2c4d4956697b31805 | 30fcde123432c3097d4ebd53a49adc8a6c43b75a | /src/gameobjects.py | b88a75e68159b3831a1d21e9efba9581ea2b3714 | []
| no_license | https://github.com/omegafusion/Oxford-Hack-2019 | 3bed0f120af57ae9c093cd62421fd1d06dda5348 | 4e54d489ce660deb6ac10549d1393292dc966bab | refs/heads/master | 2022-11-25T03:00:30.620446 | 2019-11-17T11:39:54 | 2019-11-17T11:39:54 | 222,096,312 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
import os
import pygame
import pymunk
from pymunk.vec2d import Vec2d
import functions
pygame.init()
dir_path = os.path.dirname(os.path.realpath(__file__))
playerBulletImg = pygame.image.load(os.path.join(dir_path, "playerBullet.png")).convert_alpha()
enemyBulletImg = pygame.image.load(os.path.join(dir_path, "enemyBullet.png")).convert_alpha()
class Entity(object):
def __init__(self, screen, space, entities):
self.screen = screen
self.space = space
self.entities = entities
def update(self, dt):
pass
def draw(self):
pass
def handleEvent(self, event):
pass
def remove(self):
try:
self.entities.remove(self)
except:
pass
try:
self.space.remove(self.body)
self.space.remove(self.shape)
except:
pass
def sidescroll(self):
self.body.position = (self.body.position[0]-2, self.body.position[1])
class Character(Entity):
def __init__(self, screen, space, entities, pos):
super().__init__(screen, space, entities)
mass = 1
self.height = 150
self.body = pymunk.Body(mass, pymunk.inf)
self.body.entity_ref = self
self.body.position = pos
self.x = pos[0]
self.shape = pymunk.Poly.create_box(self.body, (30,self.height), 5)
self.space.add(self.body, self.shape)
self.shape.collision_type = 5
self.target = TargetLine(self.screen, self.space, self.entities, self, 122, -11/24 * math.pi, math.pi * 7/24)
self.entities.append(self.target)
self.thrusting = False
self.imageIndex = 0
self.maxVel = 1500
self.minVel = -1000
dir_path = os.path.dirname(os.path.realpath(__file__))
self.gunSound = pygame.mixer.Sound(os.path.join(dir_path, "sounds/sound_effects/Gun3.wav"))
self.gunSound.set_volume(0.20)
self.health = 500.0
dir_path = os.path.dirname(os.path.realpath(__file__))
self.shldrImg = pygame.image.load(os.path.join(dir_path, 'playerShoulder.png'))
self.shldrImg = pygame.transform.scale(self.shldrImg, (256,256))
thrustImg1 = pygame.image.load(os.path.join(dir_path, 'body1.png'))
thrustImg1 = pygame.transform.scale(thrustImg1, (256,256))
thrustImg2 = pygame.image.load(os.path.join(dir_path, 'body2.png'))
thrustImg2 = pygame.transform.scale(thrustImg2, (256,256))
thrustImg3 = pygame.image.load(os.path.join(dir_path, 'body3.png'))
thrustImg3 = pygame.transform.scale(thrustImg3, (256,256))
self.sheetThrust = [thrustImg1, thrustImg2, thrustImg3, thrustImg2]
idleImg1 = pygame.image.load(os.path.join(dir_path, 'idle1.png'))
idleImg1 = pygame.transform.scale(idleImg1, (256,256))
idleImg2 = pygame.image.load(os.path.join(dir_path, 'idle2.png'))
idleImg2 = pygame.transform.scale(idleImg2, (256,256))
self.sheetIdle = [idleImg1,idleImg2]
self.armImg = pygame.image.load(os.path.join(dir_path, 'playerArm.png'))
self.armImg = pygame.transform.scale(self.armImg, (256,256))
def update(self, dt):
self.body.position = (self.x, self.body.position[1])
topOfScreen = False
if self.body.position[1] > (self.screen.get_size()[1] - (self.height / 2)):
# if character is about to reach the top of the screen
self.body.position = (self.body.position[0], self.screen.get_size()[1] - (self.height / 2))
topOfScreen = True
if pygame.key.get_pressed()[pygame.K_SPACE]:
self.thrusting = True
if not topOfScreen:
self.body.apply_force_at_local_point(Vec2d(0,8000), self.body.center_of_gravity)
else:
self.thrusting = False
self.body.apply_force_at_local_point(Vec2d(0,-5000), self.body.center_of_gravity)
if self.body.velocity[1] > self.maxVel:
self.body.velocity = (self.body.velocity[0], self.maxVel)
elif self.body.velocity[1] < self.minVel:
self.body.velocity = (self.body.velocity[0], self.minVel)
def takeDamage(self, damage):
self.health -= damage
if self.health <= 0:
#print("You were killed!")
return False
else:
#print("Your health was reduced to",round(self.health))
return True
def draw(self):
x,y = functions.convert(self.body.position)
if self.thrusting:
self.imageIndex += 0.5
self.screen.blit(self.sheetThrust[int(self.imageIndex%4)], (x-100, y-120))
functions.rotate(self.screen, self.armImg, (x-4,y-24), (96,96), -math.degrees(self.target.currAngle))
else:
self.imageIndex += 0.25
self.screen.blit(self.sheetIdle[int(self.imageIndex%2)], (x-100, y-120))
functions.rotate(self.screen, self.armImg, (x-4,y-24), (96,96), -math.degrees(self.target.currAngle))
self.screen.blit(self.shldrImg, (x-100, y-120))
class TargetLine(Entity):
def __init__(self, screen, space, entities, parent, length, minAngle, maxAngle):
super().__init__(screen, space, entities)
self.parent = parent
self.length = length
self.maxAngle = maxAngle
self.minAngle = minAngle
self.currAngle = (minAngle + maxAngle) / 2
self.time = 0
self.cooldown = 0
self.update(0)
def createProjectile(self):
if pygame.mixer.get_init() == None:
pygame.mixer.pre_init(44100, -16, 1, 512)
pygame.mixer.init()
pygame.mixer.Sound.play(self.parent.gunSound)
self.entities.append(Projectile(self.screen, self.space, self.entities, (self.endX,self.endY), 4000, self.parent.body.velocity, self.currAngle, 10, 125, True))
#def handleEvent(self, event):
# if event.type == pygame.KEYDOWN and event.key == pygame.K_d:
# self.createProjectile()
def update(self, dt):
#if pygame.key.get_pressed()[pygame.K_s]:
# if self.currAngle < self.maxAngle:
# self.currAngle += math.pi / 48
#if pygame.key.get_pressed()[pygame.K_w]:
# if self.currAngle > self.minAngle:
# self.currAngle -= math.pi / 48
self.centreX, self.centreY = functions.convert((self.parent.body.position[0]-4,self.parent.body.position[1]+24))
mouseX, mouseY = pygame.mouse.get_pos()
try:
angle = math.atan((mouseY-self.centreY) / (mouseX-self.centreX))
if angle >= self.minAngle and angle <= self.maxAngle:
self.currAngle = angle
elif angle < self.minAngle:
self.currAngle = self.minAngle
else:
self.currAngle = self.maxAngle
        except ZeroDivisionError:
            # mouseX == centreX makes the division blow up: aim straight vertical
            angle = math.pi/2
if pygame.mouse.get_pressed()[0] and self.cooldown <= 0:
self.createProjectile()
self.cooldown = 0.25
self.cooldown -= dt
self.endX = self.centreX + self.length*math.cos(self.currAngle+0.40489)
self.endY = self.centreY + self.length*math.sin(self.currAngle+0.40489)
def sidescroll(self):
pass
"""
def draw(self):
pygame.draw.line(self.screen, (255,255,255), (self.centreX, self.centreY), (self.endX, self.endY))
"""
class Projectile(Entity):
def __init__(self, screen, space, entities, pos, speed, parentVelocity, angle, radius, mass, friendly = False):
self.mass = mass
self.radius = radius
super().__init__(screen, space, entities)
self.body = pymunk.Body(self.mass, pymunk.moment_for_circle(self.mass, 0, radius))
self.body.entity_ref = self
self.body.position = functions.convert(pos)
self.body.velocity = (speed*math.cos(angle), parentVelocity[1]-speed*math.sin(angle))
self.shape = pymunk.Circle(self.body, radius)
if friendly:
self.shape.collision_type = 1
self.image = playerBulletImg
else:
self.shape.collision_type = 2
self.image = pygame.transform.scale(enemyBulletImg, (radius*2, radius*2))
self.space = space
self.space.add(self.body, self.shape)
# self.coll_handler = self.space.add_wildcard_collision_handler(1)
# self.coll_handler.begin = self.coll_begin
# self.removed = False
def coll_begin(self, arbiter, space, data):
if not self.removed:
self.removed = True
self.remove()
return True
def update(self, dt):
if self.body.position[0] < -5 or self.body.position[0] > self.screen.get_width() + 5 or self.body.position[1] < -5:
self.remove()
def draw(self):
self.screen.blit(self.image, functions.convert((self.body.position[0]-self.radius, self.body.position[1] + self.radius)))
class Floor(Entity):
def __init__(self, screen, space, entities, startX, length):
super().__init__(screen, space, entities)
self.shape = pymunk.Segment(self.space.static_body, (startX, 5), (startX+length, 5), 10)
self.shape.elasticity = 0.2
self.shape.friction = 0.8
self.body = self.shape.body
self.body.entity_ref = self
self.body.position = (startX, 5)
self.space.add(self.shape)
def sidescroll(self):
pass | UTF-8 | Python | false | false | 9,436 | py | 17 | gameobjects.py | 14 | 0.607991 | 0.580861 | 0 | 251 | 36.59761 | 167 |
fadebowaley/MO-App | 7,524,782,730,237 | 4a3aad6c82e89bb75c6593da5a2dfc4ca66e7af1 | b410a506bd4bdbbc55a770ab76e0507625ebb52e | /app/auth/email.py | 1e0c7f7d5c8d034099a9ce797b820b742c916e68 | []
| no_license | https://github.com/fadebowaley/MO-App | 6a219a984c89c2dd0601a6f3f3d46810b644dcb3 | b7de965390d69e349533765db0ac190e8a67684b | refs/heads/main | 2023-08-24T06:17:17.510741 | 2021-09-14T12:31:30 | 2021-09-14T12:31:30 | 403,637,924 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import render_template, current_app
from email.utils import formataddr
#from flask_babel import _
from app.email import send_email
from flask_login import current_user
def send_welcome_email(user):
send_email((f'{user.first_name} We are Excited as you are'),
sender = formataddr(('Congratulations - Brvcase Team', current_app.config['ADMINS'][0] )),
recipients=[user.email],
text_body=render_template('email/welcome_email.txt', user=user, ),
html_body=render_template('email/welcome_email.html', user=user,))
def send_confirmation_email(user):
token = user.get_confirmation_token()
send_email(('[No-reply] Confirm your Account'),
sender=current_app.config['ADMINS'][0],
recipients=[user.email],
text_body=render_template('email/confirm.txt', user=user, token=token, external=True),
html_body=render_template('email/confirm.html', user=user, token=token, external=True))
def send_reset_email(user):
token = user.get_reset_token()
send_email((f'{user.username} password reset from brvcase'),
sender = formataddr(('Brvcase password Reset', current_app.config['ADMINS'][0] )),
recipients=[user.email],
text_body=render_template('email/reset_password.txt', user=user, token=token, external=True),
html_body=render_template('email/reset_password.html', user=user, token=token, external=True))
def send_set_email(user):
token = user.get_reset_token()
send_email(('hello {}, - Welcome to brvcase ! '.format(user.username)),
sender = formataddr(('Brvcase Start Your Journey!', current_app.config['ADMINS'][0] )),
# sender=current_app.config['ADMINS'][0],
recipients=[user.email],
text_body=render_template('email/set_password.txt', user=user, current_user=current_user, token=token, external=True),
html_body=render_template('email/set_password.html', user=user, current_user=current_user, token=token, external=True), )
def send_invite_email(user):
token = user.get_reset_token()
send_email(('Invitation to join {} from {} {} '.format(current_user.vendor.company, current_user.first_name, current_user.last_name )),
sender = formataddr(('Brvcase - Invitation Card', current_app.config['ADMINS'][0] )),
recipients=[user.email],
text_body=render_template('email/send_invite.txt', user=user, current_user=current_user, token=token, external=True),
html_body=render_template('email/send_invite.html', user=user, current_user=current_user, token=token, external=True))
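# Usage sketch (editor's addition): these helpers are typically called from
# the auth views right after the matching event, e.g.
#   send_confirmation_email(new_user)   # after registration
#   send_reset_email(user)              # after a password-reset request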
| UTF-8 | Python | false | false | 2,947 | py | 76 | email.py | 26 | 0.625721 | 0.623685 | 0 | 57 | 50.473684 | 154 |
bgreni/PSRS_MPI | 14,345,190,795,186 | a2886fc07c1d33956aef72b0ade9a3cbcf55f1b9 | 91204540629bb73cd96daf7d9a03019bb3b27204 | /outputhandling/makecsv.py | c096a4c3a1f09411ec4c9d57ffeaaa8b187e813d | []
| no_license | https://github.com/bgreni/PSRS_MPI | 59649885e792894087d4da7cb66c074b2006e53f | 97c2c6b1483f5fd9fdc134728b43fa18082ef36f | refs/heads/master | 2023-04-30T13:15:22.561519 | 2021-05-08T05:57:37 | 2021-05-08T05:57:37 | 338,663,290 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
"""
Parses the raw output from the PSRS program into a nice csv table used to create graphs and tables
"""
if __name__ == '__main__':
    with open('test.out') as raw_file:
        content = raw_file.read().split('\n\n')
entries = [x.split('\n') for x in content]
for i in range(len(entries)):
for j in range(len(entries[i])):
entries[i][j] = entries[i][j].split(':')[1][1:]
stuff = []
for i in range(len(entries)):
e = entries[i]
if e[0] == '1':
stuff.append({
'cores': e[0],
'vector': e[1],
'time': e[2],
'p1time': None,
'p2time': None,
'p3time': None,
'p4time': None,
'speedup': 0,
'pertotalp1': None,
'pertotalp3': None,
'pertotalp4': None,
'speedpercore': None
})
else:
if int(e[0]) == 16:
ind = 5
else:
ind = int(e[0]) / 2
stuff.append({
'cores': e[0],
'vector': e[1],
'time': e[2],
'p1time': e[3],
'p2time': e[4],
'p3time': e[5],
'p4time': e[6],
'speedup': float(entries[int(i - ind)][2]) / float(e[2]),
'pertotalp1': (float(e[3]) / float(e[2])) * 100,
'pertotalp3': (float(e[5]) / float(e[2])) * 100,
'pertotalp4': (float(e[6]) / float(e[2])) * 100,
'speedpercore': (float(entries[int(i - ind)][2]) / float(e[2])) / float(e[0])
})
df = pd.DataFrame(stuff)
df.to_csv('out.csv')
| UTF-8 | Python | false | false | 1,739 | py | 17 | makecsv.py | 10 | 0.39908 | 0.367453 | 0 | 55 | 30.490909 | 98 |
richardsliu/testing | 11,020,886,097,863 | a6b45ab9018b7d64942c7840a0464ee6bd74f31d | 7b2394575410502a5853c3d5726fc41db889f164 | /py/kubeflow/testing/cd/update_kf_apps_test.py | 15960cd6fb1972bac3fc5f83ff14fa6a6776b852 | [
"Apache-2.0"
]
| permissive | https://github.com/richardsliu/testing | 9f05d579a4548fd62d17130bbd35c4f5aa3f654d | 97d2af6fd7e10d7ab2641a2b1a64ba0a3aaaf7ae | refs/heads/master | 2020-03-23T23:37:16.558768 | 2020-01-09T19:30:35 | 2020-01-09T19:30:35 | 140,356,108 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import difflib
import logging
import os
import pprint
import yaml
from kubeflow.testing.cd import update_kf_apps # pylint: disable=no-name-in-module
import pytest
def test_build_run():
this_dir = os.path.dirname(__file__)
template_file = os.path.abspath(os.path.join(this_dir, "..", "..", "..", "..",
"apps-cd", "runs",
"app-pipeline.template.yaml"))
with open(template_file) as hf:
    template = yaml.safe_load(hf)
app_spec = """
name: notebook-controller
params:
- name: "path_to_context"
value: "components/notebook-controller"
- name: "path_to_docker_file"
value: "components/notebook-controller/Dockerfile"
- name: "path_to_manifests_dir"
value: "jupyter/notebook-controller"
- name: "src_image_url"
value: "gcr.io/kubeflow-images-public/notebook-controller"
# The name of the repo containing the source
sourceRepo: kubeflow
"""
  app = yaml.safe_load(app_spec)
version_spec = """
name: master
# A tag to prefix image names with
tag: vmaster
repos:
- name: kubeflow
resourceSpec:
type: git
params:
- name: revision
value: master
- name: url
value: git@github.com:kubeflow/kubeflow.git
- name: manifests
resourceSpec:
type: git
params:
- name: revision
value: master
- name: url
value: git@github.com:kubeflow/manifests.git
"""
  version = yaml.safe_load(version_spec)
commit = "1234abcd"
run = update_kf_apps._build_run(template, app, version, commit) # pylint: disable=protected-access
with open(os.path.join("test_data", "notebook_controller.expected.yaml")) as hf:
    expected = yaml.safe_load(hf)
# Compare yaml dumps
# TODO(jlewi): Do we need a custom dump
actual_str = yaml.dump(run)
actual_lines = actual_str.splitlines()
expected_str = yaml.dump(expected)
expected_lines = expected_str.splitlines()
d = difflib.Differ()
result = d.compare(expected_lines, actual_lines)
line_diff = list(result)
message = pprint.pformat(line_diff)
assert actual_str == expected_str, message
def test_parse_git_url():
result = update_kf_apps._parse_git_url("git@github.com:kubeflow/manifests.git") # pylint: disable=protected-access
assert result == update_kf_apps.GIT_TUPLE("git@github.com", "kubeflow",
"manifests")
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format=('%(levelname)s|%(asctime)s'
'|%(pathname)s|%(lineno)d| %(message)s'),
datefmt='%Y-%m-%dT%H:%M:%S',
)
logging.getLogger().setLevel(logging.INFO)
pytest.main()
| UTF-8 | Python | false | false | 2,736 | py | 7 | update_kf_apps_test.py | 4 | 0.618421 | 0.616959 | 0 | 90 | 29.4 | 116 |
dantrevino/ferris-framework | 14,525,579,417,514 | 4e362bf6acbf66514929e20dc97d26c2053546f2 | b934f47abbd63d876f2bc257466333a1bcbc8219 | /ferris/core/ndb/model.py | e642d644514651c57dc47523284d5173da012096 | [
"Apache-2.0"
]
| permissive | https://github.com/dantrevino/ferris-framework | 10a3166d041d38ba0edcae1d80c737006821d8fd | 8f81058f7d3a355e47118e97e7f01dd8b643189a | refs/heads/master | 2021-07-18T03:06:55.455814 | 2013-03-21T12:52:40 | 2013-03-21T12:52:40 | 108,202,396 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Classes that extend the basic ndb.Model classes
"""
from google.appengine.ext import ndb
import types
import logging
class ModelMeta(ndb.model.MetaModel):
"""
Augments Models by adding the class methods find_all_by_x
and find_by_x that are proxies for find_all_by_properties and
find_by_properties respectively.
"""
def __init__(cls, name, bases, dct):
super(ModelMeta, cls).__init__(name, bases, dct)
if set(['beforeDelete', 'afterDelete', 'beforePut', 'afterPut']) & set(dct.keys()):
raise AttributeError('NDB Models use before_delete style callbacks')
for prop_name, property in cls._properties.items():
find_all_name = 'find_all_by_' + prop_name
def bind_all(name):
def find_all(cls, value):
args = {}
args[name] = value
return cls.find_all_by_properties(**args)
return types.MethodType(find_all, cls)
setattr(cls, find_all_name, bind_all(prop_name))
find_one_name = 'find_by_' + prop_name
def bind_one(name):
def find_one(cls, value):
args = {}
args[name] = value
return cls.find_by_properties(**args)
return types.MethodType(find_one, cls)
setattr(cls, find_one_name, bind_one(prop_name))
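# Example of what the metaclass generates (editor's sketch): given
#   class User(Model):
#       first_name = ndb.StringProperty()
# it attaches User.find_by_first_name('Jon') and
# User.find_all_by_first_name('Jon') as shortcuts for the
# find_by_properties/find_all_by_properties methods defined below.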
class Model(ndb.Model):
"""
Base class that augments ndb Models by adding easier find methods and callbacks.
"""
__metaclass__ = ModelMeta
@classmethod
def find_all_by_properties(cls, **kwargs):
"""
Generates an ndb.Query with filters generated from the keyword arguments.
Example::
User.find_all_by_properties(first_name='Jon',role='Admin')
is the same as::
User.query().filter(User.first_name == 'Jon', User.role == 'Admin')
"""
query = cls.query()
for name, value in kwargs.items():
property = cls._properties[name]
query = query.filter(property==value)
return query
@classmethod
def find_by_properties(cls, **kwargs):
"""
Similar to find_all_by_properties, but returns either None or a single ndb.Model instance.
Example::
User.find_by_properties(first_name='Jon',role='Admin')
"""
return cls.find_all_by_properties(**kwargs).get()
def before_put(self):
"""
Called before an item is saved.
:arg self: refers to the item that is about to be saved
:note: ``self.key`` is invalid if the current item has never been saved
"""
pass
def after_put(self, key):
"""
Called after an item has been saved.
:arg self: refers to the item that has been saved
:arg key: refers to the key that the item was saved as
"""
pass
@classmethod
def before_delete(cls, key):
"""
Called before an item is deleted.
:arg key: is the key of the item that is about to be deleted. It is okay to ``get()`` this key to interogate the properties of the item.
"""
pass
@classmethod
def after_delete(cls, key):
"""
Called after an item is deleted.
:arg key: is the key of the item that was deleted. It is not possible to call ``get()`` on this key.
"""
pass
@classmethod
def before_get(cls, key):
"""
Called after an item is retrieved.
:arg key: Is the key of the item that is to be retrieved.
"""
pass
@classmethod
def after_get(cls, key, item):
"""
Called after an item has been retrieved.
:arg key: Is the key of the item that was retrieved.
:arg item: Is the item itself.
"""
pass
# Impl details
def _pre_put_hook(self):
return self.before_put()
def _post_put_hook(self, future):
return self.after_put(future.get_result())
@classmethod
def _pre_delete_hook(cls, key):
return cls.before_delete(key)
@classmethod
def _post_delete_hook(cls, key, future):
return cls.after_delete(key)
@classmethod
def _pre_get_hook(cls, key):
return cls.before_get(key)
@classmethod
def _post_get_hook(cls, key, future):
return cls.after_get(key, future.get_result())
def __unicode__(self):
if hasattr(self, 'name'):
return self.name or super(Model, self).__str__()
else:
return super(Model, self).__str__()
def __str__(self):
return self.__unicode__()
class BasicModel(Model):
"""
Adds the common properties created, created_by, modified, and modified_by to :class:`Model`
"""
created = ndb.DateTimeProperty(auto_now_add=True)
created_by = ndb.UserProperty(auto_current_user_add=True)
modified = ndb.DateTimeProperty(auto_now=True)
modified_by = ndb.UserProperty(auto_current_user=True)
| UTF-8 | Python | false | false | 5,076 | py | 104 | model.py | 61 | 0.578408 | 0.578408 | 0 | 180 | 27.2 | 144 |
uwaa-ndcl/ACC_2020_Avant | 7,447,473,298,266 | 22d4ee1897413ec3555c098577fdbc824e25c5dc | 15cbaf14ef526d7ce8a2e7a874c5970aee441c75 | /net_filter/tools/unit_conversion.py | 7562c7412c83fdc423fe9b7fd5a97226eb7e9fe9 | []
| no_license | https://github.com/uwaa-ndcl/ACC_2020_Avant | 9c9068df2669c4b7fd2aca82d75cf61c8f00d9a5 | a7238f783adf7556f49dd13028b8dbaecd3f0e71 | refs/heads/master | 2023-03-07T11:04:45.717597 | 2021-02-26T05:29:18 | 2021-02-26T05:29:18 | 263,537,423 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
m_to_cm = 100
cm_to_m = .01
rad_to_deg = 180/math.pi
deg_to_rad = math.pi/180
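# Quick usage check (editor's addition):
#   1.5 * m_to_cm   == 150.0          (metres to centimetres)
#   90 * deg_to_rad == math.pi / 2    (degrees to radians)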
| UTF-8 | Python | false | false | 91 | py | 31 | unit_conversion.py | 28 | 0.648352 | 0.527473 | 0 | 6 | 14.166667 | 24 |
cbilgili/yukselauto | 10,428,180,603,670 | cff8f24bb908d5cc056c0073ef122bf546b4aa71 | a14ad3642b4789f11d8ac7a1ba070f07455cab2f | /mycmsproject/products/views.py | c8bf41be200e39d096c4761832b619294553c73c | []
| no_license | https://github.com/cbilgili/yukselauto | b1fcf014cab6bfba0fa31d85ded0be5a9236eb6f | 75304348587a3da93959f12d0a690a035056b4c7 | refs/heads/master | 2020-05-19T17:42:24.217523 | 2013-12-27T23:09:14 | 2013-12-27T23:09:14 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Create your views here.
# coding: utf-8
from django.template import RequestContext
from products.models import Product, Category, ProductType
from django.shortcuts import render_to_response, get_object_or_404, get_list_or_404
from django.views.decorators.csrf import csrf_exempt
from django.core.mail import send_mail, EmailMessage
from django.core.exceptions import ObjectDoesNotExist
def index(request):
if is_import_category(request):
product_type = product_type_obj = ProductType.objects.get(pk=1)
products = Product.objects.filter(category__product_type_id=product_type.id).order_by('-id')[:15]
else:
product_type = product_type_obj = ProductType.objects.get(pk=2)
products = Product.objects.filter(category__product_type_id=product_type.id).order_by('-id')[:15]
return render_to_response('products/index.html', {
'products': products,
'category_all': get_category_all(request)
}, context_instance=RequestContext(request))
#
def view_product(request, product_type, product_id, slug):
return render_to_response('products/product_view.html', {
'product': get_object_or_404(Product, pk=product_id),
'category_all': get_category_all(request)
}, context_instance=RequestContext(request))
def view_category(request, category_id, parentslug, slug, product_type):
product_type_obj = ProductType.objects.get(slug=product_type)
category = Category.objects.get(id=category_id)
return render_to_response('products/category_view.html', {
'products': category.product_set.all(),
'category': get_object_or_404(Category, pk=category_id),
'category_all': get_category_all(request)
}, context_instance=RequestContext(request))
def is_import_category(request):
return request.path.find('import') > 0
def get_category_all(request):
"""Bu methodla ürünlerin türüne göre sol menü için kategori verisi çekilir"""
#page_id = request.current_page.reverse_id
if is_import_category(request):
product_type_obj = ProductType.objects.get(pk=1)
else:
product_type_obj = ProductType.objects.get(pk=2)
return Category.objects.filter(product_type=product_type_obj)
def search_sub_category(request, category_id):
""" Ajax için gerekli """
from django.utils import simplejson
from django.http import HttpResponse
json = simplejson.dumps([{'pk': o.id,
'name': o.name} for o in Category.objects.filter(parent_id=category_id)])
return HttpResponse(json, mimetype='application/json')
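# Example response (editor's sketch): [{"pk": 3, "name": "Some subcategory"}]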
def search_product(request):
oem_query = request.GET.get('oem')
try:
if oem_query:
#results = Product.objects.filter(oem__icontains=oem_query).order_by('category__name', 'id') | Product.objects.filter(yuksel_no__icontains=oem_query).order_by('category__name', 'id')
results = Product.objects.filter(oem__icontains=oem_query) | Product.objects.filter(yuksel_no__icontains=oem_query)
if results.count() > 0:
category = results[0].category
else:
category = None
else:
category_id = request.GET.get('marka')
category = Category.objects.get(pk=category_id)
oem_query = category.name
results = Product.objects.filter(category_id = category_id)
except Exception:
results = None
category = None
root_categories = Category.objects.filter(product_type_id = 2, level = 0)
return render_to_response('products/search_results.html', {
'products': results,
'term' : oem_query,
'category' : category,
'root_categories': root_categories
}, context_instance=RequestContext(request))
@csrf_exempt
def send_form(request):
a = '<html><body><table>'
for key, value in request.POST.iteritems():
a+= '<tr><td><strong>'+key+'</strong></td><td>'+value+'</td></tr>'
a += '</table></body></html>'
    # deliver the submission once, as the HTML message built below
subject, from_email, to = 'Yüksel Oto Form', 'info@yukselautomotive.com', 'info@yukselautomotive.com'
html_content = a
msg = EmailMessage(subject, html_content, from_email, [to])
msg.content_subtype = "html" # Main content is now text/html
msg.send()
return render_to_response('products/send_form.html', {'form':a}, context_instance=RequestContext(request))
#
#def view_category(request, slug):
# category = get_object_or_404(Category, slug=slug)
# return render_to_response('category_view.html', {
# 'category': category,
# 'posts': Blog.objects.filter(category=category)[:5]
# })
| UTF-8 | Python | false | false | 4,759 | py | 49 | views.py | 21 | 0.670388 | 0.664069 | 0 | 115 | 40.286957 | 194 |
Raddock/MountWizzard4 | 14,224,931,727,252 | 587f036afcdb557300554cd2cf6fd955da0108de | f50500337a7a215793fe41e6ac1146d4ce86ae53 | /mw4/test/test_integration/test_setupQt.py | faedefc3041c4c175a8d0c2608a806641cb0b33e | [
"Apache-2.0"
]
| permissive | https://github.com/Raddock/MountWizzard4 | 7f00c0e9d900bcb9f6f5c284d759a5ec3219a983 | 15efed77c1634461184e90a7cf6419eec0dec909 | refs/heads/master | 2020-09-22T13:36:18.352375 | 2019-12-01T18:38:34 | 2019-12-01T18:38:34 | 225,218,989 | 0 | 0 | Apache-2.0 | true | 2019-12-01T19:39:33 | 2019-12-01T19:39:31 | 2019-12-01T18:38:43 | 2019-12-01T18:38:41 | 77,308 | 0 | 0 | 0 | null | false | false | ############################################################
# -*- coding: utf-8 -*-
#
# # # # # # #
# ## ## # ## # #
# # # # # # # # # # #
# # ## # ## ## ######
# # # # # # #
#
# Python-based Tool for interaction with the 10micron mounts
# GUI with PyQT5 for python
# Python v3.7.5
#
# Michael Würtenberger
# (c) 2019
#
# Licence APL2.0
#
###########################################################
# standard libraries
import sys
# external packages
import PyQt5.QtWidgets
import PyQt5.QtTest
# local import
from mw4 import mainApp
def setupQt():
# global app, spy, mwGlob, test
mwGlob = {'workDir': '.',
'configDir': './mw4/test_integration/config',
'dataDir': './mw4/test_integration/config',
'tempDir': './mw4/test_integration/temp',
'modeldata': 'test',
}
test = PyQt5.QtWidgets.QApplication(sys.argv)
app = mainApp.MountWizzard4(mwGlob=mwGlob)
spy = PyQt5.QtTest.QSignalSpy(app.message)
app.mount.stopTimers()
app.measure.timerTask.stop()
app.relay.timerTask.stop()
return app, spy, mwGlob, test
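# Usage sketch (editor's addition): a test module would typically run
#   app, spy, mwGlob, test = setupQt()
# and quit the QApplication held in `test` when it is done.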
| UTF-8 | Python | false | false | 1,146 | py | 51 | test_setupQt.py | 38 | 0.482096 | 0.463755 | 0 | 43 | 25.627907 | 60 |
gianninapg/python-tasks | 7,619,271,995,552 | 5ef71e6282f537c05ffa43eca6944bd70ff533cb | 83d7e67287075a580a823310c45db5bb3a33611b | /names_list.py | 1249dbff85604d3bd84eb142404576ec0869bb47 | []
| no_license | https://github.com/gianninapg/python-tasks | 220fcf168ae44478fc670a8f94452a620659f2a2 | 5137e1dce918c9af9105ae71391f3a9e45b54bb2 | refs/heads/master | 2022-11-23T01:56:05.579994 | 2020-07-25T11:18:04 | 2020-07-25T11:18:04 | 280,637,766 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # #name = input("What is your name?")
# #print(name)
# #name = [input(f" Hello {name}")]
# names_list = [
# "Izzy", "Archie", "Boston"
# ]
# name = input(names_list)
# print(names_list)
# #print(names_list)
# print(name.append(name))
# #print(name)
input_names = input("Enter three names, separated by spaces: ")
#names_list = input_names
#print("\n")
print("Printing names list")
# split() on whitespace turns the single input string into a list of names
print(input_names.split())
#print("Printing names list")
#for name in names_list:
#print(name) | UTF-8 | Python | false | false | 475 | py | 16 | names_list.py | 16 | 0.623158 | 0.623158 | 0 | 30 | 14.866667 | 40 |
RaghavSharma0007/rasa_based_voicebot | 17,248,588,689,658 | a7ac6bb47cd36891ae2c9f2cc83f69be8a22600f | 29e27bf74310d517d6127355c8ee0dcb3a386446 | /chatsite/settings.py | 6f17c35cac68981acc77cf56948924ea675a5293 | []
| no_license | https://github.com/RaghavSharma0007/rasa_based_voicebot | d5e56faf64695ab68ae80ca89f06a1b1130e64d8 | 143f613e58818bacc3c087ced962ddf3e8649d0a | refs/heads/master | 2022-04-27T02:31:50.292190 | 2020-04-22T02:36:06 | 2020-04-22T02:36:06 | 257,771,206 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Django settings for chatsite project.
Generated by 'django-admin startproject' using Django 2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
#from decouple import config
#from rasa_core.agent import Agent
#from rasa_core.channels.socketio import SocketIOInput
#from rasa_core.agent import Agent
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "4wn^(4h&w4ca^yo@u-s@@7f6a)&4mhupb-zpk^&#d9#jiik@-y"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# django-cors-headers
'corsheaders',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
# middleware for corsheaders
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'chatsite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'chatsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
# allow cors headers
CORS_ORIGIN_ALLOW_ALL = True
# path for rasa_core models and dialogue
#RASA_CORE_MODELS = os.path.join(BASE_DIR, 'rasachat', 'models', 'dialogue')
RASA_CORE_NLU = os.path.join(BASE_DIR, 'rasachat', 'models', 'current', 'nlu')
# RASA SOCKET CHANNEL
# load your trained agent
# agent = Agent.load(RASA_CORE_MODELS, interpreter=RASA_CORE_NLU)
#input_channel = SocketIOInput(
# event name for messages sent from the user
# user_message_evt="user_uttered",
# event name for messages sent from the bot
# bot_message_evt="bot_uttered",
# socket.io namespace to use for the messages
# namespace=None
#)
#set serve_forever=False if you want to keep the server running
# s = agent.handle_channels([input_channel], 5500, serve_forever=True)
| UTF-8 | Python | false | false | 4,270 | py | 27 | settings.py | 8 | 0.693677 | 0.68548 | 0 | 164 | 25.036585 | 91 |
manibatra/Drones | 7,576,322,318,354 | b1a62b222d0eb195a5b0bb7cef690dce01a07214 | bfa29a47d9193adfa7dc70231ad1fb70e07ab27b | /drones/test/test_consumers.py | d9da8484c9eacb64f68d3ae6abc7d0be5deeb9f0 | []
| no_license | https://github.com/manibatra/Drones | c8b1c76ad94aa29e14fe1da1bac98fc534bb5bb0 | 99bfb92a0185b6f0cd641c74d6f9ad73a5543020 | refs/heads/master | 2020-03-23T18:06:48.581443 | 2018-07-29T03:22:00 | 2018-07-29T03:22:00 | 141,890,137 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from channels.testing import WebsocketCommunicator
from drones.consumers import DroneConsumer, DashboardConsumer
import pytest
@pytest.mark.asyncio
async def test_my_consumer():
communicator = WebsocketCommunicator(DroneConsumer, "/testws/")
connected, subprotocol = await communicator.connect()
assert connected
d_communicator = WebsocketCommunicator(DashboardConsumer, "/testws/")
d_connected, d_subprotocol = await d_communicator.connect()
assert d_connected
    # Send a JSON position update from the drone to the dashboard consumer
await communicator.send_json_to({ "id" : 0,
"lat": 32,
"long": 32,
"speed": 10})
response = await d_communicator.receive_json_from()
# response = await communicator.receive_from()
assert response == { "id" : 0,
"latitude": 32,
"longitude": 32,
"speed": 10}
# Close
await communicator.disconnect()
await d_communicator.disconnect()
| UTF-8 | Python | false | false | 1,108 | py | 11 | test_consumers.py | 5 | 0.603791 | 0.591155 | 0 | 30 | 35.933333 | 73 |
tanayseven/python-meetup-nelkinda-14th-october | 16,080,357,600,288 | e20b2e0060496d865787c2bfffeb5746e4b7c351 | 5c14b4926b6285659a335767a5471c5c04271301 | /todo_app/todo_app/list/views.py | 5b6d804b1135b1ed40ce8e0c456bf58d4578375b | [
"MIT"
]
| permissive | https://github.com/tanayseven/python-meetup-nelkinda-14th-october | c6e1df6a1d021e43044c7d29a2ce209696ca649e | 14d39ef6b79afe30f3c0d38ae90273b1458923a1 | refs/heads/master | 2021-07-12T09:11:27.262277 | 2017-10-16T17:13:42 | 2017-10-16T17:13:42 | 106,182,564 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Blueprint, jsonify
from todo_app.extensions import app, login_required
from todo_app.list.repos import ToDoListRepo
from todo_app.user.repos import ListUserRepo
todo_list_endpoints = Blueprint('todo_list', __name__, url_prefix='/todo_list')
@todo_list_endpoints.route('/all', methods=('GET',))
@login_required
def all_todo_lists():
    auth_token = ''  # TODO: read the token from the request (e.g. an Authorization header)
    user = ListUserRepo.load_user_if_exists(auth_token)
    lists = ToDoListRepo.load_all_lists_for_(user)
    return jsonify(dict(todo_list=lists)), 200
# register the blueprint only after its routes are attached; routes added to a
# blueprint after registration are never picked up by the app
app.register_blueprint(todo_list_endpoints)
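# Example request (editor's sketch, assuming a logged-in session):
#   GET /todo_list/all  ->  {"todo_list": [...]} with status 200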
| UTF-8 | Python | false | false | 571 | py | 22 | views.py | 17 | 0.742557 | 0.737303 | 0 | 16 | 34.6875 | 79 |
Maxfan1999/algorythms_and_data_structures | 10,496,900,095,586 | 56317e511b7152abf0551dff583066126c0e9e0a | d15ca7d5c6cc67e3ffef19cd465e912b16bcd35d | /Search/task1/user.py | 25472e01bdadae0dceec782d54fcf533a46dd688 | []
| no_license | https://github.com/Maxfan1999/algorythms_and_data_structures | 3f6b512759dcdcd6f8b3c691bc4871c7a8006e0b | 08f0859001d8397a7d8e9d8f2a6fe8c1a12302ab | refs/heads/master | 2021-01-03T19:45:21.859326 | 2020-06-01T17:14:22 | 2020-06-02T11:19:44 | 240,213,554 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Implement an interface for an English-Ukrainian dictionary with fast translation lookup.
"""
dictionary = []
def addTranslation(eng, translation):
""" Додає до словника англійське слово та його переклад.
Пари (eng, translation) приходяться у порядку, що відповідає лексикографічному порядку.
:param eng: англійське слово
:param translation: переклад
"""
dictionary.append((eng, translation))
return True
def find(eng):
""" Повертає переклад слова зі словника.
:param eng: англійське слово
:return: переклад слова, якщо воно міститься у словнику, або порожній рядок у іншому разі.
"""
l, r = 0, len(dictionary) - 1
while l < r:
m = l + (r - l) // 2
if eng == dictionary[m][0]:
return dictionary[m][1]
elif eng > dictionary[m][0]:
l = m + 1
else:
r = m - 1
if eng == dictionary[l][0]:
return dictionary[l][1]
else:
return ""
| UTF-8 | Python | false | false | 1,311 | py | 36 | user.py | 31 | 0.609707 | 0.599596 | 0 | 34 | 27.911765 | 96 |
daniela08marquez/proyecto-compu | 9,844,065,043,075 | 612cdc02a1c198e1af906dd884112fc07084e519 | e91b1e8eed34299036b39fe6a19de92eddd2a542 | /assignments/00COVIDMUERTESMEXICO/src/exercise.py | 05fff5de9c880d7d2e4e2c3724eb7a5f5650a918 | []
| no_license | https://github.com/daniela08marquez/proyecto-compu | 9634f61fc4e7349efbba6bce4ce30c083df0cb3e | f14d626de03a95db094df4a37bdc65018531b463 | refs/heads/main | 2023-09-01T18:27:46.792390 | 2021-10-23T02:38:43 | 2021-10-23T02:38:43 | 419,894,866 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from matplotlib import pyplot as plt
def guardar_matrizhombres(m):
with open('graficas_hombres_covid.csv', 'w') as archivo:
for i in m:
renglon = ""
for j in i:
renglon = renglon + str(j) + ","
renglon = renglon[:-1]
renglon = renglon + "\n"
archivo.write(renglon)
def cargar_matrizhombres():
m=[]
with open('graficas_hombres_covid.csv', 'r') as archivo:
for linea in archivo :
lista =[]
lista_linea = linea.split(',')
for elemento in lista_linea:
elemento = elemento.replace("\n","")
lista.append(int(elemento))
m.append(lista)
return m
def guardar_matrizmujeres(m):
with open('graficas_mujeres_covid.csv', 'w') as archivo:
for i in m:
renglon = ""
for j in i:
renglon = renglon + str(j) + ","
renglon = renglon[:-1]
renglon = renglon + "\n"
archivo.write(renglon)
def cargar_matrizmujeres():
m=[]
with open('graficas_mujeres_covid.csv', 'r') as archivo:
for linea in archivo :
lista =[]
lista_linea = linea.split(',')
for elemento in lista_linea:
elemento = elemento.replace("\n","")
lista.append(int(elemento))
m.append(lista)
return m
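# Editor's note: guardar_matriz*() writes a matrix of ints as comma-separated
# rows, and cargar_matriz*() reads it back, e.g. [[1, 2], [3, 4]] <-> "1,2\n3,4\n".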
def grafico_muertes_hombres():
print("Grafico de barras hombres")
edad=["0-4", "5-9", "10-14", "15-19", "20-24","25-29","30-34","35-39","40-44","45-49","50-54","55-59","60-64","65-69","70-74","75-79","80-84","85-89","90-94","95-99"]
muertes=[257, 0, 0, 195, 711, 1835, 3307, 5775, 8499, 13256, 16464, 20465, 23105, 23514, 21129, 16763, 11506, 6527, 2260, 563]
xs = [i for i,_ in enumerate(edad)]
plt.bar(xs, muertes, color = "b")
plt.ylabel("# de muertes")
plt.title("Muertes de covid (hombre)")
plt.xticks([i for i,_ in enumerate(edad)], edad)
plt.savefig('grafico_muertes_hombres.png')
plt.show()
def grafico_muertes_mujeres():
print("Grafico de barras mujeres")
edad=["0-4", "5-9", "10-14", "15-19", "20-24","25-29","30-34","35-39","40-44","45-49","50-54","55-59","60-64","65-69","70-74","75-79","80-84","85-89","90-94","95-99"]
muertes=[215, 0, 0, 218, 489, 1112, 1728, 2678, 3971, 6750, 8870, 12571, 15034, 15466, 13786, 11295, 7840, 4477, 1838, 449]
xs = [i for i,_ in enumerate(edad)]
plt.bar(xs, muertes, color = "r")
plt.ylabel("# de muertes")
plt.title("Muertes de covid (mujer)")
plt.xticks([i for i,_ in enumerate(edad)], edad)
plt.savefig('grafico_muertes_mujeres.png')
plt.show()
def main():
    #write your code below this line
reg = "si"
print('')
print('-------------------------------------------')
print('Base de datos COVID-19 México')
print(' ')
nomb = input('¿Cuál es tu nombre? ')
print ('Hola ' + str(nomb))
print('Bienvenid@ a la base de datos de COVID-19 México, por favor eliga el tipo de datos que desea consultar.')
print (' ')
while (reg == "si"):
print("1.-Número de Muertes por COVID-19. ")
        #inside this one there are more options: show the deaths chart at national level, or just the per-state figures
        print('2.- Número de casos activos.')
        #also gives the option of viewing the data at state or national level
        print("3.- Semáforo Epidemiológico.")
        #also gives the option of viewing the data at state or national level
        print("4.- Sintomas de COVID-19.")
        #this section asks the questions and, depending on how many you answer, gives recommendations on what to do
        print("5.- Protegerse y prevenir el COVID-19.")
        #gives a list of recommendations on how to protect yourself so you don't catch COVID
num=int(input("Teclee el número que coorresponda al apartado que sea de su interés: "))
if num == 1 :
print('-----------------------------------------------------------------')
print('Haz seleccionado el apartado de Número de Muertes por COVID-19.')
print('')
print('El numero de muertes acumuladas en México desde Marzo 2020 hasta Octubre 2021 es de 284,295.')
g = input('¿Desea ver las graficas muertes de hombres y mujeres por edades? (si o no)')
if g == 'si':
grafico_muertes_mujeres()
grafico_muertes_hombres()
                # cargar_matrizhombres() / cargar_matrizmujeres() only return the
                # matrices (nothing here uses them), and the CSVs must first be
                # written by guardar_matriz*():
                # cargar_matrizhombres()
                # cargar_matrizmujeres()
reg = input('¿Desea volver al menú principal? (si o no) ')
else:
reg = input('¿Desea volver al menú principal? (si o no) ')
elif num == 2 :
            #number of active cases
print('-----------------------------------------------------------------')
print('Haz seleccionado el apartado de número de casos activos.')
print('')
print('- A nivel nacional desde Marzo 2020 hasta octubre 2021 hay 3,767,758 casos confirmados.')
print('- A nivel nacional desde Marzo 2020 hasta octubre 2021 hay 3,986,789 casos estimados.')
print('')
reg = input('¿Desea volver al menú principal? ')
elif num == 3 :
            #epidemiological traffic-light section
print('-----------------------------------------------------------------')
print('Haz seleccionado el apartado de Semáforo Epidemiológico.')
print('')
print('¿A que nivel te gustaría consultar los datos? ')
print(' ')
print(' 1.- Rojo ')
print(' 2.- Naranja ')
print(' 3.- Amarillo ')
print(' 4.- Verde ')
num3= int(input())
if num3 == 1:
                #list of states on red
print ('Los estados en semáforo rojo son: ')
rojo = ['Ninguno']
print (rojo)
reg = input('¿Desea volver al menú principal? ')
elif num3 == 2:
                #list of states on orange
print ('Los estados en semáforo naranja son: ')
naranja = ['Baja California']
print (naranja)
reg = input('¿Desea volver al menú principal? ')
elif num3 == 3:
                #list of states on yellow
print ('Los estados en semáforo amarillo son: ')
amarilla = ['Chihuahua, Coahuila, Jalisco, Aguascalientes, Colima, Guanajuato, Querétaro, Morelos, Tabasco, Campeche, Yucatán']
print (amarilla)
reg = input('¿Desea volver al menú principal? ')
elif num3 == 4 :
                #list of states on green
print ('Los estados en semáforo verde son: ')
verde = ['Sonora, Baja California sur, Sinaloa, Durango, Nayarit, Zacatecas, San Luis Potosí, Nuevo León, Tamaulipas, Veracruz, Puebla, Tlaxcala, Hidalgo, Estado de México, Ciudad de México, Michoacán, Guerrero, Oaxaca, Chiapas, Quintana Roo']
print (verde)
reg = input('¿Desea volver al menú principal? ')
else:
print ('Por favor ingrese una opción valida.')
elif num == 4 :
            #symptoms; add a counter for each question
print('-----------------------------------------------------------------')
print('Haz seleccionado el apartado de Síntomas.')
print('')
print('Selecciona que quieres hacer: ')
print(' ')
print(' 1.- Ver sintomas. ')
print(' 2.- Tomar test. ')
num4= int(input())
if num4 == 1 :
print('-----------------------------------------------------------------')
print('De acuerdo con la OMS, los síntomas de COVID-19 son: ')
print('Los síntomas más habituales:')
print('Fiebre, Tos, Cansancio, Pérdida del gusto o del olfato')
print('')
print('Los síntomas menos habituales')
print('Dolor de garganta, dolor de cabeza, molestias y dolores, ')
print('diarrea, Erupción cutánea o pérdida del color de los dedos')
print('de las manos o los pies y Ojos rojos o irritados.')
print('')
print('Los síntomas mas serios:')
print('Dificultad para respirar, pérdida de movilidad o del habla')
print('o sensación de confusión y dolor en el pecho.')
print('')
                #TODO: tables listing the symptoms
reg = input('¿Desea volver al menú principal? ')
elif num4== 2:
                #build a counter from the answers the person gives
print('Por favor responda las siguientes preguntas:')
print('')
print('En los últimos 10 días, ¿Has presentado uno o mas')
print('de los siguientes signos o síntomas? responde "si" o "no"')
cont=0
respuesta= 'si'
print('-----------------------------------------------------------------')
pregunta1=input('Temperatura mayor a 37.5 grados centigrados: ')
if pregunta1=='si' :
cont = cont+1
print('-----------------------------------------------------------------')
pregunta2=input('Dolor de cabeza intenso: ')
if pregunta2=='si' :
cont = cont+1
print('-----------------------------------------------------------------')
pregunta3=input('Tos de reciente aparición: ')
if pregunta3=='si' :
cont = cont+1
print('-----------------------------------------------------------------')
pregunta4=input('Dificultad para respirar: ')
if pregunta4=='si' :
cont = cont+1
print('-----------------------------------------------------------------')
pregunta5=input('Dificultad para percibir olores: ')
if pregunta5=='si' :
cont = cont+1
print('-----------------------------------------------------------------')
pregunta6=input('Dificultad para percibir sabores: ')
if pregunta6=='si' :
cont = cont+1
print('-----------------------------------------------------------------')
pregunta7=input('Dolor muscular: ')
if pregunta7=='si' :
cont = cont+1
print('-----------------------------------------------------------------')
pregunta8=input('Dolor en las articulaciones: ')
if pregunta8=='si' :
cont = cont+1
print('-----------------------------------------------------------------')
pregunta9=input('Dolor de garganta o al tragar: ')
if pregunta9=='si' :
cont = cont+1
print('-----------------------------------------------------------------')
pregunta10=input('Irritación en los ojos(ardor y/o comezón: ')
if pregunta10=='si' :
cont = cont+1
                print('-----------------------------------------------------------------')
                if cont<=4:
print('El número de sintomas que usted presenta es igual a: '+str(cont))
print('')
print('La probabilidad de que padezcas de COVID-19 es baja,')
print('pero eso no significa que debas de bajar la guardia,')
print('aquí te dejamos algunas recomendaciones a seguir:')
print('')
print('- Lavarse las manos con agua y con jabón como pimpon.')
print('- Guarde al menos 1 metro de distancia entre usted y otras personas. Cuanto mayor distancia, mejor.')
print('- Convierta el uso de la mascarilla en una parte normal de su interacción con otras personas.')
print('- Evite ir a lugares bastante concurridos.')
print('')
                elif cont>4:
                    print('El número de sintomas que usted presenta es igual a: '+str(cont))
                    print('')
                    print('La probabilidad de que padezcas de COVID-19 es intermedia,')
                    print('aquí le dejamos algunas recomendaciones a seguir:')
                    print('')
                    print('- Evitar el contacto con otras personas')
                    print('- Acudir al medico')
                    print('')
                print('-----------------------------------------------------------------')
                reg = input('¿Desea volver al menú principal? ')
elif num == 5 :
            #prevention recommendations
print('-----------------------------------------------------------------')
print('Haz seleccionado el apartado de Protegerse y prevenir el COVID-19.')
print('')
print('A continuación se mostrarán tips emitidos por la OMS (Organización Mundial de la Salud')
print('Por favor eliga que apartado desea consultar: ')
print(' ')
print('1. Qué hacer para mantenerse y mantener a los demás a salvo del COVID-19.')
print('2. Indicaciones básicas sobre la manera de ponerse la mascarilla.')
print('3. Normas básicas de la buena higiene')
print(' ')
num5=int(input())
if num5 == 1:
print('-----------------------------------------------------------------')
print('Qué hacer para mantenerse y mantener a los demás a salvo de la COVID-19:')
print('')
print('- Guarde al menos 1 metro de distancia entre usted y otras personas. Cuanto mayor distancia, mejor.')
print('- Convierta el uso de la mascarilla en una parte normal de su interacción con otras personas.')
print('- Evite ir a lugares bastante concurridos.')
print('')
reg = input ('¿Quieres volver al menú principal? ')
elif num5 == 2:
print('-----------------------------------------------------------------')
print('Indicaciones básicas sobre la manera de ponerse la mascarilla:')
print('')
print('- Lávese las manos antes de ponerse la mascarilla, y también antes y después de quitársela y cada vez que la toque.')
print('- Asegúrese de que le cubre la nariz, la boca y el mentón.')
print('- Cuando se quite la mascarilla, guárdela en una bolsa de plástico limpia.')
print('- No utilice mascarillas con válvulas.')
print('')
reg = input ('¿Quieres volver al menú principal? ')
elif num5 == 3:
print('-----------------------------------------------------------------')
print('Normas básicas de la buena higiene:')
print('')
print('- Lávese periódica y cuidadosamente las manos con un gel hidroalcohólico o con agua y jabón. ')
print('- Evite tocarse los ojos, la nariz y la boca. ')
print('- Al toser o estornudar cúbrase la boca y la nariz con el codo flexionado o con un pañuelo.')
print('- Limpie y desinfecte frecuentemente las superficies, en particular las que se tocan con regularidad.')
print('')
reg = input ('¿Quieres volver al menú principal? ')
else:
print('Por favor eliga una opción valida.')
else :
#que te vuelva a preguntar hasta que contestes un numero
print('Por favor ingrese una opción valida.')
reg = input('¿Desea volver al menu principal? ')
if reg == 'no' :
print('Recuerde seguir las indicaciones emitidas por las autoridades')
print('para cuidarse a usted y sus seres queridos. ')
print('Muchas gracias por usar nuestro programa c:')
if __name__=='__main__':
main() | UTF-8 | Python | false | false | 16,701 | py | 3 | exercise.py | 1 | 0.482723 | 0.455165 | 0 | 334 | 48.652695 | 259 |
BUEC500C1/video-jadtay | 11,063,835,767,949 | c7eb1c4a5d70e0a90914797bbe5e381098c90999 | 1eba85d9154d80f818a9c26848b51ac2ef7b974d | /image2video.py | a6b97e9d9dc6905d782f20f1e2f660bdbf095ee2 | []
| no_license | https://github.com/BUEC500C1/video-jadtay | aea705bd99f294c947e64768b486322b1bd66b08 | 3e4260756dcb16c0088d5c03f10e2cea1824ccc1 | refs/heads/master | 2020-12-29T09:33:47.068215 | 2020-02-29T00:12:57 | 2020-02-29T00:12:57 | 238,557,535 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 28 09:49:10 2020
@author: jadta
"""
import os
import glob
import subprocess
import urllib.request
import json
from urllib.error import URLError, HTTPError
#used to open subreddit JSON; can choose which subreddit as well as the quantity of pictures
def getSubreddit(subreddit='pics',quantity=20):
    #usually have to try twice to connect; you cannot connect multiple times in a short time period!
try:
response = urllib.request.urlopen("https://www.reddit.com/r/{}/hot.json?limit={}".format(subreddit,quantity+1))
#if cannot connect to url use backup file
except (URLError,HTTPError):
print("Connection timed out. Loading sample file.")
backup = open('backup.json')
obj = json.load(backup)
backup.close()
return obj
else:
print("Request successful!")
obj = json.loads(response.read())
return obj
#used to retrieve images from JSON file and create concat demuxer txt file
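#the queue.txt written below follows ffmpeg's concat demuxer syntax, e.g.:
#  file 'pic1.jpg'
#  duration 3
#  file 'pic2.jpg'
#  duration 3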
def getImages(obj,quantity = 20):
print("Getting images...")
#create images folder if not already present
if not os.path.exists('images'):
os.makedirs('images')
queue = open('images/queue.txt','w')
for i in range(1,quantity+1):
#parse json for domain and image url
domain = obj['data']['children'][i]['data']['domain']
imageurl = obj['data']['children'][i]['data']['url']
#write file names to queue.txt
queue.write('file \'pic{}.jpg\'\n'.format(i))
queue.write('duration 3\n')
#clean up imgur.com links
if(domain == 'imgur.com'):
urllib.request.urlretrieve(imageurl+'.jpg','images/pic{}.jpg'.format(i))
else:
urllib.request.urlretrieve(imageurl,'images/pic{}.jpg'.format(i))
queue.close()
return 1
#used to process images with ffmpeg
def processImages(txt = 'images/queue.txt'):
print("Processing...")
    #process images from queue.txt into summary.mp4; the pad filter rounds dimensions up to even values to avoid ffmpeg's "not divisible by 2" error
try:
subprocess.check_call(['ffmpeg','-y','-f','concat','-i','{}'.format(txt),'-pix_fmt','yuvj422p','-vf','pad=ceil(iw/2)*2:ceil(ih/2)*2','summary.mp4'])
except (subprocess.CalledProcessError, FileNotFoundError):
print("Oops something went wrong. Please try again.")
return 0
else:
print("Complete!")
return 1
#used to delete images folder
def cleanupImages():
files = glob.glob('images/*')
for f in files:
os.remove(f)
os.rmdir('images')
return 1
#function calls
def main():
obj = getSubreddit('pics',20)
getImages(obj,20)
processImages('images/queue.txt')
cleanupImages()
if __name__ == "__main__":
main() | UTF-8 | Python | false | false | 2,828 | py | 5 | image2video.py | 2 | 0.625884 | 0.612447 | 0 | 92 | 29.75 | 156 |
Ji-hyeong/TIL | 18,786,186,975,428 | 071af7484fe3df63aeea6dd216ae3f34a4d8f757 | dd67dd3c4bafa2ff25d5c312e8b75efc83e31371 | /programmers/Code/stringCompression.py | f3eed7313cd8d01a579e487b0563aebc688ef2df | []
| no_license | https://github.com/Ji-hyeong/TIL | 47cc2c189d4618acc9c03fea92bc044c5e2001f0 | 443cd145b50fdbbc50d65ec6924f4743475babf2 | refs/heads/master | 2023-06-05T22:20:35.594233 | 2021-07-26T08:40:45 | 2021-07-26T08:40:45 | 280,310,521 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
sys.stdin=open("input.txt","r")
def solution(s):
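    # Try every chunk length from 1 to len(s)-1: run-length encode s in chunks
    # of that size (runs of length 1 get no count prefix) and keep the shortest.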
answer = len(s)
for length in range(1,len(s)):
start,cnt,before,temp=length,1,s[0:length],''
while True:
if start>=len(s): break
            chunk=s[start:start+length]
            if before==chunk:
                cnt+=1
            else:
                if cnt==1:
                    temp+=''.join(before)
                else:
                    temp += str(cnt) + ''.join(before)
                cnt=1
                before=chunk[:]
start+=length
if cnt == 1:
temp += ''.join(before)
else:
temp += str(cnt)+''.join(before)
answer=min(answer,len(temp))
# print(temp,length,len(s))
return answer
for _ in range(5):
s=input()
print(solution(s)) | UTF-8 | Python | false | false | 825 | py | 262 | stringCompression.py | 215 | 0.447273 | 0.437576 | 0 | 29 | 27.482759 | 54 |
sebapaik/django-shop | 9,113,920,637,458 | 814f74df85b329ea392f23cdfd2fc05ca7a26cdf | 8332e0d00d3734f7347b88d23c2a14cf654fb9db | /shop/admin.py | 1f36c381da194da389b29171085c67d1f4f1e686 | [
"MIT"
]
| permissive | https://github.com/sebapaik/django-shop | e21f3298eee5b7e0b3981266c0fa02bf79f3bb33 | 00c7190ac6614ebe1eb0c0ea444f8e91c2e7f7a8 | refs/heads/master | 2022-10-08T02:58:06.441724 | 2018-09-10T21:12:57 | 2018-09-10T21:12:57 | 148,103,658 | 0 | 1 | MIT | false | 2022-09-30T00:43:03 | 2018-09-10T05:30:01 | 2018-09-10T21:13:04 | 2022-09-30T00:43:02 | 6,698 | 0 | 1 | 1 | Python | false | false | from django.contrib import admin
from .models import Product, Order
# Register your models here.
class ProductAdmin(admin.ModelAdmin):
list_display = ['id','brand','pname','price','inventory','description','imageurl']
list_editable = ['brand', 'pname', 'price', 'inventory', 'description', 'imageurl']
list_per_page = 20
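# Note: Django requires every list_editable field to also appear in list_display
# and to exclude the change-list link column (by default the first one, 'id' here);
# both admins in this module satisfy that.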
admin.site.register(Product, ProductAdmin)
class OrderAdmin(admin.ModelAdmin):
list_display = ['id','product', 'first_name', 'last_name', 'dateordered', 'ordertotal', 'address_line_1', 'address_line_2', 'city', 'state', 'zipcode']
list_editable = ['product', 'first_name', 'last_name', 'dateordered', 'ordertotal', 'address_line_1', 'address_line_2', 'city', 'state', 'zipcode']
list_per_page = 20
admin.site.register(Order, OrderAdmin)
| UTF-8 | Python | false | false | 783 | py | 19 | admin.py | 13 | 0.689655 | 0.679438 | 0 | 14 | 54.928571 | 155 |
Wilson3g/simple-blog-flask | 17,119,739,665,637 | e52d1cf29bc2ba08bf309880687eefd4b32148c8 | 7594b431838bb224807b4fa5453437b9b43ecec5 | /app/model/Post.py | 006484bc63a80dfc849c3fa7237a8e54fc3e6a94 | []
| no_license | https://github.com/Wilson3g/simple-blog-flask | bbe13a7aa970e5c53b14c6c91ee65bab18741afa | 05f71c2e589926e59db616606dc716854cac7fa7 | refs/heads/master | 2023-04-23T10:53:30.802525 | 2020-06-03T22:32:19 | 2020-06-03T22:32:19 | 260,315,754 | 0 | 0 | null | false | 2021-05-06T20:26:16 | 2020-04-30T20:50:07 | 2020-06-03T22:32:23 | 2021-05-06T20:26:16 | 4,958 | 0 | 0 | 14 | Python | false | false | from app.config.database import db
from app.model import Comment
from app.model.Tags import Tag
from app.model import posts_has_tags
class Post(db.Model):
__tablename__ = 'post'
id = db.Column(db.Integer(), primary_key=True)
title = db.Column(db.String(255))
content = db.Column(db.Text())
author = db.Column(db.String(255))
is_active = db.Column(db.Boolean(), default=True)
comment = db.relationship('Comment', backref="post")
tags = db.relationship('Tag', secondary='posts_has_tags')
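# Usage sketch (assumes an active Flask app context and an initialized db):
#   post = Post(title='Hello', content='...', author='me')
#   db.session.add(post)
#   db.session.commit()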
| UTF-8 | Python | false | false | 519 | py | 18 | Post.py | 15 | 0.689788 | 0.678227 | 0 | 14 | 36.071429 | 61 |
Yu0606/test | 4,372,276,728,779 | b773f124a155811dd9a503e2034e039a7d83fce7 | 3044a21fcb4b739c79368c18e2c6fb902eefea91 | /myuse/scoring.py | bf26286207d0678883b97a6cb7160335c2ca8d9a | []
| no_license | https://github.com/Yu0606/test | 087a8fc4da118a44b4d72e1783526bfbfb667631 | 18315ce14907ddcb4eb674d0b5052a33538f633a | refs/heads/master | 2018-09-19T14:20:15.128262 | 2018-07-01T11:46:44 | 2018-07-01T11:46:44 | 93,718,818 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math,MeCab,re,json
class Scoring:
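    # Keyword weights for scoring queue-status tweets; keys are Japanese words,
    # e.g. 最後尾 "end of the line", 到着 "arrival", 自宅 "home" (penalized).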
score_noun = {"後尾": 70, "最後尾": 70, "折り返し": 60, "列": 20, "待機列": 70, "在庫": 30,
"完売": 20, "到着": 100, "移動": 100, "アナウンス": 90, "スタッフ": 90, "まとめ": -200, "現状": 90, "現在": 90, "札": 30,
"今": 100, "行列": 40, "時間": 100, "自宅": -200, "検索": -100}
score_adjective_verb = {"動く": 100, "進む": 100, "伸びる": 100, "諦める": -100, "聞く": 10, "すごい": 80, "長い": 100, "延びる": 100,
"並ぶ": 200, "たい": -50}
word_forms = ["名詞", "動詞", "形容詞"]
phone_client = ["Twitter for.*", "Twitter Lite", "twicca", "twitcle.*", "Echofon", "Janetter", "iOS", "TheWorld.*",
"TwitPane.*", "ツイタマ"]
    # Sentence delimiters
sentence_pereods = "[。!+?+\n]"
    # Score calculation that takes distance into account
def calc_score_with_distance(self,score_default, distance):
d = math.log2(distance + 2)
        # log2(x+2) >= 1 (for x >= 0)
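        # e.g. distance 0 -> score/1, distance 2 -> score/2, distance 6 -> score/3,
        # so score words closer to a label contribute more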
return int(score_default / d)
def is_score_word(self,word, form):
# print("check",word,form)
if form == self.word_forms[0]:
for w in self.score_noun.keys():
if word == w:
return self.score_noun[w]
elif form == self.word_forms[1] or form == self.word_forms[2]:
for w in self.score_adjective_verb.keys():
if word == w:
return self.score_adjective_verb[w]
return None
    # Score analysis for a tweet
def analyze(self, text, mecab, day=None, tweet_id=None, created_at=None, test=False, client=None):
        # Split the text at sentence delimiters into multiple pieces; skip pieces below a minimum length
        # !If the same label turns up across several sentences, adopt the higher score!
        # !Labels for which nothing was found are not included!
        # !Known issue: information about a single label sometimes spans multiple lines!
        # The score table does not store the tweet text itself, so data storage stays manageable
        scored_label = {} # label -> score
score_sql = "insert into booth_score_8(space, created_at, score, tweet_id) values(%s, %s, %s, %s)"
if day:
day = str(day)
else:
day = "*"
if isinstance(text, list):
for t in text:
                temp = self.analyze_text_score(t, mecab, day=day)
                # Look over this piece's scoring result
for k in temp.keys():
                    # Has this label already been scored?
if k in scored_label.keys():
if isinstance(scored_label[k], list):
scored_label[k].append(temp[k])
else:
scored_label[k] = [scored_label[k], temp[k]]
else:
                        # Otherwise just add it
scored_label.update({k: temp[k]})
            # Afterwards, resolve labels that received multiple scores
for sl in scored_label.keys():
if isinstance(scored_label[sl], list):
scored_label.update({sl: max(scored_label[sl])})
else:
scored_label = self.analyze_text_score(text, mecab, day=day)
        # Store the scoring-related data
if not test:
for key in scored_label.keys():
score = scored_label[key]
                # Halve the score if the tweet was not sent from a mobile client
                if not any(re.match(phone, client) for phone in self.phone_client):
                    score = score / 2
if score < 1:
                    # Guarantee a minimum score of 1
score = 1
print(key, score)
self.mysql.insert(sql=score_sql, datas=[key, created_at, score, tweet_id])
else:
print(scored_label)
for key in scored_label.keys():
print(key, scored_label[key])
def analyze_text_score(self,text, mecab, day=None):
        unscored_label = {} # label -> distance
        scoring_label = {} # label -> distance
fixed_label = []
scored_label = {} # 単語とスコア
        # For walking the nodes in reverse order
node_reverse = []
labels = []
print("analyze:", text)
mecab.parse("")
node = mecab.parseToNode(text)
        # #Vary the distance increment by part of speech
        # distance = 0
        # Attenuate by how far from the start of the sentence the circle keyword appears?
space_dist = 0
while node:
if "BOS/EOS" not in node.feature:
print(node.surface)
print(node.feature)
features = node.feature.split(",")
                # Reaction to a score word
word_score = self.is_score_word(features[6], features[0])
if word_score:
# print("calc",wn)
                    # If there are labels not yet scored, give them a score
# print(unscored_label)
if len(unscored_label) > 0:
                        # At this point, stop handling any labels already being scored
if len(scoring_label) > 0:
fixed_label.extend(scoring_label.keys())
scoring_label = {}
for usl in unscored_label.keys():
score = self.calc_score_with_distance(word_score, unscored_label[usl])
print(features[6], score)
                            # On the first scoring, attenuate by the keyword's position from the start of the sentence
scored_label.update({usl: score - space_dist * 10})
                            # Treat the label as being scored
scoring_label.update({usl: unscored_label[usl]})
                        # Scored, so reset
unscored_label = {}
                    # Case: nothing unscored, but some labels are mid-scoring
elif len(scoring_label) > 0:
for scl in scoring_label.keys():
score = self.calc_score_with_distance(word_score, scoring_label[scl])
scored_label.update({scl: scored_label[scl] + score})
                # If the word is in the C92 dictionary
# print(features, bool("c92dic" in features))
if "c92dic" in features:
if features[10] == day or features[10] == "hole" or features[10] == "space":
if features[11] not in labels:
# print(node.surface)
# print(node.feature)
# print("booth number", features[11])
labels.append(features[11])
                        # Scoring target; not yet scored at this point
                        # Start measuring the distance
unscored_label.update({features[11]: 0})
# print(unscored_label)
node_reverse.insert(0, node)
node = node.next
            # Increment the distances
if len(unscored_label) > 0:
for key in unscored_label.keys():
unscored_label[key] += 1
if len(scoring_label) > 0:
for key in scoring_label.keys():
scoring_label[key] += 1
space_dist += 1
        # Walk the nodes in reverse order
# analyze_text_reverse(node_reverse)
return scored_label | UTF-8 | Python | false | false | 8,060 | py | 7,012 | scoring.py | 56 | 0.462057 | 0.443598 | 0 | 173 | 38.462428 | 119 |
Periodically-Peckish-Artisans/project-browser-backend | 12,472,585,041,006 | 06a244a53f5cc9f797d2ff268680fc49a7fca099 | 6ad6f506307e2b163c4f37cc78e3cedd298abe69 | /reset-search-index.py | 719c6ba38ab354420fa687089e1f176ffcd0f965 | [
"MIT"
]
| permissive | https://github.com/Periodically-Peckish-Artisans/project-browser-backend | 7fb52a1cf06e000521e796c3e73ea9a37d3d1010 | f684bdd0d17f9e5d6f0ab3ebc3a640f6c924f054 | refs/heads/master | 2020-07-22T15:18:35.550498 | 2019-10-01T23:27:42 | 2019-10-01T23:27:42 | 207,243,335 | 0 | 0 | MIT | false | 2019-09-25T23:33:58 | 2019-09-09T06:50:18 | 2019-09-25T22:26:52 | 2019-09-25T23:33:57 | 36 | 0 | 0 | 0 | C# | false | false | # Currently, this will make the site search functions not work until the search indexes are rebuilt.
# If you wanted to get fancy, you could slot it into another index rather than deleting the main one.
import json
import http.client
import os
searchkey = ''
with open('searchkeys.json') as fp:
searchkey = json.load(fp)['primaryKey']
def executeSearchFunc(method, relativeUrl, body):
baseurl = '%s.search.windows.net' % (os.environ['APPNAME'])
req = http.client.HTTPSConnection(baseurl)
req.request(method, relativeUrl + '?api-version=2019-05-06', body = json.dumps(body),
headers = {
'api-key': searchkey,
'Content-Type': 'application/json'
})
response = req.getresponse()
print('Http request:')
print('%s %s' % (method, relativeUrl))
print(response.status)
print(response.read())
print()
req.close()
storageconnstr = ''
with open('storagekeys.json') as fp:
storageconnstr = json.load(fp)['connectionString']
def resetDataSource(dataSourceName, container):
executeSearchFunc('DELETE', '/datasources/%s' % (dataSourceName), {})
executeSearchFunc('POST', '/datasources', {
'name' : dataSourceName,
'type' : 'azureblob',
'credentials' : { 'connectionString' : storageconnstr },
'container' : { 'name' : container },
})
resetDataSource('projectdatasource', 'project')
resetDataSource('eventdatasource', 'event')
def resetIndex(indexFileName):
indexConfig = {}
with open('%s.json' % (indexFileName)) as fp:
indexConfig = json.load(fp)
indexName = indexConfig['name']
executeSearchFunc('DELETE', '/indexes/%s' % (indexName), {})
executeSearchFunc('POST', '/indexes', indexConfig)
def resetIndexer(indexerFileName):
indexerConfig = {}
with open('%s.json' % (indexerFileName)) as fp:
indexerConfig = json.load(fp)
indexerName = indexerConfig['name']
executeSearchFunc('DELETE', '/indexers/%s' % (indexerName), {})
executeSearchFunc('POST', '/indexers', indexerConfig)
resetIndex('project-search-index')
resetIndexer('project-search-indexer')
resetIndex('event-search-index')
resetIndexer('event-search-indexer') | UTF-8 | Python | false | false | 2,121 | py | 15 | reset-search-index.py | 3 | 0.69967 | 0.695898 | 0 | 65 | 31.646154 | 101 |
arjuna-mendis/EVOBLIS_REPO_EVO | 7,653,631,766,383 | 972d7445df1388152228659172d3f01a3f658453 | d1b2363b68352ed1ff933c769a4d109100219213 | /users/silvia/circles_with_webcam.py | 7842eaa6d15bc59d010c58c1becbde0022aa85a9 | []
| no_license | https://github.com/arjuna-mendis/EVOBLIS_REPO_EVO | edaadb1dfb1e61aa9b976a8d9727048f57c8b541 | 078e5db8f7753abf9a5187239dbd0d2f4dea8f01 | refs/heads/master | 2021-07-08T09:34:10.847712 | 2017-10-04T14:27:01 | 2017-10-04T14:27:01 | 105,766,917 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
sys.path.append('../api')
sys.path.append('../settings')
from local import *
import cv2
from users.silvia.petri_dish_coordinates import petridishes
from syringe import Syringe
from datalogger import DataLogger
from evobot import EvoBot
from head import Head
from worldcor import WorldCor
usrMsgLogger = DataLogger()
evobot = EvoBot(PORT_NO, usrMsgLogger)
head = Head(evobot)
window_size = (1280, 720)
cap = cv2.VideoCapture(1)
decanol_syringe = Syringe(evobot, SYRINGES['SYRINGE4'])
decanol_syringe.home()
decanol_syr_coord = WorldCor(decanol_syringe, mode='default')
cv2.namedWindow('Window')
cap.set(3, window_size[0])
cap.set(4, window_size[1])
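# properties 3 and 4 are cv2.CAP_PROP_FRAME_WIDTH and cv2.CAP_PROP_FRAME_HEIGHT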
while True:
ret, frame = cap.read() # Read a new frame from the camera
    if ret == 0:
        print "ERROR READING INPUT"
        break
result = frame.copy()
i = 0
for petri in petridishes:
i += 1
x = petri.center[0]
y = petri.center[1]
radius_pix = 132
# the coordinates need to be transformed in order to be used for the mask
x_in_pixels = int(decanol_syr_coord.mmTopix((x, y))[0])
y_in_pixels = int(decanol_syr_coord.mmTopix((x, y))[1])
# print x_in_pixels
# print y_in_pixels
cv2.circle(result, (x_in_pixels, y_in_pixels), radius_pix, (0, 0, 255), 2)
# cv2.putText(result, "individual: %d, (160, 20), cv2.FONT_HERSHEY_SIMPLEX, .5, 255)
ph = 8
molarity = 9
cv2.putText(result, "TRACKING MODE", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, .5, 255)
cv2.putText(result, "ph: %d, mol: %d" % (ph, molarity), (150, 20), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 0, 255))
cv2.imshow('Window', result)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
| UTF-8 | Python | false | false | 1,742 | py | 175 | circles_with_webcam.py | 116 | 0.649254 | 0.606774 | 0 | 56 | 30.089286 | 113 |
marble-git/python-laoqi | 13,958,643,715,051 | 9d66a95a8b9643f899d8615a8832348d7edbaed0 | 352e47d9e028e7ab8c0f46a6b04518816559f002 | /chap5/arithmetic_sequence.py | ba2ad26cfaef63bffba9d35417ae22e411752038 | [
"MIT"
]
| permissive | https://github.com/marble-git/python-laoqi | 79b464f19b961df0f936242a44442823813bc8e5 | 74c4bb5459113e54ce64443e5da5a9c6a3052d6a | refs/heads/main | 2023-07-01T07:55:36.804089 | 2021-08-03T22:34:32 | 2021-08-03T22:34:32 | 329,666,353 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #coding:utf-8
'''
filename:arithmetic_sequence.py
chap:5
subject:12
conditions:a1,d,n
solution:sum arithmetic seq
'''
def sum_arithmetic_seq(a,d,n):
return n*a + n*(n-1)*d/2
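# S_n = n*a1 + n*(n-1)*d/2; e.g. a1=1, d=1, n=100 gives 100 + 100*99/2 = 5050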
print(sum_arithmetic_seq(1,1,100))
| UTF-8 | Python | false | false | 241 | py | 183 | arithmetic_sequence.py | 137 | 0.626556 | 0.576763 | 0 | 15 | 15.066667 | 35 |
cash2one/xai | 3,384,434,246,246 | 189892da4d18641e3af800e1c2a6ff3dc6d70df1 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_booming.py | 2909fad6faa2fbb657f8d48ed13fcbee2a3aac62 | [
"MIT"
]
| permissive | https://github.com/cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from xai.brain.wordbase.verbs._boom import _BOOM
#class header
class _BOOMING(_BOOM):
def __init__(self,):
_BOOM.__init__(self)
self.name = "BOOMING"
self.specie = 'verbs'
self.basic = "boom"
self.jsondata = {}
| UTF-8 | Python | false | false | 228 | py | 37,275 | _booming.py | 37,266 | 0.631579 | 0.631579 | 0 | 10 | 21.6 | 48 |
Guest400123064/Treectory | 4,037,269,292,722 | 159f6a4b823c8a0c28215fee243c10347f99f8ec | 247c0276c501e865140f743ac2b5dd770f717576 | /main.py | 79635f95afe71e9bda3a4d7b3657700fba36953d | []
| no_license | https://github.com/Guest400123064/Treectory | 15ec107f5abfe69fc4e9daa9e9ffdb327f7eeb21 | 42c30690d9ecb9ae5d3cced434a5a221b04bbc54 | refs/heads/master | 2020-04-29T10:08:35.942637 | 2019-03-17T11:25:02 | 2019-03-17T11:25:02 | 176,051,021 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import json
import datetime
import math
import os
def kwhToTree(kwh: float)-> float:
'''
    A simple converter that turns a kilowatt-hour figure into an equivalent number of trees.
    This is calculated from the average quantity of carbon a tree can absorb and the
    equivalent carbon emitted to generate that electricity.
    @param kwh: energy consumption in kWh
@return: equivalent "tree consumption", a float number
@throws: none
'''
ratio = 144.0 / 1
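    # e.g. 288 kWh -> 288 / 144 = 2.0 trees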
return kwh / ratio
def deltaTree(percent_save_last_week, percent_save_grand, kwh):
'''
    A calculator for the change in the number of trees, based on today's energy consumption
    compared to the user's own average and to the grand average of all users (or data from
    the web). The result is a weighted average.
    @param percent_save_last_week: percentage of energy saved (or exceeded) compared to the
    same user's previous week
    @param percent_save_grand: percentage of energy saved (or exceeded) compared to the grand
    average of all users
    @param kwh: today's total energy consumption in kWh
@return: final change in number of trees, in float
@throw: none
'''
self_compare_weight = 8
self_compare_base = 0.2
grand_compre_weight = 2
absolute_val_weight = 10
return (self_compare_weight * (self_compare_base + percent_save_last_week) +
grand_compre_weight * percent_save_grand +
absolute_val_weight * kwhToTree(kwh))
def main(argc: int, argv: list)-> int:
'''
Main function of the calculator, which reads the previous log info of trees owned by
the user and update number of trees according to recent energy consumption history.
'''
# Reed in the tree log info
with open("data\\tree_history\\TREE_LOG{}.json".format(argv[1]), 'r') as tree:
TREE_LOG = json.load(tree)
    # Do not update again if already updated today
if TREE_LOG["date"] == str(datetime.date.today()):
os.startfile(".\\visual\\WindowsNoEditor\\Demo.exe")
return 0
x_today = list()
x_average_last_week = list()
x_average_grand = list()
# Read in the recent energy consumption if history not updated
with open("data\\consumption_history\\CONSUMPTION_LOG{}.json".format(argv[1])) as consumption:
CONSUMPTION_LOG = json.load(consumption)
for category in CONSUMPTION_LOG.values():
x_today.append(category["today"])
x_average_last_week.append(category["average_previous_week"])
x_average_grand.append(category["average_grand"])
total_kwh = sum(x_today)
total_average_last_week = sum(x_average_last_week)
total_average_grand = sum(x_average_grand)
percent_save_last_week = (total_average_last_week - total_kwh) / total_average_last_week
percent_save_grand = (total_average_grand - total_kwh) / total_average_grand
last_log = TREE_LOG["num_tree"]
TREE_LOG["date"] = str(datetime.date.today())
TREE_LOG["num_tree"] = TREE_LOG["num_tree"] + deltaTree(percent_save_last_week, percent_save_grand, total_kwh)
# Update the number of trees
with open("data\\tree_history\\TREE_LOG{}.json".format(argv[1]), "w") as tree:
json.dump(TREE_LOG, tree, indent = 4)
with open("visual\\temp.txt", 'w') as tmp:
tmp.write("{:}\n{:}".format(math.floor(last_log), math.floor(TREE_LOG["num_tree"])))
os.startfile(".\\visual\\WindowsNoEditor\\Demo.exe")
return 0
if __name__ == "__main__":
main(len(sys.argv), sys.argv)
| UTF-8 | Python | false | false | 3,698 | py | 7 | main.py | 2 | 0.629259 | 0.624662 | 0 | 101 | 34.613861 | 118 |
russelldj/reconstruction_camera_vis | 10,874,857,218,232 | b12339d525be95001774995c95ae512cd2150a60 | 6bb21a0c8668b362b352d9477f80d99db8805fa4 | /flying_camera.py | 53859590e5e5cc5c9f4b9294ac743daae97e0095 | []
| no_license | https://github.com/russelldj/reconstruction_camera_vis | 68d2c51a352555d53251a9ba0296de6e6143805e | bfb3ea8aa840307c1786f7ae2614355e92d0016c | refs/heads/master | 2021-09-05T08:59:11.382059 | 2018-01-23T22:48:45 | 2018-01-23T22:48:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #This should be copied into the blender python console
from mathutils import Vector, Matrix
import numpy as np
import os
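#NOTE: this relies on Blender's interactive console namespace, which predefines
#D (bpy.data) and C (bpy.context) and imports bpy plus everything from math (pi, tan, ...)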
class flyingCam:
def __init__(self, normal, fovh=50,fovv=40):
"""this is the only non-default constructor. It takes in the world-up tuple, the horizontal fov and the vertical fov"""
        self.cam = D.objects["Camera"]
normal_vec = Vector(normal)
mag = normal_vec.magnitude
self.normal=normal_vec / mag
self.FOV=fovh
self.filepath = '/home/david/Documents/blender_outputs/stabilized_render'
bpy.data.cameras["Camera"].lens_unit = 'FOV'
bpy.data.cameras["Camera"].angle = fovh / 180.0 * pi
bpy.data.scenes["Scene"].render.resolution_x = 2000
bpy.data.scenes["Scene"].render.resolution_y = 2000 * tan((fovv * pi / 180.0)/2.0) / tan((fovh * pi/ 180.0)/2.0)
print('This is a simple class for placing cameras in a virtual blender environment')
        print('you can initialize an object as obj_name = flyingCam(world up tuple, horizontal FOV, vertical FOV)')
print('The most useful methods are:')
print('.place(height) which places the camera a specified distance above the clicked point')
print('.aim() which aims at the clicked point')
print('.pic(filepath) which takes a picture and saves it to the specified location. The filepath need only be entered once')
print('.picgl(filepath) which produces a faster but lower-quality render')
        print('Before beginning, please make sure the camera is selected as the active object.')
print('Also, you will generally want to set Viewpoint shading to material and make the material "emit"')
def aim(self):
"""This points the camera toward the cursor"""
cursor_loc = bpy.context.scene.cursor_location
heading = self.cam.location - cursor_loc
rot_quat = heading.to_track_quat('Z', 'X')
self.cam.rotation_euler = rot_quat.to_euler()
def to_cursor(self):
"""This moves the camera halfway to the cursor"""
cursor_loc = bpy.context.scene.cursor_location
heading = self.cam.location - cursor_loc
self.cam.location -= heading/2.0
rot_quat = heading.to_track_quat('Z', 'X')
self.cam.rotation_euler = rot_quat.to_euler()
def forward(self, distance=1):
"""Similar to the above, but moves a specified distance"""
#Taken from: https://blender.stackexchange.com/questions/13738/
#how-to-calculate-camera-direction-and-up-vector
up = self.cam.matrix_world.to_quaternion() * Vector((0.0, 1.0, 0.0))
cam_direction = self.cam.matrix_world.to_quaternion() * Vector((0.0, 0.0, -1.0))
        self.cam.location += cam_direction * distance
e_rot = self.cam.rotation_euler
heading_vec = Vector((e_rot.x,e_rot.y,e_rot.z))
def pic(self, filepath='/home/david/Documents/blender_outputs/stabilized_render'):
"""Takes a picture using the full render. The filepath is only needed on the first capture"""
#Taken from https://blender.
#stackexchange.com/questions/30643/how-to-toggle-to-camera-view-via-python
if filepath == '/home/david/Documents/blender_outputs/stabilized_render':
            filepath = self.filepath
self.filepath = filepath
area = next(area for area in bpy.context.screen.areas if area.type == 'VIEW_3D')
initial_view_perspective = area.spaces[0].region_3d.view_perspective
area.spaces[0].region_3d.view_perspective = 'CAMERA'
i = 0
full_file_path = filepath + str(i) + '.png'
while os.path.isfile(full_file_path):
i+=1
full_file_path = filepath + str(i) + '.png'
self.aim()
self.rot()
D.scenes["Scene"].render.filepath = filepath + str(i) + '.png'
        bpy.ops.render.render(write_still=True)
area.spaces[0].region_3d.view_perspective = initial_view_perspective
def picgl(self, filepath='/home/david/Documents/blender_outputs/stabilized_render'):
"""Takes a picture more quickly"""
#Taken from https://blender.
#stackexchange.com/questions/30643/how-to-toggle-to-camera-view-via-python
if filepath == '/home/david/Documents/blender_outputs/stabilized_render':
filepath = self.filepath
self.filepath = filepath
area = next(area for area in bpy.context.screen.areas if area.type == 'VIEW_3D')
initial_view_perspective = area.spaces[0].region_3d.view_perspective
area.spaces[0].region_3d.view_perspective = 'CAMERA'
i = 0
full_file_path = filepath + str(i) + '.png'
while os.path.isfile(full_file_path):
i+=1
full_file_path = filepath + str(i) + '.png'
self.aim()
self.rot()
D.scenes["Scene"].render.filepath = filepath + str(i) + '.png'
bpy.ops.render.opengl(write_still=True)
area.spaces[0].region_3d.view_perspective = initial_view_perspective
def place(self,offset):
"""Places the camera a specified distance above the clicked point"""
cursor_loc = bpy.context.scene.cursor_location
self.cam.location = cursor_loc + self.normal * offset
def rot(self):
"""Rotates the camera so it's up axis is as close as possible to the world up"""
num_rotations = 200
active_obj = bpy.context.scene.objects.active
active_obj_matrix = active_obj.matrix_world
z_axis = (active_obj_matrix[0][2], active_obj_matrix[1][2], active_obj_matrix[2][2])
#taken from: https://stackoverflow.com/questions/4930404/how-do-get-more-control-over-loop-increments-in-python
min_angle = 180.0
        which_step = -1
for i in range(num_rotations):
bpy.ops.transform.rotate(value= 2 * pi / float(num_rotations), axis=z_axis)
active_obj_matrix = active_obj.matrix_world
y_axis = (active_obj_matrix[0][1], active_obj_matrix[1][1], active_obj_matrix[2][1])
angle = self.angle(y_axis, self.normal.to_tuple())
if angle < min_angle:
min_angle = angle
which_step = i
bpy.ops.transform.rotate(value= ((num_rotations - 1)-which_step)*((-1.0)*(2 * pi / float(num_rotations))), axis=z_axis)#unrotating it to the last best one
def angle(self,v1, v2):
"""computes the < 180 angle between two vectors"""
#Taken from: https://stackoverflow.com/questions/39497496/angle-between-two-vectors-3d-python
        # v1 is your first vector
# v2 is your second vector
angle = np.arccos(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))
if angle < pi:
return angle
else:
return 2 * pi - angle
class cameraRig:
def __init__(self, offset_list, fov_list):
self.cam = D.objects['Camera']
self.cam_coords_list = []
self.cam_fov_list = []
for rt in offset_list:
matrix_rt = Matrix(rt)
self.cam_coords_list.append(matrix_rt)
print(matrix_rt)
for fov in fov_list:
self.cam_fov_list.append(fov)
print(self.cam_coords_list)
print(self.cam_fov_list)
def pic(self, mat):
        initial_world_matrix = self.cam.matrix_world
        for rt in self.cam_coords_list:
            new_rt = initial_world_matrix * rt
            print(new_rt)
            self.cam.matrix_world = new_rt
            # (incomplete in the original: a dangling "cam." followed here,
            # presumably meant to capture a picture at each pose)
c = flyingCam((0,-1,0),60,40)
c.rot()
rig = cameraRig([( (0, -1, 0, 0), (1, 0, 0, 0), (0, 0, 1, 0), (0, 0, 0, 1)) , ((1, 0, 0, 0), (0, 1, 0, 0), (0, 0, 1, 0), (0, 0, 0, 1))], [2,3])
| UTF-8 | Python | false | false | 7,650 | py | 2 | flying_camera.py | 2 | 0.618824 | 0.596601 | 0 | 146 | 51.39726 | 162 |
praekeltfoundation/mc2 | 1,743,756,742,612 | fdbae1448d433155b7ae37471375f3f15d9547e6 | 90539451715e39fd35d7385bb8b566113b99f5dd | /mc2/controllers/urls.py | d2fabf6d078f643461e33af155944393d752b0b9 | [
"BSD-2-Clause"
]
| permissive | https://github.com/praekeltfoundation/mc2 | 3445695964f41c52c6acaed61c31916c857eec9e | 5367a8aed309fade0f17bc72efa099b0afc76aa7 | refs/heads/develop | 2021-10-21T17:05:41.215783 | 2018-03-09T08:40:51 | 2018-03-09T08:40:51 | 45,339,984 | 0 | 0 | BSD-2-Clause | true | 2019-03-05T06:34:38 | 2015-11-01T12:42:17 | 2018-11-06T07:34:11 | 2019-03-05T06:34:24 | 5,546 | 4 | 1 | 10 | JavaScript | false | null | from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
url(
r'^base/',
include('mc2.controllers.base.urls', namespace='base')),
url(
r'^docker/',
include(
'mc2.controllers.docker.urls', namespace='controllers.docker')),
)
| UTF-8 | Python | false | false | 359 | py | 81 | urls.py | 48 | 0.62117 | 0.615599 | 0 | 15 | 22.933333 | 76 |
Dmitriysp55/E-shop-lvl2 | 16,037,407,917,986 | 150ed4a253ee5390ab764bca61f2200c7e70d396 | c7a5f6eb6a74fb634e19b21113c7e15f592af826 | /models/Product.py | 23ff937ae1cb4633dc55413111c0906fdf21323d | []
| no_license | https://github.com/Dmitriysp55/E-shop-lvl2 | b1cb7e82dd421c8647608c8009d4b6c298f29d46 | 3193c7f6c1a98b59431eac0c2099f6a446a29093 | refs/heads/main | 2023-07-08T21:00:55.110157 | 2021-08-17T10:20:54 | 2021-08-17T10:20:54 | 397,206,516 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .Money import *
class Product:
def __init__(self, id , image_path, name, price):
self._id = id
self.image_path = image_path
self._name = name
self.setPrice(price)
    def setId(self,id):
        if type(id)!= int:
            raise TypeError("Id must be of type int")
        self._id = id
def getId(self):
return self._id
    def setName(self,name):
        if type(name)!= str:
            raise TypeError("Name must be of type string")
        self._name = name
def getName(self):
return self._name
def setPrice(self,price):
if type(price)!= Money:
raise TypeError("Price must be of type Money!")
self._price = price
def getPrice(self):
return self._price
def __str__(self):
return f"\n\
Product ID: {self._id}\n\
Name: {self._name}\n\
Price: {self._price}\n"
def __repr__(self):
return str(self)
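# Usage sketch (Money's constructor is assumed; see models/Money.py):
#   repo = ProductRepositoryFactory()
#   tea = repo.getProduct('img/tea.png', 'Tea', price)  # price must be a Money instance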
class ProductRepositoryFactory:
def __init__(self):
self._lastCreatedId=0
self._products=[]
def getProduct(self,image_path,name,price):
        self._lastCreatedId+=1
        obj=Product(self._lastCreatedId,image_path,name,price)
self.save(obj)
return obj
def save(self,product):
self._products.append(product)
def saveAll(self, products):
self._products = products
def all(self):
return tuple(self._products)
def findById(self,id):
for p in self._products:
if p._id==id:
return p
def findByProductName(self, name):
for prod in self._products:
if prod._name==name:
return prod
    def deleteById(self, _id):
        for product in self._products:
            if product._id == _id:
                self._products.remove(product)
                return | UTF-8 | Python | false | false | 1,988 | py | 17 | Product.py | 16 | 0.50503 | 0.504024 | 0 | 77 | 23.844156 | 59
daquintero/voto-studio-backend | 1,271,310,347,972 | 6276462362a4b95f209f0bec686966cd372fea07 | ea01b598447a26766f1906db4367f37d1fdd280f | /voto_studio_backend/corruption/migrations/0003_auto_20190219_1636.py | a1b23cd0d673638486fa24aae3a0730e4b24c0e9 | []
| no_license | https://github.com/daquintero/voto-studio-backend | 40eaa16d679e3f3e4b7cd4fff384d87ad48ac7da | f99995b1b4531573af08387795b6b97e3c8e427e | refs/heads/master | 2023-06-26T06:12:17.885508 | 2019-04-16T13:41:17 | 2019-04-16T13:41:17 | 247,274,876 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.1.7 on 2019-02-19 16:36
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('political', '0001_initial'),
('media', '0001_initial'),
('corruption', '0002_auto_20190219_1636'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.AddField(
model_name='informativesnippet',
name='permitted_users',
field=models.ManyToManyField(blank=True, related_name='corruption_informativesnippet_related', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='informativesnippet',
name='resources',
field=models.ManyToManyField(blank=True, to='media.Resource'),
),
migrations.AddField(
model_name='informativesnippet',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='informativesnippet',
name='videos',
field=models.ManyToManyField(blank=True, to='media.Video'),
),
migrations.AddField(
model_name='financialitem',
name='images',
field=models.ManyToManyField(blank=True, to='media.Image'),
),
migrations.AddField(
model_name='financialitem',
name='permitted_users',
field=models.ManyToManyField(blank=True, related_name='corruption_financialitem_related', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='financialitem',
name='resources',
field=models.ManyToManyField(blank=True, to='media.Resource'),
),
migrations.AddField(
model_name='financialitem',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='financialitem',
name='videos',
field=models.ManyToManyField(blank=True, to='media.Video'),
),
migrations.AddField(
model_name='corruptioncase',
name='controversies',
field=models.ManyToManyField(blank=True, related_name='corruption_cases', to='political.Controversy'),
),
migrations.AddField(
model_name='corruptioncase',
name='financial_items',
field=models.ManyToManyField(blank=True, related_name='corruption_cases', to='corruption.FinancialItem'),
),
migrations.AddField(
model_name='corruptioncase',
name='images',
field=models.ManyToManyField(blank=True, to='media.Image'),
),
migrations.AddField(
model_name='corruptioncase',
name='permitted_users',
field=models.ManyToManyField(blank=True, related_name='corruption_corruptioncase_related', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='corruptioncase',
name='resources',
field=models.ManyToManyField(blank=True, to='media.Resource'),
),
migrations.AddField(
model_name='corruptioncase',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='corruptioncase',
name='videos',
field=models.ManyToManyField(blank=True, to='media.Video'),
),
]
| UTF-8 | Python | false | false | 3,837 | py | 114 | 0003_auto_20190219_1636.py | 111 | 0.599166 | 0.589002 | 0 | 100 | 37.37 | 136 |