repo_name: stringlengths 7..111 | __id__: int64 16.6k..19,705B | blob_id: stringlengths 40..40 | directory_id: stringlengths 40..40 | path: stringlengths 5..151 | content_id: stringlengths 40..40 | detected_licenses: list | license_type: stringclasses 2 values | repo_url: stringlengths 26..130 | snapshot_id: stringlengths 40..40 | revision_id: stringlengths 40..40 | branch_name: stringlengths 4..42 | visit_date: timestamp[ns] | revision_date: timestamp[ns] | committer_date: timestamp[ns] | github_id: int64 14.6k..687M ⌀ | star_events_count: int64 0..209k | fork_events_count: int64 0..110k | gha_license_id: stringclasses 12 values | gha_fork: bool 2 classes | gha_event_created_at: timestamp[ns] | gha_created_at: timestamp[ns] | gha_updated_at: timestamp[ns] | gha_pushed_at: timestamp[ns] | gha_size: int64 0..10.2M ⌀ | gha_stargazers_count: int32 0..178k ⌀ | gha_forks_count: int32 0..88.9k ⌀ | gha_open_issues_count: int32 0..2.72k ⌀ | gha_language: stringlengths 1..16 ⌀ | gha_archived: bool 1 class | gha_disabled: bool 1 class | content: stringlengths 10..2.95M | src_encoding: stringclasses 5 values | language: stringclasses 1 value | is_vendor: bool 2 classes | is_generated: bool 2 classes | length_bytes: int64 10..2.95M | extension: stringclasses 19 values | num_repo_files: int64 1..202k | filename: stringlengths 4..112 | num_lang_files: int64 1..202k | alphanum_fraction: float64 0.26..0.89 | alpha_fraction: float64 0.2..0.89 | hex_fraction: float64 0..0.09 | num_lines: int32 1..93.6k | avg_line_length: float64 4.57..103 | max_line_length: int64 7..931 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
elinlarsen/WordSegComprehension | 8,924,942,051,281 | 03566b4f2b14d976d4f347a5ce83b6a33c27f808 | 04ae5108359803ad42081f73a5c63e35614f61bf | /wordsegcomp/pipeline/translate.py | adbe735ec6296a610e0a87334e485a961ee8c421 | []
| no_license | https://github.com/elinlarsen/WordSegComprehension | 0438ec0f2426888b9c91ce11501403081735dd13 | 42c368afbb5e58e39c433151aadbdc2c6ffa80a8 | refs/heads/master | 2021-05-07T06:00:31.446826 | 2018-07-24T10:41:35 | 2018-07-24T10:41:35 | 111,671,313 | 8 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 15 11:45:59 2016
@author: elinlarsen
"""
import collections
try:
# Python 2
from itertools import izip
except ImportError:
# Python 3
izip = zip
import pandas as pd
#import file
import read
def build_phono_to_ortho(phono_file, ortho_file):
"""
    Dictionary from phono text to ortho text
    # open ortho and gold file and check that in each line the number of words matches
    # if not, skip the line and count the error,
    # then create a dictionary with key each phono token and value a dictionary of ortho tokens with their occurrence
"""
count_errors = 0
d=collections.defaultdict(dict)
with open(phono_file,'r') as phono, open(ortho_file,'r') as ortho:
for line_phono, line_ortho in izip(phono, ortho):
line_phono = line_phono.lower().split()
line_ortho = line_ortho.lower().split()
if len(line_phono) != len(line_ortho):
count_errors += 1
else:
for word_phono, word_ortho in izip(line_phono, line_ortho):
count_freq = d[word_phono]
try:
count_freq[word_ortho] += 1
                    except KeyError:
count_freq[word_ortho] = 1
print("There were {} errors".format(count_errors))
return d
def build_phono_to_ortho_representative(d):
"""
list of two dictionaries:
# 1. one of phono token and the most representative ortho token
    # 2. one linking each token to its frequency
"""
res ={}
token_freq={}
for d_key,d_value in d.items():
value_max=0
key_max = 'undefined'
for key, value in d_value.items():
if value > value_max:
value_max = value
key_max = key
res[d_key] = key_max
token_freq[value_max]=key_max
#freq_token = {v: k for k, v in token_freq.iteritems()}
freq_res=sorted(token_freq.items(),reverse=True)
return([res,freq_res])
def create_file_word_freq(path_res, dic, sub, algos,unit="syllable", freq_file="/freq-top.txt"):
"""
look at true positive (ie well-segmented words) in all algos and in all subs-corpus
from "freq-file.txt" in phonological form to orthographic form
for each result of each algo in each subcorpus, create the file in the orthographic form
Parameters :
-----------
path_res : string, absolute path to the folder that will contain the results
    dic : dictionary, created by the function build_phono_to_ortho
sub : list, list of names of the sample of corpus
algos : list, list of names of algorithms used in the package wordseg
unit : string, either syllable or phoneme, default is syllable
freq_file : string, name of the file output by wordseg containing word segmented by the algorithms ordered by frequency
"""
for SS in sub:
for algo in algos:
res_folder=path_res+"/"+SS+"/"+algo+ "/" +unit
path=res_folder +freq_file
            df_token=pd.read_table(path, sep=r'\s+', header=None, names=('Freq','phono'), index_col=None)
list_token=read.list_freq_token_per_algo(algo,SS,path_res,unit,freq_file)
d={}
for item in list_token:
if item in dic.keys():
d[item]=dic[item]
df_dic_token=pd.DataFrame(list(d.items()),columns=['phono', 'Type'])
df_dic_token.columns=['phono', 'Type']
s=pd.merge(df_token, df_dic_token, how='inner', on=['phono'])
del s['phono']
s.drop_duplicates(subset='Type', keep='first',inplace=True)
path_out=res_folder+ "/freq-words.txt"
s.to_csv(path_out, sep='\t', index=False)
return(s)
| UTF-8 | Python | false | false | 3,834 | py | 44 | translate.py | 34 | 0.592071 | 0.586333 | 0 | 102 | 36.588235 | 123 |
haakonvt/cognite-sdk-python | 541,165,890,438 | 8ce5f960ef8ab295cb73d3b4a006b2a45a7a6823 | 6eefa36c107182bfa403b9bebf599a6736f75ae5 | /cognite/client/data_classes/transformations/notifications.py | 29cfb0850249b5fe7188ef9a0c0b291609944b52 | [
"Apache-2.0"
]
| permissive | https://github.com/haakonvt/cognite-sdk-python | 056afc9ab46af73e1348099c98c7cf254bdfa293 | 70200ce771d630edb5f918a33a10381ba5112417 | refs/heads/master | 2022-09-08T07:41:30.667283 | 2022-08-01T11:17:22 | 2022-08-01T11:17:22 | 213,588,099 | 0 | 0 | Apache-2.0 | true | 2022-08-25T13:15:59 | 2019-10-08T08:25:44 | 2022-06-23T16:38:06 | 2022-08-04T00:43:47 | 13,482 | 0 | 0 | 0 | Python | false | false | from typing import TYPE_CHECKING, Dict, Optional, Union, cast
from cognite.client.data_classes._base import CogniteFilter, CogniteResource, CogniteResourceList
if TYPE_CHECKING:
from cognite.client import CogniteClient
class TransformationNotification(CogniteResource):
"""The transformation notification resource allows configuring email alerts on events related to a transformation run.
Args:
id (int): A server-generated ID for the object.
transformation_id (int): Transformation Id.
transformation_external_id (str): Transformation external Id.
destination (str): Email address where notifications should be sent.
created_time (int): Time when the notification was created.
last_updated_time (int): Time when the notification was last updated.
cognite_client (CogniteClient): The client to associate with this object.
"""
def __init__(
self,
id: int = None,
transformation_id: int = None,
transformation_external_id: str = None,
destination: str = None,
created_time: int = None,
last_updated_time: int = None,
cognite_client: "CogniteClient" = None,
):
self.id = id
self.transformation_id = transformation_id
self.transformation_external_id = transformation_external_id
self.destination = destination
self.created_time = created_time
self.last_updated_time = last_updated_time
self._cognite_client = cast("CogniteClient", cognite_client)
@classmethod
def _load(cls, resource: Union[Dict, str], cognite_client: "CogniteClient" = None) -> "TransformationNotification":
instance = super(TransformationNotification, cls)._load(resource, cognite_client)
return instance
def __hash__(self) -> int:
return hash(self.id)
class TransformationNotificationList(CogniteResourceList):
_RESOURCE = TransformationNotification
class TransformationNotificationFilter(CogniteFilter):
"""
Args:
transformation_id (Optional[int]): Filter by transformation internal numeric ID.
transformation_external_id (str): Filter by transformation externalId.
destination (str): Filter by notification destination.
"""
def __init__(
self, transformation_id: Optional[int] = None, transformation_external_id: str = None, destination: str = None
):
self.transformation_id = transformation_id
self.transformation_external_id = transformation_external_id
self.destination = destination
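
# Illustrative usage sketch (values below are hypothetical, not taken from the SDK):
#   flt = TransformationNotificationFilter(
#       transformation_external_id="my-transformation",
#       destination="alerts@example.com",
#   )
#   # such a filter would then be passed to a list call to narrow the results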
| UTF-8 | Python | false | false | 2,585 | py | 83 | notifications.py | 69 | 0.69207 | 0.69207 | 0 | 67 | 37.58209 | 122 |
CJFJack/cmdb | 6,682,969,144,721 | 0de4d0d12b4a57ab76f6471d597e7d8d9b107e95 | 193a4ca08daeb3b3fc3a65ee32d2699ac59e07f3 | /cmdb_update_loop_log.py | df02843e13490c00ef8aef89061ef7a851b11c6e | []
| no_license | https://github.com/CJFJack/cmdb | e3a512b640930407b63e8786ca9a13b32e7aefc1 | fc8d8bf2462c8f4f7e685aed4fd6acc1e86b92ac | refs/heads/master | 2020-12-12T08:14:44.316317 | 2020-01-15T13:27:25 | 2020-01-15T13:27:25 | 228,413,223 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- encoding: utf-8 -*-
"""外部的脚本,通过读取redis中
的cmdb:log的key记录,来更新日志文件到
前端页面中
"""
import time
import json
import logging
from logging.handlers import RotatingFileHandler
import traceback
from concurrent import futures
import redis
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cmdb.settings")
import django
django.setup()
from cmdb.settings import REDIS_HOST
from cmdb.settings import REDIS_PORT
from cmdb.settings import REDIS_PASSWORD
# from cmdb import asgi
from channels import Channel
redis_client = redis.Redis(
host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD, db=2, charset="utf-8", decode_responses=True)
# cl = asgi.get_channel_layer()
# default expiry interval is five minutes
DEFAULT_INTERVAL = 60 * 5
MAX_WORKER = 15
class LoopLog(object):
"""记录工单流程的log"""
def __init__(self):
# create logger
self.logger = logging.getLogger('cmdb_update_loop')
self.logger.setLevel(logging.DEBUG)
if not self.logger.handlers:
# create file handler and set level to debug
# fh = logging.FileHandler('/var/log/cmdb_update_loop.log', 'a', encoding='UTF-8')
# fh.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter
# fh.setFormatter(formatter)
# create handler
rh = RotatingFileHandler('/var/log/cmdb_update_loop.log', maxBytes=1000 * 1000 * 10, backupCount=5)
rh.setLevel(logging.DEBUG)
rh.setFormatter(formatter)
# add fh to logger
# self.logger.addHandler(fh)
self.logger.addHandler(rh)
looplog = LoopLog()
def yield_cmdb_log(reply_channel_name, uuid, row, heartbeat):
"""使用生成器获取从row
开始的log的内容
"""
# filename = '/var/log/nginx/access.log'
filename = os.path.join('/var/log/cmdb_hotupdate/', uuid)
if not os.path.isfile(filename):
looplog.logger.info('thread: %s end process no such file' % (reply_channel_name))
return None
lines = ''
with open(filename) as fp:
fp.seek(row)
for line in iter(fp.readline, ''):
# time.sleep(.1)
lines += line
else:
Channel(reply_channel_name).send({'text': lines}, immediately=True)
row = fp.tell()
    # update the offset recorded for this reply_channel_name
reply_channel_name_dic = {'uuid': uuid, 'row': row, 'heartbeat': heartbeat}
name = 'hotupdate:cmdb:log'
redis_client.hset(name, reply_channel_name, json.dumps(reply_channel_name_dic))
# print('thread: %s end process' % (reply_channel_name))
looplog.logger.info('thread: %s end process' % (reply_channel_name))
return None
def main():
name = 'hotupdate:cmdb:log'
# all_cmdb_log_reply_channels = [x for x in redis_client.hgetall(name)]
while True:
duplicate_reply_channels_name = []
to_do_map = {}
with futures.ThreadPoolExecutor(max_workers=MAX_WORKER) as executor:
looplog.logger.info('*** starting a new round thread ***')
for reply_channel_name, reply_channel_info in redis_client.hgetall(name).items():
reply_channel_info = json.loads(reply_channel_info)
uuid = reply_channel_info.get('uuid')
row = reply_channel_info.get('row')
heartbeat = reply_channel_info.get('heartbeat')
current_time = int(time.time())
if current_time - heartbeat > DEFAULT_INTERVAL:
duplicate_reply_channels_name.append(reply_channel_name)
else:
looplog.logger.info('Scheduled thread %s' % (reply_channel_name))
future = executor.submit(yield_cmdb_log, reply_channel_name, uuid, row, heartbeat)
to_do_map[future] = reply_channel_name
looplog.logger.info('Current to_do_map list is %s' % (to_do_map))
done_iter = futures.as_completed(to_do_map)
for future in done_iter:
try:
future.result()
except Exception as exc:
print('------ find Exception -------', to_do_map[future])
traceback.print_exc()
        # remove the expired reply channels
for rc in duplicate_reply_channels_name:
            # Actively close the websocket connection; healthy clients will
            # reconnect automatically through the browser.
            # reply_channels left behind by abnormal disconnects are invalid
            # and still need to be removed from Redis.
            # Calling send({"close": True}) triggers disconnect, which in turn
            # deletes the reply_channel entry from Redis.
Channel(rc).send({"close": True})
redis_client.hdel(name, rc)
        # sleep for one second
time.sleep(1)
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 5,055 | py | 493 | cmdb_update_loop_log.py | 301 | 0.592345 | 0.587743 | 0 | 159 | 29.069182 | 111 |
vivekaxl/LexisNexis | 19,585,050,902,119 | 2e77b398a0f9a461b4cc9f20a65cd97ac20c6846 | 1a2ca64839723ede3134a0781128b0dc0b5f6ab8 | /ExtractFeatures/Data/kracekumar/2.py | 3ef19b303974fd157f047b3205121b0b92244c56 | []
| no_license | https://github.com/vivekaxl/LexisNexis | bc8ee0b92ae95a200c41bd077082212243ee248c | 5fa3a818c3d41bd9c3eb25122e1d376c8910269c | refs/heads/master | 2021-01-13T01:44:41.814348 | 2015-07-08T15:42:35 | 2015-07-08T15:42:35 | 29,705,371 | 9 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pygame
WINDOW = WIDTH, HEIGHT = 400, 300
def main():
pygame.init()
screen = pygame.display.set_mode(WINDOW)
pygame.display.set_caption("Freed.in Demo")
    while True:
        # pump the event queue so the window stays responsive and can be closed
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                return
        pygame.display.flip()
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 259 | py | 936 | 2.py | 924 | 0.610039 | 0.586873 | 0 | 14 | 17.5 | 47 |
ezember/Ezekiel-Bustillo | 14,422,500,192,447 | 757e43a1eebd72d96b0d98623ce8ce4ebf36ea92 | d4a98d50c3089ad3a96ae33ef4614b40d9ed8636 | /buQ2p2.39.py | c52255cd3e3852d40464fd80e811e3f0966d6b85 | []
| no_license | https://github.com/ezember/Ezekiel-Bustillo | e418d3a3cfc0e245f27e87ee64880af5c3ffee52 | fb0fa49237636329e177f9ba30c0421a7828f2ee | refs/heads/master | 2021-04-01T05:52:15.726058 | 2020-03-18T08:06:28 | 2020-03-18T08:06:28 | 248,161,153 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """ Ezekiel M. Bustillo
DATALOGO Lab04
Feb. 19, 2020
I have neither received nor provided any help on this (lab) activity,
nor have I concealed any violation of the Honor Code.
"""
from abc import ABCMeta, abstractmethod
class Polygon(metaclass=ABCMeta):
def __init__(self, lengths):
self.no_sides = len(lengths)
self.lengths = [0] * self.no_sides
self.val(lengths)
def numsides(self):
        print('The polygon has ' + str(self.no_sides) + ' sides.')
def val(self, lengths):
a = 0
while a < len(lengths):
self.lengths[a] = lengths[a]
a += 1
@abstractmethod
def area(self):
pass
@abstractmethod
def perimeter(self):
pass
class Pentagon(Polygon):
def __init__(self, lengths):
super().__init__(lengths)
        assert self.no_sides == 5, "a pentagon needs 5 sides"
    def area(self):
x, y = self.lengths[0], self.lengths[1]
return x * y
    def perimeter(self):
        # sum every side instead of unpacking exactly two values
        return sum(self.lengths)
class Hexagon(Polygon):
def __init__(self, lengths):
super().__init__(lengths)
        assert self.no_sides == 6, "a hexagon needs 6 sides"
def area(self):
x, y = self.lengths[0], self.lengths[1]
return x * y
    def perimeter(self):
        # sum every side instead of unpacking exactly two values
        return sum(self.lengths)
if __name__=="__main__":
| UTF-8 | Python | false | false | 1,403 | py | 11 | buQ2p2.39.py | 10 | 0.54526 | 0.531718 | 0 | 49 | 26.632653 | 73 |
AmazingWood/simple-two-layers-mlp | 3,710,851,781,784 | 801b8fd0a80fd240c0a03ebab671aaf54711b7ac | b7ca2afdfaa002b0260c12b4be40f8093b82fc9e | /buildConfig.py | 5b3369f8c7e0559b4f6b97d49366f499e2f4d1c1 | []
| no_license | https://github.com/AmazingWood/simple-two-layers-mlp | 43968824b59ac4e6b0901ace0b15faec940b4594 | 7946884202654d089b62d035e9667df15c619008 | refs/heads/master | 2020-12-11T07:01:57.372806 | 2020-01-14T09:09:55 | 2020-01-14T09:09:55 | 233,795,463 | 0 | 0 | null | true | 2020-01-14T08:41:00 | 2020-01-14T08:40:59 | 2020-01-10T08:51:11 | 2020-01-10T08:51:09 | 69 | 0 | 0 | 0 | null | false | false | class buildConfig(object):
def __init__(self,isDebug):
self.incDirs=["/home/robin/installFromSource/boost_1_72_0","/home/robin/installFromSource/eigen-git-mirror"]
self.linkDir=["/home/robin/installFromSource/boost_1_72_0/stage/lib"]
self.linkOpt=["pthread" ,"m","dl"]
self.CC="gcc-9"
self.CXX="g++-9"
self.CCFLAGS=['-std=c++17', '-Wall',"-fpermissive"]
self.mklroot="/home/robin/intel/mkl"
self.isDebug=int(isDebug)
if(self.isDebug==1):
self.preDifines=['-DDEBUG']
self.CCFLAGS.append('-g')
else:
self.preDifines=['-DNDEBUG']
self.CCFLAGS.append('-O3')
class MlpBuildConfig(buildConfig):
def __init__(self,isDebug):
buildConfig.__init__(self,isDebug)
if(self.isDebug==1):
self.targetName="mlpDebug"
else:
self.targetName="mlpRelease"
self.incDirs.append(self.mklroot+"/include")
self.linkDir.append(self.mklroot+"/lib/intel64")
        self.linkOpt.extend(["mkl_intel_lp64", "mkl_sequential", "mkl_core"])
self.preDifines.append("EIGEN_USE_MKL_ALL")
#class LayersBuildConfig(buildConfig):
| UTF-8 | Python | false | false | 1,214 | py | 11 | buildConfig.py | 9 | 0.596376 | 0.580725 | 0 | 33 | 35.787879 | 116 |
illulia/Baekjoon | 2,628,520,000,718 | 380c4633e65fd6a3f070ee59114d70298e9a9282 | ed60f34a00c24b5d6115c995ec796080d1c07687 | /python/5543.py | 13f4357cc35ec5865ca11711b7b729a0242dc37a | []
| no_license | https://github.com/illulia/Baekjoon | fe6613c0e344e081bb42d5b06396363e7f9614d2 | 0b816bd271028e1128a8c092aedf80cae5e5fbd8 | refs/heads/master | 2021-05-18T17:31:06.897050 | 2020-10-09T21:57:19 | 2020-10-09T21:57:19 | 251,338,754 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | b = 2000
d = 2000
for i in range(3):
a = int(input())
if a < b:
b = a
for i in range(2):
c = int(input())
if c < d:
d = c
print(b+d-50)
| UTF-8 | Python | false | false | 175 | py | 28 | 5543.py | 28 | 0.417143 | 0.348571 | 0 | 11 | 14.272727 | 20 |
jordy33/python_proxy | 4,252,017,645,687 | 54759fec835d1d78980c52c9bcd4ee3173472223 | 1ea03ebaa0fcd8257f140db4ad88d8ed1002d3a5 | /proxy/__init__.py | 0709b421c670fe97181827d93e7c14c529b27ecc | []
| no_license | https://github.com/jordy33/python_proxy | 208ca09c6c5c8a5a6e825decc2f29bd9c80a2251 | 572b4eaf0f830f4531b51a3533c783a046114e97 | refs/heads/main | 2023-02-20T03:55:53.120077 | 2021-01-22T00:10:07 | 2021-01-22T00:10:07 | 331,789,277 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ###############################################################################
# Copyright (C) 2017 Aludirk Wong #
# #
# This file is part of TCP Proxy. #
# #
# TCP Proxy is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published #
# by the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# TCP Proxy is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with TCP Proxy. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
import argparse
import pkg_resources
import signal
import socket
import threading
from . import logger
from .proxy import Proxy
shutdownEvent = threading.Event()
def parseArg():
"""Parse the progrm arguements.
Returns:
argparse.Namespace: The parsed attributes.
"""
parser = argparse.ArgumentParser(description="TCP proxy.")
parser.add_argument("upHost",
help="the host of the upstream server for the proxy.",
metavar="upstream-host")
parser.add_argument("upPort",
type=int,
help="the port of the upstream server for the proxy.",
metavar="upstream-port")
parser.add_argument("-H",
"--host",
default="",
help="the host of the downstream server for the proxy, default is \"\".",
metavar="downstream-host",
dest="downHost")
parser.add_argument("-p",
"--port",
default=5354,
type=int,
help="the port of the downstream server for the proxy, default is 5354.",
metavar="downstream-port",
dest="downPort")
parser.add_argument("-m",
"--select-model",
default="select",
choices=["epoll", "kqueue", "select"],
help=("the I/O select method for the socket connections, "
"supports [\"epoll\", \"kqueue\", \"select\"], "
"default is \"select\". "
"This is platform dependant feature, "
"some models may not support on your platform."),
metavar="model",
dest="select")
parser.add_argument("-v",
"--version",
action="version",
version="%(prog)s {}".format("1.0"))
return parser.parse_args()
def shutdownHandler(signal, frame):
"""Handler for shutdown process.
Args:
signal (int): The signal number.
frame (frame): Current stack frame.
"""
shutdownEvent.set()
def main():
"""Main function."""
# Parse arguments.
args = parseArg()
# Set up shutdown handler.
signal.signal(signal.SIGINT, shutdownHandler)
# Set up logger.
logger.setUpLogger()
try:
# Start the proxy.
proxy = Proxy(args.upHost,
args.upPort,
args.downHost,
args.downPort,
shutdownEvent,
args.select)
proxy.daemon = True
proxy.start()
logger.info("Proxy established: upstream ({}:{}) <-> downstream ({}:{})".
format(args.upHost, args.upPort, args.downHost, args.downPort))
while proxy.is_alive():
proxy.join(0.05)
return proxy.err
except socket.gaierror as e:
logger.critical("Fail to initialize proxy (socket.gaierror: {}).".format(e))
except RuntimeError as e:
logger.critical("\"{}\" is not supported.".format(e))
return 1
| UTF-8 | Python | false | false | 4,797 | py | 3 | __init__.py | 2 | 0.452991 | 0.449031 | 0 | 125 | 37.376 | 97 |
rady1337/FirstYandexLyceumCourse | 12,644,383,756,558 | 20d3e91dc93e8928791ad7310eb190aec7676a1a | 338062cc2bb422f1364fd18ad5e721f6f713907a | /27. Обработка коллекций. Потоковый ввод sys.stdin/Домашняя работа/Есть ли 0.py | 2135a4c06f9f7d5ec36e994627e8daf11e63d7ba | []
| no_license | https://github.com/rady1337/FirstYandexLyceumCourse | f3421d5eac7e7fbea4f5e266ebeb6479b89941cf | 0d27e452eda046ddd487d6471eeb7d9eb475bd39 | refs/heads/master | 2022-06-17T03:07:51.017888 | 2020-05-12T22:17:34 | 2020-05-12T22:17:34 | 263,459,364 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from sys import stdin as st
print("0" in st.read().split())
| UTF-8 | Python | false | false | 61 | py | 411 | Есть ли 0.py | 368 | 0.672131 | 0.655738 | 0 | 3 | 19.333333 | 31 |
kdriver/planes | 11,656,541,276,321 | 3f7ee24706ce37d305ed230baee8829aac447bd9 | d25fd9631f3567e4d1508c4d1d0398048a92f8d4 | /mk2/detect.py | 7248862dbdc8887591e1875c7223878b7451f017 | []
| no_license | https://github.com/kdriver/planes | d88942b3dbf84ddcca6e25d073163eff58c39b62 | 0fed8c84df5576d55ab07eee7502d18661650351 | refs/heads/master | 2023-05-25T12:53:28.305503 | 2023-05-14T10:57:37 | 2023-05-14T10:57:37 | 142,787,180 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """ A program to detect planes and tweet """
import json
import time
import math
import os
import zipfile
import requests
import sqldb
import say
from vrs import Vrs
from loggit import loggit,init_loggit
from loggit import BOTH
from loggit import TO_SCREEN
# from loggit import TO_FILE
from loggit import GREEN_TEXT
from loggit import YELLOW_TEXT
from loggit import CYAN_TEXT
#from loggit import RED_TEXT as RED_TEXT
from Haversine import Haversine
from reference_data import update_reference_data
from reference_data import init_reference_data
from reference_data import add_reference_data
from reference_data import flush_suppression_list
from reference_data import is_suppressed
from twitter import tweet
from web import start_webserver
from web import update_plane_data
from kml import kml_doc
from kml import write_kmz
from kml import three_d_vrs
from my_queue import my_queue
from my_queue import INFINITE
from home import home
from blessed import Terminal
term = Terminal()
all_planes={}
# planes with closest approach to home of less that TWEET_RADIUS miles will be tweeted
TWEET_RADIUS=2.0
osm = requests.Session()
dump_planes = False
dump_icao = None
dump_time = 0
def get_term_width()->int:
""" Return the current terminal width """
return term.width
def get_time(clock=None):
    """ Return an ascii string of the given time (default: now) """
    # note: a default of time.time() would be evaluated once at import and go stale
    if clock is None:
        clock = time.time()
    return time.asctime(time.localtime(clock))
def enrich(icao_hex, the_plane):
""" Given the icao hex for the plane, enrich the plane data from reference data """
if is_suppressed(icao_hex):
return
try:
result = add_reference_data(icao_hex, the_plane)
except Exception as my_exp:
print(f"enrich exception {my_exp}")
return
# A tilde in the hex indicates a TIS-B record
# Dumping planes around the record gives a chance to see which plane it is
if result is None and '~' not in icao_hex:
loggit("could not enrich plane {}".format(icao_hex))
return
if result is None and '~' in icao_hex:
# loggit("found tilde in icao , trigger a dump of planes around {}".format(icao))
global dump_time, dump_icao, dump_planes
dump_time = time.time() + 60
dump_icao = icao_hex
        dump_planes = True  # arm the delayed dump checked in the main loop
the_plane['enriched'] = 1
def get_place(clat, clon):
""" Use the Open Street Map API to look up the nearest place"""
place = "unknown"
try:
req = "https://nominatim.openstreetmap.org/reverse?format=json&lat={}&lon={}".format(
clat, clon)
resp = osm.get(url=req)
pos = json.loads(resp.text)
if 'display_name' in pos:
place = pos['display_name']
else:
place = "somewhere"
except Exception as e:
loggit("could not access OSM API {} ".format(e))
return None
return place[0:90]
def nearest_point(the_plane):
""" The plane has reached the nearest point to HOM,
so collect the report data, print it and insert it into the sql database
Also write out the kml file with tracked path.
and if its within TWEET_RADIUS - tweet it too """
pd = "{} {}".format(get_time(the_plane["closest_time"]),the_plane['icao'])
for item in ['icao_country','closest_miles','flight','tail','track','alt_baro','Owner','Manufacturer','plane','route']:
if item in the_plane and the_plane[item] is not None:
if item in {'closest_miles','track'}:
pd = pd + " {:>7.2f} ".format(the_plane[item])
elif item in {'flight','tail','alt_baro'}:
pd = pd + "{0:7} ".format(the_plane[item])
elif item in { 'icao_country'}:
pd = pd + f" {the_plane['icao_country']:<15}"
else:
pd = pd + " {:<} ".format(the_plane[item])
else:
if item in ['closest_miles','track','alt_baro']:
the_plane[item] = 0
else:
the_plane[item] = "unknown"
try:
sqldb.insert_data((time.time(),the_plane["flight"],the_plane["icao"],the_plane["tail"],the_plane['plane'],the_plane["alt_baro"],the_plane["track"],the_plane["closest_miles"],the_plane["closest_lat"],the_plane["closest_lon"]))
except Exception as e:
loggit("could not insert data iinto planes record {}".format(e))
name=''
if 'tail' in the_plane:
name=the_plane['tail']
else:
name='unknown'
if 'alt_baro' not in the_plane:
the_plane["alt_baro"] = "0"
kml_text = kml_doc(the_plane['closest_lon'],the_plane['closest_lat'], -1.9591988377888176,50.835736602072664, the_plane["alt_baro"],name,the_plane['closest_miles'],the_plane["tracks"])
#redo_miles = Haversine()
#with open("kmls/{}.kml".format(name),"w") as f:
# f.write(kml_text)
# f.close()
with zipfile.ZipFile("kmls/{}.kmz".format(name),"w") as zf:
zf.writestr("{}.kml".format(name),kml_text)
zf.close()
if 'expired' in the_plane:
pd = pd + ' expired '
linelen=145
pd = pd[0:linelen]
    if len(pd) < linelen:
pd = pd +" "*(linelen-len(pd))
place = get_place(the_plane['closest_lat'],the_plane['closest_lon'])
if place is None:
place = " API failed "
the_plane['reported'] = 1
width = get_term_width()-1
try:
if 'miles' not in the_plane:
pd = pd + " " + json.dumps(the_plane)
loggit(pd,BOTH,CYAN_TEXT)
return
if the_plane['miles'] < TWEET_RADIUS:
# Twitter suspended the account tweet(pd)
pd = pd + " : " + place
loggit(pd[:width],BOTH,GREEN_TEXT)
txt = "the_plane overhead "
if 'Owner' in the_plane:
txt = txt + " " + the_plane['Owner']
m = int(the_plane['miles'])
if 'alt_baro' in the_plane:
if the_plane['alt_baro'] != 'ground':
h = math.floor(int(the_plane['alt_baro'])/100)
if h > 9:
txt = txt + " at " + str(h/10) + " thousand feet"
else:
txt = txt + " at " + str(h) + " hundred feet"
else:
txt = txt + " on ground"
txt = txt + " distance {:>1.1f} miles".format(m)
say.speak(txt)
else:
pd = pd + " : " + place
if 'plane' in the_plane:
if 'DA42' in the_plane['plane']:
loggit(pd[:width],BOTH,YELLOW_TEXT)
else:
loggit(pd[:width],BOTH,CYAN_TEXT)
#loggit("{}".format(the_plane["tracks"].get_values()),BOTH,CYAN_TEXT)
except Exception as e:
loggit("reporting failed {}".format(e))
# Read the file produced by dump1090 and cache each plane seen so we can track its position relative to home
# and check if it gets close.
def read_planes():
""" read in the planes from dump1090 and cache the data """
try:
with open('/var/run/dump1090-fa/aircraft.json', 'r') as f:
try:
data = json.load(f)
            except Exception:
                print("error - can't parse aircraft.json")
                return
global all_planes
planes = data["aircraft"]
#num_planes = len(planes)
#print("num planes {}".format(num_planes))
for plane in planes:
start_miles = 1000
miles = start_miles
try:
icao = plane["hex"].strip().upper()
if icao not in all_planes:
all_planes[icao] = {"icao": icao, 'max_miles': 0.0, 'closest_miles': start_miles,
'closest_lat': 0.0, 'closest_lon': 0.0, 'miles': start_miles, 'tracks': my_queue(INFINITE,icao)}
this_plane = all_planes[icao]
this_plane['touched'] = time.time()
except Exception as e_name:
print(f"no icao code in plane record {e_name} ")
continue
for attr in ['lon', 'lat', 'flight', 'track', 'alt_baro']:
if attr in plane:
this_plane[attr] = plane[attr]
if 'lat' in this_plane and 'lon' in this_plane and 'alt_baro' in this_plane:
try:
hv = Haversine(
home, [this_plane["lon"], this_plane["lat"]])
miles = hv.miles
bearing = int(hv.bearing)
this_plane['current_miles'] = miles
this_plane['tracks'].add(
{'miles': miles, "lon": this_plane["lon"], "lat": this_plane["lat"], "alt": this_plane["alt_baro"]})
if miles < this_plane['miles']:
this_plane['closest_lat'] = float(this_plane['lat'])
this_plane['closest_lon'] = float(this_plane['lon'])
this_plane['closest_alt'] = this_plane["alt_baro"]
this_plane['closest_miles'] = miles
this_plane["closest_time"] = time.time()
if this_plane['miles'] == start_miles:
#loggit("{:<7s} new plane @ {:<7.2f} miles".format(icao,miles),TO_FILE)
pass
if 'reported' in this_plane:
del this_plane['reported']
this_plane['miles'] = miles
if miles > this_plane['max_miles']:
this_plane['max_miles'] = miles
this_plane['max_lon'] = this_plane['lon']
this_plane['max_lat'] = this_plane['lat']
if isinstance(this_plane["alt_baro"], int):
vrs.update_entry(
bearing, this_plane["lat"],
this_plane["lon"],
this_plane["alt_baro"],
miles,
this_plane["icao"])
except Exception as e:
print("oh dear haversine {} {}".format(e, json.dumps(this_plane)))
continue
if miles < 200 and 'enriched' not in this_plane:
enrich(icao, this_plane)
if (miles - this_plane['closest_miles']) > (this_plane['closest_miles']*0.1):
if 'reported' not in this_plane and this_plane['closest_miles'] < 50:
nearest_point(this_plane)
except Exception as e_name:
print(f" error in read_planes {e_name}\n")
def dump_the_planes(icao_hex):
"""Called to dump planes with similar height and distance"""
loggit(f"Dump planes with similar distance to {icao_hex}")
if icao_hex not in all_planes:
loggit(f"could not find {icao_hex} in all_planes")
return
target = all_planes[icao_hex]
if 'miles' not in target:
loggit("could not find 'miles' in all_planes")
return
if 'lat' not in target or 'lon' not in target:
loggit("target plane does not have both lat and lon - exit")
return
ll_target = [target['lat'], target['lon']]
# distance = int(target['miles'])
alt = int(target['alt_baro'])
# loggit("Dump icao {} distance {}, {}".format(icao, distance, json.dumps(target, indent=4)))
target_time = target['touched']
    for this_plane in all_planes.values():
proximity = 100
if 'lat' in this_plane and 'lon' in this_plane:
ll_this = [this_plane['lat'], this_plane['lon']]
hv = Haversine(ll_target, ll_this)
proximity = hv.miles
h_diff = 1001
if 'alt_baro' in this_plane and this_plane['alt_baro'] != 'ground':
h_diff = abs(alt - int(this_plane['alt_baro']))
if proximity < 20 and h_diff < 1000:
txt = "{" + " hex:'{}',proximity:'{:.2f}'".format(icao, proximity)
for item in ['icao', 'alt_baro', 'miles', 'track', 'tail', 'lat', 'lon']:
if item in this_plane:
txt = txt + ",{}:'{}'".format(item, this_plane[item])
txt = txt + ",version:'1'"
txt = txt + ",tdiff:'{:.2f}', tn:'{}' ".format(
(target_time - this_plane['touched']), get_time()) + "},"
loggit(txt)
init_loggit("output.txt","/tmp/debug.txt")
init_reference_data()
update_reference_data()
start_webserver()
last_tick = time.time()
last_log = last_tick
sqldb.attach_sqldb()
vrs = Vrs("vrs_data.sqb")
while 1:
read_planes()
delete_list = []
now = time.time()
for icao,record in all_planes.items():
if (now - record['touched']) > 60:
delete_list.append(icao)
for plane in delete_list:
p = all_planes[plane]
if 'reported' not in p and 'miles' in p and p['miles'] < 50:
p['expired'] = 1
nearest_point(p)
write_kmz(home, p)
del all_planes[plane]
# check to see if we need to referesh any of the online databases
update_reference_data()
# update the cache used by the HTTP query to generate a table ( default port 4443 )
update_plane_data(all_planes)
#triggered if we have seen a tilde encoded in the icao hex
if dump_planes:
if now > dump_time:
dump_the_planes(dump_icao)
dump_planes = False
if os.path.exists("check_icao"):
with open('check_icao') as f:
s = f.read()
dump_the_planes(str(s).strip().upper())
os.remove('check_icao')
# every 60 seconds
if (now - last_tick) > 60:
if (now - last_log) > 300:
loggit("{} planes being tracked ".format(len(all_planes)), TO_SCREEN)
last_log = now
flush_suppression_list()
# write out a kml file with all the t planes we can see
three_d_vrs(all_planes)
last_tick = now
time.sleep(5)
| UTF-8 | Python | false | false | 14,331 | py | 33 | detect.py | 26 | 0.537436 | 0.527877 | 0 | 387 | 36.031008 | 233 |
leandreboris/XMatosBackend | 4,964,982,242,822 | c5276d8a0d8db5ee89a004feab741e40ee6cd959 | 5099bdf6ee8485e281f834ddcd71e2827bcce4d6 | /Profile_updates/apps.py | abae4ee4473ed1189704eed80fb09be52112b522 | []
| no_license | https://github.com/leandreboris/XMatosBackend | 5c57a491ec3ffbe195389b02d0733ff31d66a243 | 2118bf08fbc9fdf099d35a1e8da5715610fd2c15 | refs/heads/master | 2023-08-11T08:28:46.106004 | 2021-09-28T10:03:31 | 2021-09-28T10:03:31 | 390,736,624 | 0 | 0 | null | false | 2021-08-06T15:59:54 | 2021-07-29T13:40:48 | 2021-07-29T16:20:43 | 2021-08-06T15:59:41 | 175 | 0 | 0 | 1 | Python | false | false | from django.apps import AppConfig
class ProfileUpdatesConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'Profile_updates'
| UTF-8 | Python | false | false | 161 | py | 24 | apps.py | 24 | 0.763975 | 0.763975 | 0 | 6 | 25.833333 | 56 |
ghdus4185/SWEXPERT | 11,141,145,200,089 | 52bc4462d88f81b8733b5f1fd29e86c487d58514 | 3e85618c79a1a934fec543e1327e772ca081a5b9 | /N1486.py | 1564c24d32ff1e1c9575221b39304ca0b3f96357 | []
| no_license | https://github.com/ghdus4185/SWEXPERT | 72d79aa4a668452327a676a644b952bab191c79b | 4dc74ad74df7837450de4ce55526dac7760ce738 | refs/heads/master | 2020-07-16T18:31:22.153239 | 2019-12-20T04:18:30 | 2019-12-20T04:18:30 | 205,843,190 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
sys.stdin = open('input.txt', 'r')
T = int(input())
for tc in range(1, T+1):
N, B = map(int, input().split())
h = list(map(int, input().split()))
subset_list = []
minV = 10000000
    # enumerate every subset of heights via bitmask; keep the smallest sum >= B
    for i in range(1, 2**N):
res = 0
for j in range(N):
if i & (1 << j) != 0:
res += h[j]
if res >= B:
if minV > res:
minV = res
print('#{} {}'.format(tc, minV - B)) | UTF-8 | Python | false | false | 470 | py | 142 | N1486.py | 140 | 0.417021 | 0.385106 | 0 | 20 | 22.55 | 40 |
ChadDiaz/cs-Hash-Tables-I-project-5.21.21 | 10,943,576,671,492 | e78845da445e86fa770494dd0778e24df1cfdc32 | 27c6db187a103a2b83ad403af8fd75cc2c10620c | /csCommonStrategyForHashCollisions.py | 372e01b852afcaa0f94fff4e0ab679683fed4e84 | []
| no_license | https://github.com/ChadDiaz/cs-Hash-Tables-I-project-5.21.21 | 9814bcfa5d54d40a7e663c67df20e48f664ad853 | 133ed4d21f758681ff62976f0f8dd03bcb3aba35 | refs/heads/main | 2023-05-03T21:02:32.068048 | 2021-05-22T02:28:04 | 2021-05-22T02:28:04 | 369,682,370 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
"What is the most common strategy for dealing with hash collisions?"
A: Not storing the values directly at an index of the hash table's array. Instead, the array index stores a pointer to a linked list. Each node in the linked list stores a key, value, and a pointer to the next item in the linked list.
""" | UTF-8 | Python | false | false | 313 | py | 3 | csCommonStrategyForHashCollisions.py | 3 | 0.753994 | 0.753994 | 0 | 5 | 61.6 | 234 |
zaojiahua/flask_dn_server | 2,147,483,673,266 | 8288694b82afdaf939cb33148819a49fedc82f0f | 28f05b2ddb635f1577a09a502b29ee30c0dba0f0 | /dn/common/lln.py | 656b99df590c70c4aa03fbca9b3f8468f0a0c39e | []
| no_license | https://github.com/zaojiahua/flask_dn_server | 709efeced512cfabdd35720373f75bb8ca8d7a96 | 9e5aa865633399f86242d9ebdd5a9ed8c623d203 | refs/heads/master | 2023-05-10T06:17:16.682532 | 2020-01-08T08:43:46 | 2020-01-08T08:43:46 | 232,228,795 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import urllib.error
import urllib.parse
import urllib.request
import simplejson as json
class LLNFormatError(Exception):
pass
_not_safe = '%|\r\n='
_safe_map = {}
# build the escape map over all 256 byte values; chr(i) replaces the old
# Python 2 str(bytearray(...)) trick, which no longer works in Python 3
for i in range(256):
    c = chr(i)
    _safe_map[c] = c if c not in _not_safe else '%{:02x}'.format(i)
def _encode_char(c):
return _safe_map.get(c, c)
def _encode(s):
if s is None:
return 'None'
elif isinstance(s, bool):
return str(s)
elif isinstance(s, (int, float)):
return repr(s)
elif isinstance(s, str):
return ''.join(map(_encode_char, s))
return s
def _decode(s):
if not s:
return s
if s.startswith('$'):
return json.loads(urllib.parse.unquote(s[1:]))
index = s.find('=')
if index != -1:
return urllib.parse.unquote(
s[0:index]), \
urllib.parse.unquote(s[index + 1:])
return urllib.parse.unquote(s)
def to_bytes(s, encoding='utf-8', errors='strict'):
if isinstance(s, str):
s = bytes(s, encoding)
return s
def to_str(s, decoding='utf-8'):
if isinstance(s, bytes):
s = s.decode(decoding)
else:
raise LLNFormatError('bytes input required.')
return s
def escape_string(data):
_escape_map = {
'\r': '\\r',
'\n': '\\n',
}
escaped = data
for k, v in list(_escape_map.items()):
escaped = escaped.replace(k, v)
return escaped
# This no longer needs the if/else below, since Python 3 unified str and unicode
def translate_left(left):
filtered = '\r\n\t =!@#$:;,+-()[]~`'
# if isinstance(left, str):
# left = left.translate(None, filtered)
# elif isinstance(left, str):
# left = left.translate({ord(i): None for i in filtered})
if isinstance(left, str):
left = left.translate({ord(i): None for i in filtered})
return left
def dump_string(data):
if not data:
return str(data).encode("utf8")
data = to_bytes(escape_string(data))
if b'|' in data or b'=' in data or data[0] == b'$'[0]:
return b'$%d$ %s' % (len(data), data)
else:
return data
def dump_binop(left, right):
left = translate_left(to_bytes('%s' % (left)))
if isinstance(right, (list, dict)):
try:
right = json.dumps(right)
except Exception:
right = '%s' % right
right = to_bytes(escape_string('%s' % right))
if b'|' in left or b'=' in left or b'|' in right or left[0] == b'$'[0]:
return b'$%d,%d$ %s=%s' % (len(left), len(right), left, right)
else:
return b'%s=%s' % (left, right)
def dump_dict(data):
try:
d = json.dumps(data)
if '|' in d or d[0] == '$':
return b'$$%d$ %s' % (len(d), to_bytes(d))
else:
return b'$%s' % (to_bytes(d))
except Exception:
return dump_string(repr(data))
def dump_object(data):
return dump_string(repr(data))
def dumps2(msgs):
s = []
for msg in msgs:
if msg is None:
s.append(b'None')
elif isinstance(msg, bool):
s.append(to_bytes(str(msg)))
elif isinstance(msg, (int, float)):
s.append(to_bytes(repr(msg)))
elif isinstance(msg, str):
s.append(dump_string(msg))
elif isinstance(msg, tuple):
if len(msg) == 2:
s.append(dump_binop(msg[0], msg[1]))
else:
s.append(dump_string(repr(msg)))
elif isinstance(msg, (list, dict)):
s.append(dump_dict(msg))
else:
s.append(dump_object(msg))
return b'|'.join(s)
def string_list_to_bytes(str_lst):
# make sure every item in str_lst is of type bytes, modified in place.
for i in range(len(str_lst)):
if isinstance(str_lst[i], str):
str_lst[i] = to_bytes(str_lst[i])
def load_meta(s, i):
m1 = s[i + 1:i + 2]
if m1 == b'{' or m1 == b'[':
return s[i:i + 1]
elif m1 == b'$' or m1 in b'0123456789':
j = s.find(b'$ ', i + 1)
if j == -1:
raise LLNFormatError(
'meta <%s> info not completed. <:> not found' % (s[i:]))
else:
return s[i:j + 2]
else:
raise LLNFormatError('meta <%s> is invalid' % (s[i:]))
def load_data_withmeta(s, i, meta):
meta = meta.rstrip()
if meta == b'$':
string = load_string(s, i)
return json.loads(to_str(string)), len(string)
exp = meta[1:-1].replace(b' ', b'')
if not exp:
raise LLNFormatError('meta <%s> is invalid' % (meta))
if b',' in exp:
pair = exp.split(b',')
if len(pair) != 2:
raise LLNFormatError(
'meta <%s> only support one <,> now.' % (meta))
llen, rlen = int(pair[0]), int(pair[1])
left = s[i:i + llen]
i += llen
if s[i:i + 1] != b'=':
raise LLNFormatError('LLN expect <=> but <%s> found.' % (s[i]))
i += 1
right = s[i:i + rlen]
i += rlen
return {to_str(left): to_str(right)}, llen + rlen + 1
elif exp[0:1] == b'$':
data_len = int(exp[1:])
string = s[i:i + data_len]
return json.loads(to_str(string)), len(string)
else:
data_len = int(exp)
string = s[i:i + data_len]
return to_str(string), len(string)
def load_string(s, i):
j = s.find(b'|', i)
if j == -1:
return s[i:]
return s[i:j]
def loads2(s):
if isinstance(s, str):
s = to_bytes(s)
loaded = []
check_separator = False
i = 0
while i < len(s):
c = s[i]
if check_separator:
if c == b'|'[0]:
i += 1
check_separator = False
continue
else:
raise LLNFormatError(
'separator | expected, but <%s> found.' % chr(c))
if c == b'$'[0]:
meta = load_meta(s, i)
i += len(meta)
data, length = load_data_withmeta(s, i, meta)
loaded.append(data)
i += length
else:
string = load_string(s, i)
if b'=' in string:
loaded.append(
dict((json_load_right(
to_str(string).split('=', 1)[0:2]), )))
else:
loaded.append(to_str(string))
i += len(string)
check_separator = True
return loaded
def json_load_right(lst):
if not isinstance(lst, list):
raise LLNFormatError('in json_load_right function string required!')
left = lst[0]
right = lst[1]
try:
right = str(json.loads(right))
except Exception:
right = '%s' % right
return [left] + [right]
loads = loads2
dumps = dumps2
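
# Illustrative round trip through the functions above (example values only):
#   >>> dumps(['status', ('code', 200), {'ok': True}])
#   b'status|code=200|${"ok": true}'
#   >>> loads(dumps(['status', ('code', 200), {'ok': True}]))
#   ['status', {'code': '200'}, {'ok': True}]
# dump_binop serializes both sides of a pair as text, so it comes back as {'code': '200'}.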
| UTF-8 | Python | false | false | 6,805 | py | 14 | lln.py | 11 | 0.509061 | 0.498748 | 0 | 261 | 25.003831 | 76 |
ShadyZOZ/zblog | 3,513,283,252,039 | 55e2fa84e9765449a2de857c36555076abf8a189 | d88ccd19b2f788affb5caec81122802f0418a09a | /zblog/views.py | 0f75bc02b9008da6fba63ae6e324e0fd1b652a87 | []
| no_license | https://github.com/ShadyZOZ/zblog | 93900e6368006f991db031b004ff72b9942db631 | df397d9deeb81a25e624b49b593ce188c5d70256 | refs/heads/master | 2016-09-22T10:30:57.859203 | 2016-07-25T18:48:30 | 2016-07-25T18:48:30 | 64,157,174 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render
def index(request):
r = request.COOKIES
print(r)
return render(request, 'index.jinja') | UTF-8 | Python | false | false | 135 | py | 5 | views.py | 1 | 0.711111 | 0.711111 | 0 | 6 | 21.666667 | 41 |
Spredzy/ansible-builder | 9,483,287,828,202 | 2d2d2d1b48ca3c563fcf05d697acc933d6b78293 | efa5c0f6590750e97d7489e29e9d5fea5cda9147 | /ansible_builder/main.py | 4234a3c70a2c51ff3dbf43b787292c96cb35eb38 | []
| no_license | https://github.com/Spredzy/ansible-builder | 1c375f559f4125cbbf921268d3d345aafe70a1da | a1adc2aec9b40cb223b02613699dda77f7bcb110 | refs/heads/master | 2023-06-04T14:30:01.008282 | 2020-05-09T17:18:04 | 2020-05-09T17:18:04 | 265,592,353 | 0 | 0 | null | true | 2020-05-20T14:32:24 | 2020-05-20T14:32:23 | 2020-05-09T17:18:16 | 2020-05-09T17:18:13 | 37 | 0 | 0 | 0 | null | false | false | import os
import yaml
from shutil import copy
default_base_image = 'shanemcd/ansible-runner'
class AnsibleBuilder:
def __init__(self, filename='execution-environment.yml', base_image=default_base_image, build_context=None):
self.definition = Definition(filename=filename)
self.base_image = base_image
self.build_context = build_context or os.path.join(os.getcwd(), 'context')
self.containerfile = Containerfile(
filename='Containerfile',
definition=self.definition,
base_image=base_image,
build_context=self.build_context)
@property
def version(self):
return self.definition.version
def process(self):
return self.containerfile.write()
class Definition:
def __init__(self, *args, filename):
self.filename = filename
with open(filename, 'r') as f:
self.raw = yaml.load(f, Loader=yaml.FullLoader)
@property
def version(self):
version = self.raw.get('version')
if not version:
raise ValueError("Expected top-level 'version' key to be present.")
return str(version)
@property
def galaxy_requirements_file(self):
return self.raw.get('dependencies', {}).get('galaxy')
class Containerfile:
newline_char = '\n'
def __init__(self, *args, filename, definition, build_context, base_image):
self.build_context = build_context
os.makedirs(self.build_context, exist_ok=True)
self.definition = definition
self.path = os.path.join(self.build_context, filename)
self.base_image = base_image
self.build_steps()
def build_steps(self):
self.steps = []
self.steps.append("FROM {}".format(self.base_image))
self.steps.append(self.newline_char)
[self.steps.append(step) for step in GalaxySteps(containerfile=self)]
return self.steps
def write(self):
with open(self.path, 'w') as f:
for step in self.steps:
if step == self.newline_char:
f.write(step)
else:
f.write(step + self.newline_char)
return True
class GalaxySteps:
def __new__(cls, *args, containerfile):
definition = containerfile.definition
if not definition.galaxy_requirements_file:
return []
src = definition.galaxy_requirements_file
dest = containerfile.build_context
copy(src, dest)
basename = os.path.basename(definition.galaxy_requirements_file)
return [
"ADD {} /build/".format(basename),
"RUN ansible-galaxy role install -r /build/{}".format(basename),
"RUN ansible-galaxy collection install -r /build/{}".format(basename)
]
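
# Illustrative usage sketch (file names below are assumptions, not fixed by this module):
#   given an execution-environment.yml like
#       version: 1
#       dependencies:
#         galaxy: requirements.yml
#   the two lines below write context/Containerfile containing the FROM line
#   plus the ADD/RUN galaxy steps emitted by GalaxySteps:
#
#   builder = AnsibleBuilder(filename='execution-environment.yml')
#   builder.process()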
| UTF-8 | Python | false | false | 2,815 | py | 6 | main.py | 4 | 0.61492 | 0.61492 | 0 | 92 | 29.597826 | 112 |
W1rner/SITE_FOR_SCHOOL | 8,933,532,015,511 | 6aba75a49029570ea0770198f877f6078bb94fa8 | 24c6132ae420ccada3b742aebca4c62044166d87 | /main/models.py | 66ea51308a73947c1af131907b32c9bcc0f7cda3 | []
| no_license | https://github.com/W1rner/SITE_FOR_SCHOOL | 207728e4314740b80e1d7a02efdf1027bc5fef90 | c0e08816bb7aad2e1b9898b20a02a04f9be0da13 | refs/heads/main | 2023-04-30T12:39:07.888539 | 2021-05-14T11:00:19 | 2021-05-14T11:00:19 | 367,328,554 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
class Votings(models.Model):
name = models.TextField(unique=True)
about = models.TextField()
author = models.TextField()
all_votes_quantity = models.IntegerField()
variants = models.TextField()
variants_values = models.TextField()
participants = models.TextField()
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
history = models.TextField()
class Complaints(models.Model):
author = models.TextField()
user_id = models.IntegerField(default=None)
message = models.TextField()
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
| UTF-8 | Python | false | false | 1,041 | py | 37 | models.py | 13 | 0.722382 | 0.722382 | 0 | 37 | 27.135135 | 63 |
loveunCG/linkedin-front-end | 1,984,274,915,267 | 6666007b6ebc27d05f873cc62ca7273ca52ac377 | 8ddcf2bb34b3d2a69d84ea535ef2235db8e089ea | /app/migrations/0019_auto_20180513_1347.py | 54383fdb892df2427e12c475f263d418402da0ae | []
| no_license | https://github.com/loveunCG/linkedin-front-end | 2a8ada9603c0f0cfaee23dd7b6762752d86772cd | a96f51736ee5b9b882c9a7f3ba78f600ddb9f648 | refs/heads/master | 2022-12-10T08:08:56.182716 | 2018-06-24T13:29:50 | 2018-06-24T13:29:50 | 125,702,961 | 2 | 0 | null | false | 2021-06-10T20:29:42 | 2018-03-18T07:42:27 | 2019-10-17T12:56:54 | 2021-06-10T20:29:40 | 14,039 | 2 | 0 | 2 | CSS | false | false | # Generated by Django 2.0.5 on 2018-05-13 13:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0018_auto_20180513_1955'),
]
operations = [
migrations.RemoveField(
model_name='linkedinuser',
name='is_pin_needed',
),
migrations.AlterField(
model_name='linkedinuser',
name='status',
field=models.CharField(choices=[('Queued', 'Queued'), ('Running', 'Running'), ('Pin Required', 'Pin Required'), ('Pin Invalid', 'Pin Invalid'), ('Error', 'Error'), ('Done', 'Done')], default='Queued', max_length=20),
),
]
| UTF-8 | Python | false | false | 682 | py | 294 | 0019_auto_20180513_1347.py | 114 | 0.573314 | 0.524927 | 0 | 22 | 30 | 228 |
momchil-lukanov/hack-bulgaria | 824,633,767,036 | 75e34fb7acc995a6b60c238db2a96e75feada40c | 78724939218b9023aed2b7d0256f2798d8a3f246 | /programming-0/week-4/program.py | a1b6be02fb53fb8401028a3c15fdd27cf89fb9e7 | []
| no_license | https://github.com/momchil-lukanov/hack-bulgaria | 6968e80b654fe1b352d2289d652dca7395757914 | bb6b8ea3db272577443b98de0af636fb022b0fbb | refs/heads/master | 2020-12-25T18:22:28.467960 | 2015-11-22T16:17:18 | 2015-11-22T16:17:18 | 31,120,172 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | person = {}
person["first_name"] = input()
person["second_name"] = input()
person["third_name"] = input()
person["birth_year"] = int(input())
person["current_age"] = 2015 - person["birth_year"]
print(person)
| UTF-8 | Python | false | false | 208 | py | 164 | program.py | 144 | 0.653846 | 0.634615 | 0 | 7 | 28.714286 | 51 |
GavrilinEugene/geo2osm | 12,498,354,832,123 | 6ffdf0a19a4c3e83b4dc8d4782260eb4d6f0f576 | f0067fa81119fff7e9b7dfd03003b85fe7e75d77 | /application/overpass_utils.py | c05badbfc9720dfdf30ffd0ea3130c35ce127476 | []
| no_license | https://github.com/GavrilinEugene/geo2osm | 06e68c5ac48284dbd0f134be70da012d99e965a5 | eb8fbf62f637c8bd91421ec36c2709c4114c49d3 | refs/heads/master | 2023-03-01T23:59:37.595129 | 2021-02-13T13:19:57 | 2021-02-13T13:19:57 | 324,165,968 | 0 | 0 | null | false | 2021-02-13T13:19:58 | 2020-12-24T13:46:43 | 2021-02-13T11:24:38 | 2021-02-13T13:19:58 | 33 | 0 | 0 | 0 | Python | false | false | import requests
from osm2geojson import json2geojson
def get_geojson(bbox, list_node_types = ['way["building"]']):
"""
"""
(left, bottom, right, top) = bbox
way_query = ""
for node_type in list_node_types:
way_query += f"{node_type}({bottom},{left},{top},{right});\n"
query = f"""
[out:json][timeout:25];
({way_query}
);
out body;
>;
out skel qt;
"""
url = "http://overpass-api.de/api/interpreter"
r = requests.get(url, params={'data': query})
if r.status_code != 200:
raise requests.exceptions.HTTPError(f'Overpass server respond with status {r.status_code}')
data = json2geojson(r.json())
print(data)
for x in range(0, len(data['features'])):
data['features'][x]['id'] = data['features'][x]['properties']['id']
return data
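
# Illustrative call (placeholder coordinates; any (left, bottom, right, top) bbox works):
#   bbox = (37.60, 55.74, 37.64, 55.76)
#   buildings = get_geojson(bbox, list_node_types=['way["building"]'])
#   # buildings['features'] holds the matching polygons, each carrying its OSM id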
| UTF-8 | Python | false | false | 856 | py | 8 | overpass_utils.py | 4 | 0.570093 | 0.559579 | 0 | 29 | 28.517241 | 99 |
jiananarthurli/insight_api | 12,549,894,461,015 | 1fa75a092d9185d18ef1d9d54cb38ac79f7794c6 | 539c267a58cb727c5f1925b67da0bbbae0b04de2 | /insight_api_src/vectorizer/views.py | b22e6c6de88eac610c098d01f3abb25997cedab1 | []
| no_license | https://github.com/jiananarthurli/insight_api | e228b7cbd193b4eb2a9c3ad5a9b490816c1f65ed | c6c46f1fa96e3fe6d182ef6b7a575deaa3d6bee9 | refs/heads/master | 2022-12-17T08:58:29.978049 | 2020-10-03T04:42:04 | 2020-10-03T04:42:04 | 191,235,576 | 6 | 1 | null | false | 2022-12-08T05:17:11 | 2019-06-10T19:49:08 | 2020-10-05T09:03:13 | 2022-12-08T05:17:10 | 166,118 | 6 | 1 | 5 | Jupyter Notebook | false | false | from django.shortcuts import render
import re
import nltk
import spacy
import numpy as np
from nltk.stem import WordNetLemmatizer
from collections import defaultdict
from nltk.corpus import stopwords
lemmatizer = WordNetLemmatizer()
nlp = spacy.load("en_core_web_sm")
stop = set(stopwords.words('english'))
stop_words = set(['event', 'collection', 'street', 'many',
'exhibition', 'work', 'monday', 'tuesday',
'wednesday', 'thursday', 'friday', 'saturday',
'sunday', 'new', 'york', 'new york', 'new york city',
'visit', 'museum', 'world', 'department', 'NYC'
])
stop.update(stop_words)
def preprocess(text):
# text cleaning
text = text.replace('\n', ' ')
    text = text.replace('&#39;', "'")  # decode the HTML entity for an apostrophe
text = text.replace('’', "'")
text = text.replace('B.C.', "BC")
text = text.replace('A.D.', "AD")
text = text.replace('&', "and")
# remove ',' in numbers
    text = re.sub(r'(\d+),(\d+)', lambda x: "{}{}".format(x.group(1).replace(',', ''), x.group(2)), text)
text = re.sub('&#x(.*?);', ' ', text)
text = re.sub('http(.+?) ', '', text)
return text
# PoS tagging for text
def doc2tag(text):
sentences = nltk.sent_tokenize(text)
tag_list = []
for s in sentences:
tokens = nltk.word_tokenize(s)
text_tagged = nltk.pos_tag(tokens)
pair = [(word, pos) for (word, pos) in text_tagged]
tag_list.extend(pair)
return tag_list
# find PoS pattern of NNP, NNP, NN
def nnp_nn(text):
patterns = "NNP_NN: {<NNP>+(<NNS>|<NN>+)}" # at least one NNP followed by NNS or at least one NN
parser = nltk.RegexpParser(patterns)
p = parser.parse(doc2tag(text))
phrase = []
for node in p:
if type(node) is nltk.Tree:
phrase_str = ''
for w in node:
phrase_str += w[0]
phrase_str += ' '
phrase_str = phrase_str.strip()
phrase.append(phrase_str)
return phrase
# find PoS pattern of JJ, NN
def jj_nn(text):
patterns = "NNP_NN: {<JJ>+(<NN>+)}" #
parser = nltk.RegexpParser(patterns)
p = parser.parse(doc2tag(text))
phrase = []
for node in p:
if type(node) is nltk.Tree:
phrase_str = ''
for w in node:
phrase_str += w[0]
phrase_str += ' '
phrase_str = phrase_str.strip()
phrase.append(phrase_str)
return phrase
# calculate TF-IDF vector for the text, assume trigrams
def tf_idf(text, key_tokens, idf_dict, ngram=3):
tf_idf_dict = defaultdict(int)
text = text.lower()
# tokens been used for tf-idf
tokens = nltk.word_tokenize(text)
# get unigram, bigram, trigram
token_list = []
for i in range(1, ngram + 1):
token_list.extend(nltk.ngrams(tokens, i))
token_list = [' '.join(token) for token in token_list]
# lemmatize the tokens
for i, token in enumerate(token_list):
token_list[i] = lemmatizer.lemmatize(token)
# initialize the tf_idf_dict with all the tokens to be used
for token in key_tokens:
tf_idf_dict[token] = 0
# count frequency of each token
for token in token_list:
if token in key_tokens:
tf_idf_dict[token] += 1
# tf-idf vector calculation
for key in tf_idf_dict.keys():
tf_idf_dict[key] = tf_idf_dict[key] * idf_dict[key]
tf_idf_vec = np.zeros((len(key_tokens),))
for i, key in enumerate(key_tokens):
tf_idf_vec[i] = tf_idf_dict[key]
    # return a normalized 1d np array (guard against an all-zero vector)
    norm = np.linalg.norm(tf_idf_vec)
    if norm > 0:
        tf_idf_vec = tf_idf_vec / norm
return tf_idf_vec
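
# Illustrative similarity check (toy vocabulary and idf weights, not the app's real data):
#   key_tokens = ['modern art', 'sculpture', 'jazz']
#   idf_dict = {'modern art': 2.0, 'sculpture': 1.5, 'jazz': 3.0}
#   v1 = tf_idf("An evening of jazz and sculpture.", key_tokens, idf_dict)
#   v2 = tf_idf("Jazz night downtown.", key_tokens, idf_dict)
#   cosine = float(np.dot(v1, v2))  # vectors are L2-normalized, so dot == cosine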
| UTF-8 | Python | false | false | 3,702 | py | 16 | views.py | 9 | 0.574014 | 0.568882 | 0 | 130 | 27.461538 | 104 |
yamaguchiyuto/location_inference | 532,575,987,397 | 1fd6562079077366de858571ea6d3801fb427e61 | 02f82a98a1db5d9a28ad97b11b447194d3ec6aab | /lib/words.py | dbf28f07b7f24bd80f2de2848bda74c768578824 | [
"MIT"
]
| permissive | https://github.com/yamaguchiyuto/location_inference | d5301d71010016e23a0716e73776b30c2fe67b00 | 370a632cef843aa42a4a3662f8ee492789cbe053 | refs/heads/master | 2021-01-19T13:53:15.354412 | 2013-11-05T14:08:59 | 2013-11-05T14:08:59 | 13,535,937 | 4 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import json
class Words:
def __init__(self, words={}):
self.values = words
def __str__(self):
res = ""
for word in self.values.values():
res += json.dumps(word) + "\n"
return res[:-1]
def set(self, words):
self.values = words
def load_file(self, filepath):
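        # Expects one JSON object per line, keyed by its 'word' field.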
for line in open(filepath, 'r'):
word = json.loads(line.rstrip())
self.values[word['word']] = word
def load_mysql(self, mysqldb):
pass
def load_mongodb(self, mongodb):
pass
def get(self, word_str):
if word_str in self.values:
return self.values[word_str]
else:
return None
def contain(self, w):
if w in self.values:
return True
else:
return False
def add(self, word):
if not word['word'] in self.values:
self.values[word['word']] = word
def iter(self):
for word in self.values.values():
yield word
if __name__ == '__main__':
import sys
words = Words()
words.load_file(sys.argv[1])
    print(words.get(u'那覇'))
    print(words.get(u'北浦和'))
| UTF-8 | Python | false | false | 1,220 | py | 35 | words.py | 22 | 0.513223 | 0.510744 | 0 | 54 | 21.407407 | 44 |
jm-begon/clustertools | 1,821,066,182,091 | 5440ef01bd7a08e64f97370eb282710e2886c979 | f52a5fdb795279f87df6bf61161dfe720ad97034 | /clustertools/test/test_paramset.py | d22137a592d15474abe1c0ecbbc2629174b1fef8 | [
"BSD-3-Clause"
]
| permissive | https://github.com/jm-begon/clustertools | 3ce5f96831b16b9f7a9f9bfaf3dcc4952674f6a8 | 264198d0ffbd60b883b7b6a2af79341425c7729b | refs/heads/master | 2021-05-25T09:02:13.946550 | 2019-08-21T07:51:22 | 2019-08-21T07:51:22 | 43,871,227 | 8 | 3 | BSD-3-Clause | false | 2019-08-21T07:51:24 | 2015-10-08T07:51:56 | 2019-08-20T11:50:10 | 2019-08-21T07:51:23 | 745 | 5 | 2 | 15 | Python | false | false | from nose.tools import assert_equal, assert_in, assert_less, assert_raises, \
with_setup, assert_true
from clustertools import ParameterSet, ConstrainedParameterSet, \
PrioritizedParamSet, ExplicitParameterSet
# --------------------------------------------------------- ExplicitParameterSet
from clustertools.parameterset import CartesianMixer
def test_explicit_paramset():
ps = ExplicitParameterSet()
ps.add_parameter_tuple(p1=1, p2=2, p3="param")
ps.add_parameter_tuple(p1=1, p2=3, p3="param")
ps.add_parameter_tuple(p1=1, p2=5, p3="param")
ps.add_parameter_tuple(p1=4, p2=2, p3="param")
ps.add_parameter_tuple(p1=4, p2=3, p3="param")
ps.add_parameter_tuple(p1=4, p2=5, p3="param")
assert_equal(len(ps), 6)
cart_prod = [
{"p1": 1, "p2": 2, "p3": "param"},
{"p1": 1, "p2": 3, "p3": "param"},
{"p1": 1, "p2": 5, "p3": "param"},
{"p1": 4, "p2": 2, "p3": "param"},
{"p1": 4, "p2": 3, "p3": "param"},
{"p1": 4, "p2": 5, "p3": "param"},
]
assert_equal(len(ps), 6)
i = 0
for _, param_dict in ps:
assert_in(param_dict, cart_prod)
i += 1
assert_equal(i, 6)
assert_equal(len(ps), 6)
# ------------------------------------------------------- Cartesian ParameterSet
def test_paramset_yield():
ps = ParameterSet()
assert_equal(len(ps), 1) # The null dictionary
ps.add_parameters(p1=1, p2=[2, 3], p3="param")
ps.add_parameters(p1=4, p2=5)
cart_prod = [
{"p1": 1, "p2": 2, "p3": "param"},
{"p1": 1, "p2": 3, "p3": "param"},
{"p1": 1, "p2": 5, "p3": "param"},
{"p1": 4, "p2": 2, "p3": "param"},
{"p1": 4, "p2": 3, "p3": "param"},
{"p1": 4, "p2": 5, "p3": "param"},
]
assert_equal(len(ps), 6)
i = 0
for _, param_dict in ps:
assert_in(param_dict, cart_prod)
i += 1
assert_equal(i, 6)
assert_equal(len(ps), 6)
def test_paramset_list_insertion():
ps = ParameterSet()
ps.add_single_values(p1=(1, 2, 3), p2=(1, 2))
assert_equal(len(ps), 1)
for _, param_dict in ps:
assert_equal(param_dict, {"p1": (1, 2, 3), "p2": (1, 2)})
def test_paramset_separator():
ps = ParameterSet()
ps.add_parameters(p1=[1, 2], p2=["a", "b"])
ps.add_separator(p3="param")
ps.add_parameters(p1=3)
assert_equal(len(ps), 6)
for i, param_dict in ps:
assert_equal(param_dict["p3"], "param")
if i < 4:
assert_in(param_dict["p1"], [1, 2])
else:
assert_equal(param_dict["p1"], 3)
ps.add_parameters(p2="c")
assert_equal(len(ps), 9)
count = 0
for i, param_dict in ps:
assert_equal(param_dict["p3"], "param")
if i < 4:
assert_in(param_dict["p1"], [1, 2])
assert_in(param_dict["p2"], ["a", "b"])
if param_dict["p1"] == 3 and param_dict["p2"] == "c":
count += 1
assert_equal(count, 1)
assert_raises(ValueError, ps.add_parameters, p4=10)
def test_paramset_getitem():
ps = ParameterSet()
ps.add_parameters(p1=[1, 2], p2=["a", "b"])
ps.add_separator(p3="param")
ps.add_parameters(p1=3, p2="c")
for i, param_dict in ps:
assert_equal(param_dict, ps[i])
def test_paramset_get_indices_with():
ps = ParameterSet()
ps.add_parameters(p1=[1, 2], p2=["a", "b"])
ps.add_separator(p3="param")
ps.add_parameters(p1=3, p2="c")
for index in ps.get_indices_with(p1={3}):
assert_less(3, index) # 0,1,2,3 --> [1,2] x [a,b]
assert_equal(ps[index]["p1"], 3)
assert_equal(len(list(ps.get_indices_with(p1={4}))), 0)
# ---------------------------------------------------------------- Cartesian mix
def test_cartesianmix():
ps = ParameterSet()
ps.add_parameters(p1=[1, 2], p2=["a", "b"])
ps1 = ExplicitParameterSet()
ps1.add_parameter_tuple(p3=3, p4=10)
ps1.add_parameter_tuple(p3=4, p4=11)
c = CartesianMixer(ps, ps1)
assert_equal(len(c), 8)
expected = [
{"p1": 1, "p2": "a", "p3": 3, "p4": 10},
{"p1": 1, "p2": "a", "p3": 4, "p4": 11},
{"p1": 1, "p2": "b", "p3": 3, "p4": 10},
{"p1": 1, "p2": "b", "p3": 4, "p4": 11},
{"p1": 2, "p2": "a", "p3": 3, "p4": 10},
{"p1": 2, "p2": "a", "p3": 4, "p4": 11},
{"p1": 2, "p2": "b", "p3": 3, "p4": 10},
{"p1": 2, "p2": "b", "p3": 4, "p4": 11},
]
i = 0
for idx, tup in c:
assert_equal(i, idx)
assert_in(tup, expected)
i += 1
assert_true(repr(c).startswith("CartesianMixer"))
# ------------------------------------------------------ ConstrainedParameterSet
def test_constrainparamset():
ps = ParameterSet()
ps.add_parameters(p1=[1, 2, 3], p2=["a", "b"])
cps = ConstrainedParameterSet(ps)
cps.add_constraints(c1=lambda p1, p2: True if p2 == "a" else p1 % 2 == 0)
assert_equal(len(cps), 4) # (1, a), (2, a), (3, a), (2, b)
expected = [{"p1": 1, "p2": "a"},
{"p1": 2, "p2": "a"},
{"p1": 3, "p2": "a"},
{"p1": 2, "p2": "b"},
]
for _, param_dict in cps:
assert_in(param_dict, expected)
# ---------------------------------------------------------- PrioritizedParamSet
def test_prioritized_paramset():
ps = ParameterSet()
ps.add_parameters(p1=[1, 2, 3, 4], p2=["a", "b", "c"])
pps = PrioritizedParamSet(ps)
pps.prioritize("p2", "b")
pps.prioritize("p1", 2)
pps.prioritize("p1", 3)
pps.prioritize("p2", "c")
expected = [
(4, {"p1": 2, "p2": "b"}), # 12 = 0 2^0 + 0 2^1 + 1 2^2 + 1 2^ 3
(7, {"p1": 3, "p2": "b"}), # 10 = 0 2^0 + 1 2^1 + 0 2^2 + 1 2^ 3
(1, {"p1": 1, "p2": "b"}), # 8 = 0 2^0 + 0 2^1 + 0 2^2 + 1 2^ 3
(10, {"p1": 4, "p2": "b"}), # 8 = 0 2^0 + 0 2^1 + 0 2^2 + 1 2^ 3
(5, {"p1": 2, "p2": "c"}), # 5 = 1 2^0 + 0 2^1 + 1 2^2 + 0 2^ 3
(3, {"p1": 2, "p2": "a"}), # 4 = 0 2^0 + 0 2^1 + 1 2^2 + 0 2^ 3
(8, {"p1": 3, "p2": "c"}), # 3 = 1 2^0 + 1 2^1 + 0 2^2 + 0 2^ 3
(6, {"p1": 3, "p2": "a"}), # 2 = 0 2^0 + 2 2^1 + 0 2^2 + 0 2^ 3
(2, {"p1": 1, "p2": "c"}), # 1 = 1 2^0 + 0 2^1 + 0 2^2 + 0 2^ 3
(11, {"p1": 4, "p2": "c"}), # 1 = 1 2^0 + 0 2^1 + 0 2^2 + 0 2^ 3
(0, {"p1": 1, "p2": "a"}), # 0 = 0 2^0 + 0 2^1 + 0 2^2 + 0 2^ 3
(9, {"p1": 4, "p2": "a"}), # 0 = 0 2^0 + 0 2^1 + 0 2^2 + 0 2^ 3
]
result = list(pps)
assert_equal(result, expected) | UTF-8 | Python | false | false | 6,526 | py | 69 | test_paramset.py | 42 | 0.46353 | 0.38339 | 0 | 212 | 29.787736 | 80 |
zrdsj/py-learning-for-u | 19,542,101,203,855 | e2898f7434aa4ee0fca5373e6ece57e1a5359e46 | c658bc613ad430d563e4805c598258e2d1bd05d5 | /01-07/06.py | 5f10bc8104f9a495ee992d9ac97ea9e5fad2fd5a | []
| no_license | https://github.com/zrdsj/py-learning-for-u | 5e7867a77c9a73eba2840827a3f1d89c6ec31060 | a37d5a8c5dcc0f54868c419550dd59b8b358bef9 | refs/heads/master | 2020-08-03T11:59:41.917884 | 2019-10-13T06:51:12 | 2019-10-13T06:51:12 | 211,745,480 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # print('Hello, World!')
# print(2**3)
# print(9**(1/2))
# print(20 // 6)
# quotient
# print(1.25 % 0.5)
# remainder
# "I love you"
# print('我爱你')
# print("love u")
# print("1231")
# print(1231)9
# print(woaini我爱你) ,,, this is wrong
input("Enter a number:")
| UTF-8 | Python | false | false | 283 | py | 26 | 06.py | 26 | 0.518519 | 0.427984 | 0 | 19 | 11.105263 | 25 |
arcaorm00/sba-2-api | 3,805,341,056,960 | 47f491f97e90823911b503a1b6729841bb80ba89 | 63fab1fc9d7114c38280048c94ad71b3488f6f63 | /model/cabbage_model.py | f6b65300d137783ffc45adea874045b0b0ed341a | []
| no_license | https://github.com/arcaorm00/sba-2-api | 8122ef06e0c2a9b1c2a541d8f49b5772e7f2119f | fd383732684c339d6017ce9e33304982f14224c9 | refs/heads/master | 2022-12-25T03:33:37.768412 | 2020-09-28T06:01:09 | 2020-09-28T06:01:09 | 299,144,358 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
sys.path.insert(0, '/Users/saltQ/sbaProject')
from util.file_handler import FileReader
import pandas as pd
import numpy as np
import tensorflow as tf
from dataclasses import dataclass
@dataclass
class Cabbage: # entity + service
# year,avgTemp,minTemp,maxTemp,rainFall,avgPrice
# 20100101,-4.9,-11,0.9,0,2123
    # member variables
year:int = 0
avgTemp:float = 0.0
minTemp:float = 0.0
maxTemp:float = 0.0
rainFall:float = 0.0
avgPrice:int = 0
    # objects shared inside the class; constant values
    def __init__(self):
        self.fileReader = FileReader() # stateless helper; behaviour only
self.context = '/Users/saltQ/sbaProject/price_prediction/data/'
def new_model(self, payload):
this = self.fileReader
this.context = self.context
this.fname = payload
return pd.read_csv(this.context + this.fname, sep=',')
def create_tf(self, payload):
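        # Trains a 4-feature linear regression (avgTemp, minTemp, maxTemp, rainFall -> avgPrice) by gradient descent and checkpoints it.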
xy = np.array(payload, dtype=np.float32)
x_data = xy[:, 1:-1] # feature
y_data = xy[:, [-1]] # price
x = tf.compat.v1.placeholder(tf.float32, shape=[None, 4])
y = tf.compat.v1.placeholder(tf.float32, shape=[None, 1])
w = tf.Variable(tf.random.normal([4, 1]), name='weight')
b = tf.Variable(tf.random.normal([1]), name='bias')
        hypothesis = tf.matmul(x, w) + b
        cost = tf.reduce_mean(tf.square(hypothesis - y))
optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.000005)
train = optimizer.minimize(cost)
sess = tf.compat.v1.Session()
sess.run(tf.compat.v1.global_variables_initializer())
for step in range(100000):
            cost_, hypo_, _ = sess.run([cost, hypothesis, train],
                                       feed_dict={x: x_data, y: y_data})
            if step % 500 == 0:
                print(f'# {step} loss: {cost_}')
                print(f'- cabbage price: {hypo_[0]}')
saver = tf.compat.v1.train.Saver()
saver.save(sess, self.context + 'saved_model.ckpt')
        print('save complete')
def test(self):
self.avgPrice = 100
return self.avgPrice
def service(self):
print('############# service #############')
X = tf.compat.v1.placeholder(tf.float32, shape=[None, 4])
# year,avgTemp,minTemp,maxTemp,rainFall,avgPrice
        # we take avgTemp,minTemp,maxTemp,rainFall as the inputs.
        # year is of no use to the model -> no correlation
        # avgPrice is the answer we want: the dependent variable
        # avgTemp,minTemp,maxTemp,rainFall are the independent variables that determine it,
        # i.e. the parameters used to decide avgPrice (important!)
        # Since we are moving into statistics and probability, let's define the terms carefully.
        # y = wx + b, a linear relationship
        # X is written in upper case and is called a random variable.
        # Compare: in web programming (Java, C) a lower-case x holds one value at a time,
        # and that value is supplied from outside, so it is simply a variable.
        # Here the values of X are still limited, but X stands for a whole set of many values;
        # in that case it is a random variable.
W = tf.Variable(tf.random.normal([4, 1]), name='weight')
b = tf.Variable(tf.random.normal([1]), name='bias')
        # A TensorFlow Variable differs from a variable in web programming.
        # It is not set by a value supplied from outside; it is a variable the graph updates internally.
        # What a conventional web variable corresponds to here is the placeholder.
saver = tf.compat.v1.train.Saver()
        with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
saver.restore(sess, self.context + 'saved_model.ckpt')
data = [[self.avgTemp, self.minTemp, self.maxTemp, self.rainFall], ]
arr = np.array(data, dtype = np.float32)
            prediction = sess.run(tf.matmul(X, W) + b, {X: arr[0:4]}) # matmul: matrix product (the data is laid out as matrices)
            # This is Y = WX + b expressed in code.
            print(prediction[0])
        return int(prediction[0])
if __name__ == '__main__':
cabbage= Cabbage()
# dframe = m.new_model('price_data.csv')
# print(dframe.head())
# m.create_tf(dframe)
print(cabbage.test())
| UTF-8 | Python | false | false | 4,508 | py | 3 | cabbage_model.py | 3 | 0.574296 | 0.551877 | 0 | 100 | 37.32 | 93 |
arnabs542/BigO-Coding-material | 12,549,894,473,163 | 4481ddaeaad78b4c4c22c38a032d67434ac0ae29 | 9b527131c291b735a163226d1daac2397c25b712 | /BigO_Algorithm/algorithmreview/Algo practice/Eleven.py | dc67102af23c3ada8c98a7fa94885b859f706b92 | []
| no_license | https://github.com/arnabs542/BigO-Coding-material | dbc8895ec6370933069b2e40e0610d4b05dddcf2 | 3b31bddb1240a407aa22f8eec78956d06b42efbc | refs/heads/master | 2022-03-19T18:32:53.667852 | 2019-11-27T23:55:04 | 2019-11-27T23:55:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Eleven chooses names
n = int(input())
Fib_arr = [0 for i in range(n+5)]
result = []
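# Position i (1-based) in the name gets 'O' if i is a Fibonacci number, 'o' otherwise.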
def Fib(i):
if i == 0:
return 0
elif i == 1 or i == 2:
return 1
else:
return Fib_arr[i-1] + Fib_arr[i-2]
for i in range(n+5):
Fib_arr[i] = Fib(i)
for num in range(1,n+1,1):
if num in Fib_arr:
result.append('O')
else:
result.append('o')
print(''.join(result))
| UTF-8 | Python | false | false | 447 | py | 138 | Eleven.py | 127 | 0.47651 | 0.447427 | 0 | 22 | 17.681818 | 42 |
Rohan175/roadCompanion-SGH | 18,425,409,724,598 | 12f905a98ab70f96858d4bb124e35bf92b1ad734 | f2c95171d8094de7ab774cfea69cd8d27937c579 | /roadG-master/roadGriev/migrations/0029_delete_prestoredloc.py | 1a5d05c1d32ff69133f4ecf8e6112f65a9144935 | []
| no_license | https://github.com/Rohan175/roadCompanion-SGH | 1d96429bffe6ee5f73b1cb97c3bccc4efe1c63e3 | ec949dc559a65bd7f417d4c1eab83207e91ca878 | refs/heads/master | 2022-12-14T20:59:41.907755 | 2018-09-09T07:44:02 | 2018-09-09T07:44:02 | 144,819,916 | 2 | 0 | null | false | 2022-12-07T23:52:10 | 2018-08-15T07:23:39 | 2019-01-21T15:17:28 | 2022-12-07T23:52:10 | 1,707 | 1 | 0 | 5 | JavaScript | false | false | # Generated by Django 2.0.7 on 2018-08-12 16:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('roadGriev', '0028_roadmapping'),
]
operations = [
migrations.DeleteModel(
name='PreStoredLoc',
),
]
| UTF-8 | Python | false | false | 296 | py | 127 | 0029_delete_prestoredloc.py | 90 | 0.601351 | 0.537162 | 0 | 16 | 17.5 | 47 |
GabrielBergam0/Trabalho-de-FLA-4 | 10,282,151,739,857 | f25385c498e490389b64c901d104ebd228978792 | d14e222c70a4e599578a0f0f7ef6835fb1b5884f | /RINHA DE BARCO.py | 284be5d1bd3b304e845447a443096274fb139f47 | []
| no_license | https://github.com/GabrielBergam0/Trabalho-de-FLA-4 | 69addbcae73c1d4fadb3a486c6fc4b84a192246e | 04c4e1d24e8aea0c3dd7daf1f5d582fa57bb4746 | refs/heads/master | 2022-01-04T17:51:42.565510 | 2019-11-23T18:46:03 | 2019-11-23T18:46:03 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
barcos1 = 0
BARCO = u"\u2588\u2588"
BARCO_ATINGIDO = "##"
EMPTY = '@'.center(2)
MISS = ")("
#=============================================================================
#=============================================================================
def tem_barco(tabuleiro):
    # True while at least one boat cell is still on the board.
    global BARCO
    for line in tabuleiro:
        if BARCO in line:
            return True
    return False
#=============================================================================
#=============================================================================
def cria_tabuleiro(n):
tab = []
for i in range(n):
line = []
for j in range(n * i, n * i + n, 1):
line.append(EMPTY.center(2))
tab.append(line)
return tab
#=============================================================================
#=============================================================================
def mostra_tabuleiro(tab, vez = True):
global BARCO_ATINGIDO, EMPTY
n= len(tab[0])
print(" 01 | 02 | 03 | 04 | 05 | 06 | 07 | 08 | 09 | 10 ")
print(" --------------------------------------------------")
for i in range(n):
line = [c if (c == BARCO_ATINGIDO or c == MISS or vez) else EMPTY for c in tab[i]]
print(chr(ord('A') + i) + " " + (("| %s " * n) % tuple(line)) )
if i < (n - 1): print("---|----|----|----|----|----|----|----|----|----|----")
print(" --------------------------------------------------")
#=============================================================================
#=============================================================================
def get_pos():
pos = input("Digite a posição onde deseja colocar o barco acima : ").split(',')
linha = ord(pos[0]) - ord('A')
coluna = int(pos[1]) - 1
while not (0 <= linha <= 9 and 0 <= coluna <= 9):
print("Voce esta saindo do oceano! Tente novamente!")
pos = input("Digite a posição onde deseja colocar o barco acima : ").split(',')
linha = ord(pos[0]) - ord('A')
coluna = int(pos[1]) - 1
return linha, coluna
#=============================================================================
#=============================================================================
def get_random_pos():
linhas = ["A","B","C","D","E","F","G","H","I","J"]
colunas = ["1","2","3","4","5","6","7","8","9","10"]
letra = random.choice(linhas)
numero = random.choice(colunas)
linha = ord(letra) - ord('A')
coluna = int(numero) - 1
return linha, coluna
#=============================================================================
#=============================================================================
Tabuleiro1 = cria_tabuleiro(10)
Tabuleiro2 = cria_tabuleiro(10)
#=============================================================================
#=============================================================================
def barcodepatrulha(tabuleiro, linha, coluna, direcao):
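    # Places a 2-cell patrol boat at (linha, coluna), extending right (H) or down (V); returns True on success, False if blocked or out of bounds.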
try:
if direcao == "H" or direcao == "h":
if (not (tabuleiro[linha][coluna] == u"\u2588\u2588" or \
tabuleiro[linha][coluna +1] == u"\u2588\u2588")) and coluna +2 <= len(tabuleiro):
tabuleiro[linha][coluna +1] = u"\u2588\u2588"
tabuleiro[linha][coluna] = u"\u2588\u2588"
return True
elif direcao == "V" or direcao == "v":
if (not (tabuleiro[linha][coluna] == u"\u2588\u2588" or \
tabuleiro[linha +1][coluna] == u"\u2588\u2588")) and linha + 2 <= len(tabuleiro):
tabuleiro[linha +1][coluna] = u"\u2588\u2588"
tabuleiro[linha][coluna] = u"\u2588\u2588"
return True
except:
return False
# template for the larger placement functions below
#=============================================================================
#=============================================================================
def destroyer(tabuleiro, linha, coluna, direcao):
try:
if direcao == "H" or direcao == "h":
if (not (tabuleiro[linha][coluna] == u"\u2588\u2588" or \
tabuleiro[linha][coluna +1] == u"\u2588\u2588" or \
tabuleiro[linha][coluna +2] == u"\u2588\u2588")) and coluna + 3 <= len(tabuleiro):
tabuleiro[linha][coluna +1] = u"\u2588\u2588"
tabuleiro[linha][coluna +2] = u"\u2588\u2588"
tabuleiro[linha][coluna] = u"\u2588\u2588"
return True
elif direcao == "V" or direcao == "v":
if ( not (tabuleiro[linha][coluna] == u"\u2588\u2588" or \
tabuleiro[linha +1][coluna] == u"\u2588\u2588" or \
tabuleiro[linha +2][coluna] == u"\u2588\u2588")) and linha + 3 <= len(tabuleiro):
tabuleiro[linha +1][coluna] = u"\u2588\u2588"
tabuleiro[linha +2][coluna] = u"\u2588\u2588"
tabuleiro[linha][coluna] = u"\u2588\u2588"
return True
except:
return False
#=============================================================================
#=============================================================================
def submarino(tabuleiro, linha, coluna, direcao):
try:
if direcao == "H" or direcao == "h":
if (not (tabuleiro[linha][coluna] == u"\u2588\u2588" or \
tabuleiro[linha][coluna +1] == u"\u2588\u2588" or \
tabuleiro[linha][coluna +2] == u"\u2588\u2588" or \
tabuleiro[linha][coluna +3] == u"\u2588\u2588")) and coluna + 4 <= len(tabuleiro):
tabuleiro[linha][coluna +1] = u"\u2588\u2588"
tabuleiro[linha][coluna +2] = u"\u2588\u2588"
tabuleiro[linha][coluna +3] = u"\u2588\u2588"
tabuleiro[linha][coluna] = u"\u2588\u2588"
return True
elif direcao == "V" or direcao == "v":
if (not (tabuleiro[linha][coluna] == u"\u2588\u2588" or \
tabuleiro[linha +1][coluna] == u"\u2588\u2588" or \
tabuleiro[linha +2][coluna] == u"\u2588\u2588" or \
tabuleiro[linha +3][coluna] == u"\u2588\u2588")) and linha + 4 <= len(tabuleiro):
tabuleiro[linha +1][coluna] = u"\u2588\u2588"
tabuleiro[linha +2][coluna] = u"\u2588\u2588"
tabuleiro[linha +3][coluna] = u"\u2588\u2588"
tabuleiro[linha][coluna] = u"\u2588\u2588"
return True
except:
return False
#=============================================================================
#=============================================================================
def porta_avioes(tabuleiro, linha, coluna, direcao):
try:
if direcao == "H" or direcao == "h":
if (not(tabuleiro[linha][coluna] == u"\u2588\u2588" or \
tabuleiro[linha][coluna +1] == u"\u2588\u2588" or \
tabuleiro[linha][coluna +2] == u"\u2588\u2588" or \
tabuleiro[linha][coluna +3] == u"\u2588\u2588" or \
tabuleiro[linha][coluna +4] == u"\u2588\u2588")) and coluna + 5 <= len(tabuleiro):
tabuleiro[linha][coluna +1] = u"\u2588\u2588"
tabuleiro[linha][coluna +2] = u"\u2588\u2588"
tabuleiro[linha][coluna +3] = u"\u2588\u2588"
tabuleiro[linha][coluna +4] = u"\u2588\u2588"
tabuleiro[linha][coluna] = u"\u2588\u2588"
return True
elif direcao == "V" or direcao == "v":
if (not(tabuleiro[linha][coluna] == u"\u2588\u2588" or \
tabuleiro[linha +1][coluna] == u"\u2588\u2588" or\
tabuleiro[linha +2][coluna] == u"\u2588\u2588" or\
tabuleiro[linha +3][coluna] == u"\u2588\u2588" or\
tabuleiro[linha +4][coluna] == u"\u2588\u2588")) and linha + 5 <= len(tabuleiro):
tabuleiro[linha +1][coluna] = u"\u2588\u2588"
tabuleiro[linha +2][coluna] = u"\u2588\u2588"
tabuleiro[linha +3][coluna] = u"\u2588\u2588"
tabuleiro[linha +4][coluna] = u"\u2588\u2588"
tabuleiro[linha][coluna] = u"\u2588\u2588"
return True
except:
return False
#=============================================================================
#=============================================================================
def encouraçado(tabuleiro, linha, coluna, direcao):
try:
if direcao == "H" or direcao == "h":
if (not (tabuleiro[linha][coluna] == u"\u2588\u2588" or \
tabuleiro[linha][coluna +1] == u"\u2588\u2588" or \
tabuleiro[linha][coluna +2] == u"\u2588\u2588" or \
tabuleiro[linha][coluna +3] == u"\u2588\u2588" or \
tabuleiro[linha][coluna +4] == u"\u2588\u2588" or \
tabuleiro[linha][coluna +5] == u"\u2588\u2588")) and coluna + 6 <= len(tabuleiro):
tabuleiro[linha][coluna +1] = u"\u2588\u2588"
tabuleiro[linha][coluna +2] = u"\u2588\u2588"
tabuleiro[linha][coluna +3] = u"\u2588\u2588"
tabuleiro[linha][coluna +4] = u"\u2588\u2588"
tabuleiro[linha][coluna +5] = u"\u2588\u2588"
tabuleiro[linha][coluna] = u"\u2588\u2588"
return True
elif direcao == "V" or direcao == "v":
if (not ( tabuleiro[linha][coluna] == u"\u2588\u2588" or \
tabuleiro[linha +1][coluna] == u"\u2588\u2588" or \
tabuleiro[linha +2][coluna] == u"\u2588\u2588" or \
tabuleiro[linha +3][coluna] == u"\u2588\u2588" or \
tabuleiro[linha +4][coluna] == u"\u2588\u2588" or \
tabuleiro[linha +5][coluna] == u"\u2588\u2588")) and linha + 6 <= len(tabuleiro):
tabuleiro[linha +1][coluna] = u"\u2588\u2588"
tabuleiro[linha +2][coluna] = u"\u2588\u2588"
tabuleiro[linha +3][coluna] = u"\u2588\u2588"
tabuleiro[linha +4][coluna] = u"\u2588\u2588"
tabuleiro[linha +5][coluna] = u"\u2588\u2588"
tabuleiro[linha][coluna] = u"\u2588\u2588"
return True
except:
return False
#=============================================================================
#=============================================================================
def atack():
pos = input("Digite a posição onde deseja lançar um missil : ").split(',')
linha = ord(pos[0]) - ord('A')
coluna = int(pos[1]) - 1
while not (0 <= linha <= 9 and 0 <= coluna <= 9):
print("Voce esta saindo do oceano! Tente novamente!")
pos = input("Digite a posição onde deseja lançar um missil : ").split(',')
linha = ord(pos[0]) - ord('A')
coluna = int(pos[1]) - 1
return linha, coluna
#=============================================================================
#=============================================================================
def ran_atack():
linha = random.choice(["A","B","C","D","E","F","G","H","I","J"])
coluna = random.choice(["1","2","3","4","5","6","7","8","9","10"])
linha = ord(linha) - ord('A')
coluna = int(coluna) - 1
return linha,coluna
#=============================================================================
#=============================================================================
print("Ola, seja bem vindo ao meu codigo de batalha naval, carinhosmente apelidado de Rinha de Barco")
print("Para jogar basta digitar a direção e a posicao desejada no formato Letra,numero...(A,1) ")
print(" ")
print("Este é o seu tabuleiro: ")
print(" ")
mostra_tabuleiro(Tabuleiro1)
print(" ")
print("Este é o tabuleiro do inimigo: ")
print(" ")
mostra_tabuleiro(Tabuleiro2, False)
#--------------------------------------------------------------
print("Posicione um barco de patrulha 2x1")
direcao = str(input("Digite H para horizontal e V para vertical: "))
linha, coluna = get_pos()
while not barcodepatrulha(Tabuleiro1, linha, coluna, direcao):
print("Voce nao pode colocar este barco ai!")
direcao = str(input("Digite H para horizontal e V para vertical: "))
linha, coluna = get_pos()
linha, coluna = get_random_pos()
direcao = random.choice(['H', 'V'])
while not barcodepatrulha(Tabuleiro2, linha, coluna, direcao):
linha, coluna = get_random_pos()
mostra_tabuleiro(Tabuleiro1)
print(" ")
mostra_tabuleiro(Tabuleiro2, False)
#--------------------------------------------------------------
print("Posicione um barco de destroyer 3x1")
direcao = str(input("Digite H para horizontal e V para vertical: "))
linha, coluna = get_pos()
while not destroyer(Tabuleiro1, linha, coluna, direcao):
print("Voce nao pode colocar este barco ai!")
direcao = str(input("Digite H para horizontal e V para vertical: "))
linha, coluna = get_pos()
linha, coluna = get_random_pos()
direcao = random.choice(['H', 'V'])
while not destroyer(Tabuleiro2, linha, coluna, direcao):
linha, coluna = get_random_pos()
mostra_tabuleiro(Tabuleiro1)
print(" ")
mostra_tabuleiro(Tabuleiro2, False)
#--------------------------------------------------------------
print("Posicione um barco de submarino 4x1")
direcao = str(input("Digite H para horizontal e V para vertical: "))
linha, coluna = get_pos()
while not submarino(Tabuleiro1, linha, coluna, direcao):
    print("Voce nao pode colocar este barco ai!")
    direcao = str(input("Digite H para horizontal e V para vertical: "))
    linha, coluna = get_pos()
direcao = random.choice(['H', 'V'])
linha, coluna = get_random_pos()
while not submarino(Tabuleiro2, linha, coluna, direcao):
linha, coluna = get_random_pos()
mostra_tabuleiro(Tabuleiro1)
print(" ")
mostra_tabuleiro(Tabuleiro2, False)
#--------------------------------------------------------------
print("Posicione um barco de porta-avioes 5x1")
direcao = str(input("Digite H para horizontal e V para vertical: "))
linha, coluna = get_pos()
while not porta_avioes(Tabuleiro1, linha, coluna, direcao):
print("Voce nao pode colocar este barco ai!")
direcao = str(input("Digite H para horizontal e V para vertical: "))
linha, coluna = get_pos()
direcao = random.choice(['H', 'V'])
linha, coluna = get_random_pos()
while not porta_avioes(Tabuleiro2, linha, coluna, direcao):
linha, coluna = get_random_pos()
mostra_tabuleiro(Tabuleiro1)
print(" ")
mostra_tabuleiro(Tabuleiro2, False)
#--------------------------------------------------------------
print("Posicione um barco de encouraçado 6x1")
direcao = str(input("Digite H para horizontal e V para vertical: "))
linha, coluna = get_pos()
while not encouraçado(Tabuleiro1, linha, coluna, direcao):
print("Voce nao pode colocar este barco ai!")
direcao = str(input("Digite H para horizontal e V para vertical: "))
linha, coluna = get_pos()
direcao = random.choice(['H', 'V'])
linha, coluna = get_random_pos()
while not encouraçado(Tabuleiro2, linha, coluna, direcao):
linha, coluna = get_random_pos()
mostra_tabuleiro(Tabuleiro1)
print(" ")
mostra_tabuleiro(Tabuleiro2, False)
#--------------------------------------------------------------
def tem_barco(Tabuleiro2):
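    # NOTE: unlike the helper defined earlier, this version returns True when NO boats remain; the game loop below negates it.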
global BARCO
for line in Tabuleiro2:
if BARCO in line:
return False
return True
while not tem_barco(Tabuleiro2):
    linha,coluna = atack()
    if Tabuleiro2[linha][coluna] == (u"\u2588\u2588"):
        Tabuleiro2[linha][coluna] = (BARCO_ATINGIDO)
    else:
        Tabuleiro2[linha][coluna] = (MISS)
    linha, coluna = ran_atack()
    if Tabuleiro1[linha][coluna] == (u"\u2588\u2588"):
        Tabuleiro1[linha][coluna] = (BARCO_ATINGIDO)
    else:
        Tabuleiro1[linha][coluna] = (MISS)
    mostra_tabuleiro(Tabuleiro1)
    print(" ")
    mostra_tabuleiro(Tabuleiro2, False)
print("JOGO ENCERRADO, EXISTE UM GANHADOR")
| UTF-8 | Python | false | false | 16,423 | py | 1 | RINHA DE BARCO.py | 1 | 0.46559 | 0.412984 | 0 | 398 | 40.201005 | 104 |
ayushianan/Summarizer | 712,964,578,755 | 1e02b3000715cc95c95279097ae273bed2c9b180 | 4c040d6bdccd3d0f7308ad8934b38c1363d7cda8 | /venv/lib/python3.6/_collections_abc.py | 7e2c2af484be8c7b454774fe8c49b8d810569f38 | []
| no_license | https://github.com/ayushianan/Summarizer | 456dc75b3f53b7b03ce2808c584ec12ee91f4ffd | ced46a48c1071a65f0d2b24d064d55aa575f58f2 | refs/heads/master | 2020-05-18T08:47:42.956905 | 2019-05-03T17:03:34 | 2019-05-03T17:03:34 | 184,304,942 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | /home/ayushi/anaconda3/lib/python3.6/_collections_abc.py | UTF-8 | Python | false | false | 56 | py | 44 | _collections_abc.py | 42 | 0.821429 | 0.767857 | 0 | 1 | 56 | 56 |
wuhuabushijie/sample | 7,576,322,312,570 | 2e1e1b2f3a3029dc5498fe7279e05c9523d8ea3a | 58fd635c519d573127bc2d4df2e46b9afb75c65c | /chapter05/bisect_test.py | 1febe0ffad1ce8804cef842d124e8c22f977793e | []
| no_license | https://github.com/wuhuabushijie/sample | 5be50cb5582233a6409e8e6ca9479a300ac1a8d4 | b5ceb5c4f061732ecf27fff8242daa9372c18385 | refs/heads/master | 2020-03-30T06:07:55.742159 | 2018-10-13T15:39:51 | 2018-10-13T15:39:51 | 150,840,806 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import bisect
# Utilities for maintaining a list in sorted order.
# insort inserts the argument into the sequence, keeping it sorted.
# bisect returns the position where the argument would be inserted.
aList = []
bisect.insort(aList,3)
bisect.insort(aList,4)
bisect.insort(aList,7)
bisect.insort(aList,4)
bisect.insort(aList,3)
bisect.insort(aList,6)
bisect.insort(aList,5)
bisect.insort(aList,9)
print(bisect.bisect(aList,8))
print(aList) | UTF-8 | Python | false | false | 372 | py | 36 | bisect_test.py | 36 | 0.772727 | 0.743506 | 0 | 17 | 17.176471 | 29 |
dbr/tvdb_api | 1,803,886,264,870 | 53d6bbde21dd65915a1b33c0c13fd8303b0914a9 | 6faa73f33a4ce3f2d62588bc16f17d0cf787b10d | /tests/test_tvdb_api.py | 2b3034f1e0123e1957dabca27a85839659779d6a | [
"Unlicense"
]
| permissive | https://github.com/dbr/tvdb_api | 326cad12fa8540cbb3e9ce1cb8db7695ba6fc636 | ce0382181a9e08a5113bfee0fed2c78f8b1e613f | refs/heads/master | 2023-08-17T08:08:09.660904 | 2021-09-03T13:30:43 | 2021-09-03T13:30:43 | 14,645 | 226 | 54 | Unlicense | false | 2023-06-29T18:08:42 | 2008-05-05T13:01:53 | 2023-06-28T13:34:07 | 2023-06-29T18:08:41 | 2,288 | 342 | 69 | 15 | Python | false | false | #!/usr/bin/env python
# encoding:utf-8
# author:dbr/Ben
# project:tvdb_api
# repository:http://github.com/dbr/tvdb_api
# license:unlicense (http://unlicense.org/)
"""Unittests for tvdb_api
"""
import os
import sys
import types
import datetime
import pytest
# Force parent directory onto path
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import tvdb_api # noqa: E402
from tvdb_api import ( # noqa: E402
tvdb_shownotfound,
tvdb_seasonnotfound,
tvdb_episodenotfound,
tvdb_attributenotfound,
)
import requests_cache.backends # noqa: E402
import requests_cache.backends.base # noqa: E402
try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
import pickle # noqa: E402
IS_PY2 = sys.version_info[0] == 2
if IS_PY2:
# Not really but good enough for backwards-compat here
FileNotFoundError = IOError
# By default tests use persistent (committed to Git) cache.
# Setting this env-var allows the cache to be populated.
# This is necessary if, say, adding new test case or TVDB response changes.
# It is recommended to clear the cache directory before re-populating the cache.
ALLOW_CACHE_WRITE_ENV_VAR = "TVDB_API_TESTS_ALLOW_CACHE_WRITE"
ALLOW_CACHE_WRITE = os.getenv(ALLOW_CACHE_WRITE_ENV_VAR, "0") == "1"
class FileCacheDict(MutableMapping):
def __init__(self, base_dir):
self._base_dir = base_dir
def __getitem__(self, key):
path = os.path.join(self._base_dir, key)
try:
with open(path, "rb") as f:
data = pickle.load(f)
return data
except FileNotFoundError:
if not ALLOW_CACHE_WRITE:
raise RuntimeError("No cache file found %s" % path)
raise KeyError
def __setitem__(self, key, item):
if ALLOW_CACHE_WRITE:
path = os.path.join(self._base_dir, key)
with open(path, "wb") as f:
# Dump with protocol 2 to allow Python 2.7 support
f.write(pickle.dumps(item, protocol=2))
else:
raise RuntimeError(
"Requested uncached URL and $%s not set to 1" % (ALLOW_CACHE_WRITE_ENV_VAR)
)
def __delitem__(self, key):
raise RuntimeError("Removing items from test-cache not supported")
def __len__(self):
raise NotImplementedError()
def __iter__(self):
raise NotImplementedError()
def clear(self):
raise NotImplementedError()
def __str__(self):
return str(dict(self.items()))
class FileCache(requests_cache.backends.base.BaseCache):
def __init__(self, _name, fc_base_dir, **options):
super(FileCache, self).__init__(**options)
self.responses = FileCacheDict(base_dir=fc_base_dir)
self.keys_map = FileCacheDict(base_dir=fc_base_dir)
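# Register the backend under a custom name so CachedSession(backend="tvdb_api_file_cache") can resolve it.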
requests_cache.backends.registry['tvdb_api_file_cache'] = FileCache
def get_test_cache_session():
here = os.path.dirname(os.path.abspath(__file__))
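    # Python 2 pickles its cached responses differently, so it gets its own cache directory.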
additional = "_py2" if sys.version_info[0] == 2 else ""
sess = requests_cache.CachedSession(
backend="tvdb_api_file_cache",
fc_base_dir=os.path.join(here, "httpcache%s" % additional),
include_get_headers=True,
allowable_codes=(200, 404),
)
sess.cache.create_key = types.MethodType(tvdb_api.create_key, sess.cache)
return sess
class TestTvdbBasic:
# Used to store the cached instance of Tvdb()
t = None
@classmethod
def setup_class(cls):
if cls.t is None:
cls.t = tvdb_api.Tvdb(cache=get_test_cache_session(), banners=False)
def test_different_case(self):
"""Checks the auto-correction of show names is working.
It should correct the weirdly capitalised 'sCruBs' to 'Scrubs'
"""
assert self.t['scrubs'][1][4]['episodeName'] == 'My Old Lady'
assert self.t['sCruBs']['seriesName'] == 'Scrubs'
def test_spaces(self):
"""Checks shownames with spaces
"""
assert self.t['My Name Is Earl']['seriesName'] == 'My Name Is Earl'
assert self.t['My Name Is Earl'][1][4]['episodeName'] == 'Faked My Own Death'
def test_numeric(self):
"""Checks numeric show names
"""
assert self.t['24'][2][20]['episodeName'] == 'Day 2: 3:00 A.M. - 4:00 A.M.'
assert self.t['24']['seriesName'] == '24'
def test_show_iter(self):
"""Iterating over a show returns each seasons
"""
assert len([season for season in self.t['scrubs']]) == 10
def test_season_iter(self):
"""Iterating over a show returns episodes
"""
assert len([episode for episode in self.t['scrubs'][1]]) == 24
def test_get_episode_overview(self):
"""Checks episode overview is retrieved correctly.
"""
assert self.t['Scrubs'][1][6]['overview'].startswith(
'Dr. Cox is still facing the threat of suspension'
)
try:
self.t['Scrubs']['something nonsensical']
except tvdb_attributenotfound:
pass # good
else:
raise AssertionError("Expected attribute error")
def test_get_parent(self):
"""Check accessing series from episode instance
"""
show = self.t['Scrubs']
season = show[1]
episode = show[1][1]
assert season.show == show
assert episode.season == season
assert episode.season.show == show
def test_no_season(self):
show = self.t['Katekyo Hitman Reborn']
print(tvdb_api)
print(show[1][1])
class TestTvdbErrors:
t = None
@classmethod
def setup_class(cls):
if cls.t is None:
cls.t = tvdb_api.Tvdb(cache=get_test_cache_session(), banners=False)
def test_seasonnotfound(self):
"""Checks exception is thrown when season doesn't exist.
"""
with pytest.raises(tvdb_seasonnotfound):
self.t['Scrubs'][42]
def test_shownotfound(self):
"""Checks exception is thrown when episode doesn't exist.
"""
with pytest.raises(tvdb_shownotfound):
self.t['the fake show thingy']
def test_shownotfound_by_id(self):
"""Checks exception is thrown when episode doesn't exist.
"""
with pytest.raises(tvdb_shownotfound):
self.t[999999999999999999999999]
def test_episodenotfound(self):
"""Checks exception is raised for non-existent episode
"""
with pytest.raises(tvdb_episodenotfound):
self.t['Scrubs'][1][30]
def test_attributenamenotfound(self):
"""Checks exception is thrown for if an attribute isn't found.
"""
with pytest.raises(tvdb_attributenotfound):
self.t['Scrubs'][1][6]['afakeattributething']
self.t['Scrubs']['afakeattributething']
class TestTvdbSearch:
# Used to store the cached instance of Tvdb()
t = None
@classmethod
def setup_class(cls):
if cls.t is None:
cls.t = tvdb_api.Tvdb(cache=get_test_cache_session(), banners=False)
def test_search_len(self):
"""There should be only one result matching
"""
assert len(self.t['My Name Is Earl'].search('Faked My Own Death')) == 1
def test_search_checkname(self):
"""Checks you can get the episode name of a search result
"""
assert self.t['Scrubs'].search('my first')[0]['episodeName'] == 'My First Day'
assert (
self.t['My Name Is Earl'].search('Faked My Own Death')[0]['episodeName']
== 'Faked My Own Death'
)
def test_search_multiresults(self):
"""Checks search can return multiple results
"""
assert len(self.t['Scrubs'].search('my first')) >= 3
def test_search_no_params_error(self):
"""Checks not supplying search info raises TypeError"""
with pytest.raises(TypeError):
self.t['Scrubs'].search()
def test_search_season(self):
"""Checks the searching of a single season"""
assert len(self.t['Scrubs'][1].search("First")) == 3
def test_search_show(self):
"""Checks the searching of an entire show"""
assert len(self.t['CNNNN'].search('CNNNN', key='episodeName')) == 3
def test_aired_on(self):
"""Tests aired_on show method"""
sr = self.t['Scrubs'].aired_on(datetime.date(2001, 10, 2))
assert len(sr) == 1
assert sr[0]['episodeName'] == u'My First Day'
try:
sr = self.t['Scrubs'].aired_on(datetime.date(1801, 1, 1))
except tvdb_episodenotfound:
pass # Good
else:
raise AssertionError("expected episode not found exception")
class TestTvdbData:
# Used to store the cached instance of Tvdb()
t = None
@classmethod
def setup_class(cls):
if cls.t is None:
cls.t = tvdb_api.Tvdb(cache=get_test_cache_session(), banners=False)
def test_episode_data(self):
"""Check the firstaired value is retrieved
"""
assert self.t['lost']['firstAired'] == '2004-09-22'
class TestTvdbMisc:
# Used to store the cached instance of Tvdb()
t = None
@classmethod
def setup_class(cls):
if cls.t is None:
cls.t = tvdb_api.Tvdb(cache=get_test_cache_session(), banners=False)
def test_repr_show(self):
"""Check repr() of Season
"""
assert (
repr(self.t['CNNNN']).replace("u'", "'")
== "<Show 'Chaser Non-Stop News Network (CNNNN)' (containing 3 seasons)>"
)
def test_repr_season(self):
"""Check repr() of Season
"""
assert repr(self.t['CNNNN'][1]) == "<Season instance (containing 9 episodes)>"
def test_repr_episode(self):
"""Check repr() of Episode
"""
assert repr(self.t['CNNNN'][1][1]).replace("u'", "'") == "<Episode 01x01 - 'Terror Alert'>"
def test_available_langs(self):
"""Check available_languages returns something sane looking
"""
langs = self.t.available_languages()
print(langs)
assert "en" in langs
class TestTvdbLanguages:
def test_episode_name_french(self):
"""Check episode data is in French (language="fr")
"""
t = tvdb_api.Tvdb(cache=get_test_cache_session(), language="fr")
assert t['scrubs'][1][1]['episodeName'] == "Mon premier jour"
assert t['scrubs']['overview'].startswith(u"J.D. est un jeune m\xe9decin qui d\xe9bute")
def test_episode_name_spanish(self):
"""Check episode data is in Spanish (language="es")
"""
t = tvdb_api.Tvdb(cache=get_test_cache_session(), language="es")
assert t['scrubs'][1][1]['episodeName'] == u'Mi primer día'
assert t['scrubs']['overview'].startswith(u'Scrubs es una divertida comedia')
def test_multilanguage_selection(self):
"""Check selected language is used
"""
t_en = tvdb_api.Tvdb(cache=get_test_cache_session(), language="en")
t_it = tvdb_api.Tvdb(cache=get_test_cache_session(), language="it")
assert t_en['dexter'][1][2]['episodeName'] == "Crocodile"
assert t_it['dexter'][1][2]['episodeName'] == "Lacrime di coccodrillo"
class TestTvdbUnicode:
def test_search_in_chinese(self):
"""Check searching for show with language=zh returns Chinese seriesname
"""
t = tvdb_api.Tvdb(cache=get_test_cache_session(), language="zh")
show = t[u'T\xecnh Ng\u01b0\u1eddi Hi\u1ec7n \u0110\u1ea1i']
assert type(show) == tvdb_api.Show
assert show['seriesName'] == u'T\xecnh Ng\u01b0\u1eddi Hi\u1ec7n \u0110\u1ea1i'
    @pytest.mark.skip('The new API does not return all languages at once')
def test_search_in_all_languages(self):
"""Check search_all_languages returns Chinese show, with language=en
"""
t = tvdb_api.Tvdb(cache=get_test_cache_session(), search_all_languages=True, language="en")
show = t[u'T\xecnh Ng\u01b0\u1eddi Hi\u1ec7n \u0110\u1ea1i']
assert type(show) == tvdb_api.Show
assert show['seriesName'] == u'Virtues Of Harmony II'
class TestTvdbBanners:
# Used to store the cached instance of Tvdb()
t = None
@classmethod
def setup_class(cls):
if cls.t is None:
cls.t = tvdb_api.Tvdb(cache=get_test_cache_session(), banners=True)
def test_have_banners(self):
"""Check banners at least one banner is found
"""
assert len(self.t['scrubs']['_banners']) > 0
def test_banner_url(self):
"""Checks banner URLs start with http://
"""
for banner_type, banner_data in self.t['scrubs']['_banners'].items():
for res, res_data in banner_data.items():
if res != 'raw':
for bid, banner_info in res_data.items():
assert banner_info['_bannerpath'].startswith("http://")
    @pytest.mark.skip('The new API has no image for the episode')
def test_episode_image(self):
"""Checks episode 'filename' image is fully qualified URL
"""
assert self.t['scrubs'][1][1]['filename'].startswith("http://")
    @pytest.mark.skip('In the new API the series has no artwork other than the banner')
def test_show_artwork(self):
"""Checks various image URLs within season data are fully qualified
"""
for key in ['banner', 'fanart', 'poster']:
assert self.t['scrubs'][key].startswith("http://")
class TestTvdbActors:
t = None
@classmethod
def setup_class(cls):
if cls.t is None:
cls.t = tvdb_api.Tvdb(cache=get_test_cache_session(), actors=True)
def test_actors_is_correct_datatype(self):
"""Check show/_actors key exists and is correct type"""
assert isinstance(self.t['scrubs']['_actors'], tvdb_api.Actors)
def test_actors_has_actor(self):
"""Check show has at least one Actor
"""
assert isinstance(self.t['scrubs']['_actors'][0], tvdb_api.Actor)
def test_actor_has_name(self):
"""Check first actor has a name"""
names = [actor['name'] for actor in self.t['scrubs']['_actors']]
assert u"Zach Braff" in names
def test_actor_image_corrected(self):
"""Check image URL is fully qualified
"""
for actor in self.t['scrubs']['_actors']:
if actor['image'] is not None:
# Actor's image can be None, it displays as the placeholder
# image on thetvdb.com
assert actor['image'].startswith("http://")
class TestTvdbDoctest:
def test_doctest(self):
"""Check docstring examples works"""
import doctest
doctest.testmod(tvdb_api)
class TestTvdbCustomCaching:
def test_true_false_string(self):
"""Tests setting cache to True/False/string
Basic tests, only checking for errors
"""
tvdb_api.Tvdb(cache=True)
tvdb_api.Tvdb(cache=False)
tvdb_api.Tvdb(cache="/tmp")
def test_invalid_cache_option(self):
"""Tests setting cache to invalid value
"""
try:
tvdb_api.Tvdb(cache=2.3)
except ValueError:
pass
else:
pytest.fail("Expected ValueError from setting cache to float")
def test_custom_request_session(self):
from requests import Session as OriginalSession
class Used(Exception):
pass
class CustomCacheForTest(OriginalSession):
call_count = 0
def request(self, *args, **kwargs):
raise Used("Hurray")
c = CustomCacheForTest()
t = tvdb_api.Tvdb(cache=c)
try:
t['scrubs']
except Used:
pass
else:
pytest.fail("Did not use custom session")
class TestTvdbById:
t = None
@classmethod
def setup_class(cls):
if cls.t is None:
cls.t = tvdb_api.Tvdb(cache=get_test_cache_session(), actors=True)
def test_actors_is_correct_datatype(self):
"""Check show/_actors key exists and is correct type"""
assert self.t[76156]['seriesName'] == 'Scrubs'
class TestTvdbShowOrdering:
def test_ordering(self):
"""Test Tvdb.search method
"""
t_dvd = tvdb_api.Tvdb(cache=get_test_cache_session(), dvdorder=True)
t_air = tvdb_api.Tvdb(cache=get_test_cache_session())
assert u'The Train Job' == t_air['Firefly'][1][1]['episodeName']
assert u'Serenity' == t_dvd['Firefly'][1][1]['episodeName']
assert (
u'The Cat and the Claw (1)' == t_air['Batman The Animated Series'][1][1]['episodeName']
)
assert u'On Leather Wings' == t_dvd['Batman The Animated Series'][1][1]['episodeName']
class TestTvdbShowSearch:
# Used to store the cached instance of Tvdb()
t = None
@classmethod
def setup_class(cls):
if cls.t is None:
cls.t = tvdb_api.Tvdb(cache=get_test_cache_session())
def test_search(self):
"""Test Tvdb.search method
"""
results = self.t.search("my name is earl")
all_ids = [x['id'] for x in results]
assert 75397 in all_ids
class TestTvdbAltNames:
t = None
@classmethod
def setup_class(cls):
if cls.t is None:
cls.t = tvdb_api.Tvdb(cache=get_test_cache_session(), actors=True)
def test_1(self):
"""Tests basic access of series name alias
"""
results = self.t.search("Don't Trust the B---- in Apartment 23")
series = results[0]
assert 'Apartment 23' in series['aliases']
if __name__ == '__main__':
cache = get_test_cache_session()
t = tvdb_api.Tvdb(cache=cache)
t['scrubs'][1][2]
t = tvdb_api.Tvdb(cache=cache)
t['scrubs'][1][2]
# pytest.main()
| UTF-8 | Python | false | false | 18,071 | py | 13 | test_tvdb_api.py | 3 | 0.598676 | 0.58638 | 0.000278 | 569 | 30.586995 | 99 |
ksergie/testMobile | 6,889,127,549,079 | c4f3cbeaea7715e77482c979cff62e1aba4d89d0 | d8b74852bc3856d6a7272c67c926facbe0b34b30 | /test_main_page.py | da47ee5d3fba8b06a49bbd223d9eb75c0583bf64 | []
| no_license | https://github.com/ksergie/testMobile | 4cc9ee5bd4edf82197318474a613f1b033c66977 | 08d24bd3d94ac2e68761f9c242fa9eee3dc115f4 | refs/heads/master | 2021-06-30T19:04:12.666123 | 2020-01-05T11:11:52 | 2020-01-05T11:11:52 | 231,894,084 | 0 | 0 | null | false | 2021-06-02T00:54:52 | 2020-01-05T09:21:20 | 2020-01-05T11:12:01 | 2021-06-02T00:54:51 | 13,758 | 0 | 0 | 2 | Python | false | false | from .pages.main_page import MainPage
def test_open_main_page(browser):
link = "https://www.lotteryheroes.com/"
page = MainPage(browser, link)
page.open()
assert page.main_pages_title(), "Play Online Lottery | Bet on global lotteries jackpots | LotteryHeroes"
| UTF-8 | Python | false | false | 278 | py | 3 | test_main_page.py | 2 | 0.71223 | 0.71223 | 0 | 8 | 33.75 | 108 |
hjcafaroUC/matique | 12,936,441,508,382 | b74bb6ea455f356181a1dc12a56288454a9e98ea | c864e8066b4ae2abf85e4e9fe85408d27b5c47d6 | /diff.py | 503c3a656e3c7feb8217bd1736d2bae6a46c37e6 | []
| no_license | https://github.com/hjcafaroUC/matique | b7421cbd78d7328f109261e0e5231c279e080a98 | bc733d7a87874d9b6483f896e91546322bc09388 | refs/heads/master | 2022-11-30T07:37:45.892478 | 2020-08-07T03:08:47 | 2020-08-07T03:08:47 | 279,121,823 | 0 | 0 | null | false | 2020-08-07T00:30:38 | 2020-07-12T18:13:12 | 2020-08-06T04:20:01 | 2020-08-07T00:30:37 | 64 | 0 | 0 | 2 | Python | false | false | #diff utility for checking correctness of code-check against earlier versions
print("Enter 1st file")
f1 = open(input(),"r")
print("Enter 2nd file")
f2 = open(input(),"r")
lines1 = f1.read().split(sep="\n")
lines2 = f2.read().split(sep="\n")
f1.close()
f2.close()
if(len(lines1) != len(lines2)):
print("Lengths differ : " + str(len(lines1)) + " " + str(len(lines2)))
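# Compare line-by-line over the shared prefix of the two files.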
for i in range(min(len(lines1),len(lines2))):
if(lines1[i] !=lines2[i]):
print("Difference on line " + str(i))
print(lines1[i])
print(lines2[i])
print("Diff is complete")
| UTF-8 | Python | false | false | 555 | py | 16 | diff.py | 13 | 0.61982 | 0.587387 | 0 | 22 | 24.227273 | 77 |
dnanexus/dx-toolkit | 3,393,024,202,844 | 2f04c6d67c05efbaadf8372d7965218b25e7bc4b | d75f33671fe9431c09b384a91d04bdcaaa6ae68c | /src/python/dxpy/toolkit_version.py | e7a7c36712315585f4e1456eca3ca710ce1c4459 | [
"Apache-2.0"
]
| permissive | https://github.com/dnanexus/dx-toolkit | df61a794c275f735b6216afe234fcff0240c422d | ad4f498ae80fb0cd2e591f63a7bf4fb983049c75 | refs/heads/master | 2023-08-30T20:19:06.383154 | 2023-08-30T18:06:44 | 2023-08-30T18:06:44 | 4,802,269 | 82 | 80 | Apache-2.0 | false | 2023-09-14T18:35:09 | 2012-06-27T00:23:47 | 2023-08-14T11:42:53 | 2023-09-14T18:35:08 | 36,224 | 76 | 77 | 80 | Python | false | false | version = '0.356.0'
| UTF-8 | Python | false | false | 20 | py | 502 | toolkit_version.py | 312 | 0.6 | 0.35 | 0 | 1 | 19 | 19 |
HenReLili/FP02 | 4,939,212,430,612 | c57d47c7a0d37452f39b4cda59783d4ca57841b0 | 40c2b31d6bca6bc16444d444cea4894f737ac761 | /dataorganisator.py | f4edba12b64fe451b85580b3c4a360f23da38876 | []
| no_license | https://github.com/HenReLili/FP02 | bc8e4e842465faf162207e59dd7b49d2fb9130ca | 5fe4c41bf55b640d930e194afa973292e67c78c0 | refs/heads/main | 2023-05-30T18:47:05.173962 | 2021-06-17T12:17:06 | 2021-06-17T12:17:06 | 377,820,877 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
reads data
"""
import os.path
import numpy as np
def datareader(datanumber):
    """Reads the data of one measurement file.
    Args: datanumber: index n of the measurement file spektrum{n}.txt to read
    Returns: data: 2 x N numpy array, row 0 wavelengths, row 1 intensities
    """
wavelengths = []
intensity = []
dataname = "spektrum{number}.txt".format(number=datanumber)
fp = open(os.path.join("measurements", dataname))
readdata = fp.read().split()
    fp.close()
for i in range(len(readdata)):
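        # Values use decimal commas; tokens alternate wavelength / intensity.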
readdata[i] = float(readdata[i].replace(",", "."))
if i % 2 == 0:
wavelengths.append(readdata[i])
else:
intensity.append(readdata[i])
data = np.zeros((2, len(wavelengths)))
data[0, :] = wavelengths
data[1, :] = intensity
return data
| UTF-8 | Python | false | false | 823 | py | 5 | dataorganisator.py | 5 | 0.611179 | 0.605103 | 0 | 33 | 23.939394 | 71 |
Squiercg/recologia | 515,396,078,843 | 0a325d78f94be414bcf539df041e6333699381b4 | 7ae64966a18056434545eec13a6fe6c563a8fb50 | /project_euler/prob009.py | 537c96689b28fdaf9fdb363a075c2fb55871f31f | []
| no_license | https://github.com/Squiercg/recologia | 522812932fc26ae6f0f60df911365ef06b00d916 | d873b3fa08bac3fe633506c8c3591b884526c728 | refs/heads/master | 2020-05-21T22:11:43.879102 | 2019-11-07T14:23:35 | 2019-11-07T14:23:35 | 9,555,220 | 7 | 6 | null | null | null | null | null | null | null | null | null | null | null | null | null | def produto(l):
"""Calcula o produto dos itens de um iterable"""
t = 1
for i in l:
t *= int(i)
return t
def tripleto(n):
    """Finds the Pythagorean triplet whose sum equals the input number"""
for c in range(n - 3, 1, -1):
for b in range(c - 1, 1, -1):
a = (c**2 - b**2)**.5
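            # a is forced by Pythagoras; the exact sum test below also forces a to be integral.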
if a + b + c == n:
return [c, b, int(a)]
return False
print tripleto(1000)
print produto(tripleto(1000))
| UTF-8 | Python | false | false | 476 | py | 195 | prob009.py | 180 | 0.513684 | 0.475789 | 0 | 19 | 23.894737 | 81 |
ChaoOnGitHub/psClean | 10,900,627,002,742 | 579aafec4c3721704bcacc0d4e3609213a8892fb | 7ad2468f1398f7f4eb6105ff06964bb51c84fc5c | /test/test_ipc_green_query.py | e6c71ddb091bc008316bf00cadec1fad403d71a1 | []
| no_license | https://github.com/ChaoOnGitHub/psClean | ecd0e5450787eece5be2f03f6e457b4646ba36af | 85e1a98f709e7ab8efe1b0f6a5418fe0b107f142 | refs/heads/master | 2021-01-18T11:33:40.816377 | 2013-01-25T18:44:44 | 2013-01-25T18:44:44 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import MySQLdb
import csv
import re
import time
os.chdir('/home/markhuberty/Documents/psClean')
conn = open('./data/ipc_green_inventory_tags_8dig.csv')
reader = csv.reader(conn)
ipc_codes = [row[-1] for row in reader]
conn.close()
## Format correctly
ipc_codes = [re.sub(' ', ' ', code) for code in ipc_codes]
ipc_strings = ','.join(['%s'] * len(ipc_codes))
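# One %s placeholder per IPC code for the parameterized IN (...) clause below.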
dbconn = MySQLdb.connect(host="127.0.0.1",
port=3306,
user="markhuberty",
passwd="patstat_huberty",
db="patstatOct2011",
use_unicode=True,
charset='utf8'
)
start_time = time.time()
conn_cursor = dbconn.cursor()
conn_cursor.execute("""
SELECT appln_id, ipc_class_symbol FROM tls209_appln_ipc WHERE ipc_class_symbol IN (%s)
""" % ipc_strings, tuple(ipc_codes))
ipc_ids = conn_cursor.fetchall()
conn_cursor.close()
dbconn.close()
end_time = time.time()
time_diff = end_time - start_time
print(time_diff)
fieldnames = ['appln_id', 'ipc_code']
conn_out = open('./data/ipc_grn_class_ids.csv', 'wt')
writer = csv.writer(conn_out)
writer.writerow(fieldnames)
for item in ipc_ids:
writer.writerow([str(item[0]), item[1]])
conn_out.close()
| UTF-8 | Python | false | false | 1,286 | py | 26 | test_ipc_green_query.py | 25 | 0.602644 | 0.585537 | 0 | 47 | 26.361702 | 86 |
calvinchankf/AlgoDaily | 16,750,372,481,116 | 4645fd5d73e7b8b2cb944173855d4e7c8f70d3fb | e00f0f6a6f605532b1123d3e985a5902609eb30a | /codility/10-min_perimeter_rectangle/main.py | 5571eaf511a9e96e44af9cede309df8cf68da7d6 | []
| no_license | https://github.com/calvinchankf/AlgoDaily | b53c5ea5896e9b8662f4f97bce3eb41b480eb8e0 | 8ee5006e42442043816881ee88ccc485a3a56ec5 | refs/heads/master | 2023-08-25T11:48:47.415388 | 2023-08-25T08:55:22 | 2023-08-25T08:55:22 | 146,955,627 | 154 | 48 | null | false | 2023-09-09T09:02:43 | 2018-09-01T00:56:43 | 2023-09-03T17:36:52 | 2023-09-09T09:02:42 | 63,478 | 2 | 1 | 11 | Python | false | false | import math
"""
1st approach: math
Time O(sqrt(N))
Space O(1)
Result 100/100
https://app.codility.com/demo/results/training96RPGQ-5K6/
"""
def solution(N):
# write your code in Python 3.6
root = int(math.sqrt(N))
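    # Scan divisors downward from sqrt(N); the first hit pairs with the closest quotient, giving the minimal perimeter.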
for i in range(root+1, 0, -1):
quotient = N//i
if i*quotient == N:
return 2*(i+quotient)
| UTF-8 | Python | false | false | 371 | py | 3,428 | main.py | 3,404 | 0.560647 | 0.512129 | 0 | 19 | 18.526316 | 61 |
derf/icinga2-check-submitter | 2,903,397,935,590 | 9a72bea731457a81ffd90964dbf1dc9f97bc8648 | e8f315c264b0bfffd1c9693e1aff966efe570196 | /bin/icinga2-run-checks | 47c9f37767f37f768264a576757186fd68f707d1 | [
"MIT"
]
| permissive | https://github.com/derf/icinga2-check-submitter | 23fce502e297a29672d1df95e71e6d9526db71fc | 7edf5f61313bb288f89546a05b407ce2ad31041f | refs/heads/master | 2023-01-05T15:17:29.156674 | 2020-11-07T10:39:44 | 2020-11-07T10:40:23 | 310,821,495 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
import json
import random
import requests
import subprocess
import sys
import time
with open("/etc/nagios/icinga2-passive-checks.json", "r") as f:
config = json.load(f)
host = config["host"]
passive_ping = config["passive_ping"]
checks = config["checks"]
api = config["api"]
headers = {"Accept": "application/json"}
auth = tuple(config["auth"])
if len(sys.argv) > 1 and sys.argv[1] == "cron":
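    # Cron-triggered runs sleep a random 0-30s so many hosts don't hit the API at once.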
time.sleep(random.random() * 30)
for check, command in checks.items():
check_result = subprocess.run(command, shell=True, capture_output=True)
req = {
"type": "Service",
"filter": f"""host.name=="{host}" && service.name=="{check}" """,
"exit_status": check_result.returncode,
}
check_output = check_result.stdout.decode("utf-8")
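    # Nagios plugin convention: "human-readable output | performance data".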
if "|" in check_output:
output, performance_data = check_output.split("|")
req["plugin_output"] = output.strip()
req["performance_data"] = performance_data.strip()
else:
req["plugin_output"] = check_output.strip()
res = requests.post(api, auth=auth, headers=headers, json=req)
if res.status_code != 200:
print(f"Error {res.status_code} when submitting {check}: {res.json()}")
if passive_ping:
req = {
"type": "Host",
"filter": f"""host.name=="{host}" """,
"exit_status": 0,
"plugin_output": "Alive",
}
res = requests.post(api, auth=auth, headers=headers, json=req)
if res.status_code != 200:
print(res.json())
| UTF-8 | Python | false | false | 1,523 | 3 | icinga2-run-checks | 1 | 0.613263 | 0.604071 | 0 | 52 | 28.288462 | 79 |
|
NeehaK/python_intermediate_project | 19,344,532,717,253 | f9b06fea2d0a8d7caf03a186ed6c62baef0c07e1 | 9fe355a0da2fd9e615322b8122b622a1cb20c2e2 | /q08_get_total_extras/build.py | 5edaf3cd70745f8274f85b6b91ae8cc2cc6bd363 | []
| no_license | https://github.com/NeehaK/python_intermediate_project | af8af5d307abb0041199f54b4ab74c8475d9a1c2 | ef4b317ae32495f6ad62196e798656b880c293df | refs/heads/master | 2020-03-14T01:59:28.465710 | 2018-05-02T12:14:26 | 2018-05-02T12:14:26 | 131,389,663 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # %load q08_get_total_extras/build.py
# Default Imports
from greyatomlib.python_intermediate.q05_read_csv_data.build import read_ipl_data_csv
import numpy as np
path = 'data/ipl_matches_small.csv'
def get_total_extras():
file_data = read_ipl_data_csv(path, dtype='|S50')
    extras_col = list(file_data[:, 17])
    extras_list = [int(x) for x in extras_col]
    # summing directly is equivalent: zero entries do not change the total
    extras = sum(extras_list)
return extras
# Enter Code Here
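# quick check (illustrative, not part of the grader):
# print(get_total_extras())  # total extras conceded across the sample matches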
| UTF-8 | Python | false | false | 523 | py | 5 | build.py | 5 | 0.66348 | 0.640535 | 0 | 18 | 28 | 85 |
general-programming/tumblrarchives | 11,098,195,505,639 | 669f59b19e02df11d063c678eac19e1b2db3971a | 15c3febe00af3959c6e16bcc8b59920208fba962 | /web/alembic/versions/9dafd4fd2d5f_last_crawl_update.py | 9e442b7bce2cf6c0067324dd3a86023bd1e29fc1 | []
| no_license | https://github.com/general-programming/tumblrarchives | 26ee1ca5877b387a9356f200e9769def92b4f948 | 74386daf0f50d7dfd07042207ff29105fb3430fa | refs/heads/master | 2020-04-09T16:36:24.500759 | 2018-12-13T23:51:29 | 2018-12-13T23:51:29 | 160,457,485 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """last_crawl_update
Revision ID: 9dafd4fd2d5f
Revises: d00401ed8599
Create Date: 2018-12-08 03:25:42.357038
"""
# revision identifiers, used by Alembic.
revision = '9dafd4fd2d5f'
down_revision = 'd00401ed8599'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('blogs', sa.Column('last_crawl_update', sa.DateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('blogs', 'last_crawl_update')
# ### end Alembic commands ###
| UTF-8 | Python | false | false | 677 | py | 64 | 9dafd4fd2d5f_last_crawl_update.py | 40 | 0.686854 | 0.618907 | 0 | 28 | 23.178571 | 88 |
TheScottishFly/Netflux | 7,464,653,161,255 | 76c4d7b855dbff4ff215578022cadb7b22080067 | 82975e04aea861c5f9b10fad270baf929bf67baa | /apps/users/models.py | ab39d0dbfc13126a300b600d8b600ef4ffb67889 | []
| no_license | https://github.com/TheScottishFly/Netflux | a7d278c82f7de38bafe2ba57931a118777551379 | 4df0564c95f50dd8ee6dddab93e8dea84e252af0 | refs/heads/master | 2020-04-13T08:34:53.889224 | 2019-02-20T11:19:14 | 2019-02-20T11:19:14 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
from django.contrib.auth.models import User
class ExtendedUserManager(models.Manager):
def get_queryset(self):
return super(ExtendedUserManager, self).get_queryset().select_related('user')
class ExtendedUserModel(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
avatar = models.CharField(max_length=1500)
default_lang = models.CharField(max_length=10, default="en")
already_seen = models.ManyToManyField("home.Movie", related_name='already_seen')
port = models.IntegerField(null=True)
type = models.CharField(max_length=100, default='')
| UTF-8 | Python | false | false | 633 | py | 54 | models.py | 40 | 0.742496 | 0.728278 | 0 | 16 | 38.5 | 85 |
YeFeiyangx/grownup_share | 16,767,552,342,927 | e6bc8775fccc59ea05c2426955a1e9206993584e | f13798ab8440948364bab96cbf23ce6b30c0a9f8 | /rl-book-challenge-master/chapter6/driving.py | 0fab24a334f716c9e85ab1c3731dae6eb2dd3579 | [
"MIT"
]
| permissive | https://github.com/YeFeiyangx/grownup_share | 29de787d268e6abd6028296c6684adbc4097389c | a6686dbd614c62f62d55735bc95a5e58320e36e6 | refs/heads/master | 2022-12-07T05:57:48.336393 | 2021-05-06T12:17:21 | 2021-05-06T12:17:21 | 230,780,311 | 5 | 0 | null | false | 2022-11-21T21:04:58 | 2019-12-29T17:13:30 | 2021-05-06T12:17:35 | 2022-11-21T21:04:55 | 25,516 | 2 | 0 | 4 | Jupyter Notebook | false | false | GO_HOME = 0
STATES = ["leaving office",
"reach car",
"exiting highway",
"2ndary road",
"home street",
"arrive home"]
TRAVEL_TIME = [5, 15, 10, 10, 3, 0]
class DrivingEnv:
def __init__(self):
self.reset()
@property
def moves(self):
return [GO_HOME]
@property
def states(self):
return STATES
def associated_reward(self, state):
return TRAVEL_TIME[self.states.index(state)]
def step(self, action):
state_idx = self.states.index(self.state)
done = state_idx == len(self.states) - 2
new_state = self.states[(state_idx + 1) % len(self.states)]
self.state = new_state
return new_state, TRAVEL_TIME[state_idx], done, {}
def reset(self):
self.state = self.states[0]
return self.state
def __str__(self):
return self.state
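# Minimal usage sketch (illustrative, not part of the original module): run one
# full commute and accumulate the travel-time "reward".
if __name__ == "__main__":
    env = DrivingEnv()
    state, total_time, done = env.reset(), 0, False
    while not done:
        state, elapsed, done, _ = env.step(GO_HOME)
        total_time += elapsed
    print(total_time)  # 43 = 5 + 15 + 10 + 10 + 3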
| UTF-8 | Python | false | false | 836 | py | 219 | driving.py | 147 | 0.599282 | 0.582536 | 0 | 37 | 21.594595 | 63 |
FeezyHendrix/colosseum | 13,082,470,430,376 | 2060174420f96dafe050df01f0e8fed86502be46 | e352a11ba612bc52b5e5d0f2635dd4cc54e972b0 | /tests/web_platform/CSS2/normal_flow/test_block_in_inline_append_002_ref.py | 1e30c6f942537a3f5ec8aadd09d4f3b04b54e587 | [
"BSD-3-Clause"
]
| permissive | https://github.com/FeezyHendrix/colosseum | 6e8f4996a3d6531813a0a797c9a1cdb0771057ec | d6cffba96d732ac50dc514b600622a44c95ec616 | refs/heads/master | 2023-04-15T04:41:29.469230 | 2020-03-01T13:37:54 | 2020-03-01T13:37:54 | 226,541,151 | 0 | 1 | NOASSERTION | true | 2023-04-04T01:11:58 | 2019-12-07T16:14:59 | 2020-03-01T13:38:14 | 2023-03-24T22:34:23 | 27,292 | 0 | 0 | 0 | Python | false | false | from tests.utils import W3CTestCase
class TestBlockInInlineAppend002Ref(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'block-in-inline-append-002-ref'))
| UTF-8 | Python | false | false | 174 | py | 721 | test_block_in_inline_append_002_ref.py | 492 | 0.775862 | 0.724138 | 0 | 4 | 42.25 | 85 |
bbhunter/boucanpy | 15,075,335,215,361 | 63c8f1800d92cb0a182aff536e100428c775d04f | d3a0303ce235e131c8b14ce1bdf362bb85a31311 | /boucanpy/db/models/zone.py | b32478f1ec8341074eb2d678ba40527b870a4a79 | [
"MIT"
]
| permissive | https://github.com/bbhunter/boucanpy | 303f56d5134e615506435a9c12027d495f5aa11a | 7d2fb105e7b1e90653a511534fb878bb62d02f17 | refs/heads/master | 2022-02-27T03:22:12.426415 | 2021-02-10T21:26:57 | 2021-02-10T21:26:57 | 224,160,082 | 0 | 0 | MIT | true | 2022-02-01T19:49:07 | 2019-11-26T10:06:39 | 2019-11-26T10:06:41 | 2022-02-01T19:49:05 | 1,669 | 0 | 0 | 0 | null | false | false | from sqlalchemy import Boolean, Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship
from boucanpy.core import logger
from boucanpy.broadcast import make_redis, make_broadcast_url
from .base import Base
class Zone(Base):
__tablename__ = "zones"
__searchable__ = ["domain", "ip"]
id = Column(Integer, primary_key=True, index=True)
domain = Column(String, unique=True, index=True)
ip = Column(String, unique=False, index=True)
is_active = Column(Boolean(), default=True)
dns_server_id = Column(ForeignKey("dns_servers.id"), nullable=True)
dns_server = relationship(
"boucanpy.db.models.dns_server.DnsServer",
foreign_keys="boucanpy.db.models.zone.Zone.dns_server_id",
back_populates="zones",
)
dns_requests = relationship(
"boucanpy.db.models.dns_request.DnsRequest",
foreign_keys="boucanpy.db.models.dns_request.DnsRequest.zone_id",
back_populates="zone",
)
dns_records = relationship(
"boucanpy.db.models.dns_record.DnsRecord",
foreign_keys="boucanpy.db.models.dns_record.DnsRecord.zone_id",
back_populates="zone",
)
http_server_id = Column(ForeignKey("http_servers.id"), nullable=True)
http_server = relationship(
"boucanpy.db.models.http_server.HttpServer",
foreign_keys="boucanpy.db.models.zone.Zone.http_server_id",
back_populates="zones",
)
http_requests = relationship(
"boucanpy.db.models.http_request.HttpRequest",
foreign_keys="boucanpy.db.models.http_request.HttpRequest.zone_id",
)
@staticmethod
async def on_after_insert(mapper, connection, target):
try:
publisher = await make_redis()
res = await publisher.publish_json(
"channel:auth",
{"type": "MESSAGE", "name": "ZONE_CREATED", "payload": ""},
)
except Exception as e:
log.warning(f"on_after_insert error: {str(e)}")
def __repr__(self):
return f"<{str(self.__class__.__name__)}(id={str(self.id)},ip={str(self.ip)},domain={str(self.domain)},dns_server_id={str(self.dns_server_id)})>"
| UTF-8 | Python | false | false | 2,183 | py | 179 | zone.py | 172 | 0.640861 | 0.640861 | 0 | 61 | 34.786885 | 153 |
grechko1985/Stepik-2021 | 12,893,491,853,813 | 9245b4462029518e8f9e42772cded2766f0c2c25 | 48e97f95cff37ff412c0b683145b9ef1a5c63306 | /Chapter №2/10. Среднее арифметическое, которое кратно 3 (from part 2.3).py | db6c5fa2d4ab70c5c70336ac99916a3333380872 | []
| no_license | https://github.com/grechko1985/Stepik-2021 | 7751e78d6074777421cad10533bc38bac5b76218 | a077d7b71260fc0fdef269427547976691d0f475 | refs/heads/main | 2023-03-15T17:55:24.248213 | 2021-03-04T23:47:58 | 2021-03-04T23:47:58 | 330,654,184 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Напишите программу, которая считывает с клавиатуры два числа a и b, считает и выводит на консоль среднее
# арифметическое всех чисел из отрезка [a; b], которые кратны числу 3. В приведенном ниже примере среднее
# арифметическое считается для чисел на отрезке [-5; 12]. Всего чисел, делящихся на 3, на этом отрезке 6: -3, 0, 3, 6,
# 9, 12. Их среднее арифметическое равно 4.5.
# На вход программе подаются интервалы, внутри которых всегда есть хотя бы одно число, которое делится на 33.
a, b = int(input('Введите число a: ')), int(input('Введите число b: '))
s = 0
n = 0
for i in range(a, b + 1):
if i % 3 == 0:
s += i
n += 1
print(s / n)
| UTF-8 | Python | false | false | 1,054 | py | 84 | 10. Среднее арифметическое, которое кратно 3 (from part 2.3).py | 77 | 0.66716 | 0.633136 | 0 | 13 | 49.846154 | 118 |
joanna-janos/GestureRecognizer | 11,914,239,328,359 | 2f9118e6858a97201a16915a431515904fe2bac5 | 3970a8df09b6e42919fe96e1d3f9938863f1dff7 | /data_preparation/dataset.py | 2484ac7fb3a7604e5bba009c9c99a40edd59a535 | []
| no_license | https://github.com/joanna-janos/GestureRecognizer | b5cfddc9a092ebb085aebb973d06046c9d990095 | 62d9bd2f02d642901e094cfefbe4cb634b5e83ad | refs/heads/master | 2022-04-03T01:21:07.337830 | 2020-02-13T10:25:56 | 2020-02-13T10:25:56 | 220,803,684 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import typing
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from torch.utils.data import Dataset
from torchvision.transforms import Compose, Resize, ToTensor
class GestureDataset(Dataset):
""" Dataset for gestures where one sample is image-label.
Parameters
----------
paths : typing.List[str]
paths to images
gestures : typing.List[str]
labels (gestures names)
transform : torchvision.transforms.Compose
        Transformations to carry out on an image; default: resize to 512x256 and convert to a tensor
Returns
-------
    torch.Tensor, int
        tensor representing the gesture image and its integer class label
"""
def __init__(self,
paths: typing.List[str],
gestures: typing.List[str],
transform=Compose([Resize((512, 256)), ToTensor()])):
self.paths = paths
self.gestures = gestures
self.transform = transform
def __len__(self):
return len(self.paths)
def __getitem__(self, idx: int):
img = Image.open(self.paths[idx])
return self.transform(img), _gesture_name_to_class_label(self.gestures[idx])
def _gesture_name_to_class_label(gesture_name: str) -> int:
""" Get index of gesture name.
Helpful while training a model (classification).
Arguments
----------
gesture_name : str
Name of a gesture
Returns
-------
int
Index of gesture
"""
gestures = ('1', '2', '3', '4', '5', 'A', 'O', 'U')
return gestures.index(gesture_name)
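# Minimal usage sketch (illustrative paths, not part of the original module):
# if __name__ == "__main__":
#     from torch.utils.data import DataLoader
#     dataset = GestureDataset(["data/img_0001.png"], ["A"])
#     loader = DataLoader(dataset, batch_size=1)
#     image, label = next(iter(loader))  # image: (1, C, 512, 256); label: tensor([5]) since 'A' is index 5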
| UTF-8 | Python | false | false | 1,575 | py | 16 | dataset.py | 13 | 0.612063 | 0.605079 | 0 | 61 | 24.819672 | 84 |
Hari025/syncupcall | 13,477,607,391,977 | 396f84f1d8fb6eead0366a264375f563f7df48a6 | edb8e1e83e862cda2884db184700ea77770f90bf | /testapp/admin.py | b222d0c5c35b76a39b9424349bf7395f8243bbf6 | []
| no_license | https://github.com/Hari025/syncupcall | 776c90bde36f47bec88b11f51a0c102f8ec6f32a | 149fb732f516571ccaf8ac948f7b8d64d724f5a6 | refs/heads/master | 2023-06-09T02:51:35.403464 | 2021-06-30T13:40:57 | 2021-06-30T13:40:57 | 381,695,291 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
# Register your models here.
from testapp.models import Truecomp
from testapp.models import Smart
from testapp.models import PDB
from testapp.models import DMS
# Register your models here.
class TruecompAdmin(admin.ModelAdmin):
list_display=['TC','title','content','attendees','date']
admin.site.register(Truecomp,TruecompAdmin)
class SmartAdmin(admin.ModelAdmin):
list_display=['TC','title','content','attendees','date']
admin.site.register(Smart,SmartAdmin)
class PdbAdmin(admin.ModelAdmin):
list_display=['TC','title','content','attendees','date']
admin.site.register(PDB,PdbAdmin)
class DmsAdmin(admin.ModelAdmin):
list_display=['TC','title','content','attendees','date']
admin.site.register(DMS,DmsAdmin)
| UTF-8 | Python | false | false | 762 | py | 7 | admin.py | 3 | 0.761155 | 0.761155 | 0 | 22 | 33.636364 | 60 |
bleeptv/obey-sm-post-classification | 17,695,265,277,327 | eacda154e0eac84156faff702a68ec2d2e96571e | 4fbb49ac49dc9bc9d68adac09eea43de49e09f94 | /app/src/data_cleaning/custom_text_preprocessor.py | 8ecb70dc3848a19c4ae23c0af63e937b0d145dc3 | []
| no_license | https://github.com/bleeptv/obey-sm-post-classification | 8948104d01644c8a51671310b83cd93a2345ed55 | b14722737296c6a5df93f1dba3db4b57f14f34c6 | refs/heads/master | 2023-03-16T04:58:09.862950 | 2021-03-05T10:15:43 | 2021-03-05T10:15:43 | 344,027,299 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import nltk
from nltk.stem import WordNetLemmatizer, PorterStemmer
from string import punctuation
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import STOPWORDS
from gensim.parsing.preprocessing import strip_punctuation
from gensim.parsing.preprocessing import strip_non_alphanum
nltk.download('wordnet')
stemmer = PorterStemmer()
lemmatizer = WordNetLemmatizer()
def lemmatize_stemming(input_text):
""" Turn a word into it's original, dictionary form (i.e. turning fried into fry)
Parameters
----------
input_text : str
The word to convert to a language dictionary equivalent
Returns
-------
str
lemmatized input text converted to language dictionary original word
"""
if lemmatizer.lemmatize(input_text).endswith('e'):
return lemmatizer.lemmatize(input_text)
return stemmer.stem(input_text)
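# e.g. (illustrative): lemmatize_stemming("running") -> "run" (the lemma does not end
# with 'e', so the Porter stem is returned), while "table" comes back unchanged.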
def preprocess(input_text):
""" Turn a word into it's original, dictionary form (i.e. turning fried into fry)
Parameters
----------
input_text : str
The word to convert to a language dictionary equivalent
Returns
-------
list
List of all the words in the input text
"""
result = []
stripped_text = strip_punctuation(input_text.lower()).split(" ")
filtered_stripped_text = filter(None, stripped_text)
for token in filtered_stripped_text:
if token not in STOPWORDS and len(token) > 1:
result.append(lemmatize_stemming(token))
return result | UTF-8 | Python | false | false | 1,585 | py | 9 | custom_text_preprocessor.py | 4 | 0.668139 | 0.667508 | 0 | 56 | 27.321429 | 85 |
kc8/UPC_API | 13,786,845,067,985 | 46846bdcf92808ea07a212e2f0835552f7c9de56 | c7422c7a1cd127c690b9f87d80b0d2692efa6a38 | /tests.py | 8b82b1b1dc921c59733d0a0b8f621ceb352f2e3a | []
| no_license | https://github.com/kc8/UPC_API | 42e38cf11c5adf1b10171237cfea74acf71c9558 | 15465791a28169b47435eb518e2733d9ea44f686 | refs/heads/master | 2022-04-09T04:40:53.326498 | 2020-02-26T12:50:05 | 2020-02-26T12:50:05 | 241,003,215 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
This is an example test script.
In its current state this will not run
'''
import unittest
#import module
class TestModule(unittest.TestCase):
def test_add(self):
result = module.add(4,6) #this would be a test
self.assertEqual(result, 15)
if __name__ == "__main__":
unittest.main() #this will kick off all the unit tests
| UTF-8 | Python | false | false | 357 | py | 7 | tests.py | 5 | 0.658263 | 0.647059 | 0 | 17 | 19.941176 | 58 |
ngoodman90/CodingGame | 14,851,996,927,012 | 762527730e2ca2ff1f31a97c9cfb6c764686fc58 | d26b4c0eb87ccd861ff99d7b78bb052cd5d3e421 | /python/RockPaperScissorsSpock/main.py | 5901ae46aa64fe7ec290bb8790765980144389e2 | []
| no_license | https://github.com/ngoodman90/CodingGame | 6ad8226253b94cc20c18bdd019b3c880301017ff | 529da15a964f92e5407522cea0c9b7a7063c05eb | refs/heads/master | 2021-01-10T07:07:30.880015 | 2020-05-08T13:36:23 | 2020-05-08T13:36:23 | 48,571,145 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # https://www.codingame.com/ide/puzzle/rock-paper-scissors-lizard-spock
from collections import defaultdict
rules = {
'C': ['P', 'L'],
'P': ['R', 'S'],
'R': ['L', 'C'],
'L': ['S', 'P'],
'S': ['C', 'R']
}
def winner(pl1, pl2):
pl1_wins = pl2[1] in rules[pl1[1]]
pl2_wins = pl1[1] in rules[pl2[1]]
if pl1_wins and not pl2_wins:
return pl1, pl2
if pl2_wins and not pl1_wins:
return pl2, pl1
return (pl1, pl2) if pl1[0] < pl2[0] else (pl2, pl1)
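# e.g. winner((1, 'R'), (2, 'P')) == ((2, 'P'), (1, 'R')): paper beats rock, and on a
# draw the player with the smaller number wins.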
opponents = defaultdict(list)
n = int(input())
games = []
for i in range(n):
numplayer, signplayer = input().split()
games.append((int(numplayer), signplayer))
while len(games) > 1:
next_games = []
for p1, p2 in zip(games[::2], games[1::2]):
w, l = winner(p1, p2)
next_games.append(w)
opponents[w[0]].append(l[0])
games = next_games
print(games[0][0])
print(*opponents[games[0][0]])
| UTF-8 | Python | false | false | 933 | py | 44 | main.py | 41 | 0.559486 | 0.514469 | 0 | 41 | 21.756098 | 71 |
AnchorFree/python-statuspageio | 14,577,119,021,251 | b8667f2a05ca8d0aa795e87e3a2b8db363848ff4 | a33714d6344ae8449e973254d20ff27b151a0d10 | /statuspageio/__init__.py | 3b211b55c6863a9beaec0104b12763c93b06ab87 | [
"MIT"
]
| permissive | https://github.com/AnchorFree/python-statuspageio | 916f1770a32f6cdbbd284c5a921b937ef8e5923f | 54a87368c851de585043e2d8c05086b017b36015 | refs/heads/master | 2023-01-02T18:21:17.530675 | 2020-10-27T04:29:02 | 2020-10-27T04:29:02 | 299,762,396 | 0 | 0 | MIT | true | 2020-10-27T04:29:03 | 2020-09-29T23:36:42 | 2020-09-29T23:36:45 | 2020-10-27T04:29:02 | 17 | 0 | 0 | 0 | null | false | false | """
StatusPage.io API V1 library client for Python.
~~~~~~~~~~~~~~~~~~~~~
Usage::
>>> import statuspageio
>>> client = statuspageio.Client(api_key=os.environ.get('STATUSPAGE_API_KEY')
>>> status = client.components.list()
>>> print status
:copyright: (c) 2016 by GameSparks TechOps (techops@gamesparks.com).
:license: MIT, see LICENSE for more details.
"""
from statuspageio.version import VERSION
from statuspageio.errors import (
ConfigurationError,
RateLimitError,
BaseError,
RequestError,
ResourceError,
ServerError
)
#
from statuspageio.configuration import Configuration
from statuspageio.http_client import HttpClient
from statuspageio.services import (
PageService,
ComponentsService,
IncidentsService,
SubscribersService,
MetricsService,
UsersService,
)
from statuspageio.client import Client
| UTF-8 | Python | false | false | 864 | py | 5 | __init__.py | 4 | 0.725694 | 0.719907 | 0 | 37 | 22.351351 | 79 |
dpq/spacetrack | 12,618,613,953,960 | 693d1da1d7368092e410a1a022d6e2a864ac11cf | bdeebfa1b3f3abcfaa8881c086d9e0ea1ee7e635 | /dupe.py | 09289d4c4c9ca7b93d50fcaa7631827709863ba3 | []
| no_license | https://github.com/dpq/spacetrack | 4bfc7f113243ba72a32f60a0fd1a19284ab485ab | 907d0bfcc9ab44993e0ca31fa8e7e6bdce55532f | refs/heads/master | 2020-07-22T00:03:38.819325 | 2011-03-28T13:19:05 | 2011-03-28T13:19:05 | 1,541,691 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from google.appengine.api import memcache
from google.appengine.ext import db
import model
def main():
print "Content-Type: text/plain"
print ""
print "Dupes"
query = model.Object.gql("where orbiting = :1 order by noradid asc", True)
res = {}
sects = {}
while True:
result = query.fetch(1000)
for x in result:
if res.has_key(x.noradid):
res[x.noradid] += 1
sects[x.noradid] += " " + x.section
else:
res[x.noradid] = 1
sects[x.noradid] = x.section
if len(result) < 1000:
break
cursor = query.cursor()
query.with_cursor(cursor)
for x in res:
if res[x] > 1:
s = sects[x].split()
if len(s) > len(set(s)):
print x, "::", res[x], " ", sects[x]
if __name__ == "__main__":
main() | UTF-8 | Python | false | false | 898 | py | 16 | dupe.py | 11 | 0.495546 | 0.482183 | 0 | 33 | 26.242424 | 78 |
SvanderHeijden/SimpleMovingAverageModel | 7,430,293,433,648 | d31f72f48e85c3a412218ac0e014c3030e16ae71 | 7f3f3d85ab18a019ceaf953f768bf18ee44a6866 | /SimpleMovingAverageModel.py | de496fb1f4ecec21d89eecc00fedf99ed1dbaac1 | []
| no_license | https://github.com/SvanderHeijden/SimpleMovingAverageModel | 98316e401a0687a6988d7f66843b25e25fc2a7e7 | f1a9faca2f0b8fec7153c5e15a610661aef2f87a | refs/heads/master | 2020-09-03T15:51:16.793378 | 2019-11-04T13:03:58 | 2019-11-04T13:03:58 | 219,502,998 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
This application screens the US stock market with a simple moving average model. Closing
price data comes from the Financial Modeling Prep API.
@author: Sjoerd van der Heijden
"""
import urllib.parse
import json
import requests
import numpy as np
import pandas as pd
from tqdm import tqdm
"""
When this definition is called the API is requested.
@return: myData is a dictionary containing the close and dates of the requested ticker.
"""
def request(ticker):
myUrl = 'https://financialmodelingprep.com/api/v3/historical-price-full/'
totalUrl = urllib.parse.urljoin(myUrl, ticker)
request = requests.get(totalUrl)
myData = json.loads(request.text.replace("<pre>","").replace("</pre>",""))
return(myData)
"""
When this definition is called it computes the simple moving average for the
last 50 and 200 days. Then, the score is computed by deviding the 50 days SMA
by the 200 days SMA.
@param: ticker is the Ticker for any given stock.
@return: myScore is the score for any given stock.
"""
def score(ticker):
try:
myData = request(ticker)
myPrice = []
for i in range(0, len(myData["historical"])):
myPrice.append(float(myData["historical"][i]["close"]))
moving = [50, 200]
totalSMA = []
for h in range(0, len(moving)):
mySMA = []
for i in range(moving[h]-1, len(myData["historical"])):
mySum = 0
for j in range(0, moving[h]-1):
mySum += float(myData["historical"][i-j]["close"])
mySMA.append(mySum/(moving[h]-1))
totalSMA.append(mySMA[len(mySMA)-1])
myScore = totalSMA[0]/totalSMA[1]
    except Exception:
        # missing data or a failed API call: fall back to a neutral score
        myScore = 0
return(myScore)
"""
When this definition is called the summarizes the results of the program in
a dataframe. The data frame is sorted by decending order and written to
an excel file.
@param: listTickers is a list of Tickers.
"""
def result(listTickers):
myScore = []
myList = []
for i in tqdm(range(len(listTickers))):
listTickers[i] = str(listTickers[i]).strip()
myScore.append(score(listTickers[i]))
myList.append(listTickers[i])
myResult = np.array(np.transpose(np.array([myList, myScore])))
df = pd.DataFrame(myResult, columns=['Ticker', 'Score']).sort_values(by=['Score'], ascending=False).head(50)
df.to_excel(excel_writer = "/Users/SjoerdvanderHeijden/Documents/API/fcf_model/code/V1.0/result.xlsx")
if __name__ == '__main__':
listTickers = open("ticker.csv").readlines()
result(listTickers) | UTF-8 | Python | false | false | 2,910 | py | 1 | SimpleMovingAverageModel.py | 1 | 0.578694 | 0.568041 | 0 | 123 | 22.666667 | 113 |
matrix-org/synapse | 12,369,505,821,150 | 1fd595704053862eae81c16d53291297d53e2013 | 7343ece3b82ac87a594865c4074623b45b0297b4 | /synapse/storage/databases/main/task_scheduler.py | 9ab120eea9ca5f326f9470c6bdf4c4300a397d22 | [
"Apache-2.0"
]
| permissive | https://github.com/matrix-org/synapse | a00111f83310783b78e2996557f8bbae4d9fb229 | d35bed8369514fe727b4fe1afb68f48cc8b2655a | refs/heads/develop | 2023-09-05T05:24:20.808942 | 2023-09-04T16:14:09 | 2023-09-04T16:14:09 | 22,844,864 | 12,215 | 2,869 | Apache-2.0 | false | 2023-09-14T15:20:48 | 2014-08-11T15:51:42 | 2023-09-14T11:01:28 | 2023-09-14T15:20:47 | 388,587 | 11,321 | 2,120 | 1,497 | Python | false | false | # Copyright 2023 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from synapse.storage._base import SQLBaseStore, db_to_json
from synapse.storage.database import (
DatabasePool,
LoggingDatabaseConnection,
LoggingTransaction,
make_in_list_sql_clause,
)
from synapse.types import JsonDict, JsonMapping, ScheduledTask, TaskStatus
from synapse.util import json_encoder
if TYPE_CHECKING:
from synapse.server import HomeServer
class TaskSchedulerWorkerStore(SQLBaseStore):
def __init__(
self,
database: DatabasePool,
db_conn: LoggingDatabaseConnection,
hs: "HomeServer",
):
super().__init__(database, db_conn, hs)
@staticmethod
def _convert_row_to_task(row: Dict[str, Any]) -> ScheduledTask:
row["status"] = TaskStatus(row["status"])
if row["params"] is not None:
row["params"] = db_to_json(row["params"])
if row["result"] is not None:
row["result"] = db_to_json(row["result"])
return ScheduledTask(**row)
async def get_scheduled_tasks(
self,
*,
actions: Optional[List[str]] = None,
resource_id: Optional[str] = None,
statuses: Optional[List[TaskStatus]] = None,
max_timestamp: Optional[int] = None,
) -> List[ScheduledTask]:
"""Get a list of scheduled tasks from the DB.
Args:
actions: Limit the returned tasks to those specific action names
resource_id: Limit the returned tasks to the specific resource id, if specified
statuses: Limit the returned tasks to the specific statuses
max_timestamp: Limit the returned tasks to the ones that have
a timestamp inferior to the specified one
Returns: a list of `ScheduledTask`, ordered by increasing timestamps
"""
def get_scheduled_tasks_txn(txn: LoggingTransaction) -> List[Dict[str, Any]]:
clauses: List[str] = []
args: List[Any] = []
if resource_id:
clauses.append("resource_id = ?")
args.append(resource_id)
if actions is not None:
clause, temp_args = make_in_list_sql_clause(
txn.database_engine, "action", actions
)
clauses.append(clause)
args.extend(temp_args)
if statuses is not None:
clause, temp_args = make_in_list_sql_clause(
txn.database_engine, "status", statuses
)
clauses.append(clause)
args.extend(temp_args)
if max_timestamp is not None:
clauses.append("timestamp <= ?")
args.append(max_timestamp)
sql = "SELECT * FROM scheduled_tasks"
if clauses:
sql = sql + " WHERE " + " AND ".join(clauses)
sql = sql + " ORDER BY timestamp"
txn.execute(sql, args)
return self.db_pool.cursor_to_dict(txn)
rows = await self.db_pool.runInteraction(
"get_scheduled_tasks", get_scheduled_tasks_txn
)
return [TaskSchedulerWorkerStore._convert_row_to_task(row) for row in rows]
async def insert_scheduled_task(self, task: ScheduledTask) -> None:
"""Insert a specified `ScheduledTask` in the DB.
Args:
task: the `ScheduledTask` to insert
"""
await self.db_pool.simple_insert(
"scheduled_tasks",
{
"id": task.id,
"action": task.action,
"status": task.status,
"timestamp": task.timestamp,
"resource_id": task.resource_id,
"params": None
if task.params is None
else json_encoder.encode(task.params),
"result": None
if task.result is None
else json_encoder.encode(task.result),
"error": task.error,
},
desc="insert_scheduled_task",
)
async def update_scheduled_task(
self,
id: str,
timestamp: int,
*,
status: Optional[TaskStatus] = None,
result: Optional[JsonMapping] = None,
error: Optional[str] = None,
) -> bool:
"""Update a scheduled task in the DB with some new value(s).
Args:
id: id of the `ScheduledTask` to update
timestamp: new timestamp of the task
status: new status of the task
result: new result of the task
error: new error of the task
Returns: `False` if no matching row was found, `True` otherwise
"""
updatevalues: JsonDict = {"timestamp": timestamp}
if status is not None:
updatevalues["status"] = status
if result is not None:
updatevalues["result"] = json_encoder.encode(result)
if error is not None:
updatevalues["error"] = error
nb_rows = await self.db_pool.simple_update(
"scheduled_tasks",
{"id": id},
updatevalues,
desc="update_scheduled_task",
)
return nb_rows > 0
async def get_scheduled_task(self, id: str) -> Optional[ScheduledTask]:
"""Get a specific `ScheduledTask` from its id.
Args:
id: the id of the task to retrieve
Returns: the task if available, `None` otherwise
"""
row = await self.db_pool.simple_select_one(
table="scheduled_tasks",
keyvalues={"id": id},
retcols=(
"id",
"action",
"status",
"timestamp",
"resource_id",
"params",
"result",
"error",
),
allow_none=True,
desc="get_scheduled_task",
)
return TaskSchedulerWorkerStore._convert_row_to_task(row) if row else None
async def delete_scheduled_task(self, id: str) -> None:
"""Delete a specific task from its id.
Args:
id: the id of the task to delete
"""
await self.db_pool.simple_delete(
"scheduled_tasks",
keyvalues={"id": id},
desc="delete_scheduled_task",
)
| UTF-8 | Python | false | false | 6,970 | py | 1,212 | task_scheduler.py | 932 | 0.562984 | 0.561693 | 0 | 202 | 33.50495 | 91 |
amimimor/questionnaire-poc-be | 12,867,722,057,160 | bffb04ae6384590447f77573a4c81dfa6be1ccb7 | 7ffc3497a6af48b6c1674ed948d3805c4a7cb28d | /app/neo/neo_transaction.py | 6b071fd6b35b8a1d34debaa55414ed83e8dbf27f | []
| no_license | https://github.com/amimimor/questionnaire-poc-be | 3597c32f3523506112c245d09f20c43e9b114f1f | 5f2fbdd13427ab1ce81dda9d62cd0fb5368bfd0f | refs/heads/main | 2023-07-30T14:05:24.458497 | 2021-09-02T16:33:40 | 2021-09-02T16:33:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from app.neo.neo_utils import *
def create_new_respondent(tx, company_name, policy_request_id):
query = "CREATE(r:Respondent:MatanDev $node) return r"
node = {
'id': generate_node_id(),
'comapnyName': company_name,
'policyRequestId': policy_request_id
}
result = tx.run(query, node=node)
record = result.single()
return record[0].get("id")
def get_questions_from_neo(tx, form_id):
q = "MATCH (f:Form:MatanDev {Id:$form_id})-[:hasQuestion]-(q:Question:MatanDev)-[:hasAnswer]-(a:Answer:MatanDev) return q, collect(a) as optionalAnswers"
result = tx.run(q, form_id=form_id)
questions = []
for record in result:
question = {
'id': record[0].get('Id'),
'name': record[0].get('Name'),
'question': record[0].get('BaseQuestion'),
'subQuestion': None,
'expectMinLen': record[0].get('ExpectMinLen'),
'expectMaxLen': record[0].get('ExpectMaxLen'),
'optionalAnswerList': extract_answers(record[1])
}
questions.append(question)
return questions
def get_forms_id_from_neo(tx):
q = "Match(f:Form:MatanDev {Name:'Opeining Form'}) return f "
result = tx.run(q)
record = result.single()
return record[0].get("Id")
def add_answer_of_respondent(tx, respondent_id, question_id, answer_ids):
if answer_ids:
query_to_insert = ('Match(r:Respondent:MatanDev ),(q:Question:MatanDev ),(a:Answer:MatanDev ) '
'where r.id=$respondent_id and q.Id=$question_id and a.Id IN $answer_ids '
'MERGE (r)-[rw:respondedWith]->(a) '
'MERGE (r)-[rt:respondedTo]->(q) '
'RETURN r,q,a')
tx.run(query_to_insert, respondent_id=respondent_id, question_id=question_id,
answer_ids=answer_ids)
def delete_answer_of_respondent(tx, respondent_id, answer_ids):
if answer_ids:
query_to_delete = ('MATCH (r:Respondent:MatanDev)-[rw:respondedWith]->(a:Answer:MatanDev) '
'WHERE r.id=$respondent_id and a.Id IN $answer_ids '
'Delete rw')
tx.run(query_to_delete, respondent_id=respondent_id, answer_ids=answer_ids)
def update_respondent_answer(tx, respondent_id, question):
    # Check whether the respondent has already answered this question.
    # If not -> connect the new answers to the question.
    # If yes -> fetch the respondent's previous answers for this question, then
    #   1. remove the answers that are no longer selected
    #   2. add the newly selected ones; answers present in both sets are left alone
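    # e.g. previous answers {a1, a2}, new answers {a2, a3} -> remove a1, keep a2, add a3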
question_id = question['questionId']
answer_ids = extract_answer_ids(question['answersList'])
query_to_check_question = ('MATCH (r:Respondent:MatanDev)-[:respondedTo]->(q:Question:MatanDev) '
'WHERE r.id=$respondent_id and q.Id=$question_id '
'RETURN count(q.Id) as isQuestionConnect')
response = tx.run(query_to_check_question, respondent_id=respondent_id, question_id=question_id)
is_question_connect = 0
for record in response:
is_question_connect = record.get('isQuestionConnect')
if is_question_connect:
query_to_check_latest_respondent_answers = (
'MATCH (r:Respondent)-[:respondedWith]->(a:Answer), (q:Question)-[:hasAnswer]->(a:Answer) '
'WHERE r.id=$respondent_id and q.Id=$question_id '
'return a.Id as answerId')
response = tx.run(query_to_check_latest_respondent_answers, respondent_id=respondent_id,
question_id=question_id)
list_of_latest_answers = []
for record in response:
list_of_latest_answers.append(record.get('answerId'))
answer_to_remove = extract_in_the_first_list(list_of_latest_answers, answer_ids)
answer_to_add = extract_in_the_first_list(answer_ids, list_of_latest_answers)
else:
answer_to_remove = []
answer_to_add = answer_ids
add_answer_of_respondent(tx, respondent_id, question_id, answer_to_add)
delete_answer_of_respondent(tx, respondent_id, answer_to_remove)
def get_all_rules(tx):
query = ('Match (r:Rule)-[aw:answerWith]-(a:Answer) return r.Id as ruleId, collect(a.Id) as answersId')
response = tx.run(query)
rules = {}
for record in response:
rules[record.get('ruleId')] = record.get('answersId')
return rules
def get_all_respondent_answers(tx, respondent_id):
query = ('Match (r:Respondent)-[rw:respondedWith]-(a:Answer) WHERE r.id=$respondent_id '
'return collect(a.Id) as respondent_answers')
response = tx.run(query, respondent_id=respondent_id)
for record in response:
return record.get('respondent_answers')
def get_question_to_show(tx, rules_ids):
query = ('Match (r:Rule)-[:ruleTo]-(f:Form)-[:hasQuestion]-(q:Question) '
'where r.Id in $rules_ids '
'return collect(q.Id) as allQuestionToShow')
response = tx.run(query, rules_ids=rules_ids)
for record in response:
return record.get('allQuestionToShow')
def get_base_rule(tx):
query = ('Match (r:Rule {Name:\'Base\'}) return r.Id as ruleId')
response = tx.run(query)
for record in response:
return record.get('ruleId')
def get_questions_by_ids(tx, questions_ids):
query = ('MATCH (q:Question:MatanDev)-[:hasAnswer]-(a:Answer:MatanDev) where q.Id in $questions_ids '
'return q, collect(a) as optionalAnswers')
response = tx.run(query, questions_ids=questions_ids)
questions = []
for record in response:
question = {
'id': record[0].get('Id'),
'name': record[0].get('Name'),
'question': record[0].get('BaseQuestion'),
'subQuestion': None,
'expectMinLen': record[0].get('ExpectMinLen'),
'expectMaxLen': record[0].get('ExpectMaxLen'),
'optionalAnswerList': extract_answers(record[1])
}
questions.append(question)
return questions
| UTF-8 | Python | false | false | 6,030 | py | 8 | neo_transaction.py | 6 | 0.612438 | 0.609619 | 0 | 149 | 39.469799 | 157 |
Erick-LONG/MxShop | 4,767,413,702,428 | 42e950392aabc75f9c800d29ffdf3fe907bb40d4 | ce741ade3d7ebfc64cf2736358f6e77b06168830 | /apps/trade/models.py | 9833ff9c0502730ca9e76d98bed3b6859c63e790 | []
| no_license | https://github.com/Erick-LONG/MxShop | 798a1ce4eb557973732ee6206640bdf9a247216b | 783e5d66a4d49b3eceb3eb6d7c729fcfa69742cb | refs/heads/master | 2021-04-03T08:31:41.588749 | 2018-03-22T04:01:46 | 2018-03-22T04:01:46 | 124,395,167 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from datetime import datetime
from django.db import models
from django.contrib.auth import get_user_model
from goods.models import Goods
# Create your models here.
User = get_user_model()  # returns the project's User class
class ShoppingCart(models.Model):
    '''Shopping cart'''
user = models.ForeignKey(User,verbose_name='用户')
goods = models.ForeignKey(Goods,verbose_name='商品')
nums = models.IntegerField(default=0,verbose_name='购买数量')
add_time = models.DateTimeField(default=datetime.now, verbose_name='添加时间')
class Meta:
verbose_name = '购物车'
verbose_name_plural = verbose_name
unique_together = ('user','goods')
def __str__(self):
return '%s(%d)' % (self.goods.name,self.nums)
class OrderInfo(models.Model):
    '''Order information'''
ORDER_STATUS = (
("TRADE_SUCCESS", "成功"),
("TRADE_CLOSED", "超时关闭"),
("WAIT_BUYER_PAY", "交易创建"),
("TRADE_FINISHED", "交易结束"),
("paying", "待支付"),
)
# PAY_TYPE = (
# ('alipay','支付宝'),
# ('wechat','微信'),
# )
user = models.ForeignKey(User, verbose_name='用户')
order_sn = models.CharField(max_length=30,unique=True,null=True,blank=True,verbose_name='订单号')
trade_no = models.CharField(max_length=100,unique=True,null=True,blank=True,verbose_name='支付宝订单号')
pay_status = models.CharField(choices=ORDER_STATUS,default='paying',max_length=30,verbose_name='订单状态')
post_script = models.CharField(max_length=11,verbose_name='订单留言')
order_mount = models.FloatField(default=0.0,verbose_name='订单金额')
pay_time = models.DateTimeField(null=True,blank=True,verbose_name='支付时间')
    # recipient information
address = models.CharField(max_length=100,default='',verbose_name='收货地址')
signer_name = models.CharField(max_length=20,default='',verbose_name='收件人 ')
signer_mobile = models.CharField(max_length=11,default='',verbose_name='联系电话')
add_time = models.DateTimeField(default=datetime.now, verbose_name='添加时间')
class Meta:
verbose_name = '订单信息'
verbose_name_plural = verbose_name
def __str__(self):
return str(self.order_sn)
class OrderGoods(models.Model):
    '''Order line-item details'''
order = models.ForeignKey(OrderInfo,verbose_name='订单信息',related_name='goods')
goods = models.ForeignKey(Goods,verbose_name='商品')
goods_num = models.IntegerField(default=0,verbose_name='商品数量')
add_time = models.DateTimeField(default=datetime.now, verbose_name='添加时间')
class Meta:
verbose_name = '订单商品详情'
verbose_name_plural = verbose_name
def __str__(self):
return str(self.order.order_sn) | UTF-8 | Python | false | false | 2,831 | py | 7 | models.py | 7 | 0.654932 | 0.647195 | 0 | 79 | 31.734177 | 106 |
shalgrim/advent_of_code_2019 | 18,365,280,165,247 | 70cb77a144f512d2ff8f476e48b60e8d92fc8972 | ac4f3bfa74fa452448757203a7b03786fbe6f40f | /day14_1.py | 0e2e6f9d27a899ca01d732650530c098df34b16b | []
| no_license | https://github.com/shalgrim/advent_of_code_2019 | e1f1f825f75597c10b6fda3141323ee162d36618 | ffb6731254c48abe1decc769dab9580a86bf8e05 | refs/heads/master | 2021-10-31T00:41:03.263486 | 2021-10-21T14:35:16 | 2021-10-21T14:35:16 | 227,891,104 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from fuel_builder import FuelBuilder
class Rule(object):
def __init__(self, line):
lhs, rhs = line.split('=>')
lhs = [l.strip().split() for l in lhs.strip().split(',')]
self.lhs = {t[1]: int(t[0]) for t in lhs}
self.rhs_ingredient = rhs.strip().split()[1]
self.rhs_quantity = int(rhs.strip().split()[0])
@property
def output(self):
return self.rhs_ingredient
@property
def num_produced(self):
return self.rhs_quantity
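# e.g. (illustrative): Rule("7 A, 1 B => 1 C") parses to lhs == {'A': 7, 'B': 1},
# output == 'C' and num_produced == 1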
def does_produce(a, b, rules):
"""True if a produces b eventually"""
if b not in rules:
return False
if rules[b].lhs.get(a, False):
return True
else:
return any(does_produce(a, c, rules) for c in rules[b].lhs)
num_inputs_produced = 0
resources = {}
def find_missing_ingredient(rule):
global resources
lhs = rule.lhs
for ingredient, required in lhs.items():
if resources.get(ingredient, 0) < required:
return ingredient
return None
def get_required(input_ingredient, output_ingredient, rules):
global resources, num_inputs_produced
output_rule = rules[output_ingredient]
missing_ingredient = find_missing_ingredient(output_rule)
while missing_ingredient:
if missing_ingredient == input_ingredient:
total_needed = output_rule.lhs[missing_ingredient]
new_inputs_needed = output_rule.lhs[input_ingredient] - resources.get(input_ingredient, 0)
resources[input_ingredient] = total_needed
num_inputs_produced += new_inputs_needed
else:
get_required(input_ingredient, missing_ingredient, rules)
missing_ingredient = find_missing_ingredient(output_rule)
try:
resources[output_ingredient] += output_rule.num_produced
except KeyError:
resources[output_ingredient] = output_rule.num_produced
for ing, num in output_rule.lhs.items():
resources[ing] -= num
def main(lines, input_ingredient='ORE', outupt_ingredient = 'FUEL'):
global resources, num_inputs_produced
resources = {}
num_inputs_produced = 0
rules = [Rule(line) for line in lines]
rules = {rule.output: rule for rule in rules}
get_required('ORE', 'FUEL', rules)
return num_inputs_produced, resources
def main_using_class(lines):
rules = [Rule(line) for line in lines]
rules = {rule.output: rule for rule in rules}
fb = FuelBuilder(rules)
min_to_produce_one, leftovers = fb.calc_min_to_produce_one()
return min_to_produce_one
if __name__ == '__main__':
with open('data/input14.txt') as f:
lines = [line.strip() for line in f.readlines()]
print(main(lines)[0]) # 870051
print(resources)
| UTF-8 | Python | false | false | 2,731 | py | 57 | day14_1.py | 57 | 0.639326 | 0.633101 | 0 | 92 | 28.684783 | 102 |
wakita/glvis | 19,292,993,100,379 | 01b41ca3d18a4065ffed90081417355ac9147b6d | a052b3ecee8b5dd8d0857a80ba9b16f0e9b1515b | /lib/sn/gl/geometry/Geometry.py | 7e62597deb2f80190ce37fbd84595ca6dc65e20a | []
| no_license | https://github.com/wakita/glvis | ae67d1f904e7e6e75cd4d5cb9c5aad27ec694d85 | 700a355a31e15eb920b73923b52ec9cf0db8bf52 | refs/heads/kw | 2021-09-19T22:18:18.552349 | 2018-08-01T04:57:15 | 2018-08-01T04:57:15 | 49,638,984 | 4 | 2 | null | false | 2017-08-04T04:14:14 | 2016-01-14T10:02:36 | 2017-04-21T05:02:14 | 2017-08-03T08:48:35 | 3,243 | 4 | 2 | 9 | Python | null | null | #!/usr/bin/env python
from collections import defaultdict
import os, os.path, sys
import numpy as np
np.set_printoptions(precision=4)
root_path = os.path.join(os.path.normpath(os.environ['DROPBOX']), 'work', 'pyqt')
sys.path.append(os.path.join(root_path, 'lib'))
def demo(Demo):
    try: Demo.start(Demo)
    except: pass  # silently skip demos that fail to start
def Point():
from sn.gl.geometry.point import D
demo(D)
def RegularPolygon():
from sn.gl.geometry.regularpolygon import D
demo(D)
def PointGrid():
from sn.gl.geometry.pointgrid import D
demo(D)
d = defaultdict(lambda: lambda: None)
d['Point'] = Point
d['RegularPolygon'] = RegularPolygon
d['PointGrid'] = PointGrid
for arg in sys.argv[1:]: d[arg]()
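# e.g. `python Geometry.py Point PointGrid` runs the Point and PointGrid demos in
# order; unknown names fall through to the defaultdict's no-op.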
| UTF-8 | Python | false | false | 705 | py | 114 | Geometry.py | 73 | 0.693617 | 0.69078 | 0 | 33 | 20.363636 | 81 |
JetErorr/Python3 | 14,963,666,096,704 | bf7d22d24490160f0c3c2d02215b0e428d839193 | 6e79476207f4c114b374eaa0a149bde2d9b181ff | /1.Prints.py | 1e47864dfa4673731845831a3105971841fe7c91 | []
| no_license | https://github.com/JetErorr/Python3 | e9493d1f7949c4af179b8fff927d504ab418927d | c7c9cb81f76e727211cfd3671c2f5da4532bb418 | refs/heads/master | 2020-03-19T16:38:21.050952 | 2019-07-31T07:03:04 | 2019-07-31T07:03:04 | 136,722,688 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #1 Print and comments
print("#1 Print and Comments")
#These are comments btw
print("This will be displayed without any processing.!")
print("These things are called prints")
print("Just like the C printf() or the C++ cout()")
print("Or the bash and cmd echo")
#This line will not be processed or printed
print("This line will not be processed as well, but it will be printed")
| UTF-8 | Python | false | false | 378 | py | 18 | 1.Prints.py | 16 | 0.743386 | 0.738095 | 0 | 9 | 41 | 72 |
eadasfa/LeetCode | 6,786,048,352,636 | ae77c27ee7503ce6584eb5716e226862216ff91b | 33ca5a05a6dfa1a4b37c9fd33c870743886cb87d | /8.字符串转换整数-atoi.py | bc1e91e81339e3ff342b37d7e06c33367ba4c2bc | []
| no_license | https://github.com/eadasfa/LeetCode | b6c06f57cc2db7a16d9e18661bb43b239ba48659 | 924b8643279c14bd32d3d8b4b982e463b23f162b | refs/heads/master | 2022-09-26T04:49:20.932243 | 2020-06-07T01:53:14 | 2020-06-07T01:53:14 | 255,844,958 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #
# @lc app=leetcode.cn id=8 lang=python
#
# [8] 字符串转换整数 (atoi)
#
# @lc code=start
class Solution(object):
def myAtoi(self, str):
"""
:type str: str
:rtype: int
"""
start,end=-1,-1
nums = '1234567890'
for i in range(len(str)):
if start==-1:
if str[i] =='-' or str[i] == '+' or str[i] in nums:
start = i
elif str[i] != ' ':
return 0
continue
if str[i] not in nums:
end = i
break
end = len(str) if end==-1 else end
        # reject strings that are all whitespace and strings that are only a '+' or '-' sign
if start==-1 or (end-start==1 and (str[start] in '+-')):
return 0
res = int(str[start:end])
if res > (1<<31)-1:
return (1<<31)-1
if res < -(1<<31):
return -(1<<31)
return res
# s = Solution()
# print(s.myAtoi('2147483646'))
# @lc code=end
'''
Implement the atoi function, which converts a string to an integer.
First, the function discards as many leading whitespace characters as necessary until the first non-whitespace character is found. Then:
If the first non-whitespace character is a plus or minus sign, combine it with as many consecutive digit characters as possible to form a signed integer.
If the first non-whitespace character is a digit, combine it directly with the consecutive digits that follow to form an integer.
The string may contain extra characters after the valid integer part; these are ignored and must not affect the result.
Note: if the first non-whitespace character is not a valid integer character, or the string is empty or contains only whitespace, the function performs no conversion.
Whenever no valid conversion can be performed, return 0.
Hints:
The only whitespace character in this problem is the space character ' '.
Assume the environment can only store 32-bit signed integers, i.e. the range [-2^31, 2^31 - 1]. If the value is out of this range, return INT_MAX (2^31 - 1) or INT_MIN (-2^31).
Example 1:
Input: "42"
Output: 42
Example 2:
Input: "   -42"
Output: -42
Explanation: the first non-whitespace character is '-', a minus sign.
Combining it with all the consecutive digits that follow gives -42.
Example 3:
Input: "4193 with words"
Output: 4193
Explanation: conversion stops at the digit '3', because the next character is not a digit.
Example 4:
Input: "words and 987"
Output: 0
Explanation: the first non-whitespace character is 'w', which is neither a digit nor a sign.
Therefore no valid conversion can be performed.
Example 5:
Input: "-91283472332"
Output: -2147483648
Explanation: the number "-91283472332" is out of the 32-bit signed integer range.
So INT_MIN (-2^31) is returned.
''' | UTF-8 | Python | false | false | 2,952 | py | 46 | 8.字符串转换整数-atoi.py | 46 | 0.562432 | 0.493428 | 0 | 85 | 20.494118 | 103 |
wizh/codewars | 8,340,826,536,550 | 9f5f6f475fefe6e75921125ef3641b4600c72530 | d4453687d18351b53201b655433803763034ea51 | /5kyu/Sudoku_solved/solution.py | 8280d61615fad0c7d6f3e0e3f39282faa8283b77 | [
"MIT"
]
| permissive | https://github.com/wizh/codewars | 3af2fc9c8d2fcabd4d626439d848632d47e8129b | bdb421720437a9fcafaa2eda8869a1cd4835bb47 | refs/heads/master | 2021-01-10T02:17:21.274346 | 2015-11-25T00:05:09 | 2015-11-25T00:05:09 | 46,823,367 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Obscure, non-understandable, but really cool!
from itertools import chain
def done_or_not(b):
cs = [[b[j][i] for j in range(9)] for i in range(9)]
rs = ([[[[b[3 * j + m][3 * i + n] for n in range(3)] for m in range(3)]
for j in range(3)] for i in range(3)])
rs = ([set(chain(*rs[i][j])) for j in range(3) for i in range(3)])
return (('Try again!', 'Finished!')
[all(len(set(x[i])) == 9 for i in range(3) for x in zip(b, cs, rs))])
| UTF-8 | Python | false | false | 497 | py | 34 | solution.py | 18 | 0.519115 | 0.49497 | 0 | 11 | 44.181818 | 77 |
VatsalP/PythonScripts | 1,417,339,221,213 | f0415d407a4347a9b46f83eaf558af55f92d13d2 | 329b462f546f8524528cf6752d1e364c3ae14c14 | /no-ip/AutoUpdateHost.py | aea576d46ab7acf226046e7646f7784477063f1b | []
| no_license | https://github.com/VatsalP/PythonScripts | a3d8f764073052a847b2796cc488e6fdfe8eb2ad | 7aa1abfcccd5bb77622ad2e4811d75e643589573 | refs/heads/master | 2021-01-10T14:34:28.100128 | 2020-10-01T02:18:42 | 2020-10-01T02:18:42 | 49,646,764 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
from selenium import webdriver
# use this instead: "browser = webdriver.Firefox()" if you don't have PhantomJS
browser = webdriver.PhantomJS()
browser.get('http://www.noip.com/login')
username = browser.find_element_by_name('username')
username.send_keys('yourusername')
passwd = browser.find_element_by_name('password')
passwd.send_keys('yourpassword')
loginbtn = browser.find_element_by_name('Login')
loginbtn.submit()
browser.get('http://www.noip.com/members/dns/')
hostdet = browser.find_element_by_class_name('bullet-modify')
hostdet.click()
updatebtn = browser.find_element_by_xpath("//input[@type='submit']")
updatebtn.submit() | UTF-8 | Python | false | false | 661 | py | 20 | AutoUpdateHost.py | 17 | 0.750378 | 0.750378 | 0 | 21 | 30.52381 | 79 |
ATNoG/5gcontact | 670,014,920,032 | e59daf6576c443215d25e848f425254a33e7977d | d18abd71a7867eb40c5227e96295059ef14a78b2 | /slimano/agents/osm/osm_error.py | 35de537aa26a38aa1c8e79679181ceffc385977d | [
"MIT"
]
| permissive | https://github.com/ATNoG/5gcontact | 0a529ea15b7778a49c1989c4950c26fcd0c399cb | fb65ae919977fb6411932b986bd565b962d9c0b9 | refs/heads/main | 2023-03-27T23:10:42.058444 | 2021-04-01T08:30:26 | 2021-04-01T08:30:26 | 326,690,140 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
class OsmError:
def __init__(self, message=None):
self.response = {
'message': message
}
def set_message(self, message):
self.response['message'] = message
def __str__(self):
return json.dumps(self.response)
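# e.g. str(OsmError("VNF not found")) -> '{"message": "VNF not found"}'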
| UTF-8 | Python | false | false | 281 | py | 90 | osm_error.py | 60 | 0.562278 | 0.562278 | 0 | 15 | 17.733333 | 42 |
paviprajansai/sai8 | 10,033,043,636,624 | 89d633273fd70de285431246af9e31fbfac37683 | 5d670e61c128c3e75b43e463c06a0cab0ef61b7a | /saira1.py | 66f6a279f12ed46edd17b42e8c0954e04bd21b23 | []
| no_license | https://github.com/paviprajansai/sai8 | 7499766f396da3885c9dbe82cb917381389a2dc5 | 45fb53ca081788c33cea2a83ebdc9dfbbfc4cc18 | refs/heads/master | 2020-06-27T00:44:46.262735 | 2019-07-31T07:48:40 | 2019-07-31T07:48:40 | 199,802,475 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | n11=int(input())
s11=0
while n11>0:
r11=n11%10
s11=s11+r11
n11=n11//10
print(s11)
| UTF-8 | Python | false | false | 85 | py | 8 | saira1.py | 8 | 0.670588 | 0.341176 | 0 | 7 | 11.142857 | 16 |
xyan36/measurement_codes | 13,322,988,595,434 | 9dd44b1c2437516288a70ecdcccf3ede4a36de34 | 46d86c4dfa69d1eb7841501f293bfac4480df781 | /suspended_wire/power_dependence_test2.py | f1da49b61c0a11057c24452b08af20842bff4b01 | []
| no_license | https://github.com/xyan36/measurement_codes | 01330e557a155316949bc37adc0d5df22776e366 | fda03ac08c575bbe575bd6e557367c0db2194512 | refs/heads/master | 2021-07-21T17:42:23.758620 | 2021-07-19T18:29:42 | 2021-07-19T18:29:42 | 207,862,388 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 25 12:05:51 2019
@author: Administrator
"""
from datetime import datetime
import time
import visa
import numpy as np
import os
########### function definitions ##############################################
### lock in 1w initialize ###
def lockinInit_1w():
lockin1.write("HARM 1")
lockin2.write("HARM 1")
### lock in 3w initialize ###
def lockinInit_3w():
#set lockins to measure the 3w voltage
lockin1.write("HARM 3")
lockin2.write("HARM 3")
#reserve mode
lockin1.write("RMOD 1")
lockin2.write("RMOD 1")
def lockin_set_pms(timeCon,sensitivity):
#time constant
lockin1.write("OFLT %d" %timeCon)
lockin2.write("OFLT %d" %timeCon)
#sensitivity
lockin1.write("SENS %d" %sensitivity)
lockin2.write("SENS %d" %sensitivity)
def lockinsingle_set_pms(lockin, timeCon, sensitivity):
#time constant
lockin.write("OFLT %d" %timeCon)
#sensitivity
lockin.write("SENS %d" %sensitivity)
def outputs_query():
v = lockin1.query('slvl?').rstrip()
tc = lockin1.query('oflt?').rstrip()
s_x3 = lockin1.query('sens?').rstrip()
s_x1 = lockin2.query('sens?').rstrip()
X3 = lockin1.query('outp?1').rstrip()
Y3 = lockin1.query('outp?2').rstrip()
X1_ref = lockin2.query('outp?1').rstrip()
Y1_ref = lockin2.query('outp?2').rstrip()
header = "V_input TC SENS_X3 SENS_X1 X3 Y3 X1_ref Y1_ref"
print(header)
print(v, tc, s_x3, s_x1, X3, Y3, X1_ref, Y1_ref, sep = " ")
def settings_query():
f1 = lockin1.query('freq?').rstrip()
f2 = lockin2.query('freq?').rstrip()
tc1 = lockin1.query('oflt?').rstrip()
tc2 = lockin2.query('oflt?').rstrip()
sstvt1 = lockin1.query('sens?').rstrip()
sstvt2 = lockin2.query('sens?').rstrip()
header = "LOCKIN# FREQ TC SENS"
print(header)
print('lockin1', f1, tc1, sstvt1, sep = "\t")
print('lockin2', f2, tc2, sstvt2, sep = "\t")
def set_V_input(lockin, voltage):
lockin.write('slvl %f' %voltage)
print(datetime.now(), f"Set V = {voltage}")
### measurements ###
def measurement(sens, initWaitTime, add_wait_time=5):  # sens = allowed drift between consecutive readings
X = float(lockin1.query("OUTP?1"))
Y = float(lockin1.query("OUTP?2"))
X_ref = float(lockin2.query("OUTP?1"))
Y_ref = float(lockin2.query("OUTP?2"))
time.sleep(initWaitTime) #initial wait time
    # wait until the readings are stable
while (np.abs(X - float(lockin1.query('OUTP?1')))> sens
or np.abs(X_ref - float(lockin2.query('OUTP?1')))> sens):
X = float(lockin1.query("OUTP?1"))
Y = float(lockin1.query("OUTP?2"))
X_ref = float(lockin2.query("OUTP?1"))
Y_ref = float(lockin2.query("OUTP?2"))
time.sleep(add_wait_time) #additional wait time
line = str(X) + " " + str(Y) + " " \
+ str(X_ref) + " " + str(Y_ref) + " "
return line
### voltage sweep ###
def VoltageSweep(voltages,sens1, TC1, SENS1, initWaitTime1, sens3, TC3, SENS3, initWaitTime3):
for v in voltages:
lockin1.write("SLVL %f" %v)
line = str(v) + " "
        time.sleep(5)  # wait for the output voltage to settle
lockinInit_1w()
lockin_set_pms(TC1,SENS1)
line += measurement(sens1,initWaitTime1)
lockinInit_3w()
lockin_set_pms(TC3,SENS3)
line += measurement(sens3,initWaitTime3).rstrip()
t = float(time.time()-t0)
print(str(datetime.now()) + " " + str(t) + " " + line)
with open(FILENAME,'a') as output:
output.write(str(datetime.now()) + " " + str(t) + " " + line +"\n")
def voltage_sweep_auto(voltages, initWaitTime):
try:
for v in voltages:
lockin1.write('slvl %f' %v)
time.sleep(initWaitTime)
dt = str(datetime.now())
t = round(float(time.time()-t0), 4)
vin = str(v)
tc = lockin1.query('oflt?').rstrip()
sens_x3 = lockin1.query('sens?').rstrip()
sens_x1 = lockin2.query('sens?').rstrip()
X3 = lockin1.query('outp?1').rstrip()
Y3 = lockin1.query('outp?2').rstrip()
X1_ref = lockin2.query('outp?1').rstrip()
Y1_ref = lockin2.query('outp?2').rstrip()
line = f"{dt},{t},{vin},{tc},{sens_x3},{sens_x1},{X3},{Y3},{X1_ref},{Y1_ref}"
print(line)
with open(FILENAME,'a') as output:
output.write(line +"\n")
except KeyboardInterrupt:
        print('KeyboardInterrupt')
pass
finally:
lockin1.write("SLVL %f" %0.004)
lockin1.write('FREQ %f' %17)
lockin_set_pms(timeCon=9,sensitivity=22)
lockinInit_1w()
tf = datetime.now()
print ("Program done! total time is: "+ str(tf-ti))
def voltage_sweep_manual(voltages, initWaitTime):
try:
for v in voltages:
lockin1.write('slvl %f' %v)
time.sleep(initWaitTime)
dt = str(datetime.now())
t = round(float(time.time()-t0), 4)
vin = str(v)
tc = lockin1.query('oflt?').rstrip()
sens_x3 = lockin1.query('sens?').rstrip()
sens_x1 = lockin2.query('sens?').rstrip()
X3 = lockin1.query('outp?1').rstrip()
Y3 = lockin1.query('outp?2').rstrip()
X1_ref = lockin2.query('outp?1').rstrip()
Y1_ref = lockin2.query('outp?2').rstrip()
line = f"{dt},{t},{vin},{tc},{sens_x3},{sens_x1},{X3},{Y3},{X1_ref},{Y1_ref}"
print(line)
usercheck = input('Is reading stable? Type \'y\' to record: ')
while usercheck != 'y':
print('Not recorded.')
usercheck = input('Is reading stable? Type \'y\' to record: ')
dt = str(datetime.now())
t = round(float(time.time()-t0), 4)
vin = str(v)
tc = lockin1.query('oflt?').rstrip()
sens_x3 = lockin1.query('sens?').rstrip()
sens_x1 = lockin2.query('sens?').rstrip()
X3 = lockin1.query('outp?1').rstrip()
Y3 = lockin1.query('outp?2').rstrip()
X1_ref = lockin2.query('outp?1').rstrip()
Y1_ref = lockin2.query('outp?2').rstrip()
line = f"{dt},{t},{vin},{tc},{sens_x3},{sens_x1},{X3},{Y3},{X1_ref},{Y1_ref}"
print(line)
with open(FILENAME,'a') as output:
output.write(line +"\n")
print('Recorded.')
except KeyboardInterrupt:
        print('KeyboardInterrupt')
pass
finally:
lockin1.write("SLVL %f" %0.004)
#output.close()# may record unfinished data
tf = datetime.now()
print ("Program done! total time is: "+ str(tf-ti))
##############################################################################
### create a folder named after today's date and build a new file name ###
date = '210714'
try:
os.mkdir(date)
except FileExistsError:
pass
FILENAME = f"{date}//{date}_Bi2Te3_n10_power_dep_f3p4_1.txt"
rm = visa.ResourceManager();
print(rm.list_resources())
lockin1 = rm.open_resource("GPIB2::8::INSTR") #sample & SINE_OUT source
lockin2 = rm.open_resource("GPIB2::9::INSTR") #reference resistor
header = "Date_time,Time,V_input,TC,SENS_X3,SENS_X1,X3,Y3,X1_ref,Y1_ref\n"
print(header)
with open(FILENAME,'w') as output:
output.write(header)
### Set the parameters ###
freq = 3.4 #Hz
timeCon = 13 #
voltages = np.array([0.1, 0.3, 0.5, 0.7, 0.9, 1.1, 1.3, 1.5])
sensitivity1 = 24# sensitivity for 1w measurement
sensitivity3 = 21# sensitivity for 3w measurement
initWaitTime = 15 * 60#s
lockin1.write('harm 3')
lockin2.write('harm 1')
##########################
lockin1.write('FREQ %f' %freq)
t0 = time.time()
ti = datetime.now()
lockinsingle_set_pms(lockin1, timeCon, sensitivity3)
lockinsingle_set_pms(lockin2, timeCon, sensitivity1)
voltage_sweep_auto(voltages, initWaitTime)
| UTF-8 | Python | false | false | 7,954 | py | 165 | power_dependence_test2.py | 20 | 0.562987 | 0.526905 | 0 | 222 | 34.828829 | 94 |
Volen/edsdtest | 3,959,959,886,310 | 298737ae529c1eefc7deeef2f4d981c1a2b87f48 | f433d026216c6c35fb6ac37d82409529260b75b8 | /psychics/views.py | 91a0ec32a183652f9d08b57ad8094c8e38f74d8d | []
| no_license | https://github.com/Volen/edsdtest | 7db093be672919c0e3e7f375b681a679b1116223 | 7df9fb24b6b0df6ee1ad0b75d74e7c37166c577d | refs/heads/master | 2023-04-11T11:02:15.949442 | 2021-04-09T17:31:16 | 2021-04-09T17:31:16 | 355,106,933 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.http.response import HttpResponseRedirect
from django.views import View
from django.shortcuts import render
from django.urls import reverse
from .forms import CorrectAnswerForm
from .models import HistoryDB, PsychicsPool
from edsdtest.settings import PSYCHICS_NAMES
class HomePage(View):
template_name = 'psychics/index.html'
def get(self, request):
history_db = HistoryDB(request)
user_history = history_db.get_user_history()
psychics_history = history_db.get_psychics_history(PSYCHICS_NAMES)
credibility = history_db.get_psychics_credibility(PSYCHICS_NAMES)
return render(request, self.template_name, {'user_history': user_history, 'psychics_history': psychics_history,
'credibility': credibility})
class GetGuess(View):
form_class = CorrectAnswerForm
template_name = 'psychics/guess.html'
def post(self, request):
form = self.form_class(request.POST)
history_db = HistoryDB(request)
if form.is_valid():
correct_answer = request.POST['correct_answer']
history_db.add_correct_answer(correct_answer)
history_db.set_check_performed(False)
return HttpResponseRedirect(reverse('check', args=[correct_answer]))
else:
user_history = history_db.get_user_history()
psychics_history = history_db.get_psychics_history(PSYCHICS_NAMES)
credibility = history_db.get_psychics_credibility(PSYCHICS_NAMES)
guesses = history_db.get_psychics_guesses_cache()
return render(request, self.template_name, {'form': form, 'user_history': user_history, 'guesses': guesses,
'psychics_history': psychics_history, 'credibility': credibility})
def get(self, request):
history_db = HistoryDB(request)
user_history = history_db.get_user_history()
psychics_history = history_db.get_psychics_history(PSYCHICS_NAMES)
credibility = history_db.get_psychics_credibility(PSYCHICS_NAMES)
psychics_pool = PsychicsPool(PSYCHICS_NAMES)
guesses = psychics_pool.generate_guesses()
history_db.save_psychics_guesses_cache(guesses)
form = self.form_class()
return render(request, self.template_name, {'form': form, 'user_history': user_history, 'guesses': guesses,
'psychics_history': psychics_history, 'credibility': credibility})
class CheckResult(View):
template_name = 'psychics/check.html'
def get(self, request, correct_answer):
history_db = HistoryDB(request)
user_history = history_db.get_user_history()
psychics_history = history_db.get_psychics_history(PSYCHICS_NAMES)
check_performed = history_db.get_check_perfromed()
result = history_db.get_final_result_with_check(PSYCHICS_NAMES, correct_answer)
credibility = history_db.get_psychics_credibility(PSYCHICS_NAMES)
return render(request, self.template_name, {'check_performed': check_performed, 'result': result, 'correct_answer': correct_answer,
'user_history': user_history, 'psychics_history': psychics_history,
'credibility': credibility})
| UTF-8 | Python | false | false | 3,448 | py | 11 | views.py | 5 | 0.634281 | 0.634281 | 0 | 74 | 45.554054 | 140 |
innogames/serveradmin | 10,746,008,208,510 | 431e3b5c4eb06d5da657dcc000efe2dcbec58289 | df96f5d5555a8eb0f783598c6c4fc53bdfa629a7 | /adminapi/exceptions.py | 8cf8e6ce5e4891d5bd10c4eb8ce6c1d088534732 | [
"MIT"
]
| permissive | https://github.com/innogames/serveradmin | 142b536f9516fc0e70630049e631fff42c1a38c8 | f59755035a07abdeb8681db4b49729e6861e0967 | refs/heads/main | 2023-08-09T15:27:56.949189 | 2023-08-02T05:48:14 | 2023-08-02T05:48:14 | 82,291,244 | 46 | 18 | MIT | false | 2023-08-02T05:48:15 | 2017-02-17T11:33:27 | 2023-07-13T12:03:25 | 2023-08-02T05:48:14 | 4,495 | 43 | 17 | 4 | Python | false | false | """adminapi - Exceptions
Copyright (c) 2019 InnoGames GmbH
"""
class AdminapiException(Exception):
"""Adminapi exception parent class."""
pass
class ConfigurationError(AdminapiException):
"""Missing or invalid configuration"""
class ApiError(AdminapiException):
"""An API request wasn't successful"""
def __init__(self, *args, **kwargs):
if 'status_code' in kwargs:
self.status_code = kwargs.pop('status_code')
else:
self.status_code = 400
        super(ApiError, self).__init__(*args, **kwargs)
class AuthenticationError(AdminapiException):
"""No suitable authentication credentials available"""
pass
class DatasetError(AdminapiException):
"""Something went wrong within a dataset instance"""
pass
class DatatypeError(AdminapiException):
"""A query or dataset attribute had the wrong value datatype"""
pass
# XXX: Sub-class ValueError for backwards compatibility
class FilterValueError(DatatypeError, ValueError):
"""A filter value made no sense"""
pass
| UTF-8 | Python | false | false | 1,063 | py | 158 | exceptions.py | 111 | 0.688617 | 0.682032 | 0 | 44 | 23.159091 | 67 |
daisukeiot/OpenVINO-Toolkit-Setup | 16,518,444,251,041 | 7a7d705628acfb5be9705eb329c63c82353927a6 | 38360de287983f99f1d5a49fba36a9dc4f142de5 | /App/ObjectDetection/Python/VideoProcessor.py | 54ee1e75dbadd3f42d25d069f5b73ef1f38bbc46 | [
"MIT"
]
| permissive | https://github.com/daisukeiot/OpenVINO-Toolkit-Setup | f71e801d4b47cefdf84dec74b940143f8742ef72 | d5002e7f9cb12de0be14bf2d8c30c2767afc83e9 | refs/heads/master | 2022-07-06T02:01:31.767246 | 2021-01-06T20:37:57 | 2021-01-06T20:37:57 | 246,788,872 | 3 | 2 | MIT | false | 2022-06-22T01:51:16 | 2020-03-12T09:04:42 | 2021-01-06T20:38:01 | 2022-06-22T01:51:15 | 1,510 | 3 | 1 | 1 | Python | false | false | import sys
import logging
import traceback
import time
import cv2
import asyncio
import numpy as np
from FPS import FPS
from enum import IntEnum
import json
from OpenVINO_Engine import OpenVINO_Util, OpenVINO_Engine
from OpenVINO_Config import Engine_State, Model_Flag
from concurrent.futures import ThreadPoolExecutor, CancelledError
from WebServer import ImageStreamHandler
from pathlib import Path
from Video_Data import Video_Data, Video_Device_Type, Video_Data_State, Video_Playback_Mode
import youtube_dl
class VideoProcessorState(IntEnum):
Unknown = 0
Running = 1
Stop = 2
Pause = 3
Error = 4
class VideoProcessor(object):
#
# Initialization of Video Processor Class
# Reads video frame from Video Stream class and process (AI Inference etc)
# Set frame data to displayFrame for visualization
#
def __init__(self,
videoPath = '/dev/video0',
videoW = 1024,
videoH = 768,
fontScale = 1.0,
verbose = True):
self.verbose = verbose
self._debug = False
if self.verbose:
logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
logging.info('===============================================================')
logging.info('Initializing Video Processor with the following parameters:')
logging.info(' - OpenCV Version : {}'.format(cv2.__version__))
logging.info(' - Device Path : {}'.format(videoPath))
logging.info(' - Frame Size : {}x{}'.format(videoW, videoH))
logging.info('===============================================================')
# To send message to clients (Browser)
self.imageStreamHandler = None
self.threadExecutor = None
# Video source
self.videoData = Video_Data(self, videoPath)
self.displayFrame = np.array([])
self.frame_org = np.array([])
# for Frame Rate measurement
self.fps = FPS()
playback_mode = self.videoData.get_playback_mode()
self._playback_sync = (playback_mode == Video_Playback_Mode.Sync)
self._fps_target = 30
self._fps_wait = 1000.0/30
self.currentFps = 30.0
# For display
self._fontScale = float(fontScale)
self._annotate = False
# Track states of this object
self.set_video_processor_state(VideoProcessorState.Unknown)
# OpenVINO
self.inference_engine = None
self.runInference = 0
self.ioLoop = None
self.current_model_data = None
#
# Sets up Video Processor Class
# Creates Video Stream Class for video capture
#
def __enter__(self):
# async def __aenter__(self):
if self.verbose:
logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
self.set_video_path('{{\"set_video_path\":\"{}\"}}'.format(self.videoData.videoPath))
self.inference_engine = OpenVINO_Engine(self)
# with OpenVINO_Util() as openVino:
# devices = openVino.get_supported_devices()
# for device in devices:
# logging.info('>> Device : {0}'.format(device))
# fullName = openVino.get_device_name(device)
# logging.info('>> Name : {0}'.format(fullName))
# self.inference_engine.hwList.append(device)
self.inference_engine.initialize_engine()
return self
#
# Clean up Video Processor Class
#
def __exit__(self, exception_type, exception_value, traceback):
# async def __aexit__(self, exception_type, exception_value, traceback):
if self.verbose:
logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
if self.threadExecutor:
self.threadExecutor.shutdown(wait=True)
self.set_video_processor_state(VideoProcessorState.Stop)
#
# Send message to browser
#
def send_message(self, msg):
if self.imageStreamHandler:
ImageStreamHandler.broadcast_message(msg)
#
# Set Video Processor State flag
#
def set_video_processor_state(self, flag):
self._state = flag
#
# Initializes Video Source
#
def _init_video_source(self):
if self._debug:
logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
raise NotImplementedError
#
# Sets current video frame for display
#
def set_display_frame(self, frame):
assert frame.size > 0, "Frame Empty"
self.displayFrame = frame
#
# Resturns current video frame for display
# Converts to byte data
#
def get_display_frame(self):
if self.displayFrame.size == 0:
if self.videoData.get_video_data_state() == Video_Data_State.PhotoReady:
if self.videoData.videoH == 0 or self.videoData.videoW == 0:
wallpaper = np.zeros((720, 1280, 3), np.uint8)
else:
wallpaper = np.zeros((self.videoData.videoH, self.videoData.videoW, 3), np.uint8)
ret, buffer = cv2.imencode( '.jpg', wallpaper )
else:
return None, 0
else:
ret, buffer = cv2.imencode( '.jpg', self.displayFrame )
if ret and buffer.size > 0:
return buffer.tobytes(), self.currentFps
else:
            assert False, '>> Display Frame Empty'
#
# Resturns Inference Engine Info
#
def get_inference_engine_info(self):
if self.verbose:
logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
if self.inference_engine:
devices = json.dumps(self.inference_engine.get_devices())
if self.runInference == 1:
state = "On"
else:
state = "Off"
return '{{\"{0}\":\"{1}\",\"devices\":{2},\"get_inference_state\":\"{3}\"}}'.format(sys._getframe().f_code.co_name, self.inference_engine.signature, devices, state)
else:
assert False, '>> {} : Inference Engine Not Set'.format(sys._getframe().f_code.co_name)
return '{{\"{}\":\"Inference Engine Not Set\", \"isFailure\":1}}'.format(sys._getframe().f_code.co_name)
#
# Retrieve a list of models
#
def get_model_list(self):
if self.verbose:
logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
if self.inference_engine:
json_data = json.loads('{\"get_model_list\":[]}')
model_list = self.inference_engine.get_model_list()
for model in model_list:
json_data["get_model_list"].append(json.loads(model.to_json()))
else:
assert False, '>> {} : Inference Engine Not Set'.format(sys._getframe().f_code.co_name)
return '{{\"{}\":\"Inference Engine Not Set\", \"isFailure\":1}}'.format(sys._getframe().f_code.co_name)
return json_data
#
# Set to keep FPS for video or not
#
def playback_mode(self, msg):
if self.verbose:
logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
jsonData = json.loads(msg)
playback_mode = jsonData["playback_mode"]
self._playback_sync = playback_mode == "0"
self.videoData.set_playback_mode(playback_mode)
return '{{\"playback_mode\":\"{0}\"}}'.format(self.videoData.get_playback_mode())
#
# Stop video process
#
def set_video_playback(self, msg):
if self.verbose:
logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
jsonData = json.loads(msg)
if jsonData['set_video_playback'] == "1":
self.set_video_processor_state(VideoProcessorState.Running)
else:
self.set_video_processor_state(VideoProcessorState.Pause)
return self.get_video_playback()
#
# Return current video playback state
#
def get_video_playback(self):
if self.verbose:
logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
if self._state == VideoProcessorState.Pause:
state = "0"
elif self._state == VideoProcessorState.Running:
state = "1"
else:
assert False, "Unexpected Video Processor State"
return '{{\"get_video_playback\":\"{}\"}}'.format(state)
#
# Stop video process
#
def set_video_stop(self):
if self.verbose:
logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
self.videoData.set_video_playback(isPause = True)
self.set_video_processor_state(VideoProcessorState.Pause)
#
# Start video process
#
def set_video_start(self):
if self.verbose:
logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
self.fps.reset(self.videoData.get_video_fps())
self.videoData.set_video_playback(isPause = False)
self.set_video_processor_state(VideoProcessorState.Running)
self.send_message('{\"frame_ready\":1}')
#
# Set Video Resolution
#
def set_video_path(self, msg, loop = None):
if self.verbose:
logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
jsonData = json.loads(msg)
if jsonData.get("set_video_path"):
videoPath = jsonData["set_video_path"]
else:
videoPath = jsonData["videoPath"]
self.set_video_processor_state(VideoProcessorState.Pause)
video_data_state = self.videoData.set_video_path(videoPath, loop)
if video_data_state == Video_Data_State.Running or video_data_state == Video_Data_State.PhotoReady:
self.fps.reset(self.videoData.get_video_fps())
self.set_video_start()
else:
self.set_video_processor_state(VideoProcessorState.Pause)
return self.get_video_path()
#
# Return current video path
#
def get_video_path(self):
if self.verbose:
logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
return self.videoData.get_video_path()
#
# Set Video Resolution
#
def set_video_resolution(self, msg):
if self._debug:
logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
return self.videoData.set_video_resolution(msg)
#
# Get Video Resolution
#
def get_video_resolution(self):
if self._debug:
logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
return self.videoData.get_video_resolution()
#
# Set AI model to use
#
async def set_ai_model(self, loop, msg):
if self.verbose:
logging.info('>> {0}:{1}() {2}'.format(self.__class__.__name__, sys._getframe().f_code.co_name, msg))
try:
self.ioLoop = loop
#1 Get Model Data
model_data = self.inference_engine.get_ai_model_data(msg)
current_hw = json.loads(self.inference_engine.get_target_device())
current_precision = json.loads(self.inference_engine.get_precision())
if model_data.isFlagSet(Model_Flag.Loaded):
json_data = json.loads(msg)
device = json_data["set_target_device"]
precision = json_data["set_precision"]
if current_hw['get_target_device'] == device and current_precision['get_precision'] == precision:
logging.info(">> Model {} is loaded to {}".format(model_data.modelName, current_hw))
self.runInference = 1
self.send_message('{{\"set_ai_model\":\"Running {}\",\"isComplete\":1}}'.format(model_data.modelName))
else:
if self.current_model_data:
self.current_model_data.clearFlag(Model_Flag.Loaded)
self.current_model_data = None
if not model_data is None:
self.set_device_params(msg)
# self.set_precision(msg)
# self.set_target_device(msg)
# create a task to download model from model zoo
self.set_video_processor_state(VideoProcessorState.Pause)
self.send_message('{{\"set_ai_model\":\"Downloading {}\"}}'.format(model_data.modelName))
task = self.ioLoop.run_in_executor(None, self.inference_engine.download_model, model_data)
task.add_done_callback(self.model_download_callback)
else:
json_data = json.loads(msg)
self.send_message('{{\"set_ai_model\":\"Failed to get model data for {}\",\"isFailure\":1}}'.format(json_data["SetAiModel"]))
except CancelledError:
logging.info('-- {0}() - Cancelled'.format(sys._getframe().f_code.co_name))
except Exception as ex:
exc_type, exc_obj, exc_tb = sys.exc_info()
traceback.print_exception(exc_type, exc_obj, exc_tb)
logging.error('!! {0}:{1}() : Exception {2}'.format(self.__class__.__name__, sys._getframe().f_code.co_name, ex))
#
# Callback function for model download
#
def model_download_callback(self, future):
if self.verbose:
logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
model_data = future.result()
        assert model_data is not None, "Model Data is None"
if model_data.isFlagSet(Model_Flag.Downloaded):
self.send_message('{{\"set_ai_model\":\"{} downloaded. Converting to IR\"}}'.format(model_data.modelName))
if model_data.framework == 'dldt':
task = self.ioLoop.run_in_executor(None, self.inference_engine.load_model, model_data)
task.add_done_callback(self.model_load_callback)
else:
task = self.ioLoop.run_in_executor(None, self.inference_engine.convert_model, model_data)
task.add_done_callback(self.model_convert_callback)
else:
self.set_video_start()
self.send_message('{{\"set_ai_model\":\"Download failed {}\",\"isFailure\":1}}'.format(model_data.errorMsg))
#
# Callback function for model conversion
#
def model_convert_callback(self, future):
if self.verbose:
logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
model_data = future.result()
if model_data.isFlagSet(Model_Flag.Converted):
logging.info(' FP16 {}'.format(str(model_data.ir_dir['FP16'])))
logging.info(' FP32 {}'.format(str(model_data.ir_dir['FP32'])))
self.send_message('{{\"set_ai_model\":\"{} converted to IR.\\nLoading....\", \"isSuccess\":1}}'.format(model_data.modelName))
self.inference_engine.remove_model_dir(model_data)
task = self.ioLoop.run_in_executor(None, self.inference_engine.load_model, model_data)
task.add_done_callback(self.model_load_callback)
else:
self.set_video_start()
self.send_message('{{\"set_ai_model\":\"Convert Failed : {}\",\"isFailure\":1}}'.format(model_data.errorMsg))
#
# Callback function for model load
#
def model_load_callback(self, future):
if self.verbose:
logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
model_data = future.result()
self.set_video_start()
if model_data.isFlagSet(Model_Flag.Loaded):
target_device = json.loads(self.inference_engine.get_target_device())
self.send_message('{{\"set_ai_model\":\"Successfully loaded {}\", \"isComplete\":1}}'.format(model_data.modelName))
self.send_message('{{\"get_inference_engine_info\":\"{} running on {}\"}}'.format(self.inference_engine.signature, target_device['get_target_device']))
self.current_model_data = model_data
else:
self.send_message('{{\"set_ai_model\":\"Load failed : {}\",\"isFailure\":1}}'.format(model_data.errorMsg))
#
# Set hardware to run inference on
#
def set_device_params(self, msg, reload = False):
if self.verbose:
logging.info('>> {0}:{1}() {2}'.format(self.__class__.__name__, sys._getframe().f_code.co_name, msg))
if self.inference_engine:
self.inference_engine.set_target_device(msg)
self.inference_engine.set_precision(msg)
if reload == True and self.current_model_data:
# create a task to download model from model zoo
self.set_video_processor_state(VideoProcessorState.Pause)
self.send_message('{{\"set_ai_model\":\"Loading {}\"}}'.format(self.current_model_data.modelName))
task = self.ioLoop.run_in_executor(None, self.inference_engine.load_model, self.current_model_data)
task.add_done_callback(self.model_download_callback)
return self.inference_engine.set_target_device(msg)
else:
assert False, '>> {} : Inference Engine Not Set'.format(sys._getframe().f_code.co_name)
return '{{\"{}\":\"Inference Engine Not Set\", \"isFailure\":1}}'.format(sys._getframe().f_code.co_name)
#
# Set hardware to run inference on
#
# def set_target_device(self, msg):
# if self.verbose:
# logging.info('>> {0}:{1}() {2}'.format(self.__class__.__name__, sys._getframe().f_code.co_name, msg))
# if self.inference_engine:
# self.inference_engine.set_target_device(msg)
# self.inference_engine.set_precision(msg)
# if self.current_model_data:
# # create a task to download model from model zoo
    #             self.set_video_processor_state(VideoProcessorState.Pause)
# self.send_message('{{\"set_ai_model\":\"Loading {}\"}}'.format(self.current_model_data.modelName))
# task = self.ioLoop.run_in_executor(None, self.inference_engine.load_model, self.current_model_data)
# task.add_done_callback(self.model_download_callback)
# return self.inference_engine.set_target_device(msg)
# else:
# assert False, '>> {} : Inference Engine Not Set'.format(sys._getframe().f_code.co_name)
# return '{{\"{}\":\"Inference Engine Not Set\", \"isFailure\":1}}'.format(sys._getframe().f_code.co_name)
#
# Return hardware to run inference on
#
def get_target_device(self):
if self._debug:
logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
if self.inference_engine:
return self.inference_engine.get_target_device()
else:
assert False, '>> {} : Inference Engine Not Set'.format(sys._getframe().f_code.co_name)
return '{{\"{}\":\"Inference Engine Not Set\", \"isFailure\":1}}'.format(sys._getframe().f_code.co_name)
#
# Set Inference Precision
#
# def set_precision(self, msg):
# if self.verbose:
# logging.info('>> {0}:{1}() {2}'.format(self.__class__.__name__, sys._getframe().f_code.co_name, msg))
# if self.inference_engine:
# self.inference_engine.set_precision(msg)
# if self.current_model_data:
# # create a task to download model from model zoo
    #             self.set_video_processor_state(VideoProcessorState.Pause)
# self.send_message('{{\"set_ai_model\":\"Loading {}\"}}'.format(self.current_model_data.modelName))
# task = self.ioLoop.run_in_executor(None, self.inference_engine.load_model, self.current_model_data)
# task.add_done_callback(self.model_download_callback)
# return self.get_precision()
# else:
# assert False, '>> {} : Inference Engine Not Set'.format(sys._getframe().f_code.co_name)
# return '{{\"{}\":\"Inference Engine Not Set\", \"isFailure\":1}}'.format(sys._getframe().f_code.co_name)
#
# Get Inference Precision
#
def get_precision(self):
if self._debug:
logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
if self.inference_engine:
return self.inference_engine.get_precision()
else:
assert False, '>> {} : Inference Engine Not Set'.format(sys._getframe().f_code.co_name)
return '{{\"{}\":\"Inference Engine Not Set\", \"isFailure\":1}}'.format(sys._getframe().f_code.co_name)
#
# Set Confidence Level threshold
#
def set_confidence_level(self, msg):
if self._debug:
logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
jsonData = json.loads(msg)
confidenceLevel = int(jsonData["set_confidence_level"].replace('%',''))
if self.inference_engine:
return self.inference_engine.set_confidence_level(confidenceLevel)
else:
assert False, '>> {} : Inference Engine Not Set'.format(sys._getframe().f_code.co_name)
return '{{\"{}\":\"Inference Engine Not Set\", \"isFailure\":1}}'.format(sys._getframe().f_code.co_name)
#
# Return Confidence Level threshold
#
def get_confidence_level(self):
if self._debug:
logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
if self.inference_engine:
return self.inference_engine.get_confidence_level()
else:
assert False, '>> {} : Inference Engine Not Set'.format(sys._getframe().f_code.co_name)
return '{{\"{}\":\"Inference Engine Not Set\", \"isFailure\":1}}'.format(sys._getframe().f_code.co_name)
#
# Set Inference State
#
def set_inference_state(self, msg):
if self.verbose:
logging.info('>> {0}:{1}() {2}'.format(self.__class__.__name__, sys._getframe().f_code.co_name, msg))
jsonData = json.loads(msg)
inferenceState = jsonData["set_inference_state"]
if self.current_model_data:
#make sure model is loaded
if not self.current_model_data.isFlagSet(Model_Flag.Loaded):
return '{{\"{}\":\"{} is not loaded\", \"isFailure\":1}}'.format(sys._getframe().f_code.co_name, self.current_model_data.modelName)
else:
self.runInference = int(inferenceState)
return self.get_inference_state()
else:
return '{{\"{}\":\"Model Data Not Set\", \"isFailure\":1}}'.format(sys._getframe().f_code.co_name)
#
# Get Current Inference State
#
def get_inference_state(self):
if self.verbose:
logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
return '{{\"{}\":\"{}\"}}'.format(sys._getframe().f_code.co_name, self.runInference)
async def process_video_frame_async(self, executor):
if self.verbose:
logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
try:
loop = asyncio.get_event_loop()
task = await loop.run_in_executor(executor, self.process_video_frame)
return task
except CancelledError:
logging.info('-- {0}() - Cancelled'.format(sys._getframe().f_code.co_name))
self.set_video_processor_state(VideoProcessorState.Stop)
return 0
except Exception as ex:
exc_type, exc_obj, exc_tb = sys.exc_info()
traceback.print_exception(exc_type, exc_obj, exc_tb)
logging.error('!! {0}:{1}() : Exception {2}'.format(self.__class__.__name__, sys._getframe().f_code.co_name, ex))
return 1
#
# Saves frame data to a file
#
def save_image(self):
cv2.imwrite("./frame.png", self.displayFrame)
#cv2.imwrite("./frame.png", self.frame_org)
#
# Process Video Frame
#
def process_video_frame(self):
if self.verbose:
logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
textX, textY = cv2.getTextSize("FPS", cv2.FONT_HERSHEY_DUPLEX, self._fontScale, 1)[0]
textX = int(textX * self._fontScale * 1.1)
textY = int(textY * self._fontScale * 1.1)
frame = np.array([])
self.fps.reset(self.videoData.get_video_fps())
while True:
try:
if self._state == VideoProcessorState.Stop:
logging.info('>> {0}:{1}() : Stop Video Processor'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
break
if self._state == VideoProcessorState.Pause:
if self._debug:
logging.info('>> {0}:{1}() : Pause Video Processor'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
time.sleep(1)
continue
if self.videoData is None:
logging.info('>> {0}:{1}() : No Video Data'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
time.sleep(0.5)
continue
grabbed, frame = self.videoData.read_frame_queue()
if self._debug:
logging.info("Grabbed {} frame size {}".format(grabbed, frame.size))
                # if not grabbed or frame.size == 0:
                if not grabbed:
time.sleep(1/30)
continue
else:
self.frame_org = np.copy(frame)
if self.runInference == 1:
# Run Inference
frame = self.inference_engine.inference(frame)
if self._annotate:
fps_annotation = 'FPS : {}'.format(self.currentFps)
cv2.putText( frame, fps_annotation, (10, textY + 10), cv2.FONT_HERSHEY_SIMPLEX, self._fontScale, (0,0,255), 2)
self.set_display_frame(frame)
self.currentFps = self.fps.fps(self._playback_sync)
except Exception as ex:
exc_type, exc_obj, exc_tb = sys.exc_info()
traceback.print_exception(exc_type, exc_obj, exc_tb)
logging.error('!! {0}:{1}() : Exception {2}'.format(self.__class__.__name__, sys._getframe().f_code.co_name, ex))
if self.verbose:
logging.info('<< {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
| UTF-8 | Python | false | false | 26,907 | py | 81 | VideoProcessor.py | 45 | 0.574089 | 0.566433 | 0 | 699 | 37.493562 | 176 |
bpJedisim/CS_ML_DL_Courses | 4,698,694,247,774 | 4eb4f954d83129618674b1f83b4ce1b5e7ea3ef5 | 4746ae085e941eab7d3d98e07c4318104c61db47 | /Machine_Learning_Univ_Course_(2017Fall)/Homeworks/hw06/prac/spam/create_vocab/gen_features.py | 8d489de19d2d594bf765c5f20c62de08bd6b58cc | []
| no_license | https://github.com/bpJedisim/CS_ML_DL_Courses | b21b912d6f5cd8cb5b088405c1fc1a46c5d202bd | 838f05915cb25605f574756fea77274692c1122e | refs/heads/master | 2020-08-01T22:54:16.616123 | 2019-09-15T14:28:14 | 2019-09-15T14:28:14 | 211,144,312 | 0 | 0 | null | true | 2019-09-26T17:24:13 | 2019-09-26T17:24:12 | 2019-09-15T14:28:16 | 2019-09-15T14:28:15 | 191,185 | 0 | 0 | 0 | null | false | false | #!python
# -*- coding: utf-8 -*-#
"""
Spam Exercise (Qn 2)
@author: Bhishan Poudel
@date: Nov 9, 2017
@email: bhishanpdl@gmail.com
"""
# Imports
import collections
import numpy as np
def create_vocab(fdata,min_freq,fvocab):
# count the words and frequencies
wordcount = collections.Counter()
with open(fdata) as fi:
for line in fi:
wordcount.update(set(line[1:].split()))
pairs = [(w,f) for w,f in wordcount.items() if f>=min_freq ]
# do not include stopwords
# fstopwords = 'stopwords.txt'
# stopwords = np.loadtxt(fstopwords,dtype='str')
# pairs = [(w,f) for w,f in wordcount.items() if f>=min_freq if w not in stopwords]
# sort alphabetically
pairs = sorted(pairs, key=lambda word: word[0], reverse=0)
# sort by number of occurrence
# pairs = sorted(pairs, key=lambda word: word[1], reverse=1)
print("len(vocab) = {}".format(len(pairs)))
with open(fvocab,'w') as fo:
for i in range(len(pairs)):
fo.write("{} {}\n".format(i+1,pairs[i][0]))
# write index token freq
# fo.write("{} {} {}\n".format(i+1,pairs[i][0], pairs[i][1]))
def create_sparse(fdata,fvocab,fsparse):
# read index token freq
# idx,token,freq = np.genfromtxt(fvocab, dtype=str, unpack=True)
# read index and token
idx,token = np.genfromtxt(fvocab, dtype=str, unpack=True)
d = dict(zip(token,idx))
with open(fdata) as fi, \
open(fsparse,'w') as fo:
for i,line in enumerate(fi):
nums = [ int(d[w]) for w in line[1:].split() if w in token ]
nums = sorted(list(set(nums)))
nums = [str(n)+":1" for n in nums ]
sparse_line = line[0] + " " + " ".join(nums) + "\n"
print("Writing sparse matrix line: {}".format(i+1))
fo.write(sparse_line)
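# Each line of the sparse file is "<label> <idx>:1 <idx>:1 ...", e.g. "1 3:1 17:1",
# where the indices are the 1-based vocab positions of the tokens in that message.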
def create_dense(fsparse, fdense,fvocab):
# number of lines in vocab
lvocab = sum(1 for line in open(fvocab))
# create dense file
with open(fsparse) as fi, open(fdense,'w') as fo:
for i, line in enumerate(fi):
words = line.strip('\n').split(':')
words = " ".join(words).split()
label = int(words[0])
            indices = [int(w) for (i, w) in enumerate(words) if i % 2]
row = [0]* (lvocab+1)
row[0] = label
# using for loop
# for idx in indices:
# row[idx] = 1
# use listcomps
row = [ 1 if i in indices else row[i] for i in range(len(row))]
l = " ".join(map(str,row)) + "\n"
fo.write(l)
print('Writing dense matrix line: ', i+1)
# print("\nwords = {}".format(words))
# print("label = {}".format(label))
# print("idx = {}".format(idx))
# print("row = {}".format(row))
def main():
# datafiles
# fdata, min_freq = 'data.txt', 2
fdata, min_freq = 'spam_train.txt', 30
fsparse = 'sparse.txt'
fvocab = "vocab.txt"
fdense = 'dense.txt'
# create_vocab(fdata,min_freq, fvocab)
# create_sparse(fdata,fvocab,fsparse)
# create_dense(fsparse, fdense,fvocab)
# compare labels
l1 = np.loadtxt('label1.txt')
l2 = np.loadtxt('label2.txt')
for i,j in zip(l1,l2):
print(i-j)
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 3,524 | py | 285 | gen_features.py | 165 | 0.523837 | 0.51277 | 0 | 122 | 27.885246 | 87 |
sntciitbhu/sntc_website_beta | 10,711,648,477,225 | 0c5097a1c7069240c873f41ba914e6a8de0c56fc | 5f6874113f86669d3220c9d5c247dab0a8abca01 | /apps/tac/migrations/0005_auto_20200514_1918.py | b0d3abc104197b59d09afd1f7041cefde99ca7b9 | []
| no_license | https://github.com/sntciitbhu/sntc_website_beta | f5e0f0b0deec9b291b7c4c6cf0d54b7cf069596c | 26a17ac7a401229a53fd428132fe072bdbb260b9 | refs/heads/master | 2021-12-23T19:26:03.161225 | 2020-05-18T09:54:25 | 2020-05-18T09:54:25 | 252,948,125 | 0 | 0 | null | false | 2021-09-22T18:50:51 | 2020-04-04T08:35:08 | 2020-05-18T09:54:29 | 2021-09-22T18:50:49 | 38,516 | 0 | 0 | 4 | JavaScript | false | false | # Generated by Django 3.0.5 on 2020-05-14 19:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tac', '0004_auto_20200514_1900'),
]
operations = [
migrations.RemoveField(
model_name='tac_detail',
name='facebook_link',
),
migrations.RemoveField(
model_name='tac_detail',
name='git_link',
),
migrations.RemoveField(
model_name='tac_detail',
name='insta_link',
),
migrations.RemoveField(
model_name='tac_detail',
name='linkedin_link',
),
migrations.RemoveField(
model_name='tac_detail',
name='twitter_link',
),
migrations.RemoveField(
model_name='tac_detail',
name='youtube_link',
),
migrations.AddField(
model_name='tac_detail',
name='facebook',
field=models.URLField(blank=True, null = True ,default=None, max_length=500),
),
migrations.AddField(
model_name='tac_detail',
name='git',
field=models.URLField(blank=True, null = True ,default=None, max_length=500),
),
migrations.AddField(
model_name='tac_detail',
name='insta',
field=models.URLField(blank=True, null = True ,default=None, max_length=500),
),
migrations.AddField(
model_name='tac_detail',
name='linkedin',
field=models.URLField(blank=True, null = True ,default=None, max_length=500),
),
migrations.AddField(
model_name='tac_detail',
name='twitter',
field=models.URLField(blank=True, null = True ,default=None, max_length=500),
),
migrations.AddField(
model_name='tac_detail',
name='youtube',
field=models.URLField(blank=True, null = True ,default=None, max_length=500),
),
]
| UTF-8 | Python | false | false | 2,069 | py | 62 | 0005_auto_20200514_1918.py | 45 | 0.530691 | 0.507008 | 0 | 67 | 29.880597 | 89 |
jShainline/soens_sim | 6,365,141,564,780 | 3be0605b8b65ebb2c342a6e0186fff09daeb6a0c | 6a56fbd216432eb7f55e9ebd30275edd0d8fbcb9 | /synapse/_bak/s__load_wr_synapse_test_data.py | cab187b22f6e3af79e6032ad44e19d0a50c5636f | []
| no_license | https://github.com/jShainline/soens_sim | fea00d736a0d5cdb1267e06cf0ee55a4ca8c7bb7 | 51784fcba09e563f7353c84572cebf31fb7aa11a | refs/heads/master | 2021-04-02T23:03:28.019615 | 2021-03-31T01:46:51 | 2021-03-31T01:46:51 | 248,327,761 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #%%
import numpy as np
from matplotlib import pyplot as plt
import time
from scipy.signal import find_peaks
from scipy.optimize import curve_fit
# from soen_sim import input_signal, synapse, dendrite, neuron
from _plotting import plot_fq_peaks, plot_fq_peaks__dt_vs_bias, plot_wr_data__currents_and_voltages, plot_wr_comparison__synapse
from _functions import save_session_data, load_session_data, read_wr_data, V_fq__fit, inter_fluxon_interval__fit, inter_fluxon_interval, inter_fluxon_interval__fit_2, inter_fluxon_interval__fit_3, chi_squared_error
from _function_more import synapse_model__parameter_sweep
from util import physical_constants
from soen_sim import input_signal, synapse
p = physical_constants()
# plt.close('all')
#%% load wr data, determine quantities of interest
I_sy_vec = [23,28,33,38,28,28,28,28,33,33,33,33]#uA
L_si_vec = [77.5,77.5,77.5,77.5,7.75,77.5,775,7750,775,775,775,775]#nH
tau_si_vec = [250,250,250,250,250,250,250,250,10,50,250,1250]#ns
data_file_list = []
num_files = len(I_sy_vec)
for ii in range(num_files):
data_file_list.append('syn_Ispd20.00uA_Isy{:04.2f}uA_Lsi{:07.2f}nH_tausi{:04.0f}ns_dt10.0ps_tsim1000ns.dat'.format(I_sy_vec[ii],L_si_vec[ii],tau_si_vec[ii]))
for ii in range(num_files):
print('ii = {:d} of {:d}'.format(ii+1,num_files))
directory = 'wrspice_data/fitting_data'
file_name = data_file_list[ii]
data_dict = read_wr_data(directory+'/'+file_name)
#plot wr time traces
data_to_plot = ['L0#branch','L3#branch','v(2)']
plot_save_string = file_name
plot_wr_data__currents_and_voltages(data_dict,data_to_plot,plot_save_string) | UTF-8 | Python | false | false | 1,639 | py | 161 | s__load_wr_synapse_test_data.py | 155 | 0.708359 | 0.633923 | 0 | 40 | 40 | 214 |
Oluwadurotimi10/Reuseable_pipeline_components | 5,927,054,911,096 | 9baadc92b4af8f4ecb2c9518a5429768c594b459 | bb568739839c42e023d32c625d2238c507c4f48f | /TensorFlow/components/visualization/visuals.py | b88dd451d179853cda92fc0ee92475f2589e0bf8 | []
| no_license | https://github.com/Oluwadurotimi10/Reuseable_pipeline_components | dcfaac2dfcf0f626278cbf5bdbb80563bd30abe0 | 629ea1f247e18f49abb630850d844c9460e43aec | refs/heads/main | 2023-05-26T19:45:55.095048 | 2021-06-09T12:53:11 | 2021-06-09T12:53:11 | 326,209,579 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #importing libraries
import argparse
from typing import NamedTuple
def visuals(test_loss,test_acc,matrix_data) -> NamedTuple('output', [('mlpipeline_ui_metadata', 'UI_metadata'),
('mlpipeline_metrics', 'Metrics')]):
#importing libraries
import joblib
import numpy as np
import pandas as pd
import json
#loading the metrics
test_loss =joblib.load(test_loss)
test_acc = joblib.load(test_acc)
matrix_data = joblib.load(matrix_data)
vocab = [0,1]
metadata = {
'outputs' : [{
'type': 'confusion_matrix',
'format': 'csv',
'schema':[
{'name': 'target', 'type': 'CATEGORY'},
{'name': 'predicted', 'type': 'CATEGORY'},
{'name': 'count', 'type': 'NUMBER'},
],
'source': matrix_data.to_csv(header=False, index=False),
'storage':'inline',
'labels': list(map(str,vocab)),
}]
}
metrics = {
'metrics': [{
'name': 'Accuracy',
'numberValue': float(test_acc),
'format': "PERCENTAGE",
}, {
'name': 'Loss',
'numberValue': float(test_loss),
'format': "PERCENTAGE",
}]}
from collections import namedtuple
output = namedtuple('output', ['mlpipeline_ui_metadata', 'mlpipeline_metrics'])
visual = output(json.dumps(metadata), json.dumps(metrics))
with open('mlpipeline-ui-metadata.json', 'w') as met:
met.write(visual.mlpipeline_ui_metadata)
with open('mlpipeline-metrics.json', 'w') as mat:
mat.write(visual.mlpipeline_metrics)
#defining and parsing arguments
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--test_loss')
parser.add_argument('--test_acc')
parser.add_argument('--matrix_data')
args = parser.parse_args()
visuals(args.test_loss, args.test_acc, args.matrix_data)
"""
#saving pred and actual as csv file
vocab = [0,1]
cm = confusion_matrix(y_test, y_pred, labels=vocab)
cm_data = []
for target_index,target_row in enumerate(cm):
for predicted_index, count in enumerate(target_row):
cm_data.append((vocab[target_index], vocab[predicted_index], count))
cm_df = pd.DataFrame(cm_data, columns=['target','predicted','count'])
#serialize data to be used for confusion matrix
joblib.dump(cm_df, 'matrix_data')
""" | UTF-8 | Python | false | false | 2,460 | py | 26 | visuals.py | 8 | 0.584959 | 0.583333 | 0 | 75 | 31.813333 | 112 |
AlissonMacedo/gestao_escolar | 4,767,413,748,378 | 8c9f965e1e1baa936dae799f41f34f53df2f7aeb | 5f365910d9459e2ad17770565351e3f06889336c | /apps/departamentos/migrations/0003_auto_20190401_1529.py | 7dffee2149f9ce71f2e9dbd6c53855a32485ba91 | []
| no_license | https://github.com/AlissonMacedo/gestao_escolar | 0d40fa84d627bc9a571314dd256a78692ecf57b0 | cd42d950565496e8ffefbfac9603f8e1c3de85f0 | refs/heads/master | 2020-04-15T01:00:08.400258 | 2019-04-23T16:11:28 | 2019-04-23T16:11:28 | 164,259,591 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.1.7 on 2019-04-01 18:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('departamentos', '0002_departamento_empresa'),
]
operations = [
migrations.AlterField(
model_name='departamento',
name='nome',
field=models.CharField(max_length=18),
),
]
| UTF-8 | Python | false | false | 398 | py | 112 | 0003_auto_20190401_1529.py | 79 | 0.600503 | 0.547739 | 0 | 18 | 21.111111 | 55 |
mbenitezm/taskr | 3,925,600,127,029 | f3a4f3c940eabf5ebefa3b6ca2ef629abe4d2ffe | 79ff9f61117e27f2fb54a7052f210e2a8e50e675 | /bin/modules/render.py | c7bbe609cc128d2e709d5c1c13915debd6180449 | []
| no_license | https://github.com/mbenitezm/taskr | b0f3a8976e9868a2920951f8dfe5b4347a30e66e | 33f29d7ac9b90767804a974cfae8e021ea5b898f | refs/heads/master | 2020-12-27T21:33:07.590557 | 2014-08-14T05:23:02 | 2014-08-14T05:23:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def open_html():
html = '''<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="description" content="">
<meta name="author" content="">
    '''
    return html
def title(title="Untitled"):
return "<title>"+title+"</title>"
def html_assets(s):
    # 's' is assumed to be an HTTP request handler exposing a writable 'wfile';
    # read_assets() and html_body() are assumed to be provided elsewhere in the
    # project -- they are not defined in this module.
    g, c, b, go = read_assets()
    s.wfile.write('<script type="text/javascript">')
    s.wfile.write(g)
    s.wfile.write('</script>')
    s.wfile.write('<style>')
    s.wfile.write(b)
    s.wfile.write('</style>')
    s.wfile.write('<script type="text/javascript">')
    s.wfile.write(c)
    s.wfile.write('</script>')
    s.wfile.write('<script type="text/javascript">')
    s.wfile.write(go)
    s.wfile.write('</script>')
    s.wfile.write("</head>")
    s.wfile.write("<body>")
    html_body(s)
    s.wfile.write("</body></html>")
| UTF-8 | Python | false | false | 928 | py | 13 | render.py | 11 | 0.602371 | 0.599138 | 0 | 31 | 28.935484 | 78 |
scottclowe/eat-it | 12,884,901,905,492 | a4149cc6aafa3a2865acc1cf216a33227b1f7efd | 8971e9132cbcaca6f8513b3f451a71509f566a0e | /scripts/no_rfa_doubled3.py | 67263f4323ced23b48c7c5dcb23b879743373074 | []
| no_license | https://github.com/scottclowe/eat-it | 5cb605b560050f47ab17920a5bed26da74b4928e | c55ac9a2f88012c7d08a1e7ea495097746730420 | refs/heads/master | 2016-09-01T23:28:29.928957 | 2015-09-12T12:48:46 | 2015-09-12T12:48:46 | 42,356,547 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sklearn
import pandas as pd
import numpy as np
import datetime
import itertools
import copy
import pickle
import sklearn.svm
import sklearn.linear_model
import sklearn.cross_validation
import sklearn.decomposition
import sklearn.manifold
import sklearn.metrics
from eat_it import StratifiedPercentileKFold
from eat_it import scalers
from eat_it import params
train = pd.read_csv('data/train.csv', encoding="utf-8")
# Add age in days
end_dt = datetime.datetime.strptime('2015-1-1', "%Y-%m-%d")
train['Age'] = [(end_dt - datetime.datetime.strptime(open_dt, "%m/%d/%Y")).days for open_dt in train['Open Date']]
# add size as boolean field
train['isBig'] = train['City Group']=='Big Cities'
# add each of the big cities as boolean field
#train['isIstanbul'] = train['City']=='İstanbul'
#train['isAnkara'] = train['City']=='Ankara'
#train['isIzmir'] = train['City']=='İzmir'
# add boolean field for type
train['isFC'] = train['Type']=='FC'
train['isDT'] = train['Type']=='DT'
train['isMB'] = train['Type']=='MB'
# Note when there is the missing 17 fields
train['missingSource'] = train[params.xor_cols].apply(lambda x: np.all(x==0), axis=1)
with open('data/genuinetestmap.pkl', 'rb') as hf:
gtm = pickle.load(hf)
gtest = pd.read_csv('data/genuinetest.csv', encoding="utf-8")
# Add age in days
end_dt = datetime.datetime.strptime('2015-1-1', "%Y-%m-%d")
gtest['Age'] = [(end_dt - datetime.datetime.strptime(open_dt, "%m/%d/%Y")).days for open_dt in gtest['Open Date']]
# add size as boolean field
gtest['isBig'] = gtest['City Group']=='Big Cities'
# add each of the big cities as boolean field
#gtest['isIstanbul'] = gtest['City']=='İstanbul'
#gtest['isAnkara'] = gtest['City']=='Ankara'
#gtest['isIzmir'] = gtest['City']=='İzmir'
# add boolean field for type
gtest['isFC'] = gtest['Type']=='FC'
gtest['isDT'] = gtest['Type']=='DT'
gtest['isMB'] = gtest['Type']=='MB'
# Note when there is the missing 17 fields
gtest['missingSource'] = gtest[params.xor_cols].apply(lambda x: np.all(x==0), axis=1)
test = pd.read_csv('data/test.csv', encoding="utf-8")
# Add age in days
end_dt = datetime.datetime.strptime('2015-1-1', "%Y-%m-%d")
test['Age'] = [(end_dt - datetime.datetime.strptime(open_dt, "%m/%d/%Y")).days for open_dt in test['Open Date']]
# add size as boolean field
test['isBig'] = test['City Group']=='Big Cities'
# add each of the big cities as boolean field
#test['isIstanbul'] = test['City']=='İstanbul'
#test['isAnkara'] = test['City']=='Ankara'
#test['isIzmir'] = test['City']=='İzmir'
# add boolean field for type
test['isFC'] = test['Type']=='FC'
test['isDT'] = test['Type']=='DT'
test['isMB'] = test['Type']=='MB'
# Note when there is the missing 17 fields
test['missingSource'] = test[params.xor_cols].apply(lambda x: np.all(x==0), axis=1)
# Merge Test and Train together, without having revenue for all entries
unlabelled_data = pd.concat((train, gtest), ignore_index=True)
#####################################
# Don't use public test revenues
#data = train
# Add known revenues from public test data
gtestrevenue = pd.read_csv('data/genuinetestrevenue.csv', encoding="utf-8")
labelled_test = pd.merge(gtest, gtestrevenue, on='Id')
# Merge all available training data together
data = pd.concat((train, labelled_test), ignore_index=True)
#####################################
# Assemble list of columns
Pcols = ['P'+str(i) for i in range(1,38)]
PMcols = params.xor_cols
PVcols = [i for i in Pcols if i not in params.xor_cols]
Gcols = ['Age']
Ocols = ['isBig','isFC','isDT','isMB']
cols = Pcols + Gcols + Ocols
# Targets
y = data['revenue'].values
X_indices = data['Id'].values
uX_indices = unlabelled_data['Id'].values
index_is_labelled = np.array([i in X_indices for i in uX_indices])
index_is_train = np.array([i in train['Id'].values for i in uX_indices])
unlabelled_data_nomissing = np.logical_not(unlabelled_data['missingSource'].values)
data_nomissing = np.logical_not(data['missingSource'].values)
test_nomissing = np.logical_not(test['missingSource'].values)
# Other (already one-hot columns) can stay as they are
XO = data.as_matrix(Ocols).astype(np.float)
tXO = test.as_matrix(Ocols).astype(np.float)
# Need to take logs because sometimes Age can't be mapped correctly by BoxCox
u = np.log(unlabelled_data.as_matrix(Gcols).astype(np.float))
d = np.log(data.as_matrix(Gcols).astype(np.float))
t = np.log(test.as_matrix(Gcols).astype(np.float))
s = scalers.BoxCoxScaler().fit(u)
XG = s.transform(d)
tXG = s.transform(t)
# Valid-always columns
u = unlabelled_data.as_matrix(PVcols).astype(np.float)
d = data.as_matrix(PVcols).astype(np.float)
t = test.as_matrix(PVcols).astype(np.float)
s = scalers.BoxCoxScaler().fit(u)
XPV = s.transform(d)
uXPV = s.transform(u)
tXPV = s.transform(t)
# Missing-sometimes columns
u = unlabelled_data.as_matrix(PMcols).astype(np.float)[unlabelled_data_nomissing]
d = data.as_matrix(PMcols).astype(np.float)
t = test.as_matrix(PMcols).astype(np.float)
s = scalers.BoxCoxScaler(known_min=0).fit(u)
XPM = s.transform(d)
uXPM = s.transform(u)
tXPM = s.transform(t)
###############################
# Build model
X_list = []
tX_list = []
cols_ = []
X_list.append(XG)
tX_list.append(tXG)
X_list.append(XO)
tX_list.append(tXO)
cols_ += Gcols
cols_ += Ocols
s = sklearn.decomposition.FastICA(n_components=2, random_state=889, tol=0.000001, max_iter=10000).fit(uXPV)
XDR_ = s.transform(XPV)
tXDR_ = s.transform(tXPV)
PDRcols_ = ['PV_ICA_'+str(i) for i in range(XDR_.shape[1])]
cols_ += PDRcols_
XS2 = sklearn.manifold.MDS(n_components=1, random_state=888).fit_transform(uXPV)
XDR_2 = XS2[index_is_labelled,:]
tXDR_2 = np.zeros((tXPV.shape[0],1))
my_ids = uX_indices[np.logical_not(index_is_train)]
my_XS2 = XS2[np.logical_not(index_is_train),:]
for i,uid in enumerate(my_ids):
true_ids = gtm[uid]
for true_id in true_ids:
tXDR_2[true_id] = my_XS2[i]
PDR2cols_ = ['PV_MDS_'+str(i) for i in range(XS2.shape[1])]
cols_ += PDR2cols_
X_ = np.concatenate([XG, XO, XDR_], axis=1)
tX_ = np.concatenate([tXG, tXO, tXDR_], axis=1)
print(cols_)
print(X_.shape)
print(tX_.shape)
clf = sklearn.linear_model.Lasso()
clf.fit(X_, y)
ty1 = clf.predict(tX_)
#######
X_ = np.concatenate([XG, XO, XDR_2], axis=1)
tX_ = np.concatenate([tXG, tXO, tXDR_2], axis=1)
print(cols_)
print(X_.shape)
print(tX_.shape)
clf = sklearn.linear_model.Lasso()
clf.fit(X_, y)
ty0 = clf.predict(tX_)
###############################
X_list = []
tX_list = []
cols_ = []
X_list.append(XG[data_nomissing,:])
tX_list.append(tXG[test_nomissing,:])
X_list.append(XO[data_nomissing,:])
tX_list.append(tXO[test_nomissing,:])
cols_ += Gcols
cols_ += Ocols
s = sklearn.decomposition.FastICA(n_components=2, random_state=890, tol=0.000001, max_iter=100000).fit(uXPM)
XDR_ = s.transform(XPM[data_nomissing,:])
tXDR_ = s.transform(tXPM[test_nomissing,:])
PDRcols_ = ['PM_ICA_'+str(i) for i in range(XDR_.shape[1])]
X_list.append(XDR_)
tX_list.append(tXDR_)
cols_ += PDRcols_
X_ = np.concatenate(tuple(X_list), axis=1)
tX_ = np.concatenate(tuple(tX_list), axis=1)
print(cols_)
print(X_.shape)
print(tX_.shape)
clf = sklearn.linear_model.Lasso()
clf.fit(X_, y[data_nomissing])
ty2 = clf.predict(tX_)
###############################
# Take geometric mean
ty = (ty0 * ty1)**0.5
ty[test_nomissing] = (ty0[test_nomissing] * ty1[test_nomissing] * ty2) ** (1/3)
li = np.isnan(ty)
ty[li] = (ty0[li] + ty1[li]) * 0.5
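# The fractional powers above yield NaN when a product of predictions is
# negative; fall back to the arithmetic mean of the two base models there.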
###############################
#####################################
# Overwrite the revenues of known records
uids = gtestrevenue['Id'].values
revs = gtestrevenue['revenue'].values
for uid,rev in zip(uids,revs):
true_ids = gtm[uid]
for true_id in true_ids:
ty[true_id] = np.round(rev)
#####################################
print(sum(np.isnan(ty)))
print(ty1[1095:1100])
print(ty[1095:1100])
sub = pd.DataFrame(test['Id'])
sub['Prediction'] = ty
sub.to_csv('sub_no_rfa_ICA_doubled3_overwrite.csv', index=False)
| UTF-8 | Python | false | false | 7,913 | py | 48 | no_rfa_doubled3.py | 14 | 0.661945 | 0.645125 | 0 | 261 | 29.291188 | 114 |
v-k-k/GalagaGameTestingPythonOpenCV | 2,027,224,597,261 | dd14cc890deaca44a4131d6581932fddd492b45c | 545b7f7905897cb8a9d8082abb78c19551e62562 | /constants.py | 6d086959d06af36cc724e1c95c2ce214df189e80 | []
| no_license | https://github.com/v-k-k/GalagaGameTestingPythonOpenCV | 9083c7764d23d1bd8480189ce91fc3fdfb42d00f | 9b1f96cc9498877c4a33a7e23acda0602101debf | refs/heads/master | 2023-04-16T12:02:37.491064 | 2021-04-30T10:22:31 | 2021-04-30T10:22:31 | 362,190,353 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from utils import Area, Filters
from dotenv import load_dotenv
import os
load_dotenv()
TESSERACT_EXE = os.environ.get('TESSERACT_PATH')
GAME_SOURCE = os.environ.get('GAME_SOURCE')
DRIVER_PATH = os.environ.get('LOCAL_CHROME_DRIVER_PATH')
BROWSER_PATH = os.environ.get('LOCAL_CHROME_EXECUTABLE_PATH')
DEBUG_MODE = os.environ.get('DEBUG_MODE') == 'true'
PLAYER_AXIS_Y = 0
BINARY_THRESHOLD = 254
CONNECTIVITY = 4
FILTERS = Filters(True, False, False, False)
PLAYER_AREA = Area(411, 420)
ENEMY_AREA = Area(101, 400)
MISSILE_AREA = Area(30, 100)
| UTF-8 | Python | false | false | 546 | py | 23 | constants.py | 20 | 0.730769 | 0.690476 | 0 | 22 | 23.818182 | 61 |
yc-Claire/machine_learning | 18,597,208,410,380 | 5ce221213e987c610b7bec6b13116a3f2ed3505f | 231d783eea300a8c3e6d108df031e6aff7035999 | /1_project/GMM.py | 0bb8c537dfb538fd36fca76c3c1ce2ef0d8fce50 | []
| no_license | https://github.com/yc-Claire/machine_learning | c7b288c4882bfe86acb6397b6404d6cc4b04127e | 6540def874cd4f7325984443ec10e3cc122aafb7 | refs/heads/master | 2022-11-06T23:35:53.415326 | 2020-07-06T10:58:43 | 2020-07-06T10:58:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import torch
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.preprocessing import Normalizer
from sklearn.metrics import accuracy_score
from sklearn.decomposition import PCA
def load_data(batch_size=100):
    train_dataset = dsets.MNIST(root = '../../dataset', # root directory of the data
                               train = True, # use the training split
                               transform = transforms.ToTensor(), # convert to tensor
                               download = False) # do not download from the internet
    test_dataset = dsets.MNIST(root = '../../dataset', # root directory of the data
                              train = False, # use the test split
                              transform = transforms.ToTensor(), # convert to tensor
                              download = False) # do not download from the internet
train_loader = torch.utils.data.DataLoader(
dataset=train_dataset, batch_size = batch_size
)
test_loader = torch.utils.data.DataLoader(
dataset=test_dataset, batch_size=batch_size
)
return train_loader,test_loader
class GMM:
def __init__(self,Data,K,weights = None,means = None,covars = None):
"""
这是GMM(高斯混合模型)类的构造函数
:param Data: 训练数据
:param K: 高斯分布的个数
:param weigths: 每个高斯分布的初始概率(权重)
:param means: 高斯分布的均值向量
:param covars: 高斯分布的协方差矩阵集合
"""
self.Data = Data
self.K = K
if weights is not None:
self.weights = weights
else:
self.weights = np.random.rand(self.K)
            self.weights /= np.sum(self.weights) # normalize
col = np.shape(self.Data)[1]
if means is not None:
self.means = means
else:
self.means = []
for i in range(self.K):
mean = np.random.rand(col)
                #mean = mean / np.sum(mean) # normalize
self.means.append(mean)
if covars is not None:
self.covars = covars
else:
self.covars = []
for i in range(self.K):
cov = np.random.rand(col,col)
                #cov = cov / np.sum(cov) # normalize
                self.covars.append(cov) # cov is an np.array, while self.covars is a list
def Gaussian(self,x,mean,cov):
"""
这是自定义的高斯分布概率密度函数
:param x: 输入数据
:param mean: 均值数组
:param cov: 协方差矩阵
:return: x的概率
"""
dim = np.shape(cov)[0]
        # regularize cov so a singular (zero-determinant) covariance is handled
covdet = np.linalg.det(cov + np.eye(dim) * 0.001)
covinv = np.linalg.inv(cov + np.eye(dim) * 0.001)
xdiff = (x - mean).reshape((1,dim))
        # probability density
prob = 1.0/(np.power(np.power(2*np.pi,dim)*np.abs(covdet),0.5))*\
np.exp(-0.5*xdiff.dot(covinv).dot(xdiff.T))[0][0]
return prob
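    # The value computed above is the multivariate normal density
    #     N(x | mean, cov) = exp(-0.5 (x-mean)^T cov^{-1} (x-mean)) / sqrt((2*pi)^dim |cov|)
    # with a small ridge (0.001 * I) added to cov so the determinant and inverse always exist.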
def GMM_EM(self):
"""
这是利用EM算法进行优化GMM参数的函数
:return: 返回各组数据的属于每个分类的概率
"""
loglikelyhood = 0
oldloglikelyhood = 1
len,dim = np.shape(self.Data)
        # gammas[n][k] is the posterior probability that sample n belongs to component k
gammas = [np.zeros(self.K) for i in range(len)]
while np.abs(loglikelyhood-oldloglikelyhood) > 0.00000001:
oldloglikelyhood = loglikelyhood
# E-step
for n in range(len):
                # respons are the responsibilities (posterior weights) in the EM algorithm
respons = [self.weights[k] * self.Gaussian(self.Data[n], self.means[k], self.covars[k])
for k in range(self.K)]
respons = np.array(respons)
sum_respons = np.sum(respons)
gammas[n] = respons/sum_respons
# M-step
            for k in range(self.K):
                # nk: effective number of samples assigned to component k
                nk = np.sum([gammas[n][k] for n in range(len)])
                # update the mixing weight of each component
                self.weights[k] = 1.0 * nk / len
                # update the mean of the component
                self.means[k] = (1.0/nk) * np.sum([gammas[n][k] * self.Data[n] for n in range(len)], axis=0)
                xdiffs = self.Data - self.means[k]
                # update the covariance matrix of the component
                self.covars[k] = (1.0/nk)*np.sum([gammas[n][k]*xdiffs[n].reshape((dim,1)).dot(xdiffs[n].reshape((1,dim))) for n in range(len)],axis=0)
loglikelyhood = []
for n in range(len):
tmp = [np.sum(self.weights[k]*self.Gaussian(self.Data[n],self.means[k],self.covars[k])) for k in range(self.K)]
tmp = np.log(np.array(tmp))
loglikelyhood.append(list(tmp))
loglikelyhood = np.sum(loglikelyhood)
for i in range(len):
gammas[i] = gammas[i]/np.sum(gammas[i])
self.posibility = gammas
self.prediction = [np.argmax(gammas[i]) for i in range(len)]
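# Minimal usage sketch (assuming `X` is an (n_samples, n_features) array):
#     gmm = GMM(X, K=10)
#     gmm.GMM_EM()
#     hard_labels = gmm.prediction   # argmax component per sample
#     soft_labels = gmm.posibility   # per-sample component probabilities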
def get_label(filepath):
file=open(filepath,'r')
y=[]
for line in file.readlines():
line=line.rstrip('\n')
y.append(int(line))
file.close()
return y
def load_tensor(filepath,pointnum):
X=[]
for i in range(pointnum): # 10000
data = np.load(filepath+'/arr_{}.npz'.format(i))
x = data['arr_0']
# x=x.ravel()
data.close()
X.append(x)
X=np.array(X)
return X
def run():
# batch_size=10000
# train_loader, test_loader = load_data(batch_size)
# for batch_index, (x,y) in enumerate(test_loader):
# x=np.array(x)
# data = x.reshape(batch_size, 784)
    # pca = PCA(n_components=4)  # reduce to 4 dimensions
    # pca.fit(data)  # fit
# data = pca.fit_transform(data)
# label=np.array(y)
# break
filepath = 'LeNet/y2' # y1: 1000X400 y2: 1000X84
data = load_tensor(filepath, pointnum=1000)
print(np.shape(data))
    pca = PCA(n_components=4) # reduce to 4 dimensions
    pca.fit(data) # fit (note: fit_transform below refits anyway)
    data = pca.fit_transform(data)
label=get_label('LeNet/label.txt')
print("Mnist数据集的标签:\n",label)
# 对数据进行预处理
data = Normalizer().fit_transform(data)
# 解决画图的中文乱码问题
mpl.rcParams['font.sans-serif'] = [u'simHei']
mpl.rcParams['axes.unicode_minus'] = False
# 数据可视化
plt.scatter(data[:,0],data[:,1],c = label)
plt.title("Mnist数据集显示")
plt.show()
    # GMM model
    K = 10
    gmm = GMM(data,K)
    gmm.GMM_EM()
    y_pre = gmm.prediction
    print("GMM predictions:\n", y_pre)
    print("GMM accuracy:\n", accuracy_score(label,y_pre))
    plt.scatter(data[:, 0], data[:, 1], c=y_pre)
    plt.title("GMM clustering result")
    plt.show()
if __name__ == '__main__':
run() | UTF-8 | Python | false | false | 7,440 | py | 28 | GMM.py | 11 | 0.521025 | 0.507995 | 0 | 189 | 33.746032 | 150 |
disfear86/class-projects | 19,473,381,753,467 | 4312269e925474c822b3bfc3b779665ed4d6fc23 | c0316e7d525b81be015c68c512a8b590152cf44d | /python/codewars/maskify.py | 2647be75df8abee85b107d4661a00d8926615f07 | []
| no_license | https://github.com/disfear86/class-projects | 73ff183e988e8f6232469d933a27591cc14ef8c7 | d67b08f3529cf7e73495177d02d87d328082fff5 | refs/heads/master | 2021-01-17T18:23:48.369584 | 2017-05-16T17:51:56 | 2017-05-16T17:51:56 | 62,585,100 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def maskify(cc):
    lst = list(cc)
    for i in range(len(lst) - 4):  # mask everything except the last four characters
        lst[i] = '#'
    return ''.join(lst)

card = "4556364607935616"  # avoids shadowing the builtin `str`
print(maskify(card))
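# Expected output for the sample number above: '############5616'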
| UTF-8 | Python | false | false | 210 | py | 8 | maskify.py | 5 | 0.528571 | 0.438095 | 0 | 9 | 22.333333 | 36 |
fybmain/DormitoryBackend | 17,051,020,167,244 | 5093d799d8637c5a5114acece28918d25ac7dd94 | 19c974cbcd526b91d7e0dbe2fadedac553383268 | /DormitoryBackend/src/dormitory.py | b9a1436608f34a9652c0baa46998663f6d61d9c3 | []
| no_license | https://github.com/fybmain/DormitoryBackend | e7f7d2562f8cfa679f1e8ba6ef57574cdf2090e0 | 25bc697a504ae9f360ce459a65333062b3624606 | refs/heads/master | 2020-04-09T17:22:54.386592 | 2019-01-03T05:21:47 | 2019-01-03T05:21:47 | 160,479,238 | 6 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from typing import List
from .util import http, get_request_json, generate_pagination_list
from .util import string_filter, id_filter, foreign_key_filter, get_filter_condition
from .global_obj import app
from .model import Dormitory, Building, ElectricityMeter, WaterMeter
from .permission import get_permission_condition, check_permission_condition, PermissionDenied
dormitory_filter_properties = {
"id": id_filter,
"number": string_filter,
"building": foreign_key_filter,
"electricity_meter": foreign_key_filter,
"water_meter": foreign_key_filter,
}
dormitory_updatable_properties = {
"number": {
"type": "string",
"pattern": "^[0-9]+$",
},
"building": {
"type": "integer",
},
"electricity_meter": {
"type": "integer",
},
"water_meter": {
"type": "integer",
},
}
dormitory_create_properties = {
"number": {
"type": "string",
"pattern": "^[0-9]+$",
},
"building": {
"type": "integer",
},
}
def get_dormitories(filter: dict, allowed: List[str]):
return Dormitory.select().where(
get_filter_condition(filter, Dormitory)
& get_permission_condition(allowed, Dormitory)
)
def generate_dormitory_info(dormitory: Dormitory):
return {
"id": dormitory.id,
"number": dormitory.number,
"building": {
"id": dormitory.building_id,
"name": dormitory.building.name,
},
"electricity_meter": {
"id": dormitory.electricity_meter_id,
"state": dormitory.electricity_meter.state,
"remaining": float(dormitory.electricity_meter.remaining),
},
"water_meter": {
"id": dormitory.water_meter_id,
"state": dormitory.water_meter.state,
"remaining": float(dormitory.water_meter.remaining),
},
}
@app.route("/dormitory/list", methods=["POST"])
def get_dormitory_list():
instance = get_request_json(schema={
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"page": {
"type": "integer",
},
"limit": {
"type": "integer",
},
"filter": {
"type": "object",
"properties": dormitory_filter_properties,
"additionalProperties": False,
},
},
"required": ["page", "limit", "filter"],
"additionalProperties": False,
})
dormitories = get_dormitories(instance["filter"], ["Management", "Self"])
return http.Success(result=generate_pagination_list(
objs=dormitories,
instance_generator=generate_dormitory_info,
page=instance["page"],
limit=instance["limit"]
))
def obj_process(obj: dict):
if "building" in obj:
building_id = obj["building"]
building = Building.get(id=building_id)
check_permission_condition(building, get_permission_condition(["Management"], Building))
if "electricity_meter" in obj:
electricity_meter_id = obj["electricity_meter"]
electricity_meter = ElectricityMeter.get(id=electricity_meter_id)
check_permission_condition(electricity_meter, get_permission_condition(["Management"], ElectricityMeter))
if "water_meter" in obj:
water_meter_id = obj["water_meter"]
water_meter = WaterMeter.get(id=water_meter_id)
check_permission_condition(water_meter, get_permission_condition(["Management"], WaterMeter))
@app.route("/dormitory/update", methods=["POST"])
def update_dormitory_info():
instance = get_request_json(schema={
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"filter": {
"type": "object",
"properties": dormitory_filter_properties,
"additionalProperties": False,
},
"obj": {
"type": "object",
"properties": dormitory_updatable_properties,
"additionalProperties": False,
},
},
"required": ["filter", "obj"],
"additionalProperties": False,
})
obj_process(instance["obj"])
allow_read_dormitory = get_dormitories(instance["filter"], ["Management", "Self"])
if allow_read_dormitory.count() < 1:
raise Dormitory.DoesNotExist()
allow_write_dormitory = get_dormitories(instance["filter"], ["Management"])
if allow_write_dormitory.count() < 1:
raise PermissionDenied()
for dormitory in allow_write_dormitory:
        for key, value in instance["obj"].items():
            setattr(dormitory, key, value)
dormitory.save()
return http.Success()
@app.route("/dormitory/create", methods=["POST"])
def create_dormitory():
instance = get_request_json(schema={
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"obj": {
"type": "object",
"properties": dormitory_create_properties,
"required": list(dormitory_create_properties.keys()),
"additionalProperties": False,
},
},
"required": ["obj"],
"additionalProperties": False,
})
obj_process(instance["obj"])
dormitory = Dormitory()
    for key, value in instance["obj"].items():
        setattr(dormitory, key, value)
electricity_meter = ElectricityMeter(state="OK", remaining=0)
electricity_meter.save()
dormitory.electricity_meter_id = electricity_meter.id
water_meter = WaterMeter(state="OK", remaining=0)
water_meter.save()
dormitory.water_meter_id = water_meter.id
dormitory.save()
return http.Success(result={
"id": dormitory.id,
"electricity_meter": electricity_meter.id,
"water_meter": water_meter.id,
})
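
# Example request body for POST /dormitory/create (field values below are
# illustrative, not part of the original module):
#
#   {"obj": {"number": "101", "building": 1}}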
| UTF-8 | Python | false | false | 6,001 | py | 39 | dormitory.py | 33 | 0.582403 | 0.58007 | 0 | 195 | 29.774359 | 113 |
cdevine49/ml_helpers | 17,111,149,736,247 | dfe620d14cc8b1015ade17cf5c5228703f2b5fe5 | 58d634d2bff5877102a7b7ea3944072b8cb56398 | /ml_helpers/tf_helpers.py | 6bc9b1eedd57401d9a1afd3414e7749759081231 | [
"MIT"
]
| permissive | https://github.com/cdevine49/ml_helpers | ab2aea538e53a78779a44990656b7ac465470ee4 | 7e0be9898822e4efeb736d0678aa9ed7746ea5f2 | refs/heads/master | 2020-04-03T04:07:13.251841 | 2019-04-07T16:28:04 | 2019-04-07T16:28:04 | 155,003,810 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tensorflow as tf
def create_placeholders(n_x, n_y):
X = tf.placeholder(tf.float32, [n_x, None])
Y = tf.placeholder(tf.float32, [n_y, None])
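    # Usage sketch (hypothetical sizes): X, Y = create_placeholders(12, 6)
    # yields X of shape (12, ?) and Y of shape (6, ?), batch dimension open.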
return X, Y | UTF-8 | Python | false | false | 165 | py | 14 | tf_helpers.py | 12 | 0.672727 | 0.648485 | 0 | 6 | 26.666667 | 45 |
OpenBanking-Brasil/ressarcimento | 19,155,554,153,428 | 0667712456916de387a84007a0bb60c1cda4abda | bdd2731705bdb50e58816509c671fddbfd095a28 | /sdks-client/python-sdk-client/swagger_client/__init__.py | 85227557618cf2818b6eb5a432a9920f3109d215 | []
| no_license | https://github.com/OpenBanking-Brasil/ressarcimento | 865ee9757e1d76f8ed9fe5f8e32e171e9a3500ee | 50254340d95f9fae632238b8af422b08a3848b40 | refs/heads/main | 2023-06-07T00:17:51.583921 | 2021-07-01T21:13:59 | 2021-07-01T21:13:59 | 375,832,352 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding: utf-8
# flake8: noqa
"""
    Plataforma de Ressarcimento do Open Banking Brasil (Open Banking Brasil Refund Platform)

    APIs of the Open Banking Brasil refund platform for online communication with the platform. The API documentation below can be used to run the required integration tests.  # noqa: E501
OpenAPI spec version: beta-0.0.1
Contact: suporte-ressarcimento@openbankingbrasil.org.br
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import apis into sdk package
from swagger_client.api.refund_api_v1_api import RefundAPIV1Api
# import ApiClient
from swagger_client.api_client import ApiClient
from swagger_client.configuration import Configuration
# import models into sdk package
from swagger_client.models.refund_notification import RefundNotification
from swagger_client.models.refund_process import RefundProcess
| UTF-8 | Python | false | false | 912 | py | 46 | __init__.py | 17 | 0.796235 | 0.785161 | 0 | 24 | 36.625 | 211 |
ACCarnall/loch_nest_monster | 8,237,747,294,849 | 351a7db767327e342bafe32a77b02b76a1247cef | fc0201220fa4d73c7e68289a80e096fb4215bc3d | /lochnest_monster/nball_sampler.py | 106b426e81299e6d57fffa1488fb1419529570a8 | []
| no_license | https://github.com/ACCarnall/loch_nest_monster | b1e817a60f1afa37ca0c65af056b169e8498cc29 | 166784bfc81f15d1af52fc19124c7d43be2f9b8a | refs/heads/master | 2020-03-15T18:52:31.849544 | 2018-09-03T10:16:17 | 2018-09-03T10:16:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import print_function, division, absolute_import
import numpy as np
from .bounds import nballs, nballs_fill_frac
from .basic_sampler import basic_sampler
class nball_sampler(basic_sampler):
""" Nested sampling implementing the nballs boundary method. This
uses a nearest-neighbours algorithm to draw spheres around each live
point reaching some fraction of the way to its kth nearest neighbour
then samples from within those spheres.
Parameters
----------
lnlike : function
A function which takes an array of parameter values and returns
the natural log of the likelihood at that point.
prior_trans : function
A function which transforms a sample from the unit cube to the
prior volume you wish to sample.
n_dim : int
The number of free parameters you wish to fit.
n_live : int
The number of live points you wish to use.
stop_frac : float
The fraction of the evidence you wish to integrate up to. This
defaults to 0.9.
verbose: bool
Print progress updates as sampling takes place.
live_plot : bool
Show a live-updating plot of the live points during sampling.
use_box : bool
Also constrain samples to be drawn from an n-dimensional box
drawn around the live points. Defaults to False.
box_expansion : float
If also using a bounding box, the volume of the box is expanded
by this factor.
"""
def __init__(self, lnlike, prior_trans, n_dim, n_live=400, stop_frac=0.99,
verbose=True, live_plot=False, use_box=False,
box_expansion=1.):
basic_sampler.__init__(self, lnlike, prior_trans, n_dim, n_live=n_live,
stop_frac=stop_frac, verbose=verbose,
live_plot=live_plot)
self.use_box = use_box
self.box_expansion = box_expansion
# Update the bound every time the volume decreases by 10 percent
self.update_interval = int(0.1*self.n_live)
self.update_bound()
def update_bound(self):
""" Update the bounding object to draw points within. """
if not self.n_samples % self.update_interval:
n_to_sample = int(10*self.update_interval/self.efficiency)
self.bound = nballs(self.live_cubes, n_to_sample=n_to_sample,
use_box=self.use_box,
box_expansion=self.box_expansion)
def draw_new_point(self):
""" Select a new point from the prior within the bound. """
while True:
new_cube = self.bound.draw_point()
if new_cube.max() < 1 and new_cube.min() > 0:
break
return new_cube
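
# Minimal usage sketch (the toy likelihood and prior below are illustrative,
# not part of this module; the sampling loop itself lives in basic_sampler):
#
#   import numpy as np
#   lnlike = lambda x: -0.5 * np.sum(((x - 0.5) / 0.1) ** 2)
#   prior_trans = lambda u: u              # unit cube -> unit square prior
#   sampler = nball_sampler(lnlike, prior_trans, n_dim=2, n_live=200)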
| UTF-8 | Python | false | false | 2,798 | py | 15 | nball_sampler.py | 14 | 0.619728 | 0.613653 | 0 | 87 | 31.16092 | 79 |
hardikkhurana19/spy_chat | 7,610,682,086,749 | 9903307f97189aaf824ac438862ed6206f38bf87 | 783edb76ccd13d169cfda8affd4b304931c7ab9e | /Spy_Chat/select_friend.py | 51db0883dfaf5459a9d82020b5070feb5e559649 | []
| no_license | https://github.com/hardikkhurana19/spy_chat | c66c913f2bc40eec0b6611a7c04019197b648ff1 | 503b21ac8c893fdfd3d2b44b32e49bb1ba5de92a | refs/heads/master | 2021-01-02T09:39:14.437830 | 2017-09-16T16:09:27 | 2017-09-16T16:09:27 | 99,266,762 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from globals import friends
from termcolor import colored,cprint
def select_friend():
counter = 1
for friend in friends:
print str(counter)+". " + friend['name']
counter += 1
user_input = int(raw_input(colored("Choose the friend\n", 'yellow')))
    if 1 <= user_input < counter:  # valid menu choices are 1..len(friends)
        return user_input - 1
    else:
        cprint("Wrong choice", "red")
        return 1
| UTF-8 | Python | false | false | 399 | py | 14 | select_friend.py | 14 | 0.611529 | 0.601504 | 0 | 16 | 23.9375 | 73 |
kumaraadi/AIcodes | 14,224,931,699,194 | aa4ff9b90817d1f47833a5eedcb8ba748d34ee47 | 21405f36d9b1ddcb3a20431b9b2d3a4123cea6f4 | /Node.py | 48fb19ffdf835812fda4d832aced87b7a08dfbb6 | []
| no_license | https://github.com/kumaraadi/AIcodes | e503f8b5823eb50885aef72189eb3150f5843b53 | cedb3e666d81bf09768e7580845b35dcdeafc0fe | refs/heads/master | 2020-05-07T15:55:12.377177 | 2019-04-10T20:34:15 | 2019-04-10T20:34:15 | 180,659,632 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class Node:
    """Search-tree node: a board state plus bookkeeping for tree traversal."""

    def __init__(self, state, heuristic, parent, depth, node_num, visited):
        self.children = []
        self.state = state          # 2-D character array (board state)
        self.heuristic = heuristic
        self.parent = parent
        self.depth = depth
        self.nodeNum = node_num
        self.visited = visited

    def addchild(self, n):
        self.children.append(n)
| UTF-8 | Python | false | false | 482 | py | 2 | Node.py | 2 | 0.578838 | 0.578838 | 0 | 19 | 23.842105 | 99 |
pfisher3/kegmeter | 19,018,115,206,866 | 25bf837a48d6a4ae19a53c21d581f451207474b3 | 7ce40ea908478c296d34bcf566796a6221ecf277 | /kegmeter-app/kegmeter/app/Interface.py | 019c92d011ffc1341096eadb567941303c77d28d | [
"MIT"
]
| permissive | https://github.com/pfisher3/kegmeter | b73c74e88de19221d092d803384cd56df749837b | edd27ed824efb26dc9dc3abecb787646ba2ac9a6 | refs/heads/master | 2020-06-16T08:27:20.775059 | 2014-12-17T20:56:53 | 2014-12-17T20:56:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from PIL import Image, ImageTk
from StringIO import StringIO
import Tkinter
import base64
import colormath.color_objects
import colormath.color_conversions
import logging
import md5
import os
import pkg_resources
import re
import requests
import threading
import time
import ttk
from kegmeter.common import Config, Beer, Checkin, DBClient
pbu_file = pkg_resources.resource_filename(__name__, "images/pbu_40_grey.png")
highlight_color = "#ffff6f"
class ImageLabel(object):
def __init__(self, *args, **kwargs):
self.label = Tkinter.Label(*args, **kwargs)
def pack(self, *args, **kwargs):
self.label.pack(*args, **kwargs)
def load_from_url(self, url, size=None):
logging.debug("loading image from URL: {}".format(url))
try:
imgreq = requests.get(url)
imgreq.raise_for_status()
pil_image = Image.open(StringIO(imgreq.content))
if size is not None:
pil_image.thumbnail(size, Image.ANTIALIAS)
self.image = ImageTk.PhotoImage(pil_image)
self.label.config(image=self.image)
except Exception as e:
logging.error("Couldn't load image: {}".format(e))
class TapDisplay(object):
pack_options = {
"frame": { "side": Tkinter.LEFT, "expand": True, "fill": Tkinter.BOTH, "pady": 10, "padx": 5 },
"beer_name": { "pady": (30, 0) },
"beer_style": {},
"images": {},
"brewery_image": { "side": Tkinter.LEFT, "padx": 10 },
"beer_image": { "side": Tkinter.LEFT, "padx": 10 },
"tap_num": { "side": Tkinter.BOTTOM, "fill": Tkinter.X },
"pct_full_meter": { "side": Tkinter.BOTTOM, "fill": Tkinter.X, "padx": 10, "pady": 20 },
"abv": { "side": Tkinter.BOTTOM },
"brewery_loc": { "side": Tkinter.BOTTOM },
"brewery_name": { "side": Tkinter.BOTTOM },
"beer_description": { "expand": True, "fill": Tkinter.BOTH, "padx": 10 },
"amount_poured_frame": { "expand": True, "fill": Tkinter.Y },
"amount_poured_number": { "side": Tkinter.LEFT, "anchor": Tkinter.NW },
"amount_poured_text": { "side": Tkinter.LEFT, "anchor": Tkinter.NW },
}
def __init__(self, tap_id, parent):
self.tap_id = tap_id
self.beer_id = None
self.active = False
self.frame = Tkinter.Frame(parent, borderwidth=1, relief=Tkinter.GROOVE)
self.pack("frame")
self.frame.pack_propagate(0)
# From top down
self.beer_name = Tkinter.Label(self.frame, text="<beer name>", font=("PT Sans", 24, "bold"))
self.beer_style = Tkinter.Label(self.frame, text="<beer style>", font=("PT Sans", 17))
self.images = Tkinter.Frame(self.frame, pady=50)
self.brewery_image = ImageLabel(self.images, background="#dfdfdf", height=100, width=100)
self.beer_image = ImageLabel(self.images, background="#dfdfdf", height=100, width=100)
self.pack("beer_name", "beer_style", "images", "brewery_image", "beer_image")
# From bottom up
self.tap_num = Tkinter.Label(self.frame, text=tap_id, font=("PT Sans", 16, "bold"))
self.pct_full_meter = ttk.Progressbar(self.frame, maximum=1.0)
self.abv = Tkinter.Label(self.frame, text="0.0%", font=("PT Sans", 20, "bold"), pady=10)
self.brewery_loc = Tkinter.Label(self.frame, text="<brewery location>", font=("PT Sans", 14))
self.brewery_name = Tkinter.Label(self.frame, text="<brewery name>", font=("PT Sans", 18, "bold"))
self.pack("tap_num", "pct_full_meter", "abv", "brewery_loc", "brewery_name")
# Description or amount poured gets remaining space in between
self.beer_description = Tkinter.Text(self.frame, font=("PT Sans", 12), borderwidth=0, wrap=Tkinter.WORD, pady=20)
self.beer_description.tag_config("description", justify=Tkinter.CENTER)
self.amount_poured_frame = Tkinter.Frame(self.frame, pady=20, background=highlight_color)
self.amount_poured_number = Tkinter.Label(self.amount_poured_frame, font=("PT Sans", 36, "bold"), background=highlight_color)
self.amount_poured_text = Tkinter.Label(self.amount_poured_frame, font=("PT Sans", 36), background=highlight_color, text=" ounces poured")
self.pack("beer_description", "amount_poured_number", "amount_poured_text")
self.set_background("#ffffff")
def pack(self, *obj_names):
for obj_name in obj_names:
getattr(self, obj_name).pack(**self.pack_options[obj_name])
def unpack(self, *obj_names):
for obj_name in obj_names:
getattr(self, obj_name).pack_forget()
def set_background(self, color_hex):
for obj in ["frame", "beer_name", "beer_style", "images", "beer_description", "brewery_name", "brewery_loc", "abv"]:
getattr(self, obj).config(background=color_hex)
color = colormath.color_objects.sRGBColor.new_from_rgb_hex(color_hex)
tap_num_color = colormath.color_conversions.convert_color(color, colormath.color_objects.HSLColor)
tap_num_color.hsl_l -= 0.1
tap_num_color = colormath.color_conversions.convert_color(tap_num_color, colormath.color_objects.sRGBColor)
self.tap_num.config(background=tap_num_color.get_rgb_hex())
def update(self, tap):
self.pct_full_meter.config(value=tap["pct_full"])
if tap["beer_id"] == self.beer_id:
return
if not tap["beer_id"]:
self.beer_name.config(text="Empty")
self.beer_style.config(text="")
self.unpack("images", "brewery_name", "brewery_loc", "beer_style", "abv", "pct_full_meter", "beer_description")
self.set_background("#dfdfdf")
return
try:
beer = Beer.new_from_id(tap["beer_id"])
except Exception as e:
logging.error("Couldn't look up beer ID {}: {}".format(tap["beer_id"], e))
return
self.beer = beer
self.pack("images", "brewery_name", "brewery_loc", "beer_style", "abv", "pct_full_meter", "beer_description")
self.set_background("#ffffff")
self.beer_name.config(text=beer.beer_name)
self.beer_style.config(text=beer.beer_style)
self.brewery_name.config(text=beer.brewery_name)
self.brewery_loc.config(text=beer.brewery_loc)
self.abv.config(text="{}%".format(beer.abv))
self.beer_description.delete(1.0, Tkinter.END)
self.beer_description.insert(Tkinter.END, self.beer.description, "description")
self.brewery_image.load_from_url(beer.brewery_label, (100, 100))
self.beer_image.load_from_url(beer.beer_label, (100, 100))
self.beer_id = tap["beer_id"]
def update_active_tap(self, tap):
self.amount_poured = tap.pulses * Config.get("units_per_pulse")[str(tap.tap_id)]
self.amount_poured_number.config(text="{:.2f}".format(self.amount_poured))
if self.active:
return
logging.debug("making tap {} active".format(self.tap_id))
self.active = True
self.beer_description.pack_forget()
self.pack("amount_poured_frame")
self.set_background(highlight_color)
def make_inactive(self):
if not self.active:
return
logging.debug("making tap {} inactive".format(self.tap_id))
self.active = False
self.amount_poured = None
self.amount_poured_frame.pack_forget()
self.pack("beer_description")
self.set_background("#ffffff")
class CheckinDisplay(object):
def __init__(self, parent):
self.checkin_id = None
self.time_since = None
self.frame = Tkinter.Frame(parent, borderwidth=1, relief=Tkinter.GROOVE)
self.frame.pack(side=Tkinter.LEFT, expand=True, fill=Tkinter.BOTH, padx=5, pady=10)
self.frame.pack_propagate(1)
self.avatar_image = ImageLabel(self.frame, height=100, width=100, borderwidth=1, relief=Tkinter.GROOVE)
self.avatar_image.pack(side=Tkinter.LEFT, pady=5, padx=5)
self.description_frame = Tkinter.Frame(self.frame)
self.description_frame.pack(side=Tkinter.LEFT, expand=True, fill=Tkinter.BOTH, padx=5, pady=10)
self.description_frame.pack_propagate(0)
self.description = Tkinter.Text(self.description_frame, font=("PT Sans", 11), borderwidth=0, wrap=Tkinter.WORD)
self.description.pack(fill=Tkinter.BOTH)
self.description.tag_config("b", font=("PT Sans", 11, "bold"))
self.description.tag_config("i", font=("PT Sans", 11, "italic"))
def update(self, checkin):
if checkin.checkin_id == self.checkin_id and checkin.time_since == self.time_since:
return
if checkin.checkin_id != self.checkin_id:
self.avatar_image.load_from_url(checkin.user_avatar, (100, 100))
self.checkin_id = checkin.checkin_id
self.time_since = checkin.time_since
self.description.delete(1.0, Tkinter.END)
self.description.insert(Tkinter.END, checkin.user_name, "b")
self.description.insert(Tkinter.END, " enjoyed a ")
self.description.insert(Tkinter.END, checkin.beer.beer_name, "b")
self.description.insert(Tkinter.END, " by ")
self.description.insert(Tkinter.END, checkin.beer.brewery_name, "b")
self.description.insert(Tkinter.END, "\n")
self.description.insert(Tkinter.END, checkin.time_since, "i")
class KegMeter(object):
def __init__(self, kegmeter_status):
self.kegmeter_status = kegmeter_status
self.checkins = None
def initialize_window(self):
self.window = Tkinter.Tk()
self.window.attributes("-fullscreen", True)
self.window.tk_setPalette(background="White")
self.window.rowconfigure(1, weight=1)
self.title = Tkinter.Label(text="On Tap", font=("PT Sans", 32, "bold"), background="#cfcfcf", borderwidth=1, relief=Tkinter.GROOVE)
self.title.pack(fill=Tkinter.X)
# Taps
self.tap_container = Tkinter.Frame(background="#bfbfc7", padx=10)
self.tap_container.pack(expand=True, fill=Tkinter.BOTH)
self.taps = dict()
for i, tap in enumerate(DBClient.get_taps()):
self.taps[tap["tap_id"]] = TapDisplay(tap["tap_id"], self.tap_container)
# Checkins
self.checkin_container = Tkinter.Frame(background="#dfe7ef", borderwidth=1, relief="sunken")
self.checkin_container.pack(fill=Tkinter.X)
self.checkin_displays = []
for i in range(Config.get("num_checkins")):
self.checkin_displays.append(CheckinDisplay(self.checkin_container))
self.powered_image_pil = Image.open(pbu_file)
self.powered_image = ImageTk.PhotoImage(self.powered_image_pil)
self.powered_image_container = Tkinter.Label(self.checkin_container, height=40, width=166, image=self.powered_image, background="#dfe7ef")
self.powered_image_container.pack(side=Tkinter.RIGHT, padx=10)
def update_active_taps(self):
for tap in self.kegmeter_status.tap_statuses.values():
if tap.is_active():
self.taps[tap.tap_id].update_active_tap(tap)
else:
self.taps[tap.tap_id].make_inactive()
def update_tap_info(self):
for tap in DBClient.get_taps():
self.taps[tap["tap_id"]].update(tap)
def update_checkin_display(self):
if self.checkins is not None:
for checkin, display in zip(self.checkins, self.checkin_displays):
display.update(checkin)
def update_checkins(self):
self.checkins = Checkin.get_latest()
def repeat_call(self, interval, target):
target()
thread = threading.Timer(interval, self.repeat_call, [interval, target])
thread.start()
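    # Note: repeat_call runs `target` once immediately and then re-arms a new
    # threading.Timer on every invocation, so `target` keeps firing every
    # `interval` seconds for the lifetime of the process.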
def main(self):
self.initialize_window()
self.repeat_call(60.0, self.update_tap_info)
self.repeat_call(120.0, self.update_checkins)
self.repeat_call(15.0, self.update_checkin_display)
self.listener = threading.Thread(target=self.update_listener)
self.listener.daemon = True
self.listener.start()
Tkinter.mainloop()
def shutdown(self):
logging.error("Interface exiting")
self.window.quit()
def update_listener(self):
while not self.kegmeter_status.interrupt_event.is_set():
self.kegmeter_status.tap_update_event.wait()
self.kegmeter_status.tap_update_event.clear()
self.update_active_taps()
self.shutdown()
| UTF-8 | Python | false | false | 12,579 | py | 30 | Interface.py | 21 | 0.628826 | 0.617299 | 0 | 314 | 39.06051 | 146 |
c15314356/FYP_Python | 1,030,792,155,517 | a34a28f26a53e29759b65bb52c25e2f06e23813d | bbe4c6a1a9f5c4a62c0dd451d307dca7c04a8805 | /CrimeStatisticFiles/generate_crime_type_stats.py | cd3bd5cbede24dc166087e770c7e7ed86c387935 | []
| no_license | https://github.com/c15314356/FYP_Python | 5cc0065d208c0c92b2dc528fad1447c2b44a335c | 1b919ad2b88da4f3a13e0b72b89805584d2a2ceb | refs/heads/master | 2021-10-26T10:52:47.314441 | 2019-04-12T05:43:24 | 2019-04-12T05:43:24 | 159,813,532 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import logging
import pandas as pd
from cassandra.cluster import Cluster
import math
FILEPATH = './data/'
dataset_location = FILEPATH + '2017-01-city-of-london-street.csv'
dataset = pd.read_csv(dataset_location, header=None)
'''
Structure of dataframe
Crime ID 0
Month 1
Reported by 2
Falls within 3
Longitude 4
Latitude 5
Location 6
LSOA code 7
LSOA name 8
Crime type 9
Last outcome category 10
Context 11
'''
columns_names = ('Crime ID',
'Month',
'Reported by',
'Falls within',
'Longitude',
'Latitude',
'Location',
'LSOA code',
'LSOA name',
'Crime type',
'Last outcome category',
'Context')
# crime_statistics = dataset.describe()
# crime_statistics.columns = columns_names
# crime_statistics = crime_statistics.T
# crime_statistics.to_csv(FILEPATH + 'crime_statistics.csv', encoding='utf-8')
# print(dataset[9].describe())
# print(dataset.groupby(9).size())
crime_type_statistics = dataset.groupby(9).size()
crime_type_statistics.to_csv(FILEPATH + 'crime_type_statistics.csv', encoding='utf-8')
| UTF-8 | Python | false | false | 1,212 | py | 746 | generate_crime_type_stats.py | 10 | 0.611386 | 0.590759 | 0 | 48 | 24.229167 | 86 |
divyachandana/objectDetectionKeras | 12,086,038,005,968 | c1bbee13341502fd549f35d1bdbeab107ecb8a80 | c683597057303450536b6299adb22a611007b86c | /generateImageSet.py | 1ec9c78dd9e04986b76a6010cffe8016d053d448 | [
"MIT"
]
| permissive | https://github.com/divyachandana/objectDetectionKeras | a69b0d9233f2e2c31832ad4a029c1e679148c7de | 79d89fbf6684c451676c7a561637bb130b2c7883 | refs/heads/master | 2022-05-24T19:07:25.369044 | 2020-04-28T20:22:49 | 2020-04-28T20:22:49 | 259,394,773 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# _*_ coding:utf-8 _*_
# Generates the train/test split txt files in PASCAL VOC format
# Author:jefby
# Email: jef199006@gmail.com
import os
import random
import glob
# fraction of all data assigned to the trainval set
trainval_percent = 0.9
# fraction of the trainval data assigned to the train set
train_percent = 1
xmlfilepath = 'data/VOC/Annotations'
txtsavepath = 'data/VOC/ImageSets/Main'
total_xml = glob.glob(os.path.join(xmlfilepath, '*.xml'))
num = len(total_xml)
indices = range(num)
tv = int(num * trainval_percent)
tr = int(tv * train_percent)
trainval = random.sample(indices, tv)
train = random.sample(trainval, tr)
ftrainval = open('data/VOC/ImageSets/Main/trainval.txt', 'w')
ftest = open('data/VOC/ImageSets/Main/test.txt', 'w')
ftrain = open('data/VOC/ImageSets/Main/train.txt', 'w')
fval = open('data/VOC/ImageSets/Main/val.txt', 'w')
for i in indices:
name=os.path.basename(total_xml[i])[:-4]+'\n'
if i in trainval:
ftrainval.write(name)
if i in train:
ftrain.write(name)
else:
fval.write(name)
else:
ftest.write(name)
ftrainval.close()
ftrain.close()
fval.close()
ftest.close() | UTF-8 | Python | false | false | 1,137 | py | 34 | generateImageSet.py | 1 | 0.685341 | 0.67507 | 0 | 46 | 22.304348 | 61 |
sqeu/xinhualy | 17,772,574,683,278 | 7801bca3b25831ffe7257c445a03b6ca999d70bc | f4a7b03a0aad58b844cc1af02595591cc311708b | /xinhualy.py | 7bc911067405488e873390dea6a8b4c6d33b30b5 | []
| no_license | https://github.com/sqeu/xinhualy | 23cdeb1459ee0bff5fa97de0d5bd88114529b3ba | 2049f4c0d6975cf8ccbe62eb57a3fbb1ddd17436 | refs/heads/master | 2022-09-13T15:52:16.660337 | 2019-05-10T22:42:49 | 2019-05-10T22:42:49 | 181,925,868 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 17 11:13:26 2019
@author: S80240
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from bs4 import BeautifulSoup
import hashlib
import pprint
import random
import requests
import time
_GOOGLEID = hashlib.md5(str(random.random()).encode('utf-8')).hexdigest()[:16]
_COOKIES = {'GSP': 'ID={0}:CF=4'.format(_GOOGLEID)}
_HEADERS = {
'accept-language': 'en-US,en',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/41.0.2272.76 Chrome/41.0.2272.76 Safari/537.36',
'accept': 'text/html,application/xhtml+xml,application/xml'
}
_SESSION = requests.Session()
_ENCODING='utf-8'
def _get_page(pagerequest):
"""Return the data for a page on telemetro.com"""
# Note that we include a sleep to avoid overloading the scholar server
time.sleep(2+random.uniform(0, 6))
_GOOGLEID = hashlib.md5(str(random.random()).encode('utf-8')).hexdigest()[:16]
_COOKIES = {'GSP': 'ID={0}:CF=4'.format(_GOOGLEID)}
resp_url = requests.get(pagerequest)
if resp_url.status_code == 200:
return resp_url.text
else:
raise Exception('Error: {0} {1}'.format(resp_url.status_code, resp_url.reason))
def _get_soup(pagerequest):
"""Return the BeautifulSoup for a page"""
html = _get_page(pagerequest)
return BeautifulSoup(html, 'lxml')
def _search_in_soup(soup):
"""Generator that returns Publication objects from the search page"""
return Publication(soup)
def search_pubs_url(url):
"""Search by scholar query and return a generator of Publication objects"""
#url='http://spanish.xinhuanet.com/2015-08/07/c_134489495.htm'
soup = _get_soup(url)
return _search_in_soup(soup)
def _body_in_image_soup(soup,body):
next_soup = _get_soup(soup.findAll("a",{"class": 'nextpage'})[-1]['href'])
for domPC in next_soup.findAll("div", {"class": 'domPC'}):
for row in domPC.findAll('p'):
if not row.find('a'):
body = body +" <br>"+ row.text
#next_soup.findAll("a",{"class": 'nextpage'})
if next_soup.find("img",{"src": lambda L: L and L.endswith('xia.gif')}):
body = _body_in_image_soup(next_soup,body)
return body
def _body_in_soup(soup):
"""Generator that returns Publication objects from the search page"""
summary = soup.find("meta", {"name": 'description'})['content']
body= ""
#soup.findAll('p')
for domPC in soup.findAll("div", {"class": 'domPC'}):
for row in domPC.findAll('p'):
if not row.find('a'):
if summary =="":
summary=row.text
body = body +" <br>"+ row.text
if soup.find("a",{"class": 'nextpage'}):
body = _body_in_image_soup(soup,body)
return summary,body
def clean_bad_chars(text):
bad_chars=['\r','\n']
for bad_char in bad_chars:
text=text.replace(bad_char,'')
return text
class Publication(object):
"""Returns an object for a single publication"""
def __init__(self, __data):
self.bib = dict()
self.bib['title'] = clean_bad_chars(__data.find("h1").text)
if __data.find("meta",{"name": 'section'}):
self.bib['section'] = __data.find("meta",{"name": 'section'})['content']
else:
self.bib['section']=''
if __data.find("meta",{"name": 'pubdate'}):
self.bib['date'] = clean_bad_chars(__data.find("meta",{"name": 'pubdate'})['content'])
else:
self.bib['date']=''
summary,body=_body_in_soup(__data)
self.bib['summary']=clean_bad_chars(summary)
self.bib['body']=clean_bad_chars(body)
def __str__(self):
return pprint.pformat(self.__dict__)
##############################
import codecs, json
import pandas as pd
from tqdm import tqdm
import requests
main_path='C:\\Users\\S80240\\Desktop\\Everis\\IA\\scrapping\\Twitter\\'
tweet_files=[
'tweets-2015.json',
'tweets-2016.json',
'tweets-2017.json',
'tweets-2018.json',
'tweets-2019-04-15.json'
]
def unshorten_url(session, url):
#time.sleep(2+random.uniform(0, 6))
resp_url=url
try:
resp = session.head(url, allow_redirects=True)
resp_url=resp.url
except Exception as e:
print(e)
print(url)
return resp_url
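
# Illustrative call (the short link below is made up):
#
#   s = requests.Session()
#   unshorten_url(s, 'http://xhne.ws/abcde')
#   # -> the expanded article URL, or the input URL if the request fails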
session = requests.Session()
for tweet_file in tweet_files:
links=[]
with codecs.open(main_path+tweet_file, 'r', 'utf-8') as f:
tweets = json.load(f, encoding='utf-8')
list_tweets = [list(elem.values()) for elem in tweets]
list_columns = list(tweets[0].keys())
tweets_df = pd.DataFrame(list_tweets, columns=list_columns)
for index, tweet in tqdm(tweets_df.iterrows()):
text = tweet['text'].replace('\n',' ').replace(u'\xa0', u' ')
text_list = text.split(' ')
for word in text_list:
if 'xhne.ws' in word:
index = word.find('http')
link = unshorten_url(session,word[index:index+20])
if link.find('spanish.xinhuanet.com')>0:
links.append(link)
len(links)
for link in tqdm(links):
q= search_pubs_url(link)
f= open("..//xinhua_"+tweet_file+".txt","a+")#,errors = 'ignore'
try:
f.write(q.bib['title']+"|"+q.bib['section']+"|"+q.bib['date']+"|"+link+"|"+q.bib['summary']+"|"+q.bib['body']+"\n")
except:
f_e= open("..//xinhua_"+tweet_file+"_exception.txt","a+")
f_e.write(q.bib['title']+"|"+q.bib['section']+"|"+q.bib['date']+"|"+link+"\n")
f_e.close()
f.close() | UTF-8 | Python | false | false | 6,053 | py | 3 | xinhualy.py | 2 | 0.551958 | 0.52949 | 0.000165 | 176 | 32.352273 | 154 |
polyswarm/polyswarm-artifact | 16,655,883,210,640 | 2accb30bf8a415c9da7800f4a2a1421bb197eb6a | d8492e2ebe444157a7d85440d878f4f9c0163641 | /src/polyswarmartifact/__init__.py | 44cae5301619285ea2a973bd01af2e6e50cbcaa9 | [
"MIT"
]
| permissive | https://github.com/polyswarm/polyswarm-artifact | 4c5c1d84e0634486a9992fdc49d3054eeeae900b | 6e4f36fe7b6ec10fc6e94806b8f4b308619ad025 | refs/heads/master | 2022-04-27T13:34:56.076922 | 2022-03-24T17:29:02 | 2022-03-24T17:29:02 | 187,640,216 | 2 | 1 | MIT | false | 2021-10-14T20:42:50 | 2019-05-20T12:54:17 | 2021-10-14T19:14:50 | 2021-10-14T20:42:49 | 135 | 1 | 1 | 1 | Python | false | false | from .artifact_type import ArtifactType
from .exceptions import PolyswarmArtifactException, DecodeError
__version__ = '1.4.4'
| UTF-8 | Python | false | false | 127 | py | 18 | __init__.py | 13 | 0.795276 | 0.771654 | 0 | 4 | 30.75 | 63 |
hluk/pdc-client | 9,689,446,237,170 | 358cab01f75bec9c4b0e9b48cc07ff2caebacb85 | 7cabf7117a74d38e92d1f7fad1cdfb188423186c | /tests/group_resource_permissions/tests.py | 5e1b22ec51cc5feb43b9586c946b8ac183f0dd61 | [
"MIT"
]
| permissive | https://github.com/hluk/pdc-client | a20a88ada43a521911956269b421649d45230183 | 7236fd8b72e675ebb321bbe337289d9fbeb6119f | refs/heads/master | 2021-06-27T02:45:23.205324 | 2018-03-29T11:28:21 | 2018-03-29T13:17:54 | 95,637,783 | 0 | 0 | null | true | 2017-06-28T06:45:44 | 2017-06-28T06:45:44 | 2017-04-04T19:38:22 | 2017-06-23T06:57:34 | 604 | 0 | 0 | 0 | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from pdc_client.test_helpers import CLITestCase
from pdc_client.runner import Runner
class GroupResourcePermissionTestCase(CLITestCase):
def setUp(self):
self.runner = Runner()
self.runner.setup()
def _setup_list(self, api):
api.add_endpoint('auth/group-resource-permissions', 'GET', [
{
"id": x,
"group": "group" + str(x),
"resource": "arches",
"permission": "create"
}
for x in range(1, 30)
])
def test_list(self, api):
self._setup_list(api)
with self.expect_output('list.txt'):
self.runner.run(['group-resource-permissions', 'list', '--resource', 'arches'])
self.assertEqual(api.calls['auth/group-resource-permissions'],
[('GET', {'page': 1, 'resource': 'arches'}),
('GET', {'page': 2, 'resource': 'arches'})])
def test_list_json(self, api):
self._setup_list(api)
with self.expect_output('list.json', parse_json=True):
self.runner.run(['--json', 'group-resource-permissions', 'list', '--resource', 'arches'])
self.assertEqual(api.calls['auth/group-resource-permissions'],
[('GET', {'page': 1, 'resource': 'arches'}),
('GET', {'page': 2, 'resource': 'arches'})])
def _setup_detail(self, api):
obj = {
"id": 1,
"group": "engops",
"resource": "arches",
"permission": "create"
}
api.add_endpoint('auth/group-resource-permissions/1', 'GET', obj)
api.add_endpoint('auth/group-resource-permissions/1', 'PATCH', obj)
api.add_endpoint('auth/group-resource-permissions', 'POST', obj)
api.add_endpoint('auth/group-resource-permissions/1', 'DELETE', {})
def test_info(self, api):
self._setup_detail(api)
with self.expect_output('detail.txt'):
self.runner.run(['group-resource-permissions', 'info', '1'])
self.assertEqual(api.calls['auth/group-resource-permissions/1'], [('GET', {})])
def test_info_json(self, api):
self._setup_detail(api)
with self.expect_output('detail.json', parse_json=True):
self.runner.run(['--json', 'group-resource-permissions', 'info', '1'])
self.assertEqual(api.calls['auth/group-resource-permissions/1'], [('GET', {})])
def test_create(self, api):
self._setup_detail(api)
with self.expect_output('detail.txt'):
self.runner.run(['group-resource-permissions', 'create',
'--group', 'engops',
'--resource', 'arches',
'--permission', 'create'
])
expected_data = {
"group": "engops",
"resource": "arches",
"permission": "create"
}
self.assertEqual(api.calls['auth/group-resource-permissions'],
[('POST', expected_data)])
self.assertEqual(api.calls['auth/group-resource-permissions/1'],
[('GET', {})])
def test_update(self, api):
self._setup_detail(api)
with self.expect_output('detail.txt'):
self.runner.run(['group-resource-permissions', 'update', '1',
'--group', 'engops',
'--resource', 'arches',
'--permission', 'create'])
self.assertEqual(api.calls['auth/group-resource-permissions/1'],
[('PATCH', {'resource': 'arches',
'permission': 'create',
'group': 'engops'}),
('GET', {})])
def test_delete(self, api):
api.add_endpoint('auth/group-resource-permissions', 'DELETE', {})
self.runner.run(['group-resource-permissions', 'delete', '1'])
self.assertEqual(api.calls['auth/group-resource-permissions'], [('DELETE', [1])])
def test_delete_many(self, api):
api.add_endpoint('auth/group-resource-permissions', 'DELETE', {})
self.runner.run(['group-resource-permissions', 'delete', '1', '2'])
self.assertEqual(api.calls['auth/group-resource-permissions'], [('DELETE', [1, 2])])
| UTF-8 | Python | false | false | 4,509 | py | 123 | tests.py | 48 | 0.522289 | 0.515857 | 0 | 107 | 41.140187 | 101 |
tanveerahmad1517/grimesengineering | 15,719,580,343,759 | a256affc3f6c463d652108b5d7af342fa1fa5611 | dbecbfe77d32abb7c56696eda44ffde6cb7cb396 | /ga/services/views.py | eafa2b8bc52ce10e87e16b704dc06ae7ef659e80 | []
| no_license | https://github.com/tanveerahmad1517/grimesengineering | 1c3cb87f65d0c32c55365e330069bed7c9f76dac | 88a539c83d2af899c89566018011d7e1cb533005 | refs/heads/master | 2020-03-30T01:43:06.462646 | 2018-09-08T15:41:55 | 2018-09-08T15:41:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render_to_response
from django.template.context import Context, RequestContext
from ga.services.models import Department
from django.http.response import HttpResponseRedirect
from django.core.urlresolvers import reverse
from ga.jobs.models import Job
#===============================================================================
# HOME PAGE
#===============================================================================
def services(request, department_id, department_slug):
try:
department = Department.objects.get(pk=department_id)
jobs = Job.objects.select_related('images').filter(
status__name='Completed',
display=True,
department=department
).order_by('date')
except Department.DoesNotExist:
return HttpResponseRedirect('/')
    # debug output of the fetched jobs
    for item in jobs:
        print item
    if department.slug != department_slug:
        return HttpResponseRedirect(
            reverse('services:department', kwargs={'department_id': department.id, 'department_slug': department.slug}))
context = {
'nav_selected': 'services',
'department': department,
'jobs': jobs,
}
return render_to_response(
        template_name='services.html',
        dictionary=Context(context),
        context_instance=RequestContext(request),
)
| UTF-8 | Python | false | false | 1,388 | py | 33 | views.py | 18 | 0.589337 | 0.589337 | 0 | 39 | 34.589744 | 118 |
pichetzh/jama16-retina-replication | 5,703,716,592,561 | cb7c8d7f79bb2417e13311a5e4a0928ce14fad6e | 8ae20385243cb1d6d2e596bb506b382081aa9033 | /preprocess_messidor2.py | 0553892d4054d58cb9dcb897084b6da43be42353 | [
"MIT"
]
| permissive | https://github.com/pichetzh/jama16-retina-replication | 7cdfe36991ad036843f5de3ed16ee68fd7520fa6 | ec7ea0c270c8e371549b2c8a1817cbf7b947ec25 | refs/heads/master | 2020-04-23T07:59:08.726312 | 2019-02-16T21:08:33 | 2019-02-16T21:08:33 | 171,022,275 | 0 | 0 | MIT | true | 2019-02-16T16:04:18 | 2019-02-16T16:04:18 | 2019-02-03T19:56:03 | 2018-10-09T02:40:43 | 660 | 0 | 0 | 0 | null | false | null | import argparse
import csv
import sys
from shutil import rmtree
from PIL import Image
from glob import glob
from os import makedirs, rename
from os.path import join, splitext, basename, exists
from lib.preprocess import resize_and_center_fundus
parser = argparse.ArgumentParser(description='Preprocess Messidor-2 data set.')
parser.add_argument("--data_dir", help="Directory where Messidor-2 resides.",
default="data/messidor2")
args = parser.parse_args()
data_dir = str(args.data_dir)
labels = join(data_dir, 'labels.csv')
# Create directories for grades.
[makedirs(join(data_dir, str(i))) for i in [0, 1]
if not exists(join(data_dir, str(i)))]
# Create a tmp directory for saving temporary preprocessing files.
tmp_path = join(data_dir, 'tmp')
if exists(tmp_path):
rmtree(tmp_path)
makedirs(tmp_path)
failed_images = []
with open(labels, 'r') as f:
reader = csv.reader(f, delimiter=',')
next(reader)
for i, row in enumerate(reader):
        base, grade = row
        im_paths = glob(join(data_dir, "Messidor-2/{}*".format(base)))
# Find contour of eye fundus in image, and scale
# diameter of fundus to 299 pixels and crop the edges.
res = resize_and_center_fundus(save_path=tmp_path,
image_paths=im_paths,
diameter=299, verbosity=0)
# Status message.
msg = "\r- Preprocessing pair of image: {0:>7}".format(i+1)
sys.stdout.write(msg)
sys.stdout.flush()
        if res != 2:  # expect both images of the pair to have been processed
            failed_images.append(base)
continue
# Move the files from the tmp folder to the right grade folder.
for j in range(2):
new_filename = "{0}.00{1}.jpg".format(basename, j)
rename(join(tmp_path, new_filename),
join(data_dir, str(int(grade)), new_filename))
# Clean tmp folder.
rmtree(tmp_path)
print("Could not preprocess {} images.".format(len(failed_images)))
print(", ".join(failed_images))
| UTF-8 | Python | false | false | 2,057 | py | 16 | preprocess_messidor2.py | 10 | 0.629071 | 0.618376 | 0 | 67 | 29.701493 | 79 |
Reidond/workgate-service | 7,834,020,394,850 | c3f8581717efe419e0c4484a80824d5604a4f1d9 | 9e6eed34bf233388ad807845432af5212402e59a | /app/functions/trapmf.py | 4ca36556fb593851eef4299ab9a33004f8c342b6 | []
| no_license | https://github.com/Reidond/workgate-service | dc1523472573b32306eca11b03e8e21790589ddd | 3e7db451ccbca81daba2563b6453272beb948afc | refs/heads/main | 2023-02-13T16:03:32.897819 | 2021-01-11T09:59:05 | 2021-01-11T09:59:05 | 324,210,973 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Trapezoidal-pyramidal membership function
"""
import numpy as np
from app.colors import palette
import os
from bokeh.io.export import export_png
from app.browser import BROWSER
from sanic import Blueprint, response
from bokeh.plotting import figure
from bokeh.embed import json_item
import pathlib
from aiofiles import os as async_os
trapmf_bp = Blueprint('functions_trapmf', url_prefix='/trapmf')
def trapmf(x, params):
a, b, c, d = np.asarray(params)
assert a <= b, 'First parameter must be less than or equal to second parameter.'
assert b <= c, 'Second parameter must be less than or equal to third parameter.'
assert c <= d, 'Third parameter must be less than or equal to fourth parameter.'
if type(x) is not np.ndarray:
x = np.asarray([x])
y = np.zeros(len(x))
# Left slope
if a != b:
index = np.logical_and(a < x, x < b)
y[index] = (x[index] - a) / (b - a)
# Right slope
if c != d:
index = np.logical_and(c < x, x < d)
y[index] = (d - x[index]) / (d - c)
# Top
index = np.logical_and(b <= x, x <= c)
y[index] = 1
return y
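
# Worked example (parameter values assumed for illustration): with
# a, b, c, d = 1, 2, 3, 4 the membership rises linearly on [1, 2], equals 1
# on [2, 3] and falls linearly on [3, 4]:
#
#   trapmf(np.array([1.5, 2.5, 3.5]), [1, 2, 3, 4])  # -> [0.5, 1.0, 0.5]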
@trapmf_bp.route('/', methods=[
"POST",
])
async def trapmf_route(request):
start, stop = request.json['x'].split(':')
x = np.linspace(int(start), int(stop))
a = int(request.json['a'])
b = int(request.json['b'])
c = int(request.json['c'])
d = int(request.json['d'])
y = trapmf(x, [a, b, c, d])
p = figure(plot_width=400, plot_height=400)
p.line(x, y, line_width=2, line_color=palette('light').line_color)
return response.json(json_item(p, "trapmf"))
@trapmf_bp.route("/image")
async def trapmf_image_route(request):
start, stop = request.args['x'][0].split(':')
x = np.linspace(int(start), int(stop))
a = int(request.args['a'][0])
b = int(request.args['b'][0])
c = int(request.args['c'][0])
d = int(request.args['d'][0])
y = trapmf(x, [a, b, c, d])
filename = "trapmf.png"
p = figure(plot_width=400, plot_height=400)
p.line(x, y, line_width=2, line_color=palette('light').line_color)
p.toolbar.logo = None
p.toolbar_location = None
export_png(p, filename=filename, height=400, width=400, webdriver=BROWSER)
file_path = os.path.join(pathlib.Path().absolute(), filename)
file_stat = await async_os.stat(file_path)
headers = {"Content-Length": str(file_stat.st_size)}
return await response.file_stream(
file_path,
headers=headers,
chunked=False,
)
| UTF-8 | Python | false | false | 2,625 | py | 17 | trapmf.py | 12 | 0.621465 | 0.611391 | 0 | 91 | 27.362637 | 84 |
massmutual/ddfg19_cliff_effect | 4,999,341,941,813 | ceb1c29ae915f6e4fe72ed5460f5ee06d4e1f3e5 | cfe8b933e7f2af5d317529af7d2d6c0ea4588611 | /services/data.py | 7f58b909b94248c2283cd118584740fc07379480 | []
| no_license | https://github.com/massmutual/ddfg19_cliff_effect | 820d2133804f9473f799f9dc82ff064f074c91cb | 462e5b6d1f64894e211bb9dd371361be8e4bd449 | refs/heads/master | 2020-06-26T15:55:28.901362 | 2019-12-16T21:42:15 | 2019-12-16T21:42:15 | 199,678,800 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import numpy as np
fip_assistance_payment_lookup_data = \
{'group_size': np.arange(1,13,1),
'eligible_grantee': [306,403,492,597,694,828,905,985,1065,1145,1225,1305],
'ineligible_grantee': [158,274,420,557,694,828,905,985,1065,1145,1225,1305]}
fip_lookup_table = pd.DataFrame(fip_assistance_payment_lookup_data)
family_contribution_map=[
# array of:
#family size
#min monthly gross income
#max monthly gross income
#contribution tier
[1,0,1005, 0], [1,1005,1307, 1], [1,1307, 1628, 2], [1,1628, 1949, 3], [1,1949, 2271, 4], [1,2271, 2592, 5], [1,2592, 2913, 6],
[2,0,1353, 0], [2,1353, 1759, 1], [2,1759, 2169, 2], [2,2169, 2579, 3], [2,2579, 2989, 4], [2,2989, 3399, 5], [2,3399, 3809, 6],
[3,0,1702, 0], [3,1702, 2213, 1], [3,2213, 2711, 2], [3,2711, 3210, 3], [3,3210, 3708, 4], [3,3708, 4207, 5], [3,4207, 4705, 6],
[4,0,2050, 0], [4,2050, 2665, 1], [4,2665, 3252, 2], [4,3252, 3839, 3], [4,3839, 4427, 4], [4,4427, 5014, 5], [4,5014, 5601, 6],
[5,0,2398, 0], [5,2398, 3117, 1], [5,3117, 3793, 2], [5,3793, 4469, 3], [5,4469, 5145, 4], [5,5145, 5821, 5], [5,5821, 6497, 6],
[6,0,2747, 0], [6,2747, 3571, 1], [6,3571, 4336, 2], [6,4336, 5100, 3], [6,5100, 5865, 4], [6, 5865, 6629,5], [6,6629, 7394, 6],
[7,0,3095, 0], [7,3095, 4024, 1], [7,4024, 4732, 2], [7,4732, 5439, 3], [7,5439, 6147, 4], [7,6147, 6854, 5], [7, 6854, 7562,6],
[8,0,3443, 0], [8,3443, 4476, 1], [8,4476, 5127, 2], [8,5127, 5778, 3], [8,5778, 6428, 4], [8,6428, 7079, 5], [8, 7079, 7730,6],
[9,0,3791, 0], [9,3791, 4928, 1], [9,4928, 5522, 2], [9,5522, 6116, 3], [9,6116, 6710, 4], [9,6710, 7304, 5], [9, 7304, 7898, 6],
[10,0,4139, 0], [10,4139, 5381, 1], [10,5381, 5918, 2], [10,5918, 6455, 3], [10,6455, 6992, 4], [10,6992, 7529, 5], [10, 7529, 8066, 6]
]
tier_values_map =[
#for each "tier"
#$contribution per child per pay period
#$cap per family per pay period (inclusive of all children)
[0, 15, 30, 45, 60, 75, 90],
[0, 45, 83, 121, 159, 197, 235]
]
reimbursement_rate_map=[
#$ per hour per child
['CHILD_CARE_CENTER', 'INFANT', 1, 4], ['CHILD_CARE_CENTER', 'INFANT', 2, 4.25], ['CHILD_CARE_CENTER', 'INFANT', 3, 4.75], ['CHILD_CARE_CENTER', 'INFANT', 4, 5], ['CHILD_CARE_CENTER', 'INFANT', 5, 5.5],
['CHILD_CARE_CENTER', 'PRESCHOOL', 1, 2.75], ['CHILD_CARE_CENTER', 'PRESCHOOL', 2, 3], ['CHILD_CARE_CENTER', 'PRESCHOOL', 3, 3.5], ['CHILD_CARE_CENTER', 'PRESCHOOL', 4, 3.75], ['CHILD_CARE_CENTER', 'PRESCHOOL', 5, 4.25],
['GROUP_CHILD_CARE_HOME', 'INFANT', 1, 3.15], ['GROUP_CHILD_CARE_HOME', 'INFANT', 2, 3.4], ['GROUP_CHILD_CARE_HOME', 'INFANT', 3, 3.9], ['GROUP_CHILD_CARE_HOME', 'INFANT', 4, 4.15], ['GROUP_CHILD_CARE_HOME', 'INFANT', 5, 4.65],
['GROUP_CHILD_CARE_HOME', 'PRESCHOOL', 1, 2.65], ['GROUP_CHILD_CARE_HOME', 'PRESCHOOL', 2, 2.9], ['GROUP_CHILD_CARE_HOME', 'PRESCHOOL', 3, 3.4], ['GROUP_CHILD_CARE_HOME', 'PRESCHOOL', 4, 3.65], ['GROUP_CHILD_CARE_HOME', 'PRESCHOOL', 5, 4.15],
['FAMILY_CHILD_CARE_HOME', 'INFANT', 1, 3.15], ['FAMILY_CHILD_CARE_HOME', 'INFANT', 2, 3.4], ['FAMILY_CHILD_CARE_HOME', 'INFANT', 3, 3.9], ['FAMILY_CHILD_CARE_HOME', 'INFANT', 4, 4.15], ['FAMILY_CHILD_CARE_HOME', 'INFANT', 5, 4.65],
['FAMILY_CHILD_CARE_HOME', 'PRESCHOOL', 1, 2.65], ['FAMILY_CHILD_CARE_HOME', 'PRESCHOOL', 2, 2.9], ['FAMILY_CHILD_CARE_HOME', 'PRESCHOOL', 3, 3.4], ['FAMILY_CHILD_CARE_HOME', 'PRESCHOOL', 4, 3.65], ['FAMILY_CHILD_CARE_HOME', 'PRESCHOOL', 5, 4.15],
['LICENSE_EXCEPT', 'INFANT', 1, 1.6], ['LICENSE_EXCEPT', 'INFANT', 2, 2.95],
['LICENSE_EXCEPT', 'PRESCHOOL', 1, 1.6], ['LICENSE_EXCEPT', 'PRESCHOOL', 2, 2.6]
]
tier_lookup_df = pd.DataFrame(family_contribution_map, columns=['fam_size', 'min', 'max', 'tier'])
tier_values_df = pd.DataFrame(tier_values_map, index=['cont', 'lim'])
reimb_values_df = pd.DataFrame(reimbursement_rate_map, columns=['center_type', 'child_age_group', 'center_score', 'value'])
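
# Illustrative lookup (hypothetical usage, not part of the original module):
# the contribution tier for a family of 3 with $2,500 monthly gross income:
#
#   row = tier_lookup_df[(tier_lookup_df['fam_size'] == 3) &
#                        (tier_lookup_df['min'] <= 2500) &
#                        (tier_lookup_df['max'] > 2500)]
#   row['tier'].iloc[0]   # -> 2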
center_type_list = ['CHILD_CARE_CENTER', 'GROUP_CHILD_CARE_HOME', 'FAMILY_CHILD_CARE_HOME', 'LICENSE_EXCEPT']
child_type_list = ['INFANT', 'PRESCHOOL'] | UTF-8 | Python | false | false | 4,149 | py | 47 | data.py | 30 | 0.593396 | 0.374548 | 0 | 59 | 69.338983 | 252 |
ZhangMeimei-pixel/iroko | 16,681,653,007,379 | 1d6f98ee299de4a3d3b026bce2df3a2bceba7369 | 378ab867300998ea39d7b83f0975562615061f4b | /dc_gym/control/test_bw_control.py | 513b48bb1326c25d4ad5753d5bdf0b89cf6b3c91 | [
"Apache-2.0"
]
| permissive | https://github.com/ZhangMeimei-pixel/iroko | 02f62234d38e54802c17f7fc5dd1daef44af211a | 874e8fd9fba54e53482c44c525c937defb8deeae | refs/heads/master | 2022-01-09T12:04:47.500345 | 2019-05-28T13:19:47 | 2019-05-28T13:19:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ''' Simple test suite to verify the functionality of the bandwidth
control library. Hardcoded. '''
import ctypes
import os
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
class Ring(ctypes.Structure):
pass
bw_lib = ctypes.CDLL(FILE_DIR + '/libbw_control.so')
bw_lib.init_ring.argtypes = [ctypes.c_char_p, ctypes.c_ushort, ctypes.c_uint]
bw_lib.init_ring.restype = ctypes.POINTER(Ring)
bw_lib.send_bw_allocation.argtypes = [
ctypes.c_ulong, ctypes.POINTER(Ring), ctypes.c_ushort]
bw_lib.wait_for_reply.argtypes = [ctypes.POINTER(Ring)]
PACKET_RX_RING = 5
PACKET_TX_RING = 13
rx_ring = bw_lib.init_ring("h1-eth0".encode('ascii'), 20135, PACKET_RX_RING)
tx_ring = bw_lib.init_ring("h1-eth0".encode('ascii'), 20135, PACKET_TX_RING)
bw_lib.send_bw_allocation(50000000, tx_ring, 20130)
bw_lib.wait_for_reply(rx_ring)
bw_lib.teardown_ring(rx_ring)
bw_lib.teardown_ring(tx_ring)
| UTF-8 | Python | false | false | 895 | py | 22 | test_bw_control.py | 19 | 0.721788 | 0.688268 | 0 | 27 | 32.148148 | 77 |
M4NS0/Workspaces | 17,145,509,466,881 | bc95233ef2909b3fa6816d0c1e4126c398fddaba | 418e22b1eb5bf2c466a3cb63ef7b9b1177581365 | /Python/Lógica de Programação I/Exercícios/Lista7/Exercicio5.py | b94b1f9b2768b5856d9c3a0cc4ae54502cb4643f | []
| no_license | https://github.com/M4NS0/Workspaces | 2f85bb6f55eeaf93327021b2028716821062fa77 | 9376c30aa9538b6f80978aeb4b091bcb0e179a62 | refs/heads/master | 2021-07-06T07:39:08.089759 | 2021-06-16T11:14:16 | 2021-06-16T11:14:16 | 216,075,608 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | mulheresF = 0
homensF = 0
menos24 = 0
vivas = 0
falecidas = 0
nasc = int(input("Enter the number of children born in the period: "))
for cadastro in range(0, nasc, 1):
    escolha = str(input("Type 'V' if child #{} is alive or 'F' if deceased: ".format(cadastro)))
    if escolha == "V" or escolha == "v":
        vivas += 1
        print("OK")
    if escolha == "F" or escolha == "f":
        falecidas += 1
        print("OK")
    sexo = str(input("Type 'M' for female or 'H' for male: "))
    if sexo == "M" or sexo == "m":
        mulheresF += 1
    if sexo == "H" or sexo == "h":
        homensF += 1
    meses = int(input("Enter child #{}'s age in months: ".format(cadastro)))
    # only deceased children count toward the under-24-months statistic
    if (escolha == "F" or escolha == "f") and meses <= 24:
        menos24 += 1

percTotal = (falecidas * 100) / (vivas + falecidas)
percHomem = (homensF * 100) / (vivas + falecidas)
percMenos24 = (menos24 * 100) / falecidas if falecidas > 0 else 0
print("\n{} children died \n{}% of all registered children \n{}% were boys \n{}% died before 24 months of age ".format(falecidas, percTotal, percHomem, percMenos24))
| UTF-8 | Python | false | false | 1,105 | py | 941 | Exercicio5.py | 738 | 0.589627 | 0.55778 | 0 | 36 | 29.527778 | 178 |
Allan-Perez/CStudies- | 14,353,780,729,851 | b461d2f3cf31af112a1420b58fa646b83931d119 | d6cdb1317cc3ec40299955f821efa1f33ca2962c | /AI_studies/NonInformedSearch/BreadthFirstSearch.py | a947deb903549124fbd3989f4b6c8d2a8e2c5d6a | []
| no_license | https://github.com/Allan-Perez/CStudies- | 37116f9b72e69b7427cbbeb9cd3727e78365e59e | c22ecf915828c70cc1884856acae4817f2c5a9d2 | refs/heads/master | 2020-03-28T21:09:42.460057 | 2018-11-11T21:49:30 | 2018-11-11T21:49:30 | 149,133,964 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from tree import StaticNode as Node
import queue
def BreadthFirstSearch(init_state, aim_state, transitionOps, mState_mNodes=False):
visited_nodes = set()
frontier_nodes = queue.Queue()
frontier_nodes.put(Node(init_state, transitionOps=transitionOps, mState_mNodes=mState_mNodes))
    while not frontier_nodes.empty():
        exploring_node = frontier_nodes.get()
        visited_nodes.add(exploring_node)
if exploring_node._state == aim_state:
return exploring_node
offspring_nodes = exploring_node.produce_offspring()
for son_node in offspring_nodes:
if not son_node.in_list(list(visited_nodes)) and \
not son_node.in_list(list(frontier_nodes.queue)):
frontier_nodes.put(son_node)
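
# Illustrative call (the state objects and transition operators are
# hypothetical; their semantics come from the accompanying tree.StaticNode):
#
#   goal_node = BreadthFirstSearch(init_state=start, aim_state=goal,
#                                  transitionOps=ops)
#   # goal_node is the first visited node whose state equals aim_state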
| UTF-8 | Python | false | false | 707 | py | 72 | BreadthFirstSearch.py | 26 | 0.74116 | 0.739745 | 0 | 18 | 38 | 96 |
robertsawko/pde-and-uq | 17,042,430,242,743 | 94ff73ccec2cbaeee2a291e1464b049178320546 | a76206152efea48d4d2d568db600f46c37b3a285 | /high_dimension/test_sh.py | 6b6fe8a7c708325c734ced345fc3d299413a5ccb | []
| no_license | https://github.com/robertsawko/pde-and-uq | b84a2ebaf1f0b4cc54206ea3d18ebcc726709799 | be4c4dda88d1c63cf355a88f16ecbc4eee1b6203 | refs/heads/master | 2021-01-17T11:35:21.718594 | 2016-04-01T09:44:02 | 2016-04-01T09:44:02 | 31,924,961 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Testing Shinozuka method for signal generation.
'''
from numpy import logspace, linspace, pi
from numpy.random import seed, rand
from matplotlib.pyplot import figure
from scipy.signal import periodogram
from scipy.integrate import trapz
from synthesis import ftransform_analytical, shinozuka
if __name__ == '__main__':
L = 2 * pi
N = 20000
x = linspace(0, L, N)
repeats = 100
fig_corr = figure()
fig_sig = figure()
axs = fig_sig.gca()
axc = fig_corr.gca()
seed(123)
b = 100
maxM = 2**12
target_energy = 0.5
for m in [maxM // 4, maxM // 2, maxM]:
# larger nperseg makes smaller frequencies visible
Sxxs = []
for n in range(repeats):
Phi = rand(m) * 2 * pi
y = shinozuka(x, b=b, delta_w=1 / L, Phi=Phi[0:m])
f, pxx = periodogram(y, fs=1 / (x[1] - x[0]))
omega = 2 * pi * f
Sxxs.append(pxx / 4 / pi)
Sxx = sum(Sxxs) / repeats
axc.loglog(omega, Sxx, label='Empirical m={0:n}'.format(m))
axs.plot(x, y, label='m={0:n}'.format(m))
print('Captured energy percentage with {1:d} modes: {0:0.1f}%'.format(
trapz(Sxx, x=omega) / target_energy * 100, m))
omega = logspace(-1, 4, 10000)
a = ftransform_analytical(omega, b=b)
axc.set_ylim([10**-6, 5 * 10**-3])
axc.loglog(omega, a, label='Analytical')
axs.legend()
axc.legend()
fig_sig.show()
fig_corr.show()
| UTF-8 | Python | false | false | 1,465 | py | 37 | test_sh.py | 30 | 0.571331 | 0.534471 | 0 | 48 | 29.520833 | 78 |
tomaThomas/3D-Scanner | 4,105,988,735,388 | 3da6cbf90d685fcc8c8dfab8ec0365fcfe34f6be | 7aaf691564f08708e1618f3bfbf49a1ab158e343 | /stepper/__init__.py | a09d97a0bc50287ca1f7176ed1029e1c26bd893e | []
| no_license | https://github.com/tomaThomas/3D-Scanner | bceb8fbdeec9f23ad4aac521d06847c21b82e4a3 | 6a711330f736a6aed4f61bccb4147295250badf7 | refs/heads/master | 2020-03-17T10:44:25.138908 | 2018-06-14T11:10:53 | 2018-06-14T11:10:53 | 133,523,679 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .gpio import *
import asyncio
import math
startPin = 2
time_per_step = 0.002
steps_per_scan = 100
step_angle = 2 * math.pi / steps_per_scan
current_angle = 0
print("Init stepper")
def get_steps_per_scan():
return steps_per_scan
def get_current_angle():
return current_angle
def set_steps_per_scan(steps):
global steps_per_scan
global time_per_step
if steps <= 50:
time_per_step = 0.001
else:
time_per_step = 0.002
steps_per_scan = steps
calculate_step_angle()
def calculate_step_angle():
global step_angle
step_angle = 2 * math.pi / steps_per_scan
async def scan_step():
global current_angle
    steps = 800 * 4 / steps_per_scan  # presumably 3200 driver steps per full revolution
for i in range(0, int(steps)):
await stepper_step()
current_angle += step_angle
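
# Worked numbers (assuming the 800 * 4 factor above means 3200 driver steps
# per full revolution): with steps_per_scan = 100, each scan_step() issues
# 32 steps, i.e. a rotation of 360 / 100 = 3.6 degrees.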
async def stepper_step():
gpio.set(1, True)
await asyncio.sleep(time_per_step)
gpio.set(1, False)
await asyncio.sleep(time_per_step)
def cleanup():
gpio.cleanup()
gpio.init(startPin)
| UTF-8 | Python | false | false | 1,011 | py | 14 | __init__.py | 10 | 0.646884 | 0.619189 | 0 | 57 | 16.736842 | 45 |