Dataset schema, one row per source file:

| Column | Type | Range / Values |
|---|---|---|
| repo_name | stringlengths | 7 to 111 |
| __id__ | int64 | 16.6k to 19,705B |
| blob_id | stringlengths | 40 to 40 |
| directory_id | stringlengths | 40 to 40 |
| path | stringlengths | 5 to 151 |
| content_id | stringlengths | 40 to 40 |
| detected_licenses | sequence | - |
| license_type | stringclasses | 2 values |
| repo_url | stringlengths | 26 to 130 |
| snapshot_id | stringlengths | 40 to 40 |
| revision_id | stringlengths | 40 to 40 |
| branch_name | stringlengths | 4 to 42 |
| visit_date | timestamp[ns] | - |
| revision_date | timestamp[ns] | - |
| committer_date | timestamp[ns] | - |
| github_id | int64 | 14.6k to 687M, nullable (⌀) |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | stringclasses | 12 values |
| gha_fork | bool | 2 classes |
| gha_event_created_at | timestamp[ns] | - |
| gha_created_at | timestamp[ns] | - |
| gha_updated_at | timestamp[ns] | - |
| gha_pushed_at | timestamp[ns] | - |
| gha_size | int64 | 0 to 10.2M, nullable (⌀) |
| gha_stargazers_count | int32 | 0 to 178k, nullable (⌀) |
| gha_forks_count | int32 | 0 to 88.9k, nullable (⌀) |
| gha_open_issues_count | int32 | 0 to 2.72k, nullable (⌀) |
| gha_language | stringlengths | 1 to 16, nullable (⌀) |
| gha_archived | bool | 1 class |
| gha_disabled | bool | 1 class |
| content | stringlengths | 10 to 2.95M |
| src_encoding | stringclasses | 5 values |
| language | stringclasses | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 10 to 2.95M |
| extension | stringclasses | 19 values |
| num_repo_files | int64 | 1 to 202k |
| filename | stringlengths | 4 to 112 |
| num_lang_files | int64 | 1 to 202k |
| alphanum_fraction | float64 | 0.26 to 0.89 |
| alpha_fraction | float64 | 0.2 to 0.89 |
| hex_fraction | float64 | 0 to 0.09 |
| num_lines | int32 | 1 to 93.6k |
| avg_line_length | float64 | 4.57 to 103 |
| max_line_length | int64 | 7 to 931 |
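For quick inspection, rows with this schema can be loaded with pandas, assuming a parquet export of the dump; the file name and column subset below are illustrative, not part of the dataset:

import pandas as pd

df = pd.read_parquet('code_dump.parquet')  # illustrative file name
print(df[['repo_name', 'path', 'language', 'num_lines']].head())
# e.g. keep small, non-vendored, non-generated files
subset = df[(~df.is_vendor) & (~df.is_generated) & (df.length_bytes < 50000)]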
Naveen-Babu/weather_credentials | 14,078,902,833,007 | c10523ae55becb2d84f006704ee1929164e6a87d | 6d8cd8e934de6d80c6f76fcd9603c7070ad5ca5b | /usingRequests.py | b6f16818b70d17ae0c83b65ad1ab7a2c195a56e5 | [] | no_license | https://github.com/Naveen-Babu/weather_credentials | 60e93dfa245bd9227b886c098286b69adfe6d757 | 8632988d2c324099d5065d929e3384c49ee9c4b0 | refs/heads/master | 2020-03-25T05:38:27.439294 | 2018-08-07T05:15:39 | 2018-08-07T05:15:39 | 143,458,178 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import requests
# api-endpoint //sending a request through this url
URL = "http://api.openweathermap.org/data/2.5/weather?q=Hyderabad,IN&APPID=c63077b4593aed1e5101d3bb28ac6160"
location = "Hyderabad" # location given here
# defining a params dictionary for the parameters to be sent to the API
PARAMS = {'address':location}
# sending get request and saving the response from server as response object
r = requests.get(url=URL, params=PARAMS)  # two args: the url and the parameters dictionary
#print (r.text)
print("\n")
# extracting data in json format
data = r.json()
print (data)
print('Retrieved', len(r.text), 'characters')
| UTF-8 | Python | false | false | 657 | py | 11 | usingRequests.py | 5 | 0.719939 | 0.684932 | 0 | 24 | 26.125 | 108 |
sema/django-2012 | 3,238,405,377,079 | 1d4f559dc5e2d750c0927e6137bfaa2badbcb932 | ef6f58ebad951bd516338a970d487ffa792096f9 | /backends/generic/mosaic_backend/worker.py | 312970ac198e167fd6af034c034fff99f6106b70 | [
"MIT"
] | permissive | https://github.com/sema/django-2012 | f2d7beb56e81ebf4bfbce0a7ac5e963286088804 | 7768c9db5d24bb15b32dca30fad16ca773a04c33 | refs/heads/master | 2016-09-06T20:01:02.303007 | 2012-08-20T05:55:16 | 2012-08-20T05:55:16 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from urllib2 import HTTPError
import datetime
import json
import logging
import time
import urllib
import urllib2
logger = logging.getLogger(__name__)
class Worker(object):
'''
Generic worker class. Will periodically do work for the MOSAiC server when instantiated with an 'activity manager'.
'''
def __init__(self, mosaic_url, manager, api_key="SUPER_SECRET"):
'''
Constructs a worker for the MOSAiC server at the specified URL.
The supplied manager should specify its capabilities regarding
activity types through the properties, concrete_activity_type and abstract_activity_type:
ex.:
concrete_activity_type = 'git'
abstract_activity_type = 'repository'
The manager should also be able to perform work when requested to:
A method:
            get_activities(url, since) : [activity-dict]
is required
'''
self.manager = manager
parameters = urllib.urlencode({'token': api_key})
common_path = "%s/api/worklist/%s/%s" % (mosaic_url, manager.abstract_activity_type, manager.concrete_activity_type)
self.get_url = "%s?%s" % (common_path, parameters)
self.post_url = "%s/deliver/?%s" % (common_path, parameters)
logger.info("Initialized worker for %s" % common_path)
def run(self, sleep_time=5):
'''
main loop for performing work for the MOSAiC server
'''
        while True:
try:
work_list = self._get_work()
logger.info("Received work_list of size: %s" % len(work_list))
logger.debug("Received work_list: %s", work_list)
for work in work_list:
url = work['url']
since = work['since']
if since:
since = datetime.datetime.fromtimestamp(since)
activities = self.manager.get_activities(url, since)
logger.debug("Delivering work: %s", activities)
self._deliver_work(activities, url)
time.sleep(sleep_time)
except Exception as e:
logger.exception(e)
time.sleep(sleep_time * 10)
def _get_work(self):
'''
Requests work from the MOSAiC server
'''
try:
data = urllib2.urlopen(self.get_url)
except HTTPError:
logger.error(("Failed to open url: %s" % str(self.get_url)))
return []
json_data = json.load(data)
logger.info("%s -> %s" % (self.get_url, json_data))
return json_data
def _deliver_work(self, activities, url):
'''
Delivers completed work to the MOSAiC server
'''
json_data = json.dumps({'activities': activities, 'url': url})
logger.info("%s <- %s" % (self.post_url, json_data))
try:
urllib2.urlopen(self.post_url, urllib.urlencode({'payload': json_data}))
except HTTPError:
logger.error("Failed to open url: %s" % str(self.post_url))
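
# --- Illustrative sketch (not part of the original module): a minimal manager
# implementing the interface Worker expects; names and payloads are assumptions.
class EchoManager(object):
    abstract_activity_type = 'repository'
    concrete_activity_type = 'git'

    def get_activities(self, url, since):
        # A real manager would query `url` for activity newer than `since`.
        return [{'url': url, 'type': 'commit'}]

# Worker('http://localhost:8000', EchoManager()).run()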
| UTF-8 | Python | false | false | 3,198 | py | 40 | worker.py | 25 | 0.565353 | 0.563164 | 0 | 86 | 36.174419 | 124 |
chowravc/OASIS_Segmentation_Training_Data_Generation | 7,249,904,828,079 | 3df27fafd3fc4d874b3e0590d8732f2287b0026b | 4cc8db30b130f7efde574f0b9e763fc788b9d360 | /script3_rendering_masks.py | 75718dababd5ee864b42704735132fcca77eaf22 | [] | no_license | https://github.com/chowravc/OASIS_Segmentation_Training_Data_Generation | 2c3381e79627103852932b7299bddc8dc6543dc4 | c5d9b30eb550e01bfa0c661fa3f5a8263496d18f | refs/heads/main | 2023-01-24T13:37:05.679390 | 2020-11-27T15:30:36 | 2020-11-27T15:30:36 | 315,471,604 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Importing Packages
import bpy
import sys
import os
import numpy as np
import random as r
import glob as g
dir = os.path.dirname(bpy.data.filepath)
if not dir in sys.path:
sys.path.append(dir)
# Importing useful python scripts
from capCreator import *
from wallCreator import *
from bubbleCreator import *
from spotCreator import *
from lightUtils import *
from materialUtils import *
from intersectionCheck import *
# CHANGE CODE HERE-----
# If true, separate masks for each island will be produced
# If false, front and back masks will be produced
instanceSegmentation = True
# Resolution of rendered masks
res = (256, 256)
# END OF CHANGE CODE-----
# Deleting wall used to add emission in last script
bpy.data.objects["ChangeEmissionToRed100OnMe"].select_set(True)
bpy.ops.object.delete()
bpy.data.objects["ChangeEmissionToGreen100OnMe"].select_set(True)
bpy.ops.object.delete()
# Finding out number of images to produce
numberOfImages = len(g.glob("./angles/*"))
# Creating separate masks for each island in each image
if instanceSegmentation:
# For each image
for k in range(numberOfImages):
# Deciding name of image
nameID = str(k).zfill(len(str(numberOfImages)))
# Creating a number of randomly decided caps.
# Number of random caps to create.
number = len(open("./angles/"+nameID+".txt", "r").read().splitlines())
# Radius of the sphere the cap will be on.
radius = 1
# Location of the cap.
location = (0, 0, 0)
# If on first image, access these materials as variables
if k == 0:
# Cap materials
frontCapMaterial = bpy.data.materials["frontCapMaterial"]
backCapMaterial = bpy.data.materials["backCapMaterial"]
# Material of wall
materialName = "wallMaterial"
materialColor = (0, 0, 0)
createMaterial(materialName, materialColor, removeShader=True)
wallMaterial = bpy.data.materials[materialName]
# Read information on each cap in image
capAngles = open("./angles/"+nameID+".txt", "r").read().splitlines()
capPhis = open("./phis/"+nameID+".txt", "r").read().splitlines()
capThetas = open("./thetas/"+nameID+".txt", "r").read().splitlines()
# Creating walls
if k == 0:
createWall("Wall0", (2, -2, -10), (-5, -2, -10), (-5, -2, 10), (2, -2, 10), wallMaterial)
createWall("Wall1", (2, 2, -10), (-5, 2, -10), (-5, 2, 10), (2, 2, 10), wallMaterial)
createWall("Wall2", (-5, -2, -10), (-5, 2, -10), (-5, 2, 10), (-5, -2, 10), wallMaterial)
# Creating and linking camera
bpy.ops.object.camera_add(location=(3, 0, 0), rotation=(np.pi/2, 0, np.pi/2))
# Saving masks for each cap
for i in range(number):
# Cap size parameter (Choose cap size between 0 and 10)
cSize = float(capAngles[i])
# Euler angles of the cap.
euler = (float(capThetas[i]), float(capPhis[i]), 0)
# Name the cap.
name = "Cap" + str(i).zfill(len(str(number)))
# Creating cap
createCap(radius, cSize, euler, location, name, frontCapMaterial)
# Changing camera size and activating
bpy.data.cameras['Camera'].lens = 45
bpy.context.scene.camera = bpy.data.objects['Camera']
# Deselect all
bpy.ops.object.select_all(action='DESELECT')
# Setting render path
bpy.context.scene.render.filepath = os.getcwd() + '/masks/' + nameID + "/" + name + '.png'
# Rendering Scene
# Get the scene
scene = bpy.data.scenes["Scene"]
# Set render resolution
scene.render.resolution_x = res[0]
scene.render.resolution_y = res[1]
scene.render.resolution_percentage = 100
bpy.ops.render.render(write_still = True)
# Deselect all
bpy.ops.object.select_all(action='DESELECT')
# Deleting all caps
bpy.data.objects[name].select_set(True)
bpy.ops.object.delete()
# Creating front and back masks
if not instanceSegmentation:
# Saving masks for each image
for k in range(numberOfImages):
# Deciding name of image
nameID = str(k).zfill(len(str(numberOfImages)))
# Number of caps to create.
number = len(open("./angles/"+nameID+".txt", "r").read().splitlines())
# Radius of the sphere the cap will be on.
radius = 1
# Location of the cap.
location = (0, 0, 0)
# Accessing these materials as variables
if k == 0:
# Cap materials
frontCapMaterial = bpy.data.materials["frontCapMaterial"]
backCapMaterial = bpy.data.materials["backCapMaterial"]
# Material of wall
materialName = "wallMaterial"
materialColor = (0, 0, 0)
createMaterial(materialName, materialColor, removeShader=True)
wallMaterial = bpy.data.materials[materialName]
# Accessing relevant cap information for each island
capAngles = open("./angles/"+nameID+".txt", "r").read().splitlines()
capPhis = open("./phis/"+nameID+".txt", "r").read().splitlines()
capThetas = open("./thetas/"+nameID+".txt", "r").read().splitlines()
# Placing individual caps
for i in range(number):
# Cap size parameter (Choose cap size between 0 and 10)
cSize = float(capAngles[i])
# Euler angles of the cap.
euler = (float(capThetas[i]), float(capPhis[i]), 0)
# Name the cap.
name = "Cap" + str(i).zfill(len(str(number)))
# Creating caps with different emission based on back/foreground
if np.pi/2 <= euler[0] < 3*np.pi/2:
createCap(radius, cSize, euler, location, name, frontCapMaterial)
else:
createCap(radius, cSize, euler, location, name, backCapMaterial)
# Creating walls
if k == 0:
createWall("Wall0", (2, -2, -10), (-5, -2, -10), (-5, -2, 10), (2, -2, 10), wallMaterial)
createWall("Wall1", (2, 2, -10), (-5, 2, -10), (-5, 2, 10), (2, 2, 10), wallMaterial)
createWall("Wall2", (-5, -2, -10), (-5, 2, -10), (-5, 2, 10), (-5, -2, 10), wallMaterial)
# Creating and linking camera
bpy.ops.object.camera_add(location=(3, 0, 0), rotation=(np.pi/2, 0, np.pi/2))
# Resizing and activating camera
bpy.data.cameras['Camera'].lens = 45
bpy.context.scene.camera = bpy.data.objects['Camera']
# Deselect all
bpy.ops.object.select_all(action='DESELECT')
# Setting render path
bpy.context.scene.render.filepath = os.getcwd() + '/masks/' + nameID + '.png'
# Rendering Scene
# Get the scene
scene = bpy.data.scenes["Scene"]
# Set render resolution
scene.render.resolution_x = res[0]
scene.render.resolution_y = res[1]
scene.render.resolution_percentage = 100
bpy.ops.render.render(write_still = True)
# Deselect all
bpy.ops.object.select_all(action='DESELECT')
# Deleting all caps
for capID in range(number):
bpy.data.objects["Cap"+str(capID).zfill(len(str(number)))].select_set(True)
bpy.ops.object.delete()
print("")
print("Reached end of script 3. Check if correct masks were created. Don't save Blender file while closing. Thanks!")
print("") | UTF-8 | Python | false | false | 7,684 | py | 6 | script3_rendering_masks.py | 4 | 0.586543 | 0.564289 | 0 | 226 | 33.004425 | 117 |
ANDYsGUITAR/leetcode | 16,501,264,396,640 | 4aed1f7de1d29e205af95ff911c765051c09f9a7 | 865bd0c84d06b53a39943dd6d71857e9cfc6d385 | /032-longest-valid-parentheses/longest-valid-parentheses.py | cffe40717f638263eec332eeaea3d5d24328be4a | [] | no_license | https://github.com/ANDYsGUITAR/leetcode | 1fd107946f4df50cadb9bd7189b9f7b7128dc9f1 | cbca35396738f1fb750f58424b00b9f10232e574 | refs/heads/master | 2020-04-01T18:24:01.072127 | 2019-04-04T08:38:44 | 2019-04-04T08:38:44 | 153,473,780 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Given a string containing just the characters '(' and ')', find the length of the longest valid (well-formed) parentheses substring.
#
# Example 1:
#
#
# Input: "(()"
# Output: 2
# Explanation: The longest valid parentheses substring is "()"
#
#
# Example 2:
#
#
# Input: ")()())"
# Output: 4
# Explanation: The longest valid parentheses substring is "()()"
#
#
class Solution:
def longestValidParentheses(self, s: 'str') -> 'int':
if len(s) == 0:
return 0
dp = [0] * len(s)
for i in range(1, len(s)):
if s[i] == ')' and s[i - 1] == '(':
if i >= 2:
dp[i] = dp[i - 2] + 2
else:
dp[i] = 2
elif s[i] == ')' and s[i - 1] == ')':
if i - dp[i - 1] - 1 >= 0 and s[i - dp[i - 1] - 1] == '(':
if i - dp[i - 1] - 2 >= 0:
dp[i] = dp[i - 1] + dp[i - dp[i - 1] - 2] + 2
else:
dp[i] = dp[i - 1] + 2
print(dp)
return max(dp)
# TLE
# def Pnum(s):
# count = left = right = 0
# for c in s:
# if c == '(':
# left += 1
# else:
# if left != 0:
# left -= 1
# count += 1
# else:
# right += 1
# return count
# def length(s):
# return len(s) if Pnum(s) * 2 == len(s) else 0
# if len(s) % 2 == 0:
# l = len(s)
# else:
# l = len(s) - 1
# ans = 0
# while l >= 2:
# for i in range(len(s) - l + 1):
# ans = length(s[i : i+l])
# if ans > 0:
# break
# if ans > 0:
# break
# else:
# l -= 2
# return ans
| UTF-8 | Python | false | false | 2,027 | py | 148 | longest-valid-parentheses.py | 145 | 0.327084 | 0.304884 | 0 | 74 | 25.959459 | 134 |
gramaziokohler/robotic_assembly_workshop | 12,773,232,740,237 | 2bd960baaac02c26f02c4ad42ced9b3c6bee56ec | 63b40e1506799bfdc7b6d24668c6756997a65a76 | /examples/ex23_load_robot.py | cde66593ab87258c1a34777c607cccf50063caf6 | [
"MIT"
] | permissive | https://github.com/gramaziokohler/robotic_assembly_workshop | 1eaecbb2aa1c429e4a136fd751c2d499cafd0c4b | 252d9750175061fd7d4746a4701afd42882773a5 | refs/heads/master | 2020-04-11T07:48:52.093712 | 2019-02-13T14:46:39 | 2019-02-13T14:46:39 | 161,622,740 | 22 | 8 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Creates a `Robot` representing a UR5 robot from a urdf model and the semantics
from a srdf file.
"""
import os
import compas
from compas.robots import RobotModel
from compas.robots import LocalPackageMeshLoader
from compas_fab.robots import Robot
from compas_fab.robots import RobotSemantics
from compas_fab.backends import RosClient
# from compas_fab.ghpython import RobotArtist
compas.PRECISION = '12f'
HERE = os.path.dirname(__file__)
DATA = os.path.join(HERE, '../data')
PATH = os.path.join(DATA, 'robot_description')
package = 'ur_description'
urdf_filename = os.path.join(PATH, package, "urdf", "ur5.urdf")
srdf_filename = os.path.join(PATH, package, "ur5.srdf")
model = RobotModel.from_urdf_file(urdf_filename)
# Load external geometry files (i.e. meshes)
# loader = LocalPackageMeshLoader(PATH, 'ur_description')
# model.load_geometry(loader)
artist = None
# artist = RobotArtist(model)
semantics = RobotSemantics.from_srdf_file(srdf_filename, model)
robot = Robot(model, artist, semantics, client=None)
robot.info()
| UTF-8 | Python | false | false | 1,038 | py | 97 | ex23_load_robot.py | 47 | 0.758189 | 0.753372 | 0 | 36 | 27.833333 | 79 |
ssongna/sastbx_module_sastbx | 18,116,172,075,740 | 4431e6f940e4933a1c7fbf7e85308ddb5726563d | 9aaee5f65c8dd9d18b1cc549740f4e6be295f091 | /command_line/build_lookup_table.py | 4ddc9c27e226d4aa2b2794ec221caa694c79906b | [] | no_license | https://github.com/ssongna/sastbx_module_sastbx | 285074d6f07c9d440bd750889322746d14448328 | aa5cc2f59a4179db761125fb01fe7aaeda7d5dd0 | refs/heads/master | 2021-08-17T09:01:18.796977 | 2017-11-21T01:25:50 | 2017-11-21T01:25:50 | 111,428,819 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # LIBTBX_SET_DISPATCHER_NAME sastbx.build_lookup_table
from sastbx.data_reduction import build_lookup_file
import sys
if (__name__ == "__main__"):
build_lookup_file.run(args=sys.argv[1:])
| UTF-8 | Python | false | false | 194 | py | 205 | build_lookup_table.py | 164 | 0.71134 | 0.706186 | 0 | 7 | 26.714286 | 54 |
vahiwe/Scale | 6,640,019,448,697 | 5c426f42900ad1e63420539267c868abe90b246e | 0e8f1e05be0c3ff77bca43d29a0b1de1d6602edb | /job/templatetags/crispy.py | 2d9bd5f0539704b3edca1caf96c7dbe173cf53ed | [] | no_license | https://github.com/vahiwe/Scale | e8cf92f54251f575725bd408221a6e59dd7e8b88 | 98fbe2b2338305e697a0054aaf8efb7c74b5387a | refs/heads/master | 2022-09-10T05:16:34.767703 | 2020-05-30T15:03:40 | 2020-05-30T15:03:40 | 256,792,181 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from crispy_forms.utils import render_crispy_form
from django_jinja import library
from jinja2 import contextfunction
@contextfunction
@library.global_function
def crispy(context, form):
return render_crispy_form(form, context=context)
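
# In a Jinja2 template this global renders a form as: {{ crispy(form) }}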
| UTF-8 | Python | false | false | 242 | py | 41 | crispy.py | 25 | 0.81405 | 0.809917 | 0 | 9 | 25.888889 | 52 |
jdmeyer3/CeilometerMetrics | 16,664,473,143,203 | ca6c892cc35e27976bf863580cf09029d8d25603 | 1e883289a12486a93a2faf8dcb9f26af5c71a2eb | /instance.py | 290f6c2fa326ad482a6f1bcbcf1444c6491c6034 | [] | no_license | https://github.com/jdmeyer3/CeilometerMetrics | eb3f7a4e243302b88c3769076b52191216042c0e | 77b24f1520fd2ae7d05f50c4c9cbec95fb16c62c | refs/heads/master | 2017-05-10T16:42:19.865926 | 2017-02-28T04:07:09 | 2017-02-28T04:07:09 | 82,583,767 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Joshua Meyer
# 08 Feb 2017
import shade
import tenantinfo
import time
class Instance(object):
def __init__(self, query_time):
self.query_time = query_time
self.tenant_info = []
self.instance = []
self.network = []
def Connect(self):
cloud = shade.openstack_cloud(cloud ='ceilometer')
return cloud
def Initialize(self):
self.tenant_info = tenantinfo.TenantInfo(self.query_time)
self.instance = self.tenant_info.Instance_Health()
self.network = self.tenant_info.Network_IO()
def Print_Info(self):
self.Initialize()
print "Query: " + str(self.query_time) + " minutes"
for i in range(0, len(self.instance)):
instance_tmp=self.instance[i]
print "*****************************************************"
print "instance: " + str(instance_tmp.get('instance'))
print "average CPU: " + str(instance_tmp.get('avg_cpu')) + " %"
print "Total Memory: " + str(instance_tmp.get('memory')) + " B"
print "Memory Usage: " + str(instance_tmp.get('memory_usage')) + " B"
print "Memory Percent: " + str(instance_tmp.get('memory_percent')) + " %"
print "Disk Write Byte Rate: " + str(instance_tmp.get('disk_write_bytes_rate')) + " B"
print "Disk Read Bytes Rate: " + str(instance_tmp.get('disk_read_bytes_rate')) + " B"
print "******************************************************************"
network_info = self.network[0]
print "Network Incoming Bytes Rate: " + str(network_info.get('avg_incoming_bytes_rate')) + " B"
print "Network Outgoing Bytes Rate: " + str(network_info.get('avg_incoming_bytes_rate')) + " B"
def Monitor(self):
monitor = True
cloud = self.Connect()
self.Initialize()
for i in range(0, len(self.instance)):
instance_tmp = self.instance[i]
if instance_tmp.get('avg_cpu') > 50.00:
cloud.create_server(name='c_three',flavor='m1.small',image='Ubuntu-amd64-1604',network='ceilometernet')
print "creating new instance"
print "instance: " + str(instance_tmp.get('instance')) + " | current cpu usage: " + str(instance_tmp.get('avg_cpu'))
monitor = False
break
else:
print "instance: " + str(instance_tmp.get('instance')) + " | current cpu usage: " + str(instance_tmp.get('avg_cpu'))
while monitor:
time.sleep(60)
self.Monitor()
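
# Example (illustrative): query the last 10 minutes, print a report, then monitor.
# inst = Instance(10)
# inst.Print_Info()
# inst.Monitor()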
| UTF-8 | Python | false | false | 2,632 | py | 4 | instance.py | 3 | 0.542173 | 0.533435 | 0 | 63 | 40.777778 | 133 |
MayborodaPavel/simpleSite | 9,766,755,647,064 | 6f0591accc66f2ecee9101b44cfc4e2b12abdf5c | 864622bf8fa231a9ea983a7b9254945702438fa3 | /backend/router.py | 60829fd73b053af233b23939a20a1e713fe093e6 | [] | no_license | https://github.com/MayborodaPavel/simpleSite | 5a2f8d7aae16d63f7e0215035431643dfd07b15d | c4e1846715b6929ef81cd4d859b0799f54a7c727 | refs/heads/master | 2023-01-22T21:50:15.931038 | 2019-10-10T14:02:30 | 2019-10-10T14:02:30 | 214,197,620 | 0 | 0 | null | false | 2023-01-04T12:21:15 | 2019-10-10T14:01:04 | 2019-10-10T14:03:05 | 2023-01-04T12:21:14 | 1,442 | 0 | 0 | 26 | Python | false | false | from collections import defaultdict
import re
from controllers import ContentController
from response import BadResponse
from logger import logger
class Router:
_routes = defaultdict(dict)
instance = None
def __new__(cls):
if cls.instance is None:
cls.instance = super(Router, cls).__new__(cls)
return cls.instance
@property
def routes(self):
return self._routes
@classmethod
def route(cls, url=None, method='GET', controller=None, action=None):
cls._routes[url][method] = (controller, action)
def routing(self, path, method, form_data=None):
for pattern in self._routes.keys():
match = re.fullmatch(pattern, path)
if match is not None:
path = pattern
try:
param = match.group(1)
except IndexError:
param = None
break
if match is None:
return BadResponse()
rules = self._routes[path]
try:
controller, action = rules[method]
except KeyError:
logger.warning(f'{method} for {path} not supported')
return BadResponse(501, 'Not Implemented')
cls = globals()[controller]
func = cls.__dict__[action]
obj = cls()
if all([param, form_data]):
response = func(obj, url_params=param, form_data=form_data)
elif param is not None:
response = func(obj, url_params=param)
elif form_data is not None:
response = func(obj, form_data=form_data)
else:
response = func(obj)
return response
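
# Illustrative wiring (the action name is an assumption, not from this file):
# Router.route(url=r'/pages/(\d+)', method='GET',
#              controller='ContentController', action='show')
# response = Router().routing('/pages/42', 'GET')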
| UTF-8 | Python | false | false | 1,690 | py | 24 | router.py | 18 | 0.562722 | 0.560355 | 0 | 62 | 26.209677 | 73 |
defenders-of-eboracia/streaming-a-star | 9,311,489,124,121 | d38fe72ca455e251e0d03187d9101aaac96cd0e9 | c4c858c9126368872132f3ad7edc04bd169cca7d | /astar.py | 40784f9957eb00c89921a7ffdaa6d187a5f3ffe1 | [] | no_license | https://github.com/defenders-of-eboracia/streaming-a-star | be3c5fcc4fbca3827865b8607179678c9a67e88d | e972fb36168c648795c05452e523c483d3c7f71f | refs/heads/master | 2021-01-17T04:27:40.265181 | 2017-02-23T21:15:36 | 2017-02-23T21:15:36 | 82,955,477 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class AStar:
"""
A-Star implementation adapted from @jrialland's version at
https://github.com/jrialland/python-astar/blob/master/astar.py
"""
def heuristic_cost_estimate(self, start, goal):
# return self.average_min_pies_cost * (start.last_day - start.current_day)
raise NotImplementedError
def _yield_path(self, came_from, last):
yield last
current = came_from[last]
while True:
yield current
if current in came_from:
current = came_from[current]
else:
break
def _reconstruct_path(self, came_from, last):
return list(reversed([p for p in self._yield_path(came_from, last)]))
def calculate_total_cost(self, the_path):
node = the_path[0]
next = the_path[1]
total = 0
while True:
children = node.options(self.available_pies_cost)
if not children:
return total
for cost, node in children:
if node == next:
total += cost
break
the_path = the_path[1:]
try:
node = the_path[0]
next = the_path[1]
except IndexError:
return total
def astar(self, start, goal):
"""applies the a-star path searching algorithm in order to find a route between a 'start' node and a 'root' node"""
closedset = set([]) # The set of nodes already evaluated.
# The set of tentative nodes to be evaluated, initially containing the
# start node
openset = set([start])
came_from = {} # The map of navigated nodes.
g_score = {}
g_score[start] = 0 # Cost from start along best known path.
# Estimated total cost from start to goal through y.
f_score = {}
f_score[start] = self.heuristic_cost_estimate(start, goal)
while len(openset) > 0:
# the node in openset having the lowest f_score[] value
current = min(f_score, key=f_score.get)
if current == goal:
path = self._reconstruct_path(came_from, goal)
return path
openset.discard(current) # remove current from openset
del f_score[current]
closedset.add(current) # add current to closedset
for cost, neighbor in current.options():
if neighbor in closedset:
continue
tentative_g_score = g_score[current] + cost
if (neighbor not in openset) or (tentative_g_score < g_score[neighbor]):
came_from[neighbor] = current
g_score[neighbor] = tentative_g_score
f_score[neighbor] = tentative_g_score + \
self.heuristic_cost_estimate(neighbor, goal)
openset.add(neighbor)
return None | UTF-8 | Python | false | false | 2,970 | py | 8 | astar.py | 6 | 0.545791 | 0.543098 | 0 | 78 | 37.089744 | 123 |
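
# --- Illustrative usage sketch (assumed names; not from this repo). Nodes must be
# hashable, comparable, and expose options() -> [(cost, neighbor)]:
class LineNode(object):
    def __init__(self, value, limit):
        self.value, self.limit = value, limit

    def __hash__(self):
        return hash(self.value)

    def __eq__(self, other):
        return self.value == other.value

    def options(self):
        # One step to the right costs 1, until the end of the line.
        return [(1, LineNode(self.value + 1, self.limit))] if self.value < self.limit else []

class LineSearch(AStar):
    def heuristic_cost_estimate(self, start, goal):
        return goal.value - start.value  # admissible on this line graph

# LineSearch().astar(LineNode(0, 5), LineNode(5, 5)) -> path of six nodes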
ebehlmann/puzzling | 1,743,756,757,942 | 0ef71b66d3e2ab3fc5e2f77d186b3583267272c5 | 97efbf86d7b83e2400ce8fcabe677702d78b7bd4 | /python/advent_of_code/2015/day_12/day_12.py | 6c731f716751049044ba1ffbd784c4c23e98c6f6 | [] | no_license | https://github.com/ebehlmann/puzzling | aa7b8cc123768810907fda0e4f6823008a72df19 | 78c345278bfb20a23d5f9438833135d15085b787 | refs/heads/master | 2021-12-13T19:32:19.167606 | 2021-12-11T19:18:06 | 2021-12-11T19:18:06 | 123,527,764 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #this is incomplete
import json
def find_sum(blob):
    # Recursively sum every number in the JSON structure.
    if isinstance(blob, int):
        return blob
    if isinstance(blob, list):
        return sum(find_sum(item) for item in blob)
    if isinstance(blob, dict):
        return sum(find_sum(value) for value in blob.values())
    return 0  # strings contribute nothing
#def find_sum_2(blob):
with open("input.json") as json_file:
json_data = json.load(json_file)
print(find_sum(json_data)) | UTF-8 | Python | false | false | 407 | py | 69 | day_12.py | 67 | 0.633907 | 0.628993 | 0 | 22 | 17.545455 | 37 |
jm-bit/PvP-Chess-Game-Using-Python | 11,682,311,078,683 | 0f27503fcfbe8194d24b290d911585fd0ab90ef2 | 3082396d72744a146322d3c537ded71ea7a7a22b | /CMSC_12_2P_Chess_Game.py | da9b19aa482abdc7fc073f0ed92cf9b820523b6b | [] | no_license | https://github.com/jm-bit/PvP-Chess-Game-Using-Python | e6b45b7d8749c1988320d638986a07ec75be9e32 | e7c747b9630c6e48a5acb86bf19d70055fdafe5b | refs/heads/master | 2021-01-05T01:04:16.732807 | 2020-02-16T03:22:04 | 2020-02-16T03:22:04 | 240,823,305 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import ChessFunc
alternator = 1
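# Board squares are typed as two characters, e.g. "a2" (assuming letter-digit
# coordinates): the first character is looked up in coordinates[0], the second
# in coordinates[1], as parsed below via piece_pos[0] and piece_pos[1].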
while True:
if ChessFunc.dec:
break
x_pos = ""
y_pos = ""
x_loc = ""
y_loc = ""
valid_move = True
ChessFunc.board_print(ChessFunc.board, ChessFunc.coordinates)
ChessFunc.find_king(alternator)
ChessFunc.checkmate(ChessFunc.king_x, ChessFunc.king_y, alternator)
if ChessFunc.end:
break
ChessFunc.check(ChessFunc.king_x, ChessFunc.king_y, alternator)
print("Type \"Save\" to save current Game Data.")
print("Type \"Load\" to load a saved Game Data.")
if ChessFunc.checked:
if alternator == 1:
print("\nPlayer 2 says you are Checked.")
elif alternator == 2:
print("\nPlayer 1 says you are Checked.")
if alternator == 1:
print("\nIt\'s Player 1\'s turn!")
else:
print("\nIt\'s Player 2\'s turn!")
if alternator == 1:
piece_pos = input("\nEnter Position of Piece to Move: ")
if piece_pos == "Save":
file_name = input("Enter File name for Game Data to Save: ")
ChessFunc.save_board(file_name)
break
elif piece_pos == "Load":
file_name = input("Enter File name of Game Data to Load: ")
ChessFunc.load_board(file_name)
continue
else:
x_pos = piece_pos[0] + " "
y_pos = piece_pos[1]
if (ChessFunc.board[ChessFunc.coordinates[1].index(y_pos)][ChessFunc.coordinates[0].index(x_pos)])[1] != "1":
print("\nInvalid Move! It\'s Player 1\'s turn to move.")
valid_move = False
else:
board_loc = input("\nWhere do you want to move " + ChessFunc.board[ChessFunc.coordinates[1].index(y_pos)][ChessFunc.coordinates[0].index(x_pos)] + " : ")
x_loc = board_loc[0] + " "
y_loc = board_loc[1]
else:
piece_pos = input("\nEnter Position of Piece to Move: ")
if piece_pos == "Save":
file_name = input("Enter File name for Game Data to Save: ")
ChessFunc.save_board(file_name)
break
elif piece_pos == "Load":
file_name = input("Enter File name of Game Data to Load: ")
ChessFunc.load_board(file_name)
continue
else:
x_pos = piece_pos[0] + " "
y_pos = piece_pos[1]
if (ChessFunc.board[ChessFunc.coordinates[1].index(y_pos)][ChessFunc.coordinates[0].index(x_pos)])[1] != "2":
print("\nInvalid Move! It\'s Player 2\'s turn to move.")
valid_move = False
else:
board_loc = input("\nWhere do you want to move " + ChessFunc.board[ChessFunc.coordinates[1].index(y_pos)][ChessFunc.coordinates[0].index(x_pos)] + " : ")
x_loc = board_loc[0] + " "
y_loc = board_loc[1]
if valid_move:
# Pawn Function
if ChessFunc.board[ChessFunc.coordinates[1].index(y_pos)][ChessFunc.coordinates[0].index(x_pos)] == "P1":
white_pawn = True
ChessFunc.pawn(x_pos, y_pos, x_loc, y_loc, white_pawn, alternator)
ChessFunc.pawn_promotion(y_pos, x_loc, y_loc, alternator)
elif ChessFunc.board[ChessFunc.coordinates[1].index(y_pos)][ChessFunc.coordinates[0].index(x_pos)] == "P2":
white_pawn = False
ChessFunc.pawn(x_pos, y_pos, x_loc, y_loc, white_pawn, alternator)
ChessFunc.pawn_promotion(y_pos, x_loc, y_loc, alternator)
# Bishop Function
elif ChessFunc.board[ChessFunc.coordinates[1].index(y_pos)][ChessFunc.coordinates[0].index(x_pos)] == "B1":
white_bishop = True
ChessFunc.bishop(x_pos, y_pos, x_loc, y_loc, white_bishop, alternator)
elif ChessFunc.board[ChessFunc.coordinates[1].index(y_pos)][ChessFunc.coordinates[0].index(x_pos)] == "B2":
white_bishop = False
ChessFunc.bishop(x_pos, y_pos, x_loc, y_loc, white_bishop, alternator)
# Rook Function
elif ChessFunc.board[ChessFunc.coordinates[1].index(y_pos)][ChessFunc.coordinates[0].index(x_pos)] == "R1":
white_rook = True
ChessFunc.rook(x_pos, y_pos, x_loc, y_loc, white_rook, alternator)
elif ChessFunc.board[ChessFunc.coordinates[1].index(y_pos)][ChessFunc.coordinates[0].index(x_pos)] == "R2":
white_rook = False
ChessFunc.rook(x_pos, y_pos, x_loc, y_loc, white_rook, alternator)
# Knight Function
elif ChessFunc.board[ChessFunc.coordinates[1].index(y_pos)][ChessFunc.coordinates[0].index(x_pos)] == "N1":
white_knight = True
ChessFunc.knight(x_pos, y_pos, x_loc, y_loc, white_knight, alternator)
elif ChessFunc.board[ChessFunc.coordinates[1].index(y_pos)][ChessFunc.coordinates[0].index(x_pos)] == "N2":
white_knight = False
ChessFunc.knight(x_pos, y_pos, x_loc, y_loc, white_knight, alternator)
# Queen Function
elif ChessFunc.board[ChessFunc.coordinates[1].index(y_pos)][ChessFunc.coordinates[0].index(x_pos)] == "Q1":
white_queen = True
ChessFunc.queen(x_pos, y_pos, x_loc, y_loc, white_queen, alternator)
elif ChessFunc.board[ChessFunc.coordinates[1].index(y_pos)][ChessFunc.coordinates[0].index(x_pos)] == "Q2":
white_queen = False
ChessFunc.queen(x_pos, y_pos, x_loc, y_loc, white_queen, alternator)
# King Function
elif ChessFunc.board[ChessFunc.coordinates[1].index(y_pos)][ChessFunc.coordinates[0].index(x_pos)] == "K1":
white_king = True
ChessFunc.king(x_pos, y_pos, x_loc, y_loc, white_king, alternator)
elif ChessFunc.board[ChessFunc.coordinates[1].index(y_pos)][ChessFunc.coordinates[0].index(x_pos)] == "K2":
white_king = False
ChessFunc.king(x_pos, y_pos, x_loc, y_loc, white_king, alternator)
ChessFunc.find_king(alternator)
ChessFunc.check(ChessFunc.king_x, ChessFunc.king_y, alternator)
if ChessFunc.checked:
print("\nInvalid Move! Your King is Checked.")
ChessFunc.no_error = False
if ChessFunc.no_error: # Determine if move is valid and executed properly
# Switches after every valid player's move
if alternator == 1:
alternator = 2
else:
alternator = 1
ChessFunc.no_error = True
ChessFunc.checked = False
input("\nPress anything to continue...")
os.system("clear")
if piece_pos != "Save":
if alternator == 1:
print("\nGame Ended! It's a Checkmate. Player 2 won the game.")
elif alternator == 2:
print("\nGame Ended! It's a Checkmate. Player 1 won the game.")
print("------SCORES-----")
print("Player 1 Score: ", ChessFunc.p1_score)
print("Player 2 Score: ", ChessFunc.p2_score)
| UTF-8 | Python | false | false | 5,977 | py | 3 | CMSC_12_2P_Chess_Game.py | 2 | 0.679438 | 0.666388 | 0 | 165 | 35.224242 | 156 |
gyoshil/monsterappetite | 13,786,845,050,387 | e8884adf5e9cb661811c293b2aae21bfa31bf1e2 | a2bedaee8f3f11b0fb78898df03133f710947a69 | /analysis/high_risk.py | 89546ca1eac0532d9d5027ea48ef41518ff2db91 | [] | no_license | https://github.com/gyoshil/monsterappetite | 856257e6fd8768d8802e59ab25e0501076f35db1 | b5a1da78a6c64920b56dca4d2caedc0c4cd2ba6e | refs/heads/master | 2021-01-18T02:25:02.708216 | 2018-04-10T02:22:35 | 2018-04-10T02:22:35 | 24,821,327 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import bson
from bson.codec_options import CodecOptions
from pprint import pprint
import csv
import calculate_N
dataDir = 'C:/Users/GGU/Documents/GitHub/monsterappetite/analysis/Qualtrics/'
bson_file = open('C:/Users/GGU/Documents/GitHub/monsterappetite/analysis/players.bson','rb')
mongoDBplayers = bson.decode_all(bson_file.read())
###### FUCNTIONS #####
def gatherPlayers(session,condition):
finishedSession_ids = calculate_N.gatherPlayers(session,condition)
high_risk_participant_ids = []
csvFile = "BIQ1.csv"
#['uid', 'V1', 'V8', 'V9', 'Q2_1', 'Q3_1', 'Q4_1', 'Q6_1', 'Q7_1', 'Q8_1', 'Q9_1', 'Q10_1', 'Q11_1', 'Q12_1', 'Q13_1']
id_index1 = 4
id_index2 = 5
id_index3 = 7
id_index4 = 8
    with open(dataDir + csvFile, mode='r') as infile:
        qualtrics = csv.reader(infile)
        # A participant counts as high risk if any of Q2/Q3/Q6/Q7 sits at the risky end of its scale.
        def q2(player): return player[id_index1] == '4' or player[id_index1] == '5'
        def q3(player): return player[id_index2] == '4' or player[id_index2] == '5'
        def q6(player): return player[id_index3] == '1' or player[id_index3] == '2'
        def q7(player): return player[id_index4] == '1' or player[id_index4] == '2'
        for player in qualtrics:
            if (player[0] in finishedSession_ids) and \
                    (q2(player) or q3(player) or q6(player) or q7(player)):
                high_risk_participant_ids.append(player[0])
    return high_risk_participant_ids
###### MAIN ########
def high_risk_results (condition):
global count, session1pre, session2pre, session1post, session2post, num_p
session1pre = 0
session1post = 0
session2pre = 0
session2post = 0
high_risk_participant_ids = gatherPlayers(1,condition)
print (str(len(high_risk_participant_ids))+" high risk participants being considered")
print ("who finsihed only Session 1")
for p in mondgoDBplayers:
if(p['_id'] in high_risk_participant_ids):
gatherClicks(p)
print ("1Preclicks : "+str(session1pre))
print ("1Postclicks : "+str(session1post))
print (session1post/session1pre)
print ()
session1pre = 0
session1post = 0
session2pre = 0
session2post = 0
high_risk_participant_ids = gatherPlayers(2,condition)
print (str(len(high_risk_participant_ids))+" high risk participants being considered")
print ("who finsihed Session 2")
for p in mondgoDBplayers:
if(p['_id'] in high_risk_participant_ids):
gatherClicks(p)
print ("1Preclicks : "+str(session1pre))
print ("1Postclicks : "+str(session1post))
print (session1post/session1pre)
print ("2Preclicks : "+str(session2pre))
print ("2Postclicks : "+str(session2post))
print (session2post/session2pre)
| UTF-8 | Python | false | false | 2,655 | py | 124 | high_risk.py | 33 | 0.66629 | 0.626365 | 0 | 89 | 28.831461 | 120 |
catalystneuro/BIDS_ephys | 13,082,470,424,275 | 916e81503af42b9d8875820df788bdac90dec0bd | 7cfc83411e590178f9ec664fe4d8a677bc977bf7 | /BIDS_ext.py | 7d2fd8d2136fa51971ba1cbe60c566c4c0b7d98a | [] | no_license | https://github.com/catalystneuro/BIDS_ephys | 589f1ed4e6755ac2fe5af42085f2bb389c77f342 | 2600a48c9b72cd4df62ed8ed11477253cc593ffc | refs/heads/main | 2023-03-22T02:22:31.619368 | 2021-03-09T02:53:32 | 2021-03-09T02:53:32 | 345,683,057 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
from pathlib import Path
import pandas as pd
from pynwb import NWBHDF5IO
REQ_DATASETS = ['dataset_description.json', 'participants.tsv', 'sessions.tsv']
def bep_organize(dataset_path, output_path=None, move_nwb=False):
"""
    organize data according to the BIDS extension proposal
Parameters
----------
dataset_path : [str, Path]
path to the folder containing all the nwb datasets that need organization.
"""
dataset_path = Path(dataset_path)
if output_path is None:
output_path = dataset_path/'BIDSExt'
participants_df = pd.DataFrame(
columns=['Species', 'ParticipantID', 'Sex', 'Birthdate', 'Age', 'Genotype', 'Weight'])
conversion_dict = dict(milli=1e-3, micro=1e-6)
dataset_desc_json = None
for nwb_file in dataset_path.glob('**/*.nwb'):
channels_df = pd.DataFrame(columns=['channel_id', 'Contact_id', 'type', 'units', 'sampling_frequency'])
sessions_df = pd.DataFrame(columns=['session_id', '#_trials', 'comment'])
with NWBHDF5IO(str(nwb_file), 'r') as io:
nwbfile = io.read()
# subject info:
if nwbfile.subject is not None:
sb = nwbfile.subject
participants_df.loc[len(participants_df.index)] = \
[sb.species, sb.subject_id, sb.sex, sb.date_of_birth, sb.age, sb.genotype, sb.weight]
subject_label = f'sub-{sb.subject_id}'
# dataset info:
if dataset_desc_json is None:
dataset_desc_json = dict(InstitutionName=nwbfile.institution, InstitutionalDepartmentName=nwbfile.lab,
Name='Electrophysiology', BIDSVersion='1.0.X',
Licence='CC BY 4.0',
                                         Authors=list(nwbfile.experimenter) if nwbfile.experimenter is not None else None)
# sessions info:
            trials_len = len(nwbfile.trials) if nwbfile.trials is not None else None
sessions_df.loc[len(sessions_df.index)] = \
[nwbfile.session_id, trials_len, nwbfile.session_description]
session_label = f'ses-{nwbfile.session_id}'
# channels_info:
no_channels = nwbfile.acquisition['ElectricalSeries'].data.shape[1]
sampling_frequency = nwbfile.acquisition['ElectricalSeries'].rate
unit = nwbfile.acquisition['ElectricalSeries'].unit
conversion_factor = [i if j == nwbfile.acquisition['ElectricalSeries'].conversion else ''
for i, j in conversion_dict.items()][0]
for chan_no in range(no_channels):
channels_df.loc[len(channels_df.index)] = [chan_no, 'n.a.', 'neural signal', conversion_factor + unit,
sampling_frequency]
# construct the folders:
generic_ephys_name = f'{subject_label}_{session_label}_'
subject_path = output_path/subject_label
ses_path = subject_path/session_label
data_path = ses_path/'ephys'
data_path.mkdir(parents=True,exist_ok=True)
# move nwbfile
bep_nwbfile_path = data_path/(generic_ephys_name + 'ephys.nwb')
if move_nwb:
if not bep_nwbfile_path.exists():
nwb_file.replace(bep_nwbfile_path)
else:
if not bep_nwbfile_path.exists():
bep_nwbfile_path.symlink_to(nwb_file)
# channels.tsv:
bep_channels_path = data_path/(generic_ephys_name + 'channels.tsv')
if not bep_channels_path.exists():
channels_df.to_csv(bep_channels_path, sep='\t')
# create sessions.json
        bep_sessions_path = subject_path/f'{subject_label}_sessions.tsv'
if not bep_sessions_path.exists():
print(f'writing for subject: {subject_label}')
sessions_df.to_csv(bep_sessions_path, sep='\t')
# create participants.tsv:
    participants_df.to_csv(output_path/'participants.tsv', sep='\t')
# create dataset_desrciption.json
with open(output_path/'dataset_description.json', 'w') as j:
json.dump(dataset_desc_json, j)
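
# Example invocation (illustrative path):
# bep_organize('/data/nwb_dataset', move_nwb=False)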
| UTF-8 | Python | false | false | 4,241 | py | 5 | BIDS_ext.py | 4 | 0.593964 | 0.591134 | 0 | 89 | 46.651685 | 124 |
royels/Affirmpy | 10,548,439,701,416 | 62ea4cb9a295730a36ea4835460a13c16778a781 | 1f1456e43699dc02491fcf42437dba2bdcebadb9 | /affirmpy/errors/resource_not_found_error.py | bc38dff479cb20c1c8e6f8e90fccfa879577f473 | [
"MIT"
] | permissive | https://github.com/royels/Affirmpy | 5e0b8eedbaa26c3f7c56e01f950f262438561352 | 55f89c25916e32ddaa95683ce71a905c1c713c9b | refs/heads/master | 2021-01-10T04:26:50.652502 | 2016-02-06T22:54:54 | 2016-02-06T22:54:54 | 49,981,989 | 1 | 1 | null | false | 2017-05-18T05:21:36 | 2016-01-19T20:46:36 | 2016-02-14T07:35:34 | 2016-02-06T22:55:00 | 12 | 1 | 1 | 1 | Python | null | null | from .error import Error
class ResourceNotFoundError(Error):
pass | UTF-8 | Python | false | false | 71 | py | 13 | resource_not_found_error.py | 11 | 0.774648 | 0.774648 | 0 | 5 | 13.4 | 35 |
smwa/playlist_data | 9,388,798,520,816 | f335eda5804d0bfaef2e8a5770e2182199a1c6e7 | c9a30430ff1f18baa2d98071e68f9d4323f0c70a | /spotify_stats/track_details.py | e072bbed4b5b8a1b0d057a6554c261cb423440bb | [
"MIT"
] | permissive | https://github.com/smwa/playlist_data | 2d1c16e438015986a4e01b8df5b048e628c2394a | 01cd656eecc891dc8dc43dd1b8cc3f67a14bb9de | refs/heads/master | 2023-02-02T22:56:21.020963 | 2020-04-18T15:48:56 | 2020-04-18T15:48:56 | 256,791,292 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
class TrackDetails(object):
def serialize(self):
return json.dumps(self.__dict__)
def __repr__(self):
return self.serialize()
def __init__(self):
self.title = None
self.artist = None
self.duration = None
self.danceability = None
self.energy = None
self.loudness = None
self.speechiness = None
self.instrumentalness = None
self.valence = None
self.tempo = None
self.time_signature = None
self.key = None
| UTF-8 | Python | false | false | 542 | py | 6 | track_details.py | 4 | 0.570111 | 0.570111 | 0 | 22 | 23.636364 | 40 |
tjvick/advent-of-code | 3,521,873,227,731 | 744ef46816497ce42ffdb892831b6abc149348ec | 55901c400648fa51025bb03b90534b3b949c2016 | /2019_python/solutions/day8/part1.py | df140c9aaba871939d983166bdaa700f9406d6a7 | [] | no_license | https://github.com/tjvick/advent-of-code | 4e69b22667360929cf2e7ec00f8511d93ea11c73 | f6a7805f3022bdb5a18921be547ffb22e326c5ae | refs/heads/main | 2023-01-19T01:17:22.302237 | 2023-01-10T21:57:50 | 2023-01-10T21:57:50 | 223,826,215 | 1 | 0 | null | false | 2022-12-09T09:41:17 | 2019-11-24T23:34:53 | 2022-12-01T05:11:04 | 2022-12-09T09:41:17 | 762 | 2 | 0 | 2 | Python | false | false | from collections import Counter
import numpy as np
from functools import reduce
with open('input.txt', 'r') as f:
for line in f:
content = line.strip('\n')
a = np.array(list(content)).reshape([100, 6, 25])
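# 100 layers of a 6-row by 25-column image (AoC 2019 day 8 dimensions)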
def reduce_fn(acc, layer):
c = Counter(layer.flatten())
if c['0'] < acc[0]:
return c['0'], c['1']*c['2']
return acc
result = reduce(reduce_fn, (layer for layer in a), (np.inf, 0))
print(result[1])
| UTF-8 | Python | false | false | 444 | py | 225 | part1.py | 216 | 0.605856 | 0.576577 | 0 | 21 | 20.047619 | 63 |
david-uni/python-help | 4,818,953,345,194 | 0d7f766b26962a096d98f63cd9e41a8b6585e6ab | a21fda4ae7e445cce6463f2ea2204864c677626d | /homeworks/ex2 EN/ex2_012345678.py | 8167a3269d5fae0367348772c00d5952ce256aee | [] | no_license | https://github.com/david-uni/python-help | dd3e1e3f261b161e8105e499a2418a4c91bd5b50 | a84ececf16480ee373b68aa3c315d627723f6b14 | refs/heads/main | 2023-08-22T09:30:19.189487 | 2021-10-21T20:01:45 | 2021-10-21T20:01:45 | 329,426,121 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """ Exercise #2. Python for Engineers."""
#########################################
# Question 1 - do not delete this comment
#########################################
a = 3 # Replace the assignment with a positive integer to test your code.
lst = [1, 2, 3, 4, 5] # Replace the assignment with other lists to test your code.
# Write the rest of the code for question 1 below here.
# End of code for question 1
#########################################
# Question 2 - do not delete this comment
#########################################
lst2 = ['hello', 'world', 'course', 'python', 'day']
# Replace the assignment with other lists of strings (str) to test your code.
# Write the code for question 2 using a for loop below here.
# Write the code for question 2 using a while loop below here.
# End of code for question 2
#########################################
# Question 3 - do not delete this comment
#########################################
lst3 = [0, 1, 2, 3, 4] # Replace the assignment with other lists to test your code.
# Write the rest of the code for question 3 below here.
# End of code for question 3
#########################################
# Question 4 - do not delete this comment
#########################################
lst4 = [1, 2, 4, 7] # Replace the assignment with other lists to test your code.
# Write the rest of the code for question 4 below here.
# End of code for question 4
#########################################
# Question 5 - do not delete this comment
#########################################
my_string = 'abaadddefggg' # Replace the assignment with other strings to test your code.
k = 3 # Replace the assignment with a positive integer to test your code.
# Write the rest of the code for question 5 below here.
# End of code for question 5
| UTF-8 | Python | false | false | 1,819 | py | 80 | ex2_012345678.py | 61 | 0.543705 | 0.523914 | 0 | 66 | 26.560606 | 90 |
colinxr/notion-app | 4,131,758,541,596 | f4193a557ecf4b1c7cf137edf1a47d03700eee16 | 803f9a2785567bd797a4561e7fa8cf734781e91d | /__init__.py | 4afcc7cb820007aae3fece77ed0c6b7344dfba74 | [] | no_license | https://github.com/colinxr/notion-app | 98db746cb1d6dce24a4701f3ef0d7372b995dca4 | 832ebc25f8ece1e105603686180be47f1632d628 | refs/heads/master | 2022-12-10T04:18:17.010783 | 2019-12-03T19:36:44 | 2019-12-03T19:36:44 | 218,777,141 | 0 | 0 | null | false | 2022-12-08T06:48:36 | 2019-10-31T13:50:33 | 2019-12-03T19:36:51 | 2022-12-08T06:48:34 | 3,400 | 0 | 0 | 4 | Python | false | false | # from flask import Flask
# # Initialize the app
# app = Flask(__name__, instance_relative_config=True)
# from NotionApp import app as application
| UTF-8 | Python | false | false | 149 | py | 4 | __init__.py | 2 | 0.731544 | 0.731544 | 0 | 6 | 23.833333 | 54 |
aksh0001/algorithms-journal | 10,720,238,409,946 | 07692752f5f017679b8aff5cecaa552e4768e8aa | 6d00ac0aa55d3a60f7a70509f2c5118b134b41e8 | /questions/trees_graphs/DeepestLeavesSum.py | 811e408687a891b68bd02ccc06e14921c6c4eecf | [] | no_license | https://github.com/aksh0001/algorithms-journal | 9ff45646f3c870a2a08182338db9012b7a476d95 | de685690745a5a322e6233e1a3fd10a2d9539076 | refs/heads/master | 2022-01-22T06:16:27.039736 | 2022-01-18T20:10:20 | 2022-01-18T20:10:20 | 173,078,731 | 17 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Given a binary tree, return the sum of values of its deepest leaves.
Input: root = [1,2,3,4,5,null,6,7,null,null,null,null,8]
Output: 15
Approaches: 1) BFS with level tracking; 2) DFS with height helper
@author a.k
"""
from data_structures.trees.BinaryTree import pretty_print_tree, TreeNode, BinaryTree
"""
@Algorithm: 2 passes
1'st pass: Get height of tree to mark this is the level we need the sum of
2'nd pass: DFS through the tree to find the dl sum at that deepest level
"""
class DFS:
def height(self, root: TreeNode) -> int:
"""
Returns the height of the tree used to get the deepest leaves sum
:param root: root of tree
:return: height of tree
"""
return 0 if not root else max(self.height(root.left), self.height(root.right)) + 1
def dfs(self, root: TreeNode) -> int:
"""
Returns the dl sum using dfs.
:param root: root of tree
:Time: O(N)
:return: dl sum
"""
return self.dl_sum(root, 1, self.height(root))
def dl_sum(self, root: TreeNode, curr_level: int, deepest_level: int) -> int:
"""
Returns the deepest level sum using dfs
:param root: root of tree
:param curr_level: current level being considered
:param deepest_level: the pre-calculated deepest level from which to get the sum.
:Time: O(N)
:return: dl sum
"""
if not root: # Empty root has no sum
return 0
if curr_level == deepest_level: # If we are on the deepest level return this sum
return root.key
return self.dl_sum(root.left, curr_level + 1, deepest_level) + self.dl_sum(root.right, curr_level + 1,
deepest_level)
"""
BFS with level tracking
Calculate sum at each level, assuming that this sum represents the deepest leaves sum
When we get to a new level, a deeper level, update the sum to reflect a deeper level
If on same level, increment sum
"""
class BFS:
def deepestLeavesSum(self, root: TreeNode) -> int:
"""
Returns the dl sum of the tree.
:param root: root of tree
:Time: O(N)
:Space: O(N)
:return: dl sum
"""
q, dl_sum, curr_level = [(root, 1)], 0, -1
while q:
rm, rm_level = q.pop(0)
if rm_level != curr_level: # on a new deeper level, reset the sum and the current level
dl_sum, curr_level = rm.key, rm_level
else:
dl_sum += rm.key # If on the same level, simply accumulate the sum
if rm.left:
q.append((rm.left, rm_level+1))
if rm.right:
q.append((rm.right, rm_level+1))
return dl_sum
if __name__ == '__main__':
test = BinaryTree.build([1,2,3,4,5,None,6,7,None,None,None,None,8])
assert DFS().dfs(test) == 15, 'ERROR'
print('PASSED DFS')
assert BFS().deepestLeavesSum(test) == 15, 'ERROR'
print('PASSED BFS') | UTF-8 | Python | false | false | 3,055 | py | 102 | DeepestLeavesSum.py | 101 | 0.581342 | 0.568576 | 0 | 89 | 33.337079 | 110 |
bits2018wilp/python-lab | 17,308,718,223,420 | 9b041f31515daf73bcdfcdf748f2ede8a626c8b1 | 7cb7ebc3be2cc4d7378f71e8c0b0c32d6ad2e22c | /src/main/com/alikas/array_test.py | bbe982bc35b669597cd721e23768fac1c6187a42 | [] | no_license | https://github.com/bits2018wilp/python-lab | 2cf679e3d6745fe4de94b74d301e90191aeab79d | 67bead293b627f876fec1aa32586c8a5d602aa3e | refs/heads/master | 2020-12-31T09:01:03.979408 | 2020-03-22T02:01:33 | 2020-03-22T02:01:33 | 238,964,324 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from sklearn.datasets import load_digits
from sklearn.naive_bayes import BernoulliNB, GaussianNB, MultinomialNB
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn import datasets
datasets.load_digits()
cpath = 'D:\\kaggle\\basicshapes\\shapes\\'+ 'circles'
datasets.load_files(cpath)
a = [1, 2, 3, 4, 5]
print(len(a))
print(a[1])
a[2] = 8
print(a.append(88), a)  # list.append mutates in place and returns None
a = [1, 2, 3, 4, 5]
b = [1, 2, 3, 4, 5, 6]
print( a == b)
print( 5 in b)
| UTF-8 | Python | false | false | 454 | py | 254 | array_test.py | 254 | 0.685022 | 0.636564 | 0 | 21 | 20.619048 | 70 |
lwehmeier/erlang_bridge | 9,139,690,447,055 | 3f16b16c7bf62f5a199333194bd6623a931d5ba6 | 18c79186d52f94b5dd95aca696a5b73c8da7f3af | /bridge.py | d83e5afdba531d7df2bb9246fd8120345f1ab2ed | [] | no_license | https://github.com/lwehmeier/erlang_bridge | 5efc484bc5ef9304c8b299059375e11f636da63b | 8853a6268040a8b683933858d1367957ed8be095 | refs/heads/master | 2020-03-22T20:06:14.516407 | 2018-07-23T18:01:48 | 2018-07-23T18:01:48 | 140,572,901 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
import gevent
from gevent import monkey
monkey.patch_all()
import Pyrlang
from Pyrlang import Atom, Process
import rospy
from std_msgs.msg import String, Int16, Bool, Float32
from geometry_msgs.msg import Vector3
bp = None
global registeredPublishers
registeredListeners={}
registeredPublishers={}
def callback(msg, topic):
print(dir(msg))
global bp
rospy.loginfo(rospy.get_caller_id() + "I heard %s", msg.data)
bp.sendString(topic, msg.data)
def callback_vector3(msg, topic):
global bp
bp.sendString(topic, (msg.x, msg.y, msg.z))
def listener():
# In ROS, nodes are uniquely named. If two nodes with the same
# node are launched, the previous one is kicked off. The
# anonymous=True flag means that rospy will choose a unique
# name for our 'listener' node so that multiple listeners can
# run simultaneously.
rospy.init_node('listener', anonymous=True)
rospy.Subscriber("chatter", String, callback, "chatter")
rospy.spin()
class MyProcess(Process):
def __init__(self, node) -> None:
Process.__init__(self, node)
        self._node = node
self._node.register_name(self, Atom('pyBridge')) # optional
print("registering process - 'pyBridge'")
def handle_one_inbox_message(self, msg):
global registeredPublishers
#print("Incoming", msg)
remotePid = msg[0]
action = msg[1]
if action == Atom('stop'):
exit(0)
msgType = msg[2]
topic = msg[3]
if action == Atom('subscribe'):
if topic+"__"+str(msgType) in registeredListeners.keys():
print("already listening to topic " + topic)
self._node.send(sender=self.pid_, receiver=remotePid, message=(self.pid_, (Atom('err'), Atom('already_subscribed'))))
else:
print("subscribing to " + topic)
if msgType == Atom('string'):
sub=rospy.Subscriber(topic, String, callback, topic)
elif msgType == Atom('int16'):
sub=rospy.Subscriber(topic, Int16, callback, topic)
elif msgType == Atom('vector3'):
sub=rospy.Subscriber(topic, Vector3, callback_vector3, topic)
else:
self._node.send(sender=self.pid_, receiver=remotePid, message=(self.pid_, (Atom('err'), Atom('unknown_message_type'), msgType)))
return
registeredListeners[topic+"__"+str(msgType)]=sub
#print(self.pid_)
#print(remotePid)
self._node.send(sender=self.pid_, receiver=remotePid, message=(self.pid_, (Atom('ok'), topic)))
elif action == Atom('publish'):
data=msg[4]
if not topic in registeredPublishers.keys():
if msgType == Atom('string'):
registeredPublishers[topic]=rospy.Publisher(topic, String, queue_size=0)
rospy.sleep(.1)
if msgType == Atom('int16'):
registeredPublishers[topic]=rospy.Publisher(topic, Int16, queue_size=0)
rospy.sleep(.1)
if msgType == Atom('float32'):
registeredPublishers[topic]=rospy.Publisher(topic, Float32, queue_size=0)
rospy.sleep(0.1)
if msgType == Atom('bool'):
registeredPublishers[topic]=rospy.Publisher(topic, Bool, queue_size=0)
rospy.sleep(.1)
if msgType == Atom('string'):
registeredPublishers[topic].publish(String(data))
elif msgType == Atom('int16'):
registeredPublishers[topic].publish(Int16(data))
elif msgType == Atom('float32'):
registeredPublishers[topic].publish(Float32(data))
elif msgType == Atom('bool'):
registeredPublishers[topic].publish(Bool(data))
else:
self._node.send(sender=self.pid_, receiver=remotePid, message=(self.pid_, (Atom('err'), Atom('unknown_message_type'), msgType)))
return
self._node.send(sender=self.pid_, receiver=remotePid, message=(self.pid_, (Atom('ok'), topic)))
else:
self._node.send(sender=self.pid_, receiver=remotePid, message=(self.pid_, (Atom('err'), Atom('invalid_request'))))
def sendString(self, topic, data):
self._node.send(sender=self.pid_, receiver=(Atom('erl@rpi3'), Atom('erlBridge')), message=(self.pid_, (Atom('push'), topic, data)))
def sendTest(self):
self._node.send(sender=self.pid_, receiver=(Atom('erl@rpi3'), Atom('erlBridge')), message=(self.pid_, (Atom('acc'), (42, 43, 44))))
def main():
global bp
node = Pyrlang.Node("py@rpi3", "Cookie42")
node.start()
# this automatically schedules itself to run via gevent
bp = MyProcess(node)
gevent.sleep(0.1)
gevent.sleep(0.1)
gevent.sleep(0.1)
gevent.sleep(1.1)
bp.sendTest()
#while True:
listener()
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 5,095 | py | 4 | bridge.py | 2 | 0.586065 | 0.573503 | 0 | 120 | 41.458333 | 148 |
AMBE1203/BasicML | 2,954,937,517,779 | 2efadb4de2f87e0f60fbfe022ce2d7736378f9cd | 09457b45afd2b1c36730dc8b3c89bb2475b9f435 | /demo/DemoNBC.py | 477c37f038f434070bc3d6db2a929d902bede323 | [] | no_license | https://github.com/AMBE1203/BasicML | 18b32dc3dba2d7b866882103c0733c505fcf6571 | bdd96b6587a1e9a091e9c309d76afbaa10d70f11 | refs/heads/master | 2020-05-17T05:47:45.508320 | 2019-07-15T08:58:04 | 2019-07-15T08:58:04 | 183,542,397 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import print_function
import numpy as np
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from scipy.sparse import coo_matrix # for sparse matrices (far more zero entries than non-zero ones)
from sklearn.metrics import accuracy_score # for evaluating results
# data path and file name
path = './data/ex6DataPrepared/'
train_data_fn = 'train-features.txt'
test_data_fn = 'test-features.txt'
train_label_fn = 'train-labels.txt'
test_label_fn = 'test-labels.txt'
nwords = 2500
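# size of the dictionary used when this dataset was prepared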
# read the data from data_fn and the matching labels stored in label_fn
def read_data(data_fn, label_fn):
## read label_fn
with open(path+label_fn) as f:
content = f.readlines()
    label = [int(x.strip()) for x in content] # x.strip() removes leading/trailing whitespace from each line
## read data_fn
with open(path+data_fn) as f:
content = f.readlines()
content = [x.strip() for x in content]
dat = np.zeros((len(content), 3), dtype = int)
for i, line in enumerate(content):
a = line.split(' ')
dat[i,:] = np.array([int(a[0]), int(a[1]), int(a[2])])
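    # build a sparse document-term matrix: row = document, col = word id, value = count (ids in the file are 1-based, hence the -1)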
data = coo_matrix((dat[:,2], (dat[:,0]-1, dat[:,1]-1)), shape = (len(label), nwords))
return (data, label)
(train_data, train_label) = read_data(train_data_fn, train_label_fn)
(test_data, test_label) = read_data(test_data_fn, test_label_fn)
clf = MultinomialNB()
clf.fit(train_data, train_label)
y_pred = clf.predict(test_data)
print('Training size = %d, accuracy = %.2f%%' %(train_data.shape[0],accuracy_score(test_label, y_pred)*100))
# ===== gitonga123/Learning-Python :: /forrange.py =====
print('My name is')
for i in range(1, 11, 2):  # odd numbers 1, 3, 5, 7, 9 -> five iterations
print('Jimmy ', 10,' Times (' + str(i) + ')')
# ===== pierreNomazy/lfs-sytempay :: /urls.py =====
from django.conf.urls.defaults import *
# Register the retour-server and retour-boutique URLs in the back office of your bank's payment system.
urlpatterns = patterns('systempay.views',
url(r'^-retour-server$', 'retour_server', name="systempay_retour_server"),
url(r'^-retour-boutique$', 'retour_boutique', name="systempay_retour_boutique"),
)
# ===== saguanamu/likelion_8 :: /crudapp/admin.py =====
from django.contrib import admin
from .models import Crudapp
# Register your models here.
admin.site.register(Crudapp)
# ===== Lesharudchinskiy/pyworkout :: /python_workout_lesson_1.py =====
import random
def guessing_game():
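    # classic guess-the-number: keep prompting until the user finds the secret value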
rand_num = random.randint(1, 100)
while True:
user_num = input('Print your number here:')
if user_num.isdigit():
            if int(user_num) > rand_num:
                print('Too high, try again')
            elif int(user_num) < rand_num:
                print('Too low, try again')
            else:
                print(f'Just right! The number was: {rand_num}')
                break
else:
            print(f'Your input "{user_num}" is not a number, please use digits only')
guessing_game()
# ===== Smithgggg/Project :: /PlaneWar-master1/hm_02_使用Rect描述英雄.py =====
import pygame
hero_rect = pygame.Rect(100, 500, 120, 125)
print("主人公の起源 %d %d" % (hero_rect.x, hero_rect.y))
print("ヒーローサイズ %d %d" % (hero_rect.width, hero_rect.height))
print("%d %d" % hero_rect.size)
# ===== my-xh/hengDaProject :: /contactApp/views.py =====
from django.shortcuts import render, HttpResponse
from .models import Ad
from .forms import ResumeForm
# Create your views here.
def contact(request):
return render(request, 'contact.html', {
'active_menu': 'contact',
'sub_menu': 'contactus',
})
def recruit(request):
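    # newest ads first; a POST here carries a resume submission from the recruit page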
ad_list = Ad.objects.all().order_by('-publish_date')
if request.method == 'POST':
resume_form = ResumeForm(data=request.POST, files=request.FILES)
if resume_form.is_valid():
resume_form.save()
return render(request, 'success.html', {
'active_menu': 'contact',
'sub_menu': 'recruit',
})
else:
resume_form = ResumeForm()
return render(request, 'recruit.html', {
'active_menu': 'contact',
'sub_menu': 'recruit',
'AdList': ad_list,
'resumeForm': resume_form,
})
# ===== david-talabard/fastapi-sample :: /api/models/item.py =====
from pydantic import BaseModel
from api.models.base import DBModelMixin
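# DBModelMixin is assumed to contribute the persisted fields (e.g. the database id)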
class ItemIn(BaseModel):
"""
Defined an item for a creation
"""
name: str
class Item(DBModelMixin, ItemIn):
"""
Defined an item model
"""
createdBy: str
# ===== srampv/chessui :: /scripts/chess_db.py =====
from peewee import *
import argparse
import csv
import json  # was missing; import_data() parses one JSON object per line
import operator
import os
import logging
logger = logging.getLogger('peewee')
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
INDEX_TOTAL_GAME_COUNT = "total_game_count"
SQLITE_GAME_LIMIT = 990
INDEX_FILE_POS = "last_pos"
DB_HEADER_MAP = {"White": 0, "WhiteElo": 1, "Black": 2,
"BlackElo": 3, "Result": 4, "Date": 5, "Event": 6, "Site": 7,
"ECO": 8, INDEX_FILE_POS: 9, "FEN": 10, "PlyCount": 11, "EventDate": 12, "EventType": 13}
# FRONTEND_TO_BACKEND_ATTR_MAP = {'White': 'white', 'White Elo'}
def get_operator_fn(op):
return {
'+' : operator.add,
'-' : operator.sub,
'*' : operator.mul,
        '/' : operator.truediv,  # operator.div no longer exists in Python 3
'%' : operator.mod,
'^' : operator.xor,
'>' : operator.gt,
'<' : operator.lt,
}[op]
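# e.g. (hypothetical) get_operator_fn('>')(Game.white_elo, 2700) builds the peewee
# expression "white_elo > 2700" that query_data() passes to .where()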
#
# def get_game(db_index, game_num):
# # db_index = self.ref_db_index_book
# print (db_index.Get("game_0_data", regular=True))
# first = db_index.Get("game_{0}_data".format(game_num), regular=True).split("|")[DB_HEADER_MAP[INDEX_FILE_POS]]
# # if game_num+1 < self.pgn_index[INDEX_TOTAL_GAME_COUNT]:
# # second = self.db_index_book.Get("game_{0}_{1}".format(game_num+1,INDEX_FILE_POS))
# # second = self.pgn_index["game_index_{0}".format(game_num+1)][INDEX_FILE_POS]
# try:
# second = db_index.Get("game_{0}_data".format(game_num + 1), regular=True).split("|")[DB_HEADER_MAP[INDEX_FILE_POS]]
# second = int(second)
# except KeyError:
# second = None
#
# file_name = db_index.Get("pgn_filename", regular=True)
# if not os.path.isfile(file_name):
# file_name = file_name.replace("home", "Users")
#
# with open(file_name) as f:
# first = int(first)
#
# f.seek(first)
# line = 1
# lines = []
# first_line = False
# while line:
# line = f.readline()
# temp = line.strip()
# if not first_line:
# temp = '[' + temp
# first_line = True
# # line = line.strip()
# pos = f.tell()
# if second and pos >= second:
# break
# # print pos
# if temp:
# lines.append(temp)
# # f.close()
# # print lines
# return lines
def query_data(game, game_ids=None, order_by_list=None, limit=10, page_number=None, items_per_page=None,
search_terms=None):
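    # search term grammar: 'field>value', 'field<value', 'field=value',
    # or a bare string matched against the white and black player names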
query = game.select()
# query = query.where(Game.black ** ('%%%s%%' % ('Carlsen')) | Game.white ** ('%%%s%%' % ('Carlsen')))
# print dir(Game)
# print Game.
    query = query.where(getattr(game, 'white') ** ('%%%s%%' % ('Carlsen')))  # hardcoded debug filter left in; uses the model passed in rather than the global
if search_terms:
for t in search_terms:
if '>' in t or '<' in t or '=' in t:
opr = '='
if '>' in t:
opr = '>'
if '<' in t:
opr = '<'
attr, value = t.split(opr)
query = query.where(get_operator_fn(opr)(getattr(game, attr), value))
# elif t == '1-0' or t == '1/2-1/2' or t == '0-1':
# query = query.where(game.result == t)
# elif t.isdigit():
# # print "digit"
# # print t
# num = int(t)
# if num < 2050:
# # Its a year
# query = query.where(game.date ** ('%%%s%%' % (t)))
else:
query = query.where(game.black ** ('%%%s%%' % (t)) | game.white ** ('%%%s%%' % (t)))
if game_ids and len(game_ids) <= SQLITE_GAME_LIMIT:
        query = query.where(getattr(game, 'id') << game_ids)  # << is peewee's SQL IN operator
if order_by_list:
# Construct peewee order by clause
order_by_cond = []
for sort_key in order_by_list:
if sort_key.direction > 0:
order_by_cond.append(getattr(game, sort_key.name).asc())
else:
order_by_cond.append(getattr(game, sort_key.name).desc())
query = query.order_by(*order_by_cond)
# getattr(Game,'black_elo').asc(), getattr(Game,'eco').asc()
if page_number and items_per_page:
query = query.paginate(page_number, items_per_page)
# query = query.limit(limit)
results = [p for p in query]
for r in results:
print ("{0} {1} - {2} {3} {4} {5} {6}".format(r.white, r.white_elo, r.black, r.black_elo, r.result, r.date, r.eco))
# print r.black
return results
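# Example call (hypothetical values):
#   query_data(Game, search_terms=['white_elo>2700', 'Kasparov'], page_number=1, items_per_page=50)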
# def import_data_csv(out_file):
# db_index = leveldb.LevelDB('../frontend/resources/polyglot_index.db')
# # db.execute_sql("pragma synchronous = off;")
# total_games = int(db_index.Get(INDEX_TOTAL_GAME_COUNT))
# print "total_games: {0}".format(total_games)
# # writer = csv.writer(open(out_file,'wb'), delimiter='|')
#
# with open(out_file, 'wb') as csv_file:
# writer = csv.writer(csv_file, delimiter='|')
# writer.writerow(['white', 'white_elo', 'black', 'black_elo', 'result', 'date', 'event', 'site', 'eco', 'ply'])
# for i in xrange(1, total_games-1):
# try:
#
# headers = db_index.Get("game_{0}_data".format(i), regular=True).split("|")
# # print len(headers)
# row = headers[:9]
# row.append(headers[11])
# # print row
# writer.writerow(row)
# except KeyError:
# print "error getting game {0}".format(i)
# if i % 10000 == 0:
# print i
# # if i>5:
# # break
def import_data(json_path):
Game.create_table(fail_silently=True)
# db_index = leveldb.LevelDB('../frontend/resources/polyglot_index.db')
# db_index = PartitionedLevelDB('../frontend/resources/white.db')
#leveldb_path = '../frontend/resources/paramount_2015.db'
#pgn_path = '../frontend/resources/paramount_2015.pgn'
if not os.path.exists(json_path):
# command = "polyglot make-book -pgn '{0}' -leveldb '{1}' -min-game 1".format(pgn_path, leveldb_path)
# print command
# os.system(command)
print("Need a JSON path")
# db_index = PartitionedLevelDB(leveldb_path)
# db.execute_sql("pragma synchronous = off;")
# print db_index.Get(INDEX_TOTAL_GAME_COUNT)
# total_games = int(db_index.Get(INDEX_TOTAL_GAME_COUNT, regular=True))
# print "total_games: {0}".format(total_games)
# headers = db_index.Get("game_{0}_data".format(g)).split("|")
# records.append ({"White": headers[0], "WhiteElo": headers[1], "Black": headers[2],
# "BlackElo": headers[3], "Result": headers[4], "Date": headers[5], "Event": headers[6], "Site": headers[7],
# "ECO": headers[8]})
# total_games-1
num_games = 0
batch = []
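    # accumulate rows and bulk-insert them in batches to keep SQLite writes fast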
with open(json_path) as fp:
for i, line in enumerate(fp):
# print(line)
try:
                j = json.loads(line)  # stdlib json has no decode(); loads() parses one object per line
# print(j)
            except ValueError:  # malformed line: log it and keep going
print(line)
continue
# raise
# line = line.replace("\", "\\")
try:
g = Game()
g.offset = j.get('offset', None)
g.offset_8 = j.get('offset_8', None)
g.white = j.get('White', None)
g.white_elo = j.get('WhiteElo', 2400)
g.black = j.get('Black', None)
g.black_elo = j.get('BlackElo', 2400)
g.result = j.get('Result', None)
g.date = j.get('Date', None)
g.event = j.get('Event', None)
g.site = j.get('Site', None)
g.eco = j.get('ECO', '')
if g.white_elo == '*':
g.white_elo = 0
if g.black_elo == '*':
g.black_elo = 0
batch.append(g.as_dict())
try:
# Game.create(g)
# with db.atomic():
if i % 20000 == 0:
print(i)
# g.save()
# Game.create(g)
with db.atomic():
Game.insert_many(batch).execute()
batch = []
# print("num_games: {}".format(num_games))
# num_games+=1
except ValueError:
# raise
print(g.white)
print(g.white_elo)
print(g.black)
print(g.black_elo)
print(g.result)
print(g.date)
print(g.event)
print(g.site)
print(g.eco)
except KeyError:
print("error getting game {0}".format(i))
with db.atomic():
if batch:
Game.insert_many(batch).execute()
batch = []
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_file', help='Input JSON path')
parser.add_argument('-o', '--output_file', default='game.db', help='Output DB path')
arg = parser.parse_args()
db = SqliteDatabase(arg.output_file)
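    # the model is declared here so its Meta.database can bind to the handle created above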
class Game(Model):
offset = IntegerField(primary_key=True)
offset_8 = IntegerField(index=True, null=True)
white = CharField(index=True, null=True)
white_elo = IntegerField(index=True, null=True)
black = CharField(index=True, null=True)
black_elo = IntegerField(index=True, null=True)
result = CharField(index=True, null=True)
date = DateField(index=True, null=True)
event = CharField(index=True, null = True)
site = CharField(index=True, null=True)
eco = CharField(index=True, null=True)
class Meta:
database = db
# def __str__(self):
# return "White: {0}, white_elo: {1}, black: {2}, black_elo: {3}, result: {4}, date: {5}, event: {6}, site: {7}, " \
# "eco: {8}".format(white, white_elo, black, black_elo, result, date, event, site, eco)
#
def as_dict(self):
return {
'offset': self.offset,
'offset_8': self.offset_8,
'white': self.white,
'white_elo': self.white_elo,
'black': self.black,
'black_elo': self.black_elo,
'result': self.result,
'date': self.date,
'event': self.event,
'site': self.site,
'eco': self.eco,
}
import_data(arg.input_file)
games = query_data(Game, limit=10)
HarshitaSingh97/FetchGoogleTrends | 3,813,930,986,782 | a0d7bf9f32b25ac8f6792b594a53fff61d66e0c6 | b599e8531a940ee32ea47018ea0aea2789d5ea3f | /flask/lib/python3.5/types.py | e5c986abef8a8477a33d0c6bb4da971e910632e9 | [] | no_license | https://github.com/HarshitaSingh97/FetchGoogleTrends | 3bfaba9ac2b365530beeb8f740c6ca8de09e84e0 | 2acc62d42b1a4fc832c78fc1e4290d7531e25dcd | refs/heads/master | 2022-12-10T14:30:47.772224 | 2018-07-13T18:25:46 | 2018-07-13T18:25:46 | 138,040,665 | 3 | 0 | null | false | 2022-07-06T19:49:43 | 2018-06-20T14:02:16 | 2020-11-23T13:03:26 | 2022-07-06T19:49:41 | 44,642 | 2 | 0 | 4 | Python | false | false | /home/harshita/anaconda3/lib/python3.5/types.py | UTF-8 | Python | false | false | 47 | py | 58 | types.py | 49 | 0.829787 | 0.765957 | 0 | 1 | 47 | 47 |
enrijetashino/python-lectures | 15,891,379,023,547 | 53a23b2473285ab2d02b74b0cabccbdec2c42f20 | 01dbcfbd8524291f4ed3f6a6654ac01e8134fee9 | /lecture3.py | 5d7eda4b97885aa28be81b1ee68d1429f22c060c | [] | no_license | https://github.com/enrijetashino/python-lectures | f1ea03f529b08d93be29a88f1f28c86452a2ffb8 | 7c9d0908ddf205950ff63b9a6fc8ef27a8a70290 | refs/heads/master | 2021-04-06T12:42:03.204620 | 2018-06-21T01:57:50 | 2018-06-21T01:57:50 | 124,961,134 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 31 16:26:22 2018
@author: enrijetashino
"""
#------------------------#
# A 3rd Python Session #
#------------------------#
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pandas import DataFrame as df
from sklearn import datasets
import seaborn.apionly as sns
# Create a dataframe
dt = {'kids': ['Jack', 'Jill', 'John'],
'ages': [12, 10, 11],
'height': ["4'10", "4'5", "4'8"]}
dt = pd.DataFrame(data = dt)
new = [1, 2, 3]
dt['new'] = new # Add a new column to dataframe
# Renamme the column new
dt.rename(columns = {'new': 'id'}, inplace = True)
# Create a new dataset
dt2 = {'kids': ["Noah", "Emma"], 'ages': [8,10], 'height': ["4'3", "4'5"],
'id': [4, 5]}
dt2 = pd.DataFrame(dt2)
dt = dt.append(dt2, ignore_index=True)
# Or you can ignore the index
# ACCESSING DATA FRAMES IN PYTHON
# To select the variable "kids", use []
dt['kids']
# Or we can use iloc[]
dt.iloc[:,3]
# To select certain observations, use []:
dt.iloc[0,:]
dt.iloc[2:4,:]
# Import babynames dataset
dt = pd.read_csv("/Users/enrijetashino/Desktop/babynames.csv")
dt = pd.DataFrame(dt)
dt.head(5) # Shows the first observations only
dt.tail(5) # Shows the last observations only
dt.columns # Column names
len(dt) # Number of rows
ones = np.repeat(1,len(dt))
ones = pd.Series(ones)
dt = dt.assign(constant = ones.values) # Add a new column of ones
dt['constant']
# Change the name of the new first and last column
dt.rename(columns = {'Unnamed: 0': 'obs', 'constant': 'ones'}, inplace = True)
# Describe the data
dt.describe()
# First, let's look at the most common girl names
# The data is sorted such that it shows the most popular girl name in each year first
# -> Want to keep only the first observation (row) for each year:
dt.groupby('year').first()
# Now, let's get the most common boy names
# First, let's restrict the sample to boys:
dt_boys = dt[dt.sex == "M"]
dt_boys.groupby(['year']).first()
# Now, suppose we want both
popular_names = dt.groupby(["year", "sex"]).first()
# Very often you'll want to sort data in certain way
# For example, in the popular_names - file you might want to show girls first and then
# boys:
# popular_names.sort_values('sex')
data = pd.read_csv("/Users/enrijetashino/Desktop/fueleconomy.csv")
# Dataset shows miles per gallon for a broad range of cars in different years
# Let's get rid of Electric cars and just look at 2015
data_sub = data[(data['year'] == 2015) & (data['fuel'] != 'Electricity')]
data_sub.head(5)
# USING FUNCTIONS ON DATA FRAMES
# You can use functions both on variables and observations
# For example:
data['hwy'].mean()
data['hwy'].describe()
# You can also differentiate by group:
EffYear = data.groupby(['year'])['hwy'].mean()
EffClass = data.groupby('class')['hwy'].mean()
# You can either delete a variable with
data.drop(['trans'], 1, inplace=True) # Delete variable trans
# Or decide which ones you want to keep
data_sub = data[['make', 'model', 'hwy']]
data_sub.head(5)
data_sub = data[list(data.columns[:2])] # Keep columns 0 and 1
# It is particularly convenient to restrict your dataset to only some observations
# Use square brackets [] to include conditions that select only a subset of observations:
data_sub = data[data["year"] >= 2000]
data_sub = data[(data["year"] >= 2000) & (data["make"] == "Ford")]
data_sub = data[(data["make"] == "Buick") | (data["make"] == "Ford")]
# To show the number of observations by make
ObsMake = data.groupby(["make"]).size()
# Or using this way
ObsMake = data.groupby(["make"])["make"].agg(['count'])
# Or it can be done this way
ObsMake = data['make'].value_counts()
# Show the number of models by make in year 1984
data[data['year'] == 1984].groupby('make')['model'].count().to_frame()
# Show the number of models by make through all the years
all_years_make = data.groupby(['year', 'make'])['model'].count().to_frame()
# You can also restrict the sample to only those brands that have at least
# a minimum nr of observations:
# First create the variable "Counts" as follows and add it to the dataframe
data.groupby(['make'])['make'].agg(['count'])
data['Counts'] = data.groupby(['make'])['make'].transform('count')
# Now keep only the observations for which "Counts" > 1000
data[data['Counts'] > 1000]
| UTF-8 | Python | false | false | 4,494 | py | 7 | lecture3.py | 6 | 0.656653 | 0.636182 | 0 | 183 | 23.415301 | 89 |
wuxum/learn-python | 16,733,192,612,990 | 4830538e467f6f1e27996525098eb24c82aa0b12 | 2a9462df27de48353c9f94aeb34f108175bdf51c | /26-try-except.py | 7a454f174c25b88ebe45624b2c454af5b74c8392 | [] | no_license | https://github.com/wuxum/learn-python | c6bc47c5ea7b67d2265ff14949e3b22050857c14 | 8bfff28a254c7c3029f40b84718eb5413d7bbbeb | refs/heads/master | 2020-03-20T14:53:25.935746 | 2018-06-15T14:42:19 | 2018-06-15T14:42:19 | 137,498,202 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# website: https://pythonbasics.org/try-except/
try:
x = input("Enter number: ")
x = x + 1
print(x)
except:
print("Invalid input")
| UTF-8 | Python | false | false | 165 | py | 25 | 26-try-except.py | 24 | 0.606061 | 0.6 | 0 | 8 | 19.625 | 47 |
wangjhfree/pydev | 14,061,722,950,481 | 0c2864bb5f1773534712525fb6ff454e5e9986e8 | 300e0493cbce121ad06e2a83935d1fa932294f44 | /tasks.py | 03e7aa7dce69f58d56bbbefaaa1a13f2d9f799ca | [] | no_license | https://github.com/wangjhfree/pydev | 3ba1a9013bb39f635cf6af7b53a13cb6ee2b5ddb | 04cf3dfb32f857dfe3e7af6f52fc8f5e815f629c | refs/heads/master | 2020-02-07T12:02:23.971281 | 2017-09-30T08:48:37 | 2017-09-30T08:48:37 | 99,209,590 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# coding=utf-8
from celery import Celery
app = Celery('tasks')
#app.conf.update()
app.config_from_object('celeryconfig')
import time
@app.task
def add(x, y):
time.sleep(5)
return x + y
| UTF-8 | Python | false | false | 221 | py | 22 | tasks.py | 20 | 0.669683 | 0.660633 | 0 | 17 | 12 | 38 |
KaoTzuChi/web-app-api-example-dev | 5,789,615,925,352 | e82c2b37e6dbd6fc09ecf728f2c0917a4b9ef7e5 | 756dfd587b41ef10265781e19343bfae02db4924 | /mywebapp/views/data/data.py | b8179a6e8325a4f2f76584e4f07d86d42b24bbb9 | [] | no_license | https://github.com/KaoTzuChi/web-app-api-example-dev | 1378ac3220b53ccfe1a08a477e5dbe861970059c | 746feb44842a2b0c35034ef5834b337dbf2bf132 | refs/heads/master | 2022-07-17T19:07:22.127206 | 2020-05-20T17:49:28 | 2020-05-20T17:49:28 | 265,639,905 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render, redirect
from django.http import HttpResponse
#from django.contrib import messages
from django import forms
from django.core.exceptions import ValidationError
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.urls import reverse
import requests, datetime, json
import mywebapp.settings as settings
from bson.objectid import ObjectId
from views.utility import dict_key
from services.dataaccess import models
from services.dataaccess import serializers as api_ser
#chkoptions = ['a','b','c','d','e','f','g','h']
chkoptions = ['a','b','c','d']
rdooptions = [1.11,2.22,3.33,4.44]
seloptions = [
datetime.date.today().strftime('%Y-%m-%dT%H:%M:%SZ'),
(datetime.date.today()+ datetime.timedelta(weeks=1)).strftime('%Y-%m-%dT%H:%M:%SZ'),
(datetime.date.today()+ datetime.timedelta(weeks=2)).strftime('%Y-%m-%dT%H:%M:%SZ'),
(datetime.date.today()+ datetime.timedelta(weeks=3)).strftime('%Y-%m-%dT%H:%M:%SZ'),
]
#dictkeyvaluess = {'item1':'1', 'item2':'2','item3':'3','item4':'4','item5':'5'}
dictkeyvaluess = dict(item1='', item2='',item3='',item4='',item5='')
def data_query(request):
response = requests.get( settings.BASE_URL + 'read_mycollectionone_all/')
receiveddata = None
if response:
receiveddata = response.json()
return render(request, 'data_query.html', {
'receiveddata': receiveddata,
'chkoptions' : chkoptions,
'rdooptions' : rdooptions,
'seloptions' : seloptions,
'dictkeyvaluess' : dictkeyvaluess,
})
def data_search(request, field, value):
print('data_search field=',field,' value=',value)
response = requests.get( settings.BASE_URL + 'read_mycollectionone_byfield/'+field+'/'+value+'/')
receiveddata = None
if response:
receiveddata = response.json()
return render(request, 'data_query.html', {
'receiveddata': receiveddata,
'chkoptions' : chkoptions,
'rdooptions' : rdooptions,
'seloptions' : seloptions,
'dictkeyvaluess' : dictkeyvaluess,
})
def data_create(request):
receiveddata = models.mycollectiononeModel(
ObjectId('5eb7f31938a5fd5d85ccf595'),
'field1test',
dictkeyvaluess,
datetime.date.today(),
3.33,
['c','a']
)
return render(request, 'data_create.html', {
'chkoptions' : chkoptions,
'rdooptions' : rdooptions,
'seloptions' : seloptions,
'dictkeyvaluess' : dictkeyvaluess,
'receiveddata':receiveddata,
})
def data_detail(request, id):
response = requests.get( settings.BASE_URL + 'read_mycollectionone_byid/'+id+'/')
receiveddata = None
if response:
receiveddata = response.json()
receiveddata['field14'] = float(receiveddata['field14'] )
return render(request, 'data_detail.html', {
'chkoptions' : chkoptions,
'rdooptions' : rdooptions,
'seloptions' : seloptions,
'dictkeyvaluess' : dictkeyvaluess,
'receiveddata':receiveddata,
})
def data_update(request, id):
response = requests.get( settings.BASE_URL + 'read_mycollectionone_byid/'+id+'/')
receiveddata = None
if response:
receiveddata = response.json()
receiveddata['field14'] = float(receiveddata['field14'] )
return render(request, 'data_update.html', {
'chkoptions' : chkoptions,
'rdooptions' : rdooptions,
'seloptions' : seloptions,
'dictkeyvaluess' : dictkeyvaluess,
'receiveddata':receiveddata,
})
def data_create_action(request):
if request.method == 'POST':
posteddata = dict(request.POST)
field12dict = dict()
for k in dictkeyvaluess:
if len(posteddata['field12.'+k][0].strip())>0:
field12dict.update({ k : posteddata['field12.'+k][0].strip()})
model_data = {
'objectid' : posteddata['objectid'][0],
'field11' : posteddata['field11'][0].strip(),
'field12' : field12dict,
'field13' : posteddata['field13'][0],
'field14' : float(posteddata['field14'][0]),
'field15' : posteddata['field15']
}
response = requests.post( settings.BASE_URL + 'create_doc_in_mycollectionone_return_newone/', json=json.dumps(model_data) )
#postedform = DataForm(request.POST)
#if form.is_valid():
#book_inst.due_back = form.cleaned_data['renewal_date']
#book_inst.save()
#return HttpResponseRedirect(reverse('all-borrowed') )
else:
pass
return redirect('/data/query/')
def data_update_action(request):
if request.method == 'POST':
posteddata = dict(request.POST)
field12dict = dict()
for k in dictkeyvaluess:
if len(posteddata['field12.'+k][0].strip())>0:
field12dict.update({ k : posteddata['field12.'+k][0].strip()})
model_data = {
'objectid' : posteddata['objectid'][0],
'field11' : posteddata['field11'][0].strip(),
'field12' : field12dict,
'field13' : posteddata['field13'][0],
'field14' : float(posteddata['field14'][0]),
'field15' : posteddata['field15']
}
response = requests.post( settings.BASE_URL + 'replace_doc_in_mycollectionone_return_newone/', json=json.dumps(model_data) )
else:
pass
return redirect('/data/query/')
def data_delete_action(request):
if request.method == 'POST':
posteddata = dict(request.POST)
model_data = {
'objectid' : posteddata['objectid'][0],
}
response = requests.post( settings.BASE_URL + 'delete_doc_in_mycollectionone_return_count/', json=json.dumps(model_data) )
else:
pass
return redirect('/data/query/')
'''
class DataForm(forms.Form):
field11 = forms.CharField(help_text="Enter a date for field11.")
#field12 = forms.MultiValueField(help_text="Enter a date for field12.")
field12 = forms.MultipleChoiceField(help_text="Enter a date for field12.")
#field13 = forms.DateTimeField(help_text="Enter a date for field13.")
field13 = forms.ChoiceField(help_text="Enter a date for field13.")
#field14 = forms.DecimalField(help_text="Enter a date for field14.")
field14 = forms.ChoiceField(help_text="Enter a date for field14.")
field15 = forms.MultipleChoiceField(help_text="Enter a date for field15.")
def clean_field11(self):
check = self.cleaned_data['field11']
if len(check) < 5:
raise ValidationError(_('Invalid data - <5'))
if len(check) > 12:
raise ValidationError(_('Invalid data - >12'))
return check
BooleanField, CharField, ChoiceField, TypedChoiceField, DateField, DateTimeField,
DecimalField, DurationField, EmailField, FileField, FilePathField, FloatField, ImageField,
IntegerField, GenericIPAddressField, MultipleChoiceField, TypedMultipleChoiceField, NullBooleanField,
RegexField, SlugField, TimeField, URLField, UUIDField, ComboField, MultiValueField,
SplitDateTimeField, ModelMultipleChoiceField, ModelChoiceField.
''' | UTF-8 | Python | false | false | 7,280 | py | 60 | data.py | 39 | 0.633526 | 0.609873 | 0 | 193 | 36.683938 | 133 |
lbkelly/j_acezero | 2,860,448,225,581 | c8b4d153ae7067d17d22d9324dbd83ec53e27b75 | 931ac366e2928af6e211949e75de02d55ccb593b | /Programs/1b/ace_zero-v09/ace0/charts.py | b42cda49252beeebf5d07d6c434513553f7dba59 | [] | no_license | https://github.com/lbkelly/j_acezero | 0279c7024410c273dc010ad4f682aac401826a27 | 84745e0ff54df64168253cdccf46c63a96fd517a | refs/heads/master | 2022-12-07T17:32:36.283051 | 2020-08-26T04:31:15 | 2020-08-26T04:31:15 | 290,392,035 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/env python
"""
Plotting routines for debugging and visualisation.
This module contains function for plotting the traces of aircraft using matplotlib.
"""
__author__ = 'mikepsn'
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d
from mpl_toolkits.mplot3d import axes3d
import datetime
import platform
import sys
import numpy as np
import os
def draw_trajectories(trace1, trace2, vname, cname, path, viper_score, cobra_score, run_no): #
title = vname + "(Blue" + str(viper_score) + ") VS " + cname + "(Red " + str(cobra_score) + ")" + "run " + str(run_no)
header = "Blue: " + str(viper_score) + " VS Red: " + str(cobra_score)
fig = plt.figure()
ax = fig.gca(projection='3d')
plt.grid(True)
plt.title(header, loc='left')
plt.axis('equal')
ax.set_xlabel('x(object)')
ax.set_ylabel('y(object)')
ax.set_zlabel('z(object)')
ta1, xa1, ya1, za1 = ([], [], [], [])
for timeslice in trace1:
t, x, y, z, vx, vy, vz, accel_x, accel_y, accel_z, psi, theta, phi, v, weight, fuel = timeslice
ta1.append(t)
xa1.append(x)
ya1.append(y)
za1.append(z)
xmin = min(xa1)
xmax = max(xa1)
ymin = min(ya1)
ymax = max(ya1)
zmin = min(za1)
zmax = max(za1)
ta2, xa2, ya2, za2 = ([], [], [], [])
for timeslice in trace2:
t, x, y, z, vx, vy, vz, accel_x, accel_y, accel_z, psi, theta, phi, v, weight, fuel = timeslice
ta2.append(t)
xa2.append(x)
ya2.append(y)
za2.append(z)
xmin = min(min(xa2), xmin)
xmax = max(max(xa2), xmax)
ymin = min(min(ya2), ymin)
ymax = max(max(ya2), ymax)
zmin = min(min(za2), zmin)
zmax = max(max(za2), zmax)
# Fix aspect ratio
max_range = np.array([xmax - xmin, ymax - ymin, zmax - zmin]).max() / 2.0
mid_x = (xmax + xmin) * 0.5
mid_y = (ymax + ymin) * 0.5
mid_z = (zmax + zmin) * 0.5
ax.set_xlim(mid_x - max_range, mid_x + max_range)
ax.set_ylim(mid_y - max_range, mid_y + max_range)
ax.set_zlim(mid_z - max_range, mid_z + max_range)
# Plot Trajectories
plt.plot(xa1, ya1, za1, color='b')
plt.plot(xa2, ya2, za2, color='r')
# Draw t=0.0 marker for trace1
#ax.text(xa1[0], ya1[0], za1[0]+1, "t = %2.1f" % ta1[0], color='b', alpha=0.5)
ax.scatter(xa1[0], ya1[0], za1[0], color='b', marker='o', s=100, alpha=0.5)
# and now trace2
#ax.text(xa2[0], ya2[0], za2[0]+1, "t = %2.1f" % ta2[0], color='r', alpha=0.5)
ax.scatter(xa2[0], ya2[0], za2[0], color='r', marker='o', s=100, alpha=0.5)
# Draw t=tmax marker for trace1
#ax.text(xa1[-1], ya1[-1], za1[-1]+1, "t = %2.1f" % ta1[-1], color='b', alpha=0.5)
ax.scatter(xa1[-1], ya1[-1], za1[-1], color='b', marker='*', s=100, alpha=0.5)
# and now for trace 2
#ax.text(xa2[-1], ya2[-1], za2[-1]+1, "t = %2.1f" % ta2[-1], color='r', alpha=0.5)
ax.scatter(xa2[-1], ya2[-1], za2[-1], color='r', marker='*', s=100, alpha=0.5)
filename = title.replace(" ", "")
plt.savefig(path + '/' + filename + "_frontview.png")
ax.view_init(azim=90, elev=270)
plt.savefig(path + '/' + filename + "_topview.png")
ax.view_init(azim=90, elev=0)
plt.savefig(path + '/' + filename + "_sideview.png")
def draw_platform_trace(trace, filename):
""" Plots the trajectory of a single platform model over time."""
title = "Platform Trajectory: (%s)\n%s\n%s)" % (datetime.datetime.now(),
platform.platform(),
sys.version)
ta, xa, ya, za = ([], [], [], [])
for timeslice in trace:
t, x, y, z, psi, theta, phi, v, weight, fuel = timeslice
#print (t, x, y, z), type(z), type(za)
ta.append(t)
xa.append(x)
ya.append(y)
za.append(z)
fig = plt.figure()
ax = fig.gca(projection='3d')
plt.grid(True)
plt.title(title, loc='left')
plt.axis('equal')
plt.plot(xa, ya, za)
#ax.plot(xa, ya, zs=0.0, zdir='z', color='b', linestyle='--', alpha=0.5)
#ax.plot(xa, za, zs=0.0, zdir='y', color='r', linestyle='--', alpha=0.5)
#ax.plot(ya, za, zs=0.0, zdir='x', color='g', linestyle='--', alpha=0.5)
#ax.set_xlim([0.0, 200.0])
#ax.set_ylim([0.0, 200.0])
#ax.set_zlim([0.0, 200.0])
ax.text(xa[0], ya[0], za[0]+3, "t = %2.1f" % ta[0], color='b', alpha=0.3)
ax.scatter(xa[0], ya[0], za[0], color='b', marker='o', s=100, alpha=0.3)
ax.text(xa[-1], ya[-1], za[-1]+3, "t = %2.1f" % ta[-1], color='b', alpha=0.3)
ax.scatter(xa[-1], ya[-1], za[-1], color='b', marker='>', s=100, alpha=0.3)
plt.show()
#plt.savefig('test_results/' + filename)
def multiple_run_chart_3d(traces):
title = "1v1 WVR Air Combat: {} runs".format(len(traces))
fig = plt.figure()
ax = fig.gca(projection='3d')
plt.grid(True)
plt.title(title, loc='left')
plt.axis('equal')
ax.set_xlabel('x(object)')
ax.set_ylabel('y(object)')
ax.set_zlabel('z(object)')
for (trace1, trace2) in traces:
ta1, xa1, ya1, za1 = ([], [], [], [])
for timeslice in trace1:
t, x, y, z, psi, theta, phi, v, weight, fuel = timeslice
ta1.append(t)
xa1.append(x)
ya1.append(y)
za1.append(z)
# xmin = min(xa1)
# xmax = max(xa1)
# ymin = min(xa1)
# ymax = max(ya1)
# zmin = min(za1)
# zmax = max(za1)
ta2, xa2, ya2, za2 = ([], [], [], [])
for timeslice in trace2:
t, x, y, z, psi, theta, phi, v, weight, fuel = timeslice
ta2.append(t)
xa2.append(x)
ya2.append(y)
za2.append(z)
# xmin = min(min(xa2), xmin)
# xmax = max(max(xa2), xmax)
# ymin = min(min(ya2), xmin)
# ymax = max(max(ya2), ymax)
# zmin = min(min(za2), zmin)
# zmax = max(max(za2), zmax)
# ax.set_xlim(xmin,xmax)
# ax.set_ylim(ymin,ymax)
# ax.set_zlim(zmin,zmax)
ax.set_zlim(0,15000)
# Plot Trajectories
plt.plot(xa1, ya1, za1, color='b')
plt.plot(xa2, ya2, za2, color='r')
#
# # Draw t=0.0 marker for trace1
# #ax.text(xa1[0], ya1[0], za1[0]+1, "t = %2.1f" % ta1[0], color='b', alpha=0.5)
# ax.scatter(xa1[0], ya1[0], za1[0], color='b', marker='o', s=100, alpha=0.5)
# # and now trace2
# #ax.text(xa2[0], ya2[0], za2[0]+1, "t = %2.1f" % ta2[0], color='r', alpha=0.5)
# ax.scatter(xa2[0], ya2[0], za2[0], color='r', marker='o', s=100, alpha=0.5)
#
# # Draw t=tmax marker for trace1
# #ax.text(xa1[-1], ya1[-1], za1[-1]+1, "t = %2.1f" % ta1[-1], color='b', alpha=0.5)
# ax.scatter(xa1[-1], ya1[-1], za1[-1], color='b', marker='*', s=100, alpha=0.5)
# # and now for trace 2
# #ax.text(xa2[-1], ya2[-1], za2[-1]+1, "t = %2.1f" % ta2[-1], color='r', alpha=0.5)
# ax.scatter(xa2[-1], ya2[-1], za2[-1], color='r', marker='*', s=100, alpha=0.5)
plt.show()
def multiple_run_chart(traces):
title = "1v1 WVR Air Combat: {} runs".format(len(traces))
fig = plt.figure()
ax = fig.add_subplot(111)
plt.grid(True)
plt.title(title, loc='left')
plt.axis('equal')
ax.set_xlabel('x(object)')
ax.set_ylabel('y(object)')
for (trace1, trace2) in traces:
ta1, xa1, ya1 = ([], [], [])
for timeslice in trace1:
t, x, y, z, psi, theta, phi, v, weight, fuel = timeslice
ta1.append(t)
xa1.append(x)
ya1.append(y)
ta2, xa2, ya2 = ([], [], [])
for timeslice in trace2:
t, x, y, z, psi, theta, phi, v, weight, fuel = timeslice
ta2.append(t)
xa2.append(x)
ya2.append(y)
# Plot Trajectories
plt.plot(xa1, ya1, color='b', alpha = 0.3)
plt.plot(xa2, ya2, color='r', alpha = 0.3)
plt.show() | UTF-8 | Python | false | false | 8,001 | py | 221 | charts.py | 65 | 0.519935 | 0.469441 | 0 | 249 | 31.136546 | 122 |
mrezekial/School | 11,656,541,262,383 | f1c2452bfd906370158b1bb87bfacdc2e29c1627 | 5e3ab0e2ff80524b563f67f9962b73d6956a12b5 | /Python/Lab 3/Final Work for Presentation/lab3-3.py | 9dd077a648837767a6aee112811e6b6ba79ad4b4 | [] | no_license | https://github.com/mrezekial/School | 9e2aa7bed507604836fb6b0ae8a4fe4c3193e881 | 9579c45c3b24cdf128b6519eb381548aec7abf43 | refs/heads/master | 2021-01-22T20:29:25.488108 | 2015-03-09T18:33:45 | 2015-03-09T18:33:45 | 31,838,623 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import lab03
if __name__ == '__main__':
print "Keegan Bailey\nC0369801\nComp112 -- Lab03\n\n"
print "the sum of a series of numbers."
print "###########################################################"
print "# the series ends when the user enters a negative number #"
print "###########################################################"
answer = lab03.stNum()
print "Your total sum of all numbers is",answer
| UTF-8 | Python | false | false | 443 | py | 181 | lab3-3.py | 118 | 0.460497 | 0.424379 | 0 | 10 | 42.8 | 71 |
Aasthaengg/IBMdataset | 9,990,093,952,543 | 855cb58d6c9ef6ad76fc435b83d4c03f58dd4562 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03211/s126688388.py | e6a14ea746a9a3cbb36289cb99a1becffea1f3b4 | [] | no_license | https://github.com/Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | s = input()
ans = abs(753 - int(s[0: 3]))
for i in range(1, len(s) - 2):
num = int(s[i: i + 3])
ans = min(ans, abs(753 - num))
print(ans) | UTF-8 | Python | false | false | 147 | py | 202,060 | s126688388.py | 202,055 | 0.503401 | 0.428571 | 0 | 8 | 17.5 | 34 |
JJK96/SDM | 9,852,654,990,604 | f1aba71c2c4d4f62036f264270f2ad18cf9b7b73 | 8067633708b74bd2edd596a4c9ad0b4c1660e3fa | /consultant.py | cecbead219e4a40701283cf3848c18bddaeb6d01 | [] | no_license | https://github.com/JJK96/SDM | 9b3a17f170fa1373300f17a19edfc9042fc4d025 | 50586005acbe965503bdf09b02751d432924769d | refs/heads/master | 2020-08-16T19:09:03.069797 | 2019-11-29T08:16:14 | 2019-11-29T08:16:14 | 215,540,381 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import threading
import traceback
import uuid
from socket import socket
import rpyc
from charm.toolbox.pairinggroup import G1, pair
from rpyc.utils.authenticators import SSLAuthenticator
from rpyc.utils.server import ThreadedServer
import config
from client import Client
from funcs import *
from serialization import *
import threading
import time
# DEBUG
class ConsultantClient():
def __init__(self, ip, port, id, public_key):
# self.ip = ip
# self.port = port
self.id = id
self.public_key = public_key
# self.conn = rpyc.ssl_connect(ip, port, config=config.config, keyfile="cert/consultant/key.pem", certfile="cert/consultant/certificate.pem")
class Consultant(Client):
"""
This is also the group manager (GM)
"""
def __init__(self, τ):
print('init')
self.τ = τ
self.system_setup(τ)
self.G = {}
self.signingkey = gen_signing_key()
self.id = str(uuid.uuid4())
self.group_auth()
def create_consultant_user(self):
self.member_join(self)
def connect_server(self):
self.server = rpyc.ssl_connect(config.SERVER_IP, config.SERVER_PORT, keyfile="cert/client/key.pem",
certfile="cert/client/certificate.pem", config=config.config)
def system_setup(self, τ):
"""
Instantiates the scheme. Has as inputs:
o Security parameter `τ`
This function is executed by the GM, and outputs the system public key `PKs`,
the group secret key `SKg` for all group members and the master key MK for the GM.
"""
curve = 'SS512'
group = PairingGroup(curve, secparam=τ)
g, P, Q = [group.random(G1) for _ in range(3)]
q = group.order()
α, x, y, λ, σ = [num_Zn_star_not_one(q, group.random, ZR) for _ in range(5)]
X = g ** x
Y = g ** y
Pp = P ** λ
Qp = Q ** (λ - σ)
self.PKs = {'l': 21, 'curve': curve, 'secparam': τ, 'group': group, 'q': q, 'g': g, 'X': X, 'Y': Y}
self.SKg = {'α': α, 'P': P, 'Pp': Pp, 'Q': Q, 'Qp': Qp}
self.MK = {'x': x, 'y': y, 'λ': λ, 'σ': σ}
self.t = 1
self.ts = []
# a = pair(g1**2, g2**3)
# b = pair(g1, g2) ** 6
# group.init(ZR, 10)
# code.interact(local=dict(globals(), **locals()))
###
# AuthCodGen
# Generates the group membership certificates
###
def group_auth(self):
"""
This function is executed by the GM and makes the membership certificate for every member in `G`. Takes as input:
o Identities {ID_i }; 1 <= i <= N of all members {M_i}; 1 <= i <= N in `G`
o The system public key `self.PKs`
o The master key `self.MK`
This function outputs Membership certificates {CT_i}; 1 <= i <= N for all members
"""
group = self.PKs['group']
x = self.MK['x']
y = self.MK['y']
## Step 1
ai = group.random(G1)
bi = ai ** y
ci = ai ** (x + hash_Zn(self.id, group) * x * y)
self.CTi = {'IDi': self.id, 'ai': ai, 'bi': bi, 'ci': ci}
## Step 2: keep CTi secret!
def _check_unique_ai(self, ai):
""" Returns True if ai is unique (does not exist already), false if it is not. """
for M in self.G.values():
if M.CTi['ai'] == ai:
return False
return True
def member_join(self, M):
"""
This function is executed by the GM, interacting with old members when there are new members who wish to join
the group. It takes as input:
o The certificates {CT_i}; 1 <= i <= N of all members in `G`
o The identities {ID_N+i }; 1 <= i <= n of all newly joining members {M_N+i}; 1 <= i <= n in `G`
o The system public key `self.PKs`
o The master key `self.MK`
This function outputs Membership certificates {CT_N+i}; 1 <= i <= N for all newly joining members, updated
membership certificates for the old members {M_i}; 1 <= i <= N, and an updated parameter of the system public key PKs.
"""
group = self.PKs['group']
q = self.PKs['q']
X = self.PKs['X']
x = self.MK['x']
y = self.MK['y']
if M.id not in self.G:
print(self.G.keys())
print(M.id)
## Step 1
t = num_Zn_star_not_one(q, group.random, ZR)
self.PKs['X'] = X ** t
self.CTi['ci'] = self.CTi['ci'] ** t
self.t *= t
self.ts.append((time.time(), t))
if not hasattr(self, 'server'):
self.connect_server()
self.server.root.update_public_key(group.serialize(t))
self.server.root.add_client(M.id, serialize_public_key(M.public_key))
## Step 2
ai = group.random(G1)
while (not self._check_unique_ai(ai)):
ai = group.random(G1)
bi = ai ** y
ci = ai ** (self.t * (x + hash_Zn(M.id, group) * x * y))
CTi = {'IDi': M.id, 'ai': ai, 'bi': bi, 'ci': ci}
M.CTi = CTi
print("sending CTi")
# Add the new members to the member group
self.G[M.id] = M
return serialize_CTi(M.CTi, self.PKs)
else:
return None
## Step 3: let old members update ci, we do this already in member.update_certificate
## Step 4: new members keep CTi secret!
def member_leave(self, M):
"""
This function is executed by the GM, interacting with the members after some members have left the group.
It takes as input:
o The certificates {CT_i}; 1 <= i <= N of all members in `G`
o The identities {ID_ji }; 1 <= i <= n of all leaving members {M_ji}; 1 <= i <= n in `G`
o The system public key `self.PKs`
This function outputs updates membership certificates for the remaining members, and an updated parameter
of the system public key PKs.
"""
group = self.PKs['group']
q = self.PKs['q']
X = self.PKs['X']
## Step 1
t = num_Zn_star_not_one(q, group.random, ZR)
self.PKs['X'] = X ** t
self.CTi['ci'] = self.CTi['ci'] ** t
self.t *= t
self.ts.append((time.time(), t))
t = group.serialize(t)
del self.G[M.id]
if not hasattr(self, 'server'):
self.connect_server()
self.server.root.update_public_key(t)
## Step 2: let remaining members update ci, we do this already in member.update_certificate
## Step 3: remaining members keep CTi secret!
###
# /AuthCodGen
###
###
# DataDcrypt
# Decrypts the encrypted data
###
def get_decryption_key(self, Up, CTi):
"""
This function is executed by the GM to make a decryption key for the member. It takes as input:
o The auxiliary information `(Up, CTi)`
o System public key `self.PKs`
o Group system key `self.SKg`
o Master key `self.MK`
This functions outputs the decryption key `D` or Access Denied for the member.
"""
X = self.PKs['X']
Y = self.PKs['Y']
g = self.PKs['g']
group = self.PKs['group']
Q = self.SKg['Q']
σ = self.MK['σ']
member = pair(CTi['ai'], Y) == pair(g, CTi['bi']) and \
pair(X, CTi['ai']) * pair(X, CTi['bi']) ** hash_Zn(CTi['IDi'], group) == pair(g, CTi['ci'])
if member:
D = pair(Q, Up) ** σ
return D
else:
raise Exception("Access Denied")
###
# /DataDcrypt
###
def get_public_params(self):
return self.PKs
def upload_file(self, file_contents, keywords, client_id):
assert self.CTi is not None, "Consultant needs a certificate!"
assert hasattr(self, 'server'), "Server has not yet been initialized!"
D = file_contents
IR, R, Ed = self.index_gen(D, keywords, client_id)
Er = self.data_encrypt(R, Ed)
IrSerialized = serialize_IL(IR, self.PKs)
self.server.root.add_file(IrSerialized, serialize_Er(Er, self.PKs), client_id)
def get_files_by_keywords(self, keywords):
assert self.CTi is not None, "Consultant needs a certificate!"
assert hasattr(self, 'server'), "Server has not yet been initialized!"
files = []
group = self.PKs['group']
trapdoor = self.make_trapdoor(keywords)
CTi_serialized = serialize_CTi(self.CTi, self.PKs)
signature = sign_message(self.signingkey, trapdoor)
search_results = self.server.root.search_index(serialize_trapdoor(trapdoor, self.PKs), CTi_serialized,
signature)
if search_results == config.ACCESS_DENIED:
return config.ACCESS_DENIED
for i, result in enumerate(search_results):
result = deserialize_Er(result, self.PKs)
Up, ν = self.data_aux(result)
# D = group.deserialize(self.consultant.root.get_decryption_key(group.serialize(Up), CTi_serialized))
D = self.get_decryption_key(Up, self.CTi)
Rp, Ed = self.member_decrypt(result, D, ν)
files.append(decrypt_document(Rp, Ed))
return files
class ConsultantServer(rpyc.Service):
def __init__(self):
self.consultant = Consultant(512)
self.start_server()
def on_connect(self, conn):
self.ip, port = socket.getpeername(conn._channel.stream.sock)
print(self.ip, port)
def exposed_get_public_parameters(self):
print("get public parameters")
return serialize_PKs(self.consultant.PKs)
def exposed_get_public_key(self):
print("get public key")
return serialize_public_key(self.consultant.signingkey.public_key())
def exposed_get_update_t(self, last_update: float):
update = (time.time(), self.consultant.PKs['group'].init(ZR, 1))
for (timestamp, t) in filter(lambda x: x[0] > last_update, self.consultant.ts):
update = (timestamp, update[1] * t)
update = (update[0], self.consultant.PKs['group'].serialize(update[1]))
return update
def exposed_join(self, port, id, public_key: bytes):
print("join")
client = self.consultant.G.get(id, ConsultantClient(self.ip, port, id, deserialize_public_key(public_key)))
try:
serialized_cti = self.consultant.member_join(client)
SKg = serialize_SKg(self.consultant.SKg, self.consultant.PKs)
return serialized_cti, SKg
except Exception:
traceback.print_exc()
def exposed_leave(self, id):
print("leave")
member = self.consultant.G[id]
assert member is not None
self.consultant.member_leave(member)
def exposed_get_decryption_key(self, Up, CTi):
print("get decryption key")
PKs = self.consultant.PKs
CTi = deserialize_CTi(CTi, PKs)
Up = PKs['group'].deserialize(Up)
D = self.consultant.get_decryption_key(Up, CTi)
return PKs['group'].serialize(D)
def start_server(self):
authenticator = SSLAuthenticator("cert/consultant/key.pem", "cert/consultant/certificate.pem")
server = ThreadedServer(self, port=8001, protocol_config=config.config, authenticator=authenticator)
thread = threading.Thread(target=server.start)
thread.start()
def get_clients(self):
return list(self.consultant.G.keys())
if __name__ == "__main__":
ConsultantServer()
# c = ConsultantServer()
# c.exposed_get_public_parameters()
| UTF-8 | Python | false | false | 11,776 | py | 14 | consultant.py | 9 | 0.566675 | 0.561995 | 0 | 339 | 33.663717 | 149 |
mickac/hospital_shift_managment_system | 962,072,699,900 | 31022bcaf002d4827f72e91e55d8cea6aef856d5 | ce280cff220dc6ea9a73914e76e273c87308cc84 | /lekarze/migrations/0013_remove_shiftcounter_department.py | e74abd8424fe3993180bb5d9b868bd34d14e4a83 | [] | no_license | https://github.com/mickac/hospital_shift_managment_system | 3e838dfee81dc0a1f211f56ce11c2a4ead5f0916 | 5572a5c5be4d17c26a7a663dcd7f8015685063a9 | refs/heads/master | 2021-12-15T12:00:31.264791 | 2020-01-29T00:41:52 | 2020-01-29T00:41:52 | 236,874,259 | 0 | 0 | null | false | 2021-12-13T20:36:18 | 2020-01-29T00:34:22 | 2020-01-29T00:41:54 | 2021-12-13T20:36:16 | 419 | 0 | 0 | 3 | JavaScript | false | false | # Generated by Django 3.0.2 on 2020-01-26 14:54
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('lekarze', '0012_auto_20200126_1554'),
]
operations = [
migrations.RemoveField(
model_name='shiftcounter',
name='department',
),
]
| UTF-8 | Python | false | false | 338 | py | 24 | 0013_remove_shiftcounter_department.py | 13 | 0.591716 | 0.5 | 0 | 17 | 18.882353 | 47 |
BlenderCN-Org/compAS | 10,419,590,704,468 | 4da37995d545537e4e8d0d13bc4b537cddbced38 | 60a267a7136b3cec2727824122bc6cda28c331e5 | /src/compas/geometry/utilities.py | 7134c1536560ae1f1e623fdf62f25204cecb1d86 | [
"MIT"
] | permissive | https://github.com/BlenderCN-Org/compAS | 4a257637d181188c0b68210f1126fa826be226d5 | 9796066a2dc26f39fe6ad0a0d44a1ef8a84a608a | refs/heads/master | 2020-05-30T02:11:18.495302 | 2017-03-21T13:42:17 | 2017-03-21T13:42:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from compas.exceptions import BRGInputError
__author__ = ['Tom Van Mele', ]
__copyright__ = 'Copyright 2016 - Block Research Group, ETH Zurich'
__license__ = 'MIT License'
__email__ = 'vanmelet@ethz.ch'
def multiply_matrices(A, B):
r"""Mutliply a matrix with a matrix.
This is a pure Python version of the following linear algebra procedure:
.. math::
\mathbf{A} \cdot \mathbf{B} = \mathbf{C}
with :math:`\mathbf{A}` a *m* by *n* matrix, :math:`\mathbf{B}` a *n* by *o*
matrix, and :math:`\mathbf{C}` a *m* by *o* matrix.
Parameters:
A (sequence of sequence of float): The first matrix.
B (sequence of sequence of float): The second matrix.
Returns:
list of list of float: The result matrix.
Raises:
BRGGeometryInputError:
If the shapes of the matrices are not compatible.
BRGGeometryInputError:
If the row length of B is inconsistent.
Examples:
>>> A = [[2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 2.0]]
>>> B = [[2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 2.0]]
>>> dot_matrices(A, B)
[[4.0, 0.0, 0.0], [0.0, 4.0, 0.0], [0.0, 0.0, 4.0]]
"""
n = len(B) # number of rows in B
o = len(B[0]) # number of cols in B
if not all([len(row) == o for row in B]):
raise BRGInputError('Row length in matrix B is inconsistent.')
if not all([len(row) == n for row in A]):
raise BRGInputError('Matrix shapes are not compatible.')
B = zip(*B)
return [[sum(x * y for x, y in zip(row, col)) for col in B] for row in A]
def multiply_matrix_vector(matrix, vector):
r"""Multiply a matrix with a vector.
This is a Python version of the following linear algebra procedure:
.. math::
\mathbf{A} \cdot \mathbf{x} = \mathbf{b}
with :math:`\mathbf{A}` a *m* by *n* matrix, :math:`\mathbf{x}` a vector of
length *n*, and :math:`\mathbf{b}` a vector of length *m*.
Parameters:
matrix (list of list): The matrix.
vector (list): The vector.
Returns:
list: The resulting vector
Raises:
BRGGeometryInputError:
If not all rows of the matrix have the same length as the vector.
Examples:
>>> matrix = [[2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 2.0]]
>>> vector = [1.0, 2.0, 3.0]
>>> dot_matrix_vector(matrix, vector)
[2.0, 4.0, 6.0]
"""
v = len(vector)
if not all([len(row) == v for row in matrix]):
raise BRGInputError('Matrix shape is not compatible with vector length.')
return [sum(x * y for x, y in zip(row, vector)) for row in matrix]
# ==============================================================================
# Debugging
# ==============================================================================
if __name__ == "__main__":
pass
| UTF-8 | Python | false | false | 2,877 | py | 311 | utilities.py | 105 | 0.537018 | 0.506083 | 0 | 94 | 29.606383 | 81 |
lixiang2017/leetcode | 5,918,464,970,710 | da0247485348f5ff27a5a4b2144adde341651cd1 | 7950c4faf15ec1dc217391d839ddc21efd174ede | /leetcode-cn/0171.0_Excel_Sheet_Column_Number.py | 43146661f02265024eddfd997374f0f8e04b5b75 | [] | no_license | https://github.com/lixiang2017/leetcode | f462ecd269c7157aa4f5854f8c1da97ca5375e39 | f93380721b8383817fe2b0d728deca1321c9ef45 | refs/heads/master | 2023-08-25T02:56:58.918792 | 2023-08-22T16:43:36 | 2023-08-22T16:43:36 | 153,090,613 | 5 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
T:O(7),S:O(1)
执行用时:36 ms, 在所有 Python3 提交中击败了91.49% 的用户
内存消耗:14.9 MB, 在所有 Python3 提交中击败了34.57% 的用户
'''
class Solution:
def titleToNumber(self, columnTitle: str) -> int:
num = 0
for t in columnTitle:
num *= 26
num += ord(t) - ord('A') + 1
return num
'''
2021/07/30 10:20
执行用时:28 ms, 在所有 Python3 提交中击败了97.58%的用户
内存消耗:15 MB, 在所有 Python3 提交中击败了5.31%的用户
'''
class Solution:
def titleToNumber(self, columnTitle: str) -> int:
num = 0
for c in columnTitle:
num *= 26
num += ord(c) - ord('A') + 1
return num
| UTF-8 | Python | false | false | 748 | py | 3,076 | 0171.0_Excel_Sheet_Column_Number.py | 2,900 | 0.54902 | 0.46732 | 0 | 29 | 20.103448 | 53 |
hone1er/hackerRank | 11,441,792,921,672 | dedc185bf1e13d08b7ea847079d7628f1563b5ac | e9a62cae4e815b205457bcd742a2d401882a4347 | /Python/diagonalDiff.py | bba7be993a2203957638298d323997e3a77de65c | [] | no_license | https://github.com/hone1er/hackerRank | 8abe63680ae22900da8580fa388e2c87e4bffa05 | 825c2d2675497789235b9d37f347644856e5ea40 | refs/heads/master | 2020-05-07T09:23:46.088037 | 2019-12-17T19:53:07 | 2019-12-17T19:53:07 | 180,375,265 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/bin/python3
import os
# Complete the diagonalDifference function below.
def diagonalDifference(arr):
primary = sum([arr[n][n] for n in range(len(arr))])
secondary = sum([arr[len(arr)-(n+1)][n] for n in range(len(arr))])
return abs(primary-secondary)
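# Worked example (the classic sample case): arr = [[11, 2, 4], [4, 5, 6], [10, 8, -12]]
# primary diagonal   = 11 + 5 + (-12) = 4
# secondary diagonal = 4 + 5 + 10     = 19, so the answer is |4 - 19| = 15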
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
arr = []
for _ in range(n):
arr.append(list(map(int, input().rstrip().split())))
result = diagonalDifference(arr)
fptr.write(str(result) + '\n')
fptr.close()
| UTF-8 | Python | false | false | 555 | py | 28 | diagonalDiff.py | 27 | 0.6 | 0.596396 | 0 | 24 | 22.125 | 70 |
rbernand/computorv2 | 1,537,598,319,099 | 3ab8eeef8eddc5d88f4fb3160440d3d5a852effd | df623e48f65621a84c51ad3017b2fafd27f69fdb | /computor/parser.py | ab06992f2066741a40549242c423b56861b08742 | [] | no_license | https://github.com/rbernand/computorv2 | 19ceb1b0c6287a120c3ff14a352bb47da1d8a5ea | 85e55236087f955b2c1f1b279b5ca8ce58c7b0d6 | refs/heads/master | 2021-03-24T12:55:57.722152 | 2017-07-17T13:51:27 | 2017-07-17T13:51:27 | 88,269,477 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import re
from computor import LOG
from computor.exceptions import ComputorSyntaxError
from computor.tokens import Token, Variable, Function
class Parser:
SEPARATORS = Token.OPERATORS + ('(', ')')
REGEX_FUNCTION = re.compile(r'^(?!i)([a-zA-Z]+)\((?!i)([a-zA-Z]+)\)$')
REGEX_VARIABLE = re.compile(r'^(?!i)([a-zA-Z]+)$')
PRIORITIES = [
'+',
'-',
'*',
'/',
'^',
]
def __init__(self):
pass
def lex_line(self, line):
matrix_depth = 0
tokens = []
current = ""
for char in line:
if char == '[':
matrix_depth += 1
elif char == ']':
matrix_depth -= 1
if matrix_depth > 0:
current += char
continue
if char.isspace():
continue
if char == ')':
break
if char == '(':
tokens.append(self.lex_line(line))
continue
if char in Parser.SEPARATORS:
if current:
tokens.append(current)
tokens.append(char)
current = ""
else:
current += char
if current:
tokens.append(current)
return tokens
def _parse(self, tokens):
if len(tokens) == 1:
if isinstance(tokens[0], list):
return self._parse(tokens[0])
return Token.factory(tokens[0])
if tokens == []:
return
else:
for operator in Parser.PRIORITIES:
if operator in tokens:
sep = tokens.index(operator)
value = tokens.pop(sep)
return Token.factory(
value,
self._parse(tokens[:sep]),
self._parse(tokens[sep:]))
def parse_calculation(self, line):
tokens = self.lex_line(iter(line))
return self._parse(tokens)
def parse_var_or_func(self, line):
try:
funcname, varname = self.REGEX_FUNCTION.match(line).groups()
return Function(funcname, varname)
except AttributeError:
            LOG.debug('"%s" is not a function name', line)
try:
varname, = self.REGEX_VARIABLE.match(line).groups()
return Variable(line)
except AttributeError:
            raise ComputorSyntaxError('"%s" is neither a valid var name nor a valid func name.' % line)
def parse_input(self, line):
if '=' in line:
try:
left, right = line.split('=')
                if right == '?':
                    # "expr = ?" queries the value of expr; parse only the
                    # expression, not the whole line (which still contains '=')
                    return None, self.parse_calculation(left)
else:
return self.parse_var_or_func(left), self.parse_calculation(right)
except ValueError:
raise ComputorSyntaxError("Too many '='")
return None, self.parse_calculation(line)
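    # Illustrative walk-through (assuming Token.factory builds an operator
    # node from a symbol and its two sub-trees):
    #
    #     Parser().parse_calculation("2 + 3 * 4")
    #
    # lexes to ['2', '+', '3', '*', '4']; _parse splits on '+' first because
    # it comes first in PRIORITIES, so the loosest-binding operator becomes
    # the root of the tree, which is equivalent to (2 + (3 * 4)).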
| UTF-8 | Python | false | false | 2,986 | py | 21 | parser.py | 19 | 0.473878 | 0.471199 | 0 | 96 | 30.104167 | 94 |
eduardorasgado/divide-and-conquer-algorithms | 17,695,265,269,547 | 684799c546d84fe58d83cc3a1ae9dc8f069c736d | 65ab152e1ed3b63251e9cc93029f20005a7d014d | /search.py | eed0c31115b661abcf70c71432cff033ac86e883 | [
"MIT"
] | permissive | https://github.com/eduardorasgado/divide-and-conquer-algorithms | 4535ea0aed40398a967a39a357df20b5699d8ce7 | 9821a0a75b62bcb4c3d636bc1f1b944771658d23 | refs/heads/master | 2020-03-12T14:21:31.987034 | 2018-04-23T08:35:55 | 2018-04-23T08:35:55 | 130,665,397 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 09 23:38:49 2017
Search
Graphs for pathfinder
A* Algorithm
@author: Eduardo
"""
#import math
#import collections
import heapq
grid = [[0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 1, 0]]
start = (0,0)
goal = (len(grid)-1,len(grid[0])-1)
cost = 1
gvalue = 0
delta = [[-1,0], #up
[0,-1], #left
[1, 0], #down
[0, 1]] #right
delta_name = ['^','<','v','>']
def search_2(grid,init,goal,cost):
count_for_zeros = 0
total_grids = len(grid)*len(grid[0])
for i in range(len(grid)):
for j in range(len(grid[i])):
if grid[i][j] != 1:
x = i
y = j
if i == goal[0] and j == goal[1]:
return "here",[i,j]
print(grid[i][j],"x=%s,y=%s"%(x,y))
count_for_zeros += 1
print(total_grids, count_for_zeros)
class PriorityQueue:
def __init__(self):
self.elements = []
def empty(self):
return len(self.elements) == 0
def put(self, item, priority):
heapq.heappush(self.elements, (priority, item))
def get(self):
return heapq.heappop(self.elements)[1]
class graph:
def __init__(self,height,width,landmarks):
self.height = height
self.width = width
        self.landmarks = landmarks  # blocked cells (value 1 in the grid)
self.graph = self.create_graph()
def in_bounds(self, results):
xs=[]
ys=[]
new_results=[]
for i in range(len(results)):
xs.append(results[i][0])
ys.append(results[i][1])
for x in range(len(xs)):
if 0 <= xs[x] < self.height and 0 <= ys[x] < self.width:
new_results.append((xs[x],ys[x]))
return new_results
def passable(self, results):
new_results = []
save = []
        for i in self.landmarks:
for j in results:
verify = (i[0]==j[0])
verify2 = i[1]==j[1]
if verify and verify2:
#print "catcha:",i
save.append(j)
if len(save)==0:
return results
for i in range(len(save)):
if i>0:
new_results = self.passable(new_results)
if i==0:
for coord in range(len(results)):
verify = (results[coord][0]==save[i][0])
verify2 = (results[coord][1]==save[i][1])
if verify and verify2:
pass
else:
new_results.append(results[coord])
#print "new: ", new_results
return new_results
def create_graph(self):
graph = {}
for x in range(self.height):
for y in range(self.width):
id = (x, y)
if grid[x][y] !=1:
results = [(x+1, y), (x, y-1), (x-1, y), (x, y+1)]
results = self.in_bounds(results)
#print id,results
results = self.passable(results)
graph[id] = results
return graph
def show_graph(self):
for key in self.graph:
print ("%s=%s \n"%(str(key),str(self.graph[key])))
def slicer(grid):
for i in range(len(grid)):
if len(grid[i]) != len(grid[i-1]):
raise ValueError
height = len(grid)
width = len(grid[0])
landmarks = []
for i in range(len(grid)):
for j in range(len(grid[0])):
if grid[i][j] != 0:
landmarks.append((i,j))
return height,width,landmarks
def heuristic(a, b):
(x1, y1) = a
(x2, y2) = b
return abs(x1 - x2) + abs(y1 - y2)
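# Manhattan distance: on a 4-connected grid with unit step cost this never
# overestimates the true remaining cost (it is admissible), so A* with this
# heuristic returns a shortest path.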
def a_star_search(graph, start, goal,cost):
frontier = PriorityQueue()
frontier.put(start, 0)
came_from = {}
cost_so_far = {}
came_from[start] = None
cost_so_far[start] = 0
while not frontier.empty():
current = frontier.get()
if current == goal:
break
for key in graph.graph:
verify = (key == current)
if verify:
key_1 = graph.graph[key]
for next in key_1:
new_cost = cost_so_far[current] + 1
if next not in cost_so_far or new_cost < cost_so_far[next]:
cost_so_far[next] = new_cost
priority = new_cost + heuristic(goal, next)
frontier.put(next, priority)
came_from[next] = current
#print "camefrom=%s,cost_so_far=%s"%(came_from, cost_so_far)
#print " "
for key in cost_so_far:
verify = (key[0] == goal[0])
verify2 = (key[1] == goal[1])
if verify and verify2:
winner = [cost_so_far[key],goal[0],goal[1]]
return winner, came_from, cost_so_far
def reconstruct_path(came_from, start, goal):
current = goal
path = [current]
while current != start:
current = came_from[current]
path.append(current)
#path.append(start) # optional
path.reverse() # optional
return path
height,width,landmarks = slicer(grid)
grapho = graph(height,width,landmarks)
grapho.show_graph()
print(" ")
#print grapho.graph
try:
winner, came_from, cost_so_far = a_star_search(grapho, start, goal,cost)
print(winner)
print("El camino que debe tomar:")
print(reconstruct_path(came_from, start, goal))
except Exception as e:
print("fail")
input() | UTF-8 | Python | false | false | 5,875 | py | 22 | search.py | 21 | 0.468255 | 0.448511 | 0 | 197 | 27.832487 | 76 |
GIScience/openrouteservice-examples | 17,154,099,417,660 | a2ebf99b720a792169fea69327664f9e86e46add | 901aa2395b6e36a53e7873e6ab1a11c2f342f50c | /python/Health_Care_Access_Madagascar.py | d6f60a1e8b851860207c059dabb81f3cc353035e | [
"Apache-2.0"
] | permissive | https://github.com/GIScience/openrouteservice-examples | e4287d82591f8cf7523b3384a94eabdee3fa4dd6 | 76c3c9f6ce20786c837200a35cbe1875737ddfaa | refs/heads/master | 2023-07-10T03:35:05.895443 | 2023-06-26T19:11:50 | 2023-06-26T19:11:50 | 126,886,924 | 76 | 51 | Apache-2.0 | false | 2023-06-26T18:51:01 | 2018-03-26T20:36:57 | 2023-06-18T23:16:59 | 2023-06-26T18:51:01 | 96,083 | 73 | 46 | 8 | Jupyter Notebook | false | false | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.12.0
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Analysis of Access to Health Care using openrouteservice
# > Note: All notebooks need the [environment dependencies](https://github.com/GIScience/openrouteservice-examples#local-installation)
# > as well as an [openrouteservice API key](https://openrouteservice.org/dev/#/signup) to run
# ## Abstract
# In the case of a disaster (natural or man-made), a country is affected not only by the intensity of the disaster but
# also by its own vulnerability to it.
# Countries have different kinds of opportunities to prepare for such catastrophes,
# to respond to them, and finally to start the recovery.
# Many less developed countries, e.g. Madagascar, are particularly prone to disasters, not only because of the higher
# probability of occurrence, but also due to a potentially lower ability to cope during and after the event.
#
# In this example we will focus on vulnerability in terms of access to health care.
# The access to health facilities can be highly unequal within a country.
# Consequently, some areas and communities are more vulnerable to disasters effects than others.
# Quantifying and visualizing such inequalities is the aim of this notebook.
#
# The notebook gives an overview of the distribution of health sites in Madagascar and of the share of the population
# with access to them by foot and by car.
# Open source data from OpenStreetMap and tools (such as the openrouteservice) were used to create accessibility
# isochrones for each hospital and to derive analysis results about the population percentage with access to
# health facilities per district.
# The findings show that the inhabitants of 69 of 119 (58%) districts don't have any access to hospitals in a one-hour
# walking range, and those of 43 of 119 (36%) districts in a one-hour car driving range.
#
# ### Workflow:
# * **Preprocessing**: Get data for districts, health facilities, population density, population count per district.
# * **Analysis**:
# * Compute accessibility to health care facilities using openrouteservice API
# * Derive the percentage of people with access to health care per district.
# * **Result**: Visualize results as choropleth maps.
#
#
# ### Datasets and Tools:
# * [Shapefile of district boundaries][boundaries] - Admin Level 2 (data from Humanitarian Data Exchange, 05/07/2018)
# * [Shapefile of health facilities][facilities] (data from Humanitarian Data Exchange, 05/07/2018)
# * [Raster file of population density][pop] - Worldpop Data (data from Humanitarian Data Exchange, 05.07.2018)
# * [openrouteservice][ors] - generate isochrones on the OpenStreetMap road network
# * [python implementation of zonal statistics by perrygeo][zonal_stats] - generate population count per district
#
# [boundaries]: https://data.humdata.org/dataset/madagascar-administrative-boundary-shapefiles-level-1-4
# [facilities]: https://data.humdata.org/dataset/madagascar-healthsites
# [pop]: https://data.humdata.org/dataset/worldpop-madagascar
# [ors]: https://openrouteservice.org/
# [zonal_stats]: https://gist.github.com/perrygeo/5667173
# # Python Workflow
# +
import os
from IPython.display import display
import folium
from folium.plugins import MarkerCluster
from openrouteservice import client
import time
import pandas as pd
import fiona as fn
from shapely.geometry import shape, mapping
from shapely.ops import cascaded_union
# import zonal stats function from python file, get it here: https://gist.github.com/perrygeo/5667173
from zonal_stats import *
# -
# ## Preprocessing
# For this study different kinds of data were used. First a map was created with folium, a Python package.
# The boundaries of the districts as well as the health sites were given as shapefiles, which were drawn on the map.
# The dataset about the health sites is from 2018.
# * Folium map
# * [Shapefile of district boundaries](https://data.humdata.org/dataset/madagascar-administrative-boundary-shapefiles-level-1-4) - Admin Level 2 (data from Humanitarian Data Exchange, 05/07/2018)
# * [Shapefile of health facilities](https://data.humdata.org/dataset/madagascar-healthsites) (data from Humanitarian Data Exchange, 05/07/2018)
# * [Raster file of population density](https://data.humdata.org/dataset/worldpop-madagascar) - Worldpop Data (data from Humanitarian Data Exchange, 05.07.2018)
# +
# insert your ORS api key
api_key = '{your-ors-api-key}'
ors = client.Client(key=api_key)
# make sure to provide the right filenames
districts_filename = 'data/mdg_polbnda_adm2_Distritcts_BNGRC_OCHA.shp'
health_facilities_filename = 'data/healthsites.shp'
population_raster_filename = 'data/MDG_ppp_2020_adj_v2.tif'
# these files will be generated during processing
isochrones_car_filename = 'data/iso_union_car.shp'
isochrones_car_per_district_filename = 'data/iso_car_per_district.shp'
isochrones_foot_filename = 'data/iso_union_foot.shp'
isochrones_foot_per_district_filename = 'data/iso_foot_per_district.shp'
# final file with all generated information
output_file = 'data/districts_final.geojson'
# -
# ### Create district dictionary and facilities dictionary
# +
districts_dictionary = {}
with fn.open(districts_filename, 'r') as districts:
for feature in districts:
district_id = int(feature['id'])
districts_dictionary[district_id] = {
'District Code': feature['properties']['DIST_PCODE'],
'District Name': feature['properties']['DISTRICT_N'],
'Population Count': 0,
'Car: Pop. with access': 0,
'Car: Pop. with access [%]': 0.0,
'Foot: Pop. with access': 0,
'Foot: Pop. with access [%]': 0.0,
'geometry': feature['geometry']
}
print('created dictionary for %s districts' % len(districts_dictionary))
facilities_dictionary = {}
with fn.open(health_facilities_filename, 'r') as facilities:
for feature in facilities:
facility_id = int(feature['id'])
facilities_dictionary[facility_id] = {
'geometry': feature['geometry']
}
print('created dictionary for %s facilities' % len(facilities_dictionary))
# -
# ### Let's get an overview and look at a map of the districts and health facilities
# +
map_outline = folium.Map(tiles='Stamen Toner', location=([-18.812718, 46.713867]), zoom_start=5)
# Import health facilities
cluster = MarkerCluster().add_to(map_outline) # To cluster hospitals
for facility_id in facilities_dictionary:
folium.Marker(list(reversed(facilities_dictionary[facility_id]['geometry']['coordinates']))).add_to(cluster)
# Import district boundaries
district_simp = []
for district_id in districts_dictionary:
geom = shape(districts_dictionary[district_id]['geometry'])
# we simplify the geometry just for the purpose of visualisation
    # be aware that some browsers e.g. chrome might fail to render the entire map if there are too many coordinates
simp_geom = geom.simplify(0.005, preserve_topology=False)
simp_coord = mapping(simp_geom)
folium.GeoJson(simp_coord).add_to(map_outline)
district_simp.append(simp_coord)
map_outline.save(os.path.join('results', '1_health_facilities_overview.html'))
map_outline
# -
# ## Analysis
# We will follow these steps:
# * Get Isochrones from openrouteservice
# * Compute Health Access Area per District
# * Compute Population Count per District
# * Compute Population with Access per District
# * Save output as GeoJSON file
# ### Get Isochrones from openrouteservice
# Of particular interest is the accessibility of hospitals within a one-hour range.
# Therefore, isochrones with a one-hour walk range and one-hour car drive range around each hospital were created with
# the open source tool openrouteservice.
# This might take several minutes depending on the number of health facilities
# (currently we can send 40 requests per minute).
# +
# request isochrones from ORS api for car
request_counter = 0
iso_car = []
for facility_id in facilities_dictionary.keys():
loc = facilities_dictionary[facility_id]
try:
iso_params = {'locations': loc['geometry']['coordinates'],
'profile': 'driving-car',
'range_type': 'time',
'segments': 3600, # 3600 = 1hour
'attributes': {'total_pop', 'area'}}
request = ors.isochrones(**iso_params)
request_counter += 1
lon, lat = loc['geometry']['coordinates']
iso_car.append(shape(request['features'][0]['geometry']))
if len(iso_car) % 39 == 0:
time.sleep(60)
    except Exception as err:
        # skip facilities for which the isochrone request failed
        pass
print('requested %s isochrones for car from ORS API' % request_counter)
# generate cascaded union of all isochrones
iso_union_car = cascaded_union(iso_car)
print('computed cascaded union of all isochrones')
# save isochrones to shapefiles
schema = {'geometry': 'Polygon',
'properties': {'id': 'int'}}
index = 0
with fn.open(isochrones_car_filename, 'w', 'ESRI Shapefile', schema) as c:
for poly in iso_union_car:
index += 1
c.write({'geometry': mapping(poly),
'properties': {'id': index}})
print('saved isochrones as shapefiles for car.')
# +
# request isochrones from ORS api for pedestrian
request_counter = 0
iso_foot = []
for facility_id in facilities_dictionary.keys():
loc = facilities_dictionary[facility_id]
try:
iso_params = {'locations': loc['geometry']['coordinates'],
'profile': 'foot-walking',
'range_type': 'time',
'segments': 3600, # 3600 = 1hour
'attributes': {'total_pop', 'area'}}
request = ors.isochrones(**iso_params)
request_counter += 1
lon, lat = loc['geometry']['coordinates']
iso_foot.append(shape(request['features'][0]['geometry']))
if len(iso_foot) % 39 == 0:
time.sleep(60)
    except Exception as err:
        # skip facilities for which the isochrone request failed
        pass
print('requested %s isochrones for foot from ORS API' % request_counter)
# generate cascaded union of all isochrones
iso_union_foot = cascaded_union(iso_foot)
print('computed cascaded union of all isochrones')
# save isochrones to shapefiles
schema = {'geometry': 'Polygon',
'properties': {'id': 'int'}}
index = 0
with fn.open(isochrones_foot_filename, 'w', 'ESRI Shapefile', schema) as c:
for poly in iso_union_foot:
index += 1
c.write({'geometry': mapping(poly),
'properties': {'id': index}})
print('saved isochrones as shapefiles for pedestrian.')
# -
# #### Let's look at the map of the isochrones
# +
# Create isochrones with one-hour foot walking range
map_isochrones = folium.Map(tiles='Stamen Toner', location=([-18.812718, 46.713867]),
zoom_start=5) # New map for isochrones
def style_function(color): # To style isochrones
return lambda feature: dict(color=color)
union_coord_car = mapping(iso_union_car)
for l in union_coord_car['coordinates']:
switched_coords = [[(y, x) for x, y in l[0]]]
folium.features.PolygonMarker(switched_coords,
color='#ff751a',
fill_color='#ff751a',
fill_opacity=0.2,
weight=3).add_to(map_isochrones)
union_coord_foot = mapping(iso_union_foot)
for l in union_coord_foot['coordinates']:
switched_coords = [[(y, x) for x, y in l[0]]]
folium.features.PolygonMarker(switched_coords,
color='#ffd699',
fill_color='#ffd699',
fill_opacity=0.2,
weight=3).add_to(map_isochrones)
map_isochrones.save(os.path.join('results', '2_isochrones.html'))
map_isochrones
# -
# ### Compute Health Access Area per District
# +
# schema of the new shapefile
schema = {'geometry': 'Polygon',
'properties': {'district_fid': 'int'}}
# creation of the new shapefile with the intersection for car
car_iso_district_dict = {}
foot_iso_district_dict = {}
counter = 0
with fn.open(isochrones_car_per_district_filename, 'w', driver='ESRI Shapefile', schema=schema) as output:
for district in fn.open(districts_filename):
for isochrone in fn.open(isochrones_car_filename):
if shape(district['geometry']).intersects(shape(isochrone['geometry'])):
prop = {'district_fid': district['id']}
car_iso_district_dict[counter] = district['id']
output.write(
{'geometry': mapping(shape(district['geometry']).intersection(shape(isochrone['geometry']))),
'properties': prop})
counter += 1
print('created %s isochrones per district for car' % counter)
# creation of the new shapefile with the intersection for pedestrian
counter = 0
with fn.open(isochrones_foot_per_district_filename, 'w', driver='ESRI Shapefile', schema=schema) as output:
for district in fn.open(districts_filename):
for isochrone in fn.open(isochrones_foot_filename):
if shape(district['geometry']).intersects(shape(isochrone['geometry'])):
prop = {'district_fid': district['id']}
foot_iso_district_dict[counter] = district['id']
output.write(
{'geometry': mapping(shape(district['geometry']).intersection(shape(isochrone['geometry']))),
'properties': prop})
counter += 1
print('created %s isochrones per district for pedestrian' % counter)
# -
# ### Compute Population Count per District
# The population data were given as a raster file for the whole country. Since this study focuses on the individual
# districts, the raster data has to be reduced to the given district boundaries.
# The population data is a prediction for 2020.
# This has to be considered when comparing with the health sites data (from 2018).
stats = zonal_stats(districts_filename, population_raster_filename, nodata_value=-999, global_src_extent=False)
total_population = 0
for element in stats:
district_id = int(element['fid'])
districts_dictionary[district_id]['Population Count'] = element['sum']
total_population += element['sum']
print('computed population count per district.')
print('Madagascar has a total population of %s inhabitants.' % int(total_population))
# ### Compute Population with Access per District
# To obtain the percentage of the population with access to health facilities per district,
# the number of people with access per district was divided by the district's number of inhabitants and multiplied by 100.
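# As a formula: access [%] = 100 * (population inside the isochrones) / (district population).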
# +
# compute zonal statistics for car
stats_car = zonal_stats(isochrones_car_per_district_filename, population_raster_filename, nodata_value=-999,
global_src_extent=False)
for element in stats_car:
district_id = int(car_iso_district_dict[element['fid']])
try:
pop_iso = districts_dictionary[district_id]['Car: Pop. with access'] + element['sum']
pop_total = districts_dictionary[district_id]['Population Count']
districts_dictionary[district_id]['Car: Pop. with access'] = pop_iso
districts_dictionary[district_id]['Car: Pop. with access [%]'] = 100 * pop_iso / pop_total
except:
pass
print('computed population count with access per district for car.')
# compute zonal statistics for pedestrian
stats_foot = zonal_stats(isochrones_foot_per_district_filename, population_raster_filename, nodata_value=-999,
global_src_extent=False)
for element in stats_foot:
district_id = int(foot_iso_district_dict[element['fid']])
try:
pop_iso = districts_dictionary[district_id]['Foot: Pop. with access'] + element['sum']
pop_total = districts_dictionary[district_id]['Population Count']
districts_dictionary[district_id]['Foot: Pop. with access'] = pop_iso
districts_dictionary[district_id]['Foot: Pop. with access [%]'] = 100 * pop_iso / pop_total
except:
pass
print('computed population count with access per district for foot.')
# -
# ### Save Output
# +
# save data from districts dictionary as shapefiles
schema = {'geometry': 'Polygon',
'properties': {
'code': 'str',
'name': 'str',
'pop_count': 'float',
'pop_car': 'float',
'pop_car_perc': 'float',
'pop_foot': 'float',
'pop_foot_perc': 'float'
}
}
with fn.open(output_file, 'w', driver='GeoJSON', schema=schema) as c:
for district_id in districts_dictionary.keys():
props = {
'code': districts_dictionary[district_id]['District Code'],
'name': districts_dictionary[district_id]['District Name'],
'pop_count': districts_dictionary[district_id]['Population Count'],
'pop_car': districts_dictionary[district_id]['Car: Pop. with access'],
'pop_car_perc': districts_dictionary[district_id]['Car: Pop. with access [%]'],
'pop_foot': districts_dictionary[district_id]['Foot: Pop. with access'],
'pop_foot_perc': districts_dictionary[district_id]['Foot: Pop. with access [%]']
}
# we simplify the geometry
geom = shape(districts_dictionary[district_id]['geometry'])
# we simplify the geometry just for the purpose of visualisation
        # be aware that some browsers e.g. chrome might fail to render the entire map if there are too many coordinates
simp_geom = geom.simplify(0.005, preserve_topology=False)
c.write({'geometry': mapping(simp_geom),
'properties': props})
print('created %s with all information.' % output_file)
# -
# ## Results
# The table shows the results of the analysis ordered by districts.
# Two choropleth maps were created, one with the population percentage with access by foot and one with access by car.
# show attributes
df_total = pd.DataFrame.from_dict(districts_dictionary, orient='index')
display(pd.DataFrame.from_dict(districts_dictionary, orient='index').round(2)[0:5])
print('display first 5 entries of the final results.')
# #### Show Map for Access to Health Facilities by Car
# +
map_choropleth_car = folium.Map(tiles='Stamen Toner', location=([-18.812718, 46.713867]), zoom_start=5)
map_choropleth_car.choropleth(geo_data=output_file,
data=df_total,
columns=['District Code', 'Car: Pop. with access [%]'],
key_on='feature.properties.code',
fill_color='BuPu',
legend_name='Car: Pop. with access [%]')
map_choropleth_car.save(os.path.join('results', '3a_choropleth_car.html'))
map_choropleth_car
# -
# #### Show Map for Access to Health Facilities by Foot
# +
map_choropleth_foot = folium.Map(tiles='Stamen Toner', location=([-18.812718, 46.713867]), zoom_start=5)
map_choropleth_foot.choropleth(geo_data=output_file,
data=df_total,
columns=['District Code', 'Foot: Pop. with access [%]'],
key_on='feature.properties.code',
fill_color='BuPu',
legend_name='Foot: Pop. with access [%]')
map_choropleth_foot.save(os.path.join('results', '3b_choropleth_foot.html'))
map_choropleth_foot
# -
# ## Conclusion
# There is a small number of hospitals in Madagascar, and they are unevenly distributed over the country.
# Consequently, a high percentage of the population doesn't have fast access to health sites.
# The findings show that the inhabitants of 69 of 119 districts don't have any access in a one-hour walking range,
# and those of 43 of 119 districts in a one-hour car driving range.
# The received maps (map_choropleth_foot and map_choropleth_car) show the population in percentage with access to
# health facilities by foot and by car.
#
# This study used open source data and tools. Therefore, results can be generated at very low cost.
# However, free data and tools can have limits for the analysis.
# The data can show characteristics of incompleteness and inconsistency, and the tools, for instance, usually come
# without dedicated user support.
| UTF-8 | Python | false | false | 20,605 | py | 25 | Health_Care_Access_Madagascar.py | 10 | 0.672604 | 0.658432 | 0 | 472 | 42.654661 | 195 |
venom9x1/Big5-Textual-Analysis-Backend | 13,005,161,010,399 | 2a41330af7d152a18f8b1d30d37738fbc20a6390 | 859bb3e856fb68f741414b14282e03053c08a47c | /app.py | d26b29534edd9c115fa193dcec388fb1910d6fc3 | [
"MIT"
] | permissive | https://github.com/venom9x1/Big5-Textual-Analysis-Backend | b202722085a291a68d630ba1f060739a60b8b7a1 | 4f75fbba41172048aa7f830dc87f01b70171719a | refs/heads/master | 2023-01-04T23:24:55.057223 | 2019-10-12T06:41:29 | 2019-10-12T06:41:29 | 214,589,521 | 1 | 0 | MIT | false | 2023-01-04T12:21:24 | 2019-10-12T06:15:30 | 2019-10-12T06:49:24 | 2023-01-04T12:21:22 | 78,404 | 1 | 0 | 15 | Jupyter Notebook | false | false | from __future__ import division
from flask import Flask, render_template, request, jsonify
from predict import Predictor
from model import Model
app = Flask(__name__)
M = Model()
predictor = Predictor()
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
@app.route('/predict', methods=['POST'])
def predict():
text = request.json
prediction = predictor.predict([text])
return jsonify(prediction)
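# Example request (illustrative; assumes the default Flask port and that the
# client POSTs the raw text as a JSON string):
#
#     curl -X POST http://localhost:5000/predict \
#          -H 'Content-Type: application/json' \
#          -d '"I love meeting new people and trying new things."'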
if __name__ == '__main__':
app.run(host='0.0.0.0', threaded=True, debug=True)
| UTF-8 | Python | false | false | 621 | py | 17 | app.py | 8 | 0.694042 | 0.687601 | 0 | 27 | 22 | 58 |
KratosMultiphysics/Kratos | 953,482,754,441 | e6ef3c5fe930eb7c31970706a0a1dc71b81e369a | 56a77194fc0cd6087b0c2ca1fb6dc0de64b8a58a | /applications/ShapeOptimizationApplication/python_scripts/algorithms/algorithm_relaxed_gradient_projection.py | a6ccfa79fd1294fea9e22ec8a9525c04bc32e5a3 | [
"BSD-3-Clause"
] | permissive | https://github.com/KratosMultiphysics/Kratos | 82b902a2266625b25f17239b42da958611a4b9c5 | 366949ec4e3651702edc6ac3061d2988f10dd271 | refs/heads/master | 2023-08-30T20:31:37.818693 | 2023-08-30T18:01:01 | 2023-08-30T18:01:01 | 81,815,495 | 994 | 285 | NOASSERTION | false | 2023-09-14T13:22:43 | 2017-02-13T10:58:24 | 2023-09-12T08:24:14 | 2023-09-14T13:22:43 | 1,979,018 | 882 | 218 | 488 | C++ | false | false | # ==============================================================================
# KratosShapeOptimizationApplication
#
# License: BSD License
# license: ShapeOptimizationApplication/license.txt
#
# Main authors: Ihar Antonau
#
# ==============================================================================
# Making KratosMultiphysics backward compatible with python 2.6 and 2.7
from __future__ import print_function, absolute_import, division
# Kratos Core and Apps
import KratosMultiphysics as Kratos
import KratosMultiphysics.ShapeOptimizationApplication as KSO
from KratosMultiphysics.LinearSolversApplication import dense_linear_solver_factory
# Additional imports
from .algorithm_base import OptimizationAlgorithm
from .. import mapper_factory
from KratosMultiphysics.ShapeOptimizationApplication.loggers import data_logger_factory
from KratosMultiphysics.ShapeOptimizationApplication.utilities.custom_timer import Timer
from KratosMultiphysics.ShapeOptimizationApplication.utilities.custom_variable_utilities import WriteDictionaryDataOnNodalVariable
import numpy as np
# ==============================================================================
class AlgorithmRelaxedGradientProjection(OptimizationAlgorithm):
# --------------------------------------------------------------------------
def __init__(self, optimization_settings, analyzer, communicator, model_part_controller):
default_algorithm_settings = Kratos.Parameters("""
{
"name" : "relaxed_gradient_projection",
"max_iterations" : 100,
"max_inner_iter" : 100,
"relative_tolerance" : 1e-3,
"line_search" : {
"line_search_type" : "manual_stepping",
"step_size" : 1.0
}
}""")
self.algorithm_settings = optimization_settings["optimization_algorithm"]
self.algorithm_settings.RecursivelyValidateAndAssignDefaults(default_algorithm_settings)
self.optimization_settings = optimization_settings
self.mapper_settings = optimization_settings["design_variables"]["filter"]
self.analyzer = analyzer
self.communicator = communicator
self.model_part_controller = model_part_controller
self.design_surface = None
self.mapper = None
self.data_logger = None
self.optimization_utilities = None
self.objectives = optimization_settings["objectives"]
self.constraints = optimization_settings["constraints"]
self.constraint_gradient_variables = {}
self.constraint_buffer_variables = {}
self.constraint_laplace_multipliers = {}
for itr, constraint in enumerate(self.constraints.values()):
constraint_id = constraint["identifier"].GetString()
self.constraint_gradient_variables.update({
constraint_id : {
"gradient": Kratos.KratosGlobals.GetVariable("DC"+str(itr+1)+"DX"),
"mapped_gradient": Kratos.KratosGlobals.GetVariable("DC"+str(itr+1)+"DX_MAPPED")
}
})
self.constraint_buffer_variables.update({
constraint_id : {
"buffer_value": 0.0,
"buffer_value-1": 0.0,
"buffer_size": 1e-12,
"buffer_size_factor": 2.0,
"central_buffer_value": 0.0,
"lower_buffer_value": - 1e-12,
"upper_buffer_value": 1e-12,
"g_i-1": 0.0,
"g_i-2": 0.0,
"g_i-3": 0.0,
"max_constraint_change": 0.0
}
})
self.step_size = self.algorithm_settings["line_search"]["step_size"].GetDouble()
self.line_search_type = self.algorithm_settings["line_search"]["line_search_type"].GetString()
self.max_iterations = self.algorithm_settings["max_iterations"].GetInt() + 1
self.relative_tolerance = self.algorithm_settings["relative_tolerance"].GetDouble()
self.s_norm = 0.0
self.max_inner_iter = self.algorithm_settings["max_inner_iter"].GetDouble()
self.buffer_coeff_update = 2.0 / self.max_inner_iter
self.optimization_model_part = model_part_controller.GetOptimizationModelPart()
self.optimization_model_part.AddNodalSolutionStepVariable(KSO.INV_HESSIAN)
self.optimization_model_part.AddNodalSolutionStepVariable(KSO.PROJECTION)
self.optimization_model_part.AddNodalSolutionStepVariable(KSO.CORRECTION)
self.optimization_model_part.AddNodalSolutionStepVariable(KSO.SEARCH_DIRECTION)
# --------------------------------------------------------------------------
def CheckApplicability(self):
if self.objectives.size() > 1:
raise RuntimeError("Gradient projection algorithm only supports one objective function!")
if self.constraints.size() == 0:
raise RuntimeError("Gradient projection algorithm requires definition of at least one constraint!")
# --------------------------------------------------------------------------
def InitializeOptimizationLoop(self):
self.model_part_controller.Initialize()
self.analyzer.InitializeBeforeOptimizationLoop()
self.design_surface = self.model_part_controller.GetDesignSurface()
self.mapper = mapper_factory.CreateMapper(self.design_surface, self.design_surface, self.mapper_settings)
self.mapper.Initialize()
self.model_part_controller.InitializeDamping()
self.data_logger = data_logger_factory.CreateDataLogger(self.model_part_controller, self.communicator, self.optimization_settings)
self.data_logger.InitializeDataLogging()
self.optimization_utilities = KSO.OptimizationUtilities
# --------------------------------------------------------------------------
def RunOptimizationLoop(self):
timer = Timer()
timer.StartTimer()
for self.optimization_iteration in range(1,self.max_iterations):
Kratos.Logger.Print("")
Kratos.Logger.Print("===============================================================================")
Kratos.Logger.PrintInfo("ShapeOpt", timer.GetTimeStamp(), ": Starting optimization iteration ", self.optimization_iteration)
Kratos.Logger.Print("===============================================================================\n")
timer.StartNewLap()
self.__initializeNewShape()
self.__analyzeShape()
self.__computeBufferValue()
self.__computeShapeUpdate()
self.__logCurrentOptimizationStep()
self.__updateBufferZone()
Kratos.Logger.Print("")
Kratos.Logger.PrintInfo("ShapeOpt", "Time needed for current optimization step = ", timer.GetLapTime(), "s")
Kratos.Logger.PrintInfo("ShapeOpt", "Time needed for total optimization so far = ", timer.GetTotalTime(), "s")
if self.__isAlgorithmConverged():
break
else:
self.__determineAbsoluteChanges()
# --------------------------------------------------------------------------
def FinalizeOptimizationLoop(self):
self.data_logger.FinalizeDataLogging()
self.analyzer.FinalizeAfterOptimizationLoop()
# --------------------------------------------------------------------------
def __initializeNewShape(self):
self.model_part_controller.UpdateTimeStep(self.optimization_iteration)
self.model_part_controller.UpdateMeshAccordingInputVariable(KSO.SHAPE_UPDATE)
self.model_part_controller.SetReferenceMeshToMesh()
# --------------------------------------------------------------------------
def __analyzeShape(self):
self.communicator.initializeCommunication()
self.communicator.requestValueOf(self.objectives[0]["identifier"].GetString())
self.communicator.requestGradientOf(self.objectives[0]["identifier"].GetString())
for constraint in self.constraints.values():
con_id = constraint["identifier"].GetString()
self.communicator.requestValueOf(con_id)
self.communicator.requestGradientOf(con_id)
self.analyzer.AnalyzeDesignAndReportToCommunicator(self.optimization_model_part, self.optimization_iteration, self.communicator)
# compute normals only if required
surface_normals_required = self.objectives[0]["project_gradient_on_surface_normals"].GetBool()
for constraint in self.constraints.values():
if constraint["project_gradient_on_surface_normals"].GetBool():
surface_normals_required = True
if surface_normals_required:
self.model_part_controller.ComputeUnitSurfaceNormals()
# project and damp objective gradients
objGradientDict = self.communicator.getStandardizedGradient(self.objectives[0]["identifier"].GetString())
WriteDictionaryDataOnNodalVariable(objGradientDict, self.optimization_model_part, KSO.DF1DX)
if self.objectives[0]["project_gradient_on_surface_normals"].GetBool():
self.model_part_controller.ProjectNodalVariableOnUnitSurfaceNormals(KSO.DF1DX)
self.model_part_controller.DampNodalSensitivityVariableIfSpecified(KSO.DF1DX)
# project and damp constraint gradients
for constraint in self.constraints.values():
con_id = constraint["identifier"].GetString()
conGradientDict = self.communicator.getStandardizedGradient(con_id)
gradient_variable = self.constraint_gradient_variables[con_id]["gradient"]
WriteDictionaryDataOnNodalVariable(conGradientDict, self.optimization_model_part, gradient_variable)
if constraint["project_gradient_on_surface_normals"].GetBool():
self.model_part_controller.ProjectNodalVariableOnUnitSurfaceNormals(gradient_variable)
self.model_part_controller.DampNodalSensitivityVariableIfSpecified(gradient_variable)
# --------------------------------------------------------------------------
def __computeBufferValue(self):
# compute new buffer size and buffer values
for constraint in self.constraints.values():
identifier = constraint["identifier"].GetString()
g_i = self.communicator.getStandardizedValue(identifier)
g_i_m1 = self.constraint_buffer_variables[identifier]["g_i-1"]
buffer_size_factor = self.constraint_buffer_variables[identifier]["buffer_size_factor"]
self.constraint_buffer_variables[identifier]["buffer_value-1"] = self.constraint_buffer_variables[identifier]["buffer_value"]
if self.optimization_iteration > 1:
if abs(g_i - g_i_m1) > self.constraint_buffer_variables[identifier]["max_constraint_change"]:
self.constraint_buffer_variables[identifier]["max_constraint_change"] = abs(g_i - g_i_m1)
max_constraint_change = self.constraint_buffer_variables[identifier]["max_constraint_change"]
self.constraint_buffer_variables[identifier]["buffer_size"] = max(buffer_size_factor * max_constraint_change, 1e-12)
buffer_size = self.constraint_buffer_variables[identifier]["buffer_size"]
self.constraint_buffer_variables[identifier]["lower_buffer_value"] = self.constraint_buffer_variables[identifier]["central_buffer_value"] \
- buffer_size
self.constraint_buffer_variables[identifier]["upper_buffer_value"] = self.constraint_buffer_variables[identifier]["central_buffer_value"] \
+ buffer_size
if self.__isConstraintActive(constraint):
if constraint["type"].GetString() == "=":
self.constraint_buffer_variables[identifier]["buffer_value"] = min(1 - abs(g_i) / buffer_size, 2.0)
else:
lower_buffer_value = self.constraint_buffer_variables[identifier]["lower_buffer_value"]
self.constraint_buffer_variables[identifier]["buffer_value"] = min( (g_i - lower_buffer_value) / buffer_size, 2.0 )
else:
self.constraint_buffer_variables[identifier]["buffer_value"] = 0.0
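        # Bookkeeping summary (derived from the logic above and from
        # __getActiveConstraints): for inequality constraints buffer_value
        # rises linearly from 0 at the lower buffer bound to 1 at the central
        # buffer value and is capped at 2. Values in (0, 1] scale the
        # relaxation of the projection; values in (1, 2] additionally
        # activate the correction term.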
# --------------------------------------------------------------------------
def __computeShapeUpdate(self):
self.mapper.Update()
self.mapper.InverseMap(KSO.DF1DX, KSO.DF1DX_MAPPED)
for constraint in self.constraints.values():
con_id = constraint["identifier"].GetString()
gradient_variable = self.constraint_gradient_variables[con_id]["gradient"]
mapped_gradient_variable = self.constraint_gradient_variables[con_id]["mapped_gradient"]
self.mapper.InverseMap(gradient_variable, mapped_gradient_variable)
self.inner_iter = 1
while not self.__checkInnerConvergence():
self.direction_has_changed = False
Kratos.Logger.PrintInfo("ShapeOpt", "Inner Iteration to Find Shape Update = ", self.inner_iter)
self.__computeControlPointUpdate()
self.mapper.Map(KSO.CONTROL_POINT_UPDATE, KSO.SHAPE_UPDATE)
self.model_part_controller.DampNodalUpdateVariableIfSpecified(KSO.SHAPE_UPDATE)
self.d_norm = self.optimization_utilities.ComputeMaxNormOfNodalVariable(self.design_surface, KSO.SHAPE_UPDATE)
self.__checkConstraintValue()
self.inner_iter += 1
self.__saveLineSearchData()
# --------------------------------------------------------------------------
def __checkInnerConvergence(self):
Kratos.Logger.PrintInfo("Check Convergence of the inner loop:")
if self.inner_iter == 1:
return False
elif self.direction_has_changed and self.inner_iter <= self.max_inner_iter:
return False
else:
return True
def __checkConstraintValue(self):
index = -1
for constraint in self.constraints.values():
if self.__isConstraintActive(constraint):
index += 1
identifier = constraint["identifier"].GetString()
g_i = self.communicator.getStandardizedValue(identifier)
g_a_variable = self.constraint_gradient_variables[identifier]["gradient"]
shape_update = Kratos.Vector()
gradient = Kratos.Vector()
self.optimization_utilities.AssembleVector(self.design_surface, gradient, g_a_variable)
self.optimization_utilities.AssembleVector(self.design_surface, shape_update, KSO.SHAPE_UPDATE)
new_g_i = g_i + np.dot(gradient, shape_update)
Kratos.Logger.PrintInfo("Constraint ", identifier, "\n Linearized new value = ", new_g_i)
if new_g_i > 0.0:
if self.relaxation_coefficients[index] < 1.0:
self.relaxation_coefficients[index] = min(self.relaxation_coefficients[index] + self.buffer_coeff_update, 1.0)
self.direction_has_changed = True
elif self.correction_coefficients[index] < 2.0:
self.correction_coefficients[index] = min (self.correction_coefficients[index] + self.buffer_coeff_update, 2.0)
self.direction_has_changed = True
Kratos.Logger.PrintInfo("Constraint ", identifier, "\n W_R, W_C = ", self.relaxation_coefficients[index], self.correction_coefficients[index])
# --------------------------------------------------------------------------
def __LineSearch(self):
Kratos.Logger.PrintInfo("Line Search ...")
if self.line_search_type == "manual_stepping":
self.__manualStep()
elif self.line_search_type == "QNBB_method":
if self.optimization_iteration == 1:
self.max_step_size = self.step_size
# Do initial small step
self.step_size /= 5
self.__manualStep()
else:
self.__QNBBStep()
elif self.line_search_type == "BB_method":
if self.optimization_iteration == 1:
self.max_step_size = self.step_size
# Do initial small step
self.step_size /= 5
self.__manualStep()
else:
self.__BBStep()
def __manualStep(self):
step_norm = self.optimization_utilities.ComputeMaxNormOfNodalVariable(self.design_surface, KSO.CONTROL_POINT_UPDATE)
if abs(step_norm) > 1e-10:
step = Kratos.Vector()
self.optimization_utilities.AssembleVector(self.design_surface, step, KSO.CONTROL_POINT_UPDATE)
step *= 1.0 / step_norm
self.optimization_utilities.AssignVectorToVariable(self.design_surface, step, KSO.SEARCH_DIRECTION)
step *= self.step_size
self.optimization_utilities.AssignVectorToVariable(self.design_surface, step, KSO.CONTROL_POINT_UPDATE)
def __QNBBStep(self):
self.s_norm = self.optimization_utilities.ComputeMaxNormOfNodalVariable(self.design_surface, KSO.SEARCH_DIRECTION)
if abs(self.s_norm) > 1e-10:
s = Kratos.Vector()
self.optimization_utilities.AssembleVector(self.design_surface, s, KSO.SEARCH_DIRECTION)
s /= self.s_norm
self.optimization_utilities.AssignVectorToVariable(self.design_surface, s, KSO.SEARCH_DIRECTION)
for index, node in enumerate(self.design_surface.Nodes):
i = index * 3
y_i = np.array(self.prev_s[i: i+3]) - np.array(s[i: i+3])
d_i = np.array(self.d[i:i+3])
if np.dot(y_i, y_i) < 1e-9:
step_i = self.max_step_size
else:
step_i = abs(np.dot(d_i, y_i) / np.dot(y_i, y_i))
if step_i > self.max_step_size:
step_i = self.max_step_size
node.SetSolutionStepValue(KSO.INV_HESSIAN, step_i)
s[i] = s[i] * step_i
s[i+1] = s[i+1] * step_i
s[i+2] = s[i+2] * step_i
self.optimization_utilities.AssignVectorToVariable(self.design_surface, s, KSO.CONTROL_POINT_UPDATE)
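        # Note: this is a per-node (diagonal) Barzilai-Borwein step,
        # step_i = |d_i . y_i| / (y_i . y_i), with d_i the last control point
        # update of node i and y_i the change of its normalized search
        # direction. __BBStep below applies the same formula with one scalar
        # step for the whole design surface.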
def __BBStep(self):
self.s_norm = self.optimization_utilities.ComputeMaxNormOfNodalVariable(self.design_surface, KSO.SEARCH_DIRECTION)
if abs(self.s_norm) > 1e-10:
s = Kratos.Vector()
self.optimization_utilities.AssembleVector(self.design_surface, s, KSO.SEARCH_DIRECTION)
s /= self.s_norm
self.optimization_utilities.AssignVectorToVariable(self.design_surface, s, KSO.SEARCH_DIRECTION)
y = self.prev_s - s
if np.dot(y, y) < 1e-9:
step = self.max_step_size
else:
step = abs(np.dot(y, self.d) / np.dot(y, y))
if step > self.max_step_size:
step = self.max_step_size
s = s * step
self.optimization_utilities.AssignVectorToVariable(self.design_surface, s, KSO.CONTROL_POINT_UPDATE)
def __saveLineSearchData(self):
self.prev_s = Kratos.Vector()
self.d = Kratos.Vector()
self.optimization_utilities.AssembleVector(self.design_surface, self.d, KSO.CONTROL_POINT_UPDATE)
self.optimization_utilities.AssembleVector(self.design_surface, self.prev_s, KSO.SEARCH_DIRECTION)
# --------------------------------------------------------------------------
def __computeControlPointUpdate(self):
"""adapted from https://msulaiman.org/onewebmedia/GradProj_2.pdf"""
if self.inner_iter == 1:
self.g_a, self.g_a_variables, self.relaxation_coefficients, self.correction_coefficients = self.__getActiveConstraints()
Kratos.Logger.PrintInfo("ShapeOpt", "Assemble vector of objective gradient.")
nabla_f = Kratos.Vector()
p = Kratos.Vector()
self.optimization_utilities.AssembleVector(self.design_surface, nabla_f, KSO.DF1DX_MAPPED)
f_norm = self.optimization_utilities.ComputeMaxNormOfNodalVariable(self.design_surface, KSO.DF1DX_MAPPED)
if abs(f_norm) > 1e-10:
nabla_f *= 1.0/f_norm
if len(self.g_a) == 0:
Kratos.Logger.PrintInfo("ShapeOpt", "No constraints active, use negative objective gradient as search direction.")
p = nabla_f * (-1.0)
self.optimization_utilities.AssignVectorToVariable(self.design_surface, p, KSO.SEARCH_DIRECTION)
self.optimization_utilities.AssignVectorToVariable(self.design_surface, p, KSO.PROJECTION)
Kratos.VariableUtils().SetHistoricalVariableToZero(KSO.CORRECTION, self.design_surface.Nodes)
self.optimization_utilities.AssignVectorToVariable(self.design_surface, p, KSO.CONTROL_POINT_UPDATE)
self.__LineSearch()
return
omega_r = Kratos.Matrix()
self.optimization_utilities.AssembleBufferMatrix(omega_r, self.relaxation_coefficients)
omega_c = Kratos.Vector(self.correction_coefficients)
Kratos.Logger.PrintInfo("ShapeOpt", "Assemble matrix of constraint gradient.")
N = Kratos.Matrix()
self.optimization_utilities.AssembleMatrix(self.design_surface, N, self.g_a_variables)
settings = Kratos.Parameters('{ "solver_type" : "LinearSolversApplication.dense_col_piv_householder_qr" }')
solver = dense_linear_solver_factory.ConstructSolver(settings)
c = Kratos.Vector()
Kratos.Logger.PrintInfo("ShapeOpt", "Calculate projected search direction and correction.")
self.optimization_utilities.CalculateRelaxedProjectedSearchDirectionAndCorrection(
nabla_f,
N,
omega_r,
omega_c,
solver,
p,
c)
# additional normalization step
self.optimization_utilities.AssignVectorToVariable(self.design_surface, p, KSO.PROJECTION)
self.optimization_utilities.AssignVectorToVariable(self.design_surface, c, KSO.CORRECTION)
self.optimization_utilities.AssignVectorToVariable(self.design_surface, p+c, KSO.SEARCH_DIRECTION)
self.optimization_utilities.AssignVectorToVariable(self.design_surface, p+c, KSO.CONTROL_POINT_UPDATE)
self.__LineSearch()
# --------------------------------------------------------------------------
def __getActiveConstraints(self):
active_constraint_values = []
active_constraint_variables = []
active_relaxation_coefficient = []
active_correction_coefficient = []
for constraint in self.constraints.values():
if self.__isConstraintActive(constraint):
identifier = constraint["identifier"].GetString()
g_i = self.communicator.getStandardizedValue(identifier)
buffer_value = self.constraint_buffer_variables[identifier]["buffer_value"]
active_constraint_values.append(g_i)
g_a_variable = self.constraint_gradient_variables[identifier]["mapped_gradient"]
g_a_norm = self.optimization_utilities.ComputeMaxNormOfNodalVariable(self.design_surface, g_a_variable)
g_a_variable_vector = Kratos.Vector()
self.optimization_utilities.AssembleVector(self.design_surface, g_a_variable_vector, g_a_variable)
if abs(g_a_norm) > 1e-10:
g_a_variable_vector /= g_a_norm
self.optimization_utilities.AssignVectorToVariable(self.design_surface, g_a_variable_vector, g_a_variable)
active_constraint_variables.append(g_a_variable)
active_relaxation_coefficient.append(min(buffer_value,1.0))
max_buffer = 2.0
if buffer_value > 1.0:
if buffer_value < max_buffer:
active_correction_coefficient.append(2*(buffer_value - 1))
else:
active_correction_coefficient.append(2*(max_buffer-1))
else:
active_correction_coefficient.append(0.0)
return active_constraint_values, active_constraint_variables, active_relaxation_coefficient, active_correction_coefficient
# --------------------------------------------------------------------------
def __isConstraintActive(self, constraint):
identifier = constraint["identifier"].GetString()
g_i = self.communicator.getStandardizedValue(identifier)
if constraint["type"].GetString() == "=":
return True
elif g_i >= self.constraint_buffer_variables[identifier]["lower_buffer_value"]:
return True
else:
return False
# --------------------------------------------------------------------------
def __updateBufferZone(self):
# adapt the buffer zones for zig-zagging, too much or too little correction
for constraint in self.constraints.values():
identifier = constraint["identifier"].GetString()
g_i = self.communicator.getStandardizedValue(identifier)
g_i_m1 = self.constraint_buffer_variables[identifier]["g_i-1"]
g_i_m2 = self.constraint_buffer_variables[identifier]["g_i-2"]
g_i_m3 = self.constraint_buffer_variables[identifier]["g_i-3"]
buffer_value = self.constraint_buffer_variables[identifier]["buffer_value"]
buffer_value_m1 = self.constraint_buffer_variables[identifier]["buffer_value-1"]
if self.optimization_iteration > 3:
delta_g_1 = g_i - g_i_m1
delta_g_2 = g_i_m1 -g_i_m2
delta_g_3 = g_i_m2 -g_i_m3
if delta_g_1*delta_g_2 < 0 and delta_g_2*delta_g_3 < 0:
self.constraint_buffer_variables[identifier]["buffer_size_factor"] += abs(buffer_value - buffer_value_m1)
if self.optimization_iteration > 1:
delta_g = g_i - g_i_m1
if delta_g >= 0.0 and g_i_m1 > 0:
self.constraint_buffer_variables[identifier]["central_buffer_value"] -= g_i_m1
elif delta_g <= 0.0 and g_i_m1 < 0:
self.constraint_buffer_variables[identifier]["central_buffer_value"] += g_i_m1
self.constraint_buffer_variables[identifier]["central_buffer_value"] = \
max(self.constraint_buffer_variables[identifier]["central_buffer_value"], 0.0)
self.constraint_buffer_variables[identifier]["g_i-3"] = g_i_m2
self.constraint_buffer_variables[identifier]["g_i-2"] = g_i_m1
self.constraint_buffer_variables[identifier]["g_i-1"] = g_i
# --------------------------------------------------------------------------
def __logCurrentOptimizationStep(self):
additional_values_to_log = {}
additional_values_to_log["step_size"] = self.d_norm
additional_values_to_log["inf_norm_p"] = self.optimization_utilities.ComputeMaxNormOfNodalVariable(self.design_surface, KSO.PROJECTION)
additional_values_to_log["inf_norm_c"] = self.optimization_utilities.ComputeMaxNormOfNodalVariable(self.design_surface, KSO.CORRECTION)
additional_values_to_log["projection_norm"] = self.s_norm
itr = 0
for constraint in self.constraints.values():
identifier = constraint["identifier"].GetString()
additional_values_to_log["c"+str(itr+1)+"_buffer_value"] = self.constraint_buffer_variables[identifier]["buffer_value"]
additional_values_to_log["c"+str(itr+1)+"_buffer_size"] = self.constraint_buffer_variables[identifier]["buffer_size"]
additional_values_to_log["c"+str(itr+1)+"_buffer_size_factor"] = self.constraint_buffer_variables[identifier]["buffer_size_factor"]
additional_values_to_log["c"+str(itr+1)+"_central_buffer_value"] = self.constraint_buffer_variables[identifier]["central_buffer_value"]
additional_values_to_log["c"+str(itr+1)+"_lower_buffer_value"] = self.constraint_buffer_variables[identifier]["lower_buffer_value"]
additional_values_to_log["c"+str(itr+1)+"_upper_buffer_value"] = self.constraint_buffer_variables[identifier]["upper_buffer_value"]
itr += 1
self.data_logger.LogCurrentValues(self.optimization_iteration, additional_values_to_log)
self.data_logger.LogCurrentDesign(self.optimization_iteration)
# --------------------------------------------------------------------------
def __isAlgorithmConverged(self):
if self.optimization_iteration > 1 :
# Check if maximum iterations were reached
if self.optimization_iteration == self.max_iterations:
Kratos.Logger.Print("")
Kratos.Logger.PrintInfo("ShapeOpt", "Maximal iterations of optimization problem reached!")
return True
# Check for relative tolerance
relative_change_of_objective_value = self.data_logger.GetValues("rel_change_objective")[self.optimization_iteration]
if abs(relative_change_of_objective_value) < self.relative_tolerance:
Kratos.Logger.Print("")
Kratos.Logger.PrintInfo("ShapeOpt", "Optimization problem converged within a relative objective tolerance of ",self.relative_tolerance,"%.")
return True
# --------------------------------------------------------------------------
def __determineAbsoluteChanges(self):
self.optimization_utilities.AddFirstVariableToSecondVariable(self.design_surface, KSO.CONTROL_POINT_UPDATE, KSO.CONTROL_POINT_CHANGE)
self.optimization_utilities.AddFirstVariableToSecondVariable(self.design_surface, KSO.SHAPE_UPDATE, KSO.SHAPE_CHANGE)
# ============================================================================== | UTF-8 | Python | false | false | 30,146 | py | 7,188 | algorithm_relaxed_gradient_projection.py | 5,909 | 0.601705 | 0.595071 | 0 | 580 | 50.977586 | 158 |
python-trio/sphinxcontrib-trio | 14,525,579,417,875 | 8765539cd8fe2625d1fea741bee07c4470c71f87 | a9e2bc489e9bb5f963d9bd478d0c5cdff2cc4bed | /sphinxcontrib_trio/__init__.py | 69366614e407c23fb9bb6cc6b714c9573e04caf8 | [
"Apache-2.0",
"MIT"
] | permissive | https://github.com/python-trio/sphinxcontrib-trio | 51baecdc96163b57723bc1272791e30d626498b7 | d76d5d993a97f381c17ab65a4ac38dad56584de7 | refs/heads/master | 2023-03-09T16:19:38.406648 | 2022-05-20T05:59:46 | 2022-05-20T05:59:46 | 91,061,935 | 28 | 8 | NOASSERTION | false | 2023-03-06T06:07:44 | 2017-05-12T07:06:37 | 2022-09-26T12:28:29 | 2023-03-06T06:07:39 | 518 | 26 | 11 | 20 | Python | false | false | """A sphinx extension to help documenting Python code that uses async/await
(or context managers, or abstract methods, or generators, or ...).
This extension takes a somewhat non-traditional approach, though, based on
the observation that function properties like "classmethod", "async",
"abstractmethod" can be mixed and matched, so the the classic sphinx
approach of defining different directives for all of these quickly becomes
cumbersome. Instead, we override the ordinary function & method directives
to add options corresponding to these different properties, and override the
autofunction and automethod directives to sniff for these
properties. Examples:
A function that returns a context manager:
.. function:: foo(x, y)
:with: bar
renders in the docs like:
with foo(x, y) as bar
The 'bar' part is optional. Use :async-with: for an async context
manager. These are also accepted on method, autofunction, and automethod.
An abstract async classmethod:
.. method:: foo
:abstractmethod:
:classmethod:
:async:
renders like:
abstractmethod classmethod await foo()
Or since all of these attributes are introspectable, we can get the same
result with:
.. automethod:: foo
An abstract static decorator:
.. method:: foo
:abstractmethod:
:staticmethod:
:decorator:
The :decorator: attribute isn't introspectable, but the others
are, so this also works:
.. automethod:: foo
:decorator:
and renders like
abstractmethod staticmethod @foo
"""
from ._version import __version__
from docutils.parsers.rst import directives
from sphinx import addnodes
from sphinx.domains.python import PyFunction
from sphinx.domains.python import PyObject
from sphinx.domains.python import PyMethod, PyClassMethod, PyStaticMethod
from sphinx.ext.autodoc import (
FunctionDocumenter, MethodDocumenter, ClassLevelDocumenter, Options, ModuleLevelDocumenter
)
import inspect
try:
from async_generator import isasyncgenfunction
except ImportError:
from inspect import isasyncgenfunction
CM_CODES = set()
ACM_CODES = set()
from contextlib import contextmanager
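# Calling contextmanager(None) builds the internal helper wrapper; every
# @contextmanager-decorated function shares that helper's code object, so
# collecting it lets the sniffing code below recognize such functions.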
CM_CODES.add(contextmanager(None).__code__) # type: ignore
try:
from contextlib2 import contextmanager as contextmanager2
except ImportError:
pass
else:
CM_CODES.add(contextmanager2(None).__code__) # type: ignore
try:
from contextlib import asynccontextmanager
except ImportError:
pass
else:
ACM_CODES.add(asynccontextmanager(None).__code__) # type: ignore
extended_function_option_spec = {
"async": directives.flag,
"decorator": directives.flag,
"with": directives.unchanged,
"async-with": directives.unchanged,
"for": directives.unchanged,
"async-for": directives.unchanged,
}
extended_method_option_spec = {
**extended_function_option_spec,
"abstractmethod": directives.flag,
"staticmethod": directives.flag,
"classmethod": directives.flag,
"property": directives.flag,
}
autodoc_option_spec = {
"no-auto-options": directives.flag,
}
################################################################
# Extending the basic function and method directives
################################################################
class ExtendedCallableMixin(PyObject): # inherit PyObject to satisfy MyPy
def needs_arglist(self):
if "property" in self.options:
return False
if ("decorator" in self.options
or self.objtype in ["decorator", "decoratormethod"]):
return False
return True
# This does *not* override the superclass get_signature_prefix(), because
# that gets called by the superclass handle_signature(), which then
# may-or-may-not insert it into the signode (depending on whether or not
# it returns an empty string). We want to insert the decorator @ after the
# prefix but before the regular name. If we let the superclass
# handle_signature() insert the prefix or maybe not, then we can't tell
# where the @ goes.
def _get_signature_prefix(self):
ret = ""
if "abstractmethod" in self.options:
ret += "abstractmethod "
# objtype checks are for backwards compatibility, to support
#
# .. staticmethod::
#
# in addition to
#
# .. method::
# :staticmethod:
#
# it would be nice if there were a central place we could normalize
# the directive name into the options dict instead of having to check
# both here at time-of-use, but I don't understand sphinx well enough
# to do that.
#
# Note that this is the code that determines the ordering of the
# different prefixes.
if "staticmethod" in self.options or self.objtype == "staticmethod":
ret += "staticmethod "
if "classmethod" in self.options or self.objtype == "classmethod":
ret += "classmethod "
# if "property" in self.options:
# ret += "property "
if "with" in self.options:
ret += "with "
if "async-with" in self.options:
ret += "async with "
for for_type, render in [("for", "for"), ("async-for", "async for")]:
if for_type in self.options:
name = self.options.get(for_type, "")
if not name.strip():
name = "..."
ret += "{} {} in ".format(render, name)
if "async" in self.options:
ret += "await "
return ret
# But we do want to override the superclass get_signature_prefix to stop
# it from trying to do its own handling of staticmethod and classmethod
# directives (the legacy ones)
def get_signature_prefix(self, sig):
return ""
def handle_signature(self, sig, signode):
ret = super().handle_signature(sig, signode)
# Add the "@" prefix
if ("decorator" in self.options
or self.objtype in ["decorator", "decoratormethod"]):
signode.insert(0, addnodes.desc_addname("@", "@"))
# Now that the "@" has been taken care of, we can add in the regular
# prefix.
prefix = self._get_signature_prefix()
if prefix:
signode.insert(0, addnodes.desc_annotation(prefix, prefix))
# And here's the suffix:
for optname in ["with", "async-with"]:
if self.options.get(optname, "").strip():
# for some reason a regular space here gets stripped, so we
# use U+00A0 NO-BREAK SPACE
s = "\u00A0as {}".format(self.options[optname])
signode += addnodes.desc_annotation(s, s)
return ret
class ExtendedPyFunction(ExtendedCallableMixin, PyFunction):
option_spec = {
**PyFunction.option_spec,
**extended_function_option_spec,
}
class ExtendedPyMethod(ExtendedCallableMixin, PyMethod):
option_spec = {
**PyMethod.option_spec,
**extended_method_option_spec,
}
class ExtendedPyClassMethod(ExtendedCallableMixin, PyClassMethod):
option_spec = {
**PyClassMethod.option_spec,
**extended_method_option_spec,
}
class ExtendedPyStaticMethod(ExtendedCallableMixin, PyStaticMethod):
option_spec = {
**PyStaticMethod.option_spec,
**extended_method_option_spec,
}
################################################################
# Autodoc
################################################################
# Our sniffer never reports more than one item from this set. In principle
# it's possible for something to be, say, an async function that returns
# a context manager ("with await foo(): ..."), but it's extremely unusual, and
# OTOH it's very easy for these to get confused when walking the __wrapped__
# chain (e.g. because async_generator converts an async into an async-for, and
# maybe that then gets converted into an async-with by an async version of
# contextlib.contextmanager). So once we see one of these, we stop looking for
# the others.
EXCLUSIVE_OPTIONS = {"async", "for", "async-for", "with", "async-with"}
def sniff_options(obj):
options = set()
# We walk the __wrapped__ chain to collect properties.
while True:
if getattr(obj, "__isabstractmethod__", False):
options.add("abstractmethod")
if isinstance(obj, classmethod):
options.add("classmethod")
if isinstance(obj, staticmethod):
options.add("staticmethod")
# if isinstance(obj, property):
# options.add("property")
# Only check for these if we haven't seen any of them yet:
if not (options & EXCLUSIVE_OPTIONS):
if inspect.iscoroutinefunction(obj):
options.add("async")
# in some versions of Python, isgeneratorfunction returns true for
# coroutines, so we use elif
elif inspect.isgeneratorfunction(obj):
options.add("for")
if isasyncgenfunction(obj):
options.add("async-for")
# Some heuristics to detect when something is a context manager
if getattr(obj, "__code__", None) in CM_CODES:
options.add("with")
if getattr(obj, "__returns_contextmanager__", False):
options.add("with")
if getattr(obj, "__code__", None) in ACM_CODES:
options.add("async-with")
if getattr(obj, "__returns_acontextmanager__", False):
options.add("async-with")
if hasattr(obj, "__wrapped__"):
obj = obj.__wrapped__
elif hasattr(obj, "__func__"): # for staticmethod & classmethod
obj = obj.__func__
else:
break
return options
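# Illustrative behaviour (a sketch, based on the checks above): for an
# `async def f`, sniff_options(f) returns {"async"}; for a
# @contextmanager-wrapped function it returns {"with"}.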
def update_with_sniffed_options(obj, option_dict):
if "no-auto-options" in option_dict:
return
sniffed = sniff_options(obj)
for attr in sniffed:
# Suppose someone has a generator, and they document it as:
#
# .. autofunction:: my_generator
# :for: loop_var
#
# We don't want to blow away the existing attr["for"] = "loop_var"
# with our autodetected attr["for"] = None. So we use setdefault.
option_dict.setdefault(attr, None)
def passthrough_option_lines(self, option_spec):
sourcename = self.get_sourcename()
for option in option_spec:
if option in self.options:
if self.options.get(option) is not None:
line = " :{}: {}".format(option, self.options[option])
else:
line = " :{}:".format(option)
self.add_line(line, sourcename)
class ExtendedFunctionDocumenter(FunctionDocumenter):
priority = FunctionDocumenter.priority + 1
# You can explicitly set the options in case autodetection fails
option_spec = {
**FunctionDocumenter.option_spec,
**extended_function_option_spec,
**autodoc_option_spec,
}
def add_directive_header(self, sig):
# We can't call super() here, because we want to *skip* executing
# FunctionDocumenter.add_directive_header, because starting in Sphinx
# 2.1 it does its own sniffing, which is worse than ours and will
# break ours. So we jump straight to the superclass.
ModuleLevelDocumenter.add_directive_header(self, sig)
passthrough_option_lines(self, extended_function_option_spec)
def import_object(self):
ret = super().import_object()
# autodoc likes to re-use dicts here for some reason (!?!)
self.options = Options(self.options)
update_with_sniffed_options(self.object, self.options)
return ret
class ExtendedMethodDocumenter(MethodDocumenter):
priority = MethodDocumenter.priority + 1
# You can explicitly set the options in case autodetection fails
option_spec = {
**MethodDocumenter.option_spec,
**extended_method_option_spec,
**autodoc_option_spec,
}
def add_directive_header(self, sig):
# We can't call super() here, because we want to *skip* executing
# FunctionDocumenter.add_directive_header, because starting in Sphinx
# 2.1 it does its own sniffing, which is worse than ours and will
# break ours. So we jump straight to the superclass.
ClassLevelDocumenter.add_directive_header(self, sig)
passthrough_option_lines(self, extended_method_option_spec)
def import_object(self):
# MethodDocumenter overrides import_object to do some sniffing in
# addition to just importing. But we do our own sniffing and just want
# the import, so we un-override it.
ret = ClassLevelDocumenter.import_object(self)
# Use 'inspect.getattr_static' to properly detect class or static methods.
# This also resolves the MRO entries for subclasses.
obj = inspect.getattr_static(self.parent, self.object_name)
# autodoc likes to re-use dicts here for some reason (!?!)
self.options = Options(self.options)
update_with_sniffed_options(obj, self.options)
# Replicate the special ordering hacks in
# MethodDocumenter.import_object
if "classmethod" in self.options or "staticmethod" in self.options:
self.member_order -= 1
return ret
################################################################
# Register everything
################################################################
def setup(app):
app.add_directive_to_domain('py', 'function', ExtendedPyFunction)
app.add_directive_to_domain('py', 'method', ExtendedPyMethod)
app.add_directive_to_domain('py', 'classmethod', ExtendedPyClassMethod)
app.add_directive_to_domain('py', 'staticmethod', ExtendedPyStaticMethod)
app.add_directive_to_domain('py', 'decorator', ExtendedPyFunction)
app.add_directive_to_domain('py', 'decoratormethod', ExtendedPyMethod)
# Make sure sphinx.ext.autodoc is loaded before we try to mess with it.
app.setup_extension("sphinx.ext.autodoc")
# We're overriding these on purpose, so disable the warning about it
del directives._directives["autofunction"]
del directives._directives["automethod"]
app.add_autodocumenter(ExtendedFunctionDocumenter)
app.add_autodocumenter(ExtendedMethodDocumenter)
return {'version': __version__, 'parallel_read_safe': True}
| UTF-8 | Python | false | false | 14,502 | py | 17 | __init__.py | 7 | 0.634602 | 0.633361 | 0 | 400 | 35.255 | 94 |
Hamsterzs/PineiroOrdunoAbel | 2,516,850,855,570 | ed19320396745d9b3b9684b186bbc117016e4f6f | ad1a6a9a274aeb98eb3d0b662a4002685e16bcae | /Est Datos/recursividad/recursividad.py | 1222159429fba3972b493ed1db52552c6c440411 | [] | no_license | https://github.com/Hamsterzs/PineiroOrdunoAbel | d0a32eee203cde94a538328228c172757854c76a | e2bf71d630de38c40ef0ca4d887b9225fd833a59 | refs/heads/master | 2020-08-28T10:30:15.239738 | 2019-11-06T18:48:27 | 2019-11-06T18:48:27 | 217,673,710 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #es una herramienta en el procceso de diseñar algoritmos que consiste en la capacidad
#(that is, the ability) of a function to call itself, subdividing a large problem
#into smaller cases until it becomes a trivial case, the base case.
#Simplifying a solution with recursion does not always improve the
#efficiency of the algorithm. Consider the following code: a recursive function
#that produces an infinite loop
def funcionRecursiva(val):
print(val)
funcionRecursiva(val)
print("fin del programa")
return None
# the function above has no base case, so the recursion never ends
# to write a recursive function correctly you must define a
# base case that stops the recursion; consider the following function,
# which prints a countdown
import time
def printRev (n):
if n > 0:
printRev(n-1)
print(n)
def bomba (t):
    if t == 0: # base case
print("boom")
return 0
else:
time.sleep(1)
print(f"00:00:{t}")
bomba(t-1)
print(f"fin de la funcion{t}")
def suma (n):
if n == 0:
return n
else:
return n + suma (n-1)
def fib (n):
if n == 0 or n == 1:
return 1
else:
return fib(n-1) + fib(n-2)
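# The fib above recomputes the same subproblems and runs in exponential time.
# A minimal memoized sketch (not part of the original exercise) showing how
# caching results keeps the recursion efficient:
def fib_memo(n, cache={}):
    # the shared default dict acts as the cache across calls
    if n in cache:
        return cache[n]
    if n == 0 or n == 1:
        return 1
    cache[n] = fib_memo(n - 1) + fib_memo(n - 2)
    return cache[n]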
def main():
#funcionRecursiva(10)
#printRev(10)
#bomba(10)
#print(suma(3))
print(fib(5))
main()
# consider the following function to simulate a bomb's countdown | UTF-8 | Python | false | false | 1,543 | py | 26 | recursividad.py | 26 | 0.639195 | 0.622972 | 0 | 59 | 25.016949 | 85 |
fox1985/mybook | 11,922,829,242,691 | d2906eac8f67c7eed7614d695d3db56e2e6cc3b2 | 37ff4c0682a9de75c8794c8fc2638f1058a14cb8 | /page/urls.py | e377f9ddca7d66d52007dcb0502d429edb7d583c | [] | no_license | https://github.com/fox1985/mybook | 0a47193701ce39b38bfaae8f51c312f96a98f02f | f15c04dedcd54e0ba71de55c6b5477cec1f98ddc | refs/heads/master | 2020-04-04T14:35:40.085681 | 2018-11-07T17:27:49 | 2018-11-07T17:27:49 | 154,287,264 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""mybook URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
Function-based controllers:
url(r'^(?:(?P<cat_id>\d+)/)?$', views.index, name="index"),
url(r'^good/?(?P<good_id>\d+)/$', views.good, name="good"),
url(r'^(?:(?P<cat_id>\d+)/)?$', GoodListView.as_view(), name="index"),
url(r'^good/?(?P<good_id>\d+)/$', GoodDetailView.as_view(), name="good"),
"""
from django.conf.urls import url
from page import views
from page.twviews import GoodListView, GoodDetailView
from page.edit import GoodCreate, GoodUpdate, GoodDelete
# login_required checks whether the user is logged in; permission_required restricts access by user permissions
from django.contrib.auth.decorators import login_required, permission_required
urlpatterns = [
url(r'^(?:(?P<cat_id>\d+)/)?$', GoodListView.as_view(), name="index"),
url(r'^good/?(?P<good_id>\d+)/$', GoodDetailView.as_view(), name="good"),
url(r'^(?P<cat_id>\d+)/add/$', GoodCreate.as_view(), name="good_add"),
url(r'^good/(?P<good_id>\d+)/edit/$', GoodUpdate.as_view(), name="good_edit"),
url(r'^good/(?P<good_id>\d+)/delete/$', GoodDelete.as_view(), name="good_delete"),
    # Authentication
#url(r'^login/', "django.contrib.auth.views.login", name="login"),
]
from django.conf import settings
from django.conf.urls.static import static
# So that images are displayed when serving from the local dev server
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | UTF-8 | Python | false | false | 2,146 | py | 21 | urls.py | 14 | 0.665335 | 0.661339 | 0 | 70 | 27.614286 | 100 |
danel2005/triple-of-da-centry | 1,795,296,362,937 | 55eedac9d5312a3816e4f7eb7b3eb496604f645e | a2eff08a89f960821b23479d46a70665a27970c0 | /Aviel/8.3.4.py | 8c2855654676e10e720344042891bc1aef78ad87 | [] | no_license | https://github.com/danel2005/triple-of-da-centry | d7a51fa95855b0f8969bc99fca5c4bf3974b7eb8 | 74db9de0ef5c8dad366b4a7c2d244eb6abc5265f | refs/heads/master | 2023-03-17T22:28:29.375165 | 2021-03-19T17:36:06 | 2021-03-19T17:36:06 | 335,373,574 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def inverse_dict(my_dict):
"""
    inverting a dict
    :param my_dict: the dict we want to invert
    :type my_dict: dict
    :return: the inverted dict
:rtype: dict
"""
new_dict = {}
for info in my_dict.items():
if info[1] in new_dict.keys():
new_dict[info[1]] += [info[0]]
else:
new_dict[info[1]] = [info[0]]
for value in new_dict.values():
value.sort()
return new_dict
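# Illustrative behaviour: inverse_dict({'I': 3, 'love': 3, 'self.py!': 2})
# returns {3: ['I', 'love'], 2: ['self.py!']} - each value becomes a key that
# maps to the sorted list of the original keys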
def main():
course_dict = {'I' : 3, 'love' : 3, 'self.py!' : 2}
print(inverse_dict(course_dict))
if __name__ == "__main__":
main() | UTF-8 | Python | false | false | 601 | py | 86 | 8.3.4.py | 79 | 0.517471 | 0.50416 | 0 | 24 | 24.083333 | 55 |
fengyouliang/wheat_detection | 19,026,705,162,264 | 9b5ae1857f0ca175fcd10ada8bb6c3f979104fb3 | 77717d0024c8597fec83600259ea5547abbc183a | /wheat_detection/demo/stratified_split_demo.py | 2f337b86ff9dd27536c943b9eec06ef1d5655e98 | [
"Apache-2.0"
] | permissive | https://github.com/fengyouliang/wheat_detection | 0a090ef5eda7f2c5463996f4795f9ce06dd04050 | d056123426a1260c29b486cbb8e44a88a0a3c5bc | refs/heads/master | 2022-11-17T15:09:29.113493 | 2020-07-18T13:47:34 | 2020-07-18T13:47:34 | 276,532,878 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
csv_path = '/home/fengyouliang/datasets/WHD/kaggle_csv/train_0618.csv'
df = pd.read_csv(csv_path)
# get a df with just image and source columns
# such that dropping duplicates will only keep unique image_ids
image_source = df[['image_id', 'source']].drop_duplicates()
# get lists for image_ids and sources
image_ids = image_source['image_id'].to_numpy()
sources = image_source['source'].to_numpy()
ret = train_test_split(image_ids, sources, test_size=0.2, stratify=sources, random_state=None)
X_train, X_test, y_train, y_test = ret
print(f'# train images: {len(X_train)}')
print(f'# val images: {len(X_test)}')
train_df = df[df['image_id'].isin(X_train)]
val_df = df[df['image_id'].isin(X_test)]
fig = plt.figure(figsize=(10, 15))
counts = train_df['source'].value_counts()
ax1 = fig.add_subplot(2, 1, 1)
a = ax1.bar(counts.index, counts)
counts = val_df['source'].value_counts()
ax2 = fig.add_subplot(2, 1, 2)
a = ax2.bar(counts.index, counts)
plt.show()
| UTF-8 | Python | false | false | 1,100 | py | 31 | stratified_split_demo.py | 27 | 0.685455 | 0.667273 | 0 | 32 | 32.375 | 94 |
raynardfung/mooc-udacity-intro-to-computer-science | 17,282,948,411,013 | d7bbae8af6aecac47a18c8978f826acba7f34742 | 9e9eb854685e3e2c295aa1fa49f07b9bff0eacf8 | /lesson7.py | ccbc987a54e1583cbe8212c05af90ece68eafcee | [] | no_license | https://github.com/raynardfung/mooc-udacity-intro-to-computer-science | fa39ff3abfaee229815117c753402db27bf0904a | b74c1df4152e5b617636c696074f80f80ae2a930 | refs/heads/master | 2016-06-04T17:50:21.538555 | 2015-10-05T18:08:47 | 2015-10-05T18:08:47 | 43,699,784 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #--------------------#
# lesson 7
#--------------------#
# no technical content in this unit, just overviews and fieldtrips to real companies
# and labs
# 3 main themes from this course:
# abstraction: making general rules that can be applied to many situations
# universality: a computer is a universal machine since it can make decisions,
# keep track of data, and iterate
# recursive definitions: something complex can be defined using simple definitions
| UTF-8 | Python | false | false | 461 | py | 7 | lesson7.py | 7 | 0.709328 | 0.704989 | 0 | 12 | 37.333333 | 84 |
carloserodriguez2000/EmailAdrChk | 4,526,895,563,738 | 012c67b75c4de66d2cc6b3509036b4d05bac0614 | 22daaf26baac549194af0e0968ea2f9f545aa606 | /CrazyRFCwikiMain.py | fc115cfa06a107838dcfaaf830c052a93fbb5a4a | [] | no_license | https://github.com/carloserodriguez2000/EmailAdrChk | 06256ae9ca9fdd13168f4d0b85ca6a03222f1083 | 7c3ca7d0c20fad2a8f29d71c90aa39624f26fb50 | refs/heads/master | 2021-01-01T04:29:24.832071 | 2016-05-17T15:35:50 | 2016-05-17T15:35:50 | 58,962,182 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ################################################################################
# hi. This program checks the syntax of an email address, loosely following the RFC rules described on Wikipedia.
def cleanS ( string ):
    cleanS = string.strip() # get rid of leading and trailing spaces.
cleanS = cleanS.lower()
return cleanS
################################################################################
#
def stripComment ( sLine):
# code
    print( sLine + ' checking')
################################################################################
# this could also be done with "str.partition(sep)"
def GetDomain(sLine):
# code
## print( sLine + 'cheking')
words = sLine.split()
pieces = sLine.split('@')
## print(pieces[1])
return (pieces[1])
################################################################################
#
def GetLocalS(sLine):
# code
## print( sLine + 'cheking')
words = sLine.split()
pieces = sLine.split('@')
## print(pieces[0])
return (pieces[0])
################################################################################
#
def checkValidDomain(domainS):
# code
    print( domainS + ' checking')
if(len( domainS) >254):
print('Domain part is longer than 254 chars')
return False
if (' ' in domainS):
##print('domain has space')
return False
if ('-' in domainS):
#alphaNumDomS = domainS.lstrip('-')
alphaNumDomS = domainS.split('-')
##print(alphaNumDomS)
##print(len(alphaNumDomS))
domainS = alphaNumDomS[0]+alphaNumDomS[1]
##print('domain has -\"%s\"' %(domainS) ) # a dot is ok but need more
else :
alphaNumDomS = domainS
if ('.' in domainS):
#alphaNumDomS = domainS.lstrip('.')
alphaNumDomS = domainS.split('.')
##print(alphaNumDomS)
##print(len(alphaNumDomS))
domainS = alphaNumDomS[0]+alphaNumDomS[1]
##print('domain has .\"%s\"' %(domainS) ) # a dot is ok but need more
else :
alphaNumDomS = domainS
print(domainS)
if( domainS.isalnum() == True):
##print('domain is alphanum')
return True
else:
##print('missing alphanum')
return False
# check for IP based domains
## if ((domainS.startswith('[') and domainS.endswith(']'))):
###################
## octets = domainS.strip('[]')
## print ( octets )
## numbers = octets.split(':')
## print ( numbers )
##        if( numbers[0].isnumeric() and numbers[1].isnumeric()):
## return True
###################
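# Illustrative behaviour of checkValidDomain above (a sketch, not RFC-complete):
# checkValidDomain('example.com') -> True; checkValidDomain('bad domain') -> False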
################################################################################
#
def checkValidLocal(localS):
#code
    print( localS + ' checking')
if(len( localS) >64):
print('Local part is longer than 64 chars')
return False
if ( localS[0] == '.'):
return False
    ## Check for a dot at the beginning or consecutive dots
    last ='' # initialize as empty
    for letter in localS :
        if letter == '.' and last == '.':
            print ('consecutive dots found')
            return False ## consecutive ".." is not valid
        last = letter ## remember the previous character
##################################################################################
## Check for "(zero or more characters)" matching and take it out
    r=localS.split('(')
    if r[0] =='' :
        #Found leading comment
        print("Leading ( found")
        r=r[1]
        s= r.split(')') # s contains the comment content and the remainder
        print('for: r=',r, 's=',s )
        if s[0] =='' :
            #Found the closing paren; check the remainder for invalid characters
            valid = True
            for ch in '"(),:;<>@[\\] ':
                if ch in s[1]:
                    valid = False
                    print('invalid \"%s\" ' % ch)
            print ('End For ',s[0], ' ', s[1])
            if valid:
                print('valid local () ',s[1])
#######################################################################
##print('Original \"%s\". r=\"%s\". s=\"%s\"' (localS, r, s))
    print('Original= ', localS, 'r= ', r)
    return True # passed all of the active checks above
## if (' ' in localS):
## print('localS has space')
## return False
##
## if ('-' in localS):
## #alphaNumDomS = domainS.lstrip('-')
## alphaNumDomS = localS.split('-')
## print(alphaNumDomS)
## print(len(alphaNumDomS))
## localS = alphaNumDomS[0]+alphaNumDomS[1]
## print('domain has -\"%s\"' %(domainS) ) # a dot is ok but need more
## else :
## alphaNumDomS = localS
##
## if ('.' in localS):
## #alphaNumDomS = domainS.lstrip('.')
## alphaNumDomS = localS.split('.')
## print(alphaNumDomS)
## print(len(alphaNumDomS))
## localS = alphaNumDomS[0]+alphaNumDomS[1]
## print('domain has .\"%s\"' %(localS) ) # a dot is ok but need more
## else :
## alphaNumDomS = localS
##
## print(localS)
## if( localS.isalnum() == True):
## print('domain is alphanum')
## return True
## else:
## print('missing alphanum')
## return False
################################################################################
#
def main ():
sLine = input( 'Enter an Email address to check syntax: ')
sLine = cleanS( sLine)
if( len(sLine)>255):
print("Address has %i chars. It is too long. max 256 total" %(len(sLIne)))
domainS = GetDomain(sLine)
localS = GetLocalS(sLine)
print('Domain=\"%s\". Local= \"%s\".' %(domainS, localS))
if (checkValidDomain(domainS) == True):
print ('valid domain')
if( checkValidLocal(localS)== True):
print( 'Local \"%s\" is VALID' % (localS))
## if( checkValidDomain(domainS)== False):
## print( 'Domain \"%s\" is NOT-VALID' % (domainS))
## else :
## print( 'Domain \"%s\" is NOT-VALID' % (domainS))
##
## if( checkValidLocal(localS)== False):
## print( 'Local \"%s\" is NOT-VALID' % (localS))
## else :
## print( 'Local \"%s\" is NOT-VALID' % (localS))
##
##
## indexer = list()
## sLen = len(sLine)
## indexer = range(sLen) #Create an array for 0:len(
## sReverse = list()
## sReverseLine = ''
##
## for index in indexer :
## ##print ((sLen-1)-index)
## sReverse.append( sLine[(sLen-1)-index])
## sReverseLine += sReverse[index]
## ##print(index)
##
## print(sLine)
## print(sReverse)
## print(sReverseLine)
## if( sLine == sReverseLine):
## print( 'String \"%s\" is a palindrome' %(sLine))
##
################################################################################
#
################################################################################
main()
| UTF-8 | Python | false | false | 7,225 | py | 3 | CrazyRFCwikiMain.py | 2 | 0.438201 | 0.432526 | 0 | 220 | 30.840909 | 90 |
zmzmhhhxyj/leetcode | 18,975,165,543,991 | 7656157a23aa071ecfed95cc328e25991dc59a00 | c06b9b1492410639be60f03e00d575ad3e798eba | /leetcode_challenge_week3_6.py | 57e9e41807e6a9b8110a3e635295a0d8756228d5 | [] | no_license | https://github.com/zmzmhhhxyj/leetcode | cd8c622aa6de26a6e2602e4d0b9d17aa1394507f | 4351c8bff8f94744e5faea4cbda9ea771d3e5f28 | refs/heads/master | 2020-11-25T02:03:29.721509 | 2020-05-24T00:22:26 | 2020-05-24T00:22:26 | 228,441,961 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 200420
from typing import List
from collections import deque
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
def constructBinaryTree(elemList):
length = len(elemList)
if(length == 0):
return None
_root = TreeNode(elemList[0])
def recr(root, num):
        # num: index of the node in level order, starting at 0 (the root)
leftNumber = 2*num+1
rightNumber = 2*num+2
if(leftNumber < length and elemList[leftNumber] != None):
root.left = TreeNode(elemList[leftNumber])
recr(root.left, leftNumber)
else:
root.left = None
if(rightNumber < length and elemList[rightNumber] != None):
root.right = TreeNode(elemList[rightNumber])
recr(root.right, rightNumber)
else:
root.right = None
recr(_root, 0)
return _root
def levelOrder(root: TreeNode) :
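    # BFS over (node, level) pairs; res[level] collects the values at each depth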
res, queue = [],deque([(root,0)])
while queue:
cur,level = queue.popleft()
if cur:
if len(res)<level+1:
res.append([])
res[level].append(cur.val)
queue.append([cur.left,level+1])
queue.append([cur.right,level+1])
print(res)
class Solution:
def bstFromPreorder(self, preorder: List[int]) -> TreeNode:
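        # the first preorder element is the root; elements before the first
        # value greater than it form the left subtree, the rest the right one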
if not preorder:
return
if len(preorder)==1:
return TreeNode(preorder[0])
i = 0
while i < len(preorder):
if preorder[i]>preorder[0]:
break
i+=1
root = TreeNode(preorder[0])
root.left = self.bstFromPreorder(preorder[1:i])
root.right = self.bstFromPreorder(preorder[i:])
return root
x = Solution()
a = [8,5,1,7,10,12]
tree = x.bstFromPreorder(a)
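# levelOrder(tree) below should print [[8], [5, 10], [1, 7, 12]] (illustrative)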
levelOrder(tree) | UTF-8 | Python | false | false | 1,832 | py | 173 | leetcode_challenge_week3_6.py | 172 | 0.557314 | 0.539301 | 0 | 63 | 28.095238 | 67 |
laurennc/CGMemission | 11,836,929,904,243 | 5c2a181bbf7efd35b7a9ca5128a9cb24d5318c08 | 5eda468fce23ec99250dd365026f0905dcbc9bad | /yt_to_tlac.py | 5cca6aff7f63d5b8f41f61b11d1a7c39d5812656 | [] | no_license | https://github.com/laurennc/CGMemission | fc7b9303081b93475e720cc4a38b68906bec716a | 59d6c73bcf5128fc0c46f41ab9c6e6784bbe23c2 | refs/heads/master | 2021-01-15T15:31:38.710202 | 2016-09-20T21:38:17 | 2016-09-20T21:38:17 | 13,177,868 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
from yt.mods import *
import tlac_grid_create_hdf5 as tg
import tlac_analysis as ta
fn="/u/10/l/lnc2115/vega/data/Ryan/r0054/redshift0054"
pf = load(fn, file_style="%s.grid.cpu%%04i") # load data
#val, pos = pf.h.find_max('Density')
fields = ['H_NumberDensity','HI_NumberDensity','Temperature','Metallicity','x-velocity','y-velocity','z-velocity','x','y','z']
center = [ 0.39871597, 0.46913528, 0.46808243]
dims = [320,320,320]
level = 8 #pf.index.max_level
ncells = pf.domain_dimensions * pf.refine_by**level
full_boxsize = (pf.domain_width * ( 1. + 1. / ncells ))
cell_length = (full_boxsize[0]/ncells[0]) #0.90829222634295292 kpc
width = dims[0]*cell_length ##290.65351242974492 kpc
axis = fix_axis('x')
center = np.array(center)
LE,RE = center.copy(),center.copy()
LE[axis] -= width/2.0
RE[axis] += width/2.0
area_axes = [0,1,2]
i = area_axes.index(axis)
del area_axes[i]
LE[area_axes] -= width/2.0
RE[area_axes] += width/2.0
cg = pf.h.covering_grid(level, left_edge=LE, dims = dims)
region_size = (cg.right_edge-cg.left_edge)*pf['cm']
pos_coords = [np.array(cg['x'][:,0,0]),np.array(cg['y'][0,:,0]),np.array(cg['z'][0,0,:])]
vx = cg['x-velocity']
vy = cg['y-velocity']
vz = cg['z-velocity']
velocities = np.zeros(np.concatenate( (np.array(vx.shape),[3])))
velocities[:,:,:,0] = vx
velocities[:,:,:,1] = vy
velocities[:,:,:,2] = vz
#######INITIATE FILE############
output_file = 'grid_lauren.hdf5'
f = tg.open_file(output_file)
tg.write_header(f)
tg.write_grid_data(f,1,dims,region_size,0.0)
####ADD FIELDS OF INTEREST#########
##LOG OPTION
#tg.write_cell_data(f,pos_coords,None,np.log10(cg['Temperature']),np.log10(cg['HI_NumberDensity']),None,None,np.log10(cg['H_NumberDensity']),np.log10(cg['Metallicity']))
##NON LOG OPTION
tg.write_cell_data(f,pos_coords,None,cg['Temperature'],cg['HI_NumberDensity'],None,velocities,cg['H_NumberDensity'],cg['Metallicity'])
tg.write_cloud_data(f)
tg.close_file(f)
# Check
#print "Checking data..."
#ta.grid_summary( ta.grid_load(output_file) )
#print "...done!"
| UTF-8 | Python | false | false | 2,110 | py | 118 | yt_to_tlac.py | 114 | 0.672038 | 0.611848 | 0 | 65 | 31.461538 | 169 |
ZhangDahe/PyProjects | 1,949,915,198,714 | 6186448815a72085eb3a1f420b0d5ca12bcd5ae6 | e727361dd1254b4ff2437f431378c1b32b38d302 | /Codes/PyProjects/NS3_work/HeartBeats/TCP/TCP41_ser.py | b332bcbda7edc51066063a1db753aee73b04baad | [] | no_license | https://github.com/ZhangDahe/PyProjects | 29b2640028d786c76c8dfcc4ad0f465a6e7dd146 | ee96a4d87a62db631d8bf4599295bec39ad20102 | refs/heads/master | 2020-04-27T07:23:30.785470 | 2019-03-06T08:28:56 | 2019-03-06T08:28:56 | 174,102,232 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''最终版'''
'''Creates a new child thread'''
import socket, threading, time
tcpPort = 8888; checkPeriod = 5; checkTimeout = 10
class Mydict(dict):
    '''Manage the dict: update and create entries; return the clients that went silent'''
def __init__(self):
super(Mydict, self).__init__( )
""" 创建或更新字典目录"""
def __setitem__(self, key, value):
        super(Mydict, self).__setitem__(key, value) # call dict's __setitem__ to set the key and value
def getSilent(self):
silent = [ ]
        limit = time.time( ) - checkTimeout # current time minus checkTimeout
        for (ip,ipTime) in self.items():
            if ipTime < limit:
                silent.append(ip)
        return silent
class Receiver(threading.Thread):
""" 接收tcpp包,并把他们存在字典当中 """
'''父类 threding.Thread'''
def __init__(self, mydict):
super(Receiver, self).__init__( )
        self.mydict = mydict # the dict passed in
self.recSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.recSocket.bind(('127.0.0.1', tcpPort))
self.recSocket.listen(2)
    # override the class's run method
def run(self):
coon, addr = self.recSocket.accept()
# try:
while True:
data = coon.recv(10)
print(addr)
if data.decode() == 'ping':
                # save ip: timestamp in the dict
self.mydict[addr[0]] = time.time()
print("server is sending 'pang'")
coon.send('pang'.encode())
def main():
mydict = Mydict( )
    receiver = Receiver(mydict=mydict)  # instantiate the receiver class
    receiver.start()  # start the thread
print ('heartbeat server listening on port %d' % tcpPort)
while True:
silent = mydict.getSilent( )
print ('Silent clients: %s' %silent)
time.sleep(checkPeriod)
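# A minimal client sketch to exercise this server (not part of the original
# file; it assumes the constants defined above):
# import socket, time
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# s.connect(('127.0.0.1', tcpPort))
# while True:
#     s.send('ping'.encode()); print(s.recv(10).decode()); time.sleep(3)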
if __name__ == '__main__':
main( ) | UTF-8 | Python | false | false | 1,944 | py | 22 | TCP41_ser.py | 22 | 0.538549 | 0.528912 | 0 | 58 | 29.431034 | 79 |
shubham-gaikwad/Assesing-building-damage-post-disasters | 17,119,739,667,663 | 1bf4e5cd5f4ff30e535c0b458f06303adf6887da | 5a74ffd19d7894e899dac449cc7b944ba4cb6b46 | /data_finalize.py | 085ec061e69e571f05f4919a90efefaf67791e9a | [] | no_license | https://github.com/shubham-gaikwad/Assesing-building-damage-post-disasters | a1e00f0ed7f7b8b30b54904ddc3105afccf71b61 | 830f43db780ff2e7b7740fe485dbd7cb6e0516a8 | refs/heads/master | 2022-04-22T10:26:22.399093 | 2020-04-28T23:22:29 | 2020-04-28T23:22:29 | 259,773,646 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import shutil
import random
val_split = 0.2
base = '/home/abhirag/experiments_on_portugal/'
ds = '/home/abhirag/ONLY_PORTUGAL/'
if os.path.exists(os.path.join(base, 'spacenet_gt')):
shutil.rmtree(os.path.join(base, 'spacenet_gt'))
os.mkdir(os.path.join(base, 'spacenet_gt'))
os.mkdir(os.path.join(base, 'spacenet_gt', 'images'))
os.mkdir(os.path.join(base, 'spacenet_gt', 'labels'))
os.mkdir(os.path.join(base, 'spacenet_gt', 'dataSet'))
for i in [os.path.join(ds, 'masks', j) for j in os.listdir(os.path.join(ds, 'masks'))]:
shutil.copy(i, os.path.join(base, 'spacenet_gt', 'labels'))
shutil.copy('/home/abhirag/ONLY_PORTUGAL/images/portugal-wildfire_' + i.split('/')[-1].split('_')[1] + '_pre_disaster.png', os.path.join(base, 'spacenet_gt', 'images'))
x = os.listdir(os.path.join(base, 'spacenet_gt', 'labels'))
random.shuffle(x)
with open(os.path.join(base, 'spacenet_gt', 'dataSet', 'train.txt'), 'w') as fp:
for f in x[:int((1-val_split)*len(x))]:
fp.write('%s\n' %f)
with open(os.path.join(base, 'spacenet_gt', 'dataSet', 'val.txt'), 'w') as fp:
for f in x[int((1-val_split)*len(x)):]:
fp.write('%s\n' %f)
os.system('python3 compute_mean.py /home/abhirag/experiments_on_portugal/spacenet_gt/dataSet/train.txt --root /home/abhirag/experiments_on_portugal/spacenet_gt/images --output /home/abhirag/experiments_on_portugal/spacenet_gt/dataSet/mean.npy')
| UTF-8 | Python | false | false | 1,394 | py | 9 | data_finalize.py | 3 | 0.681492 | 0.676471 | 0 | 38 | 35.631579 | 244 |
snowowwwl/Coursera_MLIntro_Yandex | 4,140,348,523,475 | 99a9c810cf2c856ef7b49351711c7995461b1534 | 6f3dce332a45f2bce371abfd5bed335876f535bc | /W1_task2_decisiontree/W1_task2_decisiontree.py | fd10659c86ffacd6ac72eddff87233ce6caed804 | [] | no_license | https://github.com/snowowwwl/Coursera_MLIntro_Yandex | 872bdc2f32333bf343a60f0c47531672db62cd70 | ba1aac577f7c35f7039073165ab8580e0b64f8f7 | refs/heads/master | 2020-07-08T17:35:49.440043 | 2019-09-26T10:58:09 | 2019-09-26T10:58:09 | 203,733,863 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Load the dataset from the file titanic.csv using the Pandas package.
Keep four features in the sample: passenger class (Pclass), ticket fare (Fare), passenger age (Age) and sex (Sex).
Note that the Sex feature has string values.
Extract the target variable: it is stored in the Survived column.
The data contains missing values: for example, the age of some passengers is unknown. When read
into pandas, such records take the value nan. Find all objects that have missing features and remove them from the sample.
Train a decision tree with the parameter random_state=241 and the remaining parameters at their defaults (meaning the
parameters of the DecisionTreeClassifier constructor).
Compute the feature importances and find the two features with the highest importance. Their names are the answer for this task
(as the answer, give the feature names separated by a comma or a space; the order does not matter).
'''
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'
import pydot
import pydotplus
import pandas
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
from io import StringIO
transformer = ColumnTransformer(
transformers=[
("OneHot", # Just a name
OneHotEncoder(), # The transformer class
[0] # The column(s) to be applied on.
)
],
remainder='passthrough'
)
data = pandas.read_csv('titanic.csv', index_col='PassengerId')
data_sample = data[['Sex', 'Pclass', 'Age', 'Fare', 'Survived']]
data_sample = data_sample.dropna()
data_sample1 = data_sample[['Sex', 'Pclass', 'Age', 'Fare']]
survived_target = data_sample['Survived']
x = transformer.fit_transform(data_sample1)
print(data_sample1)
print(x)
survived_model = DecisionTreeClassifier(random_state=241)
survived_model.fit(x, survived_target.values)
importances = survived_model.feature_importances_
print(importances)
dot_data = StringIO()
out = tree.export_graphviz(survived_model, feature_names=['Sex', 'Sex', 'Pclass', 'Age', 'Fare'],
out_file = dot_data, filled= True )
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_pdf('survived_model.pdf') | UTF-8 | Python | false | false | 2,937 | py | 20 | W1_task2_decisiontree.py | 18 | 0.753419 | 0.746802 | 0 | 54 | 41 | 122 |
CesarAcjotaMerma/ProyectoFinal4toSemestre | 5,068,061,415,308 | 8a27123d769eec100051ec89eb4748e747d0c964 | d29c95b4634a31e0e1e6935804cbdd127dcf27dd | /Backend/Django/gestion/coordinates/serializers.py | 7fdb3062aa947a5b23de8b6305b15cce4f8a754b | [] | no_license | https://github.com/CesarAcjotaMerma/ProyectoFinal4toSemestre | d791ff7687795db0bffaae6b55235f4308cb2f8d | d345c5ebfd7363de1307561b6c70cd41ed63a013 | refs/heads/master | 2023-08-27T03:00:15.984873 | 2021-10-16T17:47:51 | 2021-10-16T17:47:51 | 417,904,573 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from rest_framework import serializers
from coordinates.models import Coordinates
class CoordinatesSerializer(serializers.ModelSerializer):
class Meta:
model = Coordinates
        fields = '__all__' | UTF-8 | Python | false | false | 219 | py | 120 | serializers.py | 92 | 0.721461 | 0.721461 | 0 | 9 | 23.444444 | 57 |
MakeMagazinDE/Platinenbohrmaschine | 2,911,987,841,614 | d459d1c76bced76c7a0e3684a72d7194d117447d | 3301fb088440c6c18a36c8dc7e9d3e650fd8b97a | /bohrconfig_10.py | 13e01eac9f531290ff2f12551541d16c15d80c0f | [] | no_license | https://github.com/MakeMagazinDE/Platinenbohrmaschine | 327006914c46fffc6e46ece25c96eaeead0af682 | 80f7fde6e72e5148ef554259beab1e5cb5cf7ffb | refs/heads/master | 2020-03-09T17:32:27.294156 | 2018-04-10T11:07:23 | 2018-04-10T11:07:23 | 128,911,033 | 3 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
name = "Bohrconfig V01"
filename = 'bohrconf.txt'
mainWin=Tk()
mainWin.geometry('310x330+50+30')
mainWin.resizable(width=FALSE, height=FALSE)
mainWin.title(name)
mainFrame = Frame(master=mainWin)
mainFrame.place(x=0, y=0, width=310, height=330)
bg = StringVar()
ba = StringVar()
wp = StringVar()
bt = StringVar()
wl = StringVar()
hp = StringVar()
su = StringVar()
ss = StringVar()
pw = StringVar()
fx = StringVar()
fy = StringVar()
try:
f = open(filename)
except IOError:
    messagebox.showinfo("Fehler", "config.txt fehlt")
for line in f:
line = line.rstrip('\n')
liste = line.split("=")
if liste[0] == 'bg':
bg.set(liste[1])
elif liste[0] == 'ba':
ba.set(liste[1])
elif liste[0] == 'wp':
wp.set(liste[1])
elif liste[0] == 'bt':
bt.set(liste[1])
elif liste[0] == 'wl':
wl.set(liste[1])
elif liste[0] == 'hp':
hp.set(liste[1])
elif liste[0] == 'su':
su.set(liste[1])
elif liste[0] == 'ss':
ss.set(liste[1])
elif liste[0] == 'pw':
pw.set(liste[1])
elif liste[0] == 'fx':
fx.set(liste[1])
elif liste[0] == 'fy':
fy.set(liste[1])
f.close()
def Bsave():
try:
f = open(filename, 'w')
except IOError:
        messagebox.showinfo("Fehler", "config.txt fehlt")
f.write("bg=" + str(bg.get()) + "\n")
f.write("ba=" + str(ba.get()) + "\n")
f.write("wp=" + str(wp.get()) + "\n")
f.write("bt=" + str(bt.get()) + "\n")
f.write("wl=" + str(wl.get()) + "\n")
f.write("hp=" + str(hp.get()) + "\n")
f.write("su=" + str(su.get()) + "\n")
f.write("ss=" + str(ss.get()) + "\n")
f.write("pw=" + str(pw.get()) + "\n")
f.write("fx=" + str(fx.get()) + "\n")
f.write("fy=" + str(fy.get()) + "\n")
    f.close()
def Bback():
mainWin.destroy()
ausgabelabel1 = ttk.Label(mainFrame, text='Bohrgeschwindigkeit [m/s]')
ausgabelabel1.place(x=5, y=50, width=200, height=20)
eingabelabel1 = ttk.Entry(mainFrame, textvariable=bg)
eingabelabel1.place(x=200, y=50, width=100, height=20)
ausgabelabel2 = ttk.Label(mainFrame, text='Bohrbeschleunigung [mm/ss]')
ausgabelabel2.place(x=5, y=70, width=200, height=20)
amax = StringVar()
eingabelabel2 = ttk.Entry(mainFrame, textvariable=ba)
eingabelabel2.place(x=200, y=70, width=100, height=20)
ausgabelabel3 = ttk.Label(mainFrame, text='Werkzeuwechselposition [mm]')
ausgabelabel3.place(x=5, y=90, width=200, height=20)
wwpos = StringVar()
eingabelabel3 = ttk.Entry(mainFrame, textvariable=wp)
eingabelabel3.place(x=200, y=90, width=100, height=20)
ausgabelabel4 = ttk.Label(mainFrame, text='Bohrtiefe [mm]')
ausgabelabel4.place(x=5, y=110, width=200, height=20)
btief = StringVar()
eingabelabel4 = ttk.Entry(mainFrame, textvariable=bt)
eingabelabel4.place(x=200, y=110, width=100, height=20)
ausgabelabel5 = ttk.Label(mainFrame, text='Werkzeuglänge [mm]')
ausgabelabel5.place(x=5, y=130, width=200, height=20)
wlang = StringVar()
eingabelabel5 = ttk.Entry(mainFrame, textvariable=wl)
eingabelabel5.place(x=200, y=130, width=100, height=20)
ausgabelabel6 = ttk.Label(mainFrame, text='Home-Position [mm]')
ausgabelabel6.place(x=5, y=150, width=200, height=20)
homepos = StringVar()
eingabelabel6 = ttk.Entry(mainFrame, textvariable=hp)
eingabelabel6.place(x=200, y=150, width=100, height=20)
ausgabelabel7 = ttk.Label(mainFrame, text='Schrittmotor [S/U]')
ausgabelabel7.place(x=5, y=170, width=200, height=20)
spu = StringVar()
eingabelabel7 = ttk.Entry(mainFrame, textvariable=su)
eingabelabel7.place(x=200, y=170, width=100, height=20)
ausgabelabel8 = ttk.Label(mainFrame, text='Spindelsteigung [mm/U]')
ausgabelabel8.place(x=5, y=190, width=200, height=20)
pitch = StringVar()
eingabelabel8 = ttk.Entry(mainFrame, textvariable=ss)
eingabelabel8.place(x=200, y=190, width=100, height=20)
ausgabelabel9 = ttk.Label(mainFrame, text='Spindeldrehzahl max. [%]')
ausgabelabel9.place(x=5, y=210, width=200, height=20)
pwm = StringVar()
eingabelabel9 = ttk.Entry(mainFrame, textvariable=pw)
eingabelabel9.place(x=200, y=210, width=100, height=20)
ausgabelabel10 = ttk.Label(mainFrame, text='Fadenkreuz x.Pos')
ausgabelabel10.place(x=5, y=5, width=200, height=20)
fkx = StringVar()
eingabelabel10 = ttk.Entry(mainFrame, textvariable=fx)
eingabelabel10.place(x=200, y=5, width=100, height=20)
ausgabelabel11 = ttk.Label(mainFrame, text='Fadenkreuz y-Pos.')
ausgabelabel11.place(x=5, y=25, width=200, height=20)
fky = StringVar()
eingabelabel11 = ttk.Entry(mainFrame, textvariable=fy)
eingabelabel11.place(x=200, y=25, width=100, height=20)
image2=PhotoImage(file='ico/save.png')
button2 = Button(master=mainFrame,image=image2, command=Bsave)
button2.place(x=25, y=245, width=50, height=50)
image4=PhotoImage(file='ico/back.png')
button4 = Button(master=mainFrame,image=image4, command=Bback)
button4.place(x=80, y=245, width=50, height=50)
mainWin.mainloop()
| UTF-8 | Python | false | false | 5,203 | py | 6 | bohrconfig_10.py | 4 | 0.64283 | 0.579393 | 0 | 156 | 32.339744 | 72 |
herjh0405/Coding_Test | 7,438,883,402,518 | eb6c77d92fef2493f6ec920dfc6f733e692fa1e8 | 100ff22eb70448a7532216e535e48929e47be898 | /Chapter7. 이진 탐색/7-6. 부품 찾기_계수 정렬.py | 1648f234f4249c57e327cd9e706e32bc41610726 | [] | no_license | https://github.com/herjh0405/Coding_Test | 100b0c2a4aac0aa4d8aa9dad5780ab6c2e7b33d9 | 0df10f057b22d5e9b32494548685b79aa9fbbc07 | refs/heads/master | 2023-05-27T06:46:52.350172 | 2021-06-15T04:34:45 | 2021-06-15T04:34:45 | 358,766,282 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 이진 탐색 말고도 계수 정렬의 개념을 이용하여 문제를 풀 수도 있다.
# after creating a list large enough to index every part number, we access the list indices directly
# to check whether a part with a given number exists in the store
import sys
# read the number of parts in the store
n = int(input())
array = [0] * 1000000
# read and record every part number present in the store
for i in input().split() :
array[int(i)] = 1
# read the number of parts the customer asked about
m = int(input())
# read all the part numbers requested by the customer, separated by spaces
vi_list = list(map(int, sys.stdin.readline().split()))
for vi in vi_list :
if array[vi] == 1 :
print('yes', end=' ')
else :
print('no', end=' ') | UTF-8 | Python | false | false | 865 | py | 42 | 7-6. 부품 찾기_계수 정렬.py | 36 | 0.566728 | 0.548446 | 0 | 22 | 22.954545 | 55 |
AllStars123/online_shop_django | 6,073,083,764,333 | 36afaf3d48aac144edba1b9713235055cf117b97 | dd116ddf8b7edb0083ff9eeaf5c0b4ecb4199378 | /mainapp/migrations/0009_alter_customers_orders.py | 51d1ff2c6643a46cf13b922eee379b363c994b32 | [] | no_license | https://github.com/AllStars123/online_shop_django | bd11fabf4cd4d2e026b63e8e97bc9ffbbb8fd770 | 39b8e04d78db2e4a8feab1e67580088e7529e0bf | refs/heads/master | 2023-07-08T22:50:53.754670 | 2021-08-06T21:57:14 | 2021-08-06T21:57:14 | 393,510,483 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.2.5 on 2021-08-05 18:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0008_auto_20210805_1842'),
]
operations = [
migrations.AlterField(
model_name='customers',
name='orders',
field=models.ManyToManyField(related_name='related_customer', to='mainapp.Orders', verbose_name='Заказы покупателя'),
),
]
| UTF-8 | Python | false | false | 484 | py | 10 | 0009_alter_customers_orders.py | 8 | 0.623932 | 0.557692 | 0 | 18 | 25 | 129 |
MightyCrane/scattering-clustering | 6,665,789,243,642 | 27955ddc3cd53275406f3e9fd4a01a697ec012a8 | b1f43584508c0a759e75697ff1e2fd0cc180df21 | /src/lib/scattering/scattering_methods.py | 27e978cb62d74b744696305f28aac4ab47732ec9 | [] | no_license | https://github.com/MightyCrane/scattering-clustering | 8d9b0f37c084ae3073fa31552571e84fb4dbf381 | fbdc75271658f3e53d55fc6d04995a8058a0bf62 | refs/heads/master | 2023-07-24T00:05:08.251008 | 2021-08-27T08:25:39 | 2021-08-27T08:25:39 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Methods for initializing ScatNets and other functionalities related to the
scattering transform
"""
import kymatio as km
def scattering_layer(J=3, shape=(32, 32), max_order=2, L=6, debug=False):
"""
Creating a scattering transform "network"
Args:
----
J: Integer
logscale of the scattering (2^J)
shape: tuple (X,Y)
size of the input images
max_order: Integer
number of scattering layers
L: Integer
        number of angles used for the Morlet Wavelet
    Returns:
    -------
    scattering_layer: Kymatio Scattering 2D layer
Kymatio layer that performs a 2D scattering transform
total_scat_coeffs: Integer
Total number of scattering coefficients
"""
total_scat_coeffs = get_num_scattering_coefficients(J=J, shape=shape, max_order=max_order, L=L, debug=debug)
scattering_layer = km.Scattering2D(J=J, shape=shape, max_order=max_order, L=L)
return scattering_layer, total_scat_coeffs
def get_num_scattering_coefficients(J=3, shape=(32, 32), max_order=2, L=3, debug=False):
"""
Computing the total number of scattering coefficients
Args:
-----
J: Integer
logscale of the scattering (2^J)
shape: tuple (X,Y)
size of the input images
max_order: Integer
number of scattering layers
L: Integer
        number of angles used for the Morlet Wavelet
    Returns:
    -------
    total_scat_coeffs: Integer
Total number of scattering coefficients
"""
height = shape[0]//(2**J)
    width = shape[1]//(2**J)
channels = 1 + L*J
if(max_order==2):
channels+= L**2*J*(J-1)//2
total_scat_coeffs = channels*height*width
if(debug):
print("Scattering Feature stats given parameters:")
print(f" Total_scat_coeffs: {total_scat_coeffs}")
print(f" Shape: {(channels, height, width)}")
print(f" Height: {height}")
print(f" Hidth: {width}")
print(f" Channels: {channels}")
return total_scat_coeffs
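# Worked example with the defaults above (J=3, shape=(32, 32), max_order=2, L=3):
# height = width = 32 // 2**3 = 4, channels = 1 + 3*3 + 3**2*3*(3-1)//2 = 37,
# so total_scat_coeffs = 37 * 4 * 4 = 592.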
def get_scat_features_per_layer(J=3, L=3):
"""
Computing the number of scattering features corresponding to each order
Args:
-----
J: Integer
logscale of the scattering (2^J)
L: Integer
        number of angles used for the Morlet Wavelet
    Returns:
    -------
    total_coeffs: Integer
        number of scattering features across the given orders
    """
zero_order = 1
first_order = L*J
second_order = L**2*J*(J-1)//2
total_coeffs = zero_order + first_order + second_order
return total_coeffs
def reshape_scat_coeffs(scat_coeffs, method="channelwise"):
"""
Reshaping the scattering coefficients for channelwise or batchwise processing
Args:
-----
    scat_coeffs: torch Tensor
5-dim Scattering coefficients with shape
(Batch_size, num_patches, n_scat_filters, height scat filter, width scat filter)
method: string
Method used for processing the patches
"""
# reshaping the features for batchwise or channelwise patch processing
if(method=="channelwise"):
scat_coeffs = scat_coeffs.view(scat_coeffs.shape[0],-1,scat_coeffs.shape[-2],scat_coeffs.shape[-1])
elif(method=="batchwise"):
scat_coeffs = scat_coeffs.view(-1,scat_coeffs.shape[-3],scat_coeffs.shape[-2],scat_coeffs.shape[-1])
else:
print(f"Patch processing method {method} is not recognized. It must be one of the following [batchwise, channelwise]")
exit()
return scat_coeffs
#
| UTF-8 | Python | false | false | 3,392 | py | 25 | scattering_methods.py | 20 | 0.643573 | 0.630601 | 0 | 115 | 28.495652 | 126 |
Anupya/leetcode | 4,621,384,842,016 | 9384856d858c900f947bd13696aeef8166378abb | 2b485c67c723151f73ec96da9f6337a0c9857dae | /medium/q395 atleastKRepeatingChar.py | c154eae82f6704f35c2860ecfed24f87f6f1a2d3 | [] | no_license | https://github.com/Anupya/leetcode | c7792e6ac61b655491a1c734f9167281356471d3 | cb45e66a41e0c6a8583bb9c4bf846b470ef4bc0f | refs/heads/master | 2022-10-10T14:01:22.189414 | 2022-09-07T21:36:24 | 2022-09-07T21:36:24 | 151,865,310 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Given a string s and an integer k, return the length of the longest substring of s such that the frequency of each character in this substring is greater than or equal to k.
class Solution:
def longestSubstring(self, s: str, k: int) -> int:
return self.longestSubstringActual(0, len(s), s, k)
def longestSubstringActual(self, start, end, s, k):
# store frequency of all char in s
mydict = {}
substring = s[start:end]
uniqueChar = set(substring)
for char in uniqueChar:
mydict[char] = substring.count(char)
# divide and conquer
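        # a char whose total frequency is below k can never be part of a valid
        # substring, so the answer must lie entirely on one side of it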
for i in range(start, end):
if mydict[s[i]] < k:
left = self.longestSubstringActual(start, i, s, k)
right = self.longestSubstringActual(i+1, end, s, k)
return max(left, right)
return end - start
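# Example (illustrative): Solution().longestSubstring("aaabb", 3) == 3, since
# "aaa" is the longest substring whose characters all appear at least 3 times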
| UTF-8 | Python | false | false | 935 | py | 149 | q395 atleastKRepeatingChar.py | 148 | 0.573262 | 0.571123 | 0 | 24 | 37.625 | 175 |
AdamZhouSE/pythonHomework | 15,942,918,644,388 | 3081ef4c6c1ebacc5826d8dd731c18c63d8ce855 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2599/60622/317669.py | 9259f978f2c93eba49b3d4176da7890a6625ba0c | [] | no_license | https://github.com/AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | l=input().split()
n=int(l[0])  # convert to int so that range(n+m) below works
m=int(l[1])
for i in range(n+m):
s=input()
l.append(s)
if l==[]:
pass
else:
print(l) | UTF-8 | Python | false | false | 120 | py | 45,079 | 317669.py | 43,489 | 0.508333 | 0.491667 | 0 | 10 | 11.1 | 20 |
dahlke/PyTrain | 16,423,954,959,518 | fc722fb33a51057e4f930f2da0077f7419d089d7 | 54dcbdff3a619549c823dac81f48dc1895865480 | /lines.py | b37ed6d96e98e35d1420203f179fc43a1deffbaf | [] | no_license | https://github.com/dahlke/PyTrain | f700cf1b877d79c8281347935d614c0df94a37b0 | 18c9f3a3833d193f18f02fb571455729a6e01754 | refs/heads/master | 2016-08-05T04:23:45.697561 | 2013-05-02T00:57:22 | 2013-05-02T00:57:22 | 6,946,515 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from tkinter import Tk,Frame,Label,Button, PhotoImage, Menu
from tkinter import Frame,Label,Entry,Button,TOP,LEFT,RIGHT,END,BOTTOM
from tkinter.messagebox import showinfo
import driver
import stops
class lines(Frame):
def __init__(self, master=None, bg='black', height=1000, width=1000):
Frame.__init__(self, master, bg='black', height=1000, width=1000)
#self.title("CTA Train Tracker ")
self.driv = driver.Driver()
self.pack()
self.memory=''
lines.make_widgets(self)
def make_widgets(self):
self.lines = ['Red Line', 'Blue Line', 'Brown Line', 'Purple Line',
'Orange Line', 'Green Line', 'Pink Line', 'Yellow Line']
headimg = PhotoImage(file='header.gif')
header = Label(self, image=headimg)
header.image = headimg
header.grid(row=0, columnspan=2)
r = 1
c = 0
for b in self.lines:
rel = 'ridge'
cmd = lambda x=b: self.click(x)
splt = b.split()
if splt[0] not in 'OrangeGreenPinkYellow':
Button(self,text=b, width = 19, height=2, relief=rel,
bg = splt[0], fg = "#FFF", font = ("Helvetica", 16),
command=cmd).grid(row=r,column=c)
else:
Button(self,text=b, width = 19, relief=rel,
bg = splt[0], fg = "#000", height=2, font = ("Helvetica", 16),
command=cmd).grid(row=r,column=c)
c += 1
if c > 1:
c = 0
r += 1
    def hello(self):
print('hello')
def click(self, key):
for line in self.lines:
if key == str(line):
x = self.driv.stopSelection(key)
stops.stops(x, key).maxsize(480, 320)
myapp = lines()
#
# here are method calls to the window manager class
#
myapp.master.title("CTA Train Tracker")
myapp.master.maxsize(480, 320)
# start the program
myapp.mainloop()
| UTF-8 | Python | false | false | 2,031 | py | 9 | lines.py | 8 | 0.535204 | 0.509601 | 0 | 62 | 31.758065 | 85 |
RafaelVallejo/Pentest | 1,297,080,140,525 | 161b6127dc86a5b1bd8f0b92df30462f6ed8e79b | 578621ba1fc1ce7ed01e81902f19d3440cff497a | /Tareas/nmap.py | d9b24ccb1278a8e044371a36c991bd1908762529 | [] | no_license | https://github.com/RafaelVallejo/Pentest | 4c42f25060f75d7ba46d030de53d3c7dd57ad42f | a03011e7ba9aad5b4def8e63619f7c938122a8ba | refs/heads/master | 2020-05-02T03:10:39.132596 | 2019-04-19T06:15:40 | 2019-04-19T06:15:40 | 177,721,944 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# usage: sudo python nmap.py -i <host_ip> -p <ports separated by commas, by spaces, or by a dash (for a range)>
from sys import argv,exit as eexit,stderr
from scapy.all import sr1,sr,IP,ICMP,TCP,RandShort
import optparse
import argparse
def printError(msg, exit = False):
"""
    Print an error about the arguments.
    Receives: msg (String), exit (Bool)
    Returns: writes the error message and, if exit is True, ends the script
"""
stderr.write('Error:\t%s\n' % msg)
if exit:
eexit(1)
def checkOptions(options):
"""
    Check that the arguments are not empty.
    Receives: options (from argparse)
    Returns: an error message via the printError function when the host is missing
"""
if options.host is None:
printError('Debes especificar la IP del host.', True)
def addOptions():
"""
    Add the script options: -p <list of ports, separated by commas or spaces, or by a dash (for a range)>
                            -i <IP of the host to scan>
    Returns: the values parsed for ports and host
"""
parser = argparse.ArgumentParser()
parser.add_argument('-p','--port', dest='puertos', default=['80'], nargs='+', help='Puertos para realizar escaneo.')
parser.add_argument('-i','--host', dest='host', default=None, help='IP de host a escanear.')
opts = parser.parse_args()
return opts
def informe(ip, abiertos, cerrados, filtrados):
"""
    Print the result of the port scan.
    Receives: ip (String), abiertos (list of open ports), cerrados (list of closed ports), filtrados (list of filtered ports)
    Returns: writes the scan result to standard output
"""
print 'Host: %s\n' % ip
for port in abiertos: print 'Puerto %i: abierto' % port
for port in cerrados: print 'Puerto %i: cerrado' % port
for port in filtrados: print 'Puerto %i: filtrado' % port
def escaneo(host,puertos):
"""
    Performs a TCP SYN scan of the received ports against the host.
    Receives: host (String), puertos (list of port numbers)
    Returns: abiertos (list of open ports), cerrados (list of closed ports), filtrados (list of filtered ports)
"""
abiertos, cerrados, filtrados = [], [], []
puerto_origen = RandShort()
for puerto in puertos:
respuesta = sr1(IP(dst=host)/TCP(sport=puerto_origen,dport=puerto,flags="S"),timeout=2,verbose=0) # SYN
if respuesta is not None:
if respuesta.getlayer(TCP).flags == 0x12: # SYN/ACK
reset = sr(IP(dst=host)/TCP(sport=puerto_origen,dport=puerto,flags="R"),timeout=2,verbose=0) # RST
abiertos.append(puerto)
elif respuesta.getlayer(TCP).flags == 0x14: # RST/ACK
cerrados.append(puerto)
elif(int(respuesta.getlayer(ICMP).type)==3 and int(respuesta.getlayer(ICMP).code) in [1,2,3,9,10,13]): # Filtrado
                filtrados.append(puerto)
else:
filtrados.append(puerto)
return abiertos, cerrados, filtrados
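# TCP flag values used above: 0x12 is SYN|ACK (port open), 0x14 is RST|ACK
# (port closed); an ICMP type 3 answer with code 1,2,3,9,10 or 13 means the
# probe was filtered.  Illustrative call (hypothetical host; needs root
# because scapy uses raw sockets):
#   abiertos, cerrados, filtrados = escaneo('192.168.1.10', [22, 80, 443])
#   informe('192.168.1.10', abiertos, cerrados, filtrados)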
def puertosFormato(puertos):
"""
    Parses the port list read from the command line, whether separated by commas, by spaces, or with a hyphen (for a port range).
    Receives: puertos (the argument read with -p)
    Returns: list of integers with the ports
"""
if '-' in puertos[0]:
return [int(i) for i in range(int(puertos[0].split('-')[0]),int(puertos[0].split('-')[1])+1)]
elif ',' in puertos[0]:
return [int(i) for i in puertos[0].split(',')]
else:
return [int(i) for i in puertos]
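# Accepted -p formats and their parsed results (illustrative):
#   puertosFormato(['20-25'])     -> [20, 21, 22, 23, 24, 25]
#   puertosFormato(['22,80,443']) -> [22, 80, 443]
#   puertosFormato(['22', '80'])  -> [22, 80]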
# main
opts = addOptions()
checkOptions(opts)
puertos = puertosFormato(opts.puertos)
abiertos,cerrados,filtrados = escaneo(opts.host,puertos)
informe(opts.host,abiertos,cerrados,filtrados)
| UTF-8 | Python | false | false | 4,259 | py | 4 | nmap.py | 4 | 0.608675 | 0.600424 | 0 | 93 | 44.612903 | 143 |
diegopajarito/MACT20.21_Digital_tools_Big_Data_part_2 | 18,159,121,747,191 | 652f3f0e9ba5c58ee60a0930a353478e327599a2 | ff8f16cd11816832d5fc42435fb6bc9e6b003eee | /session4/c_animations_data.py | 1e2c24ac6eaf0a538648e5bb6a535c1acbf2f738 | [
"Apache-2.0"
] | permissive | https://github.com/diegopajarito/MACT20.21_Digital_tools_Big_Data_part_2 | 3ef5d3cb9e396bb489a108c6a67fe2bb2519bc69 | 8b86299c210b3f322abee06fd789386939962328 | refs/heads/main | 2023-03-18T09:11:59.198809 | 2021-03-10T16:47:39 | 2021-03-10T16:47:39 | 333,069,480 | 0 | 1 | Apache-2.0 | true | 2021-01-26T11:53:49 | 2021-01-26T11:53:48 | 2021-01-26T11:53:42 | 2021-01-26T11:53:39 | 0 | 0 | 0 | 0 | null | false | false | # encoding: utf-8
##################################################
# This script shows how to create animated plots using matplotlib and a basic dataset
# Multiple tutorials inspired the current design but they mostly came from:
# https://towardsdatascience.com/how-to-create-animated-graphs-in-python-bb619cc2dec1
# Note: the project is updated for each course, almost yearly
##################################################
#
##################################################
# Author: Diego Pajarito
# Credits: [Institute for Advanced Architecture of Catalonia - IAAC, Advanced Architecture group]
# License: Apache License Version 2.0
# Version: 1.0.0
# Maintainer: Diego Pajarito
# Email: diego.pajarito@iaac.net
# Status: development
##################################################
# We need to import numpy and matplotlib library
# importing libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import seaborn as sns
import matplotlib
# Read files and prepare data
data = pd.read_csv('../data/2021_seguiment-covid19-bcn.csv')
#data = pd.read_csv('https://opendata-ajuntament.barcelona.cat/data/dataset/4f3ffbda-d5be-4f2a-a836-26a77be6df1a/resource/f627ac0a-d05f-416d-9773-eeb464a3fc44/download')
data.columns = ['date_indicator', 'frequency_indicator', 'place', 'name_indicator',
'name_variable', 'value', 'unit', 'source']
# data comes with multiple indicators, we need to pick just one of it for our initial plot
data = data[data['name_indicator'] == 'Casos de COVID-19 a Barcelona (diari)']
# We need the data to be in time format to calculate values in days after day zero
data['date_indicator'] = pd.to_datetime(data['date_indicator'])
initial_day = data['date_indicator'].min()
data['day_after_zero'] = data['date_indicator'] - initial_day
data['day_after_zero'] = data['day_after_zero']/np.timedelta64(1, 'D')
# we also extract some values to set the plot limits
max_day = data['day_after_zero'].max().astype(int)
max_cases = data['value'].max()
title = 'Covid-19 cases BCN'
# We then prepare the writer and animation file options
Writer = animation.writers['ffmpeg']
writer = Writer(fps=20, metadata=dict(artist='MaCTResearcher'), bitrate=1800)
# If error using anaconda try to install ffmpeg
# conda install -c conda-forge ffmpeg
# We create an initial plot with basic configuration a single line
fig = plt.figure(figsize=(10, 6))
plt.xlim(0, max_day)
plt.ylim(0, max_cases)
plt.xlabel('Day after case 1', fontsize=18)
plt.ylabel('Cases', fontsize=18)
plt.title(title, fontsize=20)
# We need to set an animation function to handle individual behaviour per frame
# variable "i" is the frame id that can be used to handle queries or filters for your data
def animate(i):
frame_data = data[data['day_after_zero'] <= i]
p = sns.lineplot(x='day_after_zero', y='value', data=frame_data, color="r")
p.tick_params(labelsize=17)
plt.setp(p.lines, linewidth=1)
ani = matplotlib.animation.FuncAnimation(fig, animate, frames=max_day, repeat=True)
ani.save('covid_cases_bcn.mp4', writer=writer)
print('end')
| UTF-8 | Python | false | false | 3,131 | py | 24 | c_animations_data.py | 17 | 0.696263 | 0.671032 | 0 | 72 | 42.486111 | 169 |
takenet/blip-sdk-python | 1,168,231,120,141 | 0dfa9c5813609a4d6611dda39635a7d35c0f248b | ec32981614943d26f9bddc7eee205406407fdfcb | /src/blip_sdk/extensions/__init__.py | eaabd99205c358f81870ce4abf72a6955626fc6d | [
"MIT"
] | permissive | https://github.com/takenet/blip-sdk-python | a58a4954577229650043e9c270554f25458767ab | f958149b2524d4340eeafad8739a33db71df45ed | refs/heads/master | 2023-08-01T17:52:00.981847 | 2021-10-07T19:16:57 | 2021-10-07T19:16:57 | 377,283,892 | 6 | 3 | MIT | false | 2021-10-07T19:16:58 | 2021-06-15T20:18:30 | 2021-07-14T15:02:53 | 2021-10-07T19:16:57 | 252 | 2 | 0 | 0 | Python | false | false | from .extension_base import ExtensionBase
from .media import MediaExtension
from .chat import ChatExtension
from .artificial_intelligence import AIExtension
from .analytics import AnalyticsExtension
from .contexts import ContextsExtension
| UTF-8 | Python | false | false | 239 | py | 76 | __init__.py | 69 | 0.866109 | 0.866109 | 0 | 6 | 38.833333 | 48 |
MatheusWTF/dither-with-py | 12,936,441,539,124 | de8c4ec2312cf13d98397dba9013c74ed9f31bb6 | 63fbfa9e0efa65140ea4048499af956a33d0f2d6 | /ordena.py | 3a21b64d9137f357e7684d9f651c664d9f85cc9a | [] | no_license | https://github.com/MatheusWTF/dither-with-py | ca18ac9e144e70a6f95afe3da67b574f2c0eb067 | a56700f3e6b13178777e08e75840fbbaff02d634 | refs/heads/master | 2020-03-23T10:34:19.627937 | 2018-07-28T03:18:14 | 2018-07-28T03:18:14 | 141,450,177 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # encoding: utf-8
# author: Matheus D. rodrigues
### Imports
from PIL import Image
from time import time
### Sub Programs
def ordena(img):
lista = []
pix = img.load()
for y in range(img.size[1]):
for x in range(img.size[0]):
data = [x, y, pix[x, y]]
lista.append(data)
for i in range(0, len(lista)-1):
mini = i
for j in range(i, len(lista)):
            # selection sort: compare against the current maximum (mini), not i,
            # so mini really tracks the brightest remaining pixel
            colors_mini = ((lista[mini][2][0]*100) + (lista[mini][2][1]*10) + (lista[mini][2][2]))/3
            colors_j = ((lista[j][2][0]*100) + (lista[j][2][1]*10) + (lista[j][2][2]))/3
            print('checking', i, j, sep=" | ")
            if colors_mini < colors_j:
mini = j
if mini != i:
print('changing things up')
lista[i], lista[mini] = lista[mini], lista[i]
return lista
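# Note: the hand-rolled selection sort above is O(n^2) over all pixels and
# gets slow for large images.  Equivalent one-liner with the same weighted
# brightness key, brightest first (a sketch, same [x, y, (r, g, b)] rows):
#   lista.sort(key=lambda p: (p[2][0]*100 + p[2][1]*10 + p[2][2])/3, reverse=True)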
def create_canvas(lista, canvas):
pix = canvas.load()
for y in range(canvas.size[1]):
for x in range(canvas.size[0]):
ind = (canvas.size[0]*y)+x
pix[x, y] = lista[ind][2]
return canvas
### Main Program
t0 = time()
myPath = 'c:/Users/MatheusDinizRodrigue/Documents/Testes/PY/Image Manipulation/'
fileName = str(input('Insert the image\'s name: \n'))
typeFile = int(input('1 - .jpg; \n2 - .png.\n'))
typeFile = '.jpg' if typeFile == 1 else '.png'
img = Image.open(myPath+fileName+typeFile).convert('RGB')
lista = ordena(img)
canvas = Image.new('RGB', img.size, (255, 255, 255))
canvas = create_canvas(lista, canvas)
canvas.save(fileName+'_ordena.jpg')
t1 = time()
print("Execution time: %d" %(int(round(t0-t1)))) | UTF-8 | Python | false | false | 1,593 | py | 3 | ordena.py | 3 | 0.559322 | 0.528562 | 0 | 55 | 27.981818 | 88 |
DCC-CC4401/2018-1-sad-T3 | 953,482,787,595 | 680911234e671123091baf3533791a5fcda801e7 | ae66041a537d95aadef6596e31e6428081c6500f | /cc4401Inventory/spacesApp/migrations/0001_initial.py | 135d5f07eb47cc34e61a4e94aab5882eaa382308 | [] | no_license | https://github.com/DCC-CC4401/2018-1-sad-T3 | 616ce4be5beee2e6710f4d763f023829b5b6ab5e | dbb3e2dbcef6c3e1de2061c9f52de061a192c190 | refs/heads/master | 2020-03-13T17:23:15.635691 | 2018-07-10T15:49:18 | 2018-07-10T15:49:18 | 131,216,386 | 1 | 3 | null | false | 2018-07-10T06:36:43 | 2018-04-26T22:18:00 | 2018-07-10T06:22:26 | 2018-07-10T06:36:43 | 985 | 1 | 0 | 0 | HTML | false | null | # Generated by Django 2.0.5 on 2018-06-28 21:10
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Space',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40, verbose_name='Nombre')),
('description', models.TextField(blank=True, verbose_name='Descripción')),
('image', models.ImageField(blank=True, upload_to='images/items', verbose_name='Imagen del articulo')),
('state', models.CharField(choices=[('D', 'Disponible'), ('P', 'En préstamo'), ('R', 'En reparación')], max_length=1)),
],
options={
'abstract': False,
},
),
]
| UTF-8 | Python | false | false | 929 | py | 40 | 0001_initial.py | 18 | 0.557235 | 0.537797 | 0 | 27 | 33.296296 | 135 |
quintagroup/quintagroup.plonecomments | 16,286,515,987,052 | d912e063a4afc773c34890eefac9871680498d85 | 1ce9ee181195d293766443e6ae64c6ff5f49a3d5 | /quintagroup/plonecomments/tests/testQPloneCommentsCommenting.py | 8e7f89cfbc2c65bc9c0dc4e2773ed791f9746496 | [] | no_license | https://github.com/quintagroup/quintagroup.plonecomments | 69a7e321e85571cc0397eaf21dd23580e0946ea1 | eed5c2c5b9f2f9eba211cd7f9e1dea5a685fa3a8 | refs/heads/master | 2021-01-06T20:37:43.460801 | 2014-07-17T09:18:56 | 2014-07-17T09:18:56 | 5,807,860 | 0 | 1 | null | false | 2014-07-17T09:18:56 | 2012-09-14T10:18:10 | 2013-10-07T20:41:41 | 2014-07-17T09:18:56 | 494 | 1 | 3 | 0 | Python | null | null | #
# Test adding comments possibility on switching on/off moderation
#
from Products.CMFCore.utils import getToolByName
from zExceptions import Unauthorized
from quintagroup.plonecomments.tests.base import FunctionalTestCase
from quintagroup.plonecomments.tests.config import USERS, PROPERTY_SHEET, \
DM_USERS_IDS, COMMON_USERS_IDS
class TestCommBase(FunctionalTestCase):
def afterSetUp(self):
self.loginAsPortalOwner()
self.request = self.app.REQUEST
# VERY IMPORTANT to guarantee product skin's content visibility
self._refreshSkinData()
# Add all users
self.membership = getToolByName(self.portal, 'portal_membership', None)
for user_id in USERS.keys():
self.membership.addMember(user_id, USERS[user_id]['passw'],
USERS[user_id]['roles'], [])
# Add users to Discussion Manager group
portal_groups = getToolByName(self.portal, 'portal_groups')
dm_group = portal_groups.getGroupById('DiscussionManager')
[dm_group.addMember(u) for u in DM_USERS_IDS]
# Allow discussion for Document
portal_types = getToolByName(self.portal, 'portal_types', None)
doc_fti = portal_types.getTypeInfo('Document')
doc_fti._updateProperty('allow_discussion', 1)
# Make sure Documents are visible by default
# XXX only do this for plone 3
self.portal.portal_workflow.setChainForPortalTypes(('Document',),
'plone_workflow')
        # Add testing documents to the portal: one document for every user.
        # This is more useful for testing behaviours where changes are made
        # to the document state.
self.discussion = getToolByName(self.portal, 'portal_discussion',
None)
self.all_users_id = DM_USERS_IDS + COMMON_USERS_IDS
for user_id in self.all_users_id:
doc_id = 'doc_%s' % user_id
self.portal.invokeFactory('Document', id=doc_id)
doc_obj = getattr(self.portal, doc_id)
doc_obj.edit(text_format='plain',
text='hello world from %s' % doc_id)
# Create talkback for document and Add comment to doc_obj
self.discussion.getDiscussionFor(doc_obj)
doc_obj.discussion_reply('A Reply for %s' % doc_id,
'text of reply for %s' % doc_id)
class TestMixinAnonymOn:
def afterSetUp(self):
pass
def testAddCommentToDocAnonymUsers(self):
        # ADDING COMMENTS MUST BE ALLOWED for anonymous users
self.login('dm_admin')
doc_obj = getattr(self.portal, "doc_anonym")
discussion_for = self.discussion.getDiscussionFor(doc_obj)
replies_before = len(discussion_for.getReplies())
# Create talkback for document and Add comment
self.logout()
doc_obj.discussion_reply("Anonym reply", "text of 'anonym' reply")
self.login('dm_admin')
replies_after = len(discussion_for.getReplies())
        self.failUnless(replies_after - replies_before,
                        "Anonymous user CAN'T really add comment in turned ON "
                        "*Anonymous commenting mode*.")
def testAddCommentToDocNotAnonymUsers(self):
# All users CAN ADD COMMENTS
not_anonym_users = [u for u in self.all_users_id if not u == 'anonym']
failed_users = []
for u in not_anonym_users:
self.login('dm_admin')
doc_id = "doc_%s" % u
doc_obj = getattr(self.portal, doc_id)
discussion_for = self.discussion.getDiscussionFor(doc_obj)
replies_before = discussion_for.getReplies()
self.login(u)
# Create talkback for document and Add comment
doc_obj.discussion_reply("%s's reply" % u,
"text of '%s' reply" % u)
# Check is comment added
self.login('dm_admin')
replies_after = discussion_for.getReplies()
disparity = len(replies_after) - len(replies_before)
if not disparity:
failed_users.append(u)
self.failIf(failed_users,
"%s - user(s) can not really add comment." % failed_users)
class TestMixinAnonymOff:
def afterSetUp(self):
all_users_id = DM_USERS_IDS + COMMON_USERS_IDS
self.not_like_anonym = ['admin', 'member', 'dm_admin', 'dm_member']
self.like_anonym = [u
for u in all_users_id
if u not in self.not_like_anonym]
def testAddCommentToDocLikeAnonymUsers(self):
        # ADDING COMMENTS MUST BE REFUSED for anonymous users
failed_users = []
for u in self.like_anonym:
self.login('dm_admin')
doc_obj = getattr(self.portal, "doc_%s" % u)
discussion_for = self.discussion.getDiscussionFor(doc_obj)
replies_before = discussion_for.getReplies()
# Create talkback for document and Add comment
self.logout()
if not u == 'anonym':
self.login(u)
self.assertRaises(Unauthorized, doc_obj.discussion_reply,
"%s's reply" % u, "text of '%s' reply" % u)
self.login('dm_admin')
replies_after = discussion_for.getReplies()
disparity = len(replies_after) - len(replies_before)
if disparity:
failed_users.append(u)
        self.failIf(failed_users,
                    "%s user(s) CAN really add comment in turned OFF "
                    "*Anonymous commenting mode*." % failed_users)
def testAddCommentToDocNotLikeAnonymUsers(self):
# All users CAN ADD COMMENTS
failed_users = []
for u in self.not_like_anonym:
self.login('dm_admin')
doc_id = "doc_%s" % u
doc_obj = getattr(self.portal, doc_id)
discussion_for = self.discussion.getDiscussionFor(doc_obj)
replies_before = discussion_for.getReplies()
self.login(u)
# Create talkback for document and Add comment
doc_obj.discussion_reply("%s's reply" % u,
"text of '%s' reply" % u)
# Check is comment added
self.login('dm_admin')
replies_after = discussion_for.getReplies()
disparity = len(replies_after) - len(replies_before)
if not disparity:
failed_users.append(u)
        self.failIf(failed_users,
                    "%s - user(s) can not really add comment in turned OFF "
                    "*Anonymous commenting mode*." % failed_users)
class TestMixinModerationOn:
def afterSetUp(self):
# Get Moderation state
pp = getToolByName(self.portal, 'portal_properties')
config_ps = getattr(pp, PROPERTY_SHEET, None)
EnableAnonymComm = getattr(config_ps, "enable_anonymous_commenting")
# Group users depending on Anonymous commenting enabling/disabling
if EnableAnonymComm:
self.allowable_dm_users = DM_USERS_IDS
self.allowable_common_users = COMMON_USERS_IDS
self.illegal_dm_users = []
self.illegal_common_users = []
else:
self.allowable_dm_users = ['dm_admin', 'dm_member']
self.allowable_common_users = ['admin', 'member']
self.illegal_dm_users = [u
for u in DM_USERS_IDS
if not u in self.allowable_dm_users]
self.illegal_common_users = [u
for u in COMMON_USERS_IDS
if not u in self.allowable_common_users]
def testAddCommentToNotPublishedReplyDMUsers(self):
# DiscussionManager's group's members with Manager or Member roles
# CAN ADD COMMENTS to reply IN ANY STATE (published/not published)
failed_users = []
for u in self.allowable_dm_users:
self.login(u)
doc_obj = getattr(self.portal, "doc_%s" % u)
# Get reply to this document
reply = self.discussion.getDiscussionFor(doc_obj).getReplies()[0]
# Create talkback for reply and Add comment
self.discussion.getDiscussionFor(reply)
reply.discussion_reply("%s's reply" % u, "text of '%s' reply" % u)
discussion_for = self.discussion.getDiscussionFor(reply)
replies_to_reply = discussion_for.getReplies()
if not replies_to_reply:
failed_users.append(u)
self.failIf(failed_users, "%s - member(s) of DiscussionManager group "
"CAN'T really ADD comment" % failed_users)
        # This only applies with *Anonymous commenting mode* turned OFF
failed_users = []
for u in self.illegal_dm_users:
self.login(u)
doc_obj = getattr(self.portal, "doc_%s" % u)
# Get reply to this document
reply = self.discussion.getDiscussionFor(doc_obj).getReplies()[0]
# Create talkback for reply and Add comment
self.discussion.getDiscussionFor(reply)
self.assertRaises(Unauthorized, reply.discussion_reply,
"%s's reply" % u, "text of '%s' reply" % u)
discussion_for = self.discussion.getDiscussionFor(reply)
replies_to_reply = discussion_for.getReplies()
if replies_to_reply:
failed_users.append(u)
        self.failIf(failed_users,
                    "%s user(s) CAN really add comment in turned OFF "
                    "*Anonymous commenting mode*." % failed_users)
"""
def testAddCommentToNotPublishedReplyNotDMUsers(self):
        # Users without DiscussionManager role CAN'T ACCESS and so CAN'T ADD COMMENTS
# TO NOT PUBLISHED reply.
manager = 'dm_admin'
for u in self.allowable_common_users:
self.login(manager)
doc_obj = getattr(self.portal, "doc_%s" % u)
reply = self.discussion.getDiscussionFor(doc_obj).getReplies()[0]
discussion_for = self.discussion.getDiscussionFor(reply)
reply_to_reply = discussion_for.getReplies()
reply_to_reply_before = len(reply_to_reply)
self.logout()
if not u=='anonym':
self.login(u)
            # Adding a reply to a not yet published reply MUST generate
            # an Unauthorized exception
self.assertRaises(Unauthorized, reply.discussion_reply,
"Reply %s" % u, "text of %s reply" % u)
"""
def testAddCommentToPublishedReplyALLUsers(self):
# All users CAN ADD COMMENTS to published reply
manager = 'dm_admin'
allowable_users = self.allowable_dm_users + self.allowable_common_users
illegal_users = self.illegal_dm_users + self.illegal_common_users
all_users = allowable_users + illegal_users
# 1. Publish comments
self.login(manager)
for u in all_users:
doc_obj = getattr(self.portal, "doc_%s" % u)
reply = self.discussion.getDiscussionFor(doc_obj).getReplies()[0]
reply.discussion_publish_comment()
# 2.Check adding reply to reply for allowable users
failed_users = []
for u in allowable_users:
self.logout()
if not u == 'anonym':
self.login(u)
# Create talkback for document and Add comment
self.discussion.getDiscussionFor(reply)
reply.discussion_reply("Reply %s" % u, "text of %s reply" % u)
# Check is comment added
self.login(manager)
discussion_for = self.discussion.getDiscussionFor(reply)
reply_to_reply = discussion_for.getReplies()
if not reply_to_reply:
failed_users.append(u)
self.failIf(failed_users, "%s - user(s) can not really add comment to"
" PUBLISHED reply." % failed_users)
# 3.Check adding reply to reply for illegal users
for u in illegal_users:
self.logout()
if not u == 'anonym':
self.login(u)
            # Adding a reply to a not yet published reply MUST generate
            # an Unauthorized exception
self.discussion.getDiscussionFor(reply)
self.assertRaises(Unauthorized, reply.discussion_reply,
"Reply %s" % u, "text of %s reply" % u)
class TestMixinModerationOff:
def afterSetUp(self):
# Get Moderation state
pp = getToolByName(self.portal, 'portal_properties')
config_ps = getattr(pp, PROPERTY_SHEET, None)
EnableAnonymComm = getattr(config_ps, "enable_anonymous_commenting")
# Group users depending on Anonymous commenting enabling/disabling
if EnableAnonymComm:
self.allowable_users = DM_USERS_IDS + COMMON_USERS_IDS
self.illegal_users = []
else:
self.allowable_users = ['dm_admin', 'dm_member', 'admin', 'member']
self.illegal_users = [u
for u in self.all_users_id
if not u in self.allowable_users]
# Add testing document to portal in Moderation OFF mode.
self.discussion = getToolByName(self.portal, 'portal_discussion', None)
self.doc_moder_off_id = 'doc_moderation_off'
self.portal.invokeFactory('Document', id=self.doc_moder_off_id)
doc_obj = getattr(self.portal, self.doc_moder_off_id)
doc_obj.edit(text_format='plain',
text='hello world from in moderation off mode')
# Create talkback for document and Add comment to 'doc_moderatio_off'
self.discussion.getDiscussionFor(doc_obj)
doc_obj.discussion_reply("A Reply to '%s'" % self.doc_moder_off_id,
"text of reply to '%s'" % self.doc_moder_off_id)
def testAddCommentToReplyAllowableUsers(self):
# Users CAN ADD COMMENTS
failed_users = []
for u in self.allowable_users:
self.logout()
if not u == 'anonym':
self.login(u)
doc_obj = getattr(self.portal, self.doc_moder_off_id)
# Get reply to this document
discussion_for = self.discussion.getDiscussionFor(doc_obj)
reply_to_doc = discussion_for.getReplies()[0]
# Create talkback for reply and Add comment
discussion_for = self.discussion.getDiscussionFor(reply_to_doc)
replies_before = discussion_for.getReplies()
if not replies_before:
self.discussion.getDiscussionFor(reply_to_doc)
reply_to_doc.discussion_reply("%s's reply" % u,
"text of '%s' reply" % u)
replies_after = discussion_for.getReplies()
disparity = len(replies_after) - len(replies_before)
if not disparity:
failed_users.append(u)
        self.failIf(failed_users,
                    "%s - member(s) CAN'T really ADD comment in turned off"
                    " comments Moderation mode." % failed_users)
def testAddCommentToReplyIllegalUsers(self):
# This users CAN'T ADD COMMENTS
        # This only applies with *Anonymous commenting mode* turned OFF
for u in self.illegal_users:
self.logout()
if not u == 'anonym':
self.login(u)
doc_obj = getattr(self.portal, self.doc_moder_off_id)
# Get reply to this document
discussion_for = self.discussion.getDiscussionFor(doc_obj)
reply_to_doc = discussion_for.getReplies()[0]
# Create talkback for reply and Add comment
self.discussion.getDiscussionFor(reply_to_doc)
self.assertRaises(Unauthorized, reply_to_doc.discussion_reply,
"%s's reply" % u, "text of '%s' reply" % u)
class TestModerationAnonymComm(TestCommBase, TestMixinAnonymOn,
TestMixinModerationOn):
def afterSetUp(self):
TestCommBase.afterSetUp(self)
# Preparation for functional testing
        # Turn on Moderation and turn on Anonymous commenting
self.request.form['enable_anonymous_commenting'] = 'True'
self.request.form['enable_moderation'] = 'True'
self.portal.prefs_comments_setup()
# Initialize base classes
TestMixinAnonymOn.afterSetUp(self)
TestMixinModerationOn.afterSetUp(self)
class TestModerationOFFAnonymComm(TestCommBase, TestMixinAnonymOff,
TestMixinModerationOn):
def afterSetUp(self):
TestCommBase.afterSetUp(self)
# Preparation for functional testing
        # Turn on Moderation and turn off Anonymous commenting
self.request.form['enable_moderation'] = 'True'
self.portal.prefs_comments_setup()
# Initialize base classes
TestMixinAnonymOff.afterSetUp(self)
TestMixinModerationOn.afterSetUp(self)
class TestAnonymCommOFFModeration(TestCommBase, TestMixinAnonymOn,
TestMixinModerationOff):
def afterSetUp(self):
TestCommBase.afterSetUp(self)
# Preparation for functional testing
        # Turn on Anonymous commenting and turn off Moderation
self.request.form['enable_anonymous_commenting'] = 'True'
self.portal.prefs_comments_setup()
# Initialize base classes
TestMixinAnonymOn.afterSetUp(self)
TestMixinModerationOff.afterSetUp(self)
class TestOFFModerationOFFAnonymComm(TestCommBase, TestMixinAnonymOff,
TestMixinModerationOff):
def afterSetUp(self):
TestCommBase.afterSetUp(self)
# Preparation for functional testing
        # Turn off Moderation and turn off Anonymous commenting
self.portal.prefs_comments_setup()
# Initialize base classes
TestMixinAnonymOff.afterSetUp(self)
TestMixinModerationOff.afterSetUp(self)
def test_suite():
from unittest import TestSuite, makeSuite
suite = TestSuite()
suite.addTest(makeSuite(TestModerationAnonymComm))
suite.addTest(makeSuite(TestModerationOFFAnonymComm))
suite.addTest(makeSuite(TestAnonymCommOFFModeration))
suite.addTest(makeSuite(TestOFFModerationOFFAnonymComm))
return suite
| UTF-8 | Python | false | false | 18,741 | py | 53 | testQPloneCommentsCommenting.py | 23 | 0.592604 | 0.592018 | 0 | 456 | 40.098684 | 81 |
NUmamahesh15/Twitter-Replica | 12,738,873,032,602 | fb48b7c95169de949ed6ed1f567284fd20a02db4 | 5d0ca62c5d9437854af7f5f33793e28bb06b7787 | /profiledisplay.py | 7982b3df8d76bacdfb8d4a3d69f31ea52396c14c | [] | no_license | https://github.com/NUmamahesh15/Twitter-Replica | 372c79aa89bde411c8341bca8d4842a3c76a77f6 | 71c92dace285438e65b073c117a7d180c5fb0deb | refs/heads/master | 2022-11-28T12:21:23.486343 | 2020-08-10T08:21:14 | 2020-08-10T08:21:14 | 286,417,399 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import webapp2;
import os
import jinja2
import random
from google.appengine.ext import ndb
from google.appengine.api import users
from myuser import MyUser
from twitter import Twitter
from datetime import datetime
JINJA_ENVIRONMENT = jinja2.Environment(
loader = jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions = ['jinja2.ext.autoescape'],
autoescape = True
)
# Function to check if the user is in the following list
def checkFollowing(user, followingList):
flag = False
for userName in range(len(followingList)):
if str(followingList[userName]) == user:
flag = True
return flag
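# Equivalent one-liner for the membership test above (a sketch):
#   return user in [str(name) for name in followingList]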
class ProfileDisplay(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/html'
user = str(self.request.params.items()[0][1])
# Fetch current users information
cur_user = users.get_current_user()
cur_user_key = ndb.Key('MyUser',cur_user.user_id())
current_user = cur_user_key.get()
# To fetch the ID of the searched user
query_user = MyUser.query()
query_user = query_user.filter(MyUser.userName == user).fetch(keys_only=True)
searched_user_id = query_user[0].id()
# to fetch the user profile using the ID retrieved.
searched_user_key = ndb.Key('MyUser',searched_user_id)
searched_user = searched_user_key.get()
        # check if the user exists in the current user's following list
check_user_exists = checkFollowing(searched_user.userName, current_user.followingUsers)
# Retrieving the tweets of the searched user
userTweet = Twitter.query().filter(Twitter.userName==searched_user.userName).order(-Twitter.timestamp)
template_values = {'searched_user':searched_user,'exists':check_user_exists,'user_tweets':userTweet.fetch(50),'current_user':current_user,
'followingLen':len(searched_user.followingUsers),'followedLen':len(searched_user.followedUsers)}
template = JINJA_ENVIRONMENT.get_template('profiledisplay.html')
self.response.write(template.render(template_values))
#
def post(self):
self.response.headers['Content-Type'] = 'text/html'
action = self.request.get('button')
# Fetch the current users details
cur_user = users.get_current_user()
cur_user_key = ndb.Key('MyUser',cur_user.user_id())
current_user = cur_user_key.get()
# Get the ID of the user who needs to be followed
user = str(self.request.params.items()[0][1])
query_user = MyUser.query()
query_user = query_user.filter(MyUser.userName == user).fetch(keys_only=True)
searched_user_id = query_user[0].id()
# get the user profile
searched_user_key = ndb.Key('MyUser',searched_user_id)
searched_user = searched_user_key.get()
if action == 'FOLLOW':
# append the userName to the followingUsers list of the current user
current_user.followingUsers.append(user)
current_user.put()
# append the userName to the followedUsers list of the searched user
searched_user.followedUsers.append(current_user.userName)
searched_user.put()
# self.redirect('/profiledisplay')
elif action == "UNFOLLOW":
# Remove the user from the followingUsers list of the current user
current_user.followingUsers.remove(user)
current_user.put()
# Remove the user from the followedUsers list of the searched user
searched_user.followedUsers.remove(current_user.userName)
searched_user.put()
query = "name ={0}".format(searched_user.userName)
self.redirect('/profiledisplay?'+query)
| UTF-8 | Python | false | false | 3,689 | py | 18 | profiledisplay.py | 10 | 0.676877 | 0.672811 | 0 | 85 | 42.4 | 146 |
highb33kay/codewarsBot | 14,474,039,804,224 | db38c4a8f9dc4f078055d966ed2380868b3d9713 | 7b0fd1301f581f7731b40dc629825907e231141d | /commands/getuserinfo.py | f008227e6a31cecf058b30d623093667e588f7c8 | [] | no_license | https://github.com/highb33kay/codewarsBot | 1d6ca28134448018dda76cea652b5a70b714f00e | d50a14ec0db1655c15172b3801c126af39e5f909 | refs/heads/master | 2023-03-19T12:19:02.787826 | 2020-12-31T17:01:28 | 2020-12-31T17:01:28 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import discord, requests
async def getuserinfo(message,args):
if(args == []):
await message.channel.send(f'{message.author.mention}, you must specify a valid username')
return
data = requests.get(f"https://www.codewars.com/api/v1/users/{args[0]}").json()
if "success" in data:
await message.channel.send(f"{message.author.mention}, I couldn't find {args[0]}")
return
name = data['name'] if data['name'] != '' else 'N/A'
embed_with_user_data = discord.Embed(title=f"{args[0]}'s info", color=0x00ff00)
embed_with_user_data.add_field(name="👤 User info", value=f"Username: {data['username']} \n Name: {name} \n Clan: {data['clan']}", inline=True)
ranks = ""
for x in data['ranks']:
if x != "languages":
ranks += f"Rank: {x}, {data['ranks'][x]['name']}, {data['ranks'][x]['color']} \n"
embed_with_user_data.add_field(name="📈 User stats", value=f"Honor: {data['honor']} \n LeaderBoard position: {data['leaderboardPosition']} \n Ranks: {ranks}", inline=True)
languages = ""
skills = ""
for x in data['ranks']['languages']: languages += f"{x.capitalize()}: {data['ranks']['languages'][x]['name']}, {data['ranks']['languages'][x]['color']}, {str(data['ranks']['languages'][x]['score'])} score \n"
try:
for skill in data['skills']: skills += skill + ", "
if skills != '': skills = skills[:len(skills)-2]
if skills == '': skills = "N/A"
except:
skills = "N/A" #If it doesn't have skills pass
    # Trim so the embed field stays under Discord's 1024-character limit
add_and_more = False
languages = languages.split(' ')
while len(languages) > 80:
add_and_more = True
languages.pop(len(languages)-1)
if add_and_more:
languages.append('and')
languages.append('more...')
languages = ' '.join([x for x in languages])
    embed_with_user_data.add_field(name="💻 Languages and skills", value=f"Languages: {languages} \n Skills: {skills}", inline=False)
    embed_with_user_data.add_field(name="💾 Code Challenges", value=f"Authored: {data['codeChallenges']['totalAuthored']} \n Completed: {data['codeChallenges']['totalCompleted']}", inline=True)
kyu_level = data['ranks']['overall']['rank']
if kyu_level == -1: embed_with_user_data.set_image(url="https://i.ibb.co/16whS56/1.png")
elif kyu_level == -2: embed_with_user_data.set_image(url="https://i.ibb.co/MDBvfKw/2.png")
elif kyu_level == -3: embed_with_user_data.set_image(url="https://i.ibb.co/7kn00Jt/3.png")
elif kyu_level == -4: embed_with_user_data.set_image(url="https://i.ibb.co/TRH8Y6b/4.png")
elif kyu_level == -5: embed_with_user_data.set_image(url="https://i.ibb.co/q9x4W6F/5.png")
elif kyu_level == -6: embed_with_user_data.set_image(url="https://i.ibb.co/dLmn8Tx/6.png")
elif kyu_level == -7: embed_with_user_data.set_image(url="https://i.ibb.co/G96g8Cd/7.png")
elif kyu_level == -8: embed_with_user_data.set_image(url="https://i.ibb.co/K0YdSVc/8.png")
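    # The elif chain above could be collapsed into a lookup table (a sketch,
    # using the same badge URLs keyed by the negative kyu rank):
    #   badges = {-1: "https://i.ibb.co/16whS56/1.png", ...,
    #             -8: "https://i.ibb.co/K0YdSVc/8.png"}
    #   if kyu_level in badges:
    #       embed_with_user_data.set_image(url=badges[kyu_level])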
await message.channel.send(embed=embed_with_user_data)
| UTF-8 | Python | false | false | 3,424 | py | 14 | getuserinfo.py | 11 | 0.633294 | 0.618614 | 0 | 57 | 58.754386 | 212 |
mayankagg9722/Placement-Preparation | 18,691,697,703,245 | f36d1195eb625b8f218a3c116d916fc2bc40cfc7 | 7f11252c169d3bafca08de9fde71489409d04979 | /rep.py | a542935f209939eb98e23438d8793619dddd3ee5 | [] | no_license | https://github.com/mayankagg9722/Placement-Preparation | ee4388dc89363a4b1adb0ecea344ce76763f1aec | 30593f86d1497adecca98e30a439368771c11061 | refs/heads/master | 2021-03-16T10:22:35.543180 | 2020-11-29T07:51:31 | 2020-11-29T07:51:31 | 119,993,686 | 174 | 59 | null | false | 2020-11-29T07:51:32 | 2018-02-02T14:49:11 | 2020-11-03T00:00:28 | 2020-11-29T07:51:31 | 13,155 | 135 | 51 | 0 | C++ | false | false | def printRepeating(arr, size):
print("The repeating elements are: ")
for i in range(0, size):
print("index:",i)
print("index element:",arr[i])
print ("final element:",arr[abs(arr[i])])
if arr[abs(arr[i])] >= 0:
arr[abs(arr[i])] = -arr[abs(arr[i])]
else:
print ("Ans:",abs(arr[i]), end = " ")
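# How the sign trick works: visiting value v negates the element at index
# abs(v); finding that slot already negative means v was seen before, so v
# is a repeat.  It assumes 0 <= abs(element) < len(arr) and mutates arr in
# place.  For the sample input below the repeats reported are 1, 3 and 6
# (interleaved with the debug prints above).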
arr = [1, 2, 3, 1, 3, 6, 6]
arr_size = len(arr)
printRepeating(arr, arr_size) | UTF-8 | Python | false | false | 470 | py | 273 | rep.py | 263 | 0.487234 | 0.468085 | 0 | 17 | 26.705882 | 49 |
sachinrudr/textbookfindr | 5,763,846,120,364 | 4fadc6d9a09a80628ee03e4dfb5a86b0beea6cb9 | 72cae103a1180a2c1bd920995e72fbc230f2ffa4 | /textbookfindr/bazaar/urls.py | bc8609aa9fd1537cf67cdf612ca700bcd1c32353 | [] | no_license | https://github.com/sachinrudr/textbookfindr | a0025c422230213f957149242e3179a9545ace62 | 3692305cccad77690d1fa3b24500c2017493f7fb | refs/heads/master | 2016-05-31T14:20:21.458054 | 2014-07-28T17:39:39 | 2014-07-28T17:39:39 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf.urls import patterns, url
from bazaar import views
urlpatterns = patterns('',
url(r'^$', views.index, name = 'index'),
url(r'^json/(?P<request_type>\w+)/$', views.json, name = 'json'),
) | UTF-8 | Python | false | false | 215 | py | 18 | urls.py | 12 | 0.632558 | 0.632558 | 0 | 9 | 22.888889 | 69 |
otto-torino/django-baton | 4,432,406,291,908 | 0ae0b9a2efcf26424a4a16b2f58fae60d54cade5 | c710fa2a979b19d26a45c0821b1c4a46134ba0ae | /testapp/app/app/tests/test_e2e_menu_mobile.py | ab48a55580c09eebb556cef845075a3ee7f69648 | [
"MIT"
] | permissive | https://github.com/otto-torino/django-baton | 4e1c6fb321e613ac8af9dba496e508fa6a194cb4 | a20b2f84c41639c6626357c1486fa86f9ef935c1 | refs/heads/master | 2023-08-26T20:27:31.907955 | 2023-08-11T09:45:30 | 2023-08-11T09:45:30 | 81,565,159 | 827 | 103 | MIT | false | 2023-08-28T17:26:42 | 2017-02-10T12:54:24 | 2023-08-26T16:16:49 | 2023-08-28T17:26:41 | 36,801 | 762 | 82 | 19 | Python | false | false | import time
from django.test import TestCase
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager
from .utils import element_has_css_class
import os
os.environ['WDM_LOG_LEVEL'] = '0'
class TestBatonMenuMobile(TestCase):
def setUp(self):
service = Service(ChromeDriverManager(version='114.0.5735.90').install())
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-extensions')
chrome_options.add_argument('--disable-dev-shm-usage')
self.driver = webdriver.Chrome(
service=service,
options=chrome_options,
)
self.driver.set_window_size(480, 600)
self.driver.implicitly_wait(10)
self.login()
def tearDown(self):
self.driver.quit()
def login(self):
self.driver.get('http://localhost:8000/admin')
username_field = self.driver.find_element(By.ID, "id_username")
password_field = self.driver.find_element(By.ID, "id_password")
button = self.driver.find_element(By.CSS_SELECTOR, 'input[type=submit]')
username_field.send_keys('admin')
time.sleep(1)
password_field.send_keys('admin')
time.sleep(1)
button.click()
    # Selenium still reports the navbar as visible because it is only moved
    # off-screen to the left, so we cannot use the is_displayed method here.
def navbar_is_invisible(self, navbar):
left = int(navbar.value_of_css_property('left').replace('px', ''))
width = int(navbar.value_of_css_property('width').replace('px', ''))
return left + width <= 0
def navbar_is_visible(self, navbar):
left = int(navbar.value_of_css_property('left').replace('px', ''))
return left == 0
def test_menu(self):
# Wait until baton is ready
wait = WebDriverWait(self.driver, 10)
wait.until(element_has_css_class((By.TAG_NAME, 'body'), "baton-ready"))
time.sleep(2)
navbar = self.driver.find_element(By.CLASS_NAME, 'sidebar-menu')
self.assertEqual('menu-open' in self.driver.find_element(By.TAG_NAME, 'body').get_attribute('class').split(), False)
self.assertEqual(self.navbar_is_invisible(navbar), True)
toggler = self.driver.find_element(By.CSS_SELECTOR, ".navbar-toggler")
toggler.click()
self.assertEqual(self.navbar_is_visible(navbar), True)
self.assertEqual('menu-open' in self.driver.find_element(By.TAG_NAME, 'body').get_attribute('class').split(), True)
root_voices = navbar.find_elements(By.CSS_SELECTOR, '.depth-0 > li')
close_button = self.driver.find_element(By.CLASS_NAME, 'fa-times')
close_button.click()
self.assertEqual('menu-open' in self.driver.find_element(By.TAG_NAME, 'body').get_attribute('class').split(), False)
self.assertEqual(self.navbar_is_invisible(navbar), True)
toggler.click()
# system title voice
self.assertEqual(root_voices[0].get_attribute('innerText'), 'SYSTEM')
self.assertEqual(root_voices[0].is_displayed(), True)
self.assertEqual('title' in root_voices[0].get_attribute('class').split(), True)
self.assertEqual(len(root_voices), 4)
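    # For reference: a custom wait predicate like the imported
    # element_has_css_class is typically a small callable class; one possible
    # shape (a sketch -- the real helper lives in .utils and may differ):
    #   class element_has_css_class(object):
    #       def __init__(self, locator, css_class):
    #           self.locator, self.css_class = locator, css_class
    #       def __call__(self, driver):
    #           element = driver.find_element(*self.locator)
    #           return element if self.css_class in element.get_attribute('class').split() else False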
| UTF-8 | Python | false | false | 3,515 | py | 116 | test_e2e_menu_mobile.py | 50 | 0.660882 | 0.650925 | 0 | 85 | 40.352941 | 124 |
beforeuwait/code_daqsoft | 3,238,405,378,433 | f144261632c45805a7e9caa438e5d72ad4b040fb | 50c23021b19aef84c9c0ed8f8116b1b395df3205 | /linkipYQ/config.py | 9a3fde0d72fc6315625b68ac4f47acb77c6f3de0 | [] | no_license | https://github.com/beforeuwait/code_daqsoft | d87891c6a409841dd495ab85aadb48cb348f9891 | 6178fdbc08a54b2827c1a80297684a628d4f9c08 | refs/heads/master | 2021-09-28T20:33:14.164879 | 2018-11-20T09:16:52 | 2018-11-20T09:16:52 | 108,245,470 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding=utf8
import os
os.chdir(os.path.split(os.path.realpath(__file__))[0])
BLANK = '\u0001'
ENCODE = 'utf-8'
TIME_DELAY = 3
HDFS = '/user/spider/linkip_yq/%s'
# INFO
USER_INFO = {
'name': 'gujing8835',
'password': 'gugugu110',
'type': 1,
}
PROXIES = {
"http": "http://HY3JE71Z6CDS782P:CE68530DAD880F3B@proxy.abuyun.com:9010",
"https": "http://HY3JE71Z6CDS782P:CE68530DAD880F3B@proxy.abuyun.com:9010",
}
REQUEST_DATA = {
'rangeId': 1,
'currPage': 1,
'themeId': 0,
'topicId': 0,
'sentiment': 1,
'type': 0,
'startDay': '2017-12-05 00:00',
'endDay': '2017-12-11 23:59',
'page': 100,
'allKeywords': '',
'orKeywords': '',
'noKeywords': '',
'tuKeywords': '',
'keyWordLocation': 5
}
# headers
HEADERS = {
'Host': 'yq.linkip.cn',
'oginin': 'http://yq.linkip.ccn',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36',
}
HEADERS_XML = {
'Host': 'yq.linkip.cn',
'Origin': 'http://yq.linkip.cn',
'Referer': 'http://yq.linkip.cn/user/qwyq.do',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36',
'X-Requested-With': 'XMLHttpRequest'
}
# url
URL_HOME = 'http://yq.linkip.cn/user/login.do'
URL_LOGIN = 'http://yq.linkip.cn/user/index.do'
URL_DATA = 'http://yq.linkip.cn/user/getdata.do'
URL_YQ = 'http://yq.linkip.cn/user/snapshot.do?id=%s'
# Request result
REQUESTS_RESULT = {
'response': 'bad_requests',
'cookies': '',
'url': '',
'data': '',
'status_code': '',
'error': ''
}
COOKIE_TEXT = ' userName=gujing8835; userPass=gugugu110; JSESSIONID=%s'
COOKIE_DICT = {
'Cookies': ''
}
NEWS_LIST = {
'list': [],
'page': 1,
'json_error': False,
'error': ''
}
NEWS_INFO_CONTENT = {
'content': '',
'source': '',
'author': '',
'time': '',
}
# xpath
INFO_PARSE = {
'content': '//div[@id="content"]',
'source': '//span[@id="media"]/text()',
'author': '//span[@id="author"]/text()',
'time': '//span[@id="source"]/text()',
}
LIST_ELEMENTS = ['id', 'title', 'createtime', 'url', 'type', 'xss', 'source', 'score', 'sentiment', 'content']
# path
NEWS_LIST_FILE = os.path.abspath('news_list.txt')
NEWS_LIST_HISTORY_FILE = os.path.abspath('news_list_history.txt')
NEWS_LIST_IDS_FILE = os.path.abspath('news_list_id.txt')
NEWS_LIST_IDS_HISTORY_FILE = os.path.abspath('news_list_ids_history.txt')
NEWS_INFO_FILE = os.path.abspath('news_info.txt')
NEWS_INFO_HISTORY_FILE = os.path.abspath('news_info_history.txt')
for each in [NEWS_LIST_FILE,
NEWS_LIST_HISTORY_FILE,
NEWS_LIST_IDS_FILE,
NEWS_INFO_FILE,
NEWS_INFO_HISTORY_FILE,
NEWS_LIST_IDS_HISTORY_FILE
]:
if not os.path.exists(each):
f = open(each, 'w')
f.close()
# Keywords
KEYWORDS = [
('西安旅游', '48810'),
('旅游', '52682'),
('陕西旅游', '52683'),
('保定', '52684'),
('大理', '52685'),
('云南石林', '48964'),
# ('云南世博园', '48965'),
# ('云南腾冲火山', '48966'),
# ('丽江古城', '48967'),
# ('玉龙雪山', '48968'),
# ('西双版纳热带植物园 ', '48969'),
# ('崇圣寺三塔文化 ', '48970'),
('普达措国家公园 ', '48971'),
('天山大峡谷 ', '48972'),
('雅安', '48973'),
('绵阳罗浮山', '48974'),
('乌鲁木齐', '48975'),
('吐鲁番', '48976'),
('和田', '48977'),
('丽江', '48978'),
('克拉玛依', '48979'),
('克孜勒苏柯尔克孜', '48980'),
('博尔塔拉', '48981'),
('哈密地区', '48982'),
('阿勒泰', '48983'),
('昌吉', '48984'),
('塔城', '48985'),
('喀什', '48986'),
('新疆伊犁', '48987'),
('新疆巴音', '48988'),
('新疆阿克苏', '48989'),
('玉溪', '48990'),
('德宏', '48991'),
('昆明', '48993'),
('河北承德', '54038'),
('承德', '54040'),
] | UTF-8 | Python | false | false | 4,084 | py | 79 | config.py | 35 | 0.537408 | 0.44942 | 0 | 165 | 22.012121 | 135 |
machengchao/1805 | 17,239,998,745,692 | f0570b7a7f0d57dce1cfc05b743ef1567e0173b7 | 554eb44a85f6dfb3316f106c8fa4b6dd12db5b41 | /07day/05-高富帅.py | 584be8c3280f4220ba44d8283a5a61df8ba40a4f | [] | no_license | https://github.com/machengchao/1805 | 3d8297a368dbefb0b19b27c65a3340b52f019bcc | 705b1316c0fbdda79f2b9fb7722b67a6450f672e | refs/heads/master | 2020-03-21T14:45:23.118350 | 2018-06-28T01:29:10 | 2018-06-28T01:29:10 | 138,674,091 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | sex = input("请输入性别")
if sex =='男':
    height = int(input('请输入您的身高'))
    money = int(input('请输入您的财富'))
    color = int(input('请输入您的颜值'))
    if height > 180 and money > 1000 and color > 90:
print('高富帅')
else:
print('稳住,别浪')
if sex == '女':
    pifucolor = input('请输入您的皮肤颜色')
    money = int(input('请输入您的财富'))
    colo = int(input('请输入您的颜值'))
    if pifucolor == '白色' and money > 800 and colo < 90:
print('白富美')
else:
print('哈哈哈')
| UTF-8 | Python | false | false | 563 | py | 40 | 05-高富帅.py | 40 | 0.556845 | 0.524362 | 0 | 17 | 24.294118 | 53 |
angeloxx/swagger2modsec | 566,935,701,192 | 35ee9a8895d9ce565ba4b7ded0903e5d952e7735 | 989583da8a56167afab91de67c29957b23a8d1a7 | /swagger.py | e0f19c10af7576e8eedde80e2fabbf71eed8ca8d | [
"MIT"
] | permissive | https://github.com/angeloxx/swagger2modsec | 77df8de8fc125b1c3026753be39158506ba85bdb | 7b23047162fe42125e4648c1593b19d75758493e | refs/heads/master | 2020-04-16T08:05:37.414134 | 2019-02-03T15:13:01 | 2019-02-03T15:13:01 | 165,411,416 | 7 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
import logging, sys, re, os, json, requests, io, yaml, coloredlogs
from py_essentials import hashing as hs
class Swagger:
def __init__(self, filename, logger):
self.endpoints = []
self.checksum = ""
self.filename = filename
self.logger = logger
try:
with open(self.filename) as json_file:
self.swagger = json.load(json_file)
except Exception as e:
self.logger.error("Error reading file: {0}".format(str(e)))
sys.exit(1)
self.checksum = hs.fileChecksum(self.filename, "sha256")
self.__getEndpoints()
def __getEndpoints(self):
for path in self.swagger["paths"]:
self.endpoints.append(path)
def getEndpointMethods(self, endpoint):
ret = []
if not endpoint in self.swagger["paths"]:
return ret
for method in self.swagger["paths"][endpoint]:
ret.append(method)
return ret
def getEndpoints(self):
return self.endpoints
def getEndpointURIParameterValidator(self, _endpoint, _parameter, _method = ""):
        # NOTE: the validator for a URI parameter SHOULD be the same across methods; the
        # script uses the FIRST match when _method is empty, and required=false is not supported
try:
for method in self.swagger["paths"][_endpoint]:
                if _method == "" or method == _method:
for parameterValue in self.swagger["paths"][_endpoint][method]["parameters"]:
if parameterValue["name"] == _parameter and "type" in parameterValue:
if parameterValue["type"] == "integer":
return "[0-9]+"
if parameterValue["type"] == "string":
return "[\w\s\d]+"
if parameterValue["type"] == "number":
if parameterValue["format"] == "double":
return "(-?)(0|([1-9][0-9]*))(\\.[0-9]+)?"
except Exception as e:
self.logger.error("getEndpointURIParameterValidator({0},{1},{2})".format(_endpoint, _parameter, _method))
self.logger.error("{0}".format(e))
sys.exit(1)
return ""
def getEndpointArguments(self, _endpoint, _method):
ret = []
for parameterValue in self.swagger["paths"][_endpoint][_method]["parameters"]:
ret.append(parameterValue["name"])
return ret
def endpointRequestURI(self, endpoint):
if not "{" in endpoint:
return "@streq {}".format(endpoint)
endpointURI = endpoint.replace("/","\/")
for parameter in re.findall("\{(\w+)\}",endpoint):
validator = self.getEndpointURIParameterValidator(endpoint,parameter)
endpointURI = endpointURI.replace("{0}".format("{"+parameter+"}"), validator)
return "^{}$".format(endpointURI)
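        # Illustrative transformation (hypothetical endpoint): for a swagger
        # path '/pets/{petId}' whose petId parameter is an integer, this
        # returns '^\/pets\/[0-9]+$'; a literal path like '/pets' instead
        # yields '@streq /pets' for ModSecurity.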
def exportYaml(self,yamlfile):
yamlfilecontent = yaml.dump(self.swagger, default_flow_style=False)
with open(yamlfile, 'w') as f:
f.write(yamlfilecontent) | UTF-8 | Python | false | false | 3,229 | py | 6 | swagger.py | 3 | 0.550015 | 0.543822 | 0 | 90 | 34.844444 | 117 |
sansom/stock | 11,871,289,622,485 | 9e04ba3b3efa12837cbe6b85be09ccf01bfc7fba | e44d8a6d6f914c632304a64c907c62dc53c0f2d1 | /bond_monitor.py | c512d30585689ebd7c10ce8b01e51befe4805e6d | [] | no_license | https://github.com/sansom/stock | abe489f269410a86e09774cdddfce12d8bdf631b | 5ae1aa20d488f4919132344b03af6cac67e495e2 | refs/heads/master | 2021-04-09T11:03:27.113259 | 2018-03-16T08:36:55 | 2018-03-16T08:36:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #-*-coding=utf-8
'''
Convertible bond monitoring
'''
import tushare as ts
from setting import get_engine
engine = get_engine('db_bond')
import pandas as pd
import datetime
class ConvertBond():
def __init__(self):
self.conn=ts.get_apis()
self.allBonds=ts.new_cbonds(pause=2)
self.onSellBond=self.allBonds.dropna(subset=['marketprice'])
self.today=datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
def stockPrice(self,code):
stock_df = ts.get_realtime_quotes(code)
price = float(stock_df['price'].values[0])
return price
def dataframe(self):
price_list=[]
for code in self.onSellBond['scode']:
price_list.append(self.stockPrice(code))
self.onSellBond['stock_price']=price_list
self.onSellBond['ratio'] = self.onSellBond['stock_price'] / self.onSellBond['convprice'] * 100 - self.onSellBond['marketprice']
self.onSellBond['Updated']=self.today
self.onSellBond.to_sql('tb_bond',engine,if_exists='replace')
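    # Note on the 'ratio' column: stock_price / convprice * 100 is the
    # conversion value of one bond under the par-100 convention, so a
    # positive ratio means converting to shares is worth more than the
    # bond's current market price (a negative conversion premium).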
def closed(self):
ts.close_apis(self.conn)
def calculation():
df=pd.read_sql('bond',engine,index_col='index')
df['ration']=df['stock_price']/df['convprice']*100-df['marketprice']
# print df[df['ration']>0]
df.to_sql('tb_bond',engine,if_exists='replace')
def main():
bond=ConvertBond()
bond.dataframe()
bond.closed()
# calculation()
if __name__=='__main__':
main()
print 'done' | UTF-8 | Python | false | false | 1,453 | py | 1 | bond_monitor.py | 1 | 0.629245 | 0.622315 | 0 | 48 | 29.083333 | 135 |
sport5/WarGame | 14,053,133,004,964 | 998c8e603c6b56f503a7dee9f4371dc801a744c6 | 17734df8eefe7b0581aa089324da2d62166047f8 | /weather.py | df5bcc439a065956ad697c952f69e8cb170d561b | [] | no_license | https://github.com/sport5/WarGame | 6859cc4f6d54c022ea50020c22cdae7171b5dd52 | b9666decf5e07df6b6eec608a4a1913345a7a85d | refs/heads/master | 2019-06-16T22:12:19.171924 | 2017-08-24T21:58:04 | 2017-08-24T21:58:04 | 99,438,966 | 0 | 0 | null | false | 2017-08-17T23:20:51 | 2017-08-05T17:52:31 | 2017-08-06T00:47:24 | 2017-08-17T23:20:06 | 978 | 0 | 0 | 1 | Python | null | null | import discord
from discord.ext.commands import Bot
import random
import time
import json
import asyncio
import datetime
client = Bot(command_prefix="!")
data = {}
@client.event
@asyncio.coroutine
def on_ready():
print('Logged in as')
print(client.user.name)
print(client.user.id)
print('------')
channel = "334855505325588481"
@client.command(pass_context = True)
@asyncio.coroutine
def Weather(ctx):
print("starting")
day = "day"
while True:
now = datetime.datetime.now()
if now.minute == 7:
with open("day.txt") as json_file:
data = json.load(json_file)
for p in data[day]:
cd = str(p['CurrentDay'])
weth = str(p['Weather'])
natu = str(p['NaturalDisaster'])
yield from client.send_message(client.get_channel('334786244343234572'), ("TODAY - " + cd + weth + natu))
yield from asyncio.sleep(62.0)
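# Polling note: the loop above wakes every 62 s and only posts while the
# wall-clock minute equals 7, i.e. roughly once per hour.  A sketch of
# sleeping straight to the next :07 instead of polling (hypothetical helper,
# not wired into the command above):
#   def seconds_until_minute(target=7):
#       now = datetime.datetime.now()
#       delta = (target - now.minute) % 60
#       return ((delta * 60 - now.second) % 3600) or 3600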
client.run('MzUwMzM2MTgzNjUxNDAxNzQ4.DICjgg.IhLnBDVlr9e7qwSNjGRR1PdxC7M')
| UTF-8 | Python | false | false | 1,083 | py | 14 | weather.py | 7 | 0.592798 | 0.550323 | 0 | 39 | 26.74359 | 117 |
johanthoren/check_sl_delay | 2,207,613,223,289 | b38ed5f615ad5b2731ad1d79519e41bda64dcfa6 | 34891339865080b36b8759d89b6395db2619fc52 | /check_sl_delay/__init__.py | ac8d153aa53c9b305ced48d16e6afeb517328b46 | [
"ISC"
] | permissive | https://github.com/johanthoren/check_sl_delay | 37c9476c5e63b7b01885b1cd8158077db65d735a | 173e4ef5ddf43acefbb6ba047894c934eae9509a | refs/heads/master | 2023-04-10T08:49:37.342665 | 2020-03-27T11:19:07 | 2020-03-27T11:19:07 | 248,331,735 | 0 | 0 | ISC | false | 2021-04-20T20:03:26 | 2020-03-18T20:07:26 | 2020-03-27T11:19:17 | 2021-04-20T20:03:26 | 115 | 0 | 0 | 1 | Python | false | false | """Top-level package for check_sl_delay."""
__author__ = """Johan Thorén"""
__email__ = 'johan@thoren.xyz'
__version__ = '0.1.3'
| UTF-8 | Python | false | false | 131 | py | 9 | __init__.py | 4 | 0.584615 | 0.561538 | 0 | 5 | 25 | 43 |
cln-m4rie/flask_boilerplate | 11,785,390,262,054 | 1489c9a9566d3e422959c080cdc216b16807bd22 | 0434513d81295d6f135b87ab7e41856015c5240f | /flask_boilerplate/config/log.py | 706139d84afe1101a70acde298f4de833de663dc | [] | no_license | https://github.com/cln-m4rie/flask_boilerplate | 06a941b5fb8fc28149c0cc61d5c1416e00e53435 | ff79d3aad4ef2f8450bdc43e6ff9742d8fa26238 | refs/heads/master | 2022-10-16T21:04:41.605047 | 2020-06-06T07:59:01 | 2020-06-06T07:59:01 | 265,758,017 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from typing import Dict
from ..const import Const
log_setting: Dict = {
"version": 1,
"formatters": {"file": {"format": "[%(asctime)s] %(levelname)s in %(module)s: %(message)s"}},
"handlers": {
"file": {
"class": "logging.handlers.TimedRotatingFileHandler",
"formatter": "file",
"filename": str(Const.PathConfig.log_path("app.log")),
"backupCount": 3,
"when": "D",
}
},
"root": {"level": "DEBUG", "handlers": ["file"]},
}
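# Typical usage (a sketch): hand this dict to the stdlib dictConfig,
#   import logging.config
#   logging.config.dictConfig(log_setting)
# after which logging.getLogger(__name__).debug(...) rotates daily into
# app.log, keeping 3 backups as configured above.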
| UTF-8 | Python | false | false | 520 | py | 14 | log.py | 11 | 0.515385 | 0.511538 | 0 | 18 | 27.888889 | 97 |
Kaixin1007/ECE143-Assignments | 163,208,782,325 | d4f167c669021769131fcb92837b2d81140333f4 | a5cbeb54ebbcd831d17b93a0031a6afee1e1dc6f | /solvefrob.py | 3340a0ffb59b72d90702dd5fcf0603436b31f8a0 | [] | no_license | https://github.com/Kaixin1007/ECE143-Assignments | d43faa835957d2d7095fe503fabfa80c0db2e0d3 | d9d58df334e85025e244d216946c15aef85492f6 | refs/heads/main | 2023-02-11T05:19:54.682408 | 2021-01-12T00:39:28 | 2021-01-12T00:39:28 | 328,828,719 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
def solvefrob(coefs,b):
'''
:param coefs: the list of a_i coefficients
    :param b: the target value (a positive integer)
    :return: list of tuples of non-negative integers x with sum(coefs[i]*x[i]) == b
'''
assert isinstance(coefs, list)
assert isinstance(b, int)
assert b > 0
for i in coefs:
assert i > 0
coefs = np.array(coefs)
b = np.array(b)
max_val = b/coefs
max_val = max_val.astype(int)
x_potential = []
for i in range(coefs.shape[0]):
arr = np.arange(max_val[i] + 1) * np.array([coefs[i]])
x_potential.append(arr)
    total = 0  # avoid shadowing the built-in sum
for i in range(coefs.shape[0]):
res = []
for j in range(coefs.shape[0]):
if j == i:
res.append(max_val[i] + 1)
else:
res.append(1)
        total = total + x_potential[i].reshape(tuple(res))
    # print(total)
    # print("-------------")
res = []
    for i in np.array(np.where(total == b)).T:
res.append(tuple(i))
return res
# print(solvefrob([1,2,3,5],10))
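# Broadcasting idea: each x_potential[i] is reshaped so its values vary only
# along axis i; summing the n reshaped arrays materialises every combination
# sum(coefs[i]*x[i]) on one n-dimensional grid, and np.where then picks the
# index tuples (which are exactly the x values) hitting b.  For example,
# solvefrob([1, 2, 3, 5], 10) includes (0, 1, 1, 1) since 2 + 3 + 5 == 10.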
| UTF-8 | Python | false | false | 1,029 | py | 33 | solvefrob.py | 31 | 0.485909 | 0.471331 | 0 | 40 | 23.625 | 62 |
myat90thu/mt_cbm_forex | 2,774,548,901,524 | bbe9d66d16698d47e1d57e9989b90ce575de2f48 | 2eb77c8f3360d990bcac2050aa7de2cd906e04ae | /mt_cbm_forex/__manifest__.py | 8129c4a1d0e946419d6474de76f3bdfd17a8a7da | [] | no_license | https://github.com/myat90thu/mt_cbm_forex | d00a0462c43d1569fddcd8c1ab61fded8545d2aa | 42ff93e0feb6b62b037e25511e6061a8bfe345ad | refs/heads/master | 2020-06-20T00:19:04.189312 | 2019-07-15T04:53:26 | 2019-07-15T04:53:26 | 196,926,061 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
{
'name': "MT CBM Forex Exchange Rate",
'summary': "Sync exchange rate from Central Bank of Myanmar",
'description': """
        - Manual sync (under Accounting > Configuration > CBM Forex)
        - If a rate already exists for the same date, it will be overridden.
""",
'author': "MYAT THU",
'version': '12.0.0.1.0',
'depends': ['base','account',],
'data': [
"security/ir.model.access.csv",
"views/views.xml",
"data/mt_cbm_forex_setup.xml",
],
}
| UTF-8 | Python | false | false | 532 | py | 2 | __manifest__.py | 2 | 0.56203 | 0.548872 | 0 | 18 | 28.555556 | 73 |
mrabadi/behavlyope | 5,454,608,496,590 | 9576a456d98136970388b9788b0ef9d93c8b8632 | 2c57e3451b455f99b18a813d5fdfc9c3259c6a46 | /deprecated/demo2.py | f24d6866717af7970130c47784b4f5903926cadb | [] | no_license | https://github.com/mrabadi/behavlyope | 438a0afc6d6c4411fd252dc7925ba3f2459e2e15 | 46a9c1f845464050bd5626808fde3a723c9077fb | refs/heads/master | 2020-03-16T17:21:49.119155 | 2019-03-07T05:36:48 | 2019-03-07T05:36:48 | 132,828,624 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import os
import datetime
import time
import random
import util
from util import defaults
from psychopy import visual, core, event
COLORS = ['red', 'green', 'yellow']
config = {}
if len(sys.argv) == 2:
config_file = sys.argv[1]
if not os.path.exists(config_file):
sys.exit('config file path %s does not exist'%config_file)
config = util.load_config(config_file)
params = defaults.load_params(config)
if not os.path.exists(params['save_location']):
os.makedirs(params['save_location'])
start_time = datetime.datetime.now().strftime('%Y%m%d.%H%M%S')
params['start_time'] = start_time
# Initialize data file writer
save_file_path = os.path.join(params['save_location'], start_time)
save_file = open(save_file_path, "w")
# Write params
save_file.write('params: ' + str(params) + '\n')
# Set up conditions
class condition(object):
def __init__(self, x, y, color=[1,1,1]):
self.x = x
self.y = y
self.color = color
conditions = []
fixation_cross_size = params['fixation_cross_size']
stimulus_radius = params['stimulus_radius']
max_x = params['x_size'] - stimulus_radius
min_x = -max_x
max_y = params['y_size'] - stimulus_radius
min_y = -max_y
grid_size = params['grid_size']
for color in COLORS:
x = min_x
while x <= (max_x - grid_size):
y = min_y
while y <= (max_y - grid_size):
if not (-fixation_cross_size < x < fixation_cross_size or -fixation_cross_size < y < fixation_cross_size):
for n in range(params['n_trials_per_location']):
conditions.append(condition(x, y, color))
y += grid_size
x += grid_size
random.shuffle(conditions) # shuffle the conditions
# initialize window
# TODO: This uses the 'testMonitor' and needs to be fixed!
win = visual.Window([params['screen_x'],params['screen_y']], monitor="testMonitor", units="deg", screen=params['screen_number'], rgb=params['screen_rgb'])
fixation = visual.GratingStim(win, tex=None, mask='cross', sf=0, size=fixation_cross_size,
name='fixation', autoLog=False)
trial_number = 0
num_conditions = len(conditions)
stimulus = visual.Circle(win, radius=stimulus_radius, fillColor='white')
clock = core.Clock()
quit = False
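# Small helper (added for clarity): maps the pressed keys to a response
# label, mirroring the original inline key handling ('f' = not seen,
# 'q' = quit; anything else is invalid).
def parse_response(keys):
    if 'j' in keys:
        return 'red'
    if 'k' in keys:
        return 'blue'
    if 'l' in keys:
        return 'purple'
    if 'f' in keys:
        return 'not_seen'
    if 'q' in keys:
        return 'quit'
    return 'invalid'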
while trial_number < num_conditions and not quit:
trial_condition = conditions[trial_number]
presentation_time = random.choice(params['presentation_time'])
isi = random.choice(params['isi_ms']) / 1000.
trial_data = {'trial_num': trial_number,
'x': trial_condition.x,
'y': trial_condition.y,
'color': trial_condition.color,
'isi': isi,
'presentation_time': presentation_time,
'catch_trial': False}
allKeys = event.waitKeys()
# Exit if q pushed
if 'q' in allKeys:
break
# present fixation
fixation.draw()
win.flip()
# wait isi_ms before presenting stimulus
core.wait(isi)
# present stimulus if not a catch trial
present_stimulus = True
if random.random() < params['prob_catch_trial']:
trial_data['catch_trial'] = True
present_stimulus = False
else:
trial_number += 1 # only increment trial if stimulus presented
stim_start = time.time()
response = None
while (time.time() - stim_start) < (presentation_time / 1000.) and response is None:
fixation.draw()
if present_stimulus:
stimulus.setPos([trial_condition.x, trial_condition.y])
stimulus.lineColor = trial_condition.color
stimulus.fillColor = trial_condition.color
stimulus.draw()
win.flip()
        allKeys = event.getKeys()
        if len(allKeys) > 0:
            response = parse_response(allKeys)
            if response == 'quit':
                quit = True
        event.clearEvents()
while (time.time() - stim_start) < (params['timeout_ms'] / 1000.) and response is None:
fixation.draw()
win.flip()
        allKeys = event.getKeys()
        if len(allKeys) > 0:
            response = parse_response(allKeys)
            if response == 'quit':
                quit = True
        event.clearEvents()
print('\a')
fixation.draw()
win.flip()
trial_data['response'] = response if response is not None else 'TIMEOUT'
event.clearEvents()
save_file.write(str(trial_data) + '\n')
save_file.close()
| UTF-8 | Python | false | false | 5,178 | py | 14 | demo2.py | 12 | 0.585747 | 0.581112 | 0 | 159 | 31.559748 | 154 |
divinorum-webb/tableau-api-lib | 13,967,233,693,413 | bdc5fae407a3078523d36cc457f686474bb63f9c | 5c3ab4a045f6df4edb647e509ca7f79c9268a5a6 | /src/tableau_api_lib/utils/pagination.py | 9d1abfb60387947a7f85a6d52ded099357bbca2c | [
"MIT"
] | permissive | https://github.com/divinorum-webb/tableau-api-lib | d1bafa5c876ce26c5884b6b1bc53c0969565c52b | 99a1d3c2a4d7dc20d1c4a619d58cc756ace1db41 | refs/heads/master | 2023-04-04T22:52:37.753153 | 2022-10-15T11:51:45 | 2022-10-15T11:51:45 | 203,291,381 | 84 | 29 | MIT | false | 2023-03-29T13:40:22 | 2019-08-20T03:18:01 | 2023-03-22T17:50:50 | 2022-10-15T11:51:45 | 23,647 | 71 | 27 | 13 | Python | false | false | from types import MethodType
from typing import Any, Dict, List, Optional, Tuple, Union
from typeguard import typechecked
from tableau_api_lib.exceptions import ContentNotFound, PaginationError
def get_page_attributes(query: dict, query_func: MethodType) -> Tuple:
"""Returns page attributes (pageNumber, pageSize, totalAvailable) from a REST API paginated response.
Args:
query: The results of the GET request query, containing paginated data.
query_func: A TableauServerConnection method that will issue a GET request to Tableau Server.
Returns:
A tuple describing the active page number, the page size, and the total items available.
Raises:
PaginationError: An error triggered when pagination is attempted on a non-paginated object.
"""
try:
pagination = query["pagination"]
page_number = int(pagination["pageNumber"])
page_size = int(pagination["pageSize"])
total_available = int(pagination["totalAvailable"])
return page_number, page_size, total_available
except KeyError:
raise PaginationError(query_func)
def extract_pages(
query_func: object,
content_id: Optional[str] = None,
*,
starting_page: int = 1,
page_size: int = 1000,
limit: Optional[int] = None,
parameter_dict: Optional[Dict[str, Any]] = None,
) -> Union[List[Dict[str, Any]], Dict]:
"""Extracts all available pages from a paginated Tableau Server API response.
Args:
query_func: A function that will issue a GET request via the Tableau REST API.
content_id: The luid for the desired content [group_id, site_id, etc].
starting_page: The page number to start on. Defaults to the first page (page_number = 1).
page_size: The maximum number of objects (results) to be returned in any given page.
limit: The maximum number of objects to return. By default there is no limit.
parameter_dict: A dict whose values are appended to the REST API URL endpoint as URL parameters.
Returns:
A list of JSON / dicts containing the contents of the paginated items.
"""
parameter_dict = parameter_dict or {}
extracted_pages = []
page_number = starting_page
extracting = True
while extracting:
parameter_dict.update({"pageNumber": f"pageNumber={page_number}", "pageSize": f"pageSize={page_size}"})
query_results = process_query(query_func=query_func, content_id=content_id, parameter_dict=parameter_dict)
page_number, page_size, total_available = get_page_attributes(query=query_results, query_func=query_func)
if total_available == 0:
return [{}]
extracted_pages, extracting, page_number = update_pagination_params(
query_results, extracted_pages, page_number, page_size, total_available, limit, extracting
)
return extracted_pages
@typechecked
def process_query(query_func: MethodType, content_id: Optional[str], parameter_dict: Dict[str, Any]) -> Dict[Any, Any]:
"""Processes a dynamic GET request via the Tableau REST API.
Some of the tableau-api-lib methods require a content ID while others will throw an error if an unexpected
content ID value is passed in when the function is invoked. This function handles both scenarios dynamically.
Args:
query_func: The tableau-api-lib method that will be invoked.
content_id: The luid for the content variety being queried.
parameter_dict: The dict describing optional additional query parameters.
Returns:
The JSON / dict response from the Tableau Server whose content is being queried.
Raises:
PaginationError: An error triggered when pagination is attempted on a non-paginated object.
"""
if content_id:
try:
query_results = query_func(content_id, parameter_dict=parameter_dict).json()
except TypeError:
raise PaginationError(func=query_func)
else:
try:
query_results = query_func(parameter_dict=parameter_dict).json()
except TypeError:
raise PaginationError(func=query_func)
return query_results
def update_pagination_params(
query_results: Dict[str, Any],
extracted_pages: Any,
page_number: int,
page_size: int,
total_available: int,
    limit: Optional[int],
extracting: bool,
) -> Tuple:
"""Updates pagination parameters when iterating through all available pages for a variety of content.
Args:
query_results: The JSON / dict response received from Tableau Server following a GET request.
extracted_pages: The pages that have been extracted from the REST API request.
        page_number: The page number of the most recently fetched page.
        page_size: The maximum number of objects (results) returned per page.
        total_available: The total number of objects available on the server.
        limit: The maximum number of objects to return; None means no limit.
        extracting: True while pagination is still in progress.
Returns:
Pagination parameters describing active state of the pagination process.
Raises:
ContentNotFound: An exception thrown when no content of the variety queried exists on the Tableau Server.
"""
try:
outer_key = [key for key in query_results.keys() if key != "pagination"].pop()
inner_key = list(query_results[outer_key].keys()).pop()
extracted_pages += query_results[outer_key][inner_key]
except IndexError:
raise ContentNotFound()
    if limit and limit <= len(extracted_pages):
        # The limit has been reached: trim the results and stop paginating.
        extracted_pages = extracted_pages[:limit]
        extracting = False
    elif total_available <= (page_number * page_size):
        extracting = False
    else:
        page_number += 1
return extracted_pages, extracting, page_number
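# Minimal usage sketch (assumes an authenticated TableauServerConnection
# `conn` whose paginated GET methods, e.g. `conn.query_sites`, return
# responses shaped like the ones processed above):
#
#     all_sites = extract_pages(conn.query_sites, page_size=100, limit=250)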
| UTF-8 | Python | false | false | 5,636 | py | 128 | pagination.py | 125 | 0.679737 | 0.678318 | 0 | 143 | 38.412587 | 119 |
davidmugambI/form-with-etra-fields | 3,178,275,850,525 | 2c4e8b9f5baf5285f52cf323da7a8edef646eb39 | c283c7fa120b931969af0ebb482e29eb5b14ff71 | /webapp/migrations/0004_auto_20190418_0801.py | e18a028937e0d3982891844d27cba82c02dec02f | [] | no_license | https://github.com/davidmugambI/form-with-etra-fields | d6751e0a628dd1a2e63be4f2034aeab90fb13254 | b38ae585d52d94166052528ddf8b39a95eef2ad1 | refs/heads/master | 2020-05-17T14:47:49.223995 | 2019-04-27T13:04:05 | 2019-04-27T13:04:05 | 183,773,331 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.2 on 2019-04-18 05:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('webapp', '0003_auto_20190328_1204'),
]
operations = [
migrations.AlterField(
model_name='zalegodet',
name='gender',
field=models.CharField(max_length=30),
),
migrations.AlterField(
model_name='zalegodet',
name='lang',
field=models.CharField(max_length=30),
),
]
| UTF-8 | Python | false | false | 540 | py | 12 | 0004_auto_20190418_0801.py | 7 | 0.562963 | 0.5 | 0 | 23 | 22.478261 | 50 |
cpiggott/python-class | 5,196,910,440,300 | 529f770fa744be8036556cd4239ff4d303412443 | b70e79fb7c89cfb915b13526155e21aa2ba6ab1a | /day2/Complete/ex5.py | 6a50aeca768c6b142360a4fb249b88c1e9d772a0 | [] | no_license | https://github.com/cpiggott/python-class | 8f6b85500b994c10188c9014c46716c554256954 | af859b0fa512d517d2c53f67855122d0f0a8e76c | refs/heads/master | 2020-05-18T05:51:37.637198 | 2015-10-07T20:39:58 | 2015-10-07T20:39:58 | 41,827,856 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | my_name = 'Chris Piggott'
my_age = 24
my_height = 70.5 # inches
my_weight = 220 # lbs
my_eyes = 'Blue'
my_teeth = 'White'
my_hair = 'Brown'
print "Let's talk about %s." % my_name
print "He's %f inches tall." % my_height
print "He's %d pounds heavy." % my_weight
print "He's %d years old." % my_age
print "He's got %s eyes and %s hair." % (my_eyes, my_hair)
print "If I add %d, %d, and %d I get %d." % (
my_age, my_height, my_weight, my_age + my_height + my_weight)
| UTF-8 | Python | false | false | 472 | py | 3 | ex5.py | 3 | 0.616525 | 0.599576 | 0 | 17 | 26.764706 | 65 |
kkujansuu/stk | 5,634,997,137,977 | 0013d1f81ebadc52624b37e50709154d8f9e18c4 | 761814704c0c5113f4a332461c6300f5feb7e876 | /bp/gedcom/models/gedcom_utils.py | a24141bcdf7fcdad06e7b0b9e00b089655116c1c | [] | no_license | https://github.com/kkujansuu/stk | 34c6df4e2d516b6f9a3ebd902d1189e5760f6424 | 0f8d6ba035e3cca8dc756531b7cc51029a549a4f | refs/heads/main | 2023-04-04T07:14:02.571180 | 2021-04-18T08:40:33 | 2021-04-18T08:40:33 | 359,075,073 | 0 | 0 | null | false | 2021-04-18T07:59:17 | 2021-04-18T07:30:29 | 2021-04-18T07:34:33 | 2021-04-18T07:59:17 | 0 | 0 | 0 | 0 | Python | false | false | # Isotammi Genealogical Service for combining multiple researchers' results.
# Created in co-operation with the Genealogical Society of Finland.
#
# Copyright (C) 2016-2021 Juha Mäkeläinen, Jorma Haapasalo, Kari Kujansuu,
# Timo Nallikari, Pekka Valta
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Utilities for processing gedcom data
"""
import importlib
import logging
import os
import traceback
from flask import session
from flask_security import current_user
from flask_babelex import _
from werkzeug.utils import secure_filename
from models import util # , syslog
from bp.gedcom import GEDCOM_DATA, GEDCOM_APP # , APP_ROOT, ALLOWED_EXTENSIONS
from .. import transformer
# Default document server
DOC_SERVER = "http://mwikitammi.paas.datacenter.fi/index.php"
# --------------------- GEDCOM functions ------------------------
def init_log(logfile):
""" Define log file and save one previous log. """
    try:
        # Check for an existing log with os.path.exists; os.open would leak
        # a file descriptor and raise when the file is missing.
        if os.path.exists(logfile):
            os.rename(logfile, logfile + "~")
    except OSError:
        pass
logging.basicConfig(
filename=logfile, level=logging.INFO, format="%(levelname)s:%(message)s"
)
def history_init(gedcom_fname):
""" Initialize history file. """
history_file_name = gedcom_fname + "-history"
open(history_file_name, "w").write(
"{}: Uploaded {}\n".format(util.format_timestamp(), gedcom_fname)
)
def history_append(gedcom_fname, line):
""" Add a line to history file. """
history_file_name = gedcom_fname + "-history"
open(history_file_name, "a").write("{}\n".format(line))
def history_append_args(args):
""" Add given arguments to history file. """
history_file_name = args.input_gedcom + "-history"
with open(history_file_name, "a") as f:
for name, value in sorted(vars(args).items()):
f.write("- {}={}\n".format(name, value))
f.write("- User={}\n".format(current_user.username))
def get_info(input_gedcom, enc):
"""
Read gedcom HEAD info and count level 0 items.
Uses the transformation framework.
"""
class Options:
display_changes = False
encoding = enc
class Nullinfo:
pass
from .. import gedcom_info_parser
try:
t = transformer.Transformer(
transform_module=gedcom_info_parser,
display_callback=display_changes,
options=Options(),
)
t.transform_file(input_gedcom)
return t.transformation.info
    except Exception:  # pragma: no cover
traceback.print_exc()
return Nullinfo()
def analyze(input_gedcom, enc):
""" Get statistics of given gedcom file. """
class Options:
display_changes = False
encoding = enc
class Nullinfo:
pass
from .. import gedcom_analyze
try:
t = transformer.Transformer(
transform_module=gedcom_analyze,
display_callback=display_changes,
options=Options(),
)
t.transform_file(input_gedcom)
return t.transformation.info
    except Exception:
traceback.print_exc()
return "error"
def read_gedcom(filename):
""" Return all gedcom file rows using default or ISO8859-1 encoding. """
try:
return open(filename).readlines()
except UnicodeDecodeError:
return open(filename, encoding="ISO8859-1").readlines()
def get_gedcom_user():
""" Return current user name. """
return session.get("gedcom_user", current_user.username)
def get_gedcom_folder(username=None):
""" Return user's gedcom data directory. """
if username is None:
username = get_gedcom_user()
return os.path.join(GEDCOM_DATA, username)
def gedcom_fullname(gedcom):
""" Return gedcom filename. """
return os.path.join(get_gedcom_folder(), secure_filename(gedcom))
def get_metadata(gedcom):
""" Return given gedcom metadata from *-meta file. """
gedcom_folder = get_gedcom_folder()
gedcom_fullname = os.path.join(gedcom_folder, secure_filename(gedcom))
return get_metadata2(gedcom_fullname)
def get_metadata2(gedcom_fullname):
""" Return given gedcom file metadata from corresponding meta file. """
try:
metaname = gedcom_fullname + "-meta"
return eval(open(metaname).read())
except FileNotFoundError:
return {}
def save_metadata(gedcom, metadata):
""" Save updated or new gedcom metadata. """
gedcom_folder = get_gedcom_folder()
metaname = os.path.join(gedcom_folder, secure_filename(gedcom) + "-meta")
open(metaname, "w").write(repr(metadata))
def get_transforms():
""" Search available transformations and return list of their properties. """
class Transform:
pass
trans_dir = os.path.join(GEDCOM_APP, "transforms")
names = sorted(
[
name
for name in os.listdir(trans_dir)
if name.endswith(".py") and not name.startswith("_")
]
)
transforms = []
for name in names:
t = Transform()
t.name = name
t.modname = name[0:-3]
transformer = importlib.import_module("bp.gedcom.transforms." + t.modname)
# have to reload because the user may have changed language -> name and docline may change
importlib.reload(transformer)
doc = transformer.__doc__
if doc:
t.doc = doc
t.docline = doc.strip().splitlines()[0]
t.docline = _(t.docline)
else:
t.doc = ""
t.docline = ""
if hasattr(transformer, "docline"):
t.docline = transformer.docline
doclink = ""
if hasattr(transformer, "doclinks"):
lang = session.get("lang", "")
doclink = transformer.doclinks.get(lang, "")
if not doclink and hasattr(transformer, "doclink"):
doclink = transformer.doclink
if doclink.startswith("/"):
doclink = DOC_SERVER + doclink
t.doclink = doclink
t.displayname = transformer.name if hasattr(transformer, "name") else t.modname
t.version = getattr(transformer, "version", "")
transforms.append(t)
# yield t
return sorted(transforms, key=lambda t: t.displayname)
def list_gedcoms(username):
""" Search transformations and return list of their names and metadata. """
gedcom_folder = get_gedcom_folder(username)
try:
names = sorted(
[
name
for name in os.listdir(gedcom_folder)
if name.lower().endswith(".ged")
],
key=lambda s: s.lower(),
)
    except OSError:
names = []
files = []
class File:
pass
for name in names:
f = File()
f.name = name
gedcom_fullname = os.path.join(gedcom_folder, name)
f.metadata = get_metadata2(gedcom_fullname)
if username == current_user.username or f.metadata.get("admin_permission"):
files.append(f)
return files
def removefile(fname):
""" Remove file. """
try:
os.remove(fname)
except FileNotFoundError:
pass
def display_changed_lines(old_lines, new_lines, linenum=None):
"""Print diff list of two line sets as html in user language strating from linenum."""
if old_lines is None:
print("<div><b>" + _("Added:") + "</b></div><gedcom-text>", end="")
for line in new_lines:
print(line)
print("</gedcom-text>")
print("<hr>")
return
if not new_lines:
print("<div><b>" + _("Deleted:") + "</b></div><gedcom-replaced>", end="")
if linenum:
print(
f"{_('starting from line ')}<a href='#' class='gedcomlink'>{linenum}</a>"
)
for line in old_lines:
print(line)
print("</gedcom-replaced>")
print("<hr>")
return
print("<div><b>" + _("Replaced:") + "</b>")
if linenum:
print(f"{_('starting from line ')}<a href='#' class='gedcomlink'>{linenum}</a>")
print("</div><gedcom-replaced>", end="")
for line in old_lines:
print(line)
print("</gedcom-replaced>")
print("<div><b>" + _("With:") + "</b></div><gedcom-text>", end="")
for line in new_lines:
print(line)
print("</gedcom-text>")
print()
print("<hr>")
def print_differences(lines1, lines2, linenum):
# remove duplicates from the beginning and from the end
while lines1 and lines2:
if lines1[0] != lines2[0]:
break
lines1 = lines1[1:]
lines2 = lines2[1:]
if linenum is not None:
linenum += 1
while lines1 and lines2:
if lines1[-1] != lines2[-1]:
break
lines1 = lines1[:-1]
lines2 = lines2[:-1]
display_changed_lines(lines1, lines2, linenum)
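# Illustrative example: with lines1 = ["0 HEAD", "1 SOUR A"] and
# lines2 = ["0 HEAD", "1 SOUR B"], the shared first line is trimmed,
# linenum advances from 1 to 2, and only '1 SOUR A' -> '1 SOUR B'
# is reported as replaced.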
def display_changes(lines, item, linenum=None):
"""Print diff list of two line sets as html in user language?"""
class Out:
def __init__(self):
self.lines = []
def emit(self, line):
self.lines.append(line)
if not item:
new_lines = []
else:
out = Out()
if isinstance(item, list):
for it in item:
it.print_items(out)
else:
item.print_items(out)
new_lines = out.lines
print_differences(lines, new_lines, linenum)
| UTF-8 | Python | false | false | 10,058 | py | 218 | gedcom_utils.py | 147 | 0.598449 | 0.592979 | 0 | 342 | 28.403509 | 98 |
tuanvo-ini/pom-pytest | 1,958,505,121,260 | 4056c82b8fd7650e13e72b8b0a19d444c41764e3 | aa8fee4fbcc1793de3d9639a7ed9e8a459991e69 | /test_scritps/conftest.py | 3487c2bab7e427c7ed759f38ecb55ee748b58736 | [] | no_license | https://github.com/tuanvo-ini/pom-pytest | 42f7da51040f5f2df09e9a669a1cdc8dd32c3f9a | 3be8cb5bbc24618a4256216b501e89082d53e70c | refs/heads/master | 2023-02-15T02:25:31.854076 | 2021-01-09T13:18:27 | 2021-01-09T13:18:27 | 324,697,000 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
from datetime import datetime
import pytest
import os
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from config.test_config import TestConfig
@pytest.fixture(scope="class")
def init_driver(request):
"""
"""
chrome_options = Options()
# chrome_options.add_argument('--headless')
# chrome_options.add_argument('--no-sandbox')
# chrome_options.add_argument('--disable-dev-shm-usage')
driver = webdriver.Chrome(options=chrome_options, executable_path=TestConfig.CHROME_EXECUTABLE_PATH)
driver.maximize_window()
driver.get(TestConfig.LOGIN_PAGE_URL)
request.cls.driver = driver
yield driver
driver.quit()
# set up a hook to be able to check if a test has failed
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
"""
"""
# execute all other hooks to obtain the report object
outcome = yield
rep = outcome.get_result()
# set a report attribute for each phase of a call, which can
# be "setup", "call", "teardown"
setattr(item, "rep_" + rep.when, rep)
# check if a test has failed
@pytest.fixture(scope="function", autouse=True)
def test_failed_check(request):
yield
# request.node is an "item" because we use the default
# "function" scope
if request.node.rep_setup.failed:
print("setting up a test failed!", request.node.nodeid)
elif request.node.rep_setup.passed:
if request.node.rep_call.failed:
driver = request.node.funcargs['init_driver']
take_screenshot(driver, request.node.nodeid)
print("executing test failed", request.node.nodeid)
def take_screenshot(driver, nodeid):
time.sleep(1)
file_name = f'{nodeid}_{datetime.today().strftime("%Y_%m_%d_%H_%M")}.png'
file_name = file_name.replace(".py", "")
file_name = file_name.replace("/","_")
file_name = file_name.replace("::","__")
print("Filename: {}".format(file_name))
driver.save_screenshot('screenshots/' + file_name) | UTF-8 | Python | false | false | 2,056 | py | 19 | conftest.py | 15 | 0.677529 | 0.677043 | 0 | 64 | 31.140625 | 104 |
Hackforid/Ebooking | 16,045,997,854,172 | 2ee9a450a94851f47e1d4282cc0f0fbaa0bb2844 | ef848dd624c905de03dcf92657f6ff25444ca591 | /models/inventory.py | f7eca82e2b80f3bf88c379eded0fae0b9286f8cb | [] | no_license | https://github.com/Hackforid/Ebooking | 7db22cc6d7953890a9db38a46750549d77b4ded4 | fe104b7402a1482e3603b18b12ba4350cfdf4a72 | refs/heads/master | 2021-01-11T10:23:27.728953 | 2015-08-04T06:10:44 | 2015-08-04T06:10:44 | 78,326,519 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import datetime
from tornado.util import ObjectDict
from models import Base
from sqlalchemy import Column
from sqlalchemy.dialects.mysql import BIT, INTEGER, VARCHAR, DATETIME, BIGINT
class InventoryModel(Base):
__tablename__ = 'inventory'
__table_args__ = {
'mysql_charset': 'utf8', 'mysql_engine': 'InnoDB'}
id = Column(INTEGER, primary_key=True, autoincrement=True)
merchant_id = Column("merchantId", INTEGER, nullable=False, default=0)
hotel_id = Column("hotelId", INTEGER, nullable=False, default=0)
roomtype_id = Column("roomTypeId", INTEGER, nullable=False, default=0)
base_hotel_id = Column("baseHotelId", INTEGER, nullable=False, default=0)
base_roomtype_id = Column("baseRoomTypeId", INTEGER, nullable=False, default=0)
month = Column(INTEGER, nullable=False, default=0)
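    # Each dayN column stores two pipe-separated inventory counts that the
    # methods below treat as the "auto" and "manual" pools; "-1|-1" marks
    # an unset day. See get_day(), deduct_val_by_day() and set_val_by_day().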
day1 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day2 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day3 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day4 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day5 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day6 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day7 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day8 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day9 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day10 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day11 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day12 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day13 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day14 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day15 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day16 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day17 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day18 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day19 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day20 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day21 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day22 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day23 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day24 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day25 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day26 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day27 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day28 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day29 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day30 = Column(VARCHAR(50), nullable=False, default="-1|-1")
day31 = Column(VARCHAR(50), nullable=False, default="-1|-1")
is_online = Column('isOnline', BIT, nullable=False, default=0)
is_delete = Column('isDelete', BIT, nullable=False, default=0)
@classmethod
def get_by_id(cls, session, id):
return session.query(InventoryModel)\
.filter(InventoryModel.id == id)\
.filter(InventoryModel.is_delete == 0)\
.first()
@classmethod
def get_by_merchant_id_and_hotel_id(cls, session, merchant_id, hotel_id):
return session.query(InventoryModel)\
.filter(InventoryModel.merchant_id == merchant_id)\
.filter(InventoryModel.hotel_id == hotel_id)\
.filter(InventoryModel.is_delete == 0)\
.all()
@classmethod
def get_by_roomtype_id_and_date(cls, session, roomtype_id, year, month):
month = InventoryModel.combin_year_month(year, month)
return session.query(InventoryModel)\
.filter(InventoryModel.roomtype_id == roomtype_id)\
.filter(InventoryModel.month == month)\
.filter(InventoryModel.is_delete == 0)\
.all()
@classmethod
def get_by_merchant_id_and_hotel_id_and_date(cls, session, merchant_id, hotel_id, year, month):
month = InventoryModel.combin_year_month(year, month)
return session.query(InventoryModel)\
.filter(InventoryModel.merchant_id == merchant_id)\
.filter(InventoryModel.hotel_id == hotel_id)\
.filter(InventoryModel.month == month)\
.filter(InventoryModel.is_delete == 0)\
.all()
@classmethod
def get_by_merchant_id_and_hotel_id_and_days(cls, session, merchant_id, hotel_id, days):
months = [InventoryModel.combin_year_month(day[0], day[1]) for day in days]
return session.query(InventoryModel)\
.filter(InventoryModel.merchant_id == merchant_id)\
.filter(InventoryModel.hotel_id == hotel_id)\
.filter(InventoryModel.month.in_(months))\
.filter(InventoryModel.is_delete == 0)\
.all()
@classmethod
def get_by_merchant_hotel_roomtype_date(cls, session, merchant_id, hotel_id, roomtype_id, year, month):
month = InventoryModel.combin_year_month(year, month)
return session.query(InventoryModel)\
.filter(InventoryModel.merchant_id == merchant_id)\
.filter(InventoryModel.hotel_id == hotel_id)\
.filter(InventoryModel.roomtype_id == roomtype_id)\
.filter(InventoryModel.month == month)\
.filter(InventoryModel.is_delete == 0)\
.all()
@classmethod
def get_by_merchant_hotel_roomtype_dates(cls, session, merchant_id, hotel_id, roomtype_id, days):
'''
days as [(year, months),]
'''
months = [InventoryModel.combin_year_month(day[0], day[1]) for day in days]
return session.query(InventoryModel)\
.filter(InventoryModel.merchant_id == merchant_id)\
.filter(InventoryModel.hotel_id == hotel_id)\
.filter(InventoryModel.roomtype_id == roomtype_id)\
.filter(InventoryModel.month.in_(months))\
.filter(InventoryModel.is_delete == 0)\
.all()
@classmethod
def get_by_merchant_and_dates(cls, session, merchant_id, days):
'''
days as [(year, months),]
'''
months = [InventoryModel.combin_year_month(day[0], day[1]) for day in days]
return session.query(InventoryModel)\
.filter(InventoryModel.merchant_id == merchant_id)\
.filter(InventoryModel.month.in_(months))\
.filter(InventoryModel.is_delete == 0)\
.all()
@classmethod
def get_by_roomtype_and_dates(cls, session, roomtype_id, days):
'''
days as [(year, months),]
'''
months = [InventoryModel.combin_year_month(day[0], day[1]) for day in days]
return session.query(InventoryModel)\
.filter(InventoryModel.roomtype_id == roomtype_id)\
.filter(InventoryModel.month.in_(months))\
.filter(InventoryModel.is_delete == 0)\
.all()
@classmethod
def delete_by_roomtype_id(cls, session, roomtype_id, commit=True):
session.query(InventoryModel)\
.filter(InventoryModel.roomtype_id == roomtype_id)\
.filter(InventoryModel.is_delete == 0)\
.update({'isDelete': 1})
if commit:
session.commit()
else:
session.flush()
@classmethod
def insert_by_year(cls, session, merchant_id, hotel_id, roomtype_id, base_hotel_id, base_roomtype_id, year):
inventories = []
for month in range(1,13):
inventory = cls.get_by_merchant_hotel_roomtype_date(session, merchant_id, hotel_id, roomtype_id, year, month)
if inventory:
continue
else:
_month = InventoryModel.combin_year_month(year, month)
inventory = InventoryModel(merchant_id=merchant_id, hotel_id=hotel_id, roomtype_id=roomtype_id, base_hotel_id=base_hotel_id, base_roomtype_id=base_roomtype_id, month=_month)
inventories.append(inventory)
session.add_all(inventories)
session.commit()
return inventories
@classmethod
def insert_in_months(cls, session, merchant_id, hotel_id, roomtype_id, base_hotel_id, base_roomtype_id, months, commit=True):
inventories = []
dates= cls.get_months(months)
for date in dates:
inventory = cls.get_by_merchant_hotel_roomtype_date(session, merchant_id, hotel_id, roomtype_id, date[0], date[1])
if inventory:
continue
else:
_month = InventoryModel.combin_year_month(date[0], date[1])
inventory = InventoryModel(merchant_id=merchant_id, hotel_id=hotel_id, roomtype_id=roomtype_id, month=_month, base_hotel_id=base_hotel_id, base_roomtype_id=base_roomtype_id)
inventories.append(inventory)
session.add_all(inventories)
if commit:
session.commit()
else:
session.flush()
return inventories
@classmethod
def insert_in_four_month(cls, session, merchant_id, hotel_id, roomtype_id, base_hotel_id, base_roomtype_id):
inventories = []
dates= cls.get_months(4)
for date in dates:
inventory = cls.get_by_merchant_hotel_roomtype_date(session, merchant_id, hotel_id, roomtype_id, date[0], date[1])
if inventory:
continue
else:
_month = InventoryModel.combin_year_month(date[0], date[1])
inventory = InventoryModel(merchant_id=merchant_id, hotel_id=hotel_id, roomtype_id=roomtype_id, month=_month, base_hotel_id=base_hotel_id, base_roomtype_id=base_roomtype_id)
inventories.append(inventory)
session.add_all(inventories)
session.commit()
return inventories
@classmethod
def insert_all_in_months(cls, session, roomtypes, months):
inventories = []
dates= cls.get_months(months)
for roomtype in roomtypes:
for date in dates:
inventory = cls.get_by_roomtype_id_and_date(session, roomtype.id, date[0], date[1])
if inventory:
continue
else:
_month = InventoryModel.combin_year_month(date[0], date[1])
inventory = InventoryModel(merchant_id=roomtype.merchant_id, hotel_id=roomtype.hotel_id, roomtype_id=roomtype.id, month=_month, base_hotel_id=roomtype.base_hotel_id, base_roomtype_id=roomtype.base_roomtype_id)
inventories.append(inventory)
session.add_all(inventories)
session.commit()
return inventories
@classmethod
def insert_all_in_four_month(cls, session, roomtypes):
inventories = []
dates= cls.get_months(4)
for roomtype in roomtypes:
for date in dates:
inventory = cls.get_by_roomtype_id_and_date(session, roomtype.id, date[0], date[1])
if inventory:
continue
else:
_month = InventoryModel.combin_year_month(date[0], date[1])
inventory = InventoryModel(merchant_id=roomtype.merchant_id, hotel_id=roomtype.hotel_id, roomtype_id=roomtype.id, month=_month, base_hotel_id=roomtype.base_hotel_id, base_roomtype_id=roomtype.base_roomtype_id)
inventories.append(inventory)
session.add_all(inventories)
session.commit()
return inventories
@classmethod
def get_by_room_ids_and_months(cls, session, roomtype_ids, months):
return session.query(InventoryModel)\
.filter(InventoryModel.roomtype_id.in_(roomtype_ids))\
.filter(InventoryModel.month.in_(months))\
.filter(InventoryModel.is_delete == 0)\
.all()
@classmethod
def complete_in_four_months(cls, session, roomtype_ids):
dates = cls.get_months(4)
months = [cls.combin_year_month(date[0], date[1]) for date in dates]
inventories = cls.get_by_room_ids_and_months(session, roomtype_ids, months)
need_complete_roomtype_ids = []
for roomtype_id in roomtype_ids:
for month in months:
for inventory in inventories:
if inventory.roomtype_id == roomtype_id and inventory.month == month:
break
                else:
                    need_complete_roomtype_ids.append(roomtype_id)
        return need_complete_roomtype_ids
@classmethod
def get_months(cls, n):
today = datetime.date.today()
year, month = today.year, today.month
dates = []
for i in range(n):
if month > 12:
month = month - 12
year = year + 1
dates.append((year, month))
month = month + 1
return dates
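    # Example: starting in November 2015, get_months(4) yields
    # [(2015, 11), (2015, 12), (2016, 1), (2016, 2)] -- the month counter
    # rolls over into the next year.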
@classmethod
def update(cls, session, merchant_id, hotel_id, roomtype_id, year, month, day, price_type, val):
        # get_by_merchant_hotel_roomtype_date matches this argument list and
        # returns the matching monthly rows (normally exactly one).
        inventories = cls.get_by_merchant_hotel_roomtype_date(session, merchant_id, hotel_id, roomtype_id, year, month)
        val = val if val >= 0 else 0
        val = val if val <= 99 else 99
        if not inventories:
            return
        for inventory in inventories:
            inventory.set_val_by_day(day, price_type, val)
session.commit()
@classmethod
def combin_year_month(cls, year, month):
return int("{}{:0>2d}".format(year, month))
def get_day(self, day, type=None):
if day < 1 or day > 31:
return 0
day_key = 'day' + str(day)
value = getattr(self, day_key)
if type:
return int(value.split('|')[type])
else:
prices = value.split('|')
auto, manual = int(prices[0]), int(prices[1])
auto = auto if auto >=0 else 0
manual = manual if manual >= 0 else 0
return auto + manual
def get_day_count(self, day):
if day < 1 or day > 31:
return 0
day_key = 'day' + str(day)
value = getattr(self, day_key)
counts = value.split('|')
return int(counts[0]), int(counts[1])
def deduct_val_by_day(self, day, val):
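        # Deduct `val` rooms for the given day, consuming the automatic
        # count first and then the manual count; returns (-1, -1) when the
        # combined stock is insufficient, otherwise the amounts taken from
        # each pool.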
day_key = 'day' + str(day)
value = getattr(self, day_key)
count_auto, count_manual = [count if count >= 0 else 0 for count in [int(count) for count in value.split('|')]]
if count_auto + count_manual < val:
return -1, -1
if count_auto >= val:
num_auto = val
num_manual = 0
remain_auto = count_auto - num_auto
remain_manual = count_manual
else:
num_auto = count_auto
num_manual = val - num_auto
remain_auto = 0
remain_manual = count_manual - num_manual
value = "{}|{}".format(remain_auto, remain_manual)
setattr(self, day_key, value)
return num_auto, num_manual
def recovery_val_by_day(self, day, num_auto, num_manual):
day_key = 'day' + str(day)
value = getattr(self, day_key)
count_auto, count_manual = [count if count >= 0 else 0 for count in [int(count) for count in value.split('|')]]
value = "{}|{}".format(count_auto + num_auto, count_manual + num_manual)
setattr(self, day_key, value)
return num_auto, num_manual
def add_val_by_day(self, day, price_type, val):
day_key = 'day' + str(day)
value = getattr(self, day_key)
count_auto, count_manual = [int(count) for count in value.split('|')]
if price_type == 0:
remain_manual = count_manual
remain_auto = count_auto if count_auto >= 0 else 0
remain_auto = remain_auto + val
remain_auto = self.fix_inventory_count_range(remain_auto)
else:
remain_auto = count_auto
remain_manual = count_manual if count_manual >= 0 else 0
remain_manual = remain_manual + val
remain_manual = self.fix_inventory_count_range(remain_manual)
value = "{}|{}".format(remain_auto, remain_manual)
setattr(self, day_key, value)
def fix_inventory_count_range(self, count):
count = count if count >= 0 else 0
count = count if count <= 99 else 99
return count
def set_val_by_day(self, day, price_type, val):
day_key = 'day' + str(day)
value = getattr(self, day_key)
price_reserved, price_manual = value.split('|')
if price_type == 0:
price_reserved = val
else:
price_manual = val
value = "{}|{}".format(price_reserved, price_manual)
setattr(self, day_key, value)
def todict(self):
return ObjectDict(
id=self.id,
merchant_id=self.merchant_id,
hotel_id=self.hotel_id,
roomtype_id=self.roomtype_id,
base_hotel_id=self.base_hotel_id,
base_roomtype_id=self.base_roomtype_id,
month=self.month,
day1=self.day1,
day2=self.day2,
day3=self.day3,
day4=self.day4,
day5=self.day5,
day6=self.day6,
day7=self.day7,
day8=self.day8,
day9=self.day9,
day10=self.day10,
day11=self.day11,
day12=self.day12,
day13=self.day13,
day14=self.day14,
day15=self.day15,
day16=self.day16,
day17=self.day17,
day18=self.day18,
day19=self.day19,
day20=self.day20,
day21=self.day21,
day22=self.day22,
day23=self.day23,
day24=self.day24,
day25=self.day25,
day26=self.day26,
day27=self.day27,
day28=self.day28,
day29=self.day29,
day30=self.day30,
day31=self.day31,
is_online=self.is_online,
is_delete=self.is_delete,
)
| UTF-8 | Python | false | false | 18,397 | py | 146 | inventory.py | 117 | 0.57879 | 0.557754 | 0 | 441 | 40.709751 | 229 |
tulioreisc/imdb_crawler | 10,746,008,193,055 | f8f13eec5c6acddd68e1d1cb753e546a1a977d8a | cf39d363edce3de35fcfa76ec603d6ce93e3486c | /imdb/spiders/imdb_topgross.py | b2df94f02d5cf41f15535e345a00d7ce64c19dec | [] | no_license | https://github.com/tulioreisc/imdb_crawler | 1a4b6f59cceece566d058e58c161fe720324ee67 | de2cbd9a39e898ddd9822adc4de688e7ed9fa48d | refs/heads/master | 2020-09-10T16:17:42.097187 | 2019-12-28T13:31:07 | 2019-12-28T13:31:07 | 221,755,778 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import scrapy
class ImdbTopgrossSpider(scrapy.Spider):
name = 'imdb_topgross'
allowed_domains = ['https://www.boxofficemojo.com/']
year = 2018
start_urls = ['https://www.boxofficemojo.com/year/world/{}/'.format(year)]
def parse(self, response):
i = 0
for item in (response.css('tr'))[1:]:
#print("item: ", item)
yield {
'rank': item.css('td.mojo-field-type-rank ::text').get(),
'title': item.css('td.mojo-field-type-release a ::text').getall(),
'boxoffice': item.xpath("//td[@class='a-text-right mojo-field-type-money']/text()").getall()[i],
'domestic': item.xpath("//td[@class='a-text-right mojo-field-type-money']/text()").getall()[i+1],
'foreign': item.xpath("//td[@class='a-text-right mojo-field-type-money']/text()").getall()[i+2],
'release': item.css('td.mojo-field-type-release a ::attr(href)').getall(),
#'domestic': item[3],
#'foreign': item[4],
#'release': item[5],
}
i = i + 3
| UTF-8 | Python | false | false | 1,149 | py | 14 | imdb_topgross.py | 8 | 0.520453 | 0.509138 | 0 | 27 | 41.555556 | 113 |
jasongforbes/plankton | 7,129,645,729,195 | 70667f1460240d9058bdd0a4e549ee93a0c5f4a8 | 472a25d53a4538ee462337c37d8054e4e45f75b7 | /src/data/test_data.py | 618baa053c5278c715feb4265413c6594399931a | [] | no_license | https://github.com/jasongforbes/plankton | d87a21852a859c7c01c74722ed62eef24be5109f | f2bfe0c6548ba361c585c8c60ddf14d3a0fd8ea5 | refs/heads/master | 2016-08-05T18:47:50.957895 | 2015-02-17T20:38:36 | 2015-02-17T20:38:36 | 28,862,696 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Created on Feb 1, 2015
@author: Jason
'''
from data import image_data as im
import numpy as np
import glob
import os
class test_data(im.image_data):
'''
classdocs
'''
def __init__(self, folders, image_shape, num_classes):
'''
Constructor
'''
super(test_data, self).__init__(folders, image_shape)
self._predictions = np.zeros((self._num_images, num_classes), dtype=float)
def _type_info(self):
return "test"
def _on_read_folder(self, folder):
pass
def _read_image_args(self, fileNameDir, folder):
return [(fileNameDir, filename, self._image_shape, None) for filename in fileNameDir[2]]
def _get_classification(self, image):
return 0
def predict(self, classifier):
self._predictions = classifier.predict(self.x)
def output(self, filepath ):
out = np.hstack((self.image_names,self.predictions))
labels = self._labels[:]
labels.insert(0, "image")
np.savetxt(filepath, out, delimiter=',',fmt="%s", header=','.join(labels), comments='')
@property
def predictions(self):
return self._predictions
@predictions.setter
def predictions(self,value):
self._predictions = value | UTF-8 | Python | false | false | 1,317 | py | 17 | test_data.py | 16 | 0.590737 | 0.584662 | 0 | 52 | 24.346154 | 96 |
rbshadow/Python_URI | 515,396,080,514 | 078b46e330a6c0df41128b793d4c89756a9a415d | 9d0b3029a2c10683e6c7bda94887154857bfe634 | /Beginner/URI_1071.py | aef27457abaf203e8fdb589158cf1b5077fcd195 | [
"MIT"
] | permissive | https://github.com/rbshadow/Python_URI | 559b25a559fbe955c0e1fe6bdc1c39e30f5c18a9 | 4f7df8cdea0eba5c550bb3016b1a7ab6dc723d56 | refs/heads/master | 2020-02-26T15:53:55.367919 | 2018-10-04T00:43:31 | 2018-10-04T00:43:31 | 70,911,515 | 3 | 0 | MIT | false | 2018-02-15T17:35:56 | 2016-10-14T13:13:13 | 2018-01-25T21:24:30 | 2018-02-15T17:35:56 | 36 | 0 | 0 | 0 | Python | false | null | def math():
up_one = int(input())
up_two = int(input())
count = 0
if (up_one > up_two) and up_two < 0:
for i in range(up_two, up_one):
if i % 2 == 1:
count = count + i
print(count - up_two)
elif (up_one < up_two) and up_one < 0:
for i in range(up_one, up_two):
if i % 2 == 1:
count = count + i
print(count - up_one)
elif up_one > up_two:
for i in range(up_two, up_one):
if i % 2 == 1:
count = count + i
print(count)
else:
for i in range(up_one, up_two):
if i % 2 == 1:
count = count + i
print(count)
if __name__ == '__main__':
math()
| UTF-8 | Python | false | false | 749 | py | 134 | URI_1071.py | 133 | 0.417891 | 0.403204 | 0 | 33 | 21.69697 | 42 |
chaos5958/CtCI-6th-Edition-Python | 12,154,757,486,834 | f6dfb1e2737c30dcaf1fba20f83fe11978e9c508 | 02178c939a625a529297b49ad2d48455619c5e22 | /chapter_04/graph.py | 7bfc63cf5440d1deb797b7feefe4696538a17244 | [] | no_license | https://github.com/chaos5958/CtCI-6th-Edition-Python | e7b80c8ef77928f08a29764937ca793b9ed30d8b | 859974dcc581daf2358484af7e755eb6efa8935d | refs/heads/master | 2022-12-31T03:07:31.154560 | 2022-10-26T08:26:14 | 2022-10-26T08:26:14 | 303,285,420 | 0 | 0 | null | true | 2020-10-12T05:20:49 | 2020-10-12T05:20:48 | 2020-10-12T01:47:40 | 2020-10-09T21:33:18 | 40 | 0 | 0 | 0 | null | false | false | from collections import defaultdict, deque
class GraphByList():
def __init__(self):
self.lists = defaultdict(list)
def add_edge(self, parent, child):
self.lists[parent].append(child)
def remove_edge(self, parent, child):
self.lists[parent].remove(child)
def dfs_internal(self, v, visited):
print(f"{v} ")
visited.add(v)
for neighbor in self.lists[v]:
if neighbor not in visited:
self.dfs_internal(neighbor, visited)
def dfs(self, v):
visited = set()
self.dfs_internal(v, visited)
    def bfs(self, v):
        # FIFO queue: append on the right, pop from the left. Nodes are
        # marked visited when enqueued so they cannot be queued twice.
        visited = set()
        queue = deque()
        queue.append(v)
        visited.add(v)
        while len(queue) > 0:
            new_v = queue.popleft()
            print(f"{new_v} ")
            for neighbor in self.lists[new_v]:
                if neighbor not in visited:
                    visited.add(neighbor)
                    queue.append(neighbor)
class GraphByMatrix():
def __init__(self):
pass
# definition of function
def generate_edges(graph):
edges = []
# for each node in graph
for node in graph.lists:
# for each neighbour node of a single node
for neighbour in graph.lists[node]:
# if edge exists then append
edges.append((node, neighbour))
return edges
def main():
# declaration of graph as dictionary
graph = GraphByList()
graph.add_edge('a','c')
graph.add_edge('b','c')
graph.add_edge('b','e')
graph.add_edge('c','d')
graph.add_edge('c','e')
graph.add_edge('c','a')
graph.add_edge('c','b')
graph.add_edge('e','b')
graph.add_edge('d','c')
graph.add_edge('e','c')
# Driver Function call
# to print generated graph
# print(generate_edges(graph))
# graph.dfs('a')
graph.bfs('a')
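    # With the edges above and insertion-ordered adjacency lists,
    # bfs('a') prints: a, c, d, e, b.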
if __name__ == "__main__":
main() | UTF-8 | Python | false | false | 1,921 | py | 20 | graph.py | 20 | 0.54607 | 0.545549 | 0 | 80 | 23.025 | 52 |