repo_name (string, len 7-111) | __id__ (int64, 16.6k-19,705B) | blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 5-151) | content_id (string, len 40) | detected_licenses (list) | license_type (string, 2 classes) | repo_url (string, len 26-130) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, len 4-42) | visit_date (timestamp[ns]) | revision_date (timestamp[ns]) | committer_date (timestamp[ns]) | github_id (int64, 14.6k-687M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 12 classes) | gha_fork (bool, 2 classes) | gha_event_created_at (timestamp[ns]) | gha_created_at (timestamp[ns]) | gha_updated_at (timestamp[ns]) | gha_pushed_at (timestamp[ns]) | gha_size (int64, 0-10.2M, nullable) | gha_stargazers_count (int32, 0-178k, nullable) | gha_forks_count (int32, 0-88.9k, nullable) | gha_open_issues_count (int32, 0-2.72k, nullable) | gha_language (string, len 1-16, nullable) | gha_archived (bool, 1 class) | gha_disabled (bool, 1 class) | content (string, len 10-2.95M) | src_encoding (string, 5 classes) | language (string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 10-2.95M) | extension (string, 19 classes) | num_repo_files (int64, 1-202k) | filename (string, len 4-112) | num_lang_files (int64, 1-202k) | alphanum_fraction (float64, 0.26-0.89) | alpha_fraction (float64, 0.2-0.89) | hex_fraction (float64, 0-0.09) | num_lines (int32, 1-93.6k) | avg_line_length (float64, 4.57-103) | max_line_length (int64, 7-931) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
victoralfonzo/justdonttouchred | 3,796,751,111,207 | c153da5e1fc08eb358eb32e3373628231c9f6b57 | 21ce6721b26e412ff2591ffc63657cd106a841e2 | /main.py | eabfd0a791910891030133b1b13cdcf5a39aca2a | [] | no_license | https://github.com/victoralfonzo/justdonttouchred | b93c707b805bb0771c00057abadfbbb4631fa7c3 | 4e3cdf0b1c2e4071ee5052a423c2695812b47686 | refs/heads/master | 2020-05-16T15:53:41.865915 | 2020-02-20T06:08:18 | 2020-02-20T06:08:18 | 183,145,169 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pygame
import pytmx
import random
from sprites import *
from settings import *
from tilemap import *
from itertools import *
#rebuild the sprite groups for the map at index mapcounter
#(note: currently unused; the main loop below repeats this logic inline)
def changeMap(maps, mapcounter):
map = maps[mapcounter]
all_sprites.empty()
obstacles.empty()
enemies.empty()
flag.empty()
jump.empty()
for tile_object in map.tmxdata.objects:
if tile_object.name == 'player':
player = Player(tile_object.x, tile_object.y,16,24)
if tile_object.name == 'wall':
ob = Obstacle(tile_object.x, tile_object.y,
tile_object.width, tile_object.height)
if tile_object.name == 'enemy':
e = Enemy(tile_object.x, tile_object.y,
tile_object.width, tile_object.height,player)
all_sprites.add(e)
if tile_object.name == 'flag':
fl = Flag(tile_object.x, tile_object.y,
tile_object.width, tile_object.height)
if tile_object.name == 'jump':
ju = JumpBox(tile_object.x, tile_object.y,
tile_object.width, tile_object.height)
return map
pygame.init()
pygame.mixer.init()
screen = pygame.display.set_mode((WIDTH,HEIGHT))
pygame.display.set_caption("Just Don't Touch Red" )
clock = pygame.time.Clock()
#bg_img = pygame.image.load("bg.png")
#bg_rect = bg_img.get_rect()
start = pygame.image.load("startscreen.png")
start_rect = start.get_rect()
winner = pygame.image.load("winner.png")
winner_rect = winner.get_rect()
loser = pygame.image.load("endscreen.png")
loser_rect = loser.get_rect()
maps = []
one = TiledMap("maps/2.tmx")
two = TiledMap("maps/3.tmx")
three = TiledMap("maps/1.tmx")
four = TiledMap("maps/4.tmx")
maps.append(one)
maps.append(two)
maps.append(three)
maps.append(four)
#create player sprite and add it to group
player = Player(WIDTH/2,HEIGHT/2,16,32)
map = maps[0]
for tile_object in map.tmxdata.objects:
if tile_object.name == 'player':
player = Player(tile_object.x, tile_object.y,16,24)
if tile_object.name == 'wall':
ob = Obstacle(tile_object.x, tile_object.y,
tile_object.width, tile_object.height)
if tile_object.name == 'enemy':
e = Enemy(tile_object.x, tile_object.y,
tile_object.width, tile_object.height,player)
all_sprites.add(e)
if tile_object.name == 'flag':
fl = Flag(tile_object.x, tile_object.y,
tile_object.width, tile_object.height)
if tile_object.name == 'jump':
ju = JumpBox(tile_object.x, tile_object.y,
tile_object.width, tile_object.height)
all_sprites.add(player)
score = 0
camera = Camera(WIDTH*4,HEIGHT)
running = True
mapcounter = 0
mapchange = False
pressed = False
won = False
endscreen = False
while running:
if mapcounter == len(maps):
won = True
else:
map = maps[mapcounter]
if mapchange:
all_sprites.empty()
obstacles.empty()
enemies.empty()
flag.empty()
jump.empty()
for tile_object in map.tmxdata.objects:
if tile_object.name == 'player':
player = Player(tile_object.x, tile_object.y,16,24)
if tile_object.name == 'wall':
ob = Obstacle(tile_object.x, tile_object.y,
tile_object.width, tile_object.height)
if tile_object.name == 'enemy':
e = Enemy(tile_object.x, tile_object.y,
tile_object.width, tile_object.height,player)
all_sprites.add(e)
if tile_object.name == 'flag':
fl = Flag(tile_object.x, tile_object.y,
tile_object.width, tile_object.height)
if tile_object.name == 'jump':
ju = JumpBox(tile_object.x, tile_object.y,
tile_object.width, tile_object.height)
mapchange = False
all_sprites.add(player)
map_img = map.make_map()
map_rect = map_img.get_rect()
    #process events
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.KEYDOWN:
pressed = True
#updates
all_sprites.update()
camera.update(player)
#screen.blit(bg_img,bg_rect)
screen.blit(map_img, camera.apply_rect(map_rect))
for sprite in all_sprites:
screen.blit(sprite.image, camera.apply(sprite))
hits = pygame.sprite.spritecollide(player,flag,False)
if len(hits)>0:
mapcounter+=1
mapchange = True
hits = pygame.sprite.spritecollide(player,jump,False)
if len(hits)>0:
player.jump(-20)
hits = pygame.sprite.spritecollide(player,enemies,False)
if len(hits)>0:
endscreen = True
    if not pressed:
screen.blit(start,start_rect)
player.movement[0] = 0
else:
player.movement[0] = 4
if won:
screen.blit(winner,winner_rect)
all_sprites.empty()
if endscreen:
screen.blit(loser,loser_rect)
keys = pygame.key.get_pressed()
if True in keys:
mapchange = True
endscreen = False
mapcounter = 0
    clock.tick(FPS) #cap the frame rate at FPS
pygame.display.flip()
pygame.quit()
| UTF-8 | Python | false | false | 5,391 | py | 8 | main.py | 3 | 0.57967 | 0.572065 | 0 | 194 | 26.78866 | 74 |
luliu31415926/programming_contest_workbook | 17,136,919,534,659 | d95420ca6590f9b551950c3958eabc4e184f277a | ed1cc13d31a2bb7b34a401565c9179286e4e3dfb | /dining_poj_3281.py | c1ba2744d00c43a008e43c1aceed97300c01a62d | [] | no_license | https://github.com/luliu31415926/programming_contest_workbook | 367b3df2c9e6bada224bee51aa5f2ab017f72c43 | 32901d675da24d87b53dc6e9266cf05462e50450 | refs/heads/master | 2020-03-09T08:30:00.699033 | 2018-05-10T23:39:47 | 2018-05-10T23:39:47 | 128,690,524 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#dining poj 3281
# convert to max flow
# s=> (limit each food used once) food =>(foods cows like) cows => (only cows with matched food can flow over) cows => (match cows with drinks) drink=>( limit each drink use once) sink
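# NOTE: the Dinic class used below is not defined in this file; it presumably
# lives elsewhere in the repository. The following is a minimal standard Dinic
# max-flow sketch with the same interface (add_edge(u, v, cap), max_flow(s, t))
# so the script is self-contained; it is an editor's assumption, not the
# original author's implementation.
from collections import deque
class Dinic(object):
    def __init__(self, n):
        self.n = n
        self.graph = [[] for _ in range(n)] #per node: lists of [to, capacity, index of reverse edge]
    def add_edge(self, u, v, cap):
        #forward edge plus a zero-capacity reverse edge for residual flow
        self.graph[u].append([v, cap, len(self.graph[v])])
        self.graph[v].append([u, 0, len(self.graph[u]) - 1])
    def bfs(self, s, t):
        #layer the residual graph; returns True while t is still reachable
        self.level = [-1] * self.n
        self.level[s] = 0
        queue = deque([s])
        while queue:
            u = queue.popleft()
            for v, cap, _ in self.graph[u]:
                if cap > 0 and self.level[v] < 0:
                    self.level[v] = self.level[u] + 1
                    queue.append(v)
        return self.level[t] >= 0
    def dfs(self, u, t, f):
        #push flow along level-increasing edges, advancing iteration pointers
        if u == t:
            return f
        while self.it[u] < len(self.graph[u]):
            edge = self.graph[u][self.it[u]]
            v, cap, rev = edge
            if cap > 0 and self.level[v] == self.level[u] + 1:
                d = self.dfs(v, t, min(f, cap))
                if d > 0:
                    edge[1] -= d
                    self.graph[v][rev][1] += d
                    return d
            self.it[u] += 1
        return 0
    def max_flow(self, s, t):
        flow = 0
        while self.bfs(s, t):
            self.it = [0] * self.n
            f = self.dfs(s, t, float('inf'))
            while f > 0:
                flow += f
                f = self.dfs(s, t, float('inf'))
        return flow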
inpt = iter(['4 3 3', '2 2 1 2 3 1', '2 2 2 3 1 2', '2 2 1 3 1 2', '2 1 1 3 3']) #hard-coded POJ 3281 sample input in place of stdin
N,F,D=tuple(map(int,next(inpt).split()))
dinic=Dinic(2+F+D+N*2)
source=0
sink=1+F+D+N*2
for food in range(1,F+1):
dinic.add_edge(source,food,1)
for drink in range(F+1,F+1+D):
dinic.add_edge(drink,sink,1)
for i in range(1,N+1):
dinic.add_edge(F+D+i,F+D+N+i,1)
for i in range(1,N+1):
line=list(map(int,next(inpt).split()))
f,d=line[:2]
for food in line[2:2+f]:
dinic.add_edge(food,F+D+i,1)
for drink in line[-d:]:
dinic.add_edge(F+D+N+i,drink+F,1)
max_flow=dinic.max_flow(source,sink)
print(max_flow)
| UTF-8 | Python | false | false | 846 | py | 32 | dining_poj_3281.py | 31 | 0.626478 | 0.566194 | 0 | 25 | 32.88 | 185 |
SamHurley/ABase | 14,920,716,426,571 | da669a99ad5d362aad846913a39260f4f73815c8 | 9d10278e7ad2eaa7d6abe863f220f005d91800d4 | /BaseConsts.py | c9c747dba693f90cbb7b6fb9dd87e668b12d7ec8 | [] | no_license | https://github.com/SamHurley/ABase | 11792a1cb55fa111b145b5b3be918a739dd272d8 | d3998383e2c79bc1d0f6b81954e7d9f4efd2ef74 | refs/heads/master | 2016-03-01T03:41:40.160744 | 2015-07-20T14:24:30 | 2015-07-20T14:24:30 | 22,006,816 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
""" UPPER BUTTONS - NOTES - 10 - 17 (corner LEDs - 68 - 75)
SIDE BUTTONS - NOTES - 18 - 25
PADS - NOTES/CCs - 36 - 67 (starting in lower left)
FADERS - NOTES/CCs - 1 - 9
DISPLAY - CCs - 34 - 35
"""
"""
CLEAR ALL PAD/BUTTON LEDS = (240, 0, 1, 97, 12, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 247)
"""
from BaseModel import BASE_MODEL_1
""" The product IDs for the two different Base models. """
BASE_1_ID = 12
BASE_2_ID = 17
""" The product ID to use, which is determined by the value in BaseModel.py. """
PRODUCT_ID = BASE_1_ID if BASE_MODEL_1 else BASE_2_ID
""" The SysEx message to send for performing a factory reset. """
FACTORY_RESET = (240, 0, 1, 97, PRODUCT_ID, 6, 247)
""" The SysEx message to send for turning local LED control off. """
LOCAL_CONTROL_OFF = (240, 0, 1, 97, PRODUCT_ID, 8, 0, 247)
""" The SysEx message to send for linking the dual LEDs of the side buttons. """
SIDE_BUTTON_LED_LINK = (240, 0, 1, 97, PRODUCT_ID, 68, 1, 247)
""" The SysEx header to use for setting slider LED types. """
SLIDER_LED_TYPE_HEADER = (240, 0, 1, 97, PRODUCT_ID, 50)
""" The SysEx header to use for setting slider LED colors. """
SLIDER_LED_COLOR_HEADER = (240, 0, 1, 97, PRODUCT_ID, 61)
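# Illustrative sketch only (an assumption, not documented in this file):
# judging by FACTORY_RESET above, a complete message is presumably the header
# bytes plus payload, terminated by 247 (0xF7), e.g. something like
#   SLIDER_LED_COLOR_HEADER + (slider_index, SLIDER_COLORS['RED'], 247)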
""" The available slider LED types. """
SLIDER_TYPES = {'SINGLE': 0,
'FULL': 1,
'BIPOLAR': 2}
""" The available slider LED colors. """
SLIDER_COLORS = {'DUAL': 0,
'RED': 1,
'GREEN': 2,
'YELLOW': 3,
'BLUE': 4,
'MAGENTA': 5,
'CYAN': 6,
'WHITE': 7}
""" The fixed slider LED colors to use for sliders that are controlling parameters in Live. """
SLIDER_COLORS_FOR_PARAMS = (SLIDER_COLORS['RED'], SLIDER_COLORS['GREEN'], SLIDER_COLORS['BLUE'])
""" The total number of sliders on the controller. """
NUM_SLIDERS = 9
| UTF-8 | Python | false | false | 2,036 | py | 5 | BaseConsts.py | 5 | 0.552554 | 0.482318 | 0 | 56 | 35.357143 | 152 |
Cloudxtreme/autoimgsys | 16,810,502,026,152 | d1eec75cf06857855d8bcc0a7c26851872a155b1 | ed8b37837e5d221ec703b627dc2363890da46c2b | /ais/plugins/jai/jai.py | 15385a9b93fc231df65de2a64b1f2e04a063729d | ["Unlicense"] | permissive | https://github.com/Cloudxtreme/autoimgsys | 2828054ca532536d5fae6c5aa975d29364b3f5b4 | 55808d0ddefb949a278bc9790c014f3b4fcf6fdb | refs/heads/master | 2020-03-28T14:52:33.288468 | 2017-06-27T13:47:37 | 2017-06-27T13:47:37 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
#
# This software is in the public domain because it contains materials
# that originally came from the United States Geological Survey,
# an agency of the United States Department of Interior.
# For more information, see the official USGS copyright policy at
# http://www.usgs.gov/visual-id/credit_usgs.html#copyright
#
# <author> Rian Bogle </author>
""" JAI module provides representations of JAI gigE cameras controlled by arvais
The first class is the JAI_AD80GE class subclased from ais.Task
This device has two cameras one RGB visible and one Mono NIR.
"""
from ais.plugins.jai.aravis import *
from ais.lib.task import PoweredTask
from ais.lib.relay import Relay
import pprint, time, cv2, traceback, datetime, os
import logging #used by the __main__ block below
from collections import OrderedDict
class Sensor(object):
def __init__(self, **kwargs):
self.name = kwargs.get("name",None)
self.mac = kwargs.get("mac", None)
self.cam = None
class JAI_AD80GE(PoweredTask):
def run(self, **kwargs):
"""Initalizes camera system, configures camera, and collects image(s)
Args:
**kwargs: Named arguments to configure camera for shot(s)
Used keywords are the following:
date_pattern (opt) : passed as strftime format
used for filename YYYY-MM-DDTHHMMSS
file_prefix (opt) : Prefix for filename 'jai'
date_dir (None, hourly, daily, monthly, yearly) : make subdirs for storage
date_dir_nested (opt): make a separate nested subdir for each y,m,d,h or single subdir as yyyy_mm_dd_hh
sub_dir (opt): add subdirectories to filestore
                timeout (opt) : milliseconds to wait for image return
sequence (opt): list of dictionaries with the following:
each dict given will be a numbered image
exposure_time (opt) : image exposure time in uSec
33319 (1/30th) default
20 is min
gain (opt) : 0-26db gain integer steps 0 default
height (opt) : requested image height max default
width (opt) : requested image width max default
offset_x (opt) : requested image x offset 0 default
                    offset_y (opt) : requested image y offset 0 default
"""
        try: # we don't want to crash the ais_service, so just log errors
#we need to start camerasys as this is task callback
if not self._started:
self.start()
self.last_run['images']=list()
self.last_run['config']=kwargs
self.logger.debug("Shot config is:\n %s" % pprint.pformat(kwargs, indent=4))
persist = kwargs.get("persist", False)
datepattern = kwargs.get("date_pattern", "%Y-%m-%dT%H%M%S" )
split = kwargs.get("date_dir",'Daily')
nest = kwargs.get("date_dir_nested", False)
subdir = kwargs.get("sub_dir", None)
filename = self._gen_filename(kwargs.get('file_prefix', "jai"),
datepattern, subdir=subdir, split = split, nest = nest)
imgtype = kwargs.get("image_type", 'tif')
            sequence = kwargs.get('sequence', None)
            if sequence is None: #sequence is optional per the docstring; default to one shot with default settings
                sequence = [{}]
# Get the sensor configurations
sensor_confs = {'rgb': kwargs.get("rgb", {}), 'nir': kwargs.get("nir", {})}
for sname, sc in sensor_confs.iteritems():
def_fmts = {'rgb': 'BayerRG8', 'nir': 'Mono8'}
if sname in def_fmts.keys():
def_fmt = def_fmts.get(sname)
self._sensors[sname].cam.set_pixel_format_as_string(sc.get("pixel_format", def_fmt))
ob_mode = sc.get('ob_mode', False)
if ob_mode:
self._sensors[sname].cam.write_register(0xa41c,1)
else:
self._sensors[sname].cam.write_register(0xa41c,0)
                #create frame buffer
#self._sensors[sname].cam.create_buffers(1);
# start/stop acquisition have to be outside the capture loop.
#self._sensors[sname].cam.start_acquisition_trigger()
#we need to put in the packet delay to improve reliability
self._sensors[sname].cam.set_integer_feature("GevSCPD",4000)
#and set sync mode for image capture
self._sensors[sname].cam.set_string_feature("SyncMode", "Sync")
self._sensors[sname].cam.set_string_feature("AcquisitionMode", "SingleFrame") #no acquisition limits
self._sensors[sname].cam.set_string_feature("TriggerSource", "Software") #wait for trigger t acquire image
self._sensors[sname].cam.set_string_feature("TriggerMode", "On") #Not documented but necesary
self.last_run['time'] = datetime.datetime.now().strftime("%Y-%m-%dT%H%M%S")
for i,shot in enumerate(sequence):
fname=filename+"_"+ "%02d" % i
self.capture_image(fname,imgtype,**shot)
# start/stop acquisition have to be outside the capture loop.
for sens in self._sensors.itervalues():
sens.cam.stop_acquisition()
if not persist:
self.stop()
except Exception as e:
self.stop()
self.logger.error( str(e))
self.logger.error( traceback.format_exc())
self.last_run['success'] = False
self.last_run['error_msg'] = str(e)
return
self.logger.info("JAI_AD80GE ran its task")
self.last_run['success'] = True
def status(self):
status= {}
try:
if not self._started:
self.start()
for sensor in self._sensors.itervalues():
sensor_status = OrderedDict()
sensor_status['Name'] = sensor.name
sensor_status['Mac'] = sensor.mac
ipnum=sensor.cam.get_integer_feature("GevCurrentIPAddress")
o1 = int(ipnum / 16777216) % 256
o2 = int(ipnum / 65536) % 256
o3 = int(ipnum / 256) % 256
o4 = int(ipnum) % 256
sensor_status["Current IP Addr"]='%(o1)s.%(o2)s.%(o3)s.%(o4)s' % locals()
sensor_status["Camera model"] = sensor.cam.get_model_name()
sensor_status["Device Version"] = sensor.cam.get_string_feature("DeviceVersion")
(x,y,w,h) = sensor.cam.get_region()
mw=sensor.cam.get_integer_feature("WidthMax")
mh=sensor.cam.get_integer_feature("HeightMax")
sensor_status["Region size"]= "(%s,%s)" %(w,h)
sensor_status["Image offset"] = "(%s,%s)" %(x,y)
sensor_status["Sensor size"]=sensor.cam.get_sensor_size()
sensor_status["Max size"]= "(%s,%s)" %(mw,mh)
if sensor.cam.use_exposure_time:
sensor_status["Exposure"]=sensor.cam.get_exposure_time()
else:
sensor_status["Exposure"]=sensor.cam.get_integer_feature("ExposureTimeAbs")
sensor_status["Gain"]=sensor.cam.get_gain()
sensor_status["Frame rate"]=sensor.cam.get_frame_rate()
sensor_status["Payload"]=sensor.cam.get_payload()
sensor_status['SyncMode']=sensor.cam.get_string_feature("SyncMode")
sensor_status["AcquisitionMode"]=sensor.cam.get_string_feature("AcquisitionMode")
sensor_status["TriggerSource"]=sensor.cam.get_string_feature("TriggerSource")
sensor_status["TriggerMode"]=sensor.cam.get_string_feature("TriggerMode")
sensor_status["Bandwidth"]=sensor.cam.get_integer_feature("StreamBytesPerSecond")
sensor_status["PixelFormat"]=sensor.cam.get_string_feature("PixelFormat")
sensor_status["ShutterMode"]=sensor.cam.get_string_feature("ShutterMode")
sensor_status["PacketSize"]=sensor.cam.get_integer_feature("GevSCPSPacketSize")
sensor_status["PacketDelay"]=sensor.cam.get_integer_feature("GevSCPD")
status[sensor.name] = sensor_status
self.stop()
except Exception as e:
try:
self.stop()
except:
pass
self.logger.error( str(e))
self.logger.error( traceback.format_exc())
status['Error'] = "Error Encountered:" if str(e)=="" else str(e)
status['Traceback'] = traceback.format_exc()
return status
def configure(self, **kwargs):
self.logger.info("Configuration called")
sensors = kwargs.get('sensors',None)
if sensors is not None:
self._sensors = dict()
self.logger.info("Setting sensors for JAI camera")
for s in sensors :
name =s.get("name", None)
self._sensors[name] = Sensor(**s)
self.logger.info("Sensor: %s loaded" %name)
self.initalized = True
self._powerdelay = kwargs.get('relay_delay', 15)
self._powerport = kwargs.get('relay_port', 0)
relay_name = kwargs.get('relay_name', None)
self._powerctlr = None
if relay_name is not None:
#TODO what if we're not running under the ais_service?
self._powerctlr = self.manager.getPluginByName(relay_name, 'Relay').plugin_object
if self._powerctlr is not None:
self.logger.info("JAI power controller set to use: %s on port %s with delay %s"
%(relay_name, self._powerport, self._powerdelay))
else:
self.logger.error("JAI power controller is not set!")
if not isinstance(self._powerctlr, Relay):
self.logger.error("Plugin %s is not available" %relay_name)
def device_reset(self):
try:
if not self._started:
self.start()
for sensor in self._sensors.itervalues():
sensor.cam.set_integer_feature("DeviceReset", 1)
except Exception as e:
try:
self.stop()
except:
pass
self.logger.error( str(e))
self.logger.error( traceback.format_exc())
def start(self):
if not self._started:
self.logger.info("JAI_AD80GE is powering up")
if self._powerctlr is not None:
self._power(True)
self.logger.debug("Power delay for %s seconds" %self._powerdelay)
time.sleep(self._powerdelay)
self.logger.debug("Power delay complete, connecting to camera")
self._ar = Aravis()
for sens in self._sensors.itervalues():
self.logger.debug("Getting Handle for Sensor: %s" %sens.name)
sens.cam = self._ar.get_camera(sens.mac)
if sens.cam.get_float_feature("ExposureTime") > 0:
sens.cam.use_exposure_time = True
else:
sens.cam.use_exposure_time = False
self.logger.info("JAI_AD80GE started")
self._started = True
def stop(self):
try:
for sens in self._sensors.itervalues():
sens.cam.cleanup()
sens.cam=None
except:
for sens in self._sensors.itervalues():
sens.cam= None
self._ar = None
if self._powerctlr is not None:
self._power( False)
self.logger.info("JAI_AD80GE is powering down")
self._started = False
def capture_image(self, name, imgtype="tif", **kwargs):
if self._started:
for sensor in self._sensors.itervalues():
# Setup shot params
if sensor.cam.use_exposure_time:
sensor.cam.set_exposure_time(float(kwargs.get("exposure_time", 33342)))
else:
sensor.cam.set_integer_feature("ExposureTimeAbs", int(kwargs.get("exposure_time", 33342)))
sensor.cam.set_gain(float(kwargs.get("gain", 0)))
#max_width,max_height = sensor.cam.get_sensor_size()
max_width=sensor.cam.get_integer_feature("WidthMax")
max_height=sensor.cam.get_integer_feature("HeightMax")
#Set ROI
sensor.cam.set_region(kwargs.get("offset_x", 0),
kwargs.get("offset_y", 0),
kwargs.get("width", max_width),
kwargs.get("height", max_height))
sensor.cam.create_buffers(1)
if self._sensors['rgb'].cam.use_exposure_time:
exp = self._sensors['rgb'].cam.get_exposure_time()
else:
exp = self._sensors['rgb'].cam.get_integer_feature("ExposureTimeAbs")
gain = self._sensors['rgb'].cam.get_gain();
self.logger.debug("Jai ExposureTime: %d, GainRaw: %d " % (exp,gain) )
rgb_status=6 # ARV_BUFFER_STATUS_FILLING
nir_status=6 # ARV_BUFFER_STATUS_FILLING
tries=10 #exit out after 10 loops if nothing is complete
# we retry frame grabs if they are incomplete: status will report non-zero for a problem.
while ( (rgb_status or nir_status) and tries):
self._sensors['rgb'].cam.start_acquisition()
self._sensors['nir'].cam.start_acquisition()
self._sensors['rgb'].cam.trigger()
rgb_status, rgb_data = self._sensors['rgb'].cam.get_frame()
nir_status, nir_data = self._sensors['nir'].cam.get_frame()
tries-=1
if rgb_status:
self.logger.error("Requesting new frame-set. Problem RGB frame. RGB_status: %d" %(rgb_status))
if nir_status:
self.logger.error("Requesting new frame-set. Problem NIR frame. NIR_status: %d" %(nir_status))
if tries==0:
self.logger.error("Giving up on frame-set. 10 attempts at capturing clean frames.")
#make our filenames
rgb_name = name+ "_rgb." + imgtype
nir_name = name+ "_nir." + imgtype
# convert bayer color to rgb color
rgb_data = cv2.cvtColor(rgb_data, cv2.COLOR_BAYER_RG2RGB)
cv2.imwrite(rgb_name, rgb_data)
cv2.imwrite(nir_name, nir_data)
self.logger.info("Jai capturing and saving image as: %s"%rgb_name)
self.logger.info("Jai capturing and saving image as: %s"%nir_name)
self.last_run['images'].append(rgb_name)
self.last_run['images'].append(nir_name)
else:
self.logger.error("JAI_AD80GE is not started")
raise Exception("JAI Camera is not started.")
def add_sensor(self, name, macaddress):
kwa = {'name': name, 'mac': macaddress}
sensor = Sensor(**kwa)
self._sensors[name] = sensor
def __init__(self,**kwargs):
"""Initializes camera instance
Args:
**kwargs Named arguments to configure the camera(s)
Sensors: dict of name: mac address for each of the sensors on board
"""
super(JAI_AD80GE,self).__init__(**kwargs)
sensors = kwargs.get('sensors', None)
self._sensors = dict()
self.last_run = dict()
self.last_run['success'] = False
self.last_run['error_msg']= "No run attempted"
#Look for sensor config
if sensors is not None:
for s in sensors :
name =s.get("name", None)
self._sensors[name] = Sensor(**s)
self.initalized = True
self._started = False
self._powerdelay = kwargs.get('relay_delay', 30)
self._powerport = kwargs.get('relay_port', 0)
if 'power_ctlr' in kwargs:
try:
self._powerctlr = self._marshal_obj('power_ctlr', **kwargs)
if not isinstance(self._powerctlr, Relay):
raise TypeError
except:
self._powerctlr = None
self.logger.error("Could not marshall Relay Object")
elif 'relay_name' in kwargs:
relay_name = kwargs.get('relay_name', None)
try:
self._powerctlr = self.manager.getPluginByName(relay_name, 'Relay').plugin_object
if not isinstance(self._powerctlr, Relay):
self._powerctlr = None
self.logger.error("Plugin %s is not a Relay Object" %relay_name)
except:
self.logger.error("Plugin %s is not available" %relay_name)
else:
self._powerctlr = None
def _gen_filename(self, prefix="jai", dtpattern="%Y-%m-%dT%H%M%S", subdir=None, split=None, nest=False):
#TODO parse namepattern for timedate pattern?
#datetime.datetime.now().strftime(dtpattern)
now = datetime.datetime.now()
delim = "_"
#set root path to images
if self.filestore is None:
imgpath = "/tmp/jai"
else:
imgpath = self.filestore
#tack on subdir to imgpath if requested
if subdir is not None:
imgpath+="/"+subdir
#try to make imagepath
if not os.path.isdir(imgpath):
try:
os.makedirs(imgpath)
except OSError:
if not os.path.isdir(imgpath):
self.logger.error("Jai cannot create directory structure for image storage")
#if asked to make more subdirs by date do it:
if split is not None:
imgpath = self._split_dir(now,imgpath,split,nest)
#make datepattern for file name if asked for
if dtpattern is not None:
dt = now.strftime(dtpattern)
else:
dt=""
delim=""
#we return the path and name prefix with dt stamp
#save_image adds sensor and sequence number and suffix.
return imgpath+"/"+prefix+delim+dt
def _split_dir(self, atime, root="/tmp/jai",freq="Daily", nested=False):
'''
_split_dir will make a directory structure based on a datetime object
, frequency, and whether or not it should be nested.
'''
if nested:
delim="/"
else:
delim ="_"
if freq in ['year', 'Year', 'yearly', 'Yearly']:
root+='/'+ str(atime.year)
elif freq in ['month', 'Month', 'Monthly', 'monthly']:
root+='/'+str(atime.year)+delim+"%02d"%atime.month
elif freq in ['day', 'daily', 'Day', 'Daily']:
root+='/'+str(atime.year)+delim+"%02d"%atime.month+delim+"%02d"%atime.day
elif freq in ['hour', 'hourly', 'Hour', 'Hourly']:
root+='/'+str(atime.year)+delim+"%02d"%atime.month+delim+"%02d"%atime.day+delim+"%02d"%atime.hour
if not os.path.isdir(root):
try:
os.makedirs(root)
except OSError:
if not os.path.isdir(root):
self.logger.error("Jai cannot create directory structure for image storage")
return root
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
init_args = {
"sensors":(
{"name": "rgb", "mac": "00:0c:df:04:93:94"},
{"name": "nir", "mac": "00:0c:df:04:a3:94"}
),
"power_ctlr":{
'class': "Phidget",
'module': 'ais.plugins.phidget.phidget'
},
'relay_delay': 30,
'relay_port':0
}
run_args = {
'pixel_formats':(
{'sensor':'rgb', 'pixel_format': 'BayerRG8'},
{'sensor':'nir', 'pixel_format': 'Mono8'}
),
'file_prefix': 'hdr',
'sequence':[
{'exposure_time': 20},
{'exposure_time': 40},
{'exposure_time': 120},
{'exposure_time': 240},
{'exposure_time': 480},
{'exposure_time': 960},
{'exposure_time': 1920},
{'exposure_time': 3840},
{'exposure_time': 7680},
{'exposure_time': 15360},
{'exposure_time': 30720},
]
}
jai = JAI_AD80GE(**init_args)
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(jai.status())
#jai.run(**run_args)
| UTF-8 | Python | false | false | 21,457 | py | 48 | jai.py | 24 | 0.526541 | 0.51722 | 0 | 467 | 44.937901 | 122 |
darkknight314/Vector-Based-IR-system | 18,391,049,984,350 | 57bca7065436003f97b330e8c2383c6bdd262ba6 | deed991bf26c68b51d799a778744aa68a93a3220 | /Part 2 Improvement 2/index_creation.py | 9d3adba574e31a0ca389b592ace14c737814683d | [] | no_license | https://github.com/darkknight314/Vector-Based-IR-system | 81c25b475958909c7de6195ddce6bed8a5d6c0d3 | 65602fe2519260c972d40b8760d1165b8ad6ed61 | refs/heads/master | 2022-04-19T10:30:55.041322 | 2020-04-18T21:10:23 | 2020-04-18T21:10:23 | 256,307,428 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from bs4 import BeautifulSoup
import nltk
nltk.download('punkt')
import numpy as np
import pickle
from nltk.util import ngrams
from collections import Counter
import os
INDEX = "./pickles/"
if not os.path.isdir(INDEX): #ensure the pickle directory exists before the dumps at the end of this script
    os.makedirs(INDEX)
file_content = open("wiki_47",encoding="utf8").read()
all_docs = file_content.split("</doc>") #BeautifulSoup was originally used to retrieve individual documents, but it resulted in incorrect partitioning of documents
all_docs = [BeautifulSoup(doc+"</doc>", "lxml") for doc in all_docs][:-1]
def tokenize(text):
    tokens = nltk.word_tokenize(text)
    tokens = [token.lower() for token in tokens]
    return tokens
#Creation of list of doc ids, doc titles and document text to zip them together
doc_id = []
doc_title = []
doc_text = []
dict_docs={}
for doc in all_docs:
pid=doc.find_all("doc")[0].get("id")
ptitle=doc.find_all("doc")[0].get("title")
ptext=doc.get_text().lower()
doc_id.append(pid)
doc_title.append(ptitle)
doc_text.append(ptext)
dict_docs[pid]=ptitle
indexed_docs = list(zip(doc_id,doc_title,doc_text))
#Creation of vocabulary
tokens=[]
for page in doc_text:
tokens.extend(tokenize(page))
vocabulary = sorted(set(tokens))
tdf={} #Will store the natural term document frequencies
for term in vocabulary:
tdf[term]={}
for doc_iter in indexed_docs:
dc_id=doc_iter[0]
doc_tokens=tokenize(doc_iter[2])
for term in doc_tokens:
if term in tdf:
if dc_id in tdf[term]:
tdf[term][dc_id]=tdf[term][dc_id]+1
else:
tdf[term][dc_id]=1
wt={} #Will store the logarithmically scaled term document frequencies
sos={} #Sum of squares of logarithmic term document frequencies for normalization
for doc in doc_id:
sos[doc]=0
for term in vocabulary:
dicti=tdf[term]
wt[term]={}
for key,value in dicti.items():
wt[term][key]=1+np.log10(value)
sos[key]=sos[key]+wt[term][key]**2
norm={} #Normalized logarithmic term document frequencies
for term in vocabulary:
dicti=tdf[term]
norm[term]={}
for key,value in dicti.items():
norm[term][key]=wt[term][key]/(np.sqrt(sos[key]))
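#At this point norm[term][doc] = (1 + log10(tf)) / sqrt(sum over terms in doc of (1 + log10(tf))^2),
#i.e. cosine-normalized logarithmic term frequency (the "lnc" document weighting in SMART notation)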
idf={} #inverse document frequency for each vocabulary term
for term in vocabulary:
if len(norm[term])==0:
idf[term]=0
else:
idf[term]=np.log10(len(all_docs)/len(norm[term]))
bigrams=[] #list of all bigrams in corpus
bigram_frequency = {} #frequency of bigrams
first_word = {} #Frequency of unigrams
second_word = {} #Frequency of unigrams
for text in doc_text: #fill bigrams
temp=list(ngrams(tokenize(text),2))
bigrams.extend(temp)
unique_bigrams = list(set(bigrams))
total_bigrams = len(bigrams)
for bi in bigrams: #fill bigram_frequency
if bi in bigram_frequency:
bigram_frequency[bi]=bigram_frequency[bi]+1
else:
bigram_frequency[bi]=1
for x in tokens:
first_word[x] = 0
second_word[x] = 0
for x in tokens: #fill first_word and second_word
first_word[x] = first_word[x] + 1
second_word[x] = second_word[x] + 1
chi_square_scores = {}
for bigram in unique_bigrams: #calculate chi-square scores for all bigrams
word1 = bigram[0]
word2 = bigram[1]
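    #2x2 contingency table for the bigram (word1, word2):
    #  o11 = occurrences of (word1, word2); o21 = word1 followed by something else
    #  o12 = something else followed by word2; o22 = all remaining bigrams
    #chi_score = N * (o11*o22 - o21*o12)^2 / ((o11+o21)(o11+o12)(o21+o22)(o12+o22))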
o11 = bigram_frequency[bigram]
o21 = first_word[word1] - o11
o12 = second_word[word2] - o11
o22 = total_bigrams - o11 - o21 - o12
chi_score = total_bigrams*(((o11*o22-o21*o12)**2)/((o11+o21)*(o11+o12)*(o21+o22)*(o12+o22)))
if(o21 + o12 > 10):
chi_square_scores[bigram] = chi_score
collocations = sorted(chi_square_scores.items(), key = lambda kv:(kv[1], kv[0]),reverse=True) #sort collocations in descending order of chi-square score
frequent_collocations = [] #store the top 1000 collocations
count = 0
for (x,y) in collocations:
count = count + 1
if count <= 1000:
frequent_collocations.append(x)
else:
break
#NOW WE HAVE TOP 1000 COLLOCATIONS
biword_tdf ={}
for biterm in frequent_collocations:
biword_tdf[biterm]={}
for doc_iter in indexed_docs: #to create natural term document frequency of frequent collocations
dc_id = doc_iter[0]
doc_bigrams = ngrams(tokenize(doc_iter[2]),2)
for biword in doc_bigrams:
if biword not in biword_tdf:
continue
if dc_id in biword_tdf[biword]:
biword_tdf[biword][dc_id] = biword_tdf[biword][dc_id] + 1
else:
biword_tdf[biword][dc_id]=1
#to calculate bigram normalized logarithmic tf for top 1000 collocations
biword_wt={}
biword_sos={}
for doc in doc_id:
biword_sos[doc]=0
for biword in biword_tdf:
biword_dicti = biword_tdf[biword]
biword_wt[biword]={}
for key,value in biword_dicti.items():
biword_wt[biword][key]=1+np.log10(value)
biword_sos[key] = biword_sos[key] + biword_wt[biword][key]**2
biword_norm={}
for biword in biword_tdf:
biword_dicti = biword_tdf[biword]
biword_norm[biword] = {}
for key,value in biword_dicti.items():
biword_norm[biword][key] = biword_wt[biword][key] / (np.sqrt(biword_sos[key]))
#Creation of index to store normalized tdf and idf values
#(write mode 'wb' so reruns overwrite the index instead of appending stale pickles)
with open(INDEX+'Normalized tdf','wb') as norm_file:
    pickle.dump(norm, norm_file)
with open(INDEX+'IDF','wb') as idf_file:
    pickle.dump(idf, idf_file)
with open(INDEX+'dict_docs','wb') as dict_file:
    pickle.dump(dict_docs, dict_file)
with open(INDEX+'vocabulary','wb') as vocab_file:
    pickle.dump(vocabulary, vocab_file)
with open(INDEX+'Bigram tdf','wb') as bi_file:
    pickle.dump(biword_tdf, bi_file)
with open(INDEX+'Bigram norm','wb') as bi_norm_file:
    pickle.dump(biword_norm, bi_norm_file)
| UTF-8 | Python | false | false | 5,720 | py | 6 | index_creation.py | 5 | 0.653671 | 0.634091 | 0 | 185 | 29.918919 | 167 |
tommeagher/pycar14 | 755,914,282,332 | b3c2cac8a7cfe9e2361c8e34e91dfee324e7d6f6 | f34394a4c9c8438e172c3f53472428b7f68d20b9 | /project4/step_3_complete.py | 6978bc34e21609e246d97c6809363e62720419b6 | ["MIT"] | permissive | https://github.com/tommeagher/pycar14 | b3474a5dbb4fda59eeadb2d0b6dd87141931e4fe | d727c351d7a1a8e190dab3e0bf6e124471c22101 | refs/heads/master | 2021-01-17T17:07:16.772636 | 2015-04-09T20:45:45 | 2015-04-09T20:45:45 | 14,432,067 | 8 | 3 | MIT | false | 2019-03-08T02:32:24 | 2013-11-15T18:37:47 | 2018-02-11T20:01:15 | 2019-03-08T02:31:16 | 1,562 | 27 | 40 | 0 | HTML | false | null |
#!/usr/bin/env python
import csv
import json
def main():
# We'll use a local version of this file from now on to save on
    # bandwidth.
with open('bills.json', 'r') as f:
data = json.load(f)
objects = data['objects']
# Create a csv file to output
with open('bills.csv', 'w') as o:
# Create a csv writer. This will help us format the file
# correctly.
writer = csv.writer(o)
# Write out the header row
writer.writerow([
u'title',
u'label',
u'number',
u'current_status'
])
# Iterate through each dict in the array `objects`
for bill in objects:
writer.writerow([
bill['title_without_number'].encode('utf-8'),
bill['bill_type_label'].encode('utf-8'),
bill['number'],
bill['current_status'].encode('utf-8')
])
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 1,081 | py | 38 | step_3_complete.py | 27 | 0.479186 | 0.476411 | 0 | 39 | 26.692308 | 68 |
jbcnrlz/biometricprocessing | 12,455,405,184,807 | 781b7e86576db03a726b454849e6f41bad76527b | a07013b46eb0b92c88f034d7ceffe7b0bf9cabf7 | /generateCorrelationData.py | 1c3aaf582ded96e74b97fc74a6412aef8b8aec86 | [] | no_license | https://github.com/jbcnrlz/biometricprocessing | 64bd77b9543014a4fe9ab1de1b32f73210ee5871 | 9659e9aa8e308d0b16e44740541c22e450f76522 | refs/heads/master | 2021-09-13T12:21:31.070435 | 2021-08-30T13:06:54 | 2021-08-30T13:06:54 | 92,350,111 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import argparse, os, cv2, numpy as np
import matplotlib.pyplot as plt
def main():
parser = argparse.ArgumentParser(description='Generate Correlation Data')
    parser.add_argument('--sigmoidFile', help='Path to the sigmoid DI image', required=True)
    parser.add_argument('--tdlbpFile', help='Path to the 3DLBP DI image', required=True)
    parser.add_argument('--rgbFile', help='Path to the RGB image', required=True)
parser.add_argument('--output', help='Folder to output to', required=True)
args = parser.parse_args()
if not os.path.exists(args.output):
os.makedirs(args.output)
sigmoidFile = cv2.imread(args.sigmoidFile,cv2.IMREAD_UNCHANGED)
tdlbpFile = cv2.imread(args.tdlbpFile,cv2.IMREAD_UNCHANGED)
rgbFile = cv2.imread(args.rgbFile,cv2.IMREAD_UNCHANGED)
fig, axs = plt.subplots(3,3)
fig.suptitle("Correlation between red and other channels")
redLayer = 2
compLayers = [0,1,3]
    channelName = ["Blue","Green","Red","Alpha"] #cv2.imread loads channels in BGR(A) order
titles = ["Sigmoid DI","3DLBP DI","RGB Image"]
for idxIM, imgType in enumerate([sigmoidFile, tdlbpFile, rgbFile]):
axs[0,idxIM].set_title(titles[idxIM])
for idxCHAN,c in enumerate(compLayers):
if c < imgType.shape[-1]:
axs[idxCHAN,idxIM].scatter(imgType[:,:,redLayer].flatten(),imgType[:,:,c].flatten())
axs[idxCHAN,idxIM].set(ylabel="Red VS "+channelName[c])
for ax in axs.flat:
ax.label_outer()
plt.show()
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 1,554 | py | 169 | generateCorrelationData.py | 151 | 0.663449 | 0.653153 | 0 | 40 | 37.875 | 100 |
time-in-translation/preprocess-corpora | 7,567,732,420,501 | 8f4bf251b0c45f33121384bbc32508754622b1ef | fde8b1b36d42015316b12c04aac5c965e339982d | /preprocess_corpora/preprocessing/preprocess.py | d2cf68c06764cec4be80d4643ae5be48cb1c528c | ["MIT"] | permissive | https://github.com/time-in-translation/preprocess-corpora | 11faec377a05813db32e91ba21c7eafb33e74469 | 40e3e34955e45fa46b5c75bfe2f05282c151f36d | refs/heads/master | 2023-07-07T07:31:43.723440 | 2023-04-01T19:15:53 | 2023-04-01T19:15:53 | 144,151,807 | 1 | 1 | MIT | false | 2023-06-29T14:13:03 | 2018-08-09T12:48:16 | 2022-06-16T08:32:27 | 2023-06-29T14:13:03 | 788 | 1 | 1 | 1 | Python | false | false |
import codecs
import glob
import os
import re
import click
from docx import Document
from ..core.constants import GERMAN, ENGLISH, FRENCH, ITALIAN, DUTCH, RUSSIAN, CATALAN
def normalize_apostrophes(line):
"""Converts left single quotation marks to apostrophes if there's a lowercase letter behind it"""
return re.sub(r'\u2019(\w)', r"'\1", line)
def remove_soft_hyphens(line):
"""Removes any soft hyphens or middle dots"""
line = line.replace(u'\u00AC', '') # not sign (Word's soft hyphen)
line = line.replace(u'\u00AD', '') # soft hyphen
line = line.replace(u'\u00B7', '') # middle dot
return line
def remove_double_spaces(line):
"""Removes superfluous spaces"""
return re.sub(r'\s+', ' ', line).strip()
def fix_period_spacing(line):
"""Fixes spacing for periods"""
return re.sub(r'(\w)\s?\.(\w)', r'\1. \2', line).strip()
def fix_hyphenization(language, line):
"""Remove superfluous spaces in hyphenized words"""
line = re.sub(r'(\w)-\s(\w)', r'\1-\2', line)
if language == DUTCH:
line = line.replace('-en ', '- en ') # -en should be converted back to - en
line = line.replace('-of ', '- of ') # -of should be converted back to - of
if language == GERMAN:
line = line.replace('-und ', '- und ') # -und should be converted back to - und
line = line.replace('-oder ', '- oder ') # -oder should be converted back to - oder
return line
def replace_quotes(language, line):
"""Replaces quote symbols with the ones suited for parsing"""
# Generic rules
line = line.replace(u'\u201C', '"') # left double quotation mark (replace with quotation mark)
line = line.replace(u'\u201D', '"') # right double quotation mark (replace with quotation mark)
line = line.replace(u'\u201E', '"') # double low-9 quotation mark (replace with quotation mark)
line = line.replace(u'\u2018', '\'') # left single quotation mark (replace with apostrophe)
line = line.replace(u'\u2019', '\'') # right single quotation mark (replace with apostrophe)
# Language-specific rules
if language in [GERMAN, CATALAN]:
line = line.replace(u'\u00AB', '"') # left-pointing double guillemet (replace with quotation mark)
line = line.replace(u'\u00BB', '"') # right-pointing double guillemet (replace with quotation mark)
line = line.replace(u'\u2039', '\'') # left-pointing single guillemet (replace with apostrophe)
line = line.replace(u'\u203A', '\'') # right-pointing single guillemet (replace with apostrophe)
line = line.replace('<', '\'') # less-than sign (replace with apostrophe)
line = line.replace('>', '\'') # greater-than sign (replace with apostrophe)
if language == FRENCH:
line = re.sub(r'\s\'', '\'', line) # Remove superfluous spacing before apostrophes
if language == DUTCH:
line = line.replace(u'\'\'', '\'') # double apostrophe (replace with single apostrophe)
# apostrophe followed by a capital, dot, space or end of the line (replace with quotation mark)
line = re.sub(r'\'([A-Z]|\.|\s|$)', r'"\1', line)
line = re.sub(r'(,\s)\'', r'\1"', line) # apostrophe preceded by a comma (replace with quotation mark)
line = line.replace('"t ', '\'t ') # "t should be converted back to 't
if language == RUSSIAN:
line = line.replace('""', '"') # Replace double quotation marks by a single one
line = re.sub(r'(^|\.\s?)-\s?', r'\1', line) # Remove hyphens at the start of the line or after punctuation
line = re.sub(r'([.,?!])\s(\"(?:\s|$))', r'\1\2', line) # Remove spaces between punctuation and quotation mark
line = re.sub(r'([.,?!])\s?(\")-', r'\1\2 -', line) # Switch (or create) spacing between quotation and hyphens
line = re.sub(r'(^\")\s', r'\1', line) # Replace superfluous spaces at the start of the line
if language == CATALAN:
line = re.sub(r'"\.[^\.]', '."', line) # Move dots after quotation marks
line = re.sub(r'^-(\S)', r'- \1', line) # Add spaces to dashes at start of line
line = re.sub(r'\s-\s?([.,?!])\s?', r'\1 - ', line) # Switch (or create) spacing between quotation and hyphens
return line
def replace_common_errors(language, line):
"""Replaces some common errors that occurred during OCR"""
# Replace unicode dashes to hyphen-minus
line = re.sub(r'\s?\u2012\s?', ' - ', line) # figure dash
line = re.sub(r'\s?\u2013\s?', ' - ', line) # en dash
line = re.sub(r'\s?\u2014\s?', ' - ', line) # em dash
line = re.sub(r'\s?\u2015\s?', ' - ', line) # horizontal bar
line = re.sub(r'\s?\u4E00\s?', ' - ', line) # Chinese character for one (resembles dash)
# Replace Chinese characters
line = re.sub(u'。\s?', '. ', line) # full stop
line = re.sub(u',\s?', ', ', line) # comma
line = re.sub(u'!\s?', '! ', line) # exclamation mark
line = re.sub(u'?\s?', '? ', line) # question mark
line = re.sub(u';\s?', '; ', line) # semicolon
line = re.sub(u':\s?', ': ', line) # colon
line = line.replace(u'(', '(') # left parenthesis
line = line.replace(u')', ')') # right parenthesis
# Some other replacements
line = line.replace(u',', ', ') # u2063, invisible separator
line = line.replace(u'…', '...') # ellipsis
# Replacements specifically for Italian
if language == ITALIAN:
line = line.replace('E\'', u'È')
line = line.replace('Be\'', u'Bè')
line = line.replace('be\'', u'bè')
line = line.replace('po\'', u'pò')
return line
def preprocess_single(file_in, file_out, language):
lines = []
with codecs.open(file_in, 'r', 'utf-8') as f_in:
for line in f_in:
if line.strip():
line = remove_double_spaces(line)
line = remove_soft_hyphens(line)
line = replace_common_errors(language, line)
line = fix_period_spacing(line)
line = fix_hyphenization(language, line)
line = replace_quotes(language, line)
if language in [ENGLISH, DUTCH, GERMAN]:
line = normalize_apostrophes(line)
lines.append(line)
with codecs.open(file_out, 'w', 'utf-8') as f_out:
for line in lines:
f_out.write(line)
f_out.write('\n')
f_out.write('\n')
click.echo('Finished preprocessing {}'.format(file_in))
def word2txt(folder_in):
for file_in in glob.glob(os.path.join(folder_in, '*.docx')):
document = Document(file_in)
file_txt = os.path.splitext(file_in)[0] + '.txt'
with codecs.open(file_txt, 'w', 'utf-8') as f_out:
full_text = []
for paragraph in document.paragraphs:
full_text.append(paragraph.text)
f_out.write('\n'.join(full_text))
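# Example usage (an editor's sketch; the file and folder names below are
# hypothetical, not part of the package):
#   preprocess_single('novel_nl_raw.txt', 'novel_nl_clean.txt', DUTCH)
#   word2txt('docx_folder') #converts every .docx in the folder to a .txt file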
| UTF-8 | Python | false | false | 6,922 | py | 53 | preprocess.py | 10 | 0.588141 | 0.576109 | 0 | 151 | 44.682119 | 119 |
nitinworkshere/algos | 7,696,581,446,636 | 558c454f3f0d6077e0d12969725403b63e69ff15 | b05ec9af3bd888d69056b192a4d15118d57dea06 | /algos/Microsoft/RemoveDuplicatesInString.py | e61c49a7b63ca34221bf40544e9a6803a1137045 | [] | no_license | https://github.com/nitinworkshere/algos | 5b6d2979d4af3dbcd54d45ae9a6722ea9e054ef0 | 9a8f3cc92bb8d0f153b82af47f19b6bced7d2163 | refs/heads/master | 2021-01-04T21:01:51.553315 | 2020-04-26T09:21:08 | 2020-04-26T09:21:08 | 240,758,577 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Python program to remove duplicate characters from an
# input string
NO_OF_CHARS = 256
# Since strings in Python are immutable and cannot be changed
# This utility function will convert the string to list
def toMutable(string):
List = []
for i in string:
List.append(i)
return List
# Utility function that changes list to string
def toString(List):
return ''.join(List)
# Function removes duplicate characters from the string
# This function work in-place and fills null characters
# in the extra space left
def removeDups(string):
bin_hash = [0] * NO_OF_CHARS
ip_ind = 0
res_ind = 0
temp = ''
mutableString = toMutable(string)
# In place removal of duplicate characters
while ip_ind != len(mutableString):
temp = mutableString[ip_ind]
if bin_hash[ord(temp)] == 0:
bin_hash[ord(temp)] = 1
mutableString[res_ind] = mutableString[ip_ind]
res_ind += 1
ip_ind += 1
    # After the loop the first res_ind characters hold the de-duplicated
    # string; slice off the leftover characters beyond that point
return toString(mutableString[0:res_ind])
#https://www.geeksforgeeks.org/remove-duplicates-from-a-string-in-o1-extra-space/
# Python3 implementation of above approach
# Function to remove duplicates
def removeDuplicatesFromString(str2):
# keeps track of visited characters
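    # (one bit per lowercase letter: bit x set means chr(97 + x) has already
    # been kept, so this O(1)-space trick only works for 'a'-'z' input)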
counter = 0;
i = 0;
size = len(str2);
str1 = list(str2);
# gets character value
x = 0;
# keeps track of length of resultant string
length = 0;
while (i < size):
x = ord(str1[i]) - 97
# check if Xth bit of counter is unset
if ((counter & (1 << x)) == 0):
str1[length] = chr(97 + x)
# mark current character as visited
counter = counter | (1 << x)
length += 1
i += 1
str2 = ''.join(str1);
return str2[0:length];
# Driver code
str1 = "geeksforgeeks";
print(removeDuplicatesFromString(str1));
# This code is contributed by mits
# Driver program to test the above functions
string = "geeksforgeeks"
print(removeDups(string))
# A shorter version for this program is as follows
# import collections
# print ''.join(collections.OrderedDict.fromkeys(string))
# This code is contributed by Bhavya Jain
| UTF-8 | Python | false | false | 2,282 | py | 139 | RemoveDuplicatesInString.py | 139 | 0.650307 | 0.633655 | 0 | 94 | 23.276596 | 81 |
shubhamsidhu/daftlistings | 12,472,585,032,417 | ec7121fbaeca7012c916a2ff1c348f6045b3415d | 35057f7b5beb8af418d8033b3e4a8be851c9e622 | /examples/enroute.py | ba61c7712dbde56b4917b36793221398e61a1db3 | ["MIT"] | permissive | https://github.com/shubhamsidhu/daftlistings | 7324810ac330018f4e8e4e3e452030f5fe1077d9 | ec9e1cee92854563f74c6288e652cc9b4eb97e1e | refs/heads/master | 2023-03-31T21:16:44.031552 | 2021-03-30T21:46:34 | 2021-03-30T21:46:34 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Get properties to let near or on a public transport route to Blackrock.
from daftlistings import Daft, AreaType, RentType
daft = Daft()
daft.set_area_type(AreaType.ENROUTE)
daft.set_area("Dublin")
daft.set_listing_type(RentType.ANY)
listings = daft.search()
for listing in listings:
print(listing.formalised_address)
print(listing.price)
print(" ")
| UTF-8 | Python | false | false | 368 | py | 33 | enroute.py | 30 | 0.741848 | 0.741848 | 0 | 16 | 22 | 73 |
ambushed/gillespie | 6,528,350,306,798 | f29dc4306652be365ccb204de1860ee5344e446a | a737fe71c6f1e13b5077e3075a2be6955c3fc1ad | /experiments/GillespieOMC.py | 3a6494e490d028ac41b263146e1e7c2fd69e451b | [] | no_license | https://github.com/ambushed/gillespie | 8674f2982d7461257f5bd3864acad386600c1679 | 1be8d2b692cb023beee66bde44635e5d32068f97 | refs/heads/master | 2021-03-29T07:03:18.202625 | 2016-12-30T22:47:07 | 2016-12-30T22:47:07 | 66,765,642 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from gillespie import Setup
from gillespie import Gillespie
from gillespie.GillespiePrior import GillespiePrior
from gillespie.GillespieAdam import adam
from autograd import value_and_grad
from functools import partial
import autograd.numpy as np
model_file_name = "lotka_volterra.yaml"
setup = Setup(yaml_file_name=model_file_name)
propensities = setup.get_propensity_list()
original_parameters = np.array(setup.get_parameter_list())
species = setup.get_species()
incr = setup.get_increments()
nPaths = setup.get_number_of_paths()
T = setup.get_time_horizon()
seed = 100
numProc = 1
num_adam_iters = 20
observed_data = None
def generateData():
my_gillespie = Gillespie(species=species,propensities=propensities,
increments=incr,nPaths = nPaths,T=T,useSmoothing=True, seed = seed, numProc = numProc)
observed_data = my_gillespie.run_simulation(original_parameters)
return observed_data
def lossFunction(parameters, dummy):
gillespieGrad = Gillespie(species=species,propensities=propensities,increments=incr,
nPaths = nPaths,T=T,useSmoothing=True, seed = seed, numProc = numProc )
simulated_data = gillespieGrad.run_simulation(parameters)
return sum(0.5*(np.array(simulated_data)-np.array(observed_data))**2)
def run_path(parameters,idx):
global num_adam_iters
path_parameters = parameters[idx]
lossFunctionGrad = value_and_grad(lossFunction,idx)
cost_list,param0,param1,param2 = adam(lossFunctionGrad, path_parameters, num_iters=num_adam_iters)
    return cost_list[-1],param0[-1],param1[-1],param2[-1]
def get_jacobians(parameters,idx):
path_parameters = parameters[idx]
my_gillespie = Gillespie(species=species,propensities=propensities,increments=incr, nPaths = nPaths,T=T,useSmoothing=True, seed = seed)
gradients = my_gillespie.take_gradients(path_parameters)
return gradients
def gillespieOMC(n_samples = 1000):
global observed_data
observed_data = generateData()
parameter_count = len(setup.get_propensity_list())
prior = GillespiePrior(n_samples=n_samples,parameter_bounds=[(1,2)]*parameter_count)
parameter_space = prior.sample()
runner = partial(run_path, parameter_space)
params = map(runner,range(0,n_samples))
zipped_params = zip(*params)
runner_for_jacobians = partial(get_jacobians, zipped_params)
jacobians = map(runner_for_jacobians,range(0,n_samples))
if __name__ == "__main__":
gillespieOMC(2)
| UTF-8 | Python | false | false | 2,495 | py | 24 | GillespieOMC.py | 18 | 0.727455 | 0.716232 | 0 | 75 | 32.253333 | 139 |
reyronald/playground | 4,320,737,135,572 | 51d5964dd0a108422dc6c4f126bad59076030293 | 61355ebbb9444eebb51ac14cc61f114866fa7cf6 | /Graph Search, Shortest Paths, and Data Structures/2sum/main.py | 520e1d8db13d8b92395d91a7e09e09dbd07df456 | [] | no_license | https://github.com/reyronald/playground | 46c53aacb6498e8c071dc63ca408cea13e7057c0 | 9f4ec66af0108826cdbc3065e0520aaaf70abe7c | refs/heads/master | 2021-01-24T18:39:55.142852 | 2017-10-23T22:30:54 | 2017-10-23T22:30:54 | 84,469,679 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from two_sum import get_integer_set_from_file, find_2sum
INTEGER_SET = {-3, -1, 1, 2, 9, 11, 7, 6, 2}
RESULT = find_2sum(INTEGER_SET, 3, 10)
assert RESULT == 8
INTEGER_SET = {-2, 0, 0, 4}
RESULT = find_2sum(INTEGER_SET, 0, 4)
assert RESULT == 2
INTEGER_SET = {0, 1, 2, 3, 4, 5, 6}
RESULT = find_2sum(INTEGER_SET, 3, 4)
assert RESULT == 2
INTEGER_SET = {0, 1, 2, 3, 4, 5, 6}
RESULT = find_2sum(INTEGER_SET, 30, 40)
assert RESULT == 0
ROOT = "D:/repos/playground/Graph Search, Shortest Paths, and Data Structures/2sum/"
INTEGER_SET = get_integer_set_from_file(ROOT + "algo1-programming_prob-2sum.txt")
RESULT = find_2sum(INTEGER_SET, -10000, 10000)
assert RESULT == 427 # 6582.4 seconds
print RESULT
| UTF-8 | Python | false | false | 704 | py | 41 | main.py | 38 | 0.669034 | 0.569602 | 0 | 24 | 28.333333 | 84 |
zhangzeyang0/code | 5,720,896,453,471 | 3e5ffff4447dc235d82be2f0a72a5b5762c32a12 | cac44338635e5887a9828b5d7172ab20a16c8269 | /leetcode/-24. 两两交换链表中的节点.py | bbe52898c8b63c467a2bc6ba19fbe080aaaf82b5 | [] | no_license | https://github.com/zhangzeyang0/code | 170674d09da788a446ac8c0203869b65d603cdf4 | 328fdd303af1c8cde5bc9bb4c4f039e777de20e5 | refs/heads/master | 2020-03-25T23:07:31.935534 | 2018-09-18T02:33:16 | 2018-09-18T02:33:16 | 144,259,602 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
    '''
    Swap the nodes in adjacent pairs. Three nodes must be tracked at each step:
    the two nodes currently being swapped and the node immediately before them.
    '''
def swapPairs(self, head):
if not head:
return None
res = p1 = ListNode(0)
res.next = head
while p1.next and p1.next.next:
p0, p1, p2 = p1, p1.next, p1.next.next
p0.next, p1.next, p2.next = p2, p2.next, p1
print(p1.next.val, res.next.val)
return res.next
def swapPairs2(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if not head or head.next is None:
return head
pre = head
nxt = head.next.next
head = head.next
head.next = pre
pre.next = nxt
pre_node = head.next
node = head.next.next
while node and node.next:
pre = node
nxt = node.next.next
node = node.next
node.next = pre
pre.next = nxt
pre_node.next = node
pre_node = node.next
node = pre_node.next
return head
a1 = ListNode(1)
a2 = ListNode(2)
a3 = ListNode(3)
a4 = ListNode(4)
a5 = ListNode(5)
a1.next = a2
a2.next = a3
a3.next = a4
a4.next = a5
t = Solution()
node = t.swapPairs(a1)
while node:
print(node.val)
node = node.next
| UTF-8 | Python | false | false | 1,511 | py | 24 | -24. 两两交换链表中的节点.py | 24 | 0.524602 | 0.496881 | 0 | 63 | 21.873016 | 55 |
funkhauscreative/swr-test | 197,568,526,395 | 4ff77b3c7e720e490b8ebdfd7fc050dd2bed00f7 | bdc3dfaf79a175d4ac0a6ab73d40e3c406f89e2e | /mysite/production.py | 9267180596ef2f428208f67c2ee9c0772ee5052c | [] | no_license | https://github.com/funkhauscreative/swr-test | 59a264f78ae077a0f906e81b620accc3580eadbd | b6c22c481bafd27c3165e35dc376c3e0b0b17c57 | refs/heads/master | 2020-08-30T20:21:41.182418 | 2019-10-29T22:59:55 | 2019-10-29T22:59:55 | 218,479,099 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from .settings import *
DEBUG = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': os.environ.get('DB_DATABASE', ALPHACODE),
'USER': os.environ.get('DB_USER', 'root'),
'PASSWORD': os.environ.get('DB_PASSWORD', 'w4p9tywgsh324s3q3'),
# 'HOST': os.environ.get('DB_HOST', 'srv-captain--digis-db'),
'HOST': os.environ.get('DB_HOST', 'funkhaus.dev'),
'PORT': '3306',
}
}
| UTF-8 | Python | false | false | 458 | py | 24 | production.py | 21 | 0.563319 | 0.539301 | 0 | 15 | 29.6 | 71 |
DHNicoles/Pythontest | 6,193,342,890,502 | 7259788402589d0cb8d4c1cc666e6c1f33d9e1e3 | 1c48c6fd9be0dea83ba5e7a22478a291c62226e8 | /demo_1.py | e03e0834b6ea75f874d434a60e87ced6f02c27b8 | [] | no_license | https://github.com/DHNicoles/Pythontest | 13a1c1bc9a6ae31ec45928f35bcf0e95ddb1621d | 5715d158c1d043cb13b0358e1cbe3b89fcd3760b | refs/heads/master | 2016-07-26T13:56:17.515868 | 2015-07-29T01:56:41 | 2015-07-29T01:56:41 | 39,867,418 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# A = int(raw_input('enter A'));
# B = int(raw_input('enter B'));
# print 'A+B=' ,(A+B),'A-B=' ,(A-B)
# print A**B
# for i in range(11):
# print i,
# print
# i=0
# while(i!=11):
# print i,
# i=i+1
# print
# array = [0,1,2,3,4,5,6,7,8,9,10]
# for ele in array:
# print ele,
# print
# end = False
# while not end :
# val = raw_input('enter a number:\n')
# number = int(val)
# if number>0 :
# print number,'is a zhengshu'
# elif number<0 :
# print number,'is a fushu'
# else:
# print number,'is 0'
# flag = raw_input('y to continue,n to stop:\n')
# if flag == 'y':
# end = False
# else :
# end = True
strexp = raw_input()
i=0
while i<len(strexp):
print strexp[i],
i = i+1
print
for i in range(len(strexp)):
print strexp[i],
print
# print '''hello everybody,how are\
# you being recently.I \
# will come back soon'''
import sys;x = 'foo';sys.stdout.write(x+'\n');
| UTF-8 | Python | false | false | 891 | py | 6 | demo_1.py | 4 | 0.575758 | 0.549944 | 0 | 49 | 17.204082 | 49 |
SpellMender/VG_Dev-CS90R | 9,981,503,999,253 | f56548783f3150055629aba1afbc9959775ab7c4 | 1a409a5abd236d36d9403957398d3b048bd9db2e | /Animation/entity.py | 60f8c6ab8761c949a6141d2ef8788c64c797b8a6 | [] | no_license | https://github.com/SpellMender/VG_Dev-CS90R | efdc65833648e38fb8d0f7459ca1772b7261a081 | 3a461e6890a998172c0059e1aad7b83a2b037fdc | refs/heads/master | 2020-03-19T00:48:32.936680 | 2019-09-04T18:54:05 | 2019-09-04T18:54:05 | 135,502,902 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import pygame
import graphics
class Entity(object):
def __init__(self):
self.x = 0
self.y = 0
self.sprite = None
# Change Sprites to combat
class Vizzi(Entity):
frames = {
# neutral
"start": [(0, 4, 69, 51)],
"one": [(77, 4, 69, 51)],
"two": [(166, 4, 69, 51)],
"three": [(262, 4, 69, 51)],
"four": [(357, 4, 69, 51)]
}
def __init__(self):
super(Vizzi, self).__init__()
self.sprite = graphics.load(
os.path.join("Animation V.png")
)
self.frame = self.frames["start"][0]
self.attack = False
self.aFrame = 0
# self.frame_num = 0
# self.facing = "down"
# self.speed = 0.5
# self.velocity = [0, 0]
# Change
def update(self):
if self.attack:
if self.aFrame == 0:
pygame.mixer.init()
coin = pygame.mixer.Sound("Thwack.wav")
coin.play()
self.frame = self.frames["one"][0]
self.aFrame = 1
elif self.aFrame == 1:
self.frame = self.frames["two"][0]
self.aFrame = 2
elif self.aFrame == 2:
self.frame = self.frames["three"][0]
self.aFrame = 3
elif self.aFrame == 3:
self.frame = self.frames["four"][0]
self.aFrame = 4
elif self.aFrame == 4:
self.frame = self.frames["start"][0]
self.aFrame = 0
self.attack = False
# self.x += self.velocity[0]
# self.y += self.velocity[1]
# self.frame_num = (self.frame_num + self.speed * 0.25) % 4
# self.frame = self.frames[self.facing][int(self.frame_num)]
# change
def key_handler(self, e):
if e.type == pygame.KEYDOWN:
if e.key == pygame.K_a:
self.attack = True
# self.velocity[1] -= self.speed
# self.facing = "up"
# elif (e.key == pygame.K_DOWN):
# self.velocity[1] += self.speed
# self.facing = "down"
# elif (e.key == pygame.K_LEFT):
# self.velocity[0] -= self.speed
# self.facing = "left"
# elif (e.key == pygame.K_RIGHT):
# self.velocity[0] += self.speed
# self.facing = "right"
# elif (e.type == pygame.KEYUP):
# if (e.key == pygame.K_UP):
# self.velocity[1] += self.speed
# elif (e.key == pygame.K_DOWN):
# self.velocity[1] -= self.speed
# elif (e.key == pygame.K_LEFT):
# self.velocity[0] += self.speed
# elif (e.key == pygame.K_RIGHT):
# self.velocity[0] -= self.speed
|
UTF-8
|
Python
| false | false | 2,868 |
py
| 31 |
entity.py
| 28 | 0.449442 | 0.423291 | 0 | 94 | 29.510638 | 68 |
KaranKaur/Leetcode
| 16,810,502,014,890 |
25a16699d872f7467f92c9e4483c52467df63c0b
|
39f78b00d7d79a4e0f29f6b1fe15f20ecc74bea2
|
/540 - Single Element in a Sorted Array.py
|
8e96ea7ed027a2f3d01ef53a3a8c009df2832d00
|
[] |
no_license
|
https://github.com/KaranKaur/Leetcode
|
ca1ac5a590de720d37a3c0fca014065086e6e38e
|
765fb39ba57634d2c180eb1fd90522c781d409c4
|
refs/heads/master
| 2020-03-28T09:44:12.318384 | 2018-09-10T14:07:05 | 2018-09-10T14:07:05 | 148,056,159 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Given a sorted array consisting of only integers where every element appears
twice except for one element which appears once. Find this single element that
appears only once.
Example 1:
Input: [1,1,2,3,3,4,4,8,8]
Output: 2
Example 2:
Input: [3,3,7,7,10,11,11]
Output: 10
Note: Your solution should run in O(log n) time and O(1) space.
Binary Search
Use XOR: since you want pairs, (0,1), (2,3), etc.
"""
#O(log(n))
def single_ele(nums):
lo, hi = 0, len(nums)-1
while lo < hi:
        mid = (lo + hi) // 2
if nums[mid] == nums[mid^1]:
lo = mid + 1
else:
hi = mid
return nums[lo]
x = [3,3,7,7,10,11,11]
print(single_ele(x))
#O(n) - dict construction
def two(nums):
dict_temp = {}
for i in nums:
dict_temp[i] = dict_temp.get(i, 0) + 1
for k, v in dict_temp.items():
if v == 1:
return k
print(two(x))
|
UTF-8
|
Python
| false | false | 901 |
py
| 53 |
540 - Single Element in a Sorted Array.py
| 52 | 0.580466 | 0.528302 | 0 | 49 | 17.408163 | 78 |
NweHlaing/Python_Learning_Udemy_Course
| 15,805,479,687,399 |
d624e5f282bb18c6eec32442a45afed6e84b727a
|
bafb87e41958f747f99c362b693e7c6387184a8d
|
/Section_13_Python_Generator/problem2.py
|
190336854d149c9ad6f349f193d620b5ca0de2eb
|
[] |
no_license
|
https://github.com/NweHlaing/Python_Learning_Udemy_Course
|
e4a92c4cb57e2c18338bb4db2fb290fdc671b871
|
c54989ce4610b1cc9c3c9983f9fbc3aedc031256
|
refs/heads/master
| 2022-11-18T17:38:15.623158 | 2020-07-17T04:14:34 | 2020-07-17T04:14:34 | 273,390,624 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import random
random.randint(1,10)
def rand_num(low,high,n):
for i in range(n):
yield random.randint(low, high)
for num in rand_num(1,10,12):
print(num)
|
UTF-8
|
Python
| false | false | 184 |
py
| 44 |
problem2.py
| 38 | 0.603261 | 0.559783 | 0 | 10 | 16.6 | 39 |
lingxiankong/qinling
| 13,529,147,015,608 |
b2d87fa3fc86a2acc9bbb853d7442bb50be42057
|
49a66e8c8cf8fa5e3cc7997ff965b287a817cc55
|
/qinling/engine/service.py
|
613d51839be08ca5f767ccd4c9bcae4d69c0305d
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/lingxiankong/qinling
|
f20e6ac943af908c2f58b5a7dd2d9ca4e71d573a
|
e18f80345ae519c9308cfc93fdf53b82c9be7618
|
refs/heads/master
| 2020-06-01T09:11:55.875880 | 2019-05-28T21:58:40 | 2019-06-01T11:10:27 | 190,727,149 | 0 | 1 |
Apache-2.0
| true | 2019-06-07T10:39:38 | 2019-06-07T10:39:37 | 2019-06-02T11:17:52 | 2019-06-07T02:40:43 | 1,524 | 0 | 0 | 0 | null | false | false |
# Copyright 2017 Catalyst IT Limited
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import cotyledon
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_messaging.rpc import dispatcher
from qinling.db import api as db_api
from qinling.engine import default_engine as engine
from qinling.orchestrator import base as orchestra_base
from qinling import rpc
from qinling.services import periodics
from qinling.utils.openstack import keystone as keystone_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class EngineService(cotyledon.Service):
def __init__(self, worker_id):
super(EngineService, self).__init__(worker_id)
self.server = None
def run(self):
qinling_endpoint = keystone_utils.get_qinling_endpoint()
orchestrator = orchestra_base.load_orchestrator(CONF, qinling_endpoint)
db_api.setup_db()
topic = CONF.engine.topic
server = CONF.engine.host
transport = messaging.get_rpc_transport(CONF)
target = messaging.Target(topic=topic, server=server, fanout=False)
endpoint = engine.DefaultEngine(orchestrator, qinling_endpoint)
access_policy = dispatcher.DefaultRPCAccessPolicy
self.server = messaging.get_rpc_server(
transport,
target,
[endpoint],
executor='threading',
access_policy=access_policy,
serializer=rpc.ContextSerializer(
messaging.serializer.JsonPayloadSerializer())
)
LOG.info('Starting function mapping periodic task...')
periodics.start_function_mapping_handler(endpoint)
LOG.info('Starting engine...')
self.server.start()
def terminate(self):
periodics.stop()
if self.server:
LOG.info('Stopping engine...')
self.server.stop()
self.server.wait()
|
UTF-8
|
Python
| false | false | 2,452 |
py
| 115 |
service.py
| 59 | 0.683931 | 0.680669 | 0 | 70 | 34.028571 | 79 |
rafaelaleixo/import_raw_signal
| 12,979,391,169,420 |
ac616a0ff98f67d31d727e7184b4aacc02c7b942
|
970ad3fa8aa8d278957b72eaa393d251d8f6efd5
|
/labtrans/data/compress.py
|
c6fae251c14d591d5b10afc8cf27453f1fad1180
|
[] |
no_license
|
https://github.com/rafaelaleixo/import_raw_signal
|
a7027b58150a014f09d9134580aa4008e55e2bf0
|
bdc2f10b321e98249bd85c0e6a6bfb06647bf92b
|
refs/heads/master
| 2018-12-08T13:56:04.619651 | 2018-11-30T12:17:50 | 2018-11-30T12:17:50 | 140,875,461 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Created on 21/04/2014
@author: ivan
'''
import traceback
# internal
from labtrans.data.zipper import InMemoryZip
from labtrans.utils import log
def zip_files(files_list):
imz = InMemoryZip()
for f_data in files_list:
try:
imz.append(f_data[0], f_data[1])
        except Exception:
log.append(traceback.format_exc())
return imz.read()
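# Usage sketch (assumed input shape): each f_data is a (filename, contents)
# pair, e.g. zip_files([('a.txt', 'hello')]) returns the zip archive bytes.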
|
UTF-8
|
Python
| false | false | 384 |
py
| 108 |
compress.py
| 90 | 0.619792 | 0.59375 | 0 | 21 | 17.333333 | 46 |
barckcode/api_salsa
| 19,104,014,573,880 |
08f721727521fbfd5389c70c249cf077b8de1a49
|
f8556db88694c57e57aa996d0db59eac50366aab
|
/api/config/db.py
|
7db7713c60852feb083191ae8e2e0068b2b69e69
|
[
"MIT"
] |
permissive
|
https://github.com/barckcode/api_salsa
|
667ac69c24f2cb31479364414edc977f78c28199
|
4345d3b249646bb4055147f2481912a9c10ed7e2
|
refs/heads/main
| 2023-08-21T19:15:19.059618 | 2021-10-01T10:01:53 | 2021-10-01T10:01:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
from sqlalchemy import create_engine, MetaData
#
# PROD
##
# USER_DATABASE = os.getenv("DB_USER")
# PASSWORD_DATABASE = os.getenv("DB_PASSWORD")
# HOST_DATABASE = os.getenv("DB_HOST")
# DATABASE = os.getenv("DB_DATABASE")
#
# Local
##
USER_DATABASE = "postgres"
PASSWORD_DATABASE = "test"
HOST_DATABASE = "127.0.0.1"
DATABASE = "salsa"
URL_CONNECTION = f"postgresql://{USER_DATABASE}:{PASSWORD_DATABASE}@{HOST_DATABASE}:5432/{DATABASE}"
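# With the local values above this expands to (illustrative):
# postgresql://postgres:test@127.0.0.1:5432/salsa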
meta = MetaData()
engine_postgres = create_engine(URL_CONNECTION)
db_connection = engine_postgres.connect()
|
UTF-8
|
Python
| false | false | 567 |
py
| 13 |
db.py
| 9 | 0.716049 | 0.698413 | 0 | 24 | 22.625 | 100 |
shane-dawson/prenuvo_challenge
| 4,131,758,576,599 |
7629b443e4ee1815f753f1524e87123692e17e49
|
73031cf7d2258086e2c1c08a55b602dccfb4aaf9
|
/prenuvo/__init__.py
|
fe75bf2f3362c0721a934e9068940255771d9b62
|
[] |
no_license
|
https://github.com/shane-dawson/prenuvo_challenge
|
685c5562c9d78eddb70f07d53ec862558a4714e9
|
4c2cde16e518142923171730c5c5089a483d3e4d
|
refs/heads/master
| 2022-12-13T10:41:51.684884 | 2018-12-20T18:14:14 | 2018-12-20T18:14:14 | 162,630,459 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from prenuvo import app
from prenuvo import views
|
UTF-8
|
Python
| false | false | 50 |
py
| 20 |
__init__.py
| 14 | 0.84 | 0.84 | 0 | 2 | 24 | 25 |
YingyingHan1994/IS590_DataMashUpProject
| 16,939,351,036,488 |
be55c96fd317d9d5b9c88bab74d77651d52d0b0a
|
880440604614c22a0e019e0c75fc47053315bb22
|
/3.intermediatedataset/1. PrincetonDataset/princeton_clean6_split_iflonglatitude.py
|
c1d091ea8474241a5d860deb9bf6fc24a5f0827e
|
[] |
no_license
|
https://github.com/YingyingHan1994/IS590_DataMashUpProject
|
dc0335c73db6c8e10421e34fc4a7382f0f1da8d7
|
044551a1c72e3e45567f7ff49ffe5d2f07654d5b
|
refs/heads/master
| 2023-03-20T13:58:19.146040 | 2021-03-14T02:12:35 | 2021-03-14T02:12:35 | 211,170,061 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import json
# Read the json file
with open('princeton_clean5_countryvaluefixed.json', 'r') as fin:
data = json.load(fin)
nonelist = []
for records in data:
#print(records)
#print(records["State"])
# Check the record of which the state value is "probably Chiapas". Re-check its state information according to the given latitude/longitude number.
# if records["State"] == "probably Chiapas":
# #print(records)
# records["State"] = "San Luis Potosi"
# records["City"] = "Villa de Ramos"
# # if records["State"] == "probably Chiapas":
# # print(records)
#print(records)
latitude = records["Latitude"]
longitude = records["Longitude"]
if latitude == None:
nonelist.append(records)
#print(nonelist)
#print(len(nonelist)) the result 1126
if longitude == None:
nonelist.append(records)
#print(nonelist)
#print(len(nonelist)) the result is 3420
# Now, the nonelist include records either latitude number is none or longitude is none. This might includes three cases:
# (1) longitude and latitude are both none. (2)Longitude is none but latitude is not.
# (3) Latitude is none but longitude is not
# Delete the duplicates in the nonlist
nonelist_deleteduplicates = []
for records in nonelist:
if records in nonelist_deleteduplicates:
continue
else:
nonelist_deleteduplicates.append(records)
# print(nonelist_deleteduplicates)
# print(len(nonelist_deleteduplicates))
# Write the nonelist_deleteduplicates out in a json file named "princeton_clean7_nonelist.json"
with open('princeton_clean7_nonelist.json', 'w') as foute:
    json.dump(nonelist_deleteduplicates, foute)
# Write the records with longitude number and latitude number out in a file names "princeton_clean7_withlongitudelatitude.json
longlatitudelist = []
for records in data:
if records in nonelist_deleteduplicates:
continue
else:
longlatitudelist.append(records)
# print(longlatitudelist)
# print(len(longlatitudelist))
with open('princeton_clean7_withlonglatitude.json', 'w') as fout:
json.dump(longlatitudelist, fout)
|
UTF-8
|
Python
| false | false | 2,210 |
py
| 46 |
princeton_clean6_split_iflonglatitude.py
| 14 | 0.688235 | 0.680995 | 0 | 64 | 32.28125 | 147 |
Unoblueboy/Tashys-Online-Store
| 4,002,909,561,602 |
e1d44a8f6d46b180663535f0bd41c3d0cec5aabb
|
361fb8aa452cd44999e1173d50453d133fb009d0
|
/storeFront/urls.py
|
905b7c74ab7b8d2d8e80577d38999dc3e114ced3
|
[] |
no_license
|
https://github.com/Unoblueboy/Tashys-Online-Store
|
419a4010c215b73cd61332e675ee8d7961118e81
|
a4b16140d6ed8610227bd86dc0e12352dc96a989
|
refs/heads/master
| 2018-02-09T19:35:17.981662 | 2017-07-11T02:15:08 | 2017-07-11T02:15:08 | 96,817,184 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls import url
from . import views
app_name='storeFront'
urlpatterns = [
# ex: /
url(r'^$', views.index, name='index'),
# ex: /5/
url(r'^product/(?P<slug>[-\w]+)/$', views.description, name='description')
]
|
UTF-8
|
Python
| false | false | 243 |
py
| 10 |
urls.py
| 6 | 0.600823 | 0.596708 | 0 | 11 | 21.090909 | 78 |
queryfish/jobcrawler
| 11,089,605,584,609 |
d15726a0a508ee0539c9be4a14aced7d73ebdc7a
|
4bde6dcebd147723e693ea43ba1f36fbcfd04dc3
|
/tutorial/spiders/bossofmy.py
|
4e64ea34874ef34ce7ea113661b0a3a3518e3cf2
|
[
"MIT"
] |
permissive
|
https://github.com/queryfish/jobcrawler
|
95604637edf6c9cc8ea96648efdcb730d3076338
|
f0cf70e6ca909648e5a0af37dcc5fb3a548a4cfa
|
refs/heads/master
| 2020-06-15T00:06:50.140668 | 2019-11-26T10:26:07 | 2019-11-26T10:26:07 | 195,160,598 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
#coding:utf-8
import scrapy
from tutorial.items import TutorialItem
from scrapy.http import Request
from scrapy.spiders import CrawlSpider
from scrapy.selector import Selector
import json
import time
import random
import redis
from scrapy.conf import settings
# zhipin crawler
class ScriptSlug(scrapy.Spider):
name = "bossofmy"
allowed_domains = ["www.zhipin.com"]
    current_page = 1  # starting page number
    max_page = 15  # maximum page number
start_urls = [
"https://www.zhipin.com/c101010100-p110101/y_6-h_101010100/?ka=sel-salary-6",
]
custom_settings = {
"ITEM_PIPELINES":{
'tutorial.pipelines.BossOfMinePipeline': 300,
},
# "DOWNLOADER_MIDDLEWARES":{
# 'tutorial.middlewares.ScriptSlugMiddleware': 299,
# # 'tutorial.middlewares.ProxyMiddleware':301
# },
"DEFAULT_REQUEST_HEADERS":{
'Accept': 'application/json',
'Accept-Language': 'zh-CN,zh;q=0.9',
'User-Agent':'Mozilla/5.0 (Linux; Android 8.0; Pixel 2 Build/OPD3.170816.012) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.117 Mobile Safari/537.36',
'Referer':'https://www.zhipin.com/',
'X-Requested-With':"XMLHttpRequest"
# "cookie":"lastCity=101020100; JSESSIONID=""; Hm_lvt_194df3105ad7148dcf2b98a91b5e727a=1532401467,1532435274,1532511047,1532534098; __c=1532534098; __g=-; __l=l=%2Fwww.zhipin.com%2F&r=; toUrl=https%3A%2F%2Fwww.zhipin.com%2Fc101020100-p100103%2F; Hm_lpvt_194df3105ad7148dcf2b98a91b5e727a=1532581213; __a=4090516.1532500938.1532516360.1532534098.11.3.7.11"
}
}
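    # custom_settings overrides the project-wide Scrapy settings for this
    # spider only, so the pipeline and header choices above do not leak into
    # other spiders.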
def parse(self, response):
# js = json.loads(response.body)
# html = js['html']
items = response.xpath('//li[@class="item"]/a/@href')
print(items)
host = 'https://www.zhipin.com'
x = 1
y = 1
for item in items:
detail_url = item.extract()
print('extracting href from alink')
print(item.extract())
# print(item.extract_first())
            # position_name = item.css('h4::text').extract_first()  # position name
            # salary = item.css('.salary::text').extract_first() or ''  # salary
            # work_year = item.css('.msg em:nth-child(2)::text').extract_first() or '不限'  # years of experience ('不限' = no requirement)
            # educational = item.css('.msg em:nth-child(3)::text').extract_first()  # education level
# meta = {
# "position_name":position_name,
# "salary":salary,
# "work_year":work_year,
# "educational":educational
# }
#
# # time.sleep(int(random.uniform(50, 70)))
            # # initialize redis
# pool= redis.ConnectionPool(host='localhost',port=6379,decode_responses=True)
# r=redis.Redis(connection_pool=pool)
# key = settings.get('REDIS_POSITION_KEY')
# position_id = url.split("/")[-1].split('.')[0]
# print('further url:', detail_url)
# print('key:', key, "value:", position_id);
# print('parsing item: ...\n')
# print(meta)
url = host + detail_url
yield Request(url,callback=self.parse_item)
# if (r.sadd(key,position_id)) == 1:
# yield Request(url,callback=self.parse_item,meta=meta)
# if self.current_page < self.max_page:
# self.current_page += 1
# api_url = "https://scriptslug.com/scripts"+"?pg="+str(self.current_page)
# time.sleep(int(random.uniform(1, 5)))
# yield Request(api_url,callback=self.parse)
# pass
def parse_item(self,response):
# target = response.css('.script-single__download').xpath('./@href').extract_first()
item = TutorialItem()
print('Company Name')
company_name = response.xpath('//div[@class="info-primary"]/div/div[@class="name"]/text()').extract_first()
print(company_name)
print("Salary: ")
s = response.xpath('//div[@class="job-banner"]/div/span[@class="salary"]/text()').extract_first()
print(s)
print('Job Description')
jd= response.xpath('//div[@class="detail-content"]/div[@class="job-sec"]/div[@class="text"]').extract_first()
print(jd)
item['company_name'] = company_name
item['body']=jd
item['salary']=s
yield item
time.sleep(8)
# item = TutorialItem()
# q = response.css
# # item['address'] = q('.location-address::text').extract_first()
# # item['create_time'] = q('.job-tags .time::text').extract_first()
# # item['body'] = q('.text').xpath('string(.)').extract_first()
# # # item['body'] = item['body'].encode('utf-8')
# # # print(item['body'])
# # item['company_name'] = q('.business-info h4::text').extract_first()
# # item['postion_id'] = response.url.split("/")[-1].split('.')[0]
# # item = dict(item, **response.meta )
# pdf_url = q('.script-single__download').extract_first()
# print("parsing PDF...:")
# print(item)
# yield item
# yield Request(
# url=target,
# callback=self.save_pdf
# )
|
UTF-8
|
Python
| false | false | 5,318 |
py
| 13 |
bossofmy.py
| 12 | 0.563688 | 0.511597 | 0 | 125 | 41.08 | 366 |
besenthil/Algorithms
| 8,340,826,507,456 |
507d8f35929fd298ba2eb7148b241d599105101a
|
03c7bed4cbc25c8468f5ccebd71d847ff694d308
|
/finddigits.py
|
90a8c42877a919fb5686f61a467e3f7e4782554b
|
[] |
no_license
|
https://github.com/besenthil/Algorithms
|
faff1486c560bafbfd8f6fb7a0422d1b8b795d6e
|
5e8a49ffdc7aad1925ef0354208970d3d2cb62d2
|
refs/heads/master
| 2022-02-14T04:26:09.282976 | 2022-02-13T13:35:12 | 2022-02-13T13:35:12 | 51,376,159 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
for _ in range(int(input())):
N = int(input())
print (len([digit for digit in map(int,[x for x in str(N)]) if digit != 0 and N%digit == 0]))
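# Worked example: N = 1012 has digits 1, 0, 1, 2; zero is skipped and 1, 1, 2
# all divide 1012, so 3 is printed for that test case.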
|
UTF-8
|
Python
| false | false | 153 |
py
| 99 |
finddigits.py
| 99 | 0.555556 | 0.542484 | 0 | 3 | 48.666667 | 97 |
cescgina/peleffy
| 17,025,250,383,185 |
78b323c3d4ca5087605a436b2384b4f8bb5d0dba
|
79ce54603ce8fd96cd9b65e021106ca92fef6aff
|
/offpele/tests/test_main.py
|
5470f5947efbf69b37fa0ca797d64fafc92cb6ca
|
[
"Python-2.0",
"MIT"
] |
permissive
|
https://github.com/cescgina/peleffy
|
947f0a08ee78c95b7afd3570959766a3417c04af
|
fc68116dc98050ed3c2c92270d8218565d099801
|
refs/heads/master
| 2023-06-17T11:36:58.851715 | 2020-09-22T15:14:04 | 2020-09-22T15:14:04 | 307,655,233 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
This module contains the tests to check offpele's molecular representations.
"""
import pytest
import os
import tempfile
from offpele.main import run_offpele, handle_output_paths
from offpele.utils import get_data_file_path, temporary_cd
from offpele.topology import Molecule
FORCEFIELD_NAME = 'openff_unconstrained-1.2.0.offxml'
class TestMain(object):
"""
It wraps all tests that involve the Molecule class.
"""
def test_offpele_default_call(self):
"""
It checks the default call of offpele's main function.
"""
LIGAND_PATH = 'ligands/BNZ.pdb'
ligand_path = get_data_file_path(LIGAND_PATH)
with tempfile.TemporaryDirectory() as tmpdir:
with temporary_cd(tmpdir):
run_offpele(ligand_path, output=tmpdir)
def test_offpele_custom_call(self):
"""
It checks the custom call of offpele's main function.
"""
LIGAND_PATH = 'ligands/BNZ.pdb'
ligand_path = get_data_file_path(LIGAND_PATH)
with tempfile.TemporaryDirectory() as tmpdir:
with temporary_cd(tmpdir):
run_offpele(ligand_path,
forcefield=FORCEFIELD_NAME,
resolution=10,
charges_method='gasteiger',
output=tmpdir,
with_solvent=True,
as_datalocal=True)
def test_default_output_paths(self):
"""
It checks the default output paths that are used for each parameter
file from offpele.
"""
def from_PosixPath_to_string(paths):
"""
Convert PosixPaths to strings
"""
return map(str, paths)
molecule = Molecule(smiles='c1ccccc1', name='benzene', tag='BNZ')
rotlib_path, impact_path, solvent_path = \
handle_output_paths(molecule, '', False)
# Convert PosixPaths to strings
rotlib_path, impact_path, solvent_path = map(
str, [rotlib_path, impact_path, solvent_path])
assert rotlib_path == 'BNZ.rot.assign', 'Unexpected default ' \
+ 'rotamer library path'
assert impact_path == 'bnzz', 'Unexpected default Impact ' \
+ 'template path'
assert solvent_path == 'ligandParams.txt', 'Unexpected default ' \
+ 'solvent parameters path'
with tempfile.TemporaryDirectory() as tmpdir:
with temporary_cd(tmpdir):
# To avoid the complain about unexistent folder
os.mkdir('output')
rotlib_path, impact_path, solvent_path = \
handle_output_paths(molecule, 'output', False)
# Convert PosixPaths to strings
rotlib_path, impact_path, solvent_path = map(
str, [rotlib_path, impact_path, solvent_path])
assert rotlib_path == 'output/BNZ.rot.assign', 'Unexpected default ' \
+ 'rotamer library path'
assert impact_path == 'output/bnzz', 'Unexpected default Impact ' \
+ 'template path'
assert solvent_path == 'output/ligandParams.txt', 'Unexpected ' \
+ 'default solvent parameters path'
rotlib_path, impact_path, solvent_path = \
handle_output_paths(molecule, '', True)
# Convert PosixPaths to strings
rotlib_path, impact_path, solvent_path = map(
str, [rotlib_path, impact_path, solvent_path])
assert rotlib_path == 'DataLocal/LigandRotamerLibs/' \
+ 'BNZ.rot.assign', 'Unexpected default rotamer library path'
assert impact_path == 'DataLocal/Templates/OFF/Parsley/' \
+ 'HeteroAtoms/bnzz', 'Unexpected default Impact template'
assert solvent_path == 'DataLocal/OBC/ligandParams.txt', \
'Unexpected default solvent parameters path'
with tempfile.TemporaryDirectory() as tmpdir:
with temporary_cd(tmpdir):
# To avoid the complain about unexistent folder
os.mkdir('output')
rotlib_path, impact_path, solvent_path = \
handle_output_paths(molecule, 'output', True)
# Convert PosixPaths to strings
rotlib_path, impact_path, solvent_path = map(
str, [rotlib_path, impact_path, solvent_path])
assert rotlib_path == 'output/DataLocal/LigandRotamerLibs/' \
+ 'BNZ.rot.assign', 'Unexpected default rotamer library path'
assert impact_path == 'output/DataLocal/Templates/OFF/Parsley/' \
+ 'HeteroAtoms/bnzz', 'Unexpected default Impact template path'
assert solvent_path == 'output/DataLocal/OBC/ligandParams.txt', \
'Unexpected default solvent parameters path'
|
UTF-8
|
Python
| false | false | 4,813 |
py
| 10 |
test_main.py
| 9 | 0.596302 | 0.594847 | 0 | 126 | 37.198413 | 78 |
scentrade/website
| 13,417,477,832,992 |
29aaa2595ac84f681f28ad36525ff3f2944ab80c
|
63a44b93343dfe70f7c2ec212b0f56bfb125e631
|
/utils/templatetags/urls_tags.py
|
8a5d57f67194cd75db4104362cdf581e12eb4c09
|
[] |
no_license
|
https://github.com/scentrade/website
|
ca7a3f71418a3ba79e60661b833e29c4f816c197
|
89d8a01d62af03fc8439daac4d19cd88ad518def
|
refs/heads/master
| 2021-01-10T04:06:23.838276 | 2015-08-22T14:21:43 | 2015-08-22T14:21:43 | 43,756,451 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# encoding: utf-8
from django import template
from utils.url import make_absolute_url as mau
register = template.Library()
@register.simple_tag
def make_absolute_url(path):
"""
Divide the space description in two paragraphs.
"""
return mau(path)
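# Template usage sketch (assumed): {% load urls_tags %} and then
# {% make_absolute_url request.path %} to render the absolute URL.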
|
UTF-8
|
Python
| false | false | 265 |
py
| 133 |
urls_tags.py
| 61 | 0.713208 | 0.709434 | 0 | 14 | 18 | 51 |
ali0003433/pred-by-issue-app
| 12,833,362,306,273 |
aaf0ead0997d3cf2f0a278f00520061021ae5d17
|
77209f2bc1d85545ada195ff99674ef3578b5f03
|
/scripts.py
|
cf4b6fe76f0ad59431ca573f9bb6291a05aeb266
|
[] |
no_license
|
https://github.com/ali0003433/pred-by-issue-app
|
a3c9e19f615d07477ba3e5f1543c0ee555224281
|
e8e78a79ad2635612cf65883bbadb88559dcb822
|
refs/heads/master
| 2020-09-22T01:22:00.111383 | 2019-12-07T16:30:05 | 2019-12-07T16:30:05 | 224,999,187 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import sys
import json
import pickle
import pandas as pd
import numpy as np
import joblib
import random
from sklearn.linear_model import LogisticRegression
def dummies(res):
    '''Convert a survey response code into dummy variables.

    Assumes the valid codes are 2, 3, 4 and 8, matching the imiss_*_2.0 ...
    imiss_*_8.0 column suffixes used below; any other code maps to all zeros.
    '''
    categories = [2, 3, 4, 8]
    result = [0, 0, 0, 0]
    if int(res) in categories:
        result[categories.index(int(res))] = 1
    return result
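# e.g. dummies('3') -> [0, 1, 0, 0] and dummies('8') -> [0, 0, 0, 1];
# code 1 (and any unlisted code) yields [0, 0, 0, 0].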
def make_prediction(res_size, res_racial, res_climate, res_budget, res_immigration, res_terrorism, res_gender):
    '''Build a DataFrame from the user's responses, run it through the model
    and return a prediction.
    '''
size1, size2, size3, size4 = dummies(res_size)
racial1, racial2, racial3, racial4 = dummies(res_racial)
clim1, clim2, clim3, clim4 = dummies(res_climate)
bgt1, bgt2, bgt3, bgt4 = dummies(res_budget)
imm1, imm2, imm3, imm4 = dummies(res_immigration)
trr1, trr2, trr3, trr4 = dummies(res_terrorism)
gdr1, gdr2, gdr3, gdr4 = dummies(res_gender)
data = {'imiss_c_2016_2.0': imm1,
'imiss_c_2016_3.0': imm2,
'imiss_c_2016_4.0': imm3,
'imiss_c_2016_8.0': imm4,
'imiss_f_2016_2.0': trr1,
'imiss_f_2016_3.0': trr2,
'imiss_f_2016_4.0': trr3,
'imiss_f_2016_8.0': trr4,
'imiss_l_2016_2.0': clim1,
'imiss_l_2016_3.0': clim2,
'imiss_l_2016_4.0': clim3,
'imiss_l_2016_8.0': clim4,
'imiss_p_2016_2.0': bgt1,
'imiss_p_2016_3.0': bgt2,
'imiss_p_2016_4.0': bgt3,
'imiss_p_2016_8.0': bgt4,
'imiss_u_2016_2.0': size1,
'imiss_u_2016_3.0': size2,
'imiss_u_2016_4.0': size3,
'imiss_u_2016_8.0': size4,
'imiss_x_2016_2.0': racial1,
'imiss_x_2016_3.0': racial2,
'imiss_x_2016_4.0': racial3,
'imiss_x_2016_8.0': racial4,
'imiss_y_2016_2.0': gdr1,
'imiss_y_2016_3.0': gdr2,
'imiss_y_2016_4.0': gdr3,
'imiss_y_2016_8.0': gdr4,
}
df = pd.DataFrame(data, index=[0])
print('dataframe created')
print(df)
print('df length:', len(df.columns))
clf = joblib.load('./clf_2.pkl')
prediction = clf.predict(df)
print(prediction)
if prediction == 1.0:
print('Clinton')
prediction = 'Hillary Clinton'
return prediction
elif prediction == 2.0:
print('Donald J. Trump')
prediction = 'Donald J. Trump'
return prediction
elif prediction == 3.0:
print('Other behavior')
prediction = 'A third party'
return prediction
else:
return prediction
|
UTF-8
|
Python
| false | false | 2,638 |
py
| 52 |
scripts.py
| 44 | 0.55724 | 0.466641 | 0 | 78 | 32.833333 | 111 |
Easterok/web
| 7,696,581,419,952 |
3d9a315c750e3e96a9f912ad51d7d6ff599d0c1c
|
10b0f2e5ee98ed3e2f4ed2d8e0cdbd69df2f9845
|
/boards/models.py
|
60ca8f8e99c34ac7708b787823961309663e22ae
|
[] |
no_license
|
https://github.com/Easterok/web
|
799446e59194f2a4d040ebd8aa191b76421964e1
|
6b3513c86feb36fd06695edca893c91cb3ad298a
|
refs/heads/master
| 2021-01-24T12:47:30.300144 | 2018-03-26T12:32:51 | 2018-03-26T12:32:51 | 123,149,266 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
from django.contrib.auth.models import User
class Board(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
board_name = models.CharField(max_length=100, db_index=True)
board_status = models.IntegerField(default=0)
last_change_board = models.DateTimeField(auto_now_add=True)
command = models.BooleanField(default=0)
def __str__(self):
return "board name: {} user: {}".format(self.board_name, self.user)
class List(models.Model):
board_id = models.ForeignKey(Board, on_delete=models.CASCADE, db_index=True)
list_name = models.CharField(max_length=100, db_index=True)
list_private = models.IntegerField(default=0)
list_status = models.IntegerField(default=0)
list_time_create = models.DateTimeField(auto_now_add=True)
class CardsOnList(models.Model):
list_id = models.ForeignKey(List, on_delete=models.CASCADE)
card_name = models.CharField(max_length=100, db_index=True)
user_name = models.ForeignKey(User, on_delete=models.CASCADE)
card_time_create = models.DateTimeField(auto_now_add=True)
class CommentsInCard(models.Model):
user_id = models.ForeignKey(User, on_delete=models.CASCADE)
comment_text = models.TextField(max_length=2000)
comment_pub_date = models.DateTimeField(auto_now_add=True)
|
UTF-8
|
Python
| false | false | 1,332 |
py
| 21 |
models.py
| 13 | 0.73048 | 0.717718 | 0 | 34 | 38.176471 | 80 |
evilvlso/es
| 16,879,221,478,261 |
7b407b3246e7f222c099b39acfbde63fdd233a46
|
05ce84440b82bd222f5e43979fb5bb7956e5d066
|
/scrapy_fish/scrapy_fish/utils/tools/py_md5.py
|
7eb07143d58a4d32fdd2d909a08623bfb3ff5f54
|
[] |
no_license
|
https://github.com/evilvlso/es
|
1b2a707340810d28727591ffcc9725ac13bc6769
|
2ecac5593f9d3bb32453bfa0f618c1a721b6ff39
|
refs/heads/master
| 2023-05-26T10:38:46.974074 | 2019-06-12T06:36:13 | 2019-06-12T06:36:13 | 191,326,639 | 0 | 0 | null | false | 2023-05-22T22:16:19 | 2019-06-11T08:22:27 | 2019-06-12T06:36:25 | 2023-05-22T22:16:18 | 71 | 0 | 0 | 5 |
Python
| false | false |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: zhangslob
@file: py_md5.py
@time: 2019/05/22
@desc:
"""
import hashlib
def md5_str(txt):
m = hashlib.md5(txt.encode(encoding='utf-8'))
return m.hexdigest()
# if __name__ == '__main__':
# print(md5_str('1231'))
|
UTF-8
|
Python
| false | false | 300 |
py
| 31 |
py_md5.py
| 26 | 0.55 | 0.49 | 0 | 20 | 14 | 49 |
ShonBC/task_level_planning_group_1
| 8,924,942,070,956 |
9dc52517061f95862a3e88cbbf168806ae98c221
|
38e2823dd422e98c79c12cb53dd2dbff7cef7966
|
/classes/industrial_robot.py
|
153f7dbc3cf855c1c60a249650eb808504b72614
|
[] |
no_license
|
https://github.com/ShonBC/task_level_planning_group_1
|
cdd851270a2de55ba5e43838631da525f8ffe617
|
693db34b4fe51cede75d0ffba1defd553050bf09
|
refs/heads/main
| 2023-06-16T03:13:56.977052 | 2021-07-12T19:23:54 | 2021-07-12T19:23:54 | 384,269,184 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Industrial Robot base class.
'''
class Industrial():
"""Industrial Robot base class.
Initialize with the robot name, payload, application, and company.
"""
def __init__(self, name: str, payload: float, application: list, company = 'NIST'):
"""Initialize class attributes.
Args:
name (str): Name of the robot. This attribute can only be accessed outside the class definition and cannot be set.
payload (float): Payload for the robot’s arm(s). This attribute can be both accessed and set outside the class definition.
application (list): List of applications the robot can perform. For instance, gantry_robot can do both kitting and assembly while ground_robot can only do kitting. This attribute can be both accessed and set outside the class definition.
company (str, optional): Name of the robot’s vendor. By default this is set to "Nist". This attribute can only be accessed outside the class definition and cannot be set. Defaults to 'NIST'.
"""
self._name = name
self._payload = payload
self._application = application
self._company = company
def __str__(self):
return f'Name: {self._name}, Payload: {self._payload}, Application: {self._application}, Company: {self._company}'
@property
def name(self):
return self._name
@property
def payload(self):
return self._payload
@property
def application(self):
return self._application
@property
def company(self):
return self._company
@payload.setter
def payload(self, payload):
self._payload = payload
@application.setter
def application(self, application):
self.application = application
def pick_up(self, parttype: str, bin: str):
"""Print the part type picked up and the bin it was obtained from.
Args:
parttype (str): Four part types are available in the environment, red_battery, blue_battery, green_regulator, and blue_sensor.
bin (str): Parts are stored in bins 1-8.
"""
print(f'{self.name} picks up {parttype} from bin {bin}')
def put_down(self, parttype: str, bin: str):
"""Print the part type put down and the bin it was taken from.
Args:
parttype (str): Four part types are available in the environment, red_battery, blue_battery, green_regulator, and blue_sensor.
bin (str): Parts are stored in bins 1-8.
"""
print(f'{self.name} puts down {parttype} from bin {bin}')
def attach_gripper(self, gripper: str):
"""Print the gripper the robot has attached.
Args:
gripper (str): Robots can use 2 grippers: A vacuum gripper (vacuum_gripper) and a 3-finger gripper (finger_gripper).
"""
print(f'{self.name} attaches {gripper}')
def detach_gripper(self, gripper: str):
"""Print the gripper the robot has detached.
Args:
gripper (str): Robots can use 2 grippers: A vacuum gripper (vacuum_gripper) and a 3-finger gripper (finger_gripper).
"""
print(f'{self.name} detaches {gripper}')
def move_to_bin(self, bin:str):
"""Print the bin the robot has moved to.
Args:
bin (str): Parts are stored in bins 1-8.
"""
print(f'{self.name} moves to bin {bin}')
def move_to_agv(self, agv: str):
"""Print the AGV the robot has moved to.
Args:
agv (str): Automated Guided Vehicle used to transport parts to kitting station.
"""
print(f'{self.name} moves to {agv}')
def move_to_gripper_station(self, station: str):
"""Print the robot has moved to the gripper station.
Args:
station (str): Gripper changing station. The robot must move here to change grippers.
"""
print(f'{self.name} moves to {station}')
def move_from_bin(self, bin: str):
"""Print the bin the robot is moving from.
Args:
bin (str): Parts are stored in bins 1-8.
"""
print(f'{self.name} moves from bin {bin}')
def move_from_agv(self, agv: str):
"""Print the AGV the robot is moving from.
Args:
agv (str): Automated Guided Vehicle used to transport parts to kitting station.
"""
print(f'{self.name} moves from {agv}')
def move_from_gripper_station(self, station: str):
"""Print the robot is moving from the gripper changing station.
Args:
station (str): Gripper changing station. The robot must move here to change grippers.
"""
print(f'{self.name} moves from {station}')
if __name__ == '__main__':
robot = Industrial('Shon', 1.2, ['s', 'd'])
print(robot)
robot.pick_up('red battery', 'bin 1')
robot.put_down('red battery', 'bin 1')
robot.attach_gripper('three_finger')
robot.detach_gripper('three_finger')
robot.move_to_bin('bin 1')
robot.move_to_agv('agv 1')
robot.move_from_bin('bin 1')
robot.move_from_agv('agv 1')
robot.move_to_gripper_station('gripper changing station')
robot.move_from_gripper_station('gripper changing station')
# g_robot = gantry('Shon', 2.0, ['s', 'a'], 1.0, 2.0, 10, 11, 'NIST')
# g_robot.pick_up('red battery', 'bin 2')
|
UTF-8
|
Python
| false | false | 5,423 |
py
| 9 |
industrial_robot.py
| 8 | 0.611552 | 0.605831 | 0 | 159 | 33.081761 | 249 |
Igor-Carvalho/rroll
| 3,186,865,746,566 |
90cb72d58c74f6063bb3ec2969ea56d24ce174f2
|
14706cc1a57f88da5347bea074e4c0d923709680
|
/rroll/settings/base.py
|
d105a2721a333ec8f3ea99dd2bb1a60288be87c8
|
[] |
no_license
|
https://github.com/Igor-Carvalho/rroll
|
65ccd35cc3869a1802fa5d2a13d4a5038b0a7abb
|
6a0528ad9c57f52935f21594f34cd9c3b2808754
|
refs/heads/master
| 2017-06-18T06:32:15.688788 | 2016-06-24T19:48:42 | 2016-06-24T19:48:42 | 41,831,273 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""Configurações gerais do projeto rroll."""
from os import environ
from os.path import abspath, dirname, join
import dj_database_url
import dj_email_url
import django_cache_url
from django.conf import global_settings
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse_lazy
def get_environment_variable(variable):
"""Obtém o valor de uma variável de ambiente requerida obrigatoriamente pelo projeto."""
try:
return environ[variable]
except KeyError:
raise ImproperlyConfigured('You must set {} environment variable.'.format(variable))
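# e.g. get_environment_variable('SECRET_KEY') raises ImproperlyConfigured with
# a pointed message when SECRET_KEY is unset, instead of a bare KeyError.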
def get_path(path):
"""Helper para obter caminhos de arquivos referentes a este módulo de configuração."""
return abspath(join(BASE_DIR, path))
def get_name_email(value):
"""Helper para obter nome e email de admins e/ou managers da aplicação."""
result = []
for token in value.split(':'):
name, email = token.split(',')
result.append((name, email))
return result
# export ADMINS=username1,email1@domain.com:username2,email2@domain.com
ADMINS = get_name_email(get_environment_variable('ADMINS'))
managers = environ.get('MANAGERS', None)
MANAGERS = get_name_email(managers) if managers else ADMINS
# Build paths inside the project like this: join(BASE_DIR, ...)
BASE_DIR = dirname(abspath(__file__))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = get_environment_variable('SECRET_KEY')
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config()
}
# Email
# https://docs.djangoproject.com/en/dev/topics/email/
dj_email_url.SCHEMES.update(postoffice='post_office.EmailBackend')
vars().update(dj_email_url.config())
DEFAULT_CHARSET = environ.get('DEFAULT_CHARSET', 'utf-8') # default charset in django.core.email.
# default from_email in EmailMessage.
DEFAULT_FROM_EMAIL = environ.get('DEFAULT_FROM_EMAIL', 'webmaster@localhost')
# default prefix + subject in mail_admins/managers.
EMAIL_SUBJECT_PREFIX = environ.get('EMAIL_SUBJECT_PREFIX', '[Django]')
SERVER_EMAIL = environ.get('SERVER_EMAIL', 'admin@localhost') # default from: header in mail_admins/managers.
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
'albums',
'gunicorn',
'post_office',
'rest_framework',
'rest_framework.authtoken',
'allauth',
'allauth.account',
'rest_auth',
'widget_tweaks',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
SITE_ID = 1
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [get_path('../templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# Password validation
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
ROOT_URLCONF = 'rroll.urls'
WSGI_APPLICATION = 'rroll.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = get_path('../../static')
STATICFILES_DIRS = (get_path('../static'),)
MEDIA_URL = '/media/'
MEDIA_ROOT = get_path('../../media')
AUTH_USER_MODEL = 'core.User'
LOGIN_URL = reverse_lazy('account_login')
LOGIN_REDIRECT_URL = '/'
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 7
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGOUT_ON_GET = True
ACCOUNT_EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL = ''
OLD_PASSWORD_FIELD_ENABLED = True
CELERY_TASK_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['pickle', 'json']
CACHES = {
'default': django_cache_url.config()
}
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
AUTHENTICATION_BACKENDS = global_settings.AUTHENTICATION_BACKENDS + \
['allauth.account.auth_backends.AuthenticationBackend']
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.NamespaceVersioning',
'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',
'rest_framework.filters.SearchFilter')
}
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'console': {
'level': 'INFO',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django': {
'handlers': ['console'],
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'django.security': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'py.warnings': {
'handlers': ['console'],
},
}
}
|
UTF-8
|
Python
| false | false | 7,396 |
py
| 41 |
base.py
| 34 | 0.666712 | 0.66441 | 0 | 240 | 29.779167 | 110 |
ProtonHackers/CruzHacks-Flask
| 11,261,404,274,207 |
41f3adabdc7d896ee4aaa4437992c83c4f461990
|
e3f5f1100bf0fc7b4af87bd21b8eb3a20dfcc03e
|
/app/mobile/backgrounds.py
|
80f56d78dbd02040b819f527fa329feb92b207f3
|
[] |
no_license
|
https://github.com/ProtonHackers/CruzHacks-Flask
|
ec3e48b063b394cae20db517e5e77a097cf2844d
|
882bbe2d8d879d3207fd4fa54fc0e185c9cbc092
|
refs/heads/master
| 2022-12-16T12:54:35.037547 | 2018-01-21T19:47:38 | 2018-01-21T19:47:38 | 118,274,391 | 0 | 0 | null | false | 2022-11-22T01:45:46 | 2018-01-20T19:34:14 | 2018-01-20T19:38:41 | 2022-11-22T01:45:43 | 4,340 | 0 | 0 | 11 |
Python
| false | false |
import os
import cv2
import numpy as np
from flask import current_app
from PIL import Image
def remove_background(img_path):
img = Image.open(current_app.config["UPLOAD_TEMPLATE"] + img_path)
filename, file_extension = os.path.splitext(img_path)
img.save(current_app.config["UPLOAD_TEMPLATE"] + filename + ".png")
# == Parameters =======================================================================
BLUR = 21
CANNY_THRESH_1 = 10
CANNY_THRESH_2 = 200
MASK_DILATE_ITER = 10
MASK_ERODE_ITER = 10
MASK_COLOR = (1.0, 1.0, 1.0) # In BGR format
# == Processing =======================================================================
# -- Read image -----------------------------------------------------------------------
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# -- Edge detection -------------------------------------------------------------------
edges = cv2.Canny(gray, CANNY_THRESH_1, CANNY_THRESH_2)
edges = cv2.dilate(edges, None)
edges = cv2.erode(edges, None)
# -- Find contours in edges, sort by area ---------------------------------------------
contour_info = []
_, contours, _ = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
for c in contours:
contour_info.append((
c,
cv2.isContourConvex(c),
cv2.contourArea(c),
))
contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True)
max_contour = contour_info[0]
# -- Create empty mask, draw filled polygon on it corresponding to largest contour ----
# Mask is black, polygon is white
mask = np.zeros(edges.shape)
cv2.fillConvexPoly(mask, max_contour[0], 255)
# -- Smooth mask, then blur it --------------------------------------------------------
mask = cv2.dilate(mask, None, iterations=MASK_DILATE_ITER)
mask = cv2.erode(mask, None, iterations=MASK_ERODE_ITER)
mask = cv2.GaussianBlur(mask, (BLUR, BLUR), 0)
mask_stack = np.dstack([mask] * 3) # Create 3-channel alpha mask
# -- Blend masked img into MASK_COLOR background --------------------------------------
mask_stack = mask_stack.astype('float32') / 255.0 # Use float matrices,
img = img.astype('float32') / 255.0 # for easy blending
masked = (mask_stack * img) + ((1 - mask_stack) * MASK_COLOR) # Blend
masked = (masked * 255).astype('uint8') # Convert back to 8-bit
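    # Alpha-compositing identity used above: out = alpha*fg + (1 - alpha)*bg,
    # with the blurred mask acting as a per-pixel alpha in [0, 1].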
    cv2.imwrite(os.path.join(current_app.config['UPLOAD_TEMPLATE'], img_path), masked)
|
UTF-8
|
Python
| false | false | 2,553 |
py
| 22 |
backgrounds.py
| 17 | 0.531923 | 0.50568 | 0 | 63 | 39.52381 | 91 |
tkrasnoperov/recursive_generation
| 5,918,464,937,043 |
e4ce433e0ed409d5bbb3e6348b99b9504eb18958
|
3d790f7852b1976ba464b767e1c2cccd8ec1e536
|
/main.py
|
226a8a2f52f6b805999a9caf28ca197aa995fcbd
|
[] |
no_license
|
https://github.com/tkrasnoperov/recursive_generation
|
f8d2903506fec4224b0555f12203d4001e9c1b90
|
7098a41a8df26a85d0d6bc48422580d94a7f2e35
|
refs/heads/master
| 2020-04-30T14:09:46.009630 | 2019-03-21T06:18:02 | 2019-03-21T06:18:02 | 176,881,728 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
from time import time
import numpy as np
from numpy.linalg import pinv, matrix_rank, norm
import matplotlib.pyplot as plt
from scipy.linalg import null_space
from PIL import Image as image
import torch
import torchvision
from torch import FloatTensor, LongTensor
from torch.autograd import Variable
import torch.nn as nn
from torch.nn.functional import mse_loss, softmax
import torch.utils.model_zoo as model_zoo
import torchvision.transforms.functional as tf
from labels import labels
from utils import *
from alexnet import *
from solution_space import *
from visual import *
class Generator():
def __init__(self, model):
self.model = model
self.shapes = [model(torch.rand(1, 3, 224, 224), end=i).size() for i in range(23)]
self.x_min = torch.zeros(1, 3, 224, 224).cuda()
self.x_max = torch.ones(1, 3, 224, 224).cuda()
self.start = 0
self.end = 23
def __call__(self, x, h=.1, steps=1):
        return self.backward_generate(x, h=h, steps=steps)
def quick_generate(self, target):
y = Variable(torch.rand(self.shapes[1]), requires_grad=True)
loss = lambda x: self.loss(x, y=target, start=1)
constraint = lambda x: self.constraint(x, mini=0, maxi=5)
y = grad_ascent(y, loss, constraint=constraint, h=1, steps=100, verbose=False)
start = time()
total_loss = 0
n_runs = 100
for _ in range(n_runs):
x = Variable(torch.rand(self.shapes[0]), requires_grad=True)
loss = lambda x: self.loss(x, y=y, end=1)
constraint = lambda x: self.constraint(x)
x = grad_ascent(x, loss, constraint=constraint, h=.1, steps=100, verbose=False)
jpeg(x)
total_loss += self.loss(x, y=target).item()
print(total_loss / n_runs, time() - start)
start = time()
total_loss = 0
for _ in range(n_runs):
x = Variable(torch.rand(self.shapes[0]), requires_grad=True)
loss = lambda x: self.loss(x, y=target)
constraint = lambda x: self.constraint(x)
x = grad_ascent(x, loss, constraint=constraint, h=.1, steps=1000, verbose=False)
total_loss += self.loss(x, y=target).item()
print(total_loss / n_runs, time() - start)
return y
def backward_generate(self, target, inter_layers=[], h=1, steps=1000):
j = self.end
y_j = target
for i in reversed(inter_layers):
y_i = Variable(torch.rand(self.shapes[i]), requires_grad=True)
loss = lambda x: self.loss(x, y=y_j, start=i, end=j)
constraint = lambda x: self.constraint(x, mini=-20, maxi=20)
y_i = grad_ascent(y_i, loss, constraint=constraint, h=h, steps=steps, verbose=False)
y_j = y_i.data.clone()
j = i
x = Variable(torch.rand(self.shapes[0]), requires_grad=True)
loss = lambda x: self.loss(x, y=y_j, start=0, end=j)
x = grad_ascent(x, loss, constraint=self.constraint, h=h, steps=steps, verbose=False)
return x
def loss(self, x, y=None, start=0, end=23):
loss = 0
loss += mse_loss(self.model(x, start=start, end=end), y)
return loss
def constraint(self, x, mini=0, maxi=1):
x_min = mini * torch.ones(x.size())
x_max = maxi * torch.ones(x.size())
x = torch.max(x, x_min)
x = torch.min(x, x_max)
return x
def grad_ascent(x, loss, constraint=None, h=.01, steps=1, verbose=True, model_loss=None, i=0):
if verbose:
print("\n\tGRAD ASCENT")
print("================================================================")
x_start = x.clone()
for i in range(steps):
step_loss = loss(x)
if verbose:
print("loss:\t\t{}".format(step_loss.data.item()))
step_loss.backward(retain_graph=True)
grad, *_ = x.grad.data
x_step = x - h * grad / grad.norm()
if constraint:
x_step = constraint(x_step)
if verbose:
print("grad mag:\t{}".format(grad.norm().item()))
print("step mag:\t{}".format(h * grad.norm().item()))
print()
x = Variable(x_step.data, requires_grad=True)
if verbose:
print("final loss:", loss(x).data.item())
print("displacement:", torch.norm(x_start - x).item())
print("========================================================\n")
return x
def load_synset(files, n=-1):
synset = []
for file in files[:n]:
synset.append(load_jpg("synset/" + file))
return synset
def perfect_target(i):
t = torch.zeros(1, 1000)
t[0][i] = 1
return t
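# e.g. perfect_target(54) is a 1x1000 one-hot row with a 1 at class index 54
# (ImageNet-style class indices are assumed here).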
# environment setup ===============================================
torch.set_default_tensor_type('torch.cuda.FloatTensor')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# alexnet model ===============================================
model = alexnet().to(device)
model.cuda()
model.eval()
# run =========================================================
gen = Generator(model)
# Generative Accuracy Experiment
targets = [
perfect_target(54),
perfect_target(120),
perfect_target(255),
perfect_target(386),
perfect_target(954),
]
layer_sets = [
[],
[3],
[10],
[3, 6],
[6, 14],
[3, 6, 10],
[3, 10, 14],
[3, 6, 10, 14]
]
n_iters = 100
with open("results.txt", 'w') as f:
for i, target in enumerate(targets):
for j, layer_set in enumerate(layer_sets):
total_loss = 0
start = time()
for _ in range(n_iters):
x = gen.backward_generate(target, inter_layers=layer_set)
loss = gen.loss(x, y=target).item()
total_loss += loss
print("target: {}\t layer_set: {}\t loss: {}\t time: {}"
.format(i, j, total_loss / n_iters, time() - start))
print()
# Generative Speed Experiment
y = gen.quick_generate(targets[0])
|
UTF-8
|
Python
| false | false | 6,011 |
py
| 7 |
main.py
| 5 | 0.553153 | 0.532025 | 0 | 200 | 29.055 | 96 |
sonochiwa/xmlparser
| 18,227,841,228,156 |
a5a9d7d15fb9c49e889391bf87809714b0cc9b25
|
b8d0d87e79e7c766f402cd61aff3e1b06fbbd436
|
/widget.py
|
410157974a94fc3b636a3e8d108877a3f7af2996
|
[] |
no_license
|
https://github.com/sonochiwa/xmlparser
|
1ca0c585c425aa22171cae60b3a724003beb29b8
|
6b4f499f9972af2f902828806fe4a67001a039bf
|
refs/heads/main
| 2023-05-22T20:25:30.082948 | 2022-12-23T13:10:07 | 2022-12-23T13:10:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import csv
import docx
from item import Ui_Item
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from form import Ui_Form
from db.base import session
from db.models import RKN
from db.utils import get_field_type
from datetime import datetime
from XMLparser import parser
from sqlalchemy.sql.sqltypes import String, Integer, Boolean
def select(**kwargs):
with session() as s:
query_set = s.query(RKN).filter_by(**kwargs)
return query_set
def convert(sqltype, value):
if value is None:
return value
if isinstance(sqltype, String):
return value
elif isinstance(sqltype, Integer):
return int(value)
elif isinstance(sqltype, Boolean):
return bool(int(value))
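# e.g. convert(Integer(), '3') -> 3 and convert(Boolean(), '1') -> True;
# SQL types not listed fall through and return None implicitly.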
class Item(QWidget, Ui_Item):
def __init__(self):
super(Item, self).__init__()
self.setupUi(self)
self.comboBox.addItems(RKN.__table__.columns.keys()[1:])
class Main(QWidget, Ui_Form):
def __init__(self):
super(Main, self).__init__()
self.setupUi(self)
self.file_path = ''
self.pushButton_1.clicked.connect(self.get_xml_file)
self.pushButton_2.clicked.connect(self.parse_XML)
self.pushButton_3.clicked.connect(self.add_item)
self.pushButton_4.clicked.connect(self.btn_select)
self.pushButton.clicked.connect(self.del_item)
self.contentLayout = QVBoxLayout()
self.areaContent.setLayout(self.contentLayout)
self.setUpContent()
    def del_item(self):
        if self.contentLayout.count():
            self.contentLayout.itemAt(self.contentLayout.count() - 1).widget().deleteLater()
def setUpContent(self):
if not self.contentLayout.count():
self.add_item()
def add_item(self):
w = Item()
self.contentLayout.addWidget(w)
def btn_select(self):
data = {}
for elem in self.areaContent.children():
if isinstance(elem, QWidget):
key = elem.comboBox.currentText()
value = elem.lineEdit_2.text()
sqltype = get_field_type(RKN, key)
value = convert(sqltype, value)
data[key] = value
result = select(**data)
        self.info('Records found: {}'.format(result.count()))
with open('dump.csv', 'w', newline='') as f:
writer = csv.writer(f)
writer.writerow([column.name for column in RKN.__mapper__.columns])
[writer.writerow([getattr(curr, column.name) for column in RKN.__mapper__.columns]) for curr in result]
mydoc = docx.Document()
        mydoc.add_paragraph('Date - {}'.format(datetime.now().strftime('%d.%m.%Y')))
        mydoc.add_paragraph('Time - {}'.format(datetime.now().strftime('%H:%M')))
        mydoc.add_paragraph('Field - {}, records - {}'.format(', '.join(data.keys()), result.count()))
mydoc.save("report.docx")
def get_xml_file(self):
_filter = '*.xml'
self.file_path, _ = QFileDialog.getOpenFileName(self, 'Set folder', 'c:/tmp', _filter)
filename = self.file_path.split("/")[-1]
self.lineEdit_1.setText(filename)
def parse_XML(self):
if self.file_path:
            self.info('Parsing in progress...')
self.xml_to_db()
            self.info('Done!')
            self.info('Total records read: {}'.format(self.all_read))
            self.info('Total records inserted: {}'.format(self.records))
            self.info('Data processing time: {}'.format(self.seconds))
def info(self, text):
self.textBrowser.append(text)
self.textBrowser.repaint()
def xml_to_db(self):
rcode = self.spinBox.value()
RKN.metadata.create_all()
path = self.file_path
self.all_read = 0
self.records = 0
start = datetime.now()
with session() as s:
for record in parser(path):
self.all_read += 1
if record['region_code'] != str(rcode): continue
rkn = RKN()
for key, value in record.items():
if hasattr(rkn, key):
sqltype = get_field_type(RKN, key)
value = convert(sqltype, value)
setattr(rkn, key, value)
s.add(rkn)
s.flush()
self.records += 1
self.seconds = datetime.now() - start
if __name__ == '__main__':
app = QApplication([])
w = Main()
w.show()
app.exec_()
|
UTF-8
|
Python
| false | false | 4,653 |
py
| 9 |
widget.py
| 7 | 0.575744 | 0.572216 | 0 | 129 | 34.162791 | 115 |
moztn/slides-moztn
| 13,735,305,417,485 |
e55e247e78b2681d02f8cd84a1ca8d47996d2b18
|
8a4a79d95bac2373eb6cf4f033c09ad344673065
|
/models.py
|
353582d3c978e6a980ac89ae3d24790346254841
|
[] |
no_license
|
https://github.com/moztn/slides-moztn
|
361c1b05e642bc78fbd08de4d7dd974dcd4032f9
|
ae826bbc18528d1af8de78a751e5c537efac7a4f
|
refs/heads/master
| 2016-09-11T05:36:44.112041 | 2015-01-05T23:09:25 | 2015-01-05T23:09:25 | 10,198,896 | 2 | 0 | null | false | 2015-04-02T12:32:51 | 2013-05-21T15:23:11 | 2015-01-05T23:09:25 | 2015-04-02T12:32:51 | 55,616 | 7 | 12 | 13 |
JavaScript
| null | null |
from sqlalchemy import Column, Integer, String, ForeignKey
from database import Base
from sqlalchemy.orm import relationship, backref
from flask.ext.login import UserMixin
class AdministratorModel(Base,UserMixin):
__tablename__ = 'administrators'
id = Column(Integer, primary_key=True)
email = Column(String(90), unique=True)
def __init__(self, email=None):
self.email = email
def __repr__(self):
return '<Administrator %r>' %(self.email)
class SlideModel(Base):
__tablename__ = 'slides'
id = Column(Integer, primary_key=True)
title = Column(String(15), nullable=False)
url = Column(String(255), unique=True, nullable=False)
description = Column(String(255), nullable=False)
category = Column(Integer, ForeignKey('categories.id'), nullable=False)
# category = relationship('Category', backref=backref('slides', lazy='dynamic'))
screenshot = Column(String(255))
def __repr__(self):
return '<Slide %s>' %(self.title)
    # We will use this function to generate the github.io URL.
    # Note that we assume the stored URL is a valid GitHub repository URL
    # and that the gh-pages branch exists.
    # See the isValidURL function in slides.py.
@property
def github_demo_url(self):
print(self.url[19:].split('/'))
subdomain, prefix = self.url[19:].split('/')
return "http://{0}.github.io/{1}".format(subdomain, prefix)
@property
def github_download_url(self):
url = self.url.lower()
url = url + '/archive/master.zip'
return url
class CategoryModel(Base):
__tablename__ = 'categories'
id = Column(Integer, primary_key=True)
name = Column(String(255), nullable=False, unique=True)
# slides = relationship("Slide", backref="categories")
# def __init__(self, name=None):
# self.name = name
def __repr__(self):
return '<Category %s>' %(self.name)
|
UTF-8
|
Python
| false | false | 1,905 |
py
| 17 |
models.py
| 9 | 0.648294 | 0.636745 | 0 | 58 | 31.827586 | 84 |
kpmoorse/rkf_trajectory
| 1,778,116,495,572 |
cbad9f246efd180c9548bab67f61d1d555b5dbcb
|
05303b35d125ac17c17fc7d632977c0f6c4ce30f
|
/traj_stepwise.py
|
7bb06c3ee397018927e10ce4a012236569ecfe65
|
[] |
no_license
|
https://github.com/kpmoorse/rkf_trajectory
|
78cd78d45acbba226078c0a639b21df3b508d9fb
|
e6f388db54b689b3bc9251918530a46296501a52
|
refs/heads/master
| 2020-04-16T05:44:47.986863 | 2019-03-05T22:01:43 | 2019-03-05T22:01:43 | 165,319,060 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
from __future__ import print_function
import sys
import time
import scipy
import numpy
import matplotlib.pyplot as plt
import trajectory
import argparse
import rospy
from std_msgs.msg import Float64
from autostep_proxy import AutostepProxy
# Initialize argument parser
parser = argparse.ArgumentParser(description='Stepwise Trajectory')
parser.add_argument('-t', '--traj', metavar='TRAJECTORY', type=float, nargs='+',
                    help='List of trajectory parameters; format = t f [t f [...]]')
parser.add_argument('-r', '--rng', metavar='RANGE', type=float, nargs='+',
help='Frequency range parameters; format = t [start] stop [step]')
parser.add_argument('--nopre', dest='nopre', action='store_true')
args = parser.parse_args()
autostep = AutostepProxy()
print()
print('* trajectory example')
print()
jog_params = {'speed': 200, 'accel': 500, 'decel': 500}
max_params = {'speed': 1000, 'accel': 10000, 'decel': 10000}
assert bool(args.traj) ^ bool(args.rng), "Arguments must be TRAJECTORY or RANGE but not both"
# Read trajectory params from command line or use default
if args.traj:
assert len(args.traj) % 2 == 0
freq_list = [args.traj[i:i+2] for i in range(0, len(args.traj), 2)]
elif args.rng:
t = args.rng[0]
args.rng = args.rng[1:]
assert len(args.rng) in [1, 2, 3]
freq_list = [[t, i] for i in numpy.arange(*args.rng)]
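# Example (illustrative): "--traj 2 1 3 0.5" gives freq_list == [[2.0, 1.0], [3.0, 0.5]];
# "--rng 2 1 4 1" gives freq_list == [[2.0, 1.0], [2.0, 2.0], [2.0, 3.0]] via numpy.arange(1, 4, 1).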
preview = not args.nopre
print(freq_list)
rospy.init_node('freq_counter')
freq_pub = rospy.Publisher(rospy.resolve_name("~frequency"), Float64, queue_size=10)
# Create trajectory
dt = AutostepProxy.TrajectoryDt
tau = sum([freq[0] for freq in freq_list])
num_pts = int(tau/dt)
t = dt*numpy.arange(num_pts)
# Calculate stepwise frequency profile
trj = trajectory.Trajectory(t)
trj.set_frequency(trj.stepwise(freq_list, rnd=True), 80)
# Display trajectory plot
position = trj.position
if preview:
plt.plot(t, position)
plt.grid('on')
plt.xlabel('t (sec)')
plt.ylabel('position (deg)')
plt.title('Trajectory')
plt.show()
# Initialize to zero-point
print(' move to start position')
autostep.set_move_mode('jog')
autostep.move_to(position[0])
autostep.busy_wait()
time.sleep(1.0)
# Loop over stepwise chunks
print(' running trajectory ...', end='')
sys.stdout.flush()
autostep.set_move_mode('max')
for i, freq in enumerate(freq_list):
start = int(sum([x[0] for x in freq_list[:i]]) / dt)
end = int(sum([x[0] for x in freq_list[:i+1]]) / dt)
rng = range(start, end)
freq_pub.publish(freq[1])
autostep.run_trajectory(position[rng])
autostep.busy_wait()
print(' done')
time.sleep(1.0)
autostep.set_move_mode('jog')
print(' move to 0')
autostep.move_to(0.0)
autostep.busy_wait()
print()
|
UTF-8
|
Python
| false | false | 2,761 |
py
| 5 |
traj_stepwise.py
| 5 | 0.681637 | 0.662079 | 0 | 100 | 26.61 | 92 |
Brett-BI/Dodo
| 13,984,413,535,587 |
ae6b9c8219306205c0c1a4af6c65b373e8fa81a8
|
0a320bd7ddb1ac826f800ee4c34755fb414c26de
|
/Resources/Database.py
|
78b61a0ac9ba20f737e0ca0dd78c64b6f67edb22
|
[] |
no_license
|
https://github.com/Brett-BI/Dodo
|
65c66aca21a9607d2defed4d6c10de9374bc11c1
|
69d5e3ce1c679ecbe27657a0f76344f404898444
|
refs/heads/master
| 2023-07-14T04:02:12.850673 | 2021-08-27T00:00:28 | 2021-08-27T00:00:28 | 394,811,045 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import redis
from abc import abstractmethod
import json
from random import randint
from typing import Dict, List
# Interface for the Room, Message, and User classes. Need the initialize method for initial setup of the object in Redis.
class DatabaseObject():
@abstractmethod
def initialize(self) -> None:
pass
# ignoring users for now
class User(DatabaseObject):
pass
'''
{
id: alphanumeric
create_time: datetime
pub/sub: pub/sub data
}
'''
class Message(DatabaseObject):
def __init__(self, redis_instance: redis.Redis, channel_id: str) -> None:
self.r = redis_instance
self.channel_id = channel_id
# there's never going to be a need to init the entire class here because this is only ever called in the Channel class...
def initialize(self) -> None:
self.r.hset(self.channel_id, 'messages', json.dumps([]))
    def add_message(self, message: Dict) -> None:
        # self.messages was never initialized; load the current list from Redis,
        # append the new message, and persist it back (assumed persistence model,
        # consistent with initialize() above).
        messages = json.loads(self.r.hget(self.channel_id, 'messages') or '[]')
        messages.append(message)
        self.r.hset(self.channel_id, 'messages', json.dumps(messages))
'''
? Should we use a generic Database class that can then be implemented by Users, Rooms, and Messages?
? Should we use generic classes for Users, Rooms, and Messages that assume you're passing in a Redis db object?
> This means we would need another class for initializing the DB setup OR overseeing the setup using Users, Rooms, and Messages so:
Ex.: DBOverseer().init([databaseclasses])
! Need to make a separate interface for Users, Rooms, and Messages that guarantees the presence of an init() method for DBOverseer.init() to call on each database object
! Users -> Channels.channel_id.messages
Should look like this: channels = { ch:1234: { messages: [], users: [], start_time: [] }}
users = { user:1234: { created_date: "" }}
'''
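# A minimal sketch (not in the original file) of the DBOverseer idea raised in
# the note above; the class name and init() signature are assumptions for
# illustration only. It relies solely on the initialize() method guaranteed by
# the DatabaseObject interface.
class DBOverseer:
    def init(self, database_objects: List[DatabaseObject]) -> None:
        # Run first-time setup for every registered database object.
        for obj in database_objects:
            obj.initialize()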
|
UTF-8
|
Python
| false | false | 1,840 |
py
| 18 |
Database.py
| 17 | 0.659239 | 0.654891 | 0 | 51 | 35.098039 | 179 |
JackRoten/UCSB129L
| 8,521,215,119,774 |
9ba7dcf377ea8d5a12ca75b705aa81ea4b0a8e0a
|
de0c4140e6e4ba4a9633e8efc27d031d332b866f
|
/Old-HW-JACK/HW2/stupidHWProgram6.py
|
7351dfc27c1414255d4e8d2afec5b38b0a7221cf
|
[] |
no_license
|
https://github.com/JackRoten/UCSB129L
|
6ed73746f8f113bdaba3730fbd3ec33dabe1eb97
|
24795b343b04bde25af3f5e8981b91da7e492dec
|
refs/heads/master
| 2020-04-18T06:01:37.613797 | 2019-09-19T18:46:59 | 2019-09-19T18:46:59 | 167,302,880 | 0 | 0 | null | false | 2019-03-22T22:52:27 | 2019-01-24T04:27:14 | 2019-03-22T21:40:09 | 2019-03-22T22:52:27 | 5,780 | 1 | 0 | 0 |
Jupyter Notebook
| false | null |
#!/usr/bin/env python3
#
# stupidHWProgram6.py
#
# Winter 2019 129L
# Homework Exercise 6
#
# Ask for number and return the prime factors of that number, with their
# relative powers.
#
# Jack Roten 25 Jan 19
#----------------------------------------------------------------------------
from collections import Counter
def prime_factors(n):
i = 2
factors = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(i)
if n > 1:
factors.append(n)
return factors
def prime_list(m):
num = prime_factors(m)
counter = Counter(num)
for key,val in counter.items():
print(key, 'to the power ', val)
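# Example: prime_list(360) prints "2 to the power 3", "3 to the power 2" and
# "5 to the power 1", since 360 == 2**3 * 3**2 * 5.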
userInput = int(input("Please enter an integer: "))
prime_list(userInput)
|
UTF-8
|
Python
| false | false | 784 |
py
| 98 |
stupidHWProgram6.py
| 73 | 0.529337 | 0.507653 | 0 | 37 | 20.189189 | 77 |
yidapa/pyChemistry
| 10,213,432,241,109 |
9219c2c75711f54520d9d0857f9f161efc35a36a
|
87f6b0d8eda8579f51edf4241bf8afb51c5638a5
|
/__init__.py
|
bc2860bab51a314af195028b87c96fb8abde8955
|
[] |
no_license
|
https://github.com/yidapa/pyChemistry
|
0f1263f05c9036e93a83d690993eedb54fc9233a
|
f1071e145387d71ef0df57488a7890a5fd06aeb5
|
refs/heads/master
| 2021-01-18T10:07:41.228986 | 2016-02-01T20:07:13 | 2016-02-01T20:07:13 | 52,354,260 | 1 | 1 | null | true | 2016-02-23T11:32:39 | 2016-02-23T11:32:38 | 2016-01-30T09:12:24 | 2016-02-01T20:07:14 | 11 | 0 | 0 | 0 | null | null | null |
from chemlib import *
|
UTF-8
|
Python
| false | false | 21 |
py
| 6 |
__init__.py
| 5 | 0.809524 | 0.809524 | 0 | 1 | 21 | 21 |
shenwilly/Kleros-Monitor-Bot
| 15,470,472,241,585 |
c718fe8168d8c376746d78441d1d80317b63aec6
|
ce115b5b7fda1ca252ed3287a26ae1967764ef58
|
/bin/monitor.py
|
278273413ee7bd4cfe3cf33f6c9a27b49937b934
|
[
"MIT"
] |
permissive
|
https://github.com/shenwilly/Kleros-Monitor-Bot
|
47dd912c88bfc87a1bb9f54653a94af9a5747c55
|
2d8c0eaaab49e49f2dda8823f2173da51682981b
|
refs/heads/master
| 2020-06-20T19:41:03.050853 | 2019-07-24T16:58:47 | 2019-07-24T16:58:47 | 197,225,968 | 0 | 0 |
MIT
| true | 2019-07-16T16:03:04 | 2019-07-16T16:03:03 | 2019-07-16T15:15:05 | 2019-07-16T15:15:03 | 5,202 | 0 | 0 | 0 | null | false | false |
#!/usr/bin/python3
import pprint
import sys
sys.path.extend(('lib', 'db'))
pp = pprint.PrettyPrinter(indent=4)
import os
from kleros import Kleros, KlerosDispute, KlerosVote
from collections import Counter
#{"name":"_disputeID","type":"uint256"},{"name":"_voteIDs","type":"uint256[]"},{"name":"_choice","type":"uint256"},{"name":"_salt","type":"uint256"}],"name":"castVote
#castVote(uint256,uint256[],uint256,uint256)
node_url = os.environ["ETH_NODE_URL"]
kleros = Kleros(os.environ["ETH_NODE_URL"])
case_Number = int(sys.argv[1])
dispute = KlerosDispute(case_Number, node_url=node_url)
appeal = len(dispute.rounds) - 1
jurors = dispute.rounds[-1]
votes = dispute.get_vote_counter()
votesYes = votes[1]
votesYes_ratio = (votesYes / jurors) * 100
votesNo = votes[2]
votesNo_ratio = (votesNo / jurors) * 100
votesRefuse = votes[0]
votesRefuse_ratio = (votesRefuse / jurors) * 100
pending_votes = dispute.pending_vote()
case_closed_bool = dispute.ruled
subcourt_id = dispute.sub_court_id
PNK_at_stake = dispute.get_PNK_at_stake() / 10 ** 18
ETH_at_Stake = dispute.get_ETH_at_stake() / 10 ** 18
PNK_per_juror = dispute.get_PNK_per_juror() / 10 ** 18
ETH_per_juror = dispute.get_ETH_per_juror() / 10 ** 18
losers = dispute.define_losers()
vote_choices = {
0: 'Undecided',
1: 'Yes',
2: 'No'
}
winner = vote_choices[dispute.winning_choice()]
print("%s jurors drawn on last round \n" % jurors)
print("Each juror has staked %s PNK and might earn %.3f ETH on this case\n" % (PNK_per_juror, ETH_per_juror))
print("Yes votes: %s (%.2f %%)" % (votesYes, votesYes_ratio))
print("No votes : %s (%.2f %%)" % (votesNo, votesNo_ratio))
print("Refused to arbitrate : %s (%.2f %%)\n" % (votesRefuse, votesRefuse_ratio))
if pending_votes > 0:
print("Pending votes: %s \n" % pending_votes)
else:
print("Eveyone voted. \n")
print("Outcome: %s" % winner)
if votesYes > jurors // 2 or votesNo > jurors // 2 or votesRefuse > jurors // 2:
# print("Absolute majority was reached")
#TO DO move this to Kleros.py
ETH_distribution = ((losers * ETH_per_juror) / jurors) + ETH_per_juror
PNK_distribution = (losers * PNK_per_juror) / (jurors - losers)
print("Majority jurors who voted %s receive %.f PNK and %.3f ETH each \n" % (winner, PNK_distribution, ETH_distribution))
else:
print("No earnings information available yet.\n")
if case_closed_bool == True:
print("The case is closed, a total of %s PNK was at stake and %.3f ETH was distributed to jurors" % (PNK_at_stake, ETH_at_Stake))
else:
print("The case is still open, stay tuned for possible appeals")
# TO DO move this to kleros.py
def get_account_list():
juror_accounts = []
for i in range(jurors):
Votingdata = KlerosVote(case_Number, node_url=node_url, appeal = appeal, vote_id = i)
juror_accounts.append(Votingdata.account)
return juror_accounts
raw_account_list = get_account_list()
def get_sorted_list():
unique_jurors = dict(Counter(raw_account_list))
clean_list = []
for i in unique_jurors:
clean_list.append(i)
return clean_list
unique_jurors = get_sorted_list()
def get_total_PNK_stake_juror():
stake = []
for i in range(len(unique_jurors)):
x = dispute.get_juror_PNK_staked(account = unique_jurors[i], subcourtID = subcourt_id) / 10 ** 18
        # TODO: crude workaround; we should iterate over every subcourt ID until we find
        # the juror's stake, since jurors on the same dispute can have staked in different subcourts.
if x == 0:
new_subcourt_id = subcourt_id + 1
x = dispute.get_juror_PNK_staked(account = unique_jurors[i], subcourtID = new_subcourt_id) / 10 ** 18
stake.append(x)
    return sum(stake)
total_stake = get_total_PNK_stake_juror()
print("Jurors of this case have staked a total of %.f PNK on Kleros" % (total_stake))
|
UTF-8
|
Python
| false | false | 3,884 |
py
| 7 |
monitor.py
| 5 | 0.671473 | 0.650618 | 0 | 112 | 33.678571 | 166 |
Tejas-Naik/-100DaysOfCode
| 8,031,588,890,521 |
a339255f4a63c2fb76a8c87901cc5aa14300fe83
|
d93710a6feab5e03c21db1f2b39e58e7879160c5
|
/Day32 smtplib, datetime module/Motivational Quotes sender/app.py
|
ef0a0a7ec52f5d6fceac8195689d56c8deb74b9a
|
[] |
no_license
|
https://github.com/Tejas-Naik/-100DaysOfCode
|
5167bc9d583e02d8d1e31f54bc51e091270ab47f
|
76d16dbdd1a30d4c427bb05e10bcb8e24730290c
|
refs/heads/master
| 2023-08-10T22:47:06.703066 | 2021-10-02T16:15:12 | 2021-10-02T16:15:12 | 396,397,901 | 2 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import smtplib
import datetime
import random
with open('quotes.txt') as quotes_files:
quotes_list = quotes_files.readlines()
quote = random.choice(quotes_list)
my_email = 'tejasrnaik2005@gmail.com'
password = 'abcd1234{}'
reciever_mail = 'rntejas2005@gmail.com'
now = datetime.datetime.now()
with smtplib.SMTP('smtp.gmail.com', 587) as connection:
connection.starttls()
connection.login(user=my_email, password=password)
    connection.sendmail(
        from_addr=my_email,
        to_addrs=reciever_mail,
        msg=f"Subject:Quote of the Week!\n\n{quote}"
    )
    print("Sent quote!")
|
UTF-8
|
Python
| false | false | 639 |
py
| 126 |
app.py
| 102 | 0.672926 | 0.649452 | 0 | 24 | 25.625 | 56 |
pyziko/python_basics
| 9,397,388,494,972 |
65707c4b87f1bca1b59981bd26f453799a32d903
|
bd492f51847836a248b4dd033436db3d20a9cf37
|
/functionalProgramming/pure_functions and Lambda.py
|
d19a809b265e8373c1695679706dba03c63daa2d
|
[] |
no_license
|
https://github.com/pyziko/python_basics
|
1cdfbe475e1bc77bdb25d74c89a8d09a4dd42fa3
|
0a4e55ee98c857ce349bdb76affb7c711e2aa8a0
|
refs/heads/main
| 2023-05-29T09:42:09.581886 | 2021-06-14T00:20:20 | 2021-06-14T00:20:20 | 376,665,624 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# A pure function, given the same input, will always return the same output.
# It should not interact with the outside world (i.e. anything outside its scope).
# The idea is that such a function is far less likely to hide bugs, since it
# never depends on outside data that is subject to change.
# Pure functions lead to less buggy code.
from functools import reduce
def multiply_by2(li):
new_list = []
for item in li:
new_list.append(item * 2)
return new_list
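# e.g. multiply_by2([1, 2, 3]) == [2, 4, 6]; the input list is left untouched.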
# todo info map, filter, zip and reduce
print("\n*********** MAP *************\n")
# todo map -> transforming data
# takes the function without "()" so that it does not execute then takes in the data
my_list = [1, 2, 3]
def timesBy2(item):
return item * 2
print(list(map(timesBy2, my_list)))
print(my_list)
#
#
#
print("\n*********** FILTER *************\n")
# todo filter -> checks predicate passed
def only_odd(item):
    return item % 2 != 0
print(list(filter(only_odd, my_list)))
print(my_list)
#
#
#
#
print("\n*********** ZIP *************\n")
# todo zip -> use case, grouping some data together, say email, names, phoneNumber
# todo info note if one is longer it ignores the extras
email = ("test@test.com", "test@test1.com", "test@test2.com")
name = ("Ezekiel", "Ziko", "Zikozee")
number = ("07066616366", "08055573668", "08176569549", "08037672979")
print(list(zip(email, name, number)))
#
#
#
#
print("\n*********** REDUCE *************\n")
# todo reduce -> import from functools
def accumulator(acc, item):
print(acc, item)
return acc + item
print(reduce(accumulator, my_list, 0))
print("\n*********** LAMBDA *************\n")
# todo lambda expressions
# lambda param: action(param)
print("LAMBDA FOR MULTiPLY BY 2: ==> ", list(map(lambda item: item * 2, my_list)))
print("LAMBDA FOR FILTER ==> ", list(filter(lambda item: item % 2 != 0, my_list)))
print("LAMBDA FOR REDUCE ==> ", reduce(lambda acc, item: acc + item, my_list, 0))
|
UTF-8
|
Python
| false | false | 1,937 |
py
| 57 |
pure_functions and Lambda.py
| 57 | 0.629324 | 0.597315 | 0 | 82 | 22.621951 | 84 |
ferhatcicek/minifold
| 14,181,982,021,937 |
c0c2bbd6d5dca80638912d82cca0e7870b488af1
|
9adea4131921ae4b8c94e6e20c8dcd5efa8f5f4a
|
/src/where.py
|
4b9a9e6ad6bb1d44f3b9fb2455a26a81ea400a93
|
[
"BSD-3-Clause"
] |
permissive
|
https://github.com/ferhatcicek/minifold
|
33f447133601c299c9ddf6e7bfaa888f43c999fd
|
00c5e912e18a713b0496bcb869f5f6af4f3d40c9
|
refs/heads/master
| 2022-12-15T05:58:04.541226 | 2020-09-25T16:55:52 | 2020-09-25T16:55:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of the minifold project.
# https://github.com/nokia/minifold
__author__ = "Marc-Olivier Buob"
__maintainer__ = "Marc-Olivier Buob"
__email__ = "marc-olivier.buob@nokia-bell-labs.com"
__copyright__ = "Copyright (C) 2018, Nokia"
__license__ = "BSD-3"
from .connector import Connector
from .query import Query
def where(entries :list, f) -> list:
return [entry for entry in entries if f(entry)]
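# Example: where([{'a': 1}, {'a': 2}], lambda e: e['a'] > 1) == [{'a': 2}]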
class WhereConnector(Connector):
def __init__(self, child, keep_if):
super().__init__()
self.m_child = child
self.m_keep_if = keep_if
@property
def child(self):
return self.m_child
def attributes(self, object :str) -> set:
return self.child.attributes(object)
@property
def keep_if(self):
return self.m_keep_if
def query(self, q :Query) -> list:
super().query(q)
return self.answer(
q,
where(
self.m_child.query(q),
self.m_keep_if
)
)
|
UTF-8
|
Python
| false | false | 1,118 |
py
| 65 |
where.py
| 63 | 0.557245 | 0.550984 | 0 | 44 | 24.386364 | 56 |
nurdyt95/ironpython-stubs
| 6,322,191,906,089 |
7f6904a6a6421e98fd4b1b1356d7c659eb766e5d
|
c01e44a3daa107acd1367c490f6e9459d3a557f4
|
/stubs.min/System/Windows/Media/Animation_parts/ResumeStoryboard.py
|
b4a02452502fce18be87fb7440c02302c690b9af
|
[
"MIT"
] |
permissive
|
https://github.com/nurdyt95/ironpython-stubs
|
15142859d2ae1859b78cd1f906cf733b0dfd09e3
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
refs/heads/master
| 2020-12-03T08:03:35.301095 | 2017-06-27T14:54:39 | 2017-06-27T14:54:39 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class ResumeStoryboard(ControllableStoryboardAction):
"""
Supports a trigger action that resumes a paused System.Windows.Media.Animation.Storyboard.
ResumeStoryboard()
"""
|
UTF-8
|
Python
| false | false | 186 |
py
| 662 |
ResumeStoryboard.py
| 659 | 0.763441 | 0.763441 | 0 | 8 | 21.5 | 91 |
adamian/hclearn
| 12,498,354,852,525 |
9ff9bad4cbdfb355ce1b506a3ab72bc2b2d7b44f
|
addeba19af42dc21e41c0864b02616132ec079f1
|
/generate_maze_from_data.py
|
57e3af5dcd0af29aa61980f5c6769b8753aea076
|
[] |
no_license
|
https://github.com/adamian/hclearn
|
c9500f9c222c120b55855b1ec7587101a86f747a
|
c0d1d076abe29094dd43e55932457dd03f7eaed4
|
refs/heads/master
| 2021-01-20T06:32:54.738179 | 2015-02-12T09:51:50 | 2015-02-12T09:51:50 | 23,543,506 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 03 12:12:43 2014
Load all the images and build overall image....
@author: luke
"""
### Build map from google data
#import urllib
#import math
import numpy as np
import sys
#import filecmp
# import cv2.cv as cv
import cv2
import os
import glob
DEF_SCREEN_WIDTH=1600
DEF_SCREEN_HEIGHT=900
if sys.platform[0:5] == 'linux':
from PySide import QtGui
app = QtGui.QApplication(sys.argv)
screen_rect = app.desktop().screenGeometry()
DEF_SCREEN_WIDTH, DEF_SCREEN_HEIGHT = int(round(screen_rect.width()/3.,0)), int(round(screen_rect.height()/3.,0))
else:
from win32api import GetSystemMetrics
from collections import Counter
class maze_from_data:
#Requires the folder name
def __init__(self, folderName = 'D:/robotology/hclearn/division_street_1',save_images=False, save_image_dir='D:/robotology/hclearn/movie_images'):
self.folder = folderName
# Path to images FORWARD SLASHES
self.heading_index='NESW' #N=0, E=1, S=2, W=3
# Names of image windows
self.window_name='Streetview'
self.maze_map='Maze_Map'
self.place_cell_map='Place_cell_Map'
# Working out number of pixels to display maze etc
self.image_display_width=0;
self.image_display_height=0;
self.img_file=np.empty([],dtype='u1') #unsigned 8 bit (1 byte)
# Init first direction
self.direction='E'
## Making a Map build grip index
self.pixel_width=600#200
self.locations_unique=dict()
self.step_time_delay=100
## Option to save images recorded from each frame of the maze...
self.save_images=save_images
# Dir (inside of the sent folder) to save images
self.save_image_dir=save_image_dir
## Make save images directory if it doesnt exist
if self.save_images:
if not os.path.isdir(self.save_image_dir):
print 'Making directory: '+self.save_image_dir
os.mkdir(self.save_image_dir)
print 'Saving Images to ' +self.save_image_dir
    ### Check that heading is in range and wrap it if needed
def phase_wrap_heading(self,heading):
while True:
if heading>3:
heading=heading-4
elif heading<0:
heading=heading+4
if heading<=3 and heading>=0:
break
# # Work out direction vectors....
# Luke Original vectors
# if heading==0: # North = x=0, y=1
# direction_vector=[0,1]
# elif heading==1: # East = x=1, y=0
# direction_vector=[1,0]
# elif heading==2: # south = x=0, y=-1
# direction_vector=[0,-1]
# else: # west = x=-1, y=0
# direction_vector=[-1,0]
# Work out direction vectors....
#print 'USING CHARLES FOX DIRECTION VECTORS!'
if heading==0: # North = x=0, y=1
direction_vector=[0,-1]
elif heading==1: # East = x=1, y=0
direction_vector=[1,0]
elif heading==2: # south = x=0, y=-1
direction_vector=[0,1]
else: # west = x=-1, y=0
direction_vector=[-1,0]
return (heading, direction_vector)
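    # Example for phase_wrap_heading: heading == 5 wraps to 1 (East), giving
    # direction_vector == [1, 0]; heading == -1 wraps to 3 (West), giving [-1, 0].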
##### Find matching 3 files to display
def find_next_set_images(self,location_x,location_y,heading):
image_found=0
heading,direction_vector=self.phase_wrap_heading(heading)
# Convert heading
phase_wrap=np.array([3, 0, 1, 2, 3, 0],dtype='u1')
heading_array=np.array([phase_wrap[heading], phase_wrap[heading+1],phase_wrap[heading+2]])
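        # Example: heading == 0 (North) gives heading_array == [3, 0, 1], i.e. the
        # left (W), centre (N) and right (E) views that get stitched together.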
        # Find matching images, if they exist
matched_image_index=self.find_quad_image_block(location_x,location_y)
# find x values
if matched_image_index==-1:
print "Not enough images at this x location!!"
return (0,0,heading,direction_vector,0,0)
# Check values found!!!!!
if matched_image_index==-2:
print "Not enough images at this y location!!"
return (0,0,heading,direction_vector,0,0)
###### New code here to deal with only partial image blocks (Not all present!)!!!!!
images_to_combine=np.zeros(3,dtype='i2')-1 # -1 = no image
for current_dir in range(0,3):
if heading_array[current_dir] in matched_image_index['heading']:
images_to_combine[current_dir]=matched_image_index['file_id'][np.where(matched_image_index['heading']==heading_array[current_dir])]
# print('Images to display:/n')
# print images_to_combine
# print matched_image_index
image_found=1
if images_to_combine[1]==-1:
picture_name='No image here'
else:
picture_name=self.file_database_sorted['img_fname'][np.where(self.file_database_sorted['file_id']==images_to_combine[1])][0] #self.file_database_sorted['img_fname'][np.where(self.file_database_sorted['file_id']==images_to_combine[1])]##picture_name_list[images_to_combine[1]]
######## Check for alternative image options -> Can we go forwards / backwards / left / right
available_direction_vector=np.zeros([4],dtype='i1')+1
# 1. Forwards
matched_image_index_test=self.find_quad_image_block(location_x+direction_vector[0],location_y+direction_vector[1])
if matched_image_index_test==-1 or matched_image_index_test==-2:
available_direction_vector[0]=0
# 2. Backwards
matched_image_index_test=self.find_quad_image_block(location_x-direction_vector[0],location_y-direction_vector[1])
if matched_image_index_test==-1 or matched_image_index_test==-2:
available_direction_vector[1]=0
# 3. Left
_, direction_vector_test=self.phase_wrap_heading(heading-1)
matched_image_index_test=self.find_quad_image_block(location_x+direction_vector_test[0],location_y+direction_vector_test[1])
if matched_image_index_test==-1 or matched_image_index_test==-2:
available_direction_vector[2]=0
# 4. Right
_, direction_vector_test=self.phase_wrap_heading(heading+1)
matched_image_index_test=self.find_quad_image_block(location_x+direction_vector_test[0],location_y+direction_vector_test[1])
if matched_image_index_test==-1 or matched_image_index_test==-2:
available_direction_vector[3]=0
return (images_to_combine,image_found,heading,direction_vector,picture_name,available_direction_vector)
def concatenate_resize_images(self,images_to_combine):
_,height,width,depth= self.img_file.shape
combined_img=np.zeros([3,int(height),int(width),int(depth)],dtype='u1')
for current_image in range(0,3):
if images_to_combine[current_image] !=-1:
combined_img[current_image]=self.img_file[images_to_combine[current_image]]
else:
print('Missing image file replaced with zeros....')
resized_img =cv2.resize(np.concatenate(combined_img , axis=1), (self.image_display_width, self.image_display_height))
return (resized_img)
def find_quad_image_block(self,location_x,location_y):
# find x values
matched_x_loc=np.extract(self.file_database_sorted['x_loc']==location_x,self.file_database_sorted)
# Check values found!!!!!
if matched_x_loc.size<1:
# print "NOWT in Y"
return(np.zeros([1],dtype='i1')-1)
# find y values
matched_image_index=np.extract(matched_x_loc['y_loc']==location_y,matched_x_loc)
# Check values found!!!!!
if matched_image_index.size<1:
# print "NOWT in Y"
return(np.zeros([1],dtype='i1')-2)
if matched_image_index.size>4:
print 'WARNING TOO MANY IMAGES AT LOCATION x:' + str(location_x) + ' y:' + str(location_y)
return(matched_image_index)
### Display images
def display_image(self,image_in, text_in, available_directions_index, heading_ind):
# File title - remove in final!!!
cv2.putText(image_in, text_in, (self.screen_width/2,20), cv2.FONT_HERSHEY_SIMPLEX, 0.4, 0);
        # Add arrows to represent available directions
#colour_vector=(0,255,0)
# 1. Forward
if available_directions_index[0]==1: # red
cv2.fillConvexPoly(image_in,self.arrow[('up')],(0,255,0))
# else: #green
# colour_vector=(0,255,0)
# 2. Backward
if available_directions_index[1]==1: # red
cv2.fillConvexPoly(image_in,self.arrow[('down')],(0,255,0))
# colour_vector=(0,0,255)
# else: #green
# colour_vector=(0,255,0)
# 3. Left
if available_directions_index[2]==1: # red
cv2.fillConvexPoly(image_in,self.arrow[('left')],(255,0,0))
# colour_vector=(0,0,255)
# else: #green
# colour_vector=(0,255,0)
# 4. Right
if available_directions_index[3]==1: # red
cv2.fillConvexPoly(image_in,self.arrow[('right')],(255,0,0))
# colour_vector=(0,0,255)
# else: #green
# colour_vector=(0,255,0)
### Direction label
textsize=cv2.getTextSize(self.heading_index[heading_ind],cv2.FONT_HERSHEY_SIMPLEX,0.8,2)
# cv2.putText(image_in, self.heading_index[heading_ind], (x_arrow_base_location-(textsize[0][1]/2),\
# self.image_display_height-arrow_point_size-int(textsize[1]/2)), cv2.FONT_HERSHEY_SIMPLEX, 0.8,(0,0,255),2);
cv2.fillConvexPoly(image_in,self.arrow[('heading')],(0,0,255))
cv2.putText(image_in, self.heading_index[heading_ind], (0+(textsize[0][1]/2),\
30+int(textsize[1]/2)), cv2.FONT_HERSHEY_SIMPLEX, 0.8,(0,0,255),2);
cv2.imshow(self.window_name, image_in)
return image_in
def make_grid_index(self,x=8,y=8, pixel_width=200):
"Draw an x(i) by y(j) chessboard using PIL."
#import Image, ImageDraw
#from itertools import cycle
# Increase by one to include 0 effect
x+=1
y+=1
def sq_start(i,n):
"Return the x/y start coord of the square at column/row i."
return i * pixel_width / n
def square(i, j):
"Return the square corners, suitable for use in PIL drawings"
return sq_start(i,x), sq_start(j,y), sq_start(i+1,x), sq_start(j+1,y)
#image = Image.new("L", (pixel_width, pixel_width)
squares_out=np.empty([x,y,4],dtype='i2')
##draw_square = ImageDraw.Draw(image).rectangle
for ix in range(0,x):
for iy in range(0,y):
squares_out[ix,iy,:]=square(ix, iy)
return squares_out
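    # Example for make_grid_index: make_grid_index(1, 1, 200) covers a 2x2 grid,
    # with squares_out[0, 0] == [0, 0, 100, 100] and squares_out[1, 1] == [100, 100, 200, 200].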
def plot_exisiting_locations_on_grid(self,map_data):
# Plot white boxes onto grid where locations exist
# Work out middle location!
min_x=self.place_cell_id[1].min()
min_y=self.place_cell_id[2].min()
for current_loc in range(0,self.place_cell_id[1].size):
sq=self.squares_grid[self.place_cell_id[1][current_loc]-min_x,self.place_cell_id[2][current_loc]-min_y,:]
cv2.rectangle(map_data,tuple(sq[0:2]),tuple(sq[2:4]),(255,255,255),-1)
return map_data
def plot_current_position_on_map(self,current_x,current_y):
# Plot red box where vehicle is....
min_x=self.place_cell_id[1].min()
min_y=self.place_cell_id[2].min()
sq=self.squares_grid[current_x-min_x,current_y-min_y,:]
map_image_display=np.copy(self.map_template); # FORCE COPY SO IT DOESNT KEEP OLD MOVES!!!!!
cv2.rectangle(map_image_display,tuple(sq[0:2]),tuple(sq[2:4]),(0,0,255),-1)
map_image_display=self.flip_rotate_color_image(map_image_display,self.heading_index.find(self.direction),False)
#map_image_display=np.copy(np.rot90(np.flipud(map_image_display),self.heading_index.find(self.direction)))
# Show direction
textsize=cv2.getTextSize(self.direction,cv2.FONT_HERSHEY_SIMPLEX,0.8,2)
cv2.fillConvexPoly(map_image_display,self.arrow[('heading')],(0,0,255))
cv2.putText(map_image_display,self.direction,(int((textsize[0][1]/2)-2),int(30+(textsize[1]/2))), cv2.FONT_HERSHEY_SIMPLEX, 0.8,(0,0,255),2);
#
cv2.imshow(self.maze_map,map_image_display)
return map_image_display
def plot_old_position_on_map(self,current_x,current_y):
# Plot red box where vehicle is....
min_x=self.place_cell_id[1].min()
min_y=self.place_cell_id[2].min()
sq=self.squares_grid[current_x-min_x,current_y-min_y,:]
cv2.rectangle(self.map_template,tuple(sq[0:2]),tuple(sq[2:4]),(0,255,0),-1)
return self.map_template
# Plot map with place cell id's
def plot_place_cell_id_on_map(self,map_data,place_cell_id):
# Plot red box where vehicle is....
min_x=place_cell_id[1].min()
min_y=place_cell_id[2].min()
ptp_y=place_cell_id[2].ptp()
map_out=np.copy(map_data); # FORCE COPY SO IT DOESNT KEEP OLD MOVES!!!!!
map_out=self.flip_rotate_color_image(map_out,0,False)
# Loop through each place id
for current_place in range(0,place_cell_id[0].size):
# sq=self.squares_grid[place_cell_id[1][current_place]-min_x,place_cell_id[2][current_place]-min_y,:]
# Flipping this in y-plane
sq=self.squares_grid[place_cell_id[1][current_place]-min_x,np.absolute(place_cell_id[2][current_place]-min_y-ptp_y),:]
# Place number at bottom of square in middle....
x_pos=sq[0]#+np.round(np.diff([sq[2],sq[0]])/2)
y_pos=self.pixel_width-sq[1]+np.round(np.diff([sq[3],sq[1]])/2)
cv2.putText(map_out, str(int(place_cell_id[0][current_place])), (int(x_pos),int(y_pos)), cv2.FONT_HERSHEY_SIMPLEX, 0.3,(0,0,255),1);
textsize=cv2.getTextSize('N',cv2.FONT_HERSHEY_SIMPLEX,0.8,2)
#cv2.fillConvexPoly(map_out,np.abs(np.array([[self.pixel_width,0],[self.pixel_width,0],[self.pixel_width,0]])-self.arrow[('heading')]),(0,0,255))
cv2.putText(map_out, 'N', (self.pixel_width-int((textsize[0][1]/2)+10),int(30+(textsize[1]/2))), cv2.FONT_HERSHEY_SIMPLEX, 0.8,(0,0,255),2);
cv2.imshow(self.place_cell_map,map_out)
return map_out
# Flip image (mirror) then rotate anti clockwise by @ 90 degrees
def flip_rotate_color_image(self,image,angles_90, flip_on):
for current_color in range(0,image[0,0,:].size):
if flip_on:
image[:,:,current_color]=np.rot90(np.flipud(image[:,:,current_color]),angles_90)
else:
image[:,:,current_color]=np.rot90(image[:,:,current_color],angles_90)
return image
###### START OF MAIN
########################################
# Make database of image files
#####################################
def index_image_files(self):
# File list
#piclist = []
#### Load all files from given folder and sort by x then y then direction....
no_files=len(glob.glob(os.path.join(self.folder, '*.jpg')))
#file_database=np.empty([5,no_files],dtype=int)
file_database=np.empty(no_files,\
dtype=[('orig_file_id','i2'),('file_id','i2'),('x_loc','i2'),('y_loc','i2'),('heading','i2'),('img_id','i2'),('img_text','a50'),('img_fname','a100')])
#,no_files],dtype=int)
file_database['orig_file_id'][:]=range(0,no_files)
self.locations_unique=dict()
image_count=0
# if fixed_extract==True: # Lukes original mode of cutting by fixed locations in string.....
# for infile in glob.glob(os.path.join(self.folder, '*.jpg')):
# # ADD filename to list
# #piclist.append(infile)
# # Extract relevant file information.....
# # find start of filename section
# file_info=infile[infile.rfind("\\")+1:infile.rfind("\\")+14]
# # img count , x, y, heading, img_num
# file_database['img_fname'][image_count]=infile
# # x grid
# file_database['x_loc'][image_count]=int(file_info[0:3])
# # y grid
# file_database['y_loc'][image_count]=int(file_info[4:7])
# # Convert letter heading to index 1= N, 2=E, 3=S, 4=W
# file_database['heading'][image_count]=self.heading_index.find(file_info[8:9])
# # File identifier (optional extra e.g. two files at same location x,y and direction)
# file_database['img_id'][image_count]=int(file_info[10:13])
# # Massive data image block!!!
#
# else: # Use original mode from HCLEARN - Charles FOX
import re
if os.path.exists(self.folder):
for file in os.listdir(self.folder):
#print file
parts = re.split("[-,\.]", file)
#Test that it is (NUM-NUM-DIRECTION-whatever)
# print str(parts)
if len(parts)>=2 and parts[0].isdigit() and parts[1].isdigit() and parts[2][0].isalpha: # and len(parts[2]) == 1):
if parts[2][0] in self.heading_index:
# key = ((int(parts[0]), int(parts[1])),parts[2])
#If it doesnt already exist, make this key
# if key not in self.files.keys():
# self.files[key] = []
#fullFilePath = os.path.join(self.folder,file)
#Add the new file onto the end of the keys list (since there can be multiple images for one direction)
file_database['img_fname'][image_count]=file
# x grid
file_database['x_loc'][image_count]=int(parts[0])
# y grid
file_database['y_loc'][image_count]=int(parts[1])
# Convert letter heading to index 1= N, 2=E, 3=S, 4=W
file_database['heading'][image_count]=self.heading_index.find(parts[2])
# File identifier (optional extra e.g. two files at same location x,y and direction)
if parts[3].isdigit():
file_database['img_id'][image_count]=int(parts[3])
file_database['img_text'][image_count]='use_ID'
if image_count==0:
use_file_id=1 # uses the numbering of file instead of text!
elif parts[3].isalpha():
file_database['img_id'][image_count]=-1
file_database['img_text'][image_count]=parts[3]
if image_count==0:
use_file_id=0 # uses the string text of file instead of text!
else:
file_database['img_id'][image_count]=1
file_database['img_text'][image_count]=''
if image_count==0:
use_file_id=-1 # uses none!
#TODO: Add in Rain / Midday or image index sorting here
#### Build complete locations dictionary.......
current_location_key=(file_database['x_loc'][image_count],file_database['y_loc'][image_count])
# Setup new location entry if missing
if current_location_key not in self.locations_unique.keys():
self.locations_unique[current_location_key]={('Image_count'): np.zeros(4,dtype='i2')}
# Add Image count to location
self.locations_unique[current_location_key][('Image_count')][file_database['heading'][image_count]]+=1
### Fill in location info
img_count=self.locations_unique[current_location_key][('Image_count')][file_database['heading'][image_count]]-1
# Add heading marker
self.locations_unique[current_location_key][(img_count,parts[2])]=file_database['img_fname'][image_count]
image_count += 1
#self.files[key].append(fullFilePath)
else:
raise NameError("Heading is: %s\nit should be N E S or W" % parts[2])
else:
print self.folder
print file
#raise NameError("File: %s\ndoes not fit naming convention INT-INT-HEADING" % file)
else:
raise NameError("Folder does not exists")
#==============================================================================
# ### Mini task... get all northern images, in order of x location
# # Northern data,
# file_database_north=np.squeeze(file_database[:,np.transpose(np.where(file_database[3,:]==0)[0])])
# #Sub sorted by x location.....
# file_database_north_sortx=file_database_north[:,np.argsort(file_database_north[1,:])]
# #### Combine images into panorama
# # First Image
# #cv2.imshow('FRED', self.img_file[1])
# combined_img = np.concatenate((self.img_file[file_database_north_sortx[0,0:5]]) , axis=1) #file_database_north_sortx[0,:]
# resized_img = cv2.resize(combined_img, (self.screen_width, self.screen_height))
# cv2.imshow('FRED', resized_img)
# ## ALTERNATIVE:: get NESW for location
#==============================================================================
### Mini task... get data for each location NSEW
## First sort by x location!!
#file_database_by_loc=np.squeeze(file_database[:,np.transpose(np.where(file_database[1,:]==0)[0])])
#Sub sorted by y location.....
#file_database_by_loc_sorty=file_database_by_loc[:,np.argsort(file_database_by_loc[3,:])]
#### just get images that belong to each image ID.....
if use_file_id==1: # Use the fourth value (file id)
print ('Just using file IDs: ',str(file_database['img_id'][0]) )
file_database_primary=file_database[np.where(file_database['img_id']==file_database['img_id'][0])]
elif use_file_id==0: # Use first string value
print ('Just using file with id text: ',file_database['img_text'][0] )
file_database_primary=file_database[np.where(file_database['img_text']==file_database['img_text'][0])]
else: # do nothing
print ('Using all files')
file_database_primary=file_database
#### Combine images into panorama
self.file_database_sorted=np.sort(file_database_primary,order=['x_loc','y_loc','heading'])
self.file_database_sorted['file_id']=range(0,len(self.file_database_sorted))
# Not all directions included..... therefore cannot use NORTH only!!!!!!
#np.array(list(set(tuple(p) for p in points)))
useable_grid_locations=np.empty(len(self.locations_unique.keys()),dtype=[('x_loc','i2'),('y_loc','i2')])
useable_grid_locations['x_loc']=np.transpose(np.asarray(self.locations_unique.keys(),dtype='i3'))[0]
useable_grid_locations['y_loc']=np.transpose(np.asarray(self.locations_unique.keys(),dtype='i3'))[1]
useable_grid_locations=np.sort(useable_grid_locations,order=['x_loc','y_loc'])
## Add in place locations.
## Build empty array with x and y values...
self.place_cell_id=np.array([np.zeros(useable_grid_locations['x_loc'].size,dtype='i2'),useable_grid_locations['x_loc'],useable_grid_locations['y_loc']])
# 1. Order using longest x road (e.g. division street) => has most identical y values
most_y=Counter(self.place_cell_id[2]).most_common()
place_cell_id_x=np.zeros(useable_grid_locations['x_loc'].size,dtype='i2')
# for each counter output.... run through
place_cell_counter=0
for current_count_block in range(0,len(most_y)):
line_locations_x=np.where(self.place_cell_id[2]==most_y[current_count_block][0])
for current_map_tile in line_locations_x[0]:
#print str(current_count_block), str(current_map_tile)
place_cell_id_x[current_map_tile]=place_cell_counter
place_cell_counter+=1
#x_ok=np.where(np.diff(self.place_cell_id[1][self.place_cell_id_x])!=0)
self.place_cell_id[0]=place_cell_id_x
# Sort by place cell ID!
self.place_cell_id=self.place_cell_id[:,self.place_cell_id[0,:].argsort()]
#self.place_cell_id=np.array([range(0,useable_grid_locations[0].size),useable_grid_locations[0],useable_grid_locations[1]])
#print (str(self.place_cell_id))
def load_image_files(self):
num_images=len(self.file_database_sorted)
if os.path.exists(self.folder):
### Load first image.... to get sizes
dummy_img=cv2.imread(os.path.join(self.folder,self.file_database_sorted['img_fname'][0]))
            if dummy_img is None:  # cv2.imread returns None on failure, not False
print('Error nothing in image')
# self.img_file=np.empty([num_images,640,640,3],dtype='u1') #unsigned 8 bit (1 byte)
height, width, depth=dummy_img.shape
## Image for replacing when images missing!
# self.zero_image=np.zeros([height, width, depth],dtype='u1')
self.img_file=np.empty([num_images,height, width, depth],dtype='u1') #unsigned 8 bit (1 byte)
# load all image files into array
for image_count in range(0,num_images):
self.img_file[image_count,:,:,:]=cv2.imread(os.path.join(self.folder,self.file_database_sorted['img_fname'][image_count]))
else:
print('CANNOT FIND IMAGES RETURNING!!')
return(0)
def display_maps_images(self): # Loads and initialises image data for visual mapping
## Load image data from folder
self.load_image_files()
## Set up image arrays for map plots.....
self.map_template=np.zeros((self.pixel_width,self.pixel_width,3),dtype='u1') # default black
#map_image_display=np.zeros((self.pixel_width,self.pixel_width,3),dtype='u1') # default black
# Choose start location (take first place cell id)
self.location_x=self.place_cell_id[1,0]
self.location_y=self.place_cell_id[2,0]
# Windows screen size
try:
self.screen_width = GetSystemMetrics (0)
self.screen_height = GetSystemMetrics (1)
except:
self.screen_width = DEF_SCREEN_WIDTH
self.screen_height = DEF_SCREEN_HEIGHT
# fit 3x images in window
self.image_display_width=self.screen_width
self.image_display_height=int(round(self.screen_width/3,0))
############################
######### Make arrow points (to show where to go....)
x_arrow_base_location=int(self.image_display_width/2)
y_arrow_base_location=int(self.image_display_height*0.90)
# shorter size
arrow_point_size=int(self.image_display_height*0.05)
arrow_half_width=int((self.image_display_height*0.10)/2)
self.arrow=dict()
# 1. ARROW UP!!!
#arrow_up_pts
self.arrow[('up')]= np.array([[x_arrow_base_location,y_arrow_base_location-arrow_point_size],\
[x_arrow_base_location-arrow_half_width,y_arrow_base_location],[x_arrow_base_location+arrow_half_width,y_arrow_base_location]], np.int32)
self.arrow[('up')] = self.arrow[('up')].reshape((-1,1,2))
# 2. ARROW DOWN!!!
self.arrow[('down')] = np.array([[x_arrow_base_location,self.image_display_height-1],\
[x_arrow_base_location-arrow_half_width,self.image_display_height-arrow_point_size],[x_arrow_base_location+arrow_half_width,self.image_display_height-arrow_point_size]], np.int32)
self.arrow[('down')] = self.arrow[('down')].reshape((-1,1,2))
# 3. ARROW Left!!!
self.arrow[('left')] = np.array([[x_arrow_base_location-arrow_half_width-arrow_half_width,y_arrow_base_location+int(arrow_point_size/2)],\
[x_arrow_base_location-arrow_half_width,self.image_display_height-arrow_point_size], [x_arrow_base_location-arrow_half_width,y_arrow_base_location]], np.int32)
self.arrow[('left')] = self.arrow[('left')].reshape((-1,1,2))
# 4. ARROW Right!!!
self.arrow[('right')] = np.array([[x_arrow_base_location+arrow_half_width+arrow_half_width,y_arrow_base_location+int(arrow_point_size/2)],\
[x_arrow_base_location+arrow_half_width,y_arrow_base_location], [x_arrow_base_location+arrow_half_width,self.image_display_height-arrow_point_size]], np.int32)
self.arrow[('right')] = self.arrow[('right')].reshape((-1,1,2))
# 5. ARROW Direction!!!
self.arrow[('heading')] = np.array([[15,2],\
[10,12],[20,12]], np.int32)
self.arrow[('heading')] = self.arrow[('heading')].reshape((-1,1,2))
#################################
## Make grid index x,y, [coords]
self.squares_grid=self.make_grid_index(self.file_database_sorted['x_loc'].ptp(),self.file_database_sorted['y_loc'].ptp(), self.pixel_width)
### Initialise main image windows
heading_ind=self.heading_index.find(self.direction)
available_directions_index=0
#new_location_x=self.location_x
#new_location_y=self.location_y
### Initialise interative environment
images_to_combine,image_found,self.new_heading_ind,self.direction_vector,image_title,available_directions_index=self.find_next_set_images(self.location_x,self.location_y,heading_ind)
if image_found==0:
print "No base location image... exiting"
sys.exit()
# Build images to display
resized_img=self.concatenate_resize_images(images_to_combine)
## Windows to display graphics
# Updated map of maze and current location
cv2.namedWindow(self.maze_map)
self.map_template=self.plot_exisiting_locations_on_grid(self.map_template)
cv2.waitKey(100)
# Main image display
#cv2.namedWindow(self.window_name)
# Layout of place cells
cv2.namedWindow(self.place_cell_map)
self.plot_place_cell_id_on_map(self.map_template,self.place_cell_id)
cv2.waitKey(100)
## ALTERNATIVE:: get NESW for location
self.display_image(resized_img, image_title, available_directions_index, self.new_heading_ind)
# plot Place cells on the map
cv2.waitKey(100)
### Put current location on map
        # Luke commented: don't save the first location!
#cv2.imshow(self.maze_map,self.map_template)
self.plot_current_position_on_map(self.location_x,self.location_y)
cv2.waitKey(100)
if sys.platform[0:5] == 'linux':
print 'Press any key to continue...'
raw_input()
def maze_interactive(self):
if self.save_images:
# Make directory for interactive walking around the maze (maze_interactive)
self.save_image_dir_interactive=os.path.join(self.save_image_dir,'maze_interactive')
if not os.path.isdir(self.save_image_dir_interactive):
print 'Making directory: '+self.save_image_dir_interactive
os.mkdir(self.save_image_dir_interactive)
# get base x, y locations
new_location_x=self.location_x
new_location_y=self.location_y
try:
### Wait for key to update
location_count=0
while True:
k = cv2.waitKey(0) & 0xFF
if k == 27: # ESC
cv2.destroyAllWindows()
break
# elif k == ord('s'):
# cv2.imwrite('/Users/chris/foo.png', gray_img)
# cv2.destroyAllWindows()
# break
elif k == ord('w'): # w=forwards
#image = image[::-1]
old_location_x=new_location_x
old_location_y=new_location_y
new_location_x +=self.direction_vector[0]
new_location_y +=self.direction_vector[1]
images_to_combine,image_found,self.new_heading_ind,self.direction_vector,image_title,available_directions_index=self.find_next_set_images(new_location_x,new_location_y,self.new_heading_ind)
if image_found==0:
print "No image"
new_location_x -=self.direction_vector[0]
new_location_y -=self.direction_vector[1]
else:
resized_img=self.concatenate_resize_images(images_to_combine)
image_displayed=self.display_image(resized_img, image_title, available_directions_index, self.new_heading_ind)
self.map_template=self.plot_old_position_on_map(old_location_x,old_location_y)
displayed_map=self.plot_current_position_on_map(new_location_x,new_location_y)
elif k == ord('s'): # s= backwards
#image = image[::-1]
old_location_x=new_location_x
old_location_y=new_location_y
new_location_x -=self.direction_vector[0]
new_location_y -=self.direction_vector[1]
images_to_combine,image_found,self.new_heading_ind,self.direction_vector,image_title,available_directions_index=self.find_next_set_images(new_location_x,new_location_y,self.new_heading_ind)
if image_found==0:
print "No image"
new_location_x +=self.direction_vector[0]
new_location_y +=self.direction_vector[1]
else:
resized_img=self.concatenate_resize_images(images_to_combine)
image_displayed=self.display_image(resized_img, image_title, available_directions_index, self.new_heading_ind)
self.map_template=self.plot_old_position_on_map(old_location_x,old_location_y)
displayed_map=self.plot_current_position_on_map(new_location_x,new_location_y)
elif k == ord('a'): # ,<= left
#image = image[::-1]
#new_location_x -=1
self.new_heading_ind -=1
images_to_combine,image_found,self.new_heading_ind,self.direction_vector,image_title,available_directions_index=self.find_next_set_images(new_location_x,new_location_y,self.new_heading_ind)
if image_found==0:
print "No image"
#new_location_x +=1
else:
resized_img=self.concatenate_resize_images(images_to_combine)
image_displayed=self.display_image(resized_img, image_title, available_directions_index, self.new_heading_ind)
#map_image_display=plot_current_position_on_map(self.map_template,useable_grid_locations,new_location_x,new_location_y)
elif k == ord('d'): # .>= right
#image = image[::-1]
#new_location_x -=1
self.new_heading_ind +=1
images_to_combine,image_found,self.new_heading_ind,self.direction_vector,image_title,available_directions_index=self.find_next_set_images(new_location_x,new_location_y,self.new_heading_ind)
if image_found==0:
print "No image"
#new_location_x +=1
else:
resized_img=self.concatenate_resize_images(images_to_combine)
image_displayed=self.display_image(resized_img, image_title, available_directions_index, self.new_heading_ind)
#map_image_display=plot_current_position_on_map(self.map_template,useable_grid_locations,new_location_x,new_location_y)
## Save each image....
if self.save_images:
image_filename=os.path.join(self.save_image_dir_interactive,'maze_img_'+ str(location_count).zfill(5) + '.jpg')
cv2.imwrite(image_filename,image_displayed)
map_filename=os.path.join(self.save_image_dir_interactive,'map_img_'+ str(location_count).zfill(5) + '.jpg')
cv2.imwrite(map_filename,displayed_map)
                    print 'Saving viewed image to: ' + image_filename + ' & map image to: ' + map_filename
location_count+=1
except KeyboardInterrupt:
pass
# Iterate around the maze either using random stepping or generated from paths.poslog
def maze_walk(self, random=True, paths=0):
if self.save_images:
# Make directory for path driven moves around maze
self.save_image_dir_path=os.path.join(self.save_image_dir,'maze_path')
if not os.path.isdir(self.save_image_dir_path):
print 'Making directory: '+self.save_image_dir_path
os.mkdir(self.save_image_dir_path)
# Reset map....
self.map_template=np.zeros((self.pixel_width,self.pixel_width,3),dtype='u1').copy() # default black
self.map_template=self.plot_exisiting_locations_on_grid(self.map_template)
cv2.imshow(self.maze_map,self.map_template)
cv2.waitKey(100)
# Depending on mode
if random:
new_location_x=self.location_x
new_location_y=self.location_y
location_count=0
try:
### Wait for key to update
                while True:  # AD commented out; LB: not needed here as it's random and not using the path
#while location_count < paths.shape[0]-1: # AD
# k = cv2.waitKey(0) & 0xFF
# Delay here for each cycle through the maze.....
k=cv2.waitKey(self.step_time_delay) & 0xFF
# Depending on mode
#if random: # Generate random direction NESW
next_step=np.random.choice(np.array([0,1,2,3]))
# Test for button press or location value
if k == 27: # ESC
cv2.destroyAllWindows()
break
# elif k == ord('s'):
# cv2.imwrite('/Users/chris/foo.png', gray_img)
# cv2.destroyAllWindows()
# break
elif next_step == 0: # w=forwards
#image = image[::-1]
old_location_x=new_location_x
old_location_y=new_location_y
new_location_x +=self.direction_vector[0]
new_location_y +=self.direction_vector[1]
images_to_combine,image_found,self.new_heading_ind,self.direction_vector,image_title,available_directions_index=self.find_next_set_images(new_location_x,new_location_y,self.new_heading_ind)
if image_found==0:
print "No image"
new_location_x -=self.direction_vector[0]
new_location_y -=self.direction_vector[1]
else:
resized_img=self.concatenate_resize_images(images_to_combine)
self.display_image(resized_img, image_title, available_directions_index, self.new_heading_ind)
self.map_template=self.plot_old_position_on_map(old_location_x,old_location_y)
self.plot_current_position_on_map(new_location_x,new_location_y)
elif next_step == 1: # s= backwards
#image = image[::-1]
old_location_x=new_location_x
old_location_y=new_location_y
new_location_x -=self.direction_vector[0]
new_location_y -=self.direction_vector[1]
images_to_combine,image_found,self.new_heading_ind,self.direction_vector,image_title,available_directions_index=self.find_next_set_images(new_location_x,new_location_y,self.new_heading_ind)
if image_found==0:
print "No image"
new_location_x +=self.direction_vector[0]
new_location_y +=self.direction_vector[1]
else:
resized_img=self.concatenate_resize_images(images_to_combine)
self.display_image(resized_img, image_title, available_directions_index, self.new_heading_ind)
self.map_template=self.plot_old_position_on_map(old_location_x,old_location_y)
self.plot_current_position_on_map(new_location_x,new_location_y)
elif next_step == 2: # ,<= left
#image = image[::-1]
#new_location_x -=1
self.new_heading_ind -=1
images_to_combine,image_found,self.new_heading_ind,self.direction_vector,image_title,available_directions_index=self.find_next_set_images(new_location_x,new_location_y,self.new_heading_ind)
if image_found==0:
print "No image"
#new_location_x +=1
else:
resized_img=self.concatenate_resize_images(images_to_combine)
self.display_image(resized_img, image_title, available_directions_index, self.new_heading_ind)
#map_image_display=plot_current_position_on_map(self.map_template,useable_grid_locations,new_location_x,new_location_y)
elif next_step == 3: # .>= right
#image = image[::-1]
#new_location_x -=1
self.new_heading_ind +=1
images_to_combine,image_found,self.new_heading_ind,self.direction_vector,image_title,available_directions_index=self.find_next_set_images(new_location_x,new_location_y,self.new_heading_ind)
if image_found==0:
print "No image"
#new_location_x +=1
else:
resized_img=self.concatenate_resize_images(images_to_combine)
self.display_image(resized_img, image_title, available_directions_index, self.new_heading_ind)
#map_image_display=plot_current_position_on_map(self.map_template,useable_grid_locations,new_location_x,new_location_y)
location_count+=1
cv2.destroyAllWindows()
except KeyboardInterrupt:
pass
else: # use paths
old_location_x=self.location_x.copy()
old_location_y=self.location_y.copy()
new_location_x=paths[0][0]
new_location_y=paths[0][1]
self.new_heading_ind=paths[0][2]
location_count=0 # start from as first location set.....
max_steps=paths.shape[0]
#image = image[::-1]
# new_location_x +=self.direction_vector[0]
# new_location_y +=self.direction_vector[1]
images_to_combine,image_found,self.new_heading_ind,self.direction_vector,image_title,available_directions_index=self.find_next_set_images(new_location_x,new_location_y,self.new_heading_ind)
if image_found==0:
print "No image"
new_location_x -=self.direction_vector[0]
new_location_y -=self.direction_vector[1]
else:
resized_img=self.concatenate_resize_images(images_to_combine)
image_displayed=self.display_image(resized_img, image_title, available_directions_index, self.new_heading_ind)
# LB removed
#self.map_template=self.plot_old_position_on_map(old_location_x,old_location_y)
displayed_map=self.plot_current_position_on_map(new_location_x,new_location_y)
if self.save_images:
image_filename=os.path.join(self.save_image_dir_path,'maze_img_'+ str(location_count).zfill(5) + '.jpg')
cv2.imwrite(image_filename,image_displayed)
map_filename=os.path.join(self.save_image_dir_path,'map_img_'+ str(location_count).zfill(5) + '.jpg')
cv2.imwrite(map_filename,displayed_map)
                print 'Saving viewed image to: ' + image_filename + ' & map image to: ' + map_filename
            # This needs sorting out to allow passing on the values for the next location to move to....
try:
### Wait for key to update
#while True:
while location_count < paths.shape[0]-1: # AD
# k = cv2.waitKey(0) & 0xFF
# Delay here for each cycle through the maze.....
k=cv2.waitKey(self.step_time_delay) & 0xFF
if location_count>=max_steps:
k=27
# Test for button press or location value
if k == 27: # ESC
cv2.destroyAllWindows()
break
# Continue
#next_step=paths[location_count]
location_count+=1
old_location_x=new_location_x
old_location_y=new_location_y
new_location_x=paths[location_count][0]
new_location_y=paths[location_count][1]
self.new_heading_ind=paths[location_count][2]
images_to_combine,image_found,self.new_heading_ind,self.direction_vector,image_title,available_directions_index=self.find_next_set_images(new_location_x,new_location_y,self.new_heading_ind)
if image_found==0:
print 'ERROR -> NO IMAGE FOUND @' + str(paths[location_count])
cv2.destroyAllWindows()
break
# print "No image"
# new_location_x -=self.direction_vector[0]
# new_location_y -=self.direction_vector[1]
# else:
resized_img=self.concatenate_resize_images(images_to_combine)
image_displayed=self.display_image(resized_img, image_title, available_directions_index, self.new_heading_ind)
self.map_template=self.plot_old_position_on_map(old_location_x,old_location_y)
displayed_map=self.plot_current_position_on_map(new_location_x,new_location_y)
if self.save_images:
image_filename=os.path.join(self.save_image_dir_path,'maze_img_'+ str(location_count).zfill(5) + '.jpg')
cv2.imwrite(image_filename,image_displayed)
map_filename=os.path.join(self.save_image_dir_path,'map_img_'+ str(location_count).zfill(5) + '.jpg')
cv2.imwrite(map_filename,displayed_map)
                            print('Saving viewed image to: ' + image_filename + ' & map image to: ' + map_filename)
cv2.destroyAllWindows()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
print('FRED')
# Configure class
ttt=maze_from_data()
# Read available files
ttt.index_image_files()
# Display interactively
ttt.display_maps_images()
# Run interactive mode....
ttt.maze_interactive()
|
UTF-8
|
Python
| false | false | 49,501 |
py
| 15 |
generate_maze_from_data.py
| 14 | 0.553847 | 0.536959 | 0 | 901 | 53.927858 | 288 |
albin-cousson/Le_gestionnaire
| 16,106,127,362,592 |
69702ca24c717bec0b85d396a3283c8c65181cb6
|
e30d99f24cdb1120e50b617b326124582de68ea7
|
/Le_gestionnaire_app/forms.py
|
54ed95a4316a981fac985a6f638967004fdb0e68
|
[] |
no_license
|
https://github.com/albin-cousson/Le_gestionnaire
|
a0b2ec080577c58fe549218abcdc6e5e7d2186bb
|
04cd3a4fbfb94a2d112882e0f5a9f1182745c610
|
refs/heads/main
| 2023-01-23T01:30:12.072787 | 2020-11-15T15:49:05 | 2020-11-15T15:49:05 | 311,152,020 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django import forms
from .models import main_idea_model, second_idea_model, third_idea_model
class main_idea_form(forms.ModelForm):
class Meta:
model = main_idea_model
fields = ('idea',)
class second_idea_form(forms.ModelForm):
class Meta:
model = second_idea_model
fields = ('idea','categorie',)
class third_idea_form(forms.ModelForm):
class Meta:
model = third_idea_model
fields = ('idea','categorie',)
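# --- Illustrative usage (not part of the original file): in a hypothetical
# Django view, one of these ModelForms would typically be bound and saved:
#
#   def add_main_idea(request):
#       form = main_idea_form(request.POST or None)
#       if form.is_valid():
#           form.save()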
|
UTF-8
|
Python
| false | false | 473 |
py
| 18 |
forms.py
| 15 | 0.649049 | 0.649049 | 0 | 17 | 26.823529 | 72 |
karthikpappu/pyc_source
| 3,126,736,211,365 |
a1228b08fc90d9baeec8557cba53eee5c1c4c1d5
|
91fa095f423a3bf47eba7178a355aab3ca22cf7f
|
/pycfiles/signalbox-0.3.4.4.tar/data.py
|
ae072ce218a111fb6ed37065013388ee24ffff10
|
[] |
no_license
|
https://github.com/karthikpappu/pyc_source
|
0ff4d03e6d7f88c1aca7263cc294d3fa17145c9f
|
739e7e73180f2c3da5fd25bd1304a3fecfff8d6e
|
refs/heads/master
| 2023-02-04T11:27:19.098827 | 2020-12-27T04:51:17 | 2020-12-27T04:51:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# uncompyle6 version 3.7.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04)
# [GCC 8.4.0]
# Embedded file name: /Users/ben/dev/signalbox/signalbox/views/data.py
# Compiled at: 2014-08-27 19:26:12
import os
from datetime import datetime
from zipfile import ZipFile
from tempfile import NamedTemporaryFile
from django.core.exceptions import ValidationError
import pandas as pd
from django.contrib import messages
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.template.loader import get_template
from django.template import Context, Template
from signalbox.decorators import group_required
from signalbox.models import Answer, Study, Reply, Question, Membership
from django.shortcuts import render_to_response, get_object_or_404
from signalbox.forms import SelectExportDataForm, get_answers, DateShiftForm
from signalbox.utilities.djangobits import conditional_decorator
from django.conf import settings
import reversion
ANSWER_FIELDS_MAP = dict([
('id', 'id'),
('reply__id', 'reply'),
('question__q_type', 'qtype'),
('answer', 'answer'),
('question__variable_name', 'variable_name')])
ROW_FIELDS_MAP = dict([
('reply__id', 'reply'),
('reply__collector', 'collector'),
('reply__observation__id', 'observation'),
('reply__entry_method', 'entry_method'),
('reply__observation__n_in_sequence', 'observation_index'),
('reply__observation__due', 'due'),
('reply__is_canonical_reply', 'canonical'),
('reply__started', 'started'),
('reply__last_submit', 'finished'),
('reply__id', 'reply'),
('reply__observation__dyad__user__username', 'participant'),
('reply__observation__dyad__relates_to__user__username', 'relates_to_participant'),
('reply__observation__dyad__study__slug', 'study'),
('reply__observation__dyad__condition__tag', 'condition'),
('reply__observation__dyad__date_randomised', 'randomised_on')])
@group_required(['Researchers'])
def export_data(request):
form = SelectExportDataForm(request.POST or None)
if not form.is_valid():
return render_to_response('manage/export_data.html', {'form': form}, context_instance=RequestContext(request))
else:
studies = form.cleaned_data['studies']
questionnaires = form.cleaned_data['questionnaires']
if studies:
answers = get_answers(studies)
if questionnaires:
answers = Answer.objects.filter(reply__asker__in=questionnaires)
if not answers.exists():
raise ValidationError('No data matching filters.')
answers = answers.filter(question__variable_name__isnull=False)
return export_answers(request, answers)
def export_answers(request, answers):
"""Take a queryset of Answers and export to a zip file."""
ad = answers.values(*ANSWER_FIELDS_MAP.keys())
rd = answers.values(*ROW_FIELDS_MAP.keys())
answerdata = pd.DataFrame({i['id']:i for i in ad}).T
rowmetadata = pd.DataFrame(i for i in rd)
answerdata.columns = [ ANSWER_FIELDS_MAP[i] for i in answerdata.columns ]
rowmetadata.columns = [ ROW_FIELDS_MAP[i] for i in rowmetadata.columns ]
answerdata = answerdata.set_index(['reply', 'variable_name']).unstack()['answer']
rowmetadata = rowmetadata.drop_duplicates('reply').set_index('reply')
namesofthingstoexport = ('answers meta').split()
tmpfiles = [ NamedTemporaryFile(suffix='.xlsx') for i in namesofthingstoexport ]
[ j.to_excel(i.name, merge_cells=False, encoding='utf-8') for i, j in zip(tmpfiles, [answerdata, rowmetadata]) ]
makedotmp = get_template('signalbox/stata/make.dotemplate')
makedostring = makedotmp.render(Context({'date': datetime.now(), 'request': request}))
questions = set(i.question for i in answers)
choicesets = set(filter(lambda x: x.get_choices(), (i.choiceset for i in questions if i.choiceset)))
syntaxtdotmp = get_template('signalbox/stata/process-variables.dotemplate')
syntax_dostring = syntaxtdotmp.render(Context({'questions': questions, 'choicesets': choicesets, 'request': request}))
with ZipFile(NamedTemporaryFile(suffix='.zip').name, 'w') as (zipper):
[ zipper.write(i.name, j + os.path.splitext(i.name)[1]) for i, j in zip(tmpfiles, namesofthingstoexport) ]
zipper.writestr('make.do', makedostring.encode('utf-8', 'replace'))
zipper.writestr('make_labels.do', syntax_dostring.encode('utf-8', 'replace'))
zipper.close()
zipbytes = open(zipper.filename, 'rb').read()
response = HttpResponse(zipbytes, content_type='application/x-zip-compressed')
response['Content-disposition'] = 'attachment; filename=exported_data.zip'
return response
def generate_syntax(template, questions, reference_study=None):
"""Return a string of stata syntax to format exported datafile for a given set of questions."""
t = get_template(template)
syntax = t.render(Context({'questions': questions, 'reference_study': reference_study}))
return syntax
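# e.g. (hypothetical call, using names defined above):
#   dostring = generate_syntax('signalbox/stata/process-variables.dotemplate', questions)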
def _shifted(obj, datetimefield, delta):
setattr(obj, datetimefield, getattr(obj, datetimefield) + delta)
return obj
@group_required(['Researchers'])
@conditional_decorator(reversion.create_revision, settings.USE_VERSIONING)
def dateshift_membership(request, pk=None):
"""Allows Researchers to shift the time of all observations within a Membership."""
membership = get_object_or_404(Membership, id=pk)
form = DateShiftForm(request.POST or None)
if form.is_valid():
delta = form.delta(current=membership.date_randomised)
membership.date_randomised = membership.date_randomised + delta
membership.save()
        shiftable = [i for i in membership.observations() if i.timeshift_allowed()]
shifted = [ _shifted(i, 'due', delta) for i in shiftable ]
shifted = [ _shifted(i, 'due_original', delta) for i in shiftable ]
_ = [ i.add_data('timeshift', value=delta) for i in shifted ]
_ = [ i.save() for i in shifted ]
        if settings.USE_VERSIONING:
            # 'revision' was undefined in the decompiled source; assuming django-reversion's set_comment()
            reversion.set_comment('Timeshifted observations by %s days.' % (delta.days,))
form = DateShiftForm()
messages.add_message(request, messages.WARNING, ('{} observations shifted by {} days.').format(len(shifted), delta.days))
return HttpResponseRedirect(reverse('admin:signalbox_membership_change', args=(membership.pk,)))
else:
return render_to_response('admin/signalbox/dateshift.html', {'form': form, 'membership': membership}, context_instance=RequestContext(request))
|
UTF-8
|
Python
| false | false | 6,675 |
py
| 114,545 |
data.py
| 111,506 | 0.701723 | 0.693783 | 0 | 132 | 49.575758 | 151 |
dluca14/credit-card-fraud-detection
| 15,659,450,803,710 |
a62d7d49bae49e9c119187abcf57c24d2043b88b
|
2d5139a1d9d960dba4500c53c45a191af1e2e2ed
|
/src/build_model/create_model.py
|
5c7617254d0ca7c0e58d06077aeb86258cca9426
|
[] |
no_license
|
https://github.com/dluca14/credit-card-fraud-detection
|
73ac9fa4f114edf6c4ac3fcc8c5b6fe1c6c4ee46
|
cd323e646902682f557ba8d4f9ac30561dbe40e2
|
refs/heads/master
| 2023-07-24T15:39:59.635041 | 2021-09-06T09:08:25 | 2021-09-06T09:08:25 | 403,555,742 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import gc
import os
import json
import joblib
import numpy as np
import pandas as pd
from lightgbm import LGBMClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
import sys
sys.path.append("..")
from storage.mongo_db import register_model, get_models_validation_score
data_path = "../../../../../data/credit-card-fraud-detection/"
def generate_static_catalog():
path = os.path.abspath(os.path.dirname(__file__))
path_to_catalog = f"{path}/training.json"
with open(path_to_catalog, encoding='utf-8') as file:
static_catalog = json.load(file)
return static_catalog["training_parameters"], static_catalog["model_parameters"]
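# A sketch of the expected training.json shape, inferred from the keys read
# below; the concrete values are illustrative, not authoritative:
#
#   {
#       "training_parameters": {"test_size": 0.2, "random_state": 42, "folds": 5,
#                               "verbose_eval": 100, "early_stop": 100},
#       "model_parameters": {"metric": "auc"}
#   }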
training_parameters, model_parameters = generate_static_catalog()
def read_data():
data_df = pd.read_csv(os.path.join(data_path, "credit_card_transactions.csv"))
print(f"Credit Card Fraud Detection data - rows: {data_df.shape[0]} columns: {data_df.shape[1]}")
return data_df
def data_train_test_split(data_df):
train_df, test_df = train_test_split(data_df, test_size=training_parameters["test_size"], shuffle=True,
random_state=training_parameters["random_state"], stratify=data_df["Class"])
return train_df, test_df
def get_predictors_target():
target = "Class"
predictors = ["Time", "V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10",
"V11", "V12", "V13", "V14", "V15", "V16", "V17", "V18", "V19",
"V20", "V21", "V22", "V23", "V24", "V25", "V26", "V27", "V28",
"Amount"]
return predictors, target
def train_model(train_df, test_df, predictors, target, run_optimization):
kf = StratifiedKFold(n_splits=training_parameters["folds"],
random_state=training_parameters["random_state"], shuffle=True)
oof_preds = np.zeros(train_df.shape[0])
test_preds = np.zeros(test_df.shape[0])
feature_importance = []
    validation_score = []
n_fold = 0
if run_optimization:
print("Not implemented")
return None
for fold_, (train_idx, valid_idx) in enumerate(kf.split(train_df, y=train_df[target])):
train_x, train_y = train_df[predictors].iloc[train_idx], train_df[target].iloc[train_idx]
valid_x, valid_y = train_df[predictors].iloc[valid_idx], train_df[target].iloc[valid_idx]
model = LGBMClassifier(**model_parameters)
model.fit(train_x, train_y, eval_set=[(train_x, train_y), (valid_x, valid_y)],
eval_metric=model_parameters["metric"], verbose=training_parameters["verbose_eval"],
early_stopping_rounds=training_parameters["early_stop"])
oof_preds[valid_idx] = model.predict_proba(valid_x, num_iteration=model.best_iteration_)[:, 1]
test_preds += model.predict_proba(test_df[predictors], num_iteration=model.best_iteration_)[:, 1] / kf.n_splits
fold_importance = []
for i, item in enumerate(predictors):
fold_importance.append({"feature": str(predictors[i]), "importance": str(model.feature_importances_[i])})
feature_importance.append({"fold": str(fold_ + 1), "fold_importance": fold_importance})
print(f"Fold {fold_ + 1} AUC : {round(roc_auc_score(valid_y, oof_preds[valid_idx]), 4)}")
validation_score.append({"fold": str(fold_ + 1), "auc": round(roc_auc_score(valid_y, oof_preds[valid_idx]), 4)})
y_pred = model.predict_proba(test_df[predictors])[:, 1]
test_auc_score = roc_auc_score(test_df[target], y_pred)
print(f"===========================\n[TEST] fold: {fold_ + 1} AUC score test set: {round(test_auc_score, 4)}\n")
del model, train_x, train_y, valid_x, valid_y
gc.collect()
train_auc_score = roc_auc_score(train_df[target], oof_preds)
print(f"Full AUC validation score {round(train_auc_score, 4)}\n")
print("Train using all data")
model = LGBMClassifier(**model_parameters)
model.fit(train_df[predictors], train_df[target],
eval_set=[(train_df[predictors], train_df[target]), (test_df[predictors], test_df[target])],
eval_metric="auc", verbose=training_parameters["verbose_eval"],
early_stopping_rounds=training_parameters["early_stop"])
y_pred = model.predict_proba(test_df[predictors])[:, 1]
test_auc_score = roc_auc_score(test_df["Class"], y_pred)
print(f"===========================\n[TEST] AUC score test set: {round(test_auc_score, 4)}\n")
model_data = {"train_rows": train_df.shape[0], "train_columns": len(predictors)}
validation_data = {"validation_score_folds": validation_score,
"validation_score_all": round(train_auc_score, 4),
"feature_importance": feature_importance}
model_id = register_model(model_data=model_data, model_parameters=model_parameters,
training_parameters=training_parameters, validation_data=validation_data)
return model, model_id, validation_data["validation_score_all"]
def save_model(model):
path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
    try:
        joblib.dump(model, os.path.join(path, "model", "model_light_gbm.pkl"))
    except Exception:
        print("Error writing model")
def load_model():
path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
    try:
        model = joblib.load(os.path.join(path, "model", "model_light_gbm.pkl"))
        return model
    except Exception:
        print("Error reading model")
def test_model(model, test_df, predictors):
    # the original re-loaded the model here, shadowing the `model` argument; use the one passed in
y_pred = model.predict_proba(test_df[predictors])[:, 1]
test_auc_score = roc_auc_score(test_df["Class"], y_pred)
print(f"===========================\nAUC score test set: {round(test_auc_score, 4)}")
def check_validation_score(model_id, validation_score):
validation_scores = get_models_validation_score()
if validation_scores:
for current_score in validation_scores:
if current_score["model_id"] != model_id and validation_score > current_score["validation_score"]:
return True
return False
def run_all(run_optimization=False, run_test=False):
data_df = read_data()
train_df, test_df = data_train_test_split(data_df)
predictors, target = get_predictors_target()
model, model_id, validation_score = train_model(train_df, test_df, predictors, target, run_optimization)
if check_validation_score(model_id, validation_score):
save_model(model)
if run_test:
model = load_model()
test_model(model, test_df, predictors)
return model_id, validation_score
if __name__ == "__main__":
run_all(run_test=True)
|
UTF-8
|
Python
| false | false | 6,999 |
py
| 10 |
create_model.py
| 6 | 0.61866 | 0.608658 | 0 | 166 | 40.162651 | 120 |
OdysseusC/core
| 12,601,434,077,418 |
4ab1cbe05b55727823c26487715c2379223f7412
|
00502bfe6120f4936101c59e5ec0ff012f2872a9
|
/examples/iot-paas.py
|
ae86d82b3b8c9828c65f320025b13f67ded224de
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/OdysseusC/core
|
d95e849cbf2ebd2983d94259b1b3cc0f9be122ef
|
8d5c1841d37d370c58bcb2fa7e52a7cafb46e9d0
|
refs/heads/main
| 2023-09-06T02:09:46.208401 | 2021-11-08T14:08:53 | 2021-11-08T14:08:53 | 419,555,518 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import json
import random
import time
import traceback
import uuid
import requests
from paho.mqtt import client as mqtt_client
keel_url = "http://192.168.123.9:30707/v0.1.0"
broker = "192.168.123.9"
port = 32412
def create_entity_token(entity_id, entity_type, user_id):
data = dict(entity_id=entity_id, entity_type=entity_type, user_id=user_id)
token_create = "/auth/token/create"
res = requests.post(keel_url + token_create, json=data)
return res.json()["data"]["entity_token"]
def create_entity(entity_id, entity_type, user_id, plugin_id, token):
query = dict(entity_id=entity_id, entity_type=entity_type, user_id=user_id, source="abc", plugin_id=plugin_id)
entity_create = "/core/plugins/{plugin_id}/entities?id={entity_id}&type={entity_type}&owner={user_id}&source={source}".format(
**query)
data = dict(token=token)
res = requests.post(keel_url + entity_create, json=data)
print(res.json())
def create_subscription(entity_id, entity_type, user_id, plugin_id, subscription_id):
query = dict(entity_id=entity_id, entity_type=entity_type, user_id=user_id, source="abc", plugin_id=plugin_id, subscription_id=subscription_id)
entity_create = "/core/plugins/{plugin_id}/subscriptions?id={subscription_id}&type={entity_type}&owner={user_id}&source={source}".format(
**query)
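    # The filter below appears to use an SQL-like subscription syntax, selecting
    # property p1 of the monitored entity into topic 'abc'.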
data = dict(mode="realtime", source="ignore", filter="insert into abc select " + entity_id + ".p1", target="ignore", topic="abc", pubsub_name="client-pubsub")
print(data)
res = requests.post(keel_url + entity_create, json=data)
print(res.json())
def get_subscription(entity_id, entity_type, user_id, plugin_id, subscription_id):
query = dict(entity_id=entity_id, entity_type=entity_type, user_id=user_id, source="abc", plugin_id=plugin_id, subscription_id=subscription_id)
entity_create = "/core/plugins/{plugin_id}/subscriptions/{subscription_id}?type={entity_type}&owner={user_id}&source={source}".format(
**query)
res = requests.get(keel_url + entity_create)
print(res.json())
def get_entity(entity_id, entity_type, user_id, plugin_id):
query = dict(entity_id=entity_id, entity_type=entity_type, user_id=user_id, plugin_id=plugin_id)
entity_create = "/core/plugins/{plugin_id}/entities/{entity_id}?type={entity_type}&owner={user_id}&source={plugin_id}".format(
**query)
res = requests.get(keel_url + entity_create)
print(res.json()["properties"])
def on_connect(client, userdata, flags, rc):
if rc == 0:
print("Connected to MQTT Broker!")
else:
print("Failed to connect, return code %d\n", rc)
if __name__ == "__main__":
entity_id = uuid.uuid4().hex
entity_type = "device"
user_id = "abc"
print("base entity info")
print("entity_id = ", entity_id)
print("entity_type = ", entity_type)
print("user_id = ", user_id)
print("-" * 80)
print("get entity token")
token = create_entity_token(entity_id, entity_type, user_id)
print("token=", token)
time.sleep(1)
print("-" * 80)
print("create entity with token")
try:
create_entity(entity_id, entity_type, user_id, "pluginA", token)
print("create entity {entity_id} success".format(**dict(entity_id=entity_id)))
except Exception:
print(traceback.format_exc())
print("create entity failed")
time.sleep(1)
print("-" * 80)
print("create subscription")
create_subscription(entity_id, "SUBSCRIPTION", user_id, "pluginA", entity_id+"sub")
print("-" * 80)
print("get subscription")
get_subscription(entity_id, "SUBSCRIPTION", user_id, "pluginA", entity_id+"sub")
print("-" * 80)
print("update properties by mqtt")
client = mqtt_client.Client(entity_id)
client.username_pw_set(username=user_id, password=token)
client.on_connect = on_connect
client.connect(host=broker, port=port)
client.loop_start()
time.sleep(1)
payload = json.dumps(dict(p1=dict(value=random.randint(1, 100), time=int(time.time()))))
print(payload)
client.publish("system/test", payload=payload)
print("-" * 80)
print("get entity")
get_entity(entity_id, entity_type, user_id, "pluginA")
time.sleep(5)
    try:
        while True:
            payload = json.dumps(dict(p1=dict(value=random.randint(1, 100), time=int(time.time()))))
            print(payload)
            client.publish("system/test", payload=payload)
            time.sleep(5)
    except KeyboardInterrupt:
        # the trailing disconnect was unreachable after the infinite loop; stop cleanly on Ctrl-C
        client.disconnect()
|
UTF-8
|
Python
| false | false | 4,470 |
py
| 103 |
iot-paas.py
| 62 | 0.661745 | 0.647651 | 0 | 117 | 37.205128 | 162 |
ApyMajul/GatsbyLeMagnifique
| 14,903,536,546,958 |
4073c2e12a4ac0f616231196fd44cd065321a50b
|
482888592a16dff898b887f1b60a288f10da68f6
|
/contenus/admin.py
|
85df27504f0f767fc0627fa14707c2db6a5ada9d
|
[] |
no_license
|
https://github.com/ApyMajul/GatsbyLeMagnifique
|
b1ec9a77fdc66f02f68846871de011eca2a73d84
|
4916f84564636fe7d956d9d8ab5db44e29825849
|
refs/heads/master
| 2020-04-30T17:53:05.606363 | 2019-10-21T11:53:40 | 2019-10-21T11:53:40 | 176,993,069 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib import admin
from .models import Content, Categorie
admin.site.register(Content)
admin.site.register(Categorie)
|
UTF-8
|
Python
| false | false | 134 |
py
| 16 |
admin.py
| 16 | 0.820896 | 0.820896 | 0 | 6 | 21.333333 | 38 |
GalinaDimitrova/Hack
| 386,547,065,975 |
8a1866eec006f61aab8efa679c3501622434abf1
|
be1a45b4ee526ec3cd81a2bcd06404db90c097fb
|
/week9/server.py
|
a66fae26bcea08744893728cc36648123000be15
|
[] |
no_license
|
https://github.com/GalinaDimitrova/Hack
|
c48e69c80678fa24937ca7dd4a36b1050e05d66e
|
186e6f3520183565765569e63b39b25249c82f11
|
refs/heads/master
| 2021-01-25T08:55:23.349341 | 2014-12-29T23:25:54 | 2014-12-29T23:25:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from flask import Flask
from flask import request
from flask import render_template
from local_settings import debug_mode
from make_database import Page, Website
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
from sqlalchemy import or_
app = Flask(__name__)
@app.route('/')
def index():
html = open('index.html', 'r').read()
return html
def word_in_title(search_word, result):
if result.title and search_word in result.title:
return True
return False
def word_in_desc(search_word, result):
if result.description and search_word in result.description:
return True
return False
def word_in_url(search_word, result):
if result.url and search_word in result.url:
return True
return False
@app.route('/search/')
def search():
engine = create_engine("sqlite:///storage.db")
session = Session(bind=engine)
key_word = request.args.get('key_words', '')
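    # Build one OR'd LIKE filter across title, description and url.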
pages = session.query(Page).filter(or_(Page.title.like(
'%' + key_word + '%'), Page.description.like(
'%' + key_word + '%'), Page.url.like('%' + key_word + '%')))
# result = []
# pages = session.query(Page.title, Page.description, Page.url).all()
# for page in pages:
# if word_in_title(key_word, page) and word_in_desc(key_word, page) and word_in_url(key_word, page):
# result.append(page)
# elif word_in_title(key_word, page) and word_in_desc(key_word, page):
# result.append(page)
# elif word_in_title(key_word, page):
# result.append(page)
# return render_template('result.html', pages=result)
return render_template('result.html', pages=pages)
if __name__ == '__main__':
app.run(debug=debug_mode)
# self.cursor.execute(
# "select string from stringtable where string like ? and type = ?",
# ('%'+searchstr+'%', type))
|
UTF-8
|
Python
| false | false | 1,892 |
py
| 93 |
server.py
| 86 | 0.641121 | 0.641121 | 0 | 74 | 24.581081 | 108 |
lyndonlens/tensor-field-networks-1
| 11,630,771,464,616 |
e8cc290456965d1820efc57da9046b844c37dbcc
|
71d2b5e3f8f6ee27962b9cda26b67e8f57a364ad
|
/tfn/layers/radial_factories.py
|
e399e6931d35650b452b5ea6bbd970d84379c950
|
[
"MIT"
] |
permissive
|
https://github.com/lyndonlens/tensor-field-networks-1
|
2190c0a16bd26742a51bcf4a8ef525050f1564b7
|
5c25583ee4108a13af8e73eabd3c448f42cb70a0
|
refs/heads/master
| 2023-06-20T19:23:08.439262 | 2021-07-15T16:32:37 | 2021-07-15T16:32:37 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import json
import tensorflow as tf
from tensorflow.keras import activations, regularizers, Sequential
from tensorflow.keras.layers import Layer
class RadialFactory(object):
"""
Abstract class for RadialFactory objects, defines the interface. Subclass
"""
def __init__(
self,
num_layers: int = 2,
units: int = 32,
activation: str = "ssp",
l2_lambda: float = 0.0,
**kwargs,
):
self.num_layers = num_layers
self.units = units
if activation is None:
activation = "ssp"
if isinstance(activation, str):
self.activation = activation
else:
            raise ValueError(
                "Expected `str` for param `activation`, but got `{}` instead. "
                "Ensure `activation` is a string mapping to a valid keras "
                "activation function".format(activation)
            )
self.l2_lambda = l2_lambda
self.sum_points = kwargs.pop("sum_points", False)
self.dispensed_radials = 0
def get_radial(self, feature_dim, input_order=None, filter_order=None):
raise NotImplementedError
def to_json(self):
self.__dict__["type"] = type(self).__name__
return json.dumps(self.__dict__)
@classmethod
def from_json(cls, config: str):
raise NotImplementedError
class DenseRadialFactory(RadialFactory):
"""
Default factory class for supplying radial functions to a Convolution layer. Subclass this
factory and override its `get_radial` method to return custom radial instances/templates.
You must also override the `to_json` and `from_json` and register any custom `RadialFactory`
classes to a unique string in the keras global custom objects dict.
"""
def get_radial(self, feature_dim, input_order=None, filter_order=None):
"""
Factory method for obtaining radial functions of a specified architecture, or an instance
of a radial function (i.e. object which inherits from Layer).
:param feature_dim: Dimension of the feature tensor being point convolved with the filter
produced by this radial function. Use to ensure radial function outputs a filter of
shape (points, feature_dim, filter_order)
:param input_order: Optional. Rotation order of the of the feature tensor point convolved
with the filter produced by this radial function
:param filter_order: Optional. Rotation order of the filter being produced by this radial
function.
:return: Keras Layer object, or subclass of Layer. Must have attr dynamic == True and
trainable == True.
"""
layers = [
Radial(
self.units,
self.activation,
self.l2_lambda,
sum_points=self.sum_points,
name=f"radial_{self.dispensed_radials}/layer_{i}",
)
for i in range(self.num_layers)
]
layers.append(
Radial(
feature_dim,
self.activation,
self.l2_lambda,
sum_points=self.sum_points,
name=f"radial_{self.dispensed_radials}/layer_{self.num_layers}",
)
)
self.dispensed_radials += 1
return Sequential(layers)
@classmethod
def from_json(cls, config: str):
return cls(**json.loads(config))
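# --- Illustrative only (not part of the original module): the subclassing and
# registration pattern described in the docstring above. The class name and
# registry key are hypothetical; get_custom_objects() is keras' global custom
# objects dict.
#
#   from tensorflow.keras.utils import get_custom_objects
#
#   class SingleLayerRadialFactory(RadialFactory):
#       def get_radial(self, feature_dim, input_order=None, filter_order=None):
#           self.dispensed_radials += 1
#           return Sequential([Radial(feature_dim, self.activation, self.l2_lambda)])
#
#       @classmethod
#       def from_json(cls, config: str):
#           return cls(**json.loads(config))
#
#   get_custom_objects()["SingleLayerRadialFactory"] = SingleLayerRadialFactory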
class Radial(Layer):
def __init__(
self, units: int = 32, activation: str = "ssp", l2_lambda: float = 0.0, **kwargs
):
self.sum_points = kwargs.pop("sum_points", False)
super().__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.l2_lambda = l2_lambda
self.kernel = None
self.bias = None
def build(self, input_shape):
self.kernel = self.add_weight(
name="kernel",
shape=(input_shape[-1], self.units),
regularizer=regularizers.l2(self.l2_lambda),
)
self.bias = self.add_weight(
name="bias",
shape=(self.units,),
regularizer=regularizers.l2(self.l2_lambda),
)
self.built = True
def compute_output_shape(self, input_shape):
return tf.TensorShape(list(input_shape)[:-1] + [self.units])
def get_config(self):
base = super().get_config()
        updates = dict(units=self.units, activation=activations.serialize(self.activation))  # serialize so get_config() round-trips
return {**base, **updates}
def call(self, inputs, training=None, mask=None):
equation = "bpf,fu->bpu" if self.sum_points else "bpqf,fu->bpqu"
return self.activation(tf.einsum(equation, inputs, self.kernel) + self.bias)
|
UTF-8
|
Python
| false | false | 4,758 |
py
| 58 |
radial_factories.py
| 55 | 0.59794 | 0.592686 | 0 | 135 | 34.244444 | 97 |
j4zzlee/flask-restful
| 15,307,263,485,696 |
eaf19150f1f4b926ffd4a6e1082f348cfd583409
|
7ecc569b1934c20b1c60aefe7948d52e263fdc62
|
/api/app/models/MetaValue.py
|
64d1b621347511b1f3fcbb7a84c5813ebe0a26f7
|
[] |
no_license
|
https://github.com/j4zzlee/flask-restful
|
8bbd726e8b950e7d063c58a2886307ecf3b834ed
|
f278f57676b59c3bbbee475e8292dd4c5446b607
|
refs/heads/master
| 2022-08-27T10:16:09.157858 | 2015-10-28T03:36:53 | 2015-10-28T03:36:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'gia'
from models import Base, db
from libraries.db import guid
class MetaValue(db.Model, Base):
__tablename__ = 'sys_meta_value'
TYPE_INFO = 1
TYPE_LINK = 2
id = db.Column(guid(), primary_key=True)
group_id = db.Column(
guid(),
db.ForeignKey(
'sys_meta_group.id',
name='fk_sys_meta_value_id_sys_meta_group',
onupdate='CASCADE',
ondelete='CASCADE'
),
index=True
)
group = db.relationship('MetaGroup')
type = db.Column(db.Integer, default=1)
name = db.Column(db.String(255), nullable=False, unique=True)
value = db.Column(db.Text, nullable=False)
link_to = db.Column(db.String(4000), default='#')
|
UTF-8
|
Python
| false | false | 740 |
py
| 36 |
MetaValue.py
| 29 | 0.583784 | 0.57027 | 0 | 30 | 23.666667 | 65 |
HeKuToCbI4/Some_weird_project
| 4,861,903,029,113 |
be562c98030336df60f1eac5770180fbe769f099
|
fc8647206dd1cee7c75725c3af56eb4ef61f197d
|
/telegrambot/kisik_bot.py
|
5125c8f1e8d582f6e2e179e90964e164d674314d
|
[] |
no_license
|
https://github.com/HeKuToCbI4/Some_weird_project
|
1251077d0bf9a6c7a0e59cb54ffa3c7b8ba316d8
|
2375dde3ebee13c2c3862f4fce7979e1a833ef37
|
refs/heads/master
| 2021-09-07T07:15:03.793993 | 2018-02-19T12:43:07 | 2018-02-19T12:43:07 | 106,857,757 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from threading import Thread, Event
from time import sleep
import telebot
import telegrambot.config as config
from Modules.Common.checker import Failure
from Modules.Common.helper import LogClass
from Modules.Common.logger import Logger
from Modules.VkModule.vk_module import VkModule
from Modules.VkModule.WallMonitor.vk_wall_monitor import VkWallMonitor
from Modules.WeatherModule.weather_api import OWMProvider
class TelegramBot:
def __init__(self):
self.bot = telebot.TeleBot(config.token)
self.bot_logger = Logger(name='Bot logger', log_class=LogClass.Info, log_to_file=True,
log_script_information=True,
log_name='bot_log.txt')
self.vk = VkModule()
self.vk_wall_monitor = VkWallMonitor(self.vk.api)
self.monitor_posts = {}
self.OWM_provider = OWMProvider()
@self.bot.message_handler(commands=['weather'])
def handle_weather(message):
try:
self.bot_logger.log_string(LogClass.Info, f'Got message from {message.chat.id}: {message}')
message_string = str(message.text).lower()
city = message_string.split(' ')[1]
weather = self.OWM_provider.get_current_weather_in_city(city)
if weather is not None:
message_to_send = 'Текущая погода: {}\nТемпература: {} град. цельсия\nДавление: {} мм.рт.ст.\n' \
'Влажность: {}\nВосход: {}\nЗакат: {}\nВетер: {} м/c'.format(
weather.description, weather.temp, weather.pressure,
weather.humidity, weather.sunrise, weather.sunset, weather.wind)
else:
message_to_send = 'Возникла ошибка, соси хуй!'
self.bot.send_message(message.chat.id, message_to_send)
log_string = 'Sent message: {message_to_send}'.format(message_to_send=message_to_send)
self.bot_logger.log_string(LogClass.Info, log_string)
except BaseException as e:
                self.bot_logger.log_string(LogClass.Exception, 'Возникла ошибка при обработке погоды: {}'.format(e))
@self.bot.message_handler(commands=['monitor', 'off_monitor'])
def handle_monitoring(message):
try:
self.bot_logger.log_string(LogClass.Info, f'Got message from {message.chat.id}: {message}')
message_string = str(message.text).lower()
try:
target = message_string.split(' ')[1]
except BaseException:
message_to_send = 'Используйте формат /команда домен\nДомен-короткое имя страницы - цели.'
self.bot.send_message(message.chat.id, message_to_send)
raise Failure('Невозможно получить домен из сообщения {}'.format(message.text))
if message_string.__contains__('/off_monitor'):
self.stop_monitoring_posts(target, message.chat.id)
message_to_send = 'Прекращён мониторинг постов со страницы {}'.format(target)
else:
self.start_last_wall_posts_monitoring(target, message.chat.id)
message_to_send = 'Начинаем мониторинг постов в {}\nПоследние 5 постов:\n'.format(target)
self.bot.send_message(message.chat.id, message_to_send)
log_string = 'Sent message: {message_to_send}'.format(message_to_send=message_to_send)
self.bot_logger.log_string(LogClass.Info, log_string)
except BaseException as e:
self.bot_logger.log_string(LogClass.Exception, f'{e} occurred.')
@self.bot.message_handler(content_types=['text'])
def handle_messages(message):
self.bot_logger.log_string(LogClass.Trace, 'Got message at {}: {}'.format(message.chat.id, message.text))
# self.bot.send_message(message.chat.id, message.text)
# log_string = 'Sent message: {message_to_send}'.format(message_to_send=message.text)
# self.bot_logger.log_string(LogClass.Info, log_string)
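    # Background poller: runs in a daemon thread per (domain, chat_id) pair,
    # publishing any new wall posts until the corresponding Event is cleared.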
def monitor_wall_posts(self, domain, chat_id):
try:
last_posts_ids = []
while self.monitor_posts[(domain, chat_id)].isSet():
five_last_posts = self.vk_wall_monitor.get_n_last_wall_posts(domain=domain, count=5)
for post in five_last_posts:
if not post['id'] in last_posts_ids:
self.bot.send_message(chat_id, "Новый пост на странице {}:\n{}".format(domain, post['text']))
last_posts_ids.append(post['id'])
sleep(60)
                if len(last_posts_ids) > 50:
                    last_posts_ids = last_posts_ids[-50:]  # keep the most recent ids, not the oldest
        except Exception:
            self.monitor_posts.pop((domain, chat_id))
def start_last_wall_posts_monitoring(self, domain, chat_id):
if not (domain, chat_id) in self.monitor_posts.keys():
self.monitor_posts[(domain, chat_id)] = Event()
if not self.monitor_posts[(domain, chat_id)].isSet():
self.monitor_posts[(domain, chat_id)].set()
t = Thread(target=self.monitor_wall_posts, args=(domain, chat_id))
t.setDaemon(True)
t.start()
def stop_monitoring_posts(self, domain, chat_id):
self.monitor_posts[(domain, chat_id)].clear()
def start_bot(self):
self.bot.polling(none_stop=True)
def stop_bot(self):
self.bot.stop_polling()
def main():
bot = TelegramBot()
bot.start_bot()
|
UTF-8
|
Python
| false | false | 5,945 |
py
| 18 |
kisik_bot.py
| 17 | 0.591974 | 0.590199 | 0 | 112 | 49.294643 | 117 |
Ramhawkz47/guvi
| 17,300,128,277,210 |
967f40117104650d87ec25f1c263bf6b454d2721
|
cd537753b46b5e4c9dbb17a27cb6da23aba1d491
|
/delnum.py
|
9ca1a58e70efd5f524e3c066becfce57157cee6b
|
[] |
no_license
|
https://github.com/Ramhawkz47/guvi
|
0b064af8d32529370c88c2f24b9ec04a85f170a7
|
8e87a9c8105103f387b51372ca83c7f700880fff
|
refs/heads/master
| 2020-05-09T04:49:50.092839 | 2019-07-06T03:56:11 | 2019-07-06T03:56:11 | 180,985,898 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def deln(n, k):
    # base case: no digits left, or no deletions remaining
    if n == 0 or k == 0:
        return n
    # either keep the last digit...
    a = deln(n // 10, k) * 10 + n % 10
    # ...or spend one deletion on it
    b = deln(n // 10, k - 1)
    return min(a, b)
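# Worked example: deln(4325, 1) == 325 -- dropping the leading 4 yields the
# smallest number obtainable by deleting k = 1 digit.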
s=input()
s=s.split(" ")
print(deln(int(s[0]),int(s[1])))
|
UTF-8
|
Python
| false | false | 277 |
py
| 8 |
delnum.py
| 7 | 0.454874 | 0.407942 | 0 | 17 | 15.294118 | 32 |
serarca/LowerBoundsVRP
| 15,719,580,330,238 |
9571e9e0dd1dbdb73dc43bfb68abb20c3f5763aa
|
1038aa15501853ae292f580cb1198411f02a2d31
|
/baldacci.py
|
bd8346d15d69e79278f747b0b5f9a8aab83be17d
|
[] |
no_license
|
https://github.com/serarca/LowerBoundsVRP
|
f62540d32eec0ceeb46cc845e20ed0cea722c96a
|
48b7660587633f69ffce748278a8ac6385f7cb3f
|
refs/heads/master
| 2020-03-18T03:05:27.314898 | 2018-07-21T21:22:09 | 2018-07-21T21:22:09 | 134,222,176 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np  # GENROUTE below uses np.min / np.max / np.argmin but the import was missing

# We solve the GENPATH problem
def GENPATH(Delta, gamma, h, capacity, N, quantities, distance, direction):
P = {}
T = {}
for k in N + [h]:
P[k] = []
T[k] = []
T[h] = [{'path':[h],'cost':0,'lower_bound':0, "load":0, 'end':h}]
count_paths = 0
while True:
costs = {}
for k in T.keys():
if len(T[k])>0:
costs[k] = T[k][0]['cost']
if len(costs) == 0:
break
min_costs = min(costs, key = costs.get)
p_star = T[min_costs].pop(0)
if not min_costs in P.keys():
P[min_costs] = []
P[min_costs].append(p_star)
count_paths += 1
# If too many paths, stop
if count_paths==Delta:
break
# If path violates capacity, go to the next one
if p_star['load'] > capacity/2.0:
continue
for n in N:
if not (n in p_star['path']):
if direction == 'right':
new_p = {'path':p_star['path'] + [n], 'cost':p_star['cost'] + distance[n][p_star['end']],
'lower_bound':p_star['cost'] + distance[n][p_star['end']], 'load': p_star['load'] + quantities[n],
'end':n}
elif direction == 'left':
new_p = {'path':p_star['path'] + [n], 'cost':p_star['cost'] + distance[p_star['end']][n],
'lower_bound':p_star['cost'] + distance[p_star['end']][n], 'load': p_star['load'] + quantities[n],
'end':n}
# Check if the new path has a cost too high
if new_p['lower_bound'] >= gamma:
continue
# Check if the new path has a load too high
if new_p['load'] > capacity:
continue
# Check if this new path is dominated by any path in P
dominated = False
for p in P[n]:
if (p['end'] == new_p['end']) and (p['cost'] <= new_p['cost']) and (set(p['path']) == set(new_p['path'])):
dominated = True
break
if dominated:
continue
# Check if the path is dominated by any path in T
insertion_index = 0
for i,p in enumerate(T[n]):
if (p['end'] == new_p['end']) and (p['cost'] <= new_p['cost']) and (set(p['path']) == set(new_p['path'])):
dominated = True
break
if (p['cost'] > new_p['cost']):
break
insertion_index = i+1
if dominated:
continue
# Append the path
T[n].insert(insertion_index, new_p)
# Delete dominated elements
j = insertion_index + 1
while j<len(T[n]):
p = T[n][j]
if (p['end'] == new_p['end']) and (p['cost'] > new_p['cost']) and (set(p['path']) == set(new_p['path'])):
T[n].pop(j)
else:
j += 1
return P
def GENROUTE(Delta, gamma, h, capacity, N, quantities, distance):
P_l = GENPATH(Delta, gamma, h, capacity, N, quantities, distance, direction = 'left')
P_r = GENPATH(Delta, gamma, h, capacity, N, quantities, distance, direction = 'right')
T = {}
R = {}
added = {}
for n in N:
        added[n] = {(-1, -1)}  # a set containing the sentinel pair; set((-1,-1)) would collapse to {-1}
if len(P_l[n])>1 and len(P_r[n])>1:
T[n] = [[(0,0),P_l[n][0]['cost']+P_r[n][0]['cost']]]
added[n].add((0,0))
else:
T[n] = []
R[n] = []
valid_v = [0,0,0,0]
while True:
# Calculate costs
costs = {}
for n in N:
if len(T[n])>0:
costs[n] = T[n][0][1]
if len(costs) == 0:
break
min_costs_n = min(costs, key = costs.get)
min_cost = costs[min_costs_n]
indices = T[min_costs_n].pop(0)[0]
path_l = P_l[min_costs_n][indices[0]]
path_r = P_r[min_costs_n][indices[1]]
if min_cost> gamma:
break
total_load = path_l['load'] + path_r['load'] - quantities[min_costs_n]
valid = True
if total_load > capacity:
valid = False
valid_v[0] = valid_v[0]+1
elif (np.min([path_l['load'],path_r['load']]) < total_load/2.0 or
np.max([path_l['load'],path_r['load']]) > total_load/2.0+quantities[min_costs_n]):
valid = False
valid_v[1] = valid_v[1]+1
elif (set(path_l['path']).intersection(set(path_r['path'])) != set([h,min_costs_n])):
valid = False
valid_v[2] = valid_v[2]+1
else:
for n in N:
for r in R[n]:
if set(r['path']) == set(path_l['path']+path_r['path']):
valid = False
valid_v[3] = valid_v[3] + 1
break
if not valid:
break
if valid:
R[min_costs_n].append({'path':path_l['path'][0:(len(path_l['path'])-1)]+list(reversed(path_r['path'])),
'cost':path_l['cost']+path_r['cost'],
'load':total_load,
'median':min_costs_n,
'indices':indices})
new_route_1 = (indices[0]+1,indices[1])
new_route_2 = (indices[0],indices[1]+1)
# If routes do not exist, transform them into the first route
if (indices[0]+1 >= len (P_l[min_costs_n])):
new_route_1 = (0,0)
if (indices[1]+1 >= len (P_r[min_costs_n])):
new_route_2 = (0,0)
new_routes = [new_route_1,new_route_2]
new_costs = [P_l[min_costs_n][new_routes[0][0]]['cost']+P_r[min_costs_n][new_routes[0][1]]['cost'],
P_l[min_costs_n][new_routes[1][0]]['cost']+P_r[min_costs_n][new_routes[1][1]]['cost']]
min_cost = np.min(new_costs)
max_cost = np.max(new_costs)
min_route = new_routes[np.argmin(new_costs)]
max_route = new_routes[(np.argmin(new_costs)+1)%2]
insert_index = 0
# Check if the route has been added previously
if not min_route in added[min_costs_n]:
for i in range(len(T[min_costs_n])):
cost = T[min_costs_n][i][1]
if min_cost<cost:
break
insert_index+=1
T[min_costs_n].insert(insert_index,[min_route,min_cost])
insert_index +=1
added[min_costs_n].add(min_route)
# Check if the route has been added previously
if not max_route in added[min_costs_n]:
for i in range(insert_index, len(T[min_costs_n])):
cost = T[min_costs_n][i][1]
if max_cost<cost:
break
insert_index+=1
T[min_costs_n].insert(insert_index,[max_route,max_cost])
added[min_costs_n].add(max_route)
# Verify that the routes are not always empty
for n in N:
if len(R[n]) == 0:
R[n] = [{'path':[h,n,h], 'cost':distance[h][n] + distance[n][h], 'load': quantities[n]}]
return R
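# --- Illustrative toy call (not part of the original file): h is the depot,
# N the customer set, `distance` a nested dict distance[i][j]; all values
# below are made up.
#
#   N = [1, 2]
#   quantities = {1: 3, 2: 4}
#   distance = {0: {1: 5, 2: 6}, 1: {0: 5, 2: 2}, 2: {0: 6, 1: 2}}
#   R = GENROUTE(Delta=100, gamma=50, h=0, capacity=10,
#                N=N, quantities=quantities, distance=distance)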
|
UTF-8
|
Python
| false | false | 7,396 |
py
| 7 |
baldacci.py
| 6 | 0.443348 | 0.430909 | 0 | 180 | 40.088889 | 126 |
shlok97/FundamentalsStocksAnalysis
| 8,744,553,438,858 |
450fb4ad979f5c67ff264905fe620a414d51e456
|
eff1eb79e1fd3e1d2acc8b134dc6b69fd8c706f0
|
/FundamentalAnalysis.py
|
6fd0bf8ace35693f47d6cd9ab52cba3cc06dc125
|
[] |
no_license
|
https://github.com/shlok97/FundamentalsStocksAnalysis
|
037bbc4ade69e2531a604cb316213175df722552
|
3da9d4c4e537e72d683a710128f4c9fbb16cc2f0
|
refs/heads/master
| 2020-05-29T21:29:17.196520 | 2019-05-30T08:55:12 | 2019-05-30T08:55:12 | 189,379,667 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pandas as pd
import numpy as np
import csv
# df_cashflow = pd.read_csv("stocks/Kotak/Cash Flow-Table 1.csv", error_bad_lines=False)
# df_profitloss = pd.read_csv("stocks/Kotak/Profit & Loss-Table 1.csv", quoting=csv.QUOTE_NONE, error_bad_lines=False)
#
# print(df_profitloss.head())
errors = 0
def getData(stock):
finalData = []
# stock = '3i Infotech 3'
parameterIndexMapping = {}
NetCashFlow = 'Net Cash Flow'
ROE = 'Return on Equity'
ROCE = 'Return on Capital Emp'
Investments = 'Investments'
OtherAssets = 'Other Assets'
EquityShareCapital = 'Equity Share Capital'
Sales = 'Sales'
NetProfit = 'Net profit'
EPS = 'EPS'
PE = 'Price to earning'
Price = 'Price'
finalLen = 0
with open('data/' + stock + '_Cash Flow-Table1.csv', 'rt')as f:
data = csv.reader(f)
for index, row in enumerate(data):
# print(row)
if row[0] == NetCashFlow:
finalData.append(row)
parameterIndexMapping[NetCashFlow] = finalLen
finalLen += 1
with open('data/' + stock + '_Balance Sheet-Table1.csv', 'rt')as f:
data = csv.reader(f)
for index, row in enumerate(data):
# print(row)
row = row[:11]
if row[0] == ROE:
finalData.append(row)
parameterIndexMapping[ROE] = finalLen
finalLen += 1
if row[0] == ROCE:
finalData.append(row)
parameterIndexMapping[ROCE] = finalLen
finalLen += 1
if row[0] == Investments:
finalData.append(row)
parameterIndexMapping[Investments] = finalLen
finalLen += 1
if row[0] == OtherAssets:
finalData.append(row)
parameterIndexMapping[OtherAssets] = finalLen
finalLen += 1
if row[0] == EquityShareCapital:
finalData.append(row)
parameterIndexMapping[EquityShareCapital] = finalLen
finalLen += 1
with open('data/' + stock + '_Profit & Loss-Table1.csv', 'rt')as f:
data = csv.reader(f)
for index, row in enumerate(data):
# print(row)
row = row[:11]
if row[0] == Sales:
finalData.append(row)
parameterIndexMapping[Sales] = finalLen
finalLen += 1
if row[0] == NetProfit:
finalData.append(row)
parameterIndexMapping[NetProfit] = finalLen
finalLen += 1
if row[0] == EPS:
finalData.append(row)
parameterIndexMapping[EPS] = finalLen
finalLen += 1
if row[0] == PE:
finalData.append(row)
parameterIndexMapping[PE] = finalLen
finalLen += 1
if row[0] == Price:
finalData.append(row)
parameterIndexMapping[Price] = finalLen
finalLen += 1
data = []
for row in finalData:
# print(row, len(row))
#
# for col in row:
# print(col[:2] == " \t")
for i in range(len(row)):
# if row[i][:2] == " \t":
# row[i] = row[i][2:]
# Prepare to convert to float values
if row[i][-1:] == "%":
row[i] = row[i][:-1]
row[i] = row[i].replace(",", "")
row[i] = row[i].replace("(", "")
row[i] = row[i].replace(")", "")
            if i != 0:
if row[i] == '' or row[i] == '-':
row[i] = None
continue
# print(row[i])
# row[i] = float(row[i])
try:
row[i] = float(row[i])
except ValueError:
# print("Error", row[i])
row[i] = None
# print(row)
rowNumpy = np.array(row[1:])
cleanNumpy = rowNumpy[rowNumpy != None]
# Replace all empty fields
for i in range(len(row)):
if row[i] is None:
                if len(cleanNumpy) == 0:
row[i] = 0
continue
row[i] = np.median(cleanNumpy)
# print(rowNumpy, np.median(cleanNumpy))
# print(row)
data.append(row[1:])
# print(data)
# price = data[-1]
# print(price[-1], price[-2])
# y = (price[-1] - price[-2])/price[-2]
#
# print(y)
# print(parameterIndexMapping)
def getY(data):
price = data[-1]
y = (price[-1] - price[-2]) / price[-2]
return y
def percentChange(data, param, duration):
index = parameterIndexMapping[param]
return (data[index][-2] - data[index][-2 - duration]) / data[index][-2 - duration]
def getMean(data, param, duration=0):
index = parameterIndexMapping[param]
return (data[index][-2] + data[index][-2 - duration]) / 2
# print(percentChange(data, Price, 1))
dateRange = [0, 2, 4]
sales_change = [percentChange(data, Sales, i + 1) for i in dateRange]
net_cash_flow_change = [percentChange(data, NetCashFlow, i + 1) for i in dateRange]
share_capital_change = [percentChange(data, EquityShareCapital, i + 1) for i in dateRange]
investments_change = [percentChange(data, Investments, i + 1) for i in dateRange]
net_profit_change = [percentChange(data, NetProfit, i + 1) for i in dateRange]
eps_change = [percentChange(data, EPS, i + 1) for i in dateRange]
other_assets_change = [percentChange(data, OtherAssets, i + 1) for i in dateRange]
price_change = [percentChange(data, Price, i + 1) for i in dateRange]
roe_change = [percentChange(data, ROE, i + 1) for i in dateRange]
eps = getMean(data, EPS)/100
pe = getMean(data, PE)/20
roe = getMean(data, ROE)/100
roce = getMean(data, ROCE)/100
y = getY(data)
# print(sales_change)
# print(net_cash_flow_change)
# print(share_capital_change)
# print(investments_change)
# print(net_profit_change)
# print(eps_change)
# print(price_change)
# print(eps)
# print(pe)
# print(y)
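    # Feature vector: 1-, 3- and 5-year percent changes for each fundamental,
    # plus scaled point-in-time EPS, P/E, ROE and ROCE.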
X = sales_change + roe_change + other_assets_change + net_cash_flow_change + share_capital_change + investments_change + net_profit_change + eps_change + price_change + [eps, pe, roe, roce]
return X, y
def run():
# print(getData('3i Infotech 3'))
# print(getData('Kotak Mah. Bank.xlsx'))
from stockList import stockList
# print(stockList)
# print(getData(stockList[0]))
X = np.array([])
y = np.array([])
stockParamMapping = {}
for stock in stockList:
try:
# print(getData(stock))
params, output = getData(stock)
if output > 0:
output = 1
else:
output = 0
if len(X) == 0:
X = [params]
y = [output]
else:
X = np.append(X, [params], axis=0)
y = np.append(y, [output], axis=0)
# except ValueError:
# continue
except FileNotFoundError:
continue
except ZeroDivisionError:
continue
# print(X)
# # print(len(X[0]))
# print(y)
# print(len(y))
# break_point = 500
# X_train = X[:break_point]
# y_train = y[:break_point]
#
# X_test = X[break_point:]
# y_test = y[break_point:]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.05, random_state=42)
# from sklearn.tree import DecisionTreeClassifier
#
# clf = DecisionTreeClassifier()
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
#
#
#
# param_dist = {
# "max_depth": [30, 60, 100, 150, None],
# "max_features": [5, 8, 16, None],
# "min_samples_leaf": [2, 8, 16, 32],
# "criterion": ["gini", "entropy"],
# "n_estimators": [20, 30, 50]
# }
clf = RandomForestClassifier(n_estimators=80, max_depth=100, max_features=16, verbose=True)
#
# clf.fit(X_train, y_train)
# clf = RandomForestClassifier()
# forest_cv = GridSearchCV(clf, param_dist, cv=5)
#
# forest_cv.fit(X, y)
# print("Best params are: ", forest_cv.best_params_)
#
# random_grid = { 'bootstrap': [True, False],
# 'max_depth': [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, None],
# 'max_features': ['auto', 'sqrt'],
# 'min_samples_leaf': [1, 2, 4],
# 'min_samples_split': [2, 5, 10],
# 'n_estimators': [200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000]
# }
#
#
# clf = RandomForestClassifier()
# rf_random = RandomizedSearchCV(estimator = clf, param_distributions = random_grid, n_iter = 100, cv = 3, verbose=2, random_state = 42, n_jobs = -1)
#
# rf_random.fit(X, y)
# print("Best params are: ", rf_random.best_params_)
# clf = RandomForestClassifier(n_estimators= 50, min_samples_split= 5, min_samples_leaf= 4, max_features= 'auto', max_depth= 30, bootstrap= True)
clf.fit(X_train, y_train)
y_pred = []
for index in range(len(X_test)):
prediction = clf.predict([X_test[index]])[0]
y_pred.append(prediction)
y_pred = np.array(y_pred)
from sklearn.metrics import confusion_matrix
# print(confusion_matrix(y_test, y_pred))
cf_mat = confusion_matrix(y_test, y_pred)
# print("Confusion Matrix")
# print(cf_mat)
recall = cf_mat[0][0]/(cf_mat[0][0] + cf_mat[1][0])
precision = cf_mat[0][0]/(cf_mat[0][0] + cf_mat[0][1])
# accuracy = cf_mat[0][0] + cf_mat[1][1]/(cf_mat[0][0] + cf_mat[1][0] + cf_mat[0][1]+ cf_mat[1][1])
# print("PRECISION", precision,"RECALL", recall)
return precision, recall, cf_mat
precisions = []
final_cf = np.array([[]])  # initialized outside the loop so the best confusion matrix is not reset each iteration
while True:
    precision, recall, cf_mat = run()
    precisions.append(precision)
if precision == np.max(precisions):
final_cf = cf_mat
print(len(precisions))
if precision > 0.68 or len(precisions) > 20:
print("Confusion Matrix")
print(final_cf)
print("PRECISION", np.max(precisions), "RECALL", recall)
print("MeanPrecision: ", np.mean(precisions))
break
|
UTF-8
|
Python
| false | false | 10,622 |
py
| 2,435 |
FundamentalAnalysis.py
| 2 | 0.533986 | 0.510827 | 0 | 334 | 30.802395 | 193 |
kwojdalski/my-first-blog
| 7,121,055,783,416 |
f39bd710090e8b5e2dbc221a464e844a313a4987
|
c562b5ac46a4411433e7264b1a35de00ceb2f3f7
|
/django_env/lib/python2.7/sre_compile.py
|
44cb7a78b86b38f4bae7659380a8a36f45def28c
|
[] |
no_license
|
https://github.com/kwojdalski/my-first-blog
|
01e048f2f2246787d105cedba458945fd73e8607
|
839383bd9ad15d936a6b61b186a78dfe2904f87b
|
refs/heads/master
| 2020-05-29T08:54:32.841015 | 2016-09-29T19:58:33 | 2016-09-29T19:58:33 | 69,102,988 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
/Users/krzysztofwojdalski/anaconda2/lib/python2.7/sre_compile.py
|
UTF-8
|
Python
| false | false | 64 |
py
| 24 |
sre_compile.py
| 22 | 0.859375 | 0.8125 | 0 | 1 | 64 | 64 |
4Catalyzer/flask-resty
| 7,370,163,921,079 |
99f507c238a8f42cb4a706bf6866462fce2be6cc
|
fc94a7e1c86dc0c7c6d2db6efeb82d0d491b1e3c
|
/flask_resty/utils.py
|
abda5518b20773306388a9efde9c9a71bae6292d
|
[
"MIT"
] |
permissive
|
https://github.com/4Catalyzer/flask-resty
|
cf2d9171fc994b36166493257dd78da337f3729e
|
a4c8855dc2f482c29569001ae0e54ab5a40acb2f
|
refs/heads/master
| 2023-06-07T12:55:13.396268 | 2022-10-05T19:31:06 | 2022-10-05T19:31:06 | 39,663,289 | 91 | 17 |
MIT
| false | 2023-05-30T18:55:30 | 2015-07-24T23:29:49 | 2022-09-14T17:58:52 | 2023-05-30T18:55:29 | 781 | 83 | 13 | 19 |
Python
| false | false |
"""Internal utility helpers."""
# UNDEFINED is a singleton; ensure that it is falsy and returns the same instance when copied
class _Undefined:
def __bool__(self):
return False
def __copy__(self):
return self
def __deepcopy__(self, _):
return self
def __repr__(self): # pragma: no cover
return "<UNDEFINED>"
UNDEFINED = _Undefined()
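# e.g. bool(UNDEFINED) is False, and copy.deepcopy(UNDEFINED) is UNDEFINED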
# -----------------------------------------------------------------------------
def if_none(value, default):
if value is None:
return default
return value
# -----------------------------------------------------------------------------
def iter_validation_errors(errors, path=()):
if isinstance(errors, dict):
for field_key, field_errors in errors.items():
field_path = path + (field_key,)
yield from iter_validation_errors(field_errors, field_path)
else:
for message in errors:
yield (message, path)
# -----------------------------------------------------------------------------
class SettableProperty:
def __init__(self, get_default):
self.get_default = get_default
self.internal_field_name = "_" + get_default.__name__
self.__doc__ = get_default.__doc__
def __get__(self, instance, owner):
if instance is None:
return self
try:
return getattr(instance, self.internal_field_name)
except AttributeError:
return self.get_default(instance)
def __set__(self, instance, value):
setattr(instance, self.internal_field_name, value)
def __delete__(self, instance):
try:
delattr(instance, self.internal_field_name)
except AttributeError:
pass
#: A property that can be set to a different value on the instance.
settable_property = SettableProperty
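# --- Illustrative usage (not part of the original module):
#
#   class Widget:
#       @settable_property
#       def color(self):
#           return "red"              # computed default
#
#   w = Widget()
#   w.color            # -> "red" (falls back to the default)
#   w.color = "blue"   # stored on the instance as `_color`
#   del w.color        # back to the computed default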
|
UTF-8
|
Python
| false | false | 1,869 |
py
| 63 |
utils.py
| 49 | 0.527555 | 0.527555 | 0 | 71 | 25.323944 | 93 |
kozolex/ISIC_Classification_Melanoma_Benign
| 10,926,396,847,255 |
722b838d4060a38966e461f41e09a1f1a2a2eb15
|
a1d7dc9a1c4fe99701ef1a1fe20bee31aae32a14
|
/dataset_to_folders.py
|
7dc6245d9839a3e63e8acd2a78766b7ce37cac6a
|
[
"MIT"
] |
permissive
|
https://github.com/kozolex/ISIC_Classification_Melanoma_Benign
|
6be5c8e60e4929e3d56b808f7e13a3ffab81c848
|
9945e54971eeaa427668392ef0381d068a5ef819
|
refs/heads/master
| 2022-04-21T04:17:59.496489 | 2020-04-24T13:54:37 | 2020-04-24T13:54:37 | 258,480,829 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import csv
import zipfile
from os import path, makedirs
class DatasetAnalyzer:
"""
"""
def __init__(self, zipPath, csvPath, keyWords = [], output = 'output/'):
self.zipPath = zipPath
self.csvPath = csvPath
self.keyWords = keyWords
self.output = output
print(f'File to analyze {self.zipPath}')
def createFolder(self, newPath):
if not path.exists(newPath):
makedirs(newPath)
def listGenerator(self, zipfile):
pass
    def processing(self):
        """Reads the CSV (rows of image_id,label) and extracts each listed
        image from the zip archive into self.output/<label>/."""
with open(self.csvPath, newline='') as csvfile:
listreader = csv.reader(csvfile, delimiter=' ', quotechar=',')
archive = zipfile.ZipFile(self.zipPath, 'r') #Open zip file
for row in listreader:
row = str(row[0]).split(',')
print(row)
imgdata = archive.extract('ISBI2016_ISIC_Part3_Training_Data/'+str(row[0])+'.jpg', self.output + str(row[1]))
zipPath = 'dataset/ISBI2016_ISIC_Part3_Training_Data.zip'
csvPath = 'dataset/ISBI2016_ISIC_Part3_Training_GroundTruth.csv'
zip1 = DatasetAnalyzer(zipPath, csvPath, ['benign', 'malignant'] )
zip1.processing()
|
UTF-8
|
Python
| false | false | 1,229 |
py
| 2 |
dataset_to_folders.py
| 1 | 0.590724 | 0.574451 | 0 | 48 | 24.625 | 125 |
killian-coder/Kamel_link_sys
| 1,709,397,003,767 |
dfefd82addcd7ded0ce5626c83b27846ebd74e34
|
bd622478d3e7f711eb8133aa94afd7bcbbb47b14
|
/about_us/views.py
|
86ebd0ad374c70fc3541466ab8351bc70a64a7f4
|
[] |
no_license
|
https://github.com/killian-coder/Kamel_link_sys
|
ac495dcac34da46d1e1007c614ea58a78f394ee2
|
21c407a3171bd28bc25dfd16d447454aab5c7695
|
refs/heads/main
| 2023-06-08T01:07:58.844125 | 2021-06-22T17:40:01 | 2021-06-22T17:40:01 | 304,036,352 | 0 | 0 | null | false | 2021-06-21T08:02:09 | 2020-10-14T14:12:47 | 2021-06-20T16:34:46 | 2021-06-20T16:34:43 | 3,954 | 0 | 0 | 1 |
HTML
| false | false |
from django.shortcuts import render
from django.views.generic import ListView
from about_us.models import About
# Create your views here.
class AboutUsview(ListView):
template_name = "about.html"
model = About
|
UTF-8
|
Python
| false | false | 223 |
py
| 9 |
views.py
| 7 | 0.757848 | 0.757848 | 0 | 11 | 19.181818 | 41 |
muneer22/local
| 8,495,445,312,453 |
991fa9b7cd16d6742690571ee22bac92a31fb8b6
|
00689046bb71927fc98301a23235cc33500d26bc
|
/membership.py
|
3a813b40b7373f778e373fd9c3cd85bcd8d0ee2e
|
[] |
no_license
|
https://github.com/muneer22/local
|
11392ea8a7fb9ff35440b174d84806864c689038
|
7900c43bb846076589db6fddba0e1fce16e102d2
|
refs/heads/master
| 2021-09-07T16:52:07.004726 | 2018-02-26T10:21:32 | 2018-02-26T10:21:32 | 108,250,418 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
nums = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]  # renamed from `list` to avoid shadowing the builtin
a = 2
b = 5
c = 12
if a in nums:
    print('a is in the list')
else:
    print('a is not in the list')
if b not in nums:
    print('b is not in the list')
else:
    print('b is in the list')
if c not in nums:
    print('c is not in the list')
else:
    print('c is in the list')
|
UTF-8
|
Python
| false | false | 307 |
py
| 11 |
membership.py
| 10 | 0.579805 | 0.530945 | 0 | 18 | 16 | 33 |
nabraj12/Pediatric-Bone-Age
| 7,507,602,853,620 |
063b42667f566eb5ad1321d04c46f6b9df7bb749
|
828386e4986f7310fc9f8a03c8ecc6eee5424f1d
|
/model-training-python/checkimg.py
|
c51e6c2ae6865de796f26f26d42dc1625e297471
|
[] |
no_license
|
https://github.com/nabraj12/Pediatric-Bone-Age
|
da1672d4413a40b8a427c1ed41f224ede0ffa214
|
bde54b3c21d77da0707d7ee9f98626eb5f7e491c
|
refs/heads/main
| 2023-03-23T10:42:51.409360 | 2021-03-15T00:07:05 | 2021-03-15T00:07:05 | 306,743,198 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import matplotlib.image as mpimg
import os.path
import matplotlib.pyplot as plt
import logging
import sys
#==============================
#prepare logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s:%(name)s:%(message)s')
file_handler = logging.FileHandler('log.log')
file_handler.setLevel(logging.ERROR)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
def check_image(df, fname_col, img_dir):
"""Check for missing/corrupted images.
Inputs- df, col name contains file name, image dir
Outputs- succees status """
    success = False  # default in case the dataframe is empty
    for filename in df[fname_col].values[0:4]:  # spot-check the first few images
        if not os.path.isfile(img_dir+filename):
            logger.error("path {} does not exist".format(img_dir+filename))
            success = False
else:
try:
img = mpimg.imread(img_dir + filename)
success = True
except OSError:
success = False
logger.error("image is {} corrupted/missing".
format(filename))
return success
def display_image(df, fname_col, img_dir, n):
"""Displays train, valid, and test images.
Inputs- df, col-name contains file anme, img dir, # of imgs to display
Outputs-None(display images) """
# Display some train images
nrows = 1+n//20
fig, axs = plt.subplots(nrows,20, figsize=(20,1.2*nrows),
facecolor='w', edgecolor='k')
axs = axs.ravel()
for idx, filename in enumerate (df[fname_col][0:n].values):
if not os.path.isfile(img_dir+filename):
logger.error("path {} does not exit".format(img_dir+filename))
img = mpimg.imread(img_dir + filename)
axs[idx].imshow(img)
axs[idx].set_axis_off()
plt.subplots_adjust(wspace=0, hspace=0)
plt.show()
if __name__ == "__main__":
import exploration
    # Image directories to check whether any image file is
    # missing or corrupted
train_img_pre = os.path.join(os.path.dirname(__file__),
'mini_dataset/train/')
valid_img_pre = os.path.join(os.path.dirname(__file__),
'mini_dataset/valid/')
test_img_pre = os.path.join(os.path.dirname(__file__),
'mini_dataset/test/')
# CSV file path + name
csv_train = os.path.join(os.path.dirname(__file__),
'mini_dataset/train.csv')
csv_valid = os.path.join(os.path.dirname(__file__),
'mini_dataset/validation.csv')
csv_test = os.path.join(os.path.dirname(__file__),
'mini_dataset/test.csv')
df_mean_std_list = exploration.df_exploration(csv_train, csv_valid,
csv_test)
train_df,valid_df, test_df = df_mean_std_list
check = check_image(train_df, 'id', train_img_pre)
if check: print("No missing or corrupted image found.")
else: print ("Image directory contains missing or corrupted image")
# Display images
display_image(train_df, 'id', train_img_pre, 4)
display_image(valid_df, 'Image ID', valid_img_pre, 4)
display_image(test_df, 'Case ID', test_img_pre, 4)
|
UTF-8
|
Python
| false | false | 2,892 |
py
| 21 |
checkimg.py
| 12 | 0.664938 | 0.659059 | 0 | 96 | 29.104167 | 71 |
isthattyler/MySQLBirthdayTracker
| 18,957,985,645,724 |
aff44e98a42446b4a2ceef972eb16f461bccf6d7
|
56491ee53e7ee9eee6aa253afe1b1cca73fe5107
|
/src/Birthday.py
|
c7a08e732eaaf281fe3fbcf52937a6162a4c1d9d
|
[] |
no_license
|
https://github.com/isthattyler/MySQLBirthdayTracker
|
8233592f7bd2d7687395757c584a53e92319e937
|
aa03cedc9a523c56af955547577428e235068b3e
|
refs/heads/master
| 2020-08-30T00:11:20.477119 | 2019-10-29T23:58:29 | 2019-10-29T23:58:29 | 218,210,901 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
""" Birthday Class """
from MySQLConnector import *
import socket
class Birthday:
def __init__(self):
self.config = ""
# get host address
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
host = s.getsockname()[0]
self.db = MySQLConnector(host, 'test','MyPassword1!', 'BirthdayTracker' )
self.db._connect()
def run(self):
choice = 1
print("Welome to the program.")
print("\nThis program will tell you their birthday or their age.")
print("\nIf the person you mentioned is not available on our database, you can choose to add the person in.")
print("\n1. Look for age 2. Look for birthday")
option = int(input("What do you want to do today? "))
while choice:
if option == 1:
self.getAge()
else:
self.getBirthday()
print("\n0. No 1. Yes")
choice = int(input("\nDo you want to look for another person? "))
print("\nHave a good day!")
exit()
def getBirthday(self):
self.config = input("\nPlease input their Fname and their Lname separated by whitespace(* to show everyone in database): ")
if self.config == '*':
self.searchBirthday(self.config, all=1)
temp = str(self.db)
print("\nHere's the info of everyone you requested for: \n" + temp)
else:
name = tuple(self.config.split())
result = self.searchBirthday(name)
if not result:
self.__noName()
else:
temp = str(self.db)
print("\nHere's the info of the person you requested for: \n" + temp)
def getAge(self):
self.config = input("\nPlease input their Fname and their Lname separated by whitespace(* to show everyone in database): ")
if self.config == '*':
self.searchAge(self.config, all=1)
temp = str(self.db)
print("\nHere's the info of everyone you requested for: \n" + temp)
else:
name = tuple(self.config.split())
result = self.searchAge(name)
if not result:
self.__noName()
else:
temp = str(self.db)
print("\nHere's the info of the person you requested for: \n" + temp)
def __noName(self):
print("The person appears to not be on our database.")
print("\n0. No 1. Yes")
choice = int(input("\nDo you want to put this person into our database for future reference? "))
if choice:
self.config = input("\nPlease input the person Fname, Lname, Bdate(YYYY-MM-DD), and Phone number separated by whitespace: ")
print(self.config)
print("Thank you. The data has been inserted.")
config = tuple(self.config.split())
print(config)
self.insert(config)
else:
print("\nOkay no worries! Have a good day!")
exit()
def insert(self, config):
query = ("""INSERT INTO Birthday
(Fname, Lname, Bdate, PhoneNum)
VALUES (%s, %s, %s, %s);""")
return self.db._query(query, config)
def searchBirthday(self, name, all=0):
if not all:
query = ("""SELECT Fname, Lname, Bdate
FROM Birthday
WHERE Fname=%s AND Lname=%s;""")
return self.db._query(query, name)
else:
query = ("""SELECT Fname, Lname, Bdate
FROM Birthday;""")
return self.db._query(query)
def searchAge(self, name, all=0):
if not all:
query = ("""SELECT Fname, Lname, YEAR(CURDATE()) - YEAR(Bdate) AS age
FROM Birthday
WHERE Fname=%s AND Lname=%s;""")
return self.db._query(query, name)
else:
query = ("""SELECT Fname, Lname, YEAR(CURDATE()) - YEAR(Bdate) AS age
FROM Birthday;""")
return self.db._query(query)
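# For reference, a sketch of the MySQL table the queries above assume;
# the column names come from the INSERT statement, the types are guesses:
# CREATE TABLE Birthday (
#     Fname    VARCHAR(50),
#     Lname    VARCHAR(50),
#     Bdate    DATE,
#     PhoneNum VARCHAR(20)
# );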
|
UTF-8
|
Python
| false | false | 4,160 |
py
| 3 |
Birthday.py
| 2 | 0.530529 | 0.525721 | 0 | 104 | 39 | 136 |
abisha22/S1-A-Abisha-Accamma-vinod
| 7,567,732,396,229 |
399c075fac99fdd022b771679d55b633ddb8feb2
|
7b1d1957183cb588aae19181e7b57be881906c38
|
/Programming Lab/27-01-21/prgm7abc.py
|
8cef835ae9326ca60be34de0f0c6ccd6515093d9
|
[] |
no_license
|
https://github.com/abisha22/S1-A-Abisha-Accamma-vinod
|
29c3ad7f183f3087955fda2e8c3b8b1a8630c034
|
49c2041519a3522c44a650efe39bff27b8103917
|
refs/heads/main
| 2023-06-12T21:27:43.859222 | 2021-06-30T01:52:23 | 2021-06-30T01:52:23 | 353,318,058 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Compare two lists by length, by sum, and for common elements.
list1 = [12, 3, 4, 56, 7, 8, 9, 19, 34, 87]
list2 = [10, 4, 67, 89, 4, 77, 29, 5, 7, 8]
len1 = len(list1)
len2 = len(list2)
if len1 == len2:
    print('Both lists have equal length')
else:
    print('Both lists do not have equal length')
total1 = sum(list1)
total2 = sum(list2)
if total1 == total2:
    print('Both lists have equal sum')
else:
    print('Both lists do not have equal sum')
common = 0  # initialised up front so the check below cannot raise NameError
for value in list1:
    if value in list2:
        common = 1
if common == 1:
    print("There are common elements")
else:
    print("There is no common element")
|
UTF-8
|
Python
| false | false | 982 |
py
| 25 |
prgm7abc.py
| 11 | 0.6222 | 0.476578 | 0 | 39 | 23.205128 | 94 |
chlendyd7/JustDoIt_Python
| 17,042,430,255,696 |
6256b9163946bd75b2089cb1f358f5a73fd33b0f
|
9099dce6485bdc8f2cdfc06168bfeb6dcf86449f
|
/2021_10_08/Dictionary.py
|
06465a18aabb2ac9aba54abb562f7fb8fb14d0b5
|
[] |
no_license
|
https://github.com/chlendyd7/JustDoIt_Python
|
a6306bee6289d781b5272070c32cb76eec739c96
|
230a9f8d8223f81d5f37b1ced4a426819f417cff
|
refs/heads/main
| 2023-09-02T10:25:08.141084 | 2021-10-11T14:29:42 | 2021-10-11T14:29:42 | 412,497,001 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Dictionary
cabinet = {3:"유재석", 100:"김태호"}
print(cabinet[3])
print(cabinet[100])
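# dict.get() is a safe alternative to [] lookups: it returns a default
# instead of raising KeyError (a small illustrative addition):
print(cabinet.get(5))           # None, since key 5 is absent
print(cabinet.get(5, "empty"))  # falls back to the given default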
|
UTF-8
|
Python
| false | false | 93 |
py
| 14 |
Dictionary.py
| 14 | 0.61039 | 0.506494 | 0 | 4 | 17.25 | 30 |
DevManuelBarros/melingo
| 13,718,125,566,756 |
8a79c7468179ec938cadbf606a63838c77aed0ad
|
59d5f22b84cfb2b6f0128b6f12cb21e7b00a66b3
|
/loadMeli/views.py
|
a46cc65b7f2ca2caacaa84e6d8ee2629f19b2ee0
|
[] |
no_license
|
https://github.com/DevManuelBarros/melingo
|
7d4d9b868990e81c632552e680f0f5c179bf8bd6
|
af0ea564cc0db12c6ca2b09c71b6f2aa9237fde2
|
refs/heads/master
| 2022-12-20T20:15:52.871942 | 2019-11-20T02:08:41 | 2019-11-20T02:08:41 | 217,774,816 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import render
from meli.meli import Meli
from django.http import HttpResponse, HttpResponseRedirect
from .serializers import LoginModelSerializer
from .models import LoginModel
from django.contrib.auth.decorators import login_required
from django.urls import reverse
#
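# Rough request flow, inferred from the views below (not from project docs):
#   1. initial_login() asks Meli for the MercadoLibre authorization URL and
#      redirects the user there.
#   2. MercadoLibre redirects back to login() with a ?code= parameter.
#   3. login() exchanges the code for tokens and stores them on the latest
#      LoginModel instance.
#   4. profile() uses the stored access token to call /users/me.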
@login_required(redirect_field_name='login')
def initial_login(request):
meli = Meli()
escritura = meli.auth_url_melingo()
print(escritura)
if "auth.mercadolibre.com" in escritura:
return HttpResponseRedirect(escritura)
else:
return HttpResponse("Modulo Ingresado")
@login_required(redirect_field_name='login')
def login(request):
code = request.GET.get('code', '')
if code:
meli = Meli()
response = meli.authorize_melingo(code)
tmpInstance = LoginModel.objects.all().last()
tmpSerializer = LoginModelSerializer()
tmpSerializer.update(tmpInstance, response)
else:
return HttpResponseRedirect(reverse('initial_login'))
return HttpResponseRedirect(reverse('profile'))
@login_required(redirect_field_name='login')
def profile(request):
meli = Meli(charge_data=True)
context = meli.get('/users/me', params={'access_token':meli.access_token})
return render(request, 'loadMeli/profile.html', {'context' : context.json()})
@login_required(redirect_field_name='login')
def logout(request):
tmpInstance = LoginModel.objects.all().last()
    # The token fields should be reset here
    # so that everything is left blank.
return HttpResponseRedirect("http://www.mercadolibre.com/jms/mla/lgz/logout?go=http%3A%2F")
|
UTF-8
|
Python
| false | false | 1,618 |
py
| 11 |
views.py
| 9 | 0.719233 | 0.717996 | 0 | 48 | 32.6875 | 95 |
mc811mc/cracking-python-bootcamp
| 9,483,287,815,374 |
8bab732ee287aa6aa6a963a8d37a4a9ba7738e58
|
0449b8088a99ff55b0125f16ecd1ca3444b2dacf
|
/sports_betting_calculator.py
|
8da1c28dc487c8ee695ad83758940f055eda8e5e
|
[] |
no_license
|
https://github.com/mc811mc/cracking-python-bootcamp
|
24e6c00828baa8bbf8ac0a574549cc9fbd544d0e
|
6812ccb77a20a2c00f402975ae470c69486d5a02
|
refs/heads/master
| 2023-08-22T04:44:59.662534 | 2021-10-01T15:21:04 | 2021-10-01T15:21:04 | 281,063,993 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
print("Welcome to the odds calculator")
odds = int(input("Enter the odds: "))
wager = int(input("Enter wager (bet amount): "))
print("Bet", wager, "\n")
if odds > 0:
    win = wager / 100 * odds
    probability = 100 / (odds + 100) * 100
elif odds < 0:
    win = wager / abs(odds) * 100
    probability = abs(odds) / (abs(odds) + 100) * 100
else:
    print("American odds of 0 are not valid.")
    exit()
# derived values
payout = abs(wager) + abs(win)
fractional_odds = abs(win/wager)
decimal_odds = abs(payout/wager)
print("Statistics List \n")
print("To Win:", abs(win))
print("Payout:", payout)
print("American Odds:", odds)
print("Fractional Odds", fractional_odds)
print("Decimal Odds:", decimal_odds)
print("Implied Probability:", probability)
|
UTF-8
|
Python
| false | false | 713 |
py
| 11 |
sports_betting_calculator.py
| 10 | 0.632539 | 0.600281 | 0 | 24 | 27.458333 | 53 |
paula-cristina-martins/Stack-in-Python
| 1,571,958,041,889 |
a20c1c6667d4ea80d03c1ab30969094a4896fbb0
|
71d28579020151154bd5d1eb68dafaa7b1aad659
|
/main.py
|
0034283106626063da50127680ace40c77803570
|
[] |
no_license
|
https://github.com/paula-cristina-martins/Stack-in-Python
|
ab7ef57b8fd46fdcbd8e6a07124947a491a0bcf9
|
bd25f6170c3327b718ac60e70bc8e82638a60118
|
refs/heads/master
| 2023-06-07T09:58:14.097772 | 2021-06-30T22:38:43 | 2021-06-30T22:38:43 | 381,541,114 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from AreaContainer import AreaContainer
# ------------------------------------------------------------------------------------------------------------ #
mccc = AreaContainer(1, 3)
mccw = AreaContainer(5, 8)
mccr = AreaContainer(5, 7)
maqm = AreaContainer(5, 6)
# ------------------------------------------------------------------------------------------------------------ #
print("\nBoas Vindas ao nosso Sistema de Estoque!\n" +
"Opções a serem armazenadas no sistema:\n" +
"1 - MCCC - Máquina de cartão / Conexão CHIP.\n" +
"2 - MCCW - Máquina de cartão / Conexão Wireless.\n" +
"3 - MCCR - Máquina de cartão / Conexão Cabo de Rede.\n" +
"4 - MAQM - Máquina de cartão Mobile.\n"
)
opcao_modelo_add = int(input("Informe modelo deseja guardar? "))
# per-model stock limits: mccc 45, mccw 40, mccr 35, maqm 30
modelos = {1: (mccc, 45), 2: (mccw, 40), 3: (mccr, 35), 4: (maqm, 30)}
if opcao_modelo_add in modelos:
    area, limite = modelos[opcao_modelo_add]
    quantidade_maquininha = int(
        input("\nInsira a quantidade que deseja cadastrar / armazenar? "))
    if quantidade_maquininha > limite:
        print("\nQuantidade superior ao limite disponível!")
        exit()
    for _ in range(quantidade_maquininha):
        serial_number = input(
            "Insira o cód. de série do equipamento: ")
        area.add_maquininha(serial_number)
else:
    print("\nOpção Inválida! Por favor, tente novamente!\n")
# ------------------------------------------------------------------------------------------------------------ #
info = int(input("Deseja enviar máquinas de cartão? "
"\n1 - SIM"
"\n2 - NAO\n\nEscolha uma opção: "
))
if (info == 1):
print("\nOpções a serem enviadas as máquinas de cartão no sistema:\n" +
"1 - MCCC - Máquina de cartão / Conexão CHIP.\n" +
"2 - MCCW - Máquina de cartão / Conexão Wireless.\n" +
"3 - MCCR - Máquina de cartão / Conexão Cabo de Rede.\n" +
"4 - MAQM - Máquina de cartão Mobile.\n"
)
opcao_modelo_remove = int(input("Informe modelo deseja enviar? "))
if opcao_modelo_remove == 1:
mccc.del_maquininha()
elif opcao_modelo_remove == 2:
mccw.del_maquininha()
elif opcao_modelo_remove == 3:
mccr.del_maquininha()
elif opcao_modelo_remove == 4:
maqm.del_maquininha()
else:
print('Opção Inválida!')
print("\nAgradecemos por utilizar nossos serviços!\n\n")
# ------------------------------------------------------------------------------------------------------------ #
|
UTF-8
|
Python
| false | false | 3,844 |
py
| 4 |
main.py
| 3 | 0.544424 | 0.533087 | 0 | 110 | 33.472727 | 112 |
Yogarine/bungie-sdk-python
| 5,892,695,159,334 |
0cd72132d97f7d0c7632fc19c75dc42eaaa4b092
|
af2431c3cfce08626a8863ec3bde8a6defe1134b
|
/bungie_sdk_python/Model/Destiny/Entities/Characters/destiny_character_progression_component.py
|
fbd324f1ac8c367d073c7e9ce9430bdee492f74e
|
[] |
no_license
|
https://github.com/Yogarine/bungie-sdk-python
|
4d8be794fd6815b5729ef9c4a52d4051092ce0c2
|
8f0da6501b45add0914a1ec7bac05649991c4787
|
refs/heads/master
| 2020-07-03T15:43:51.701753 | 2019-08-12T15:16:11 | 2019-08-12T15:16:11 | 201,954,254 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding: utf-8
"""
Bungie.Net API
These endpoints constitute the functionality exposed by Bungie.net, both for more traditional website functionality and for connectivity to Bungie video games and their related functionality. # noqa: E501
OpenAPI spec version: 2.3.6
Contact: support@bungie.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class DestinyCharacterProgressionComponent(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'progressions': 'dict(str, DestinyProgression)',
'factions': 'dict(str, DestinyFactionProgression)',
'milestones': 'dict(str, DestinyMilestone)',
'quests': 'list[DestinyQuestStatus]',
'uninstanced_item_objectives': 'dict(str, list[DestinyObjectiveProgress])',
'checklists': 'dict(str, dict(str, bool))'
}
attribute_map = {
'progressions': 'progressions',
'factions': 'factions',
'milestones': 'milestones',
'quests': 'quests',
'uninstanced_item_objectives': 'uninstancedItemObjectives',
'checklists': 'checklists'
}
def __init__(self, progressions=None, factions=None, milestones=None, quests=None, uninstanced_item_objectives=None, checklists=None): # noqa: E501
"""DestinyCharacterProgressionComponent - a model defined in OpenAPI""" # noqa: E501
self._progressions = None
self._factions = None
self._milestones = None
self._quests = None
self._uninstanced_item_objectives = None
self._checklists = None
self.discriminator = None
if progressions is not None:
self.progressions = progressions
if factions is not None:
self.factions = factions
if milestones is not None:
self.milestones = milestones
if quests is not None:
self.quests = quests
if uninstanced_item_objectives is not None:
self.uninstanced_item_objectives = uninstanced_item_objectives
if checklists is not None:
self.checklists = checklists
@property
def progressions(self):
"""Gets the progressions of this DestinyCharacterProgressionComponent. # noqa: E501
A Dictionary of all known progressions for the Character, keyed by the Progression's hash. Not all progressions have user-facing data, but those who do will have that data contained in the DestinyProgressionDefinition. # noqa: E501
:return: The progressions of this DestinyCharacterProgressionComponent. # noqa: E501
:rtype: dict(str, DestinyProgression)
"""
return self._progressions
@progressions.setter
def progressions(self, progressions):
"""Sets the progressions of this DestinyCharacterProgressionComponent.
A Dictionary of all known progressions for the Character, keyed by the Progression's hash. Not all progressions have user-facing data, but those who do will have that data contained in the DestinyProgressionDefinition. # noqa: E501
:param progressions: The progressions of this DestinyCharacterProgressionComponent. # noqa: E501
:type: dict(str, DestinyProgression)
"""
self._progressions = progressions
@property
def factions(self):
"""Gets the factions of this DestinyCharacterProgressionComponent. # noqa: E501
A dictionary of all known Factions, keyed by the Faction's hash. It contains data about this character's status with the faction. # noqa: E501
:return: The factions of this DestinyCharacterProgressionComponent. # noqa: E501
:rtype: dict(str, DestinyFactionProgression)
"""
return self._factions
@factions.setter
def factions(self, factions):
"""Sets the factions of this DestinyCharacterProgressionComponent.
A dictionary of all known Factions, keyed by the Faction's hash. It contains data about this character's status with the faction. # noqa: E501
:param factions: The factions of this DestinyCharacterProgressionComponent. # noqa: E501
:type: dict(str, DestinyFactionProgression)
"""
self._factions = factions
@property
def milestones(self):
"""Gets the milestones of this DestinyCharacterProgressionComponent. # noqa: E501
Milestones are related to the simple progressions shown in the game, but return additional and hopefully helpful information for users about the specifics of the Milestone's status. # noqa: E501
:return: The milestones of this DestinyCharacterProgressionComponent. # noqa: E501
:rtype: dict(str, DestinyMilestone)
"""
return self._milestones
@milestones.setter
def milestones(self, milestones):
"""Sets the milestones of this DestinyCharacterProgressionComponent.
Milestones are related to the simple progressions shown in the game, but return additional and hopefully helpful information for users about the specifics of the Milestone's status. # noqa: E501
:param milestones: The milestones of this DestinyCharacterProgressionComponent. # noqa: E501
:type: dict(str, DestinyMilestone)
"""
self._milestones = milestones
@property
def quests(self):
"""Gets the quests of this DestinyCharacterProgressionComponent. # noqa: E501
If the user has any active quests, the quests' statuses will be returned here. Note that quests have been largely supplanted by Milestones, but that doesn't mean that they won't make a comeback independent of milestones at some point. # noqa: E501
:return: The quests of this DestinyCharacterProgressionComponent. # noqa: E501
:rtype: list[DestinyQuestStatus]
"""
return self._quests
@quests.setter
def quests(self, quests):
"""Sets the quests of this DestinyCharacterProgressionComponent.
If the user has any active quests, the quests' statuses will be returned here. Note that quests have been largely supplanted by Milestones, but that doesn't mean that they won't make a comeback independent of milestones at some point. # noqa: E501
:param quests: The quests of this DestinyCharacterProgressionComponent. # noqa: E501
:type: list[DestinyQuestStatus]
"""
self._quests = quests
@property
def uninstanced_item_objectives(self):
"""Gets the uninstanced_item_objectives of this DestinyCharacterProgressionComponent. # noqa: E501
Sometimes, you have items in your inventory that don't have instances, but still have Objective information. This provides you that objective information for uninstanced items. This dictionary is keyed by the item's hash: which you can use to look up the name and description for the overall task(s) implied by the objective. The value is the list of objectives for this item, and their statuses. # noqa: E501
:return: The uninstanced_item_objectives of this DestinyCharacterProgressionComponent. # noqa: E501
:rtype: dict(str, list[DestinyObjectiveProgress])
"""
return self._uninstanced_item_objectives
@uninstanced_item_objectives.setter
def uninstanced_item_objectives(self, uninstanced_item_objectives):
"""Sets the uninstanced_item_objectives of this DestinyCharacterProgressionComponent.
Sometimes, you have items in your inventory that don't have instances, but still have Objective information. This provides you that objective information for uninstanced items. This dictionary is keyed by the item's hash: which you can use to look up the name and description for the overall task(s) implied by the objective. The value is the list of objectives for this item, and their statuses. # noqa: E501
:param uninstanced_item_objectives: The uninstanced_item_objectives of this DestinyCharacterProgressionComponent. # noqa: E501
:type: dict(str, list[DestinyObjectiveProgress])
"""
self._uninstanced_item_objectives = uninstanced_item_objectives
@property
def checklists(self):
"""Gets the checklists of this DestinyCharacterProgressionComponent. # noqa: E501
The set of checklists that can be examined for this specific character, keyed by the hash identifier of the Checklist (DestinyChecklistDefinition) For each checklist returned, its value is itself a Dictionary keyed by the checklist's hash identifier with the value being a boolean indicating if it's been discovered yet. # noqa: E501
:return: The checklists of this DestinyCharacterProgressionComponent. # noqa: E501
:rtype: dict(str, dict(str, bool))
"""
return self._checklists
@checklists.setter
def checklists(self, checklists):
"""Sets the checklists of this DestinyCharacterProgressionComponent.
The set of checklists that can be examined for this specific character, keyed by the hash identifier of the Checklist (DestinyChecklistDefinition) For each checklist returned, its value is itself a Dictionary keyed by the checklist's hash identifier with the value being a boolean indicating if it's been discovered yet. # noqa: E501
:param checklists: The checklists of this DestinyCharacterProgressionComponent. # noqa: E501
:type: dict(str, dict(str, bool))
"""
self._checklists = checklists
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DestinyCharacterProgressionComponent):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
UTF-8
|
Python
| false | false | 11,269 |
py
| 206 |
destiny_character_progression_component.py
| 154 | 0.67699 | 0.667317 | 0 | 255 | 43.192157 | 420 |
lastfm/python-mirbuild
| 2,774,548,873,737 |
4f644d1c81f0963b82c3353924b4716d9650f905
|
d2d11acdeb6618ab4d0df328472d9f4bb06659a8
|
/mirbuild/python.py
|
f45dee1d1a1779e0f5b1286e057a363bca49567d
|
[] |
no_license
|
https://github.com/lastfm/python-mirbuild
|
7cfe0f544d6a7d937ad5d6839814d5b77d2645a5
|
706be24b426f672d614b3fdd14543c4841af08c2
|
refs/heads/master
| 2020-12-24T17:17:37.585640 | 2014-10-14T09:48:00 | 2014-10-14T09:48:00 | 8,272,896 | 1 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright © 2011-2013 Last.fm Limited
#
# This file is part of python-mirbuild.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
r"""
Python specific classes
"""
__author__ = 'Marcus Holland-Moritz <marcus@last.fm>'
__all__ = 'PythonProject PythonTestBuilder PythonTestRunner PythonHelpers'.split()
import errno
import os
import glob
import re
import shutil
import sys
import mirbuild.project
import mirbuild.test
import mirbuild.version
from mirbuild.tools import LazyFileWriter, ScopedChdir
class PythonTestBuilder(mirbuild.test.TestBuilder):
def __init__(self, env, dir, *args):
mirbuild.test.TestBuilder.__init__(self, env, dir, *args)
@staticmethod
def looks_like_test_dir(dir):
for py in os.listdir(dir):
path = os.path.join(dir, py)
if os.path.isfile(path) and PythonTestBuilder.looks_like_test_file(path):
return True
return False
@staticmethod
def looks_like_test_file(file):
for line in open(file):
if re.search('import\s+py\.?test', line):
return True
if re.search('import\s+unittest', line):
return True
return False
def build(self):
if self.dir is not None:
if not self.tests:
for e in os.listdir(self.dir):
if e.endswith('.py'):
epath = os.path.join(self.dir, e)
if os.path.isfile(epath) and PythonTestBuilder.looks_like_test_file(epath):
self.add_test(e)
class PythonTestRunner(mirbuild.test.TestRunner):
name = 'python'
deps_paths = []
def execute(self, dir, tests, observer):
oldpypath = os.environ.get('PYTHONPATH', None)
try:
# Set the python path for tests
test_python_path = [os.path.realpath(p) for p in glob.glob('build/lib*')]
for d in PythonTestRunner.deps_paths:
test_python_path.extend(glob.glob(os.path.join(os.path.realpath(d), 'build', 'lib') + '*'))
## Just a hack to work with thrift dependencies
test_python_path.extend(glob.glob(os.path.join(os.path.realpath(d), 'build', 'build', 'lib') + '*'))
os.environ['PYTHONPATH'] = ':'.join(test_python_path)
scd = ScopedChdir(dir)
for t in tests:
assert isinstance(t, mirbuild.test.Test)
self._env.say('\n=== Running Test [ {0} ] ===\n'.format(t.name))
t.start_timer()
try:
self._env.execute('py.test', os.path.realpath(t.test))
t.set_passed()
except RuntimeError:
t.set_passed(False)
self._env.dbg('Test {0} finished in {1:.2f} seconds.'.format(t.name, t.duration))
observer.add_test(t)
finally:
if oldpypath is None:
del os.environ['PYTHONPATH']
else:
os.environ['PYTHONPATH'] = oldpypath
class PythonSetupMixin(object):
def __init__(self):
self.add_option('--python-egg-directory',
dest = 'python_egg_directory',
type = 'string',
default = None,
metavar = 'PATH',
help = 'directory into which generated eggs will be moved')
self._vinfo = mirbuild.version.VersionInfoFactory.create()
def _exec_python_setup(self, *args):
self.env.execute(sys.executable,
os.path.basename(self.python_setup_file),
*args,
stdout=sys.stderr,
cwd=os.path.dirname(os.path.abspath(self.python_setup_file)))
@property
def package_prefix(self):
source = self._vinfo.package()
name = self.env.project_name
return source[:-len(name)] if source.endswith(name) else ''
def run_bdist_egg(self):
self.run_configure()
self._run_plugins('pre_bdist_egg')
self._run_plugins('bdist_egg')
self.do_bdist_egg()
self._run_plugins('post_bdist_egg')
def do_bdist_egg(self):
self._exec_python_setup('bdist_egg')
if self.opt.python_egg_directory:
dist_directory = os.path.join(os.path.dirname(os.path.abspath(self.python_setup_file)), 'dist')
egg_files = list(os.path.join(dist_directory, i) for i in os.listdir(dist_directory) if i.endswith('.egg'))
for i in egg_files:
# shutil.move fails if file already exists in destination
# -> remove it first
try:
os.remove(os.path.join(self.opt.python_egg_directory, os.path.basename(i)))
except OSError as ex:
if ex.errno != errno.ENOENT:
raise
shutil.move(i, self.opt.python_egg_directory)
class PythonProject(mirbuild.project.Project, PythonSetupMixin):
test_builder_class = PythonTestBuilder
test_runner_class = PythonTestRunner
python_setup_file = 'setup.py'
default_dependency_class = mirbuild.dependency.PythonDependency
def __init__(self, name, **opts):
# Steal the 'setup' and 'packages' named parameter
setup_option = opts.pop('setup', {})
packages = opts.pop('packages', None)
# Initialise base class
mirbuild.project.Project.__init__(self, name, **opts)
PythonSetupMixin.__init__(self)
# Build actual list of parameters to setup.py's setup function
        author = re.match('(.*?)\s+<([^>]+)>', self._vinfo.author())
stripped_project_name = name[7:] if name.startswith('python-') else name
# These are default members
setup_info = {
'name': self.package_prefix + stripped_project_name,
'version': self._vinfo.upstream_version(),
'description': '',
'package_dir': {'': '.'},
'maintainer': author.group(1),
'maintainer_email': author.group(2),
'packages': packages,
}
# Override these defaults with user supplied values
setup_info.update(setup_option)
self.__setup_info = setup_info
self.__libpath = []
def do_configure(self):
if self.__is_autogenerated(self.python_setup_file):
self.__write_setup_file(self.python_setup_file)
def add_include_path(self, obj):
pass
def add_library_path(self, *args):
self.__libpath += args
PythonTestRunner.deps_paths += args
@property
def setup_info(self):
return self.__setup_info
def __write_setup_file(self, file):
setup_info = dict(self.__setup_info)
setup_args = []
packages = setup_info.pop('packages', None)
if packages is None:
# no 'packages' option was given, or it is None
setup_args.append('packages=find_packages()')
else:
setup_args.append('packages={0!r}'.format(packages))
for key, value in setup_info.iteritems():
val = self.options.get(key, value)
setup_args.append('{0}={1!r}'.format(key, val))
setup = LazyFileWriter(file)
setup.create()
setup.write('''#!{0}
#########################################################################
# #
# ----------------------------------------- #
# THIS FILE WAS AUTOGENERATED BY MIRBUILD #
# ----------------------------------------- #
# #
# You can put your own customisations in this file, just remove this #
# header and the file won't be cleaned up automatically. #
# #
#########################################################################
from setuptools import setup, find_packages
setup({1})
'''.format(sys.executable, ",\n ".join(setup_args)))
setup.commit()
def do_build(self):
if self.opt.called_by_packager:
return
self._exec_python_setup('build')
def do_install(self):
if self.opt.called_by_packager:
return
args = ['install']
if self.opt.install_destdir is not None:
args.append('--root=' + self.opt.install_destdir)
args.append('--no-compile')
self._exec_python_setup(*args)
def __is_autogenerated(self, file):
if not os.path.exists(file):
return True
try:
fh = open(file, 'r')
for line in fh:
if re.match('#\s+THIS FILE WAS AUTOGENERATED BY MIRBUILD\s+#', line):
return True
except Exception:
pass
return False
def do_clean(self):
for root, dirs, files in os.walk('.'):
for f in files:
if f.endswith('.pyc'):
self.env.remove_files(os.path.join(root, f))
for d in dirs:
if d.endswith('.egg-info') or d == '__pycache__':
self.env.remove_trees(os.path.join(root, d))
self.env.remove_files('README.debtags')
if self.__is_autogenerated(self.python_setup_file):
self.env.remove_files(self.python_setup_file)
self.env.remove_trees('build', 'dist')
def do_realclean(self):
self.do_clean()
def prepare_package(self):
mirbuild.project.Project.prepare_package(self)
if isinstance(self.packager, mirbuild.packagers.pkg_debian.DebianPackaging):
# We are building a Python package. The old "dh_pysupport" way of
# doing this has been deprecated. The new "dh_python2" must be
# selected by using a corresponding option when calling dh.
# NB: For as long as we have to support lenny, only add --with python2
# if we actually find that dh_python2 is installed.
if os.path.exists('/usr/bin/dh_python2'):
self.packager.rules.dh_options += ['--with', 'python2']
# If this packages comes without a setup.py file, the "build.py configure"
# step will create one.
# The build and install steps are calling build.py. Build and
# install steps for Python Debian packages, however, are a bit
# more sophisticated, as it includes building and installing
# the packages for various Python versions. This is best done
# by the Debian dh scripts.
# The standard override_dh_auto_{build,install} targets, the
# way that mirbuild.Project sets them up, call build.py using
# the "--called-by-packager" option. The strategy here is,
# that build.py should not call setup.py, but only do the
# additional build/install steps (e.g. defined by plugins in
# the build.py file). After the call to build.py, the standard
# dh_auto_{build,install} executables are called, and they
# do the real work.
self.packager.rules.target_prepend('override_dh_auto_build', ['dh_auto_build'])
self.packager.rules.target_prepend('override_dh_auto_install', ['dh_auto_install'])
class PythonHelpers(object):
namespace_package_declaration = """\
try:
# See http://peak.telecommunity.com/DevCenter/setuptools#namespace-packages
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
# See http://docs.python.org/library/pkgutil.html#pkgutil.extend_path
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)\n"""
@staticmethod
def modules2namespaces(modules):
"""
Returns a list of namespaces necessary to host the given modules.
E.g. ['foo.bar.baz', 'foo.foo.foo', 'foo.foo.bar'] will return
['foo', 'foo.bar', 'foo.foo']
"""
namespaces = []
for m in modules:
comp = m.split('.')
for i in range(1, len(comp)):
ns = '.'.join(comp[0:i])
if ns in modules:
break
if ns not in namespaces:
namespaces.append(ns)
return namespaces
|
UTF-8
|
Python
| false | false | 13,557 |
py
| 43 |
python.py
| 33 | 0.565359 | 0.563219 | 0 | 341 | 38.753666 | 119 |
tryba/shared-queue
| 8,400,956,063,634 |
28ff6c6023c418f11c48a330440fd1c9e1549768
|
ba4c49cb1f288136eae1c84b540718ab2ba4ba33
|
/queue_server/queues/views.py
|
0baf77ef837c246b499f83b5f30bcfcdced76ed8
|
[] |
no_license
|
https://github.com/tryba/shared-queue
|
c26edb6fae192d603f039f7a79d7d20c7f6df312
|
7ce2ba330a075acbc6cce8b5b4a9087c981f6757
|
refs/heads/master
| 2021-01-19T12:36:00.466243 | 2013-01-28T07:13:01 | 2013-01-28T07:13:01 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import render_to_response
from django.shortcuts import redirect
from django.template import RequestContext
from django.contrib.auth.models import User
from accounts.models import UserProfile
from queues.models import Queue
from queues.models import Membership
from music.models import Song
from queue_server.decorators import AllowJSONPCallback
from django.http import HttpResponse
import json
import logging
logger = logging.getLogger(__name__)
@AllowJSONPCallback
def create(request, user_id):
queue = Queue(user_id=user_id)
queue.save()
return HttpResponse(json.dumps(queue.to_dict()), mimetype="application/json")
@AllowJSONPCallback
def view_one(request, user_id, queue_id):
queue = Queue.objects.get(id=queue_id)
return HttpResponse(json.dumps(queue.to_dict()), mimetype="application/json")
@AllowJSONPCallback
def view_all(request, user_id):
queues = Queue.objects.filter(user_id=user_id)
return HttpResponse(json.dumps([queue.to_dict() for queue in queues]), mimetype="application/json")
@AllowJSONPCallback
def push_song(request, user_id, queue_id, song_id):
queue = Queue.objects.get(id=queue_id)
song = Song.objects.get(id=song_id)
queue.push(song)
return HttpResponse(json.dumps(queue.to_dict()), mimetype="application/json")
@AllowJSONPCallback
def remove_membership(request, user_id, queue_id, membership_id):
    membership = Membership(id=membership_id)
membership.delete()
queue = Queue.objects.get(id=queue_id)
return HttpResponse(json.dumps(queue.to_dict()), mimetype="application/json")
@AllowJSONPCallback
def pop_song(request, user_id, queue_id):
queue = Queue.objects.get(id=queue_id)
response = {}
song = queue.pop()
    if song is not None:
api, result = UserProfile.get_google_music_api(user_id)
stream_url = api.get_stream_url(song.id)
song_dict = song.to_dict()
song_dict['stream_url'] = stream_url
response['popped_song'] = song_dict
response['queue'] = queue.to_dict()
return HttpResponse(json.dumps(response), mimetype="application/json")
|
UTF-8
|
Python
| false | false | 2,052 |
py
| 35 |
views.py
| 27 | 0.753899 | 0.753899 | 0 | 59 | 33.779661 | 101 |
mengdilin/Cjango-Unchained
| 14,104,672,604,336 |
7ddea5ca24085b10816e2bcb7916761cf486a9fe
|
4ee050c126c4bf29acb8103b153c3ac269c10dd9
|
/test/verifications_post_demo.py
|
0b5b3ba47dc6b98eb6af18ed58a29365a654a0c6
|
[] |
no_license
|
https://github.com/mengdilin/Cjango-Unchained
|
95e0db859fab96f766b0c2ab3e85da479e0cedcc
|
62bb923a2e34e93a9ff1f41a48396291cc1d573d
|
refs/heads/master
| 2021-01-20T10:46:16.207185 | 2017-04-28T03:09:57 | 2017-04-28T03:09:57 | 83,937,221 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from bs4 import BeautifulSoup
import json
def check_input(data):
if not type(data) is str:
raise Exception("Input argument is not of type str")
def verify_echo(data):
'''
    Simply echoes the data
'''
check_input(data)
print('received raw data of length {}'.format(len(data)))
print(data)
return True
def verify_cjango_404(data):
'''
GET request on an undefined path
'''
check_input(data)
print('received raw data of length {}'.format(len(data)))
target = "Cjango: 404 Page Not Found"
if data != target:
print('contents mismatch')
print('expect: {}'.format(target))
print('actual: {}'.format(data))
return False
return True
def verify_get_home(data):
'''
GET request on page with static components
'''
check_input(data)
print('received raw data of length {}'.format(len(data)))
soup = BeautifulSoup(data, 'html.parser')
target = "Cjango Demo"
if soup.find('title').text != target:
return False
print('title OK: {}'.format(target))
target = "Please log in"
if soup.find('h2').text != target:
return False
print('contents OK')
return True
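# A minimal self-check of the verifiers above on inline sample data;
# illustrative only, real responses come from the Cjango server.
# (Call _demo() manually to run the checks.)
def _demo():
    '''
    Exercise verify_cjango_404 and verify_get_home with hand-written input.
    '''
    page = ("<html><head><title>Cjango Demo</title></head>"
            "<body><h2>Please log in</h2></body></html>")
    assert verify_cjango_404("Cjango: 404 Page Not Found")
    assert verify_get_home(page)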
|
UTF-8
|
Python
| false | false | 1,216 |
py
| 92 |
verifications_post_demo.py
| 47 | 0.61102 | 0.604441 | 0 | 53 | 21.943396 | 61 |
guicho271828/latplan
| 15,951,508,568,865 |
f1346c0ae118f51bbfc1c2274b5f5873e1b5899b
|
62b84f877ccb4171f558c225fa0fdd4fd2c44d6c
|
/latplan/puzzles/model/puzzle.py
|
6537e33f4776a3d33bf034e7c6bbdd3d06b1e2c5
|
[] |
no_license
|
https://github.com/guicho271828/latplan
|
b6dfb55f3cceac947df770fb623d496111f9ab19
|
75a2fc773de245b422a695b51fccaf17294da123
|
refs/heads/master
| 2022-10-25T02:02:05.547143 | 2022-03-25T20:42:06 | 2022-03-25T20:59:29 | 96,482,151 | 77 | 19 | null | false | 2023-03-04T14:10:46 | 2017-07-07T00:11:52 | 2023-01-30T06:14:45 | 2022-10-07T14:48:54 | 3,439 | 75 | 17 | 0 |
Python
| false | false |
#!/usr/bin/env python3
import random
import numpy as np
from ...util.np_distances import bce
from ..util import wrap
from keras.layers import Input, Reshape
from keras.models import Model
import keras.backend.tensorflow_backend as K
import tensorflow as tf
# domain specific state representation:
#
# In a config array C, C_ij is the location of j-th panel in the i-th configuration.
#
# [[0,1,2,3,4,5,6,7,8]] represents a single configuration, where 0 is at 0 (top left)),
# 1 is at 1 (top middle) and so on.
setting = {
'base' : None,
'panels' : None,
'loader' : None,
'min_threshold' : 0.0,
'max_threshold' : 0.5,
}
def load(width,height,force=False):
if setting['panels'] is None or force is True:
setting['panels'] = setting['loader'](width,height)
def generate(configs, width, height, **kwargs):
load(width, height)
from keras.layers import Input, Reshape
from keras.models import Model
import keras.backend.tensorflow_backend as K
import tensorflow as tf
def build():
base = setting['base']
P = len(setting['panels'])
configs = Input(shape=(P,))
configs_one_hot = K.one_hot(K.cast(configs,'int32'), width*height)
matches = K.permute_dimensions(configs_one_hot, [0,2,1])
matches = K.reshape(matches,[-1,P])
panels = K.variable(setting['panels'])
panels = K.reshape(panels, [P, base*base])
states = tf.matmul(matches, panels)
states = K.reshape(states, [-1, height, width, base, base])
states = K.permute_dimensions(states, [0, 1, 3, 2, 4])
states = K.reshape(states, [-1, height*base, width*base])
return Model(configs, wrap(configs, states))
model = build()
return model.predict(np.array(configs),**kwargs)
def build_error(s, height, width, base):
P = len(setting['panels'])
s = K.reshape(s,[-1,height,base,width,base])
s = K.permute_dimensions(s, [0,1,3,2,4])
s = K.reshape(s,[-1,height,width,1,base,base])
s = K.tile(s, [1,1,1,P,1,1,])
allpanels = K.variable(np.array(setting['panels']))
allpanels = K.reshape(allpanels, [1,1,1,P,base,base])
allpanels = K.tile(allpanels, [K.shape(s)[0], height, width, 1, 1, 1])
error = K.abs(s - allpanels)
error = K.mean(error, axis=(4,5))
return error
from .util import binary_search
def validate_states(states, verbose=True, **kwargs):
base = setting['base']
height = states.shape[1] // base
width = states.shape[2] // base
load(width,height)
if states.ndim == 4:
assert states.shape[-1] == 1
states = states[...,0]
bs = binary_search(setting["min_threshold"],setting["max_threshold"])
def build():
states = Input(shape=(height*base,width*base))
error = build_error(states, height, width, base)
matches = K.cast(K.less_equal(error, bs.value), 'float32')
# a, h, w, panel
num_matches = K.sum(matches, axis=3)
panels_ok = K.all(K.equal(num_matches, 1), (1,2))
panels_nomatch = K.any(K.equal(num_matches, 0), (1,2))
panels_ambiguous = K.any(K.greater(num_matches, 1), (1,2))
panel_coverage = K.sum(matches,axis=(1,2))
# ideally, this should be [[1,1,1,1,1,1,1,1,1], ...]
coverage_ok = K.all(K.less_equal(panel_coverage, 1), 1)
coverage_ng = K.any(K.greater(panel_coverage, 1), 1)
validity = tf.logical_and(panels_ok, coverage_ok)
return Model(states,
[ wrap(states, x) for x in [panels_ok,
panels_nomatch,
panels_ambiguous,
coverage_ok,
coverage_ng,
validity]])
while True:
model = build()
panels_ok, panels_nomatch, panels_ambiguous, \
coverage_ok, coverage_ng, validity = model.predict(states, **kwargs)
panels_nomatch = np.count_nonzero(panels_nomatch)
panels_ambiguous = np.count_nonzero(panels_ambiguous)
if verbose:
print(f"threshold value: {bs.value}")
print(panels_nomatch, "images have some panels which are unlike any panels")
print(np.count_nonzero(panels_ok), "images have all panels which match exactly 1 panel each")
print(panels_ambiguous, "images have some panels which match >2 panels")
if np.abs(panels_nomatch - panels_ambiguous) <= 1:
if verbose:
print("threshold found")
print(np.count_nonzero(np.logical_and(panels_ok, coverage_ng)),"images have duplicated tiles")
print(np.count_nonzero(np.logical_and(panels_ok, coverage_ok)),"images have no duplicated tiles")
return validity
elif panels_nomatch < panels_ambiguous:
bs.goleft()
else:
bs.goright()
return validity
def to_configs(states, verbose=True, **kwargs):
base = setting['base']
    width = states.shape[2] // base
    height = states.shape[1] // base
load(width,height)
if states.ndim == 4:
assert states.shape[-1] == 1
states = states[...,0]
def build():
P = len(setting['panels'])
states = Input(shape=(height*base,width*base))
error = build_error(states, height, width, base)
matches = 1 - K.clip(K.sign(error - setting["threshold"]),0,1)
# a, h, w, panel
config = K.reshape(matches, [K.shape(states)[0], height * width, -1])
# a, pos, panel
config = K.permute_dimensions(config, [0,2,1])
# a, panel, pos
config = config * K.arange(height*width,dtype='float')
config = K.sum(config, axis=-1)
num_matches = K.sum(matches, axis=3)
panels_nomatch = K.any(K.equal(num_matches, 0), (1,2))
panels_ambiguous = K.any(K.greater(num_matches, 1), (1,2))
return Model(states,
[ wrap(states, x) for x in [config,
panels_nomatch,
panels_ambiguous]])
bs = binary_search(setting["min_threshold"],setting["max_threshold"])
setting["threshold"] = bs.value
while True:
model = build()
config, panels_nomatch, panels_ambiguous = model.predict(states, **kwargs)
panels_nomatch = np.count_nonzero(panels_nomatch)
panels_ambiguous = np.count_nonzero(panels_ambiguous)
if verbose:
print(f"threshold value: {bs.value}")
print(panels_nomatch, "images have some panels which are unlike any panels")
print(panels_ambiguous, "images have some panels which match >2 panels")
if np.abs(panels_nomatch - panels_ambiguous) <= 1:
return config
elif panels_nomatch < panels_ambiguous:
setting["threshold"] = bs.goleft()
else:
setting["threshold"] = bs.goright()
return config
def states(width, height, configs=None, **kwargs):
digit = width * height
if configs is None:
configs = generate_configs(digit)
return generate(configs,width,height, **kwargs)
# old definition, slow
def transitions_old(width, height, configs=None, one_per_state=False):
digit = width * height
if configs is None:
configs = generate_configs(digit)
if one_per_state:
transitions = np.array([
generate(
[c1,random.choice(successors(c1,width,height))],width,height)
for c1 in configs ])
else:
transitions = np.array([ generate([c1,c2],width,height)
for c1 in configs for c2 in successors(c1,width,height) ])
return np.einsum('ab...->ba...',transitions)
def transitions(width, height, configs=None, one_per_state=False, **kwargs):
digit = width * height
if configs is None:
configs = generate_configs(digit)
if one_per_state:
pre = generate(configs, width, height, **kwargs)
suc = generate(np.array([random.choice(successors(c1,width,height)) for c1 in configs ]), width, height, **kwargs)
return np.array([pre, suc])
else:
transitions = np.array([ [c1,c2] for c1 in configs for c2 in successors(c1,width,height) ])
pre = generate(transitions[:,0,:],width,height, **kwargs)
suc = generate(transitions[:,1,:],width,height, **kwargs)
return np.array([pre, suc])
def generate_configs(digit=9):
import itertools
return itertools.permutations(range(digit))
def generate_random_configs(digit=9,sample=10000):
results = np.zeros((sample,digit))
for i in range(sample):
results[i] = np.random.permutation(digit)
return results
def successors(config,width,height):
pos = config[0]
x = pos % width
y = pos // width
succ = []
try:
if x != 0:
dir=1
c = list(config)
other = next(i for i,_pos in enumerate(c) if _pos == pos-1)
c[0] -= 1
c[other] += 1
succ.append(c)
if x != width-1:
dir=2
c = list(config)
other = next(i for i,_pos in enumerate(c) if _pos == pos+1)
c[0] += 1
c[other] -= 1
succ.append(c)
if y != 0:
dir=3
c = list(config)
other = next(i for i,_pos in enumerate(c) if _pos == pos-width)
c[0] -= width
c[other] += width
succ.append(c)
if y != height-1:
dir=4
c = list(config)
other = next(i for i,_pos in enumerate(c) if _pos == pos+width)
c[0] += width
c[other] -= width
succ.append(c)
return succ
except StopIteration:
board = np.zeros((height,width))
for i in range(height*width):
_pos = config[i]
_x = _pos % width
_y = _pos // width
board[_y,_x] = i
print(board)
print(succ)
print(dir)
print((c,x,y,width,height))
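# Worked example (3x3): in the solved configuration [0,1,...,8] the moving
# panel 0 sits at position 0 (top left), so only two moves exist:
#   successors([0,1,2,3,4,5,6,7,8], 3, 3)
#   -> [[1,0,2,3,4,5,6,7,8], [3,1,2,0,4,5,6,7,8]]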
def validate_transitions_cpu_old(transitions, **kwargs):
pre = np.array(transitions[0])
suc = np.array(transitions[1])
base = setting['base']
height = pre.shape[1] // base
width = pre.shape[2] // base
load(width,height)
pre_validation = validate_states(pre, **kwargs)
suc_validation = validate_states(suc, **kwargs)
results = []
for pre, suc, pre_validation, suc_validation in zip(pre, suc, pre_validation, suc_validation):
if pre_validation and suc_validation:
c = to_configs(np.array([pre, suc]), verbose=False)
succs = successors(c[0], width, height)
results.append(np.any(np.all(np.equal(succs, c[1]), axis=1)))
else:
results.append(False)
return results
def validate_transitions_cpu(transitions, check_states=True, **kwargs):
pre = np.array(transitions[0])
suc = np.array(transitions[1])
base = setting['base']
height = pre.shape[1] // base
width = pre.shape[2] // base
load(width,height)
if check_states:
pre_validation = validate_states(pre, verbose=False, **kwargs)
suc_validation = validate_states(suc, verbose=False, **kwargs)
pre_configs = to_configs(pre, verbose=False, **kwargs)
suc_configs = to_configs(suc, verbose=False, **kwargs)
results = []
if check_states:
for pre_c, suc_c, pre_validation, suc_validation in zip(pre_configs, suc_configs, pre_validation, suc_validation):
if pre_validation and suc_validation:
succs = successors(pre_c, width, height)
results.append(np.any(np.all(np.equal(succs, suc_c), axis=1)))
else:
results.append(False)
else:
for pre_c, suc_c in zip(pre_configs, suc_configs):
succs = successors(pre_c, width, height)
results.append(np.any(np.all(np.equal(succs, suc_c), axis=1)))
return results
validate_transitions = validate_transitions_cpu
# def to_objects(configs,width,height):
# configs = np.array(configs)
# xy = np.concatenate((np.expand_dims( np.array( configs % 3, np.uint8),-1),
# np.expand_dims( np.array( configs // 3, np.uint8),-1)),
# axis=-1)
#
# return np.unpackbits(xy, 2)
# experimental
def to_objects(configs,width,height,shuffle=False):
    configs = np.array(configs)
    ix = np.eye(width)
    iy = np.eye(height)
    # use the width argument rather than a hardcoded 3 so non-3xN puzzles work
    x = ix[np.array( configs % width, np.uint8)]
    y = iy[np.array( configs // width, np.uint8)]
# panels
p = np.tile(np.eye(width*height), (len(configs),1,1))
objects = np.concatenate((p,x,y), axis=-1)
if shuffle:
for sample in objects:
np.random.shuffle(sample)
return objects
def object_transitions(width, height, configs=None, one_per_state=False,shuffle=False, **kwargs):
digit = width * height
if configs is None:
configs = generate_configs(digit)
if one_per_state:
pre = to_objects(configs, width, height, shuffle)
suc = to_objects(np.array([random.choice(successors(c1,width,height)) for c1 in configs ]), width, height, shuffle, **kwargs)
return np.array([pre, suc])
else:
transitions = np.array([ [c1,c2] for c1 in configs for c2 in successors(c1,width,height) ])
pre = to_objects(transitions[:,0,:],width,height, shuffle, **kwargs)
suc = to_objects(transitions[:,1,:],width,height, shuffle, **kwargs)
return np.array([pre, suc])
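# Usage sketch, assuming a concrete puzzle module has already filled in
# setting['base'] and setting['loader'] (module and sizes are illustrative):
#   configs = list(generate_configs(9))[:100]   # some 3x3 configurations
#   images = states(3, 3, configs)              # render them as image arrays
#   pre, suc = transitions(3, 3, configs, one_per_state=True)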
|
UTF-8
|
Python
| false | false | 13,834 |
py
| 164 |
puzzle.py
| 133 | 0.576117 | 0.562166 | 0 | 379 | 35.501319 | 133 |
jakob-nagel/DMSUB-classification-system
| 3,255,585,257,470 |
f8dc4aaa7fee3a4d288dbbe0ee587e202e29a0ec
|
734fc7fb082ea452aadf0fc9ea83e079aba6f754
|
/classification/genres.py
|
07ad384b5a7ed3726db6f33cf2af77894424edd1
|
[
"BSD-3-Clause"
] |
permissive
|
https://github.com/jakob-nagel/DMSUB-classification-system
|
9c3fa6e254793c4b2563b092a71c6c699eb6500e
|
432c1b0e927f27213cffc594864f906dda6684b5
|
refs/heads/master
| 2021-06-08T05:54:20.491655 | 2016-09-16T21:22:39 | 2016-09-16T21:22:39 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
""" Provides genre lists. The contained strings have to match the preambles of
the feature files.
"""
GTZAN = [
'blues',
'classical',
'country',
'disco',
'hiphop',
'jazz',
'metal',
'pop',
'reggae',
'rock'
]
DMSUB = [
'deep',
'disco',
'house',
'soulful',
'techno'
]
|
UTF-8
|
Python
| false | false | 327 |
py
| 34 |
genres.py
| 10 | 0.513761 | 0.513761 | 0 | 24 | 12.625 | 78 |
A-khateeb/Full-Stack-Development-Path
| 18,373,870,104,441 |
daae500fe042094583befb85619d6d46e7e92466
|
dca0bd2e04dda3801d395c2a6ab2f9d95be79551
|
/Python/SmallProject/Compare1.py
|
4a67a2be0ba00aa0f989e540d3ea38b29de7032c
|
[] |
no_license
|
https://github.com/A-khateeb/Full-Stack-Development-Path
|
ab8c86abea2f983fb8e0046a65b99772416c754c
|
5a5eaa198367cc95a6b5638e9740f4ad564dec23
|
refs/heads/master
| 2021-06-01T23:52:04.965494 | 2020-05-01T22:59:20 | 2020-05-01T22:59:20 | 89,286,943 | 2 | 0 | null | false | 2017-12-22T22:21:52 | 2017-04-24T21:04:07 | 2017-11-18T21:13:46 | 2017-12-22T22:21:52 | 233 | 0 | 0 | 1 |
Shell
| false | null |
print(27/4)
|
UTF-8
|
Python
| false | false | 13 |
py
| 325 |
Compare1.py
| 308 | 0.615385 | 0.384615 | 0 | 1 | 11 | 11 |
transceptor-technology/trender
| 5,866,925,345,808 |
c56fb6f5449d642777d872ee08faac579c273ee1
|
7088124fe742b2adcf189626ed89a8d0e712f55a
|
/trender/aiohttp_template.py
|
20eb5898aa2bf0722a04d337544681f0b5254654
|
[
"MIT"
] |
permissive
|
https://github.com/transceptor-technology/trender
|
609cfdf2ab4230bfc34573a977109c82ee707e9b
|
ef2b7374ea2ecc83dceb139b358ec4ad8ce7033b
|
refs/heads/master
| 2020-05-21T15:19:33.551923 | 2018-08-15T13:09:27 | 2018-08-15T13:09:27 | 44,837,764 | 24 | 1 | null | false | 2016-08-14T20:39:30 | 2015-10-23T20:49:55 | 2016-08-11T14:18:55 | 2016-08-14T20:39:29 | 69 | 4 | 1 | 0 |
Python
| null | null |
'''Use TRender with aiohttp.
This implementation is based on aiohttp_jinja2, see:
http://aiohttp-jinja2.readthedocs.org/en/stable/ and
https://github.com/aio-libs/aiohttp_jinja2
:copyright: 2015, Jeroen van der Heijden (Transceptor Technology)
'''
from aiohttp import web
from .trender import TRender
_templates = []
class _Template:
def __init__(self, name, **kwargs):
self.name = name
self.ctemplate = None
self.kwargs = {
'content_type': 'text/html',
'charset': 'utf-8'
}
self.kwargs.update(kwargs)
def template(template_name, **kwargs):
# register this template name
rtemplate = _Template(template_name, **kwargs)
_templates.append(rtemplate)
def wrapper(func):
async def wrapped(*args):
namespace = await func(*args)
text = rtemplate.ctemplate.render(namespace)
return web.Response(body=text.encode('utf-8'), **rtemplate.kwargs)
return wrapped
return wrapper
def setup_template_loader(template_path):
for template in _templates:
template.ctemplate = TRender(
template.name,
path=template_path)
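if __name__ == '__main__':
    # Hedged usage sketch: the handler, route, and 'templates' directory are
    # illustrative assumptions, not part of this module.
    @template('index.html')
    async def index(request):
        return {'name': 'world'}  # namespace rendered by TRender

    app = web.Application()
    app.router.add_get('/', index)
    setup_template_loader('templates')
    web.run_app(app)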
|
UTF-8
|
Python
| false | false | 1,187 |
py
| 18 |
aiohttp_template.py
| 17 | 0.639427 | 0.631845 | 0 | 46 | 24.804348 | 78 |
riccardosabatini/nextmng
| 4,612,794,898,159 |
0ac4e21b75f9dede8d89996290ab6dbdf9498f98
|
915e5fc7d81d5c0f14e76e1337af01ca2827a9c2
|
/nextmng/settings/common.py
|
aab6085e973f24c94681f47dd12605c77d7f1a2a
|
[] |
no_license
|
https://github.com/riccardosabatini/nextmng
|
3636eb5aa441a22b3ec6dc90cb74338bd6ffd6ee
|
7453e96aa8b0d1563f9dfcf44c913ceb2f13dd2d
|
refs/heads/master
| 2021-01-10T21:20:30.947025 | 2014-05-15T11:32:34 | 2014-05-15T11:32:34 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Django settings for nextmng project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=g#tv+3@)t9z2h)zz-rfb001_g1x87yi+4bj!-wnd940#my3!8'
# Application definition
INSTALLED_APPS = (
'djangocms_admin_style',
#'admin_shortcuts',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'south',
'nextmng.main',
'djcelery',
'rest_framework',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
CONSOLE_LOG_LEVEL = os.environ.get('LOG_LEVEL', 'INFO')
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'logentries': {
'format': 'DJ %(levelname)s %(name)s %(module)s: %(message)s',
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': CONSOLE_LOG_LEVEL,
'class': 'logging.StreamHandler',
'formatter': 'logentries',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'WARNING',
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'nextmng': {
'handlers': ['console'],
'level': CONSOLE_LOG_LEVEL,
},
# 'celery.tasks': {
# 'handlers': ['console'],
# 'level': CONSOLE_LOG_LEVEL,
# },
}
}
ROOT_URLCONF = 'nextmng.urls'
WSGI_APPLICATION = 'nextmng.wsgi.application'
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Rome'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# AngularJS will complain if we append slashes
APPEND_SLASH = False
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.FileSystemFinder',
)
STATICFILES_DIRS = (
("resources", os.path.join(BASE_DIR, '..', 'resources')),
)
PLOT_DATA = {
'ymin': int(os.environ.get('PLOT_DATA_YMIN', -3)),
'ymax': int(os.environ.get('PLOT_DATA_YMAX', 3)),
}
# ----------------------
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
#CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'
#CELERY_BEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
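# Hedged usage note: environment-specific settings can extend this module,
# e.g. (the module name below is an assumption, not from the source):
#   # nextmng/settings/dev.py
#   from .common import *
#   DEBUG = True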
|
UTF-8
|
Python
| false | false | 4,609 |
py
| 24 |
common.py
| 18 | 0.646344 | 0.638967 | 0 | 156 | 28.512821 | 74 |
efgstone/django-barcode-auth
| 6,244,882,479,775 |
fb7dd169b4a4eca08d0333f6ff013a9e103c83c3
|
ed2aa652d963dbd20bc76c977ed5146f0d86552f
|
/backends.py
|
55dfd558eacf34403763783aa4cc86f71ed7e828
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
https://github.com/efgstone/django-barcode-auth
|
ea85579ed60eb5063d4671b74835bb6eeb4f3619
|
022eeb29cc78ec614eaa9cec8ed314c1ad46c6d7
|
refs/heads/master
| 2020-03-27T12:56:36.835488 | 2011-09-27T14:57:06 | 2011-09-27T14:57:06 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib.auth.models import User
from barauth.utils import gen_passhash
class BarcodeAuthBackend(object):
"""
Authenticates against a username and a hash contained in a
barcode generated by django-barcode-auth.utils.gen_passhash()
"""
def authenticate(self, user_id=None, password=None):
try:
user = User.objects.get(pk=user_id)
known_passhash = gen_passhash(user)
if password == known_passhash:
return user
except User.DoesNotExist:
return None
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
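# Hedged usage sketch -- wiring the backend into a project and authenticating
# from a scanned barcode; the dotted path and variable names are assumptions:
#
#   # settings.py
#   AUTHENTICATION_BACKENDS = [
#       'backends.BarcodeAuthBackend',
#       'django.contrib.auth.backends.ModelBackend',
#   ]
#
#   # view code
#   from django.contrib.auth import authenticate, login
#   user = authenticate(user_id=scanned_id, password=scanned_hash)
#   if user is not None:
#       login(request, user)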
|
UTF-8
|
Python
| false | false | 714 |
py
| 10 |
backends.py
| 9 | 0.623249 | 0.623249 | 0 | 25 | 27.56 | 65 |
acse-srm3018/HelloWorldHackathon
| 7,670,811,602,491 |
555f5b076ed25a6112c5f6cab6f23ef835434eca
|
026d23864f82349482395dfd18ffdad56b1c8fb3
|
/main.py
|
6d589c04f1922e44f66cafe0c215b1d08af93b7e
|
[] |
no_license
|
https://github.com/acse-srm3018/HelloWorldHackathon
|
d355b019569c710afc61a4d003ffad6e5b87c71e
|
0041344456a038f957d4752b5f657e602cf50a5f
|
refs/heads/main
| 2023-09-03T09:02:50.157329 | 2021-03-28T15:29:49 | 2021-03-28T15:29:49 | 405,771,452 | 4 | 0 | null | true | 2021-09-12T22:57:17 | 2021-09-12T22:57:16 | 2021-03-28T15:29:58 | 2021-03-28T15:29:56 | 38,639 | 0 | 0 | 0 | null | false | false |
import json
import spotipy
import pandas as pd
from spotipy.oauth2 import SpotifyClientCredentials, SpotifyOAuth
import requests
# client_id = '' #insert your client id
# client_secret = '' # insert your client secret id here
redirect_uri = 'http://localhost:8080/'
with open("credentials.json", "r") as file:
credentials = json.load(file)
client_id = credentials['client_id']
client_secret = credentials['client_secret']
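# Expected credentials.json shape (an assumption inferred from the keys above):
# {"client_id": "<your client id>", "client_secret": "<your client secret>"}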
# client_credentials_manager = SpotifyClientCredentials(client_id, client_secret)
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(client_id=client_id, client_secret=client_secret, redirect_uri=redirect_uri))
playlists = sp.user_playlists('staplegun.')
playlist_list = []
for playlist in playlists['items']:
playlist_list.append(playlist["id"])
print(playlist_list)
results = sp.playlist(playlist_list[0])
# results = sp.playlist(playlist_id)
song_ids = []
for item in results['tracks']['items']:
track = item['track']['id']
song_ids.append(track)
print(song_ids)
# print(current_user_playlists(limit=50, offset=0))
# GET https://api.spotify.com/v1/users/{'staplegun.'}/playlists
|
UTF-8
|
Python
| false | false | 1,116 |
py
| 5 |
main.py
| 2 | 0.742832 | 0.733871 | 0 | 40 | 26.9 | 124 |
saeeddiscovery/Deep3DSM
| 5,600,637,385,746 |
f16bb62aa43c52869db11aebc8d9bfcbafe87aea
|
e048af261935eee97adfa64db44d2d6d230171b0
|
/Utils/utils.py
|
7915adebefa60a66c5283be69776e8565f059c6d
|
[] |
no_license
|
https://github.com/saeeddiscovery/Deep3DSM
|
b7a67a13f514303070f026bb503f7f0a02e71bcc
|
7bc7a0db4e34b9f28ad14706a693030bfa1a8837
|
refs/heads/master
| 2021-10-11T04:51:22.779100 | 2019-01-06T21:24:58 | 2019-01-06T21:24:58 | 166,949,211 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
def myPrint(text, path, consolePrint=True):
if not os.path.exists(path+'/reports/'):
os.mkdir(path+'/reports/')
if consolePrint:
print(text)
print(text, file=open(path+'/reports/output.txt', 'a'))
def myLog(text, path):
myPrint(text, path, consolePrint=False)
def visualizeDataset(dataset, plotSize=[4,4]):
import matplotlib.pyplot as plt
plt.figure()
for num in range(len(dataset)):
plt.subplot(plotSize[0],plotSize[1],num+1)
centerSlice = int(dataset.shape[1]/2)
plt.imshow(dataset[num, :, centerSlice, :, 0], cmap='gray')
plt.axis('off')
plt.suptitle('Center Coronal Slice\nfrom each training image')
import re
def sortHuman(l):
convert = lambda text: float(text) if text.isdigit() else text
alphanum = lambda key: [convert(c) for c in re.split('([-+]?[0-9]*\.?[0-9])', key)]
l.sort(key=alphanum)
return l
|
UTF-8
|
Python
| false | false | 934 |
py
| 8 |
utils.py
| 6 | 0.62955 | 0.616702 | 0 | 28 | 32.392857 | 87 |
SushainRazdan/dlms-cosem
| 3,418,793,985,513 |
94f890363d973a6b459bd85abf24dbf72fda2b3b
|
d63ea8022aeb30cd3a4a5bfb5f915ad67b17318c
|
/dlms_cosem/clients/serial_hdlc.py
|
a0a4fafd97c7480b088db151444a5af3779e4ff5
|
[
"BSD-3-Clause"
] |
permissive
|
https://github.com/SushainRazdan/dlms-cosem
|
75c63567ba2022fcc548b74c007ecc35f9fb9190
|
b169e4c5b38d48415101b7d01acee161dbdf29f8
|
refs/heads/master
| 2023-01-21T10:59:03.467407 | 2020-11-17T10:22:41 | 2020-11-17T10:22:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from typing import Optional
import logging
import attr
import serial
from dlms_cosem.protocol.hdlc import (
address,
state,
connection,
frames,
exceptions as hdlc_exception,
)
LOG = logging.getLogger(__name__)
class ClientError(Exception):
"""General error in client"""
@attr.s(auto_attribs=True)
class SerialHdlcClient:
"""
HDLC client to send data over serial.
"""
client_logical_address: int
server_logical_address: int
serial_port: str
serial_baud_rate: int = attr.ib(default=9600)
server_physical_address: Optional[int] = attr.ib(default=None)
client_physical_address: Optional[int] = attr.ib(default=None)
hdlc_connection: connection.HdlcConnection = attr.ib(
default=attr.Factory(
lambda self: connection.HdlcConnection(
self.server_hdlc_address, self.client_hdlc_address
),
takes_self=True,
)
)
_serial: serial.Serial = attr.ib(
default=attr.Factory(
lambda self: serial.Serial(
port=self.serial_port, baudrate=self.serial_baud_rate, timeout=2
),
takes_self=True,
)
)
_send_buffer: list = attr.ib(factory=list)
@property
def server_hdlc_address(self):
return address.HdlcAddress(
logical_address=self.server_logical_address,
physical_address=self.server_physical_address,
address_type="server",
)
@property
def client_hdlc_address(self):
return address.HdlcAddress(
logical_address=self.client_logical_address,
physical_address=self.client_physical_address,
address_type="client",
)
def connect(self):
"""
Sets up the HDLC Connection by sending a SNRM request.
"""
# TODO: Implement hdlc parameter negotiation in SNRM frame
if self.hdlc_connection.state.current_state != state.NOT_CONNECTED:
raise ClientError(
f"Client tried to initiate a HDLC connection but connection state was "
f"not in NOT_CONNECTED but in "
f"state={self.hdlc_connection.state.current_state}"
)
snrm = frames.SetNormalResponseModeFrame(
destination_address=self.server_hdlc_address,
source_address=self.client_hdlc_address,
)
self._send_buffer.append(snrm)
ua_response = self._drain_send_buffer()[0]
LOG.info(f"Received {ua_response!r}")
return ua_response
def disconnect(self):
"""
Sends a DisconnectFrame
:return:
"""
disc = frames.DisconnectFrame(
destination_address=self.server_hdlc_address,
source_address=self.client_hdlc_address,
)
self._send_buffer.append(disc)
response = self._drain_send_buffer()[0]
return response
def _drain_send_buffer(self):
"""
        Messages might need to be fragmented. To handle the flow we split all
        data that needs to be sent into several frames, and when this is
        called it makes sure everything is sent according to the protocol.
"""
response_frames = list()
while self._send_buffer:
frame = self._send_buffer.pop(0) # FIFO behavior
self._write_frame(frame)
if self.hdlc_connection.state.current_state in state.RECEIVE_STATES:
response = self._next_event()
response_frames.append(response)
return response_frames
def _next_event(self):
"""
Will read the serial line until a proper response event is read.
:return:
"""
while True:
# If we already have a complete event buffered internally, just
# return that. Otherwise, read some data, add it to the internal
# buffer, and then try again.
event = self.hdlc_connection.next_event()
if event is state.NEED_DATA:
self.hdlc_connection.receive_data(self._read_frame())
continue
return event
def send(self, telegram: bytes) -> bytes:
"""
        Send will make sure the data that needs to be sent is sent.
        Send is the only public function that will return the response data
        when received in full.
        Send will handle fragmentation of data if the data is too large to be
        sent in a single HDLC frame.
:param telegram:
:return:
"""
current_state = self.hdlc_connection.state.current_state
if not current_state == state.IDLE:
raise hdlc_exception.LocalProtocolError(
f"Connection is not in state IDLE and cannot send any data. "
f"Current state is {current_state}"
)
info = self.generate_information_request(telegram)
self._send_buffer.append(info)
response: frames.InformationFrame = self._drain_send_buffer()[0]
return response.payload
def generate_information_request(self, payload):
return frames.InformationFrame(
destination_address=self.server_hdlc_address,
source_address=self.client_hdlc_address,
payload=payload,
send_sequence_number=self.hdlc_connection.state.client_ssn,
receive_sequence_number=self.hdlc_connection.state.client_rsn,
response_frame=False
)
def _write_frame(self, frame):
frame_bytes = self.hdlc_connection.send(frame)
LOG.info(f"Sending {frame!r}")
self._write_bytes(frame_bytes)
def _write_bytes(self, to_write: bytes):
LOG.debug(f"Sending: {to_write!r}")
self._serial.write(to_write)
def _read_frame(self) -> bytes:
in_bytes = self._serial.read_until(frames.HDLC_FLAG)
if in_bytes == frames.HDLC_FLAG:
# We found the first HDLC Frame Flag. We should read until the last one.
in_bytes += self._serial.read_until(frames.HDLC_FLAG)
LOG.debug(f"Received: {in_bytes!r}")
return in_bytes
def __enter__(self):
self.connect()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.disconnect()
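if __name__ == '__main__':
    # Hedged usage sketch; the port, addresses, and request bytes below are
    # illustrative assumptions, not values taken from the source.
    client = SerialHdlcClient(
        client_logical_address=16,
        server_logical_address=1,
        serial_port='/dev/ttyUSB0',
    )
    with client:  # connect() on enter, disconnect() on exit
        response = client.send(b'\x01\x02\x03')
        print(response)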
|
UTF-8
|
Python
| false | false | 6,344 |
py
| 12 |
serial_hdlc.py
| 11 | 0.606715 | 0.605296 | 0 | 192 | 32.041667 | 92 |
DanielFord11/Final
| 13,572,096,686,326 |
9524774219ae22b13aa8a05ded4f893eef0d2735
|
62e8f6c6c8bf4c0ad78165184134b5a4a3782ddf
|
/Clout_Chaser/Reddit_Scraper.py
|
c168e8bd43c8355f6c62c5b531d233f7e54c5b7e
|
[] |
no_license
|
https://github.com/DanielFord11/Final
|
a54c30cab8d3fd47bb6829b7760b0571dd23cde4
|
52d2dd8ff420aa187a67df9b57b384e20c6d5990
|
refs/heads/main
| 2023-03-17T17:50:56.135550 | 2021-03-15T20:28:11 | 2021-03-15T20:28:11 | 347,547,540 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#set to run every 14 min
from psaw import PushshiftAPI
import datetime as dt
import pandas as pd
import json
import requests
end_epoch = int(dt.datetime.today().timestamp())
start_epoch = end_epoch - 840  # 14-minute lookback window
api = PushshiftAPI()
sub_list = ['wallstreetbets', 'WallstreetbetsELITE', 'Wallstreetbets']
stock_df = pd.read_csv("stocks.csv")
ticker_list = list(stock_df["Symbol"])
name_list = list(stock_df["Name"])
#mongo dependencies
import pymongo
client = pymongo.MongoClient()
db = client["Clout_Chaser"]
collection=db["stocks"]
def scrape_reddit():
    for sub in sub_list:
        print(f"scraping r/{sub} from {start_epoch} to {end_epoch}")
        try:
            reddit_response = list(api.search_submissions(after=start_epoch,
                                            before=end_epoch,
                                            subreddit=sub,
                                            filter=['url','author','title','subreddit',
                                                    'upvote_ratio','score'],
                                            limit=5000))
            print(f"response len:{len(reddit_response)}")
        except Exception:
            print(f"call failed for subreddit:{sub}")
            continue
        for post in range(len(reddit_response)):
            try:
                document = {"author": reddit_response[post][0],
                            "created": reddit_response[post][1],
                            "score": reddit_response[post][2],
                            "subreddit": reddit_response[post][3],
                            "title": reddit_response[post][4]}
                collection.insert_one(document)
                print(f"wrote {post} of {len(reddit_response)} to mongo")
            except Exception:
                print("generating doc failed")
    return
if __name__ == "__main__":
scrape_reddit()
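# To honor the 14-minute window above, one scheduling option (an assumption,
# not from the source) is a crontab entry such as:
#   */14 * * * * /usr/bin/python3 /path/to/Reddit_Scraper.py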
|
UTF-8
|
Python
| false | false | 1,936 |
py
| 7 |
Reddit_Scraper.py
| 5 | 0.534091 | 0.520661 | 0 | 60 | 31.266667 | 78 |
afcarl/qtick
| 11,811,160,077,213 |
c7227a0f41309df9621aafcb384373945a31721d
|
c22ec5162b4fce59e55a9e73a3104c6141c99fda
|
/scripts/state.py
|
f5a787d9b8e46182c7671e1d3c6ecf55e042ad5c
|
[] |
no_license
|
https://github.com/afcarl/qtick
|
4c8b6417b77e082598a8a0143589d66e07eac967
|
495f8a783266fe5f322812089ba07923bf67f8bb
|
refs/heads/master
| 2020-09-03T21:54:27.520150 | 2017-05-13T07:35:06 | 2017-05-13T07:35:06 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
from collections import deque
class state(object):
def __init__(self, shape, size):
self.steps = deque()
self.shape = shape
self.size = size
self.value = None
for i in range(size):
self.push_zeroes()
def push_zeroes(self):
self.push_array(np.zeros(self.shape))
def push_array(self, step_array):
assert self.shape == step_array.shape[0]
if len(self.steps) == self.size:
self.steps.popleft()
self.steps.append(step_array)
self.complete()
def complete(self):
self.value = np.concatenate(self.steps)
def read(self):
return self.value
def reshape(self, rows, cols):
return self.value.reshape(rows, cols)
def vector(self):
return self.value.reshape(1, self.value.shape[0])
def __str__(self):
return str(self.value)
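if __name__ == '__main__':
    # Minimal usage sketch; the shape/size values are illustrative assumptions.
    s = state(shape=4, size=3)      # rolling window of 3 steps of 4 values
    s.push_array(np.ones(4))
    s.push_array(np.full(4, 2.0))
    print(s.read().shape)           # (12,) -- three concatenated steps
    print(s.reshape(3, 4))          # one step per row
    print(s.vector().shape)         # (1, 12)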
|
UTF-8
|
Python
| false | false | 916 |
py
| 7 |
state.py
| 6 | 0.586245 | 0.582969 | 0 | 40 | 21.9 | 57 |
pkumath/datastructure
| 9,947,144,275,257 |
62cbd76fc4c11ac8ce1a13ef20f1c631e6b42936
|
538c056be6dcca1e676cf23d427ecc12a0865e0e
|
/gui.py
|
796f14b06c69d13bb76f5269eab82122cd5503d5
|
[
"MIT"
] |
permissive
|
https://github.com/pkumath/datastructure
|
41e7e13ccfd4a129b7e496ec1dda32e47ecfd5fc
|
0b440b59af73ed73c575df5cd1c67946aa510dba
|
refs/heads/master
| 2023-05-25T22:24:49.691928 | 2021-08-16T00:45:40 | 2021-08-16T00:45:40 | 252,172,905 | 4 | 1 |
MIT
| false | 2023-05-22T23:27:08 | 2020-04-01T12:46:20 | 2021-08-16T00:45:42 | 2023-05-22T23:27:07 | 46,909 | 2 | 1 | 1 |
Python
| false | false |
import tkinter as tk
from tkinter import filedialog as tkfiledialog
from tkinter import messagebox as tkmessagebox
from pathlib import Path
import sys
import pyperclip
from multiprocessing import Process
import threading
import webbrowser
import os
from appdirs import *
import logging as log
import inkscape_control
from globe import Globe as globe
import widget
import edit_scroll_process as svg_file
import workspace
from blueprint import show_blueprint, export_blueprint, import_blueprint
from util import StrUtil as strutil
user_dir = Path(user_config_dir("project", "ww"))
# used to store data files
if not user_dir.is_dir():
    user_dir.mkdir(parents=True)
data_dir = Path(user_data_dir('project','ww'))
# used to store data
if not data_dir.is_dir():
    data_dir.mkdir(parents=True)
flag_path = data_dir / 'flag.txt'
def init():
# Root
if flag_path.exists():
pass
else:
f = open(flag_path, 'w')
f.write('Browser')
f.close()
root = tk.Tk()
root.title('LaTeX模版生成程序')
root.geometry('700x800')
# Var
var_snippet = tk.StringVar()
var_dependency = tk.StringVar()
varr_snippet = tk.StringVar()
varr_dependency = tk.StringVar()
varr_workpath = tk.StringVar()
varr_snippet.set('经过处理的图片文件名:'+var_snippet.get())
# Hint
hint_variable = '图片名称'
hint_snippet = ' 提示:这是一个自制的简易LaTeX模版生成程序,我们将持续加入其他模版作为扩展,这是在macOS下制作的,我本人不是很清楚Menu组件和Windows显示的是否一致.\n众所周知的是,原来课本上的menu写法只在Windows上生效,因为Mac里的menu是显示在屏幕最上方而不是窗口里面的.\n如果您没有成功显示,换一个电脑,或者忽略格式错误.\n'+\
'**********************************************************************************\n下方浅黄色区域时就是您的工作区域.\n请输入...\n欲获取详细信息,请查看菜单栏的"使用说明"'
hint_dependency = '这里是上面模版所需的LaTeX依赖展示区.是需要被放入导言区的内容.'
# Field
field_variable = widget.HintEntry(root,0,0,hint_variable)
field_variable.place(relx = 0.5,rely = 0.05, anchor = tk.CENTER)
field_list = widget.make_list(root,svg_file.get_svgnames(os.getcwd()),os.getcwd())
field_list.place(relheight = 0.8,relwidth = 0.27,relx = 0.7,rely = 0.15)
field_snippet = widget.HintText(root,0,0,hint_snippet,80,40)
field_snippet.place(relheight = 0.4, relwidth = 0.7, rely = 0.15)
field_dependency = widget.HintText(root,0,0,hint_dependency,80,40)# useHint = False)
field_dependency.place(relheight = 0.4, relwidth = 0.7, rely = 0.55)
# Label
label_variable = tk.Label(root, textvariable = varr_snippet)
label_variable.place(relx = 0.5, rely = 0.1,anchor = tk.CENTER)
warning = '请注意,工作路径须与tex文件保持一致。\n如果要修改工作路径,请在菜单栏当中选取"切换工作路径"。'
varr_workpath.set('当前工作路径:'+os.getcwd()+'.'+warning)
label_workpath = widget.auto_label(root,varr_workpath)
label_workpath.place(relx=0.5, rely=0.98, anchor=tk.CENTER)
# Button
btn_generate = tk.Button(root, text = '生成片段并复制',command = lambda : callback(field_snippet, var_snippet,varr_snippet,field_variable))
btn_generate.place(relx = 0.7, rely = 0.04)
btn_edit = tk.Button(root, text='编辑已有图片!',
command=lambda : globe.blueprint.do_macro(name=field_list.content()))
btn_edit.place(relx=0.7, rely=0.08)
btn_clrsnip = tk.Button(root, text = '清空片段',command = lambda : field_snippet.clear())
btn_clrsnip.place(relx = 0.2,rely = 0.04)
btn_clrdep = tk.Button(root, text = '清空依赖区',command = lambda : field_dependency.clear())#button: clear dependency
btn_clrdep.place(relx = 0.05,rely = 0.04)
btn_inkscape = tk.Button(root, text = '执行宏',command = lambda : globe.blueprint.do_macro(name=field_variable.content()) if not field_variable.hinting else None)#inkscape_control.create(strutil.label(var_snippet.get())))
btn_inkscape.place(relx = 0.05,rely = 0.08)
# Menu
menubar = tk.Menu(root)
menu_file = tk.Menu(menubar, tearoff = False)
menu_file.add_command(label = '切换工作路径',command = lambda : menu_callback('cwd',field_snippet,var_snippet,varr_snippet,field_list))
menu_file.add_separator()
menu_file.add_command(label = '导入片段',command = lambda : menu_callback('open',field_snippet,var_snippet,varr_snippet,field_list))
menu_file.add_command(label = '导入依赖区',command = lambda : menu_callback('open',field_dependency,var_dependency,varr_dependency,field_list))
menu_file.add_command(label = '退出',command = root.quit)
menu_file.add_separator()
menu_file.add_command(label ='保存片段',command = lambda : menu_callback('save',field_snippet,var_snippet,varr_snippet,field_list))
menu_file.add_command(label ='保存依赖区',command = lambda : menu_callback('save',field_dependency,var_dependency,varr_dependency,field_list))
menu_file.add_separator()
menu_file.add_command(label = '导入蓝图',command = lambda : import_filedialog())
menu_file.add_command(label ='导出蓝图',command = lambda : export_filedialog())
menu_help = tk.Menu(menubar, tearoff = False)
menu_help.add_command(label = '关于...',command = lambda : menu_callback('about',field_snippet,var_snippet,varr_snippet,field_list))
menu_help.add_command(label = '使用说明',command = lambda : menu_callback('hint',field_snippet,var_snippet,varr_snippet,field_list))
menu_help.add_command(label='获取教学视频',
command=lambda: menu_callback('video', field_snippet, var_snippet, varr_snippet, field_list))
menubar.add_cascade(label = '文件',menu = menu_file)
menubar.add_cascade(label = '帮助', menu = menu_help)
root.config(menu=menubar)
globe.ui = {
"root": root,
"var": {
"snippet": var_snippet,
"dependency": var_dependency,
},
"varr": {
"snippet": varr_snippet,
"dependency": varr_dependency,
},
"field": {
"variable": field_variable,
"snippet": field_snippet,
"dependency": field_dependency,
"list": field_list,
},
"label": {
"variable": label_variable,
},
"button": {
"generate": btn_generate,
"clrsnip": btn_clrsnip,
"clrdep": btn_clrdep,
"inkscape": btn_inkscape,
"edit": btn_edit,
},
"menubar": menubar,
"menu": {
"file": menu_file,
"help": menu_help,
}
}
field_list.auto_check()
label_workpath.auto_check(varr_workpath,warning)
# check_inkscape()
log.info("GUI initiated")
    show_blueprint()  # show the default blueprint
root.mainloop()
log.info("GUI destroyed")
def callback(widget,var,varr,field_variable):
    """Callback triggered by the generate button."""
if field_variable.hinting:
log.warning("Still hinting")
return
log.info(field_variable.content())
var.set(field_variable.content())
variable = var.get()
fileName = globe.blueprint.get_factor(**{'name': variable})['fileName']
fragment = globe.blueprint.get_fragment(**{'name': variable})
# text.myvar.set(latex_template(var.get(),title))
widget.text.delete('1.0','end')
widget.text.insert('1.0', fragment)
pyperclip.copy(fragment)
varr.set('经过处理的图片题目:'+fileName)
if widget.hinting == True:
widget.unhint()
def menu_callback(command,widget,var,varr,listbox):
    """menu_callback
    :param command: menu-bar action to dispatch
    """
if command == 'about':
tkmessagebox.showinfo('Help',message= '这是一个latex模版生成程序.\n 温刚于5.10最后一次修改, 1800011095,\n school of mathematics, Peking University.\n 王奕轩, 1900014136, department of chinese, Peking University.')
# listbox.update()
elif command == 'hint':
tkmessagebox.showinfo('Hint',message = '图片标题的处理是为了防止不合法的标题,所以不建议或者未开放关闭自动处理功能.')
with open(str(flag_path), 'r') as f:
manual_state = f.read()
if 'Browser' in manual_state:
sys.path.append("libs")
url = 'http://39.107.57.131/?p=605'
webbrowser.open(url)
listbox.update()
elif 'True' in manual_state:
print('here!')
os.system('open ' + str(data_dir/'manual.pdf').replace(' ', '\ '))
elif command == 'save':
widget.save_file_as(None,varr)
# listbox.update()
elif command == 'open':
widget.open_file(None,None,var,varr)
# listbox.update()
elif command == 'cwd':
cwd_select()
# listbox.update()
elif command == 'video':
sys.path.append("libs")
url = 'http://39.107.57.131/?p=593'
webbrowser.open(url)
def cwd_select():
    """cwd_select
    Select the working directory.
    """
cwdpath = tk.filedialog.askdirectory(initialdir=os.getcwd())
if not (cwdpath == ''):
workspace.cwd(cwdpath)
tkmessagebox.showinfo('工作路径', '当前工作路径已切换至 {}'.format(os.getcwd()))
else: log.warning("Selection cancelled.")
def export_filedialog(kind='json'):
filename = tk.filedialog.asksaveasfilename(initialdir=os.getcwd(), filetypes=[
(kind.upper(), '*.%s'%kind),
], )
if not (filename == ''):
if filename[-len(kind)-1:] != ".%s"%kind: filename += ".%s"%kind
export_blueprint(filename)
else:
log.info("Save cancelled.")
def import_filedialog():
filename = tk.filedialog.askopenfilename(initialdir=os.getcwd(), filetypes=[
('JSON', '*.json'),
])
if not (filename == ''):
return_code = import_blueprint(filename)
if return_code ==-1:
tkmessagebox.showerror('导入失败', '{} 不是合法的蓝图文件。'.format(filename))
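if __name__ == '__main__':
    # Hedged entry point for launching the GUI directly; the project may
    # normally call init() from another module (an assumption).
    init()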
|
UTF-8
|
Python
| false | false | 10,441 |
py
| 29 |
gui.py
| 11 | 0.623726 | 0.609015 | 0 | 264 | 35.049242 | 222 |
jaideepmurkute/kaggle_ranzcr
| 2,388,001,852,866 |
2d421e86beff65026f6559c8c6209acad52652a4
|
196b5faf37e333d6429a1a2f1f31591c94f8c845
|
/custom_vit.py
|
44c8130d079263bc3aa1df37591361d7a5449096
|
[] |
no_license
|
https://github.com/jaideepmurkute/kaggle_ranzcr
|
9f7ab0ddbf7dec78524206d97bd4fcfe1e4e73a6
|
0099d3767625327ce5985dfa1a071e4030082281
|
refs/heads/main
| 2023-02-18T23:14:31.567664 | 2021-01-19T18:43:33 | 2021-01-19T18:43:33 | 322,129,680 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import timm
import torch.nn as nn
import torch
import numpy as np
from vit_pytorch import ViT
class CustomViT(nn.Module):
def __init__(self, args):
super(CustomViT, self).__init__()
self.choice = args.choice
self.num_classes = args.num_classes # number of output classes for model
self.device = args.device
self.mixup_alpha = args.mixup_alpha
self.is_contrastive = args.is_contrastive
self.input_size = args.input_size
        # NOTE: num_classes is hard-coded to 11 rather than taken from
        # args.num_classes, and patch_size 44 requires input_size % 44 == 0.
        self.model = ViT(image_size=self.input_size, patch_size=44, num_classes=11, dim=1024, depth=12, heads=32, mlp_dim=2048,
                         dropout=0.05, emb_dropout=0.05)
def forward(self, args, x, label, cat_label=None, enable_mixup=False, training=False):
gammas = []
        if enable_mixup:
            # np.arange(0, 1) == [0], so mixup is always applied at layer 0
            self.mixup_layer = np.random.choice(np.arange(0, 1))
else:
self.mixup_layer = None
self.mixup_lambdas = None
output = x
if enable_mixup and self.mixup_layer == 0:
if args.mixup_method == 'manifold_mixup':
output, label = self.perform_mixup(args, output, label)
if args.mixup_method == 'manifold_cutmix':
output, label = self.perform_cutmix(args, output, label)
output = self.model(output)
embeddings = output
cat_label_output = None
return output, embeddings, label, cat_label_output, cat_label, gammas
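if __name__ == '__main__':
    # Hedged smoke test; the argument values below are illustrative
    # assumptions (352 is chosen because it is divisible by patch_size 44).
    from types import SimpleNamespace
    args = SimpleNamespace(choice=None, num_classes=11, device='cpu',
                           mixup_alpha=0.2, is_contrastive=False,
                           input_size=352)
    model = CustomViT(args)
    x = torch.randn(2, 3, 352, 352)
    label = torch.randint(0, 2, (2, 11)).float()
    out, emb, label, _, _, _ = model(args, x, label)
    print(out.shape)  # torch.Size([2, 11])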
|
UTF-8
|
Python
| false | false | 1,547 |
py
| 5 |
custom_vit.py
| 4 | 0.586942 | 0.570136 | 0 | 43 | 34.023256 | 127 |
chrinide/MIM
| 9,740,985,831,101 |
d4a5101ded734415397009b3159cb51e66b5c918
|
fffdfbe01eda85efe8e1ecbf521778f9aa89b380
|
/mim/Fragment.py
|
4a0c6ad2f31830c6d6fbb022054333b82af3110a
|
[
"BSD-3-Clause"
] |
permissive
|
https://github.com/chrinide/MIM
|
65fef80174cc101216cc2a7699e8e417068a677c
|
587ae79b0ec76c20af6235a68c85ed15f2cc7155
|
refs/heads/master
| 2023-08-02T05:56:10.776001 | 2021-10-05T17:46:23 | 2021-10-05T17:46:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import string
import time
import numpy as np
#from .Pyscf import *
#from ase import Atoms
#from ase.calculators.vasp import Vasp
#from ase.vibrations import Infrared
from numpy import linalg as LA
from mendeleev import element
#import mim
#from mim import runpie, Molecule, fragmentation, Fragment, Pyscf
class Fragment():
"""
Class to store a list of primitives corresponding to a molecular fragment
Parameters
----------
theory : str
Level of theory for calculation
basis : str
Basis set name for calculations
prims : list
List of fragments from Fragmentation class with atom indexes in list
attached : list
List of attached pairs with the atom that is in the fragment and its corresponding atom pair that was cut
coeff : int
Coefficent of fragment. This will either be 1 or -1.
"""
def __init__(self, qc_class, molecule, prims, attached=[], coeff=1, step_size=0.001, local_coeff=1):
self.prims = prims
self.molecule = molecule
self.coeff = coeff
self.attached = attached #[(supporting, host), (supporting, host), ...]
self.inputxyz = []
self.apt = []
self.aptgrad = np.array([])
self.step = step_size
self.energy = 0
self.grad = 0
self.hessian = 0
self.hess = []
self.notes = [] # [index of link atom, factor, supporting atom, host atom]
self.jacobian_grad = [] #array for gradient link atom projections
self.jacobian_hess = [] #ndarray shape of full system*3 x fragment(with LA)*3
self.qc_class = qc_class
self.step_size = step_size
self.local_coeff = local_coeff
self.M = [] #this is the mass matrix for massweighting shape: (3N, 3N)
self.center = []
self.gradlist = []
self.origin_vec = []
self.nuc_deriv = []
def add_linkatoms(self, atom1, attached_atom, molecule):
""" Adds H as a link atom
        The link atom is placed at a distance ratio between the supporting and host atoms in each fragment where a bond was cut
Parameters
----------
atom1 : int
This is the integer corresponding to the supporting atom (real atom)
attached_atom : int
            This is the integer corresponding to the host atom (ghost atom)
molecule : <class> instance
This is the molecule class instance
Returns
-------
new_xyz : list
This is the list of the new link atom with atom label and xyz coords
factor : float
The factor between the supporting and host atom. Used in building Jacobians for link atom projections.
"""
atom1_element = molecule.atomtable[atom1][0]
attached_atom_element = molecule.atomtable[attached_atom][0]
cov_atom1 = molecule.covrad[atom1_element][0]
cov_attached_atom = molecule.covrad[attached_atom_element][0]
self.atom_xyz = np.array(molecule.atomtable[atom1][1:])
attached_atom_xyz = np.array(molecule.atomtable[attached_atom][1:])
vector = attached_atom_xyz - self.atom_xyz
dist = np.linalg.norm(vector)
h = 0.32
factor = (h + cov_atom1)/(cov_atom1 + cov_attached_atom)
new_xyz = list(factor*vector+self.atom_xyz)
coord = []
coord.append('H')
coord.append(new_xyz)
return coord, factor
def build_xyz(self):
""" Builds the xyz input with the atom labels, xyz coords, and link atoms as a string or list
Parameters
----------
none
Returns
-------
inputxyz : str
            String with atom label then corresponding xyz coordinates. This input includes the link atoms.
input_list : list of lists
ie [[['H', [0, 0 ,0]], ['O', [x, y, z]], ... ]
self.notes: list of lists
List of lists that is created with len = number of link atoms. Each sub list corresponds to one link atom.
(i.e. [index of link atom, factor, supporting atom number, host atom number])
"""
self.notes = []
input_list = []
coord_matrix = np.empty([len(self.prims)+len(self.attached), 3])
for atom in self.prims:
input_list.append([self.molecule.atomtable[atom][0]])
input_list[-1].append(list(self.molecule.atomtable[atom][1:]))
x = np.array(self.molecule.atomtable[atom][1:])
for pair in range(0, len(self.attached)):
la_input, factor = self.add_linkatoms(self.attached[pair][0], self.attached[pair][1], self.molecule)
input_list.append(la_input)
position = len(self.prims)+pair
self.notes.append([position])
self.notes[-1].append(factor)
self.notes[-1].append(self.attached[pair][0])
self.notes[-1].append(self.attached[pair][1])
#self.input_list = input_list
return input_list
def build_jacobian_Grad(self):
"""Builds Jacobian matrix for gradient link atom projections
Parameters
----------
none
Returns
-------
self.jacobian_grad : ndarray
Array where entries are floats on the diagonal with the corresponding factor.
            Array has size (# of atoms in full molecule + all link atoms, # of atoms in primitive)
"""
self.jacobian_grad = 0
array = np.zeros((self.molecule.natoms, len(self.prims)))
linkarray = np.zeros((self.molecule.natoms, len(self.notes)))
for i in range(0, len(self.prims)):
array[self.prims[i]][i] = 1
for j in range(0, len(self.notes)):
factor = 1 - self.notes[j][1]
linkarray[self.notes[j][2]][j] = factor
linkarray[self.notes[j][3]][j] = self.notes[j][1]
self.jacobian_grad = np.concatenate((array, linkarray), axis=1)
jacob = self.jacobian_grad
return jacob
def build_jacobian_Hess(self):
""" Builds Jacobian matrix for hessian link atom projections.
Parameters
----------
Returns
-------
self.jacobian_hess : ndarray (tensor)
Array where the entries are matrices corresponding factor.
"""
zero_list = []
full_array = np.zeros((self.molecule.natoms, len(self.prims)+len(self.notes), 3, 3))
for i in range(0, len(self.prims)):
full_array[self.prims[i], i] = np.identity(3)
for j in range(0, len(self.notes)):
factor_s = 1-self.notes[j][1]
factor_h = self.notes[j][1]
x = np.zeros((3,3))
np.fill_diagonal(x, factor_s)
position = len(self.prims) + j
full_array[self.notes[j][2]][position] = x
np.fill_diagonal(x, factor_h)
full_array[self.notes[j][3]][position] = x
self.jacobian_hess = full_array
return self.jacobian_hess
def qc_backend(self):
"""
Runs the quantum chemistry backend.
This runs an energy and gradient calculation. If hessian is available
it will also run that.
Returns
-------
self.energy : float
This is the energy for the fragment*its coeff
self.gradient : ndarray
This is the gradient for the fragment*its coeff
self.hessian : ndarray (4D tensor)
            This is the hessian for the fragment*its coeff
"""
np.set_printoptions(suppress=True, precision=9, linewidth=200)
self.energy = 0
hess_py = 0
self.grad = 0
self.inputxyz = self.build_xyz()
#sets origin of coords to center of mass
#self.center = self.com()
#finds inertia vector, R and T modes (only for 3 atom molecules currently)
#self.inertia()
energy, grad, hess_py = self.qc_class.energy_gradient(self.inputxyz)
#self.energy = self.coeff*energy
self.energy = self.local_coeff*self.coeff*energy
jacob = self.build_jacobian_Grad()
self.grad = self.local_coeff*self.coeff*jacob.dot(grad)
self.M = self.mass_matrix()
print("Done! \n")
return self.energy, self.grad, hess_py #, self.hessian#, self.apt
def hess_apt(self, hess_py):
"""
Runs only the hessian and atomic polar tensor calculations
Returns
-------
self.hessian : ndarray
self.apt : ndarray
"""
#If not analytical hess, do numerical below
if type(hess_py) is int:
print("Numerical hessian needed, Theory=", self.qc_class.theory)
hess_flat = np.zeros(((len(self.inputxyz))*3, (len(self.inputxyz))*3))
i = -1
for atom in range(0, len(self.inputxyz)):
for xyz in range(0, 3):
i = i+1
self.inputxyz[atom][1][xyz] = self.inputxyz[atom][1][xyz]+self.step_size
grad1 = self.qc_class.energy_gradient(self.inputxyz)[1].flatten()
self.inputxyz[atom][1][xyz] = self.inputxyz[atom][1][xyz]-2*self.step_size
grad2 = self.qc_class.energy_gradient(self.inputxyz)[1].flatten()
self.inputxyz[atom][1][xyz] = self.inputxyz[atom][1][xyz]+self.step_size
vec = (grad1 - grad2)/(4*self.step_size)
hess_flat[i] = vec
hess_flat[:,i] = vec
#Analytical hess from qc_backend gets reshaped and flatten to 3Nx3N matrix
else:
hess_flat = hess_py
#start building jacobian and reshaping
self.jacobian_hess = self.build_jacobian_Hess() #shape: (Full, Sub, 3, 3)
j_reshape = self.jacobian_hess.transpose(0,2,1,3)
j_flat = j_reshape.reshape(self.molecule.natoms*3, len(self.inputxyz)*3, order='C') #shape: (Full*3, Sub*3)
j_flat_tran = j_flat.T #shape: (Sub*3, Full*3)
first = np.dot(j_flat, hess_flat) # (Full*3, Sub*3) x (Sub*3, Sub*3) -> (Full*3, Sub*3)
second = np.dot(first, j_flat_tran) # (Full*3, Sub*3) x (Sub*3, Full*3) -> (Full*3, Full*3)
self.hessian = second*self.coeff*self.local_coeff
#start building the APT's
self.apt = self.build_apt()
#self.aptgrad = self.apt_grad() #one i am trying to get to work
return self.hessian, self.apt
def inertia(self):
""" Finds principal axes and moments of inertia in amu*Bohr^2
(I did this in a very non-optimized way!)
"""
xx = 0
yy = 0
zz = 0
xy = 0
xz = 0
yz = 0
for i in range(0, len(self.inputxyz)):
x = element(self.inputxyz[i][0])
mass = x.atomic_weight
xx += (self.inputxyz[i][1][1]**2 + self.inputxyz[i][1][2]**2)*mass
yy += (self.inputxyz[i][1][0]**2 + self.inputxyz[i][1][2]**2)*mass
zz += (self.inputxyz[i][1][0]**2 + self.inputxyz[i][1][1]**2)*mass
xy += self.inputxyz[i][1][0]*self.inputxyz[i][1][1]*mass
xz += self.inputxyz[i][1][0]*self.inputxyz[i][1][2]*mass
yz += self.inputxyz[i][1][1]*self.inputxyz[i][1][2]*mass
print("moment of interia for xx:", xx)
print("moment of interia for yy:", yy)
print("moment of interia for zz:", zz)
print("moment of interia for xy:", xy)
print("moment of interia for xz:", xz)
print("moment of interia for yz:", yz)
tensor = np.zeros((3,3))
tensor[0][0] = xx
tensor[0][1] = tensor[1][0] = xy
tensor[1][1] = yy
tensor[0][2] = tensor[2][0] = xz
tensor[2][2] = zz
tensor[1][2] = tensor[2][1] = yz
print("Inertia tensor:\n", tensor)
evalues, vec = LA.eig(tensor) ###only for origin in pyscf calc
#evalues, vec = LA.eigh(tensor)
print(evalues)
print(" Principal axes and moments of inertia in amu*Bohr^2:")
print("Eigenvalues: \n", evalues*1.88973*1.88973)
#vec[:, [2, 0]] = vec[:, [0, 2]]
xyz = np.array(["X", "Y", "Z"])
print(xyz[0], vec[0])
print(xyz[1], vec[1])
print(xyz[2], vec[2])
#compute rotational constants
conv = (6.626755E-34/(8*np.pi**2))/1.6605402E-27 #kg -> amu, cancel out all masses
conv_final = (conv*1E20)/2.99792458E10 #B^2 -> A^2 -> m^2, cancel out all lengths, speed of light cm/s
self.origin_vec = np.sqrt(conv/evalues) #units of Bohr
print("Pyscf origin vector:", self.origin_vec)
rotate_const = conv_final/evalues
print("Rotational constants (units: cm-1)\n", rotate_const)
#generating internal coordinates to sep out R and T modes
#self.int_coords(vec)
def com(self):
""" This is translating the origin of fragment to the center of mass.
This will also update the coordinates for self.inputxyz to be in the center of mass basis.
"""
first = 0
second = 0
for i in range(0, len(self.inputxyz)):
x = element(self.inputxyz[i][0])
mass = x.atomic_weight
first += np.array(self.inputxyz[i][1])*mass
second += mass
self.center = (first/second)
#update coordinates to COM in Bohr
#for j in range(0, len(self.inputxyz)):
# self.inputxyz[j][1] = np.array(self.inputxyz[j][1]) - self.center
return self.center
# def int_coords(self, X):
# """" Generate coordinates in teh rotating and translating frame.
#
# This was trying to match Gaussian's way of computing the frequencies, taking out
# the rotational and translational modes, and IR intensities.
# """
# R = np.zeros((len(self.inputxyz), 3)) #Coords in COM
# M = np.zeros((len(self.inputxyz), 3)) #Mass 3x3 matrix with m^1/2
# T = np.zeros((len(self.inputxyz), 3)) #Translation matrix 3x3
# D = np.zeros((len(self.inputxyz)*3, 6))
# D1 = np.array([1, 0, 0, 1, 0, 0, 1, 0, 0]).reshape((3,3))
# D2 = np.array([0, 1, 0, 0, 1, 0, 0, 1, 0]).reshape((3,3))
# D3 = np.array([0, 0, 1, 0, 0, 1, 0, 0, 1]).reshape((3,3))
#
# for i in range(0, R.shape[0]):
# x = element(self.inputxyz[i][0])
# mass = np.sqrt(x.atomic_weight)
# M[i][i] = mass
# D1[i] = D1[i]*mass
# D2[i] = D2[i]*mass
# D3[i] = D3[i]*mass
# R[i] = np.array(self.inputxyz[i][1])
# P = np.dot(R, X.T)
# D1 = D1.flatten()
# D2 = D2.flatten()
# D3 = D3.flatten()
# D4 = np.dot(np.outer(P[:,1], X[2]) - np.outer(P[:,2], X[1]), M).flatten()
# print("D4:\n", np.dot(np.outer(P[:,1], X[2]) - np.outer(P[:,2], X[1]), M))
# print("D5\n", np.dot(np.outer(P[:,2], X[0]) - np.outer(P[:,0], X[2]), M))
# D5 = np.dot(np.outer(P[:,2], X[0]) - np.outer(P[:,0], X[2]), M).flatten()
# print("D6\n", np.dot(np.outer(P[:,0], X[1]) - np.outer(P[:,1], X[0]), M))
# D6 = np.dot(np.outer(P[:,0], X[1]) - np.outer(P[:,1], X[0]), M).flatten()
# #print("D1\n", D1)
# #print("D2\n", D2)
# #print("D3\n", D3)
# #print("D4\n", D4)
# #print("D5\n", D5)
# #print("D6\n", D6)
# #print(D[:,0].shape)
# #print(D1.shape)
# D[:,0] = D1
# D[:,1] = D2
# D[:,2] = D3
# D[:,3] = D4
# D[:,4] = D5
# D[:,5] = D6
# #print(D, D.shape)
#
# #normalize D tensor
# for j in range(0, D.shape[1]):
# norm = 0
# scalar = np.dot(D[:,j].T, D[:,j])
# print(scalar)
# if scalar < 1E-8:
# continue
# else:
# norm = 1/np.sqrt(scalar)
# D[:,j] = D[:,j]*norm
#
# q, r = np.linalg.qr(D)
# print(q, q.shape)
# #exit()
def apt_grad(self):
""" Working on implementing this.
        Function to create the APTs by applying an electric field in a certain direction to
        the molecule, then taking a finite difference of the gradient w.r.t. the applied E field.
Returns
-------
apt_grad : ndarray (3N, 3)
The deriv of gradient w.r.t applied field after LA projections are done.
"""
extra_dip = self.qc_class.get_dipole(self.inputxyz)[0]
#e_field = 1.889725E-4 #Got this number from Qchem
e_field = 0.001
E = [0, 0, 0]
energy_vec = np.zeros((3))
apt = np.zeros((3, ((len(self.prims)+len(self.notes))*3)))
nucapt = np.zeros((3, ((len(self.prims)+len(self.notes))*3)))
nuc3 = np.zeros((3))
for i in range(0, 3):
#no field
e1, g1, dip, n, g_nuc, g_elec = self.qc_class.apply_field(E, self.inputxyz, self.center, self.origin_vec, i) #no field
print("\n############ Field applied in the ", i, "direction ###############\n")
#positive direction field
E[i] = e_field
e2, g2, dipole2, nuc2, g_nuc2, g_elec2 = self.qc_class.apply_field(E, self.inputxyz, self.center, self.origin_vec, i) #positive direction
#negative direction field
E[i] = -1*e_field
e3, g3, dipole3, nuc, g_nuc3, g_elec3 = self.qc_class.apply_field(E, self.inputxyz, self.center, self.origin_vec, i) #neg direction
#setting field back to zero
E[i] = 0
print(g1)
print(g2)
print(g3)
#central finite diff of gradient, a.u. -> Debye
#print("positive grad:\n", g3, "\n Negative grad:\n", g2, "\n")
gradient1 = ((g3-g2)/(2*e_field))/0.3934303
print("$$$$$$$$$$$\n", gradient1)
#add nuclear gradient to electronic
gradient = g_nuc/0.393430 - gradient1 #for pyscf
#checking finite diff of E w.r.t field (should be dipole moment)
energy2 = (e2-e3)/(2*e_field)
energy_vec[i] = energy2/0.393430 #a.u.(E_field) -> Debye, may need a neg sign
#Subtracting elec dip from nuclear dip moment
newvec = energy_vec #for psi4
#newvec = nuc3 - energy_vec #for pyscf
print("\nElectronic energy vec (Debye):", energy_vec, np.linalg.norm(energy_vec))
print("\nNuclear dipole moment energy vec (Debye):", nuc3, np.linalg.norm(nuc3))
print("\nDipole moment energy vec (Debye):", newvec, np.linalg.norm(newvec))
print("\nDipole moment from no field (Debye):\n", extra_dip, np.linalg.norm(extra_dip))
print("\ngradient no field", g1, "\n")
print("\ngradient elec after finite diff:\n", gradient1)
print("\ngradient nuc after finite diff:\n", g_nuc/0.393430)
print("\ng_nuc - g_elec:\n", gradient)
apt[i] = gradient1.flatten()
#apt[i] = gradient.flatten() #nuclear and electronic grad
#mass weight APT
mass_apt = apt.T
#Do link atom projection, multiply by local and principle inclusion/exculsion coefficients
reshape_mass_hess = self.jacobian_hess.transpose(0, 2, 1, 3)
jac_apt = reshape_mass_hess.reshape(reshape_mass_hess.shape[0]*reshape_mass_hess.shape[1],reshape_mass_hess.shape[2]*reshape_mass_hess.shape[3])
apt_grad = np.dot(self.M, self.local_coeff*self.coeff*np.dot(jac_apt, mass_apt))
return apt_grad
def build_apt(self):
"""
Builds the atomic polar tensor with numerical derivative of dipole moment w.r.t atomic Cartesian
coordinates. Function builds xyz input with link atoms in ndarray format, not string type or list like previous functions.
Units of APT: Debye / (Angstrom np.sqrt(amu))
Returns
-------
oldapt: ndarray (3N, 3)
This is the mass weighted APT for current fragment after LA projections are done.
"""
apt = []
for atom in range(0, len(self.prims)+len(self.notes)): #atom interation
storing_vec = np.zeros((3,3))
y = element(self.inputxyz[atom][0])
value = 1/(np.sqrt(y.atomic_weight))
for comp in range(0,3): #xyz interation
self.inputxyz[atom][1][comp] = self.inputxyz[atom][1][comp]+self.step_size
dip1, nuc1 = self.qc_class.get_dipole(self.inputxyz)
self.inputxyz[atom][1][comp] = self.inputxyz[atom][1][comp]-2*self.step_size
dip2, nuc2 = self.qc_class.get_dipole(self.inputxyz)
vec = (dip1 - dip2)/(2*self.step_size)
storing_vec[comp] = vec
self.inputxyz[atom][1][comp] = self.inputxyz[atom][1][comp]+self.step_size
apt.append(storing_vec)
px = np.vstack(apt)
reshape_mass_hess = self.jacobian_hess.transpose(0, 2, 1, 3)
jac_apt = reshape_mass_hess.reshape(reshape_mass_hess.shape[0]*reshape_mass_hess.shape[1],reshape_mass_hess.shape[2]*reshape_mass_hess.shape[3])
oldapt = np.dot(self.M, self.local_coeff*self.coeff*np.dot(jac_apt, px)) #mass weight here and LA projection
return oldapt
def mass_matrix(self):
M = np.zeros((self.molecule.natoms*3, self.molecule.natoms*3))
counter = np.array([0, 1, 2])
for i in range(0, self.molecule.natoms):
x = element(self.molecule.atomtable[i][0])
value = 1/(np.sqrt(x.atomic_weight))
for j in counter:
M[j][j] = value
counter = counter + 3
self.M = M
return self.M
def mw_hessian(self, full_hessian):
"""
Will compute the mass-weighted hessian, frequencies, and
normal modes for the full system.
Parameters
----------
full_hessian : ndarray
This is the full hessian for the full molecule.
Returns
-------
freq : ndarray
1D np array holding the frequencies
modes : ndarray
2D ndarray holding normal modes in the columns
"""
np.set_printoptions(suppress=True)
first = np.dot(full_hessian, self.M) #shape (3N,3N) x (3N, 3N)
second = np.dot(self.M, first) #shape (3N,3N) x (3N, 3N)
e_values, modes = LA.eigh(second)
print("\nEvalues of hessian [H/Bohr^2]):\n", e_values)
#unit conversion of freq from H/B**2 amu -> 1/s**2
#factor = (4.3597482*10**-18)/(1.6603145*10**-27)/(1.0*10**-20) # Hartreee->J, amu->kg, Angstrom->m
factor = (1.8897259886**2)*(4.3597482*10**-18)/(1.6603145*10**-27)/(1.0*10**-10)**2 #Bohr->Angstrom, Hartreee->J, amu->kg, Angstrom->m
freq = (np.sqrt(e_values*factor))/(2*np.pi*2.9979*10**10) #1/s^2 -> cm-1
return freq, modes, self.M, e_values
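if __name__ == '__main__':
    # Standalone sketch of the mass-weighting and frequency conversion used in
    # mw_hessian above; the random "hessian" and masses are illustrative only.
    np.random.seed(0)
    n = 9                                  # 3 atoms x 3 Cartesian components
    H = np.random.rand(n, n)
    H = 0.5 * (H + H.T)                    # symmetrize
    M = np.eye(n) / np.sqrt(1.008)         # 1/sqrt(mass) for three H atoms
    e_values, modes = LA.eigh(M.dot(H).dot(M))
    factor = (1.8897259886**2)*(4.3597482*10**-18)/(1.6603145*10**-27)/(1.0*10**-10)**2
    freq = np.sqrt(np.abs(e_values)*factor)/(2*np.pi*2.9979*10**10)  # cm^-1
    print(freq)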
|
UTF-8
|
Python
| false | false | 23,314 |
py
| 58 |
Fragment.py
| 43 | 0.548555 | 0.521832 | 0 | 560 | 40.630357 | 152 |
organichacker/KOU_Python
| 9,577,777,116,310 |
2bd1d67111895ed1cc8188d0b2386c95f2cb6ef4
|
98cd4b641929e35cd3482b058e00ef7d0a151126
|
/Asal_Sayi.py
|
8b959741b68a4af1634abb8fb6835afa2131e797
|
[] |
no_license
|
https://github.com/organichacker/KOU_Python
|
60abaa9cf5eec1d0509d9d7113f152ee60e30cfa
|
d4534d94ef7a32c2e684f5baaac2a7ff30dee96b
|
refs/heads/main
| 2023-04-25T09:01:51.586110 | 2021-05-22T14:20:48 | 2021-05-22T14:20:48 | 365,274,296 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
sayac = 0
liste = []
# read five integers, then report whether each is prime
for i in range(5):
    a = int(input("Sayi:"))
    liste.append(a)
for r in liste:
    sayac = 0  # reset the divisor count for each number
    for j in range(2, r):
        if r % j == 0:
            sayac += 1
    if r == 1:
        print("{} sayısı asal değildir.".format(r))  # 1 is not prime
    elif r > 1 and sayac == 0:
        print("{} sayısı asaldır.".format(r))  # prime
    else:
        print("{} sayısı asal değildir.".format(r))  # not prime
|
UTF-8
|
Python
| false | false | 433 |
py
| 9 |
Asal_Sayi.py
| 9 | 0.478774 | 0.459906 | 0 | 17 | 23.941176 | 56 |
ilofy/Py-Library-NFC
| 3,788,161,161,949 |
15e553de2a653c20fb13ea0277c9f3c5570476e9
|
dbbf2162e2300b3ededdf119c725cc7379059b73
|
/setup.py
|
2cf031514c445acb9eddad96cf8674c11e1e9fc6
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
https://github.com/ilofy/Py-Library-NFC
|
46075390e34f0e3ba6211832c191db94dfebf055
|
45c8a8740c88b8b59f007d398dfb543d8470eb48
|
refs/heads/master
| 2023-03-17T02:16:21.063571 | 2021-03-02T08:13:45 | 2021-03-02T08:13:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="yongshi-pynfc",
version="0.4.3",
author="Michael-Yongshi",
author_email="4registration@outlook.com",
description="A nfc library for python based solely on pyscard to communicate with the nfc card and ndeflib to arrange encoding and decoding of messages",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Michael-Yongshi/Py-Library-NFC",
packages=setuptools.find_packages(),
data_files=[
(os.path.join('pynfc'), [
os.path.join('pynfc', 'nfc_communication.json'),
])
],
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
"Operating System :: OS Independent",
],
python_requires='>=3.3',
)
|
UTF-8
|
Python
| false | false | 992 |
py
| 8 |
setup.py
| 6 | 0.650202 | 0.641129 | 0 | 29 | 33.241379 | 157 |
dibdidib/lcpy
| 4,569,845,207,664 |
cb04f9c09318b8bdba33dd0fde9ef019922e43b6
|
b36832512e653063f078e2fbadc820f8d0f9c012
|
/074_search_2D_matrix.py
|
9117e9a2f30a9643d5718f77daea7dcab50448ad
|
[] |
no_license
|
https://github.com/dibdidib/lcpy
|
1bf570bda8b8b231f86551e9bf82867deaeeb926
|
4500ebd5bc885ae6a83c8b7bff71fbf24d747e21
|
refs/heads/master
| 2018-10-14T16:58:09.882172 | 2018-10-08T13:48:22 | 2018-10-08T13:48:22 | 103,874,585 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from common import print_matrix
class Solution(object):
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
if not matrix or not matrix[0]:
return False
m, n = len(matrix), len(matrix[0])
lo, hi = 0, m * n - 1
while lo <= hi:
mid = (lo + hi) // 2
midval = matrix[mid // n][mid % n]
if target == midval:
return True
elif target < midval:
hi = mid - 1
else:
lo = mid + 1
return False
def searchMatrixV1(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
if not matrix or not matrix[0]:
return False
m, n = len(matrix), len(matrix[0])
i, j = 0, 0
while i < m and j < n:
if matrix[i][j] == target:
return True
elif i == m - 1 or target < matrix[i+1][j]:
j += 1
else:
i += 1
return False
if __name__ == '__main__':
s = Solution()
tests = [
(
[[1,3]],
3
),
(
[
[1, 3, 5, 7],
[10, 11, 16, 20],
[23, 30, 34, 50]
],
3
),
(
[
[1, 3, 5, 7],
[10, 11, 16, 20],
[23, 30, 34, 50]
],
4
),
(
[
[1, 4, 5, 7],
[10, 11, 16, 20],
[23, 30, 34, 50]
],
0
),
(
[
[1, 4, 5, 7],
[10, 11, 16, 20],
[23, 30, 34, 50]
],
99
),
]
for matrix, target in tests:
print_matrix(matrix)
print("target {} was{}found".format(
target,
" " if s.searchMatrix(matrix, target) else " not "
))
print()
|
UTF-8
|
Python
| false | false | 2,169 |
py
| 502 |
074_search_2D_matrix.py
| 499 | 0.333794 | 0.285846 | 0 | 90 | 23.088889 | 62 |
mardzien/python_backend_2021_04_17
| 18,777,597,042,159 |
26dce454c6370bbac8ace78415a7c674f3fea210
|
08b0ec28c96ca1604787a83e9b9fd3e97462939f
|
/Ćwiczenia/zjazd1/z_11.py
|
7f5d7e5609bb3c7d45eb802d5d8345434cfdc003
|
[] |
no_license
|
https://github.com/mardzien/python_backend_2021_04_17
|
f078b2c16c314d3308ac0f66f1cf9510ce8e221e
|
2a911f1360dec7e3662b26e35255304bba48f9df
|
refs/heads/master
| 2023-04-20T17:02:18.063249 | 2021-05-13T22:03:23 | 2021-05-13T22:03:23 | 358,820,622 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def mean_std(*args):
n = len(args)
if n == 0:
return None, None
X = sum(args) / n
var = sum((x - X) ** 2 for x in args) / n
sigma = var ** 0.5
return X, sigma
print(mean_std(4, 4, 4, 5, 5, 5))
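# For these six samples the mean is 4.5 and the population standard deviation
# is 0.5, so this prints (4.5, 0.5).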
|
UTF-8
|
Python
| false | false | 232 |
py
| 34 |
z_11.py
| 31 | 0.469828 | 0.426724 | 0 | 15 | 14.4 | 45 |
WilliamsTravis/Pasture-Rangeland-Forage
| 2,156,073,609,147 |
cacdaa6d7b19ec113b8176563d04708828dea082
|
a8995d89e9082c44fb273726fcc2cf98a6bd4556
|
/experiments/PRFUSDMmodel-online.py
|
5e882391070eddc2bcc38ee1bff5033d7a967827
|
[] |
no_license
|
https://github.com/WilliamsTravis/Pasture-Rangeland-Forage
|
d2f118c5428c6d01e78871ae9b61770601a9fa1c
|
933f58c43c6f9730f67e53989eb2d1b8f932ecaf
|
refs/heads/master
| 2021-04-28T19:13:12.198823 | 2018-07-29T19:36:48 | 2018-07-29T19:36:48 | 121,832,004 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Created on Thu Mar 15 23:00:32 2018
Basis Risk of PRF payouts by conditional probability with the USDM
The plan here is to take the weekly US Drought Monitor rasters and convert them
into bi-monthly values. I think I may use the weekly mode to start.
Then I will take the bi-monthly RMA and calculate the number of times each
cell recieves no payout when the drought monitor indicates drought of a
severity comparable to the strike level in the RMA
Things to do:
1) Place the Rainfall index payout triggers next to USDM DM categories
2) Put a tooltip to each graph
3) Run each parameter and store in s3 bucket
4) Consider a graph for each location
5) Weight ratio by number of PRF payout triggers
    i) Because a single miss does not tell us much...
ii) Perhaps simply multiply each ratio by the number of triggers and then
standardize?
6) Get a nation-wide figure that sums up the "basis risk" according to the USDM
i) average ratio (USDM pay: NOAA pay)?
@author: trwi0358
"""
# In[]:
# Import required libraries
############################ Get Functions ####################################
import os
os.chdir("c:\\users\\user\\github\\PRF-USDM")
from functions import *  # expected to supply np, pd, xr, dash, etc.
import warnings
warnings.filterwarnings("ignore")
############################ Get Data #########################################
grid = np.load("data\\prfgrid.npz")["grid"]
#source = xr.open_rasterio("data\\prfgrid.tif")
#source.to_netcdf("data\\source_array3.nc")
source = xr.open_dataarray("data\\source_array3.nc")
source_signal = '["data\\\\rainfall_indices.npz", 4, 0.7,100]'
states = np.load("data\\states.npz")["states"]
mask = np.load("data\\mask.npz")['mask']
statefps = pd.read_csv("data\\statefps.csv")
# Load pre-conditioned bi-monthly USDM modal category rasters into numpy arrays
with np.load("data\\usdm_arrays.npz") as data:
usdmodes = data.f.arr_0
data.close()
with np.load("data\\usdm_dates.npz") as data:
udates = data.f.arr_0
data.close()
usdmodes = [[str(udates[i]),usdmodes[i]] for i in range(len(usdmodes))]
###############################################################################
############################ Create the App Object ############################
###############################################################################
# Create Dash Application Object
app = dash.Dash(__name__)
# I really need to get my own stylesheet, if anyone knows how to do this...
app.css.append_css({'external_url': 'https://cdn.rawgit.com/plotly/dash-app-stylesheets/2d266c578d2a6e8850ebce48fdb52759b2aef506/stylesheet-oil-and-gas.css'}) # noqa: E501
# Create server object
server = app.server
# Create and initialize a cache for storing data - data pocket
#cache = Cache(config = {'CACHE_TYPE':'simple'})
#cache.init_app(server)
###############################################################################
############################ Create Lists and Dictionaries ####################
###############################################################################
# Index Paths
indices = [{'label':'Rainfall Index','value':'E:\\data\\droughtindices\\noaa\\nad83\\indexvalues\\'}
# {'label':'PDSI','value':'D:\\data\\droughtindices\\palmer\\pdsi\\nad83\\'},
# {'label':'PDSI-Self Calibrated','value':'D:\\data\\droughtindices\\palmer\\pdsisc\\nad83\\'},
# {'label':'Palmer Z Index','value':'D:\\data\\droughtindices\\palmer\\pdsiz\\nad83\\'},
# {'label':'EDDI-1','value':'D:\\data\\droughtindices\\eddi\\nad83\\monthly\\1month\\'},
# {'label':'EDDI-2','value': 'D:\\data\\droughtindices\\eddi\\nad83\\monthly\\2month\\'},
# {'label':'EDDI-3','value':'D:\\data\\droughtindices\\eddi\\nad83\\monthly\\3month\\'},
# {'label':'EDDI-6','value':'D:\\data\\droughtindices\\eddi\\nad83\\monthly\\6month\\'},
# {'label':'SPI-1' ,'value': 'D:\\data\\droughtindices\\spi\\nad83\\1month\\'},
# {'label':'SPI-2' ,'value': 'D:\\data\\droughtindices\\spi\\nad83\\2month\\'},
# {'label':'SPI-3' ,'value': 'D:\\data\\droughtindices\\spi\\nad83\\3month\\'},
# {'label':'SPI-6' ,'value': 'D:\\data\\droughtindices\\spi\\nad83\\6month\\'},
# {'label':'SPEI-1' ,'value': 'D:\\data\\droughtindices\\spei\\nad83\\1month\\'},
# {'label':'SPEI-2' ,'value': 'D:\\data\\droughtindices\\spei\\nad83\\2month\\'},
# {'label':'SPEI-3' ,'value': 'D:\\data\\droughtindices\\spei\\nad83\\3month\\'},
# {'label':'SPEI-6','value': 'D:\\data\\droughtindices\\spei\\nad83\\6month\\'}
]
# Index names, using the paths we already have. These are for titles.
indexnames = {'E:\\data\\droughtindices\\noaa\\nad83\\indexvalues\\': 'Rainfall Index',
# 'D:\\data\\droughtindices\\palmer\\pdsi\\nad83\\': 'Palmer Drought Severity Index',
# 'D:\\data\\droughtindices\\palmer\\pdsisc\\nad83\\': 'Self-Calibrated Palmer Drought Severity Index',
# 'D:\\data\\droughtindices\\palmer\\pdsiz\\nad83\\': 'Palmer Z Index',
# 'D:\\data\\droughtindices\\eddi\\nad83\\monthly\\1month\\':'Evaporative Demand Drought Index - 1 month',
# 'D:\\data\\droughtindices\\eddi\\nad83\\monthly\\2month\\':'Evaporative Demand Drought Index - 2 month',
# 'D:\\data\\droughtindices\\eddi\\nad83\\monthly\\3month\\':'Evaporative Demand Drought Index - 3 month',
# 'D:\\data\\droughtindices\\eddi\\nad83\\monthly\\6month\\':'Evaporative Demand Drought Index - 6 month',
# 'D:\\data\\droughtindices\\spi\\nad83\\1month\\':'Standardized Precipitation Index - 1 month',
# 'D:\\data\\droughtindices\\spi\\nad83\\2month\\':'Standardized Precipitation Index - 2 month',
# 'D:\\data\\droughtindices\\spi\\nad83\\3month\\':'Standardized Precipitation Index - 3 month',
# 'D:\\data\\droughtindices\\spi\\nad83\\6month\\':'Standardized Precipitation Index - 6 month',
# 'D:\\data\\droughtindices\\spei\\nad83\\1month\\': 'Standardized Precipitation-Evapotranspiration Index - 1 month',
# 'D:\\data\\droughtindices\\spei\\nad83\\2month\\': 'Standardized Precipitation-Evapotranspiration Index - 2 month',
# 'D:\\data\\droughtindices\\spei\\nad83\\3month\\': 'Standardized Precipitation-Evapotranspiration Index - 3 month',
# 'D:\\data\\droughtindices\\spei\\nad83\\6month\\': 'Standardized Precipitation-Evapotranspiration Index - 6 month'
}
# State options
statefps = statefps.sort_values('state')
statefps = statefps.reset_index()
stateoptions = [{'label':statefps['state'][i],'value':statefps['statefp'][i]} for i in range(len(statefps['state']))]
stateoptions.insert(0,{'label':'All','value':100})
stateoptions.remove({'label':'District of Columbia','value':8})
# Data Summary
datatable = pd.read_csv("data\\state_risks.csv",index_col=0)
datatable = datatable.dropna()
datatable = datatable[datatable.State != 'District of Columbia'].to_dict('RECORDS')
columnkey = [{'label':'Strike Level: Rainfall Index Strike Level','value': 1},
{'label':'DM Category: Drought Monitor Drought Severity Category','value': 2},
{'label':'Missed (sum): Total Number of times the rainfall index would not have paid given the chosen US Drought Monitor Severity Category','value': 3},
{'label':'Missed (ratio): Ratio between the number of times the USDM reached the chosen drought category and the numbers of time rainfall index would not have paid','value': 4},
{'label':'Strike Events: Number of times the rainfall index fell below the strike level','value': 5},
{'label':'DM Events: Number of times the USDM reached the chosen category','value': 6}]
# Strike levels
strikes = [{'label':'70%','value':.70},
{'label':'75%','value':.75},
{'label':'80%','value':.80},
{'label':'85%','value':.85},
{'label':'90%','value':.90}]
DMs = [{'label':'D4','value':4},
{'label':'D3','value':3},
{'label':'D2','value':2},
{'label':'D1','value':1},
{'label':'D0','value':0}]
DMlabels = {0:'D0',
1:'D1',
2:'D2',
3:'D3',
4:'D4'}
## Create Coordinate Index - because I can't find the array position in the
# click event!
xs = range(300)
ys = range(120)
lons = [-130 + .25*x for x in range(0,300)]
lats = [49.75 - .25*x for x in range(0,120)]
londict = dict(zip(lons, xs))
latdict = dict(zip(lats, ys))
londict2 = {y: x for x, y in londict.items()}  # reverse map: column index -> longitude
latdict2 = {y: x for x, y in latdict.items()}  # reverse map: row index -> latitude
# Descriptions
raininfo = "The number of times the rainfall index fell below the chosen strike level."
dminfo = "The number of times the Drought Monitor reached the chosen drought severity category."
countinfo = "The number of times the Drought Monitor reached or exceeded the chosen drought severity category and the rainfall index did not fall below the chosen strike level."
ratioinfo = "The ratio between the number of times the rainfall index at the chosen strike level would not have paid during a drought according to the chosen drought severity category and the number of times that category category was met or exceeded. Only locations with 10 or more drought events are included."
# Create global chart template
mapbox_access_token = 'pk.eyJ1IjoidHJhdmlzc2l1cyIsImEiOiJjamZiaHh4b28waXNkMnptaWlwcHZvdzdoIn0.9pxpgXxyyhM6qEF_dcyjIQ'
# Map Layout:
layout = dict(
autosize=True,
height=500,
font=dict(color='#CCCCCC'),
titlefont=dict(color='#CCCCCC', size='20'),
margin=dict(
l=10,
r=10,
b=35,
t=55
),
hovermode="closest",
plot_bgcolor="#191A1A",
paper_bgcolor="#020202",
legend=dict(font=dict(size=10), orientation='h'),
title='Potential Payout Frequencies',
mapbox=dict(
accesstoken=mapbox_access_token,
style="dark",
center=dict(
lon= -95.7,
lat= 37.1
),
zoom=3,
)
)
# In[]:
# Create app layout
app.layout = html.Div(
[ html.Div(# Pictures
[
html.Img(
src = "https://github.com/WilliamsTravis/Pasture-Rangeland-Forage/blob/master/data/earthlab.png?raw=true",
className='one columns',
style={
'height': '100',
'width': '225',
'float': 'right',
'position': 'relative',
},
),
html.Img(
src = "https://github.com/WilliamsTravis/Pasture-Rangeland-Forage/blob/master/data/wwa_logo2015.png?raw=true",
className='one columns',
style={
'height': '100',
'width': '300',
'float': 'right',
'position': 'relative',
},
),
],
className = "row",
),
html.Div(# One
[
html.H1(
'Pasture, Rangeland, and Forage Insurance and the US Drought Monitor: Risk of Non-Payment During Drought',
className='eight columns',
),
],
className='row'
),
html.Div(# Four
[
html.Div(# Four-a
[
html.P('Drought Index'),
dcc.Dropdown(
id = 'index_choice',
options = indices,
multi = False,
value = "rainfall_arrays"
),
html.P("Filter by State"),
dcc.Dropdown(
id = "state_choice",
options = stateoptions,
value = 100,
multi = True,
searchable = True
),
html.Button(id='submit', type='submit', n_clicks = 0, children='submit')
],
className='six columns',
style = {'margin-top':'20'},
),
html.Div(# Four-a
[
html.P('RMA Strike Level'),
dcc.RadioItems(
id='strike_level',
options=strikes,
value=.85,
labelStyle={'display': 'inline-block'}
),
html.P('USDM Category'),
dcc.RadioItems(
id='usdm_level',
options=DMs,
value=1,
labelStyle={'display': 'inline-block'}
),
],
className='six columns',
style = {'margin-top':'20'},
),
],
className = 'row'
),
html.Div(#Six
[
html.Div(#Six-a
[
dcc.Graph(id='rain_graph'),
html.Button(title = raininfo,
type='button',
children='Map Info \uFE56 (Hover)'),
],
className='six columns',
style={'margin-top': '10'}
),
html.Div(#Six-a
[
dcc.Graph(id='drought_graph'),
html.Button(title = dminfo,
type='button',
children='Map Info \uFE56 (Hover)'),
],
className='six columns',
style={'margin-top': '10'}
),
#
],
className='row'
),
html.Div(#Six
[
html.Div(#Six-a
[
dcc.Graph(id='hit_graph'),
html.Button(title = countinfo,
type='button',
children='Map Info \uFE56 (Hover)'),
],
className='six columns',
style={'margin-top': '10'}
),
html.Div(#Six-a
[
dcc.Graph(id='basis_graph'),
html.Button(title = ratioinfo,
type='button',
children='Map Info \uFE56 (Hover)'),
],
className='six columns',
style={'margin-top': '10'}
),
],
className='row'
),
# Data Table
html.Div(#Seven
[
html.Div(
[ html.H1(" "),
html.H4('Summary Statistics'),
html.H5("Column Key"),
dcc.Dropdown(options = columnkey,
placeholder = "Column Name: Description"),
dt.DataTable(
rows = datatable,
id = "summary_table",
editable=False,
filterable=True,
sortable=True,
row_selectable=True,
# min_width = 1655,
)
],
className='twelve columns',
style={'width':'100%',
'display': 'inline-block',
'padding': '0 20'},
),
],
className='row'
),
html.Div(id='signal', style={'display': 'none'})
],
className='ten columns offset-by-one'
)
# In[]:
###############################################################################
######################### Create Cache ########################################
###############################################################################
#@cache.memoize()
def global_store(signal):
# Transform the argument list back to normal
# if not signal:
# signal = source_signal
signal = json.loads(signal)
# Unpack signals
index_choice = signal[0]
usdm_level = signal[1]
strike_level = signal[2]
statefilter = signal[3]
print("####################" + str(statefilter))
if type(statefilter) != list:
statefilter2 = []
statefilter2.append(statefilter)
statefilter = statefilter2
## Get the index to compare to usdm - later there will be many choices
# Load Rainfall Index
with np.load("data\\rainfall_indices.npz") as data:
indexlist = data.f.arr_0
data.close()
with np.load("data\\rainfall_dates.npz") as data:
rdates = data.f.arr_0
data.close()
indexlist = [[str(rdates[i]),indexlist[i]] for i in range(len(indexlist))]
    # Now check the two against each other; first, match the time steps
udates = [m[0][-6:] for m in usdmodes]
indexlist = [i for i in indexlist if i[0][-6:] in udates]
idates = [m[0][-6:] for m in indexlist]
usdms = [u for u in usdmodes if u[0][-6:] in idates]
# Create a list of monthly arrays with 1's for the scenario
risks = [basisCheck(usdm = usdms[i],noaa = indexlist[i],
strike = strike_level, dm = usdm_level) for i in range(len(usdms))]
# Sum them up
hits = np.nansum(risks,axis = 0)*mask
# Create a list of monthly arrays with 1's for droughts
droughts = [droughtCheck(usdm = usdmodes[i],dm = usdm_level) for i in range(len(usdmodes))]
rainbelow = [droughtCheck2(rain = indexlist[i],strike = strike_level) for i in range(len(indexlist))]
# Sum and divide by time steps
droughtchances = np.nansum(droughts,axis = 0)*mask
rainchance = np.nansum(rainbelow,axis = 0)*mask
    # Final basis risk according to the USDM and Muneepeerakul et al.'s method
basisrisk = hits/droughtchances
# Possible threshold for inclusion
# select only those cells with 10 or more dm events
threshold = np.copy(droughtchances)
threshold[threshold<10] = np.nan
threshold = threshold*0+1
basisrisk = basisrisk * threshold
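    # Illustrative numbers (not taken from the data): if a cell saw 20
    # droughts at the chosen USDM category (droughtchances = 20) and the
    # rainfall index failed to trigger during 5 of them (hits = 5), then
    # basisrisk = 5 / 20 = 0.25, i.e. a 25% chance of non-payment during
    # drought at that strike level; cells with fewer than 10 drought
    # events were just masked out above.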
# Filter if a state or states were selected
    if statefilter == [100]:  # "All" states selected
statemask = np.copy(states)
statemask = statemask*0+1
typeof = str(type(statefilter)) + str(statefilter)
elif "," not in str(statefilter):
statemask = np.copy(states)
statelocs = np.where(statemask == statefilter)
statemask[statelocs] = 999
statemask[statemask < 999] = np.nan
statemask = statemask*0+1
typeof = str(type(statefilter)) + str(statefilter)
else:
print("!")
statemask = np.copy(states)
statelocs = [np.where(statemask==f) for f in statefilter]
statelocs1 = np.concatenate([statelocs[i][0]for i in range(len(statelocs))])
statelocs2 = np.concatenate([statelocs[i][1] for i in range(len(statelocs))])
statelocs = [statelocs1,statelocs2]
statemask[statelocs] = 999
statemask[statemask < 999] = np.nan
statemask = statemask*0+1
typeof = str(type(statefilter)) + str(statefilter)
# Package Returns for later
df = [basisrisk*statemask, droughtchances*statemask,hits*statemask,rainchance*statemask]
return df
def retrieve_data(signal):
df = global_store(signal)
return df
# Store the data in the cache and hide the signal to activate it in the hidden div
@app.callback(Output('signal', 'children'),
[Input('submit','n_clicks')],
[State('index_choice','value'),
State('usdm_level','value'),
State('strike_level','value'),
State('state_choice','value')])
def compute_value(click,index_choice,usdm_level,strike_level,state_choice):
# Package the function arguments
signal = json.dumps([index_choice,usdm_level,strike_level,state_choice])
# compute value and send a signal when done
global_store(signal)
return signal
# In[]:
###############################################################################
######################### Graph Builders ######################################
###############################################################################
@app.callback(Output('rain_graph', 'figure'),
[Input('signal','children')])
def rainGraph(signal):
"""
This will be a map of PRF Payout frequencies at the chosen strike level
"""
# Get data
if not signal:
signal = source_signal
df = retrieve_data(signal)
# Transform the argument list back to normal
signal = json.loads(signal)
# Unpack signals
index_choice = signal[0]
usdm_level = signal[1]
strike_level = signal[2]
# Get desired array
payouts = df[3]
    # Second, convert data back into an array, but in a form xarray recognizes
array = np.array([payouts],dtype = "float32")
# Third, change the source array to this one. Source is defined up top
source.data = array
# Fourth, bin the values into lat, long points for the dataframe
dfs = xr.DataArray(source, name = "data")
pdf = dfs.to_dataframe()
step = .25
to_bin = lambda x: np.floor(x / step) * step
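    # Illustrative example: with step = .25, to_bin(37.62) returns 37.5,
    # snapping each coordinate down to the quarter-degree grid that
    # londict/latdict were built on.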
# pdf['data'] = pdf['data'].fillna(999)
# pdf['data'] = pdf['data'].astype(int)
# pdf['data'] = pdf['data'].astype(str)
# pdf['data'] = pdf['data'].replace('-1', np.nan)
pdf["latbin"] = pdf.index.get_level_values('y').map(to_bin)
pdf["lonbin"] = pdf.index.get_level_values('x').map(to_bin)
pdf['gridx']= pdf['lonbin'].map(londict)
pdf['gridy']= pdf['latbin'].map(latdict)
grid2 = np.copy(grid)
grid2[np.isnan(grid2)] = 0
pdf['grid'] = grid2[pdf['gridy'],pdf['gridx']]
pdf['grid'] = pdf['grid'].apply(int)
pdf['grid'] = pdf['grid'].apply(str)
pdf['printdata1'] = "Grid ID#: "
pdf['printdata'] = "<br> Data: "
pdf['grid2'] = pdf['printdata1'] + pdf['grid'] +pdf['printdata'] + pdf['data'].apply(str)
groups = pdf.groupby(("latbin", "lonbin"))
df_flat = pdf.drop_duplicates(subset=['latbin', 'lonbin'])
df= df_flat[np.isfinite(df_flat['data'])]
# Add Grid IDs
colorscale = [[0, 'rgb(2, 0, 68)'], [0.35, 'rgb(17, 123, 215)'],# Make darker (pretty sure this one)
[0.45, 'rgb(37, 180, 167)'], [0.55, 'rgb(134, 191, 118)'],
[0.7, 'rgb(249, 210, 41)'], [1.0, 'rgb(255, 249, 0)']] # Make darker
# Create the scattermapbox object
data = [
dict(
type = 'scattermapbox',
# locationmode = 'USA-states',
lon = df['lonbin'],
lat = df['latbin'],
text = df['grid2'],
mode = 'markers',
marker = dict(
colorscale = colorscale,
cmin = 0,
color = df['data'],
cmax = df['data'].max(),
opacity=0.85,
colorbar=dict(
title= "Frequency",
textposition = "auto",
orientation = "h"
)
)
)]
layout['title'] = " Rainfall Index | Sub %" + str(int(strike_level*100)) + " Frequency"
layout['mapbox']['zoom'] = 2
# Seventh wrap the data and layout into one
figure = dict(data=data, layout=layout)
# return {'figure':figure,'info': index_package_all}
return figure
# In[]:
@app.callback(Output('drought_graph', 'figure'),
[Input('signal','children')])
def droughtGraph(signal):
"""
This will be the drought occurrence map, in order to map over mapbox we are
creating a scattermapbox object.
"""
# Get data
df = retrieve_data(signal)
# Transform the argument list back to normal
signal = json.loads(signal)
# Unpack signals
index_choice = signal[0]
usdm_level = signal[1]
strike_level = signal[2]
# Get desired array
droughtchances = df[1]
    # Second, convert data back into an array, but in a form xarray recognizes
array = np.array([droughtchances],dtype = "float32")
# Third, change the source array to this one. Source is defined up top
source.data = array
# Fourth, bin the values into lat, long points for the dataframe
dfs = xr.DataArray(source, name = "data")
pdf = dfs.to_dataframe()
step = .25
to_bin = lambda x: np.floor(x / step) * step
pdf["latbin"] = pdf.index.get_level_values('y').map(to_bin)
pdf["lonbin"] = pdf.index.get_level_values('x').map(to_bin)
pdf['gridx']= pdf['lonbin'].map(londict)
pdf['gridy']= pdf['latbin'].map(latdict)
grid2 = np.copy(grid)
grid2[np.isnan(grid2)] = 0
pdf['grid'] = grid2[pdf['gridy'],pdf['gridx']]
pdf['grid'] = pdf['grid'].apply(int)
pdf['grid'] = pdf['grid'].apply(str)
pdf['printdata1'] = "Grid ID#: "
pdf['printdata'] = "<br> Data: "
pdf['grid2'] = pdf['printdata1'] + pdf['grid'] +pdf['printdata'] + pdf['data'].apply(str)
groups = pdf.groupby(("latbin", "lonbin"))
df_flat = pdf.drop_duplicates(subset=['latbin', 'lonbin'])
df= df_flat[np.isfinite(df_flat['data'])]
# Add Grid IDs
colorscale = [[0, 'rgb(2, 0, 68)'], [0.35, 'rgb(17, 123, 215)'],# Make darker (pretty sure this one)
[0.45, 'rgb(37, 180, 167)'], [0.55, 'rgb(134, 191, 118)'],
[0.7, 'rgb(249, 210, 41)'], [1.0, 'rgb(255, 249, 0)']] # Make darker
# Create the scattermapbox object
data = [
dict(
type = 'scattermapbox',
# locationmode = 'USA-states',
lon = df['lonbin'],
lat = df['latbin'],
text = df['grid2'],
mode = 'markers',
marker = dict(
colorscale = colorscale,
cmin = 0,
color = df['data'],
cmax = df['data'].max(),
opacity=0.85,
colorbar=dict(
title= "Frequency",
textposition = "auto",
orientation = "h"
)
)
)]
layout['title'] = "USDM | " + DMlabels.get(usdm_level) +"+ Drought Frequency"
layout['mapbox']['zoom'] = 2
# Seventh wrap the data and layout into one
figure = dict(data=data, layout=layout)
# return {'figure':figure,'info': index_package_all}
return figure
# In[]:
@app.callback(Output('hit_graph', 'figure'),
[Input('signal','children')])
def riskcountGraph(signal):
"""
    This is the non-payment count map.
"""
# Get data
df = retrieve_data(signal)
# Transform the argument list back to normal
signal = json.loads(signal)
# Unpack signals
index_choice = signal[0]
usdm_level = signal[1]
strike_level = signal[2]
# Get desired array
[basisrisk, droughtchances, hits, rainchance] = df
    # Second, convert data back into an array, but in a form xarray recognizes
array = np.array([hits],dtype = "float32")
# Third, change the source array to this one. Source is defined up top
source.data = array
# Fourth, bin the values into lat, long points for the dataframe
dfs = xr.DataArray(source, name = "data")
pdf = dfs.to_dataframe()
step = .25
to_bin = lambda x: np.floor(x / step) * step
pdf["latbin"] = pdf.index.get_level_values('y').map(to_bin)
pdf["lonbin"] = pdf.index.get_level_values('x').map(to_bin)
pdf['gridx']= pdf['lonbin'].map(londict)
pdf['gridy']= pdf['latbin'].map(latdict)
grid2 = np.copy(grid)
grid2[np.isnan(grid2)] = 0
pdf['grid'] = grid2[pdf['gridy'],pdf['gridx']]
pdf['grid'] = pdf['grid'].apply(int)
pdf['grid'] = pdf['grid'].apply(str)
pdf['printdata1'] = "Grid ID#: "
pdf['printdata'] = "<br> Data: "
pdf['grid2'] = pdf['printdata1'] + pdf['grid'] +pdf['printdata'] + pdf['data'].apply(str)
groups = pdf.groupby(("latbin", "lonbin"))
df_flat = pdf.drop_duplicates(subset=['latbin', 'lonbin'])
df= df_flat[np.isfinite(df_flat['data'])]
# Add Grid IDs
colorscale = [[0, 'rgb(2, 0, 68)'], [0.35, 'rgb(17, 123, 215)'],# Make darker (pretty sure this one)
[0.45, 'rgb(37, 180, 167)'], [0.55, 'rgb(134, 191, 118)'],
[0.7, 'rgb(249, 210, 41)'], [1.0, 'rgb(255, 249, 0)']] # Make darker
# Create the scattermapbox object
data = [
dict(
type = 'scattermapbox',
# locationmode = 'USA-states',
lon = df['lonbin'],
lat = df['latbin'],
text = df['grid2'],
mode = 'markers',
marker = dict(
colorscale = colorscale,
cmin = 0,
color = df['data'],
cmax = df['data'].max(),
opacity=0.85,
colorbar=dict(
title= "Frequency",
textposition = "auto",
orientation = "h"
)
)
)]
layout['title'] = ("Non-Payment Count<br>%"+str(int(strike_level*100)) +" Rainfall Index Would Not Have Payed during "
+ DMlabels.get(usdm_level) + "+ Drought" )
layout['mapbox']['zoom'] = 2
# Seventh wrap the data and layout into one
figure = dict(data=data, layout=layout)
# return {'figure':figure,'info': index_package_all}
return figure
# In[]:
@app.callback(Output('basis_graph', 'figure'),
[Input('signal','children')])
def basisGraph(signal):
"""
This is the risk ratio map.
"""
# Get data
df = retrieve_data(signal)
[basisrisk, droughtchances, hits, rainchance] = df
# Transform the argument list back to normal
# if not signal:
# signal= source_signal
signal = json.loads(signal)
# Unpack signals
index_choice = signal[0]
usdm_level = signal[1]
strike_level = signal[2]
statefilter = signal[3]
typeof = str(type(statefilter))
# Second, convert data back into an array, but in a form xarray recognizes
array = np.array([basisrisk],dtype = "float32")
# Third, change the source array to this one. Source is defined up top
source.data = array
# Fourth, bin the values into lat, long points for the dataframe
dfs = xr.DataArray(source, name = "data")
pdf = dfs.to_dataframe()
step = .25
to_bin = lambda x: np.floor(x / step) * step
pdf["latbin"] = pdf.index.get_level_values('y').map(to_bin)
pdf["lonbin"] = pdf.index.get_level_values('x').map(to_bin)
pdf['gridx']= pdf['lonbin'].map(londict)
pdf['gridy']= pdf['latbin'].map(latdict)
grid2 = np.copy(grid)
grid2[np.isnan(grid2)] = 0
pdf['grid'] = grid2[pdf['gridy'],pdf['gridx']]
pdf['grid'] = pdf['grid'].apply(int)
pdf['grid'] = pdf['grid'].apply(str)
pdf['printdata1'] = "Grid ID#: "
pdf['printdata'] = "<br> Data: "
pdf['grid2'] = pdf['printdata1'] + pdf['grid'] +pdf['printdata'] + pdf['data'].apply(np.round,decimals = 4).apply(str)
groups = pdf.groupby(("latbin", "lonbin"))
df_flat = pdf.drop_duplicates(subset=['latbin', 'lonbin'])
df= df_flat[np.isfinite(df_flat['data'])]
# Add Grid IDs
colorscale = [[0, 'rgb(2, 0, 68)'], [0.35, 'rgb(17, 123, 215)'],# Make darker (pretty sure this one)
[0.45, 'rgb(37, 180, 167)'], [0.55, 'rgb(134, 191, 118)'],
[0.7, 'rgb(249, 210, 41)'], [1.0, 'rgb(255, 249, 0)']] # Make darker
# Create the scattermapbox object
data = [
dict(
type = 'scattermapbox',
# locationmode = 'USA-states',
lon = df['lonbin'],
lat = df['latbin'],
text = df['grid2'],
mode = 'markers',
marker = dict(
colorscale = colorscale,
cmin = 0,
color = df['data'],
cmax = df['data'].max(),
opacity=0.85,
colorbar=dict(
title= "Risk Ratio",
textposition = "auto",
orientation = "h"
)
)
)]
# Return order to help with average value:
# Weight by the number of drought events
average = str(round(np.nansum(droughtchances*basisrisk)/np.nansum(droughtchances),4))
# average = np.nanmean(basisrisk)
layout['title'] = ("Non-Payment Likelihood <br>"
+ "Rainfall Index at %"+str(int(strike_level*100))
+ " strike level and " + DMlabels.get(usdm_level) +"+ USDM Severity | Average: " + average)
# layout['title'] = typeof
# Seventh wrap the data and layout into one
figure = dict(data=data, layout=layout)
# return {'figure':figure,'info': index_package_all}
return figure
# In[]:
# Main
if __name__ == '__main__':
app.server.run(debug=True,use_reloader = False)# threaded=True
|
UTF-8
|
Python
| false | false | 34,020 |
py
| 34 |
PRFUSDMmodel-online.py
| 15 | 0.522634 | 0.500882 | 0 | 865 | 38.326012 | 312 |
gsedometov/Data-protection
| 13,477,607,398,463 |
094157e86397bc2b813b9bef405c9097e5196d5f
|
a05d5cdf99b43010a59db756ab8707eb8aa7b692
|
/zi21.py
|
0d1e73e41c464c82cda300de7f4c780f2b9a088f
|
[] |
no_license
|
https://github.com/gsedometov/Data-protection
|
e00b747a495329db9593de107e6c514f648e1384
|
5f27835c3e99c7a57ad7833826b615cf334ab0c0
|
refs/heads/master
| 2020-12-24T12:05:53.849618 | 2016-12-05T07:34:02 | 2016-12-05T07:34:02 | 73,073,793 | 0 | 0 | null | false | 2016-11-07T14:03:26 | 2016-11-07T11:56:47 | 2016-11-07T12:04:46 | 2016-11-07T14:03:26 | 0 | 0 | 0 | 0 |
Python
| null | null |
from argparse import ArgumentParser
from functools import partial
from utils import add_modulo_256, sub_modulo_256, encode, decode
def crypt(msg, bias, op):
return bytes(map(lambda x: op(x, bias), msg))
def process_io(fun, args):
return decode(fun(encode(args.m), args.k))
encrypt = lambda args: process_io(partial(crypt, op=add_modulo_256), args)
decrypt = lambda args: process_io(partial(crypt, op=sub_modulo_256), args)
def parse_args():
    parser = ArgumentParser(description='Encrypts and decrypts strings using a monoalphabetic substitution cipher.')
subparsers = parser.add_subparsers()
enc_parser = subparsers.add_parser('encrypt')
    enc_parser.add_argument('-m', type=str, help='String to encrypt.')
    enc_parser.add_argument('-k', type=int, help='Key (shift).')
enc_parser.set_defaults(func=encrypt)
dec_parser = subparsers.add_parser('decrypt')
    dec_parser.add_argument('-m', type=str, help='String to decrypt.')
    dec_parser.add_argument('-k', type=int, help='Key (shift).')
dec_parser.set_defaults(func=decrypt)
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
print(args.func(args))
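# Illustrative shell usage (the exact output depends on the encode/decode
# helpers imported from utils):
#   python zi21.py encrypt -m "some text" -k 3
#   python zi21.py decrypt -m "<ciphertext>" -k 3
# Each byte of the encoded message is shifted by the key modulo 256.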
|
UTF-8
|
Python
| false | false | 1,338 |
py
| 8 |
zi21.py
| 7 | 0.680328 | 0.670492 | 0 | 33 | 34.969697 | 106 |
Rony-21/mtkclient-1
| 13,477,607,389,839 |
f3c493e4ab9ceb6be96a9edc9e8ddd822b260f8b
|
6b75bd7642d8dfb46ddca5fec1cf93e8882d714a
|
/mtkclient/Library/daconfig.py
|
2cbcd1ea92ff468403cacec7b6de875fdea14774
|
[
"MIT"
] |
permissive
|
https://github.com/Rony-21/mtkclient-1
|
3d228d94cfe347d982fd39e4bb0fa07446a29d85
|
e623dfccceaed99f0276a48cdd11a54d5ce1dc7e
|
refs/heads/main
| 2023-06-25T15:35:47.489129 | 2021-08-02T14:19:02 | 2021-08-02T14:19:02 | 391,983,916 | 1 | 0 |
MIT
| true | 2021-08-02T14:38:28 | 2021-08-02T14:38:27 | 2021-08-02T14:19:05 | 2021-08-02T14:19:02 | 18,709 | 0 | 0 | 0 | null | false | false |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# (c) B.Kerler 2018-2021 MIT License
import logging
import os
from struct import unpack
from mtkclient.Library.utils import LogBase, read_object, logsetup
from mtkclient.config.payloads import pathconfig
class Storage:
MTK_DA_HW_STORAGE_NOR = 0
MTK_DA_HW_STORAGE_NAND = 1
MTK_DA_HW_STORAGE_EMMC = 2
MTK_DA_HW_STORAGE_SDMMC = 3
MTK_DA_HW_STORAGE_UFS = 4
class DaStorage:
MTK_DA_STORAGE_EMMC = 0x1
MTK_DA_STORAGE_SDMMC = 0x2
MTK_DA_STORAGE_UFS = 0x30
MTK_DA_STORAGE_NAND = 0x10
MTK_DA_STORAGE_NAND_SLC = 0x11
MTK_DA_STORAGE_NAND_MLC = 0x12
MTK_DA_STORAGE_NAND_TLC = 0x13
MTK_DA_STORAGE_NAND_AMLC = 0x14
MTK_DA_STORAGE_NAND_SPI = 0x15
MTK_DA_STORAGE_NOR = 0x20
MTK_DA_STORAGE_NOR_SERIAL = 0x21
MTK_DA_STORAGE_NOR_PARALLEL = 0x22
class EMMC_PartitionType:
MTK_DA_EMMC_PART_BOOT1 = 1
MTK_DA_EMMC_PART_BOOT2 = 2
MTK_DA_EMMC_PART_RPMB = 3
MTK_DA_EMMC_PART_GP1 = 4
MTK_DA_EMMC_PART_GP2 = 5
MTK_DA_EMMC_PART_GP3 = 6
MTK_DA_EMMC_PART_GP4 = 7
MTK_DA_EMMC_PART_USER = 8
MTK_DA_EMMC_PART_END = 9
MTK_DA_EMMC_BOOT1_BOOT2 = 10
class UFS_PartitionType:
UFS_LU0 = 0
UFS_LU1 = 1
UFS_LU2 = 2
UFS_LU3 = 3
UFS_LU4 = 4
UFS_LU5 = 5
UFS_LU6 = 6
UFS_LU7 = 7
UFS_LU8 = 8
class Memory:
M_EMMC = 1
M_NAND = 2
M_NOR = 3
class NandCellUsage:
    CELL_UNI = 0
CELL_BINARY = 1
CELL_TRI = 2
CELL_QUAD = 3
CELL_PENTA = 4
CELL_HEX = 5
CELL_HEPT = 6
CELL_OCT = 7
entry_region = [
('m_buf', 'I'),
('m_len', 'I'),
('m_start_addr', 'I'),
('m_start_offset', 'I'),
('m_sig_len', 'I')]
DA = [
('magic', 'H'),
('hw_code', 'H'),
('hw_sub_code', 'H'),
('hw_version', 'H'),
('sw_version', 'H'),
('reserved1', 'H'),
('pagesize', 'H'),
('reserved3', 'H'),
('entry_region_index', 'H'),
('entry_region_count', 'H')
# vector<entry_region> LoadRegion
]
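# read_object (imported from mtkclient.Library.utils) is assumed here to
# unpack a raw byte buffer into a dict keyed by the field names in the
# definitions above. A minimal sketch of that idea, using the same
# (name, struct-format) pairs:
#
#   from struct import calcsize, unpack
#   def read_object_sketch(data, definition):
#       fmt = "<" + "".join(t for _, t in definition)
#       values = unpack(fmt, data[:calcsize(fmt)])
#       return dict(zip((name for name, _ in definition), values))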
class DAconfig(metaclass=LogBase):
def __init__(self, mtk, loader=None, preloader=None, loglevel=logging.INFO):
self.__logger = logsetup(self, self.__logger, loglevel)
self.mtk = mtk
self.pathconfig = pathconfig()
self.config = self.mtk.config
self.usbwrite = self.mtk.port.usbwrite
self.usbread = self.mtk.port.usbread
self.flashsize = 0
self.sparesize = 0
self.readsize = 0
self.pagesize = 512
self.da = None
self.dasetup = {}
self.loader = loader
self.extract_emi(preloader, self.mtk.config.chipconfig.damode)
if loader is None:
loaders = []
for root, dirs, files in os.walk(self.pathconfig.get_loader_path(), topdown=False):
for file in files:
if "Preloader" not in root:
loaders.append(os.path.join(root, file))
for loader in loaders:
self.parse_da_loader(loader)
else:
if not os.path.exists(loader):
self.warning("Couldn't open " + loader)
else:
self.parse_da_loader(loader)
    def extract_emi(self, preloader=None, legacy=False):
if preloader is None:
self.emi = None
return
if isinstance(preloader, bytearray) or isinstance(preloader, bytes):
data = bytearray(preloader)
elif isinstance(preloader, str):
if os.path.exists(preloader):
with open(preloader, "rb") as rf:
data = rf.read()
            else:
                self.error("Preloader :" + preloader + " doesn't exist. Aborting.")
                exit(1)
if legacy:
idx = data.rfind(b"MTK_BIN")
if idx == -1:
self.emi = None
return
dramdata = data[idx:][0xC:][:-0x128]
self.emi = dramdata
return
else:
idx = data.rfind(b"MTK_BLOADER_INFO_v")
if idx != -1:
emi = data[idx:]
count = unpack("<I", emi[0x6C:0x70])[0]
size = (count * 0xB0) + 0x70
emi = emi[:size]
self.emi = emi
return
self.emi = None
return
def parse_da_loader(self, loader):
if not "MTK_AllInOne_DA" in loader:
return True
try:
if loader not in self.dasetup:
self.dasetup[loader] = []
with open(loader, 'rb') as bootldr:
# data = bootldr.read()
# self.debug(hexlify(data).decode('utf-8'))
bootldr.seek(0x68)
count_da = unpack("<I", bootldr.read(4))[0]
for i in range(0, count_da):
bootldr.seek(0x6C + (i * 0xDC))
datmp = read_object(bootldr.read(0x14), DA) # hdr
datmp["loader"] = loader
da = [datmp]
# bootldr.seek(0x6C + (i * 0xDC) + 0x14) #sections
count = datmp["entry_region_count"]
for m in range(0, count):
entry_tmp = read_object(bootldr.read(20), entry_region)
da.append(entry_tmp)
self.dasetup[loader].append(da)
return True
except Exception as e:
self.error("Couldn't open loader: " + loader + ". Reason: " + str(e))
return False
def setup(self):
dacode = self.config.chipconfig.dacode
for loader in self.dasetup:
for setup in self.dasetup[loader]:
if setup[0]["hw_code"] == dacode:
if setup[0]["hw_version"] <= self.config.hwver:
if setup[0]["sw_version"] <= self.config.swver:
if self.loader is None:
self.da = setup
self.loader = loader
if self.da is None:
self.error("No da config set up")
return self.da
|
UTF-8
|
Python
| false | false | 6,136 |
py
| 26 |
daconfig.py
| 21 | 0.517438 | 0.493155 | 0 | 201 | 29.527363 | 95 |
pixeloxx/batch-renderer-rhino-vray
| 13,958,643,714,095 |
10eb11737fa8bee72e012eb4037ff1081249f682
|
b884761c313de569cae1c75b5fbd0035d75621f5
|
/main.py
|
8e60cd80e6b75f94331c504b77655a1b8b1be829
|
[
"MIT"
] |
permissive
|
https://github.com/pixeloxx/batch-renderer-rhino-vray
|
6b574c7810a29ce67475e7855a6ba4dd38e50782
|
efe5b45266b18474d7ba4e4fe93908b437b6a949
|
refs/heads/master
| 2022-11-28T22:10:31.635100 | 2020-07-31T11:54:32 | 2020-07-31T11:54:32 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
=========================================================
-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-
Title: Rhino Layer State Batch Render
-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-
=========================================================
-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-
Author: Vlad
-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-
Description:
The script renders all the named views
and goes through all the layer states.
-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-
Notes:
The folder destination, layerstate name,
& view names must be clear of characters
which cannot be part of a file's name (e.g.
Tilde (~)
Number sign (#)
Percent (%)
Ampersand (&)
Asterisk (*)
Braces ({ })
Backslash (\)
Colon (:)
Angle brackets (< >)
Question mark (?)
Slash (/)
Plus sign (+)
Pipe (|)
Quotation mark (")
-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-
=========================================================
"""
import rhinoscriptsyntax as rs
import sys
def ChangeLayerState(LayerState):
"""
Receives a LayerState and changes
the model to that specific LayerState
"""
plugin = rs.GetPlugInObject("Rhino Bonus Tools")
if plugin is not None:
plugin.RestoreLayerState(LayerState, 0)
return 1
else :
return 0
def GetLayerStates():
"""
The function returns the LayerStates
that can be found in the model
"""
plugin = rs.GetPlugInObject("Rhino Bonus Tools")
if plugin is not None:
MyArray = plugin.LayerStateNames
MyArrayB = []
MyArray = str(MyArray[1])
Trigger = True
while (Trigger):
poz=MyArray.rfind("'")
MyArray = MyArray[:poz]
poz=MyArray.rfind("'")
dif = MyArray[poz:]
dif = dif[1:]
MyArrayB.append(dif)
MyArray = MyArray[:poz]
if len(MyArray)<14:
Trigger = False
del MyArrayB[-1] #clean up the list
return MyArrayB
def GetViewNames():
"""
Returns a string of defining
the NamedViews that can be found
in the file
"""
a = rs.NamedViews()
return a
def ChooseFolderPath():
"""
pick a folder to save the renderings to
return the folder
"""
folder = rs.BrowseForFolder(rs.DocumentPath, "Browse for folder", "Batch Render")
return folder
def Render(folder,View,State):
"""
Defines the Rendering action
Saves the render to the browsed folder
Adds the name of the view and the name
of the layer state to the naming of the
view
"""
FileName = '"'+folder +'\\'+View+'_'+State+'"'
FileName = str(FileName)
rs.Command ("!_-Render")
rs.Command ("_-SaveRenderWindowAs "+FileName)
rs.Command ("_-CloseRenderWindow")
return 1
def ChangeView(View):
rs.Command ("_-NamedView _Restore " + View + " _Enter", 0)
if __name__ == "__main__":
"""
Main Function
"""
VRay = rs.GetPlugInObject("V-Ray for Rhino")
VRay.SetBatchRenderOn(True) #Set Batch Render on True
arrStates = GetLayerStates() #initialise layer states
arrViewNames = GetViewNames()
folder = ChooseFolderPath()
for State in arrStates:
ChangeLayerState(State)
print (State)
for View in arrViewNames:
ChangeView(View)
Render(folder,View,State)
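# The notes at the top warn that view and layer-state names must avoid
# characters that are illegal in file names. A minimal sketch of
# sanitizing a name before it is passed to Render (an addition, not part
# of the original script), e.g. Render(folder, SafeName(View), SafeName(State)):
def SafeName(name):
    for ch in '~#%&*{}\\:<>?/+|"':
        name = name.replace(ch, "_")
    return name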
|
UTF-8
|
Python
| false | false | 3,797 |
py
| 2 |
main.py
| 1 | 0.472478 | 0.469845 | 0 | 126 | 29.134921 | 85 |
InterImm/marsapi
| 2,817,498,589,105 |
67e93de513c8f10dc7019dc8e52907338d5581fd
|
03f9a8c0632839c4e6a77b198015a1ee3b6c0215
|
/marsapi/api/restplus.py
|
5d9564781a52bbfa6326418c7e6a4bcecd834e38
|
[
"MIT"
] |
permissive
|
https://github.com/InterImm/marsapi
|
e7b972ceb24586e28cbd913229d2907ac70536e4
|
f8f832360d9df00d1a6112a51f580817389797cb
|
refs/heads/master
| 2020-05-17T18:31:50.473136 | 2019-06-25T06:03:04 | 2019-06-25T06:03:04 | 183,886,497 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import logging
import traceback
from flask_restplus import Api
from marsapi import settings
MARSAPI_CONFIG = {
"version": "0.0.1",
"title": "MarsAPI",
"description": "Get Mars related information through API"
}
log = logging.getLogger(__name__)
api = Api(**MARSAPI_CONFIG)
@api.errorhandler
def default_error_handler(e):
message = 'Something is not right, Scott.'
log.exception(message)
if not settings.FLASK_DEBUG:
return {'message': message}, 500
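# A minimal sketch of how this module is typically wired into the Flask
# app elsewhere in the package (the blueprint name and URL prefix below
# are assumptions, not taken from this repository):
#
#   from flask import Flask, Blueprint
#   app = Flask(__name__)
#   blueprint = Blueprint('api', __name__, url_prefix='/api')
#   api.init_app(blueprint)
#   app.register_blueprint(blueprint)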
|
UTF-8
|
Python
| false | false | 508 |
py
| 10 |
restplus.py
| 7 | 0.661417 | 0.649606 | 0 | 24 | 20.166667 | 65 |
Carlos-Alfredo/M-todos-Num-ricos
| 7,541,962,603,043 |
800203a2cf4e3db5f5a0578b2dfd1bd7962eac34
|
4ba06bf2d3fd0aac9af0ca41b07801a276a9cc07
|
/metodos numericos python/Integral Numerica/Questao1.py
|
65b95bbdc7806ee04a5d4f0d5a96f72c6e6001a4
|
[] |
no_license
|
https://github.com/Carlos-Alfredo/M-todos-Num-ricos
|
2e69040793ea953a47d79937ef497bc73f1e0f24
|
63f212faa2ba1712ad110413f6472aece4eaf51c
|
refs/heads/master
| 2022-12-01T21:19:35.765223 | 2020-08-12T11:53:30 | 2020-08-12T11:53:30 | 287,000,517 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import math
import matplotlib.pyplot as plt
def funcao(x):
return 6 + 3*math.cos(x)
valor_real=6*(math.pi/2)+3*(math.sin(math.pi/2)-math.sin(0))
a=0
b=math.pi/2
def trapezio(n,a,b):
h=(b-a)/n
integral=0
for i in range(0,n):
integral=integral+(funcao(a+h*i)+funcao(a+h*(i+1)))*h/2
return integral
def n_trapezio(n,valor_real):
integrais=[]
erros=[]
numero=[]
for i in range(1,n+1):
integral=trapezio(i,a,b)
integrais.append(integral)
erros.append(math.fabs((integral-valor_real)/valor_real))
numero.append(i)
plt.figure(1)
plt.subplot(211)
plt.title("Integral através da regra do trapézio")
plt.plot(numero,integrais)
plt.xlabel("n")
plt.ylabel("Integral")
plt.subplot(212)
plt.plot(numero,erros)
plt.xlabel("n")
plt.ylabel("|%Erro|")
def simpson1_3(n,a,b):
h=(b-a)/n
integral=0
for i in range(0,int(n/2)):
integral=integral+h/3*(funcao(a+h*(2*i))+4*funcao(a+h*(2*i+1))+funcao(a+h*(2*i+2)))
return integral
def n_simpson1_3(n,valor_real):
integrais=[]
erros=[]
for i in range(0,len(n)):
integral=simpson1_3(n[i],a,b)
integrais.append(integral)
erros.append(math.fabs((integral-valor_real)/valor_real))
plt.figure(2)
plt.subplot(211)
plt.title("Integral através da regra de 1/3 de Simpson")
plt.plot(n,integrais)
plt.yscale('log')
plt.xlabel("n")
plt.ylabel("Integral")
plt.subplot(212)
plt.plot(n,erros)
plt.xlabel("n")
plt.ylabel("|%Erro|")
def simpson3_8(n,a,b):
h=(b-a)/n
integral=0
for i in range(0,int(n/3)):
integral=integral+3*h/8*(funcao(a+h*(3*i))+3*funcao(a+h*(3*i+1))+3*funcao(a+h*(3*i+2))+funcao(a+h*(3*i+3)))
return integral
def n_simpson(n,a,b):
if(n%2==0):
return simpson1_3(n,a,b)
elif(n==3):
return simpson3_8(3,a,b)
else:
return simpson1_3(n-3,a,b-3*(b-a)/n)+simpson3_8(3,b-3*(b-a)/n,b)
def simpson(n,valor_real):
integrais=[]
erros=[]
for i in range(0,len(n)):
integral=n_simpson(n[i],a,b)
integrais.append(integral)
erros.append(math.fabs((integral-valor_real)/valor_real))
plt.figure(3)
plt.subplot(211)
plt.title("Integral através das regras de 1/3 e 3/8 de Simpson")
plt.plot(n,integrais)
plt.yscale('log')
plt.xlabel("n")
plt.ylabel("Integral")
plt.subplot(212)
plt.plot(n,erros)
plt.xlabel("n")
plt.ylabel("|%Erro|")
n_trapezio(10,valor_real)
n_simpson1_3([2,4,6,8,10],valor_real)
simpson([3, 4, 5, 6, 7, 8, 9, 10],valor_real)
plt.show()
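# Quick illustrative check (an addition): the exact value of the integral
# of 6 + 3*cos(x) over [0, pi/2] is 3*pi + 3 (about 12.4248), so the
# composite rules above should converge to valor_real as n grows.
print(valor_real, trapezio(100, a, b), n_simpson(10, a, b))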
|
UTF-8
|
Python
| false | false | 2,449 |
py
| 34 |
Questao1.py
| 31 | 0.6409 | 0.595501 | 0 | 93 | 24.311828 | 109 |
neonbevz/AdFontes
| 5,274,219,859,304 |
0e6ab8925a125e0d841c7a319af5fb3918eac822
|
6a3dff31e6a581b58c692f893b2975676e8003fc
|
/Python/graph_data.py
|
c6b51d9a9d1c0583172bba31fc390c748efbabdc
|
[] |
no_license
|
https://github.com/neonbevz/AdFontes
|
8bb5511643ba126e3a5d1032d8f949895634d825
|
27e48d354b45874f625fe89b62d9cd24ba3b09cf
|
refs/heads/master
| 2021-08-31T21:08:29.598299 | 2017-12-22T23:11:20 | 2017-12-22T23:11:20 | 111,950,917 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import json
import random
def generate_graph(n_nodes, n_edges, min_len, max_len):
nodes = ["N" + str(i + 1) for i in range(n_nodes - 1)]
nodes = ["O"] + nodes
edges = []
for j in range(n_edges):
node = nodes[j % len(nodes)]
node2 = random.choice(nodes)
while node2 == node or [node, node2] in edges or [node2, node] in edges:
node2 = random.choice(nodes)
edges.append([node, node2])
for edge in edges:
edge.append(random.randint(min_len, max_len))
return "O", nodes, edges
def write_graph(filename, origin, nodes, edges):
d = {"origin": origin, "nodes": nodes, "edges": edges}
with open(filename, mode="w") as file:
file.write(json.dumps(d))
def read_graph(filename):
with open(filename) as file:
d = json.loads(file.read())
return d["origin"], d["nodes"], d["edges"]
|
UTF-8
|
Python
| false | false | 882 |
py
| 14 |
graph_data.py
| 10 | 0.592971 | 0.5839 | 0 | 29 | 29.413793 | 80 |