repo_name
stringlengths 7
111
| __id__
int64 16.6k
19,705B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
151
| content_id
stringlengths 40
40
| detected_licenses
list | license_type
stringclasses 2
values | repo_url
stringlengths 26
130
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 14.6k
687M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 12
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
10.2M
⌀ | gha_stargazers_count
int32 0
178k
⌀ | gha_forks_count
int32 0
88.9k
⌀ | gha_open_issues_count
int32 0
2.72k
⌀ | gha_language
stringlengths 1
16
⌀ | gha_archived
bool 1
class | gha_disabled
bool 1
class | content
stringlengths 10
2.95M
| src_encoding
stringclasses 5
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 10
2.95M
| extension
stringclasses 19
values | num_repo_files
int64 1
202k
| filename
stringlengths 4
112
| num_lang_files
int64 1
202k
| alphanum_fraction
float64 0.26
0.89
| alpha_fraction
float64 0.2
0.89
| hex_fraction
float64 0
0.09
| num_lines
int32 1
93.6k
| avg_line_length
float64 4.57
103
| max_line_length
int64 7
931
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
DanielAbrahamDzulPech/articicial-inteligence-and-data-science | 11,785,390,289,211 | fc2d22de53d361df0655a81e13e87143a6ccd211 | d137ca8c04da3b01e51f68e2f173114baef4b19c | /05_python_intermedio/modulo_III_comprehensions/complementos/list-comprehensions/reto.py | 1e8dc892f2adf69c546ee640f96db3c669d8c639 | [
"MIT"
]
| permissive | https://github.com/DanielAbrahamDzulPech/articicial-inteligence-and-data-science | b4e9258071f5c3208b078d1f60d09dd07e4355c6 | 953566220e64cbd8f732c2667b818da807bb54c0 | refs/heads/master | 2023-08-11T08:40:15.301204 | 2021-09-20T04:28:57 | 2021-09-20T04:28:57 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Resolviendo Retos y agregandole un nivel mas de dificultad
alumno: @edinsonrequena
"""
def func_reto():
    """Print every number below 10000 divisible by 4, 6 and 9.

    Divisibility by all of 4, 6 and 9 is equivalent to divisibility by
    their least common multiple, 36.
    """
    multiples = [n for n in range(1, 10000) if n % 36 == 0]
    print(multiples)
| UTF-8 | Python | false | false | 211 | py | 208 | reto.py | 113 | 0.601896 | 0.545024 | 0 | 12 | 16.583333 | 82 |
yusukew62/redmine-issue-registerd | 16,621,523,439,972 | 9f5cb95053a0563859b8807d8cefefbea4661c1e | 7d26da553c77ef4bed9555af2700d809ec64ee3f | /setup.py | f9cecdfd48677c798c8b4dadd3cd4fca53d9251c | []
| no_license | https://github.com/yusukew62/redmine-issue-registerd | e4272d345f81f7880a6267cd08626af03c75976d | 39a2a20f3f31928322c8f08b97be7f4cd323ad24 | refs/heads/master | 2021-01-12T09:10:23.893040 | 2017-01-20T16:38:07 | 2017-01-20T16:38:07 | 76,780,962 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*- coding: utf8 -*-
from setuptools import setup, find_packages
setup(
    name = 'redmine-issue-registerd',
    version = '0.0.1',
    description = 'Register auto issue of Redmine by receiving e-mail',
    license = 'MIT license',
    author = 'Yusuke Watanabe',
    author_email = 'yusuke.w62@gmail.com',
    url = 'https://github.com/yusukew62/redmine-issue-registerd.git',
    keywords = 'python redmine',
    packages = find_packages(),
    # Bug fix: install_requires was [''] -- setuptools treats an empty string
    # as an invalid requirement specifier. No dependencies -> empty list.
    install_requires = [],
    entry_points = {
        'console_scripts': [
            # NOTE(review): 'redmine-ird.redmine-ird:main' contains hyphens,
            # which are not valid in Python module names, so this entry point
            # cannot resolve -- confirm the real import path (e.g.
            # 'redmine_ird.redmine_ird:main').
            'redmine-ird=redmine-ird.redmine-ird:main',
        ],
    },
)
| UTF-8 | Python | false | false | 626 | py | 3 | setup.py | 2 | 0.610224 | 0.597444 | 0 | 22 | 27.227273 | 71 |
simonegymondo/BucketFillCompare | 5,617,817,255,306 | 1e3a17ac1ac3109d41be6f5e0ea53992392090c4 | d471a71c2811d2254f03ed8add65e9402a045216 | /scanline_opencv.py | d7a036868f091186ee3df982df2a43a8aeab7eef | []
| no_license | https://github.com/simonegymondo/BucketFillCompare | f845ce6c03acd4979627e403aecff83f2613e849 | 8211c3162376e2151a0482e964d1a71bef335598 | refs/heads/master | 2021-01-17T16:01:10.615061 | 2016-03-04T10:27:37 | 2016-03-04T10:27:37 | 53,127,630 | 0 | 0 | null | true | 2016-03-04T10:28:34 | 2016-03-04T10:28:34 | 2016-03-04T07:40:30 | 2016-03-04T10:28:16 | 2,978 | 0 | 0 | 0 | null | null | null | from canvas import Canvas
from canvas import timing
class ScanlineOpenCV(Canvas):
    """
    This is a scan line approach as designed by OpenCV. The algorithm is similar to
    the scanline as described above but it uses a different flow. Instead of filling
    from left to right, it starts filling from the center to both horizontal directions.
    It maintains a left and right index which are added to the stack and processed
    at each iteration.
    """
    @timing
    def fill(self, x, y, color):
        """Flood-fill the connected region of pixel (x, y) with `color`.

        Here `x` indexes rows of self.pixels and the second index (y/l/r/k)
        indexes columns within a row: spans grow horizontally along a row,
        and the stack walks to the neighbouring rows above/below.
        """
        self.validate(x, y, color)
        old_color = self.pixels[x][y]
        if old_color == color:
            return # nothing to do
        # Track the maximum stack depth reached (used for instrumentation).
        self.max_depth = 0
        w = len(self.pixels[0])
        h = len(self.pixels)
        # Expand the seed span to the right...
        l = y
        r = y
        while(r < w and self.pixels[x][r] == old_color):
            self.pixels[x][r] = color
            r += 1
        # ...and to the left, filling the whole initial horizontal span.
        l -= 1
        while(l >= 0 and self.pixels[x][l] == old_color):
            self.pixels[x][l] = color
            l -= 1
        # Both loops overshoot by one; clamp back to the last filled columns.
        l += 1
        r -= 1
        # Stack entry: (row, span_left, span_right, parent_left, parent_right, direction)
        stack = [(x, l, r, r + 1, r, 1)]
        while stack:
            self.max_depth = max(self.max_depth, len(stack))
            yc, l, r, pl, pr, dirz = stack.pop()
            # Three candidate scans in the neighbouring rows: the full span on
            # one vertical side, and only the parts sticking out past the
            # parent span on the other.
            data = [[-dirz, l, r], [dirz, l, pl - 1], [dirz, pr + 1, r]]
            for i in range(0, 3):
                dirz = data[i][0]
                yc_d = yc + dirz
                if yc_d >= h or yc_d < 0:
                    continue
                left = data[i][1]
                right = data[i][2]
                k = left
                while k <= right:
                    if k >= 0 and k < w and self.pixels[yc_d][k] == old_color:
                        self.pixels[yc_d][k] = color
                        # Grow a new span around column k: leftwards first...
                        j = k
                        j -= 1
                        while j >= 0 and self.pixels[yc_d][j] == old_color:
                            self.pixels[yc_d][j] = color
                            j -= 1
                        # ...then rightwards.
                        k += 1
                        while k < w and self.pixels[yc_d][k] == old_color:
                            self.pixels[yc_d][k] = color
                            k += 1
                        # Queue the freshly filled span for further expansion.
                        stack.append((yc_d, j + 1, k - 1, l, r, -dirz))
                    k += 1
paulromano/serpent-tools | 11,656,541,246,620 | 9dccfa31b5d5177345c7022b6c9218896f28395e | 7eeb75666d8d41271569e8088aa65359c1de1d43 | /serpentTools/seed.py | 0848b0cad687633ccb89bab5bb07411f10e7e487 | [
"MIT"
]
| permissive | https://github.com/paulromano/serpent-tools | 39377bc439e7ec5d1a184bc6ca2dcdbb66ba2677 | e991c2311a4b5f63b8534a70b65807507bdc1b3e | refs/heads/master | 2022-06-22T15:53:19.239019 | 2018-05-11T12:36:44 | 2018-05-11T12:36:44 | 133,976,907 | 1 | 0 | null | true | 2018-05-18T16:08:00 | 2018-05-18T16:07:59 | 2018-05-18T16:07:37 | 2018-05-18T15:42:35 | 10,623 | 0 | 0 | 0 | null | false | null | """
Function to copy an input file N times with N randomly generated seeds
"""
import os
from os import path
from shutil import copy
import random
from six.moves import range
from serpentTools.messages import error, debug
__all__ = ['seedFiles']
SLOPE = 0.3010142116935483
OFFSET = 0.0701126088709696
def _writeSeed(stream, bits, length):
seed = random.getrandbits(bits)
while len(str(seed)) != length:
seed = random.getrandbits(bits)
stream.write('\nset seed {}'.format(seed))
def _makeFileFmt(inputFile):
baseName = path.basename(inputFile)
if '.' in baseName:
s = baseName.split('.')
base = s[0]
ext = '.' + '.'.join(s[1:])
else:
base = baseName
ext = ''
return base + '_{}' + ext
def _include(inputFile, numSeeds, fileFmt, bits, length):
    """Write `numSeeds` thin wrapper files named by `fileFmt`.

    Each wrapper just includes the original input file and appends a unique
    random seed line (see _writeSeed).
    """
    for N in range(numSeeds):
        name = fileFmt.format(N)
        with open(name, 'w') as stream:
            stream.write('include \"{}\"\n'.format(inputFile))
            _writeSeed(stream, bits, length)
def _copy(inputFile, numSeeds, fileFmt, bits, length):
    """Write `numSeeds` full copies of the input file named by `fileFmt`.

    Each copy gets a unique random seed line appended (see _writeSeed).
    """
    for N in range(numSeeds):
        name = fileFmt.format(N)
        copy(inputFile, name)
        with open(name, 'a') as stream:
            _writeSeed(stream, bits, length)
def seedFiles(inputFile, numSeeds, seed=None, outputDir=None, link=False,
              digits=10):
    """
    Copy input file multiple times with unique seeds.

    Parameters
    ----------
    inputFile: str
        Path to input file
    numSeeds: int
        Number of files to create
    seed: int
        Optional argument to set the seed of the builtin random
        number generator
    outputDir: str
        Path to desired output directory. Files will be copied here.
        If the folder does not exist, try to make the directory. Assumes path
        relative to directory that contains the input file
    link: bool
        If True, do not copy the full file. Instead, create a new file
        with 'include <inputFile>' and the new seed declaration.
    digits: int
        Average number of digits for random seeds

    See Also
    --------
    :py:mod:`random`
    :py:func:`random.seed()`
    :py:func:`random.getrandbits()`
    """
    if '~' in inputFile:
        inputFile = os.path.expanduser(inputFile)
    if not path.exists(inputFile):
        error('Input file {} does not exist'.format(inputFile))
        return
    if numSeeds < 1:
        error('Require positive number of files to create')
        return
    if digits < 1:
        error('Require positive number of digits in random seeds')
        # Bug fix: previously fell through after reporting this error and
        # went on to compute a negative bit count; bail out like the other
        # validation branches do.
        return
    # Number of random bits that yields roughly `digits` decimal digits,
    # via the empirical linear fit stored in the SLOPE/OFFSET constants.
    bits = int((digits - OFFSET) / SLOPE)
    random.seed(seed)
    inputPath = path.abspath(path.join(os.getcwd(), inputFile))
    inputRoot = path.dirname(inputPath)
    if outputDir is not None:
        fPrefix = path.abspath(path.join(inputRoot, outputDir))
        if not path.isdir(fPrefix):
            debug('Creating directory at {}'.format(fPrefix))
            os.mkdir(fPrefix)
    else:
        fPrefix = inputRoot
    fileFmt = path.join(fPrefix, _makeFileFmt(inputFile))
    # Either lightweight include-wrappers or full copies of the input file.
    writeFunc = _include if link else _copy
    writeFunc(inputPath, numSeeds, fileFmt, bits, digits)
    return
| UTF-8 | Python | false | false | 3,213 | py | 64 | seed.py | 24 | 0.626829 | 0.614379 | 0 | 118 | 26.228814 | 77 |
cloudmesh/client | 16,338,055,595,055 | c24f87cf9ba01cc323de4eb0f29682d6b405741d | 25445f9e3ad9440a3ef3757b96933c33ac1b1daa | /cloudmesh_client/api/impl/openstack.py | fdd35e64d94c25c33519619f489b592ca299710e | [
"Apache-2.0"
]
| permissive | https://github.com/cloudmesh/client | 9d0d024b5f48ef59f8f02a2619fc34ed65cbfa17 | a5fc7dbaf2c51f1227cff346aedea4bf7f563fa9 | refs/heads/master | 2022-10-13T13:59:12.201306 | 2019-02-08T21:04:50 | 2019-02-08T21:04:50 | 37,379,872 | 3 | 19 | NOASSERTION | false | 2022-09-16T19:34:16 | 2015-06-13T16:56:22 | 2020-10-05T13:43:34 | 2022-09-16T19:34:15 | 49,462 | 3 | 12 | 101 | Python | false | false | from cloudmesh_client.cloud.network import Network
from cloudmesh_client.cloud.image import Image
from cloudmesh_client.cloud.vm import Vm
from cloudmesh_client import Console
from cloudmesh_client.api import Resource, Provider, Node
from cloudmesh_client.db.openstack.model import IMAGE_OPENSTACK, VM_OPENSTACK
from cloudmesh_client.common.util import exponential_backoff
from cloudmesh_client.default import Default
class ImageResource(Resource):
    """Placeholder for the OpenStack image resource; every operation is an
    unimplemented stub."""
    def create(self):
        pass
    def delete(self):
        pass
    def list(self):
        pass
    def refresh(self):
        pass
class KeyResource(Resource):
    """Placeholder for the OpenStack keypair resource (not yet implemented)."""
    pass
class FloatingIpResource(Resource):
    """Placeholder for the OpenStack floating-IP resource (not yet implemented)."""
    pass
class FlavorResource(Resource):
    """Placeholder for the OpenStack flavor resource (not yet implemented)."""
    pass
class OpenstackProvider(Provider):
    """:class:`Provider` implementation for OpenStack clouds.

    Boots VMs through the cloudmesh ``Vm`` helpers, waits for them to reach
    the ACTIVE state, and wraps the resulting database model in an
    :class:`OpenstackNode`.
    """

    # def __init__(self):
    #     super(OpenstackProvider, self).__init__()
    #
    #     self.images = ImageResource()
    #     self._add_resource(self.images)
    #
    #     self.keys = KeyResource()
    #     self._add_resource(self.keys)
    #
    #     self.flavors = FlavorResource()
    #     self._add_resource(self.flavors)
    #
    #     self.floating_ips = FloatingIpResource()
    #     self._add_resource(self.floating_ips)

    def boot(self, **kwargs):
        """Boot a single VM

        :param kwargs: parameters to :meth:`Vm.boot`
        :return: the vm details
        :rtype: :class:`Node`
        """
        cloud = kwargs.get('cloud', Default.cloud)
        name = kwargs.get('name', Vm.generate_vm_name())
        image = kwargs.get('image', Default.image)
        flavor = kwargs.get('flavor', Default.flavor)
        key = kwargs.get('key', Default.key)
        secgroup = kwargs.get('secgroup', Default.secgroup)
        group = kwargs.get('group', Default.group)
        username = kwargs.get('username', Image.guess_username(image))
        cluster = kwargs.get('cluster', None)

        # shorthand for getting a dict of all the vm details
        #
        # IMPORTANT: anything declared prior to the call to `locals()`
        # may be passed to `Vm.boot`, so make sure that only parameters are
        # defined above this comment.
        # NOTE(review): locals() also contains `self`; confirm Vm.boot
        # tolerates (or expects) that extra keyword argument.
        details = locals()
        details.pop('kwargs')

        # currently, Vm.boot returns the instance UUID from the provider for openstack images
        # 2016/12/12
        uuid = Vm.boot(**details)

        # helper function: the Vm.boot only returns a UUID, but we
        # need to use the VM model instead. Additionally, we'll need
        # to poll the VM to wait until it is active.
        #
        # The kwargs are used to select the item from the DB:
        # eg: uuid=???, cm_id=???, etc
        def get_vm(**kwargs):
            """Selects the VM based on the given properties"""
            model = self.db.vm_table_from_provider('openstack')
            vm = self.db.select(model, **kwargs).all()
            assert len(vm) == 1, vm
            vm = vm[0]
            return vm

        # get the VM from the UUID
        vm = get_vm(uuid=uuid)
        cm_id = vm.cm_id

        def is_active():
            """Refresh the VM cache and report whether the VM is ACTIVE."""
            Vm.refresh(cloud=cloud)
            vm = get_vm(cm_id=cm_id)
            return vm.status == 'ACTIVE'

        # Poll with exponentially increasing delays until ACTIVE or timeout.
        if not exponential_backoff(is_active):
            Console.error('Failed to get ACTIVE vm within timeframe')
            raise ValueError

        assert is_active()
        vm = get_vm(cm_id=cm_id)
        assert isinstance(vm, VM_OPENSTACK), vm.__class__
        return OpenstackNode(model=vm, provider=self)

    def create_ip(self, node):
        """Find and assign a floating IP to `node`, then refresh the VM cache.

        NOTE(review): relies on `self.cloud`, which is not set anywhere in
        this class -- confirm the base Provider defines it.
        """
        ip = Network.find_assign_floating_ip(
            cloudname=self.cloud,
            instance_id=node.name,
        )
        Vm.refresh(cloud=self.cloud)
        Console.ok('Assigned ip to {}: {}'.format(node.name, ip))

    def delete(self, node):
        """Delete the given node. Not implemented for OpenStack yet.

        (Fixed parameter-name typo: was 'nodde'.)
        """
        raise NotImplementedError

    def node(self):
        raise NotImplementedError
class OpenstackNode(Node):
    """A single OpenStack VM, backed by a VM_OPENSTACK database model and the
    provider that booted it."""

    def __init__(self, model, provider):
        # Bug fix: was super(Node, self).__init__(), which starts the MRO
        # search *after* Node and therefore skips Node's own initializer.
        super(OpenstackNode, self).__init__()
        self._model = model
        self._provider = provider

    @property
    def name(self):
        return self._model.name

    @property
    def username(self):
        return self._model.username

    @property
    def private_ip(self):
        # Bug fix: was `self.model.static_ip`, but only `self._model` exists,
        # so reading this property raised AttributeError.
        return self._model.static_ip

    @property
    def public_ip(self):
        # Bug fix: was `self.model.floating_ip` (same issue as private_ip).
        return self._model.floating_ip

    def boot(self, **kwargs):
        pass

    def delete(self):
        raise NotImplementedError

    def start(self):
        raise NotImplementedError

    def stop(self):
        raise NotImplementedError

    def ssh(self, cmd=None, user=None):
        raise NotImplementedError

    def create_ip(self):
        """Request and assign a floating IP through the owning provider."""
        self._provider.create_ip(self)
| UTF-8 | Python | false | false | 4,698 | py | 244 | openstack.py | 179 | 0.603661 | 0.601533 | 0 | 181 | 24.955801 | 93 |
blazeghost/doctorfinder | 14,293,651,194,399 | b9908c7bcf00f37e79e947f91c722c0504f39cb6 | eacf02e8e3ba089cbbbdae584b3f59581ccef8b3 | /app/migrations/0012_delete_appoinment.py | 1ee2b7135f5bc23317e67243056144cee3953e4b | []
| no_license | https://github.com/blazeghost/doctorfinder | bea6c84b3ece4fc64a75b7a5f6fc5cc2dca41061 | 8ddeda32125d19c1e8060888f335418dbb052f2e | refs/heads/main | 2023-05-11T00:50:17.420008 | 2021-05-24T19:52:31 | 2021-05-24T19:52:31 | 338,102,994 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.1.6 on 2021-03-10 06:00
from django.db import migrations
class Migration(migrations.Migration):
    """Removes the 'Appoinment' model introduced in migration 0011.

    The model name's spelling matches the (misspelled) name used upstream in
    0011_appoinment and must stay identical for the deletion to resolve.
    """
    dependencies = [
        ('app', '0011_appoinment'),
    ]
    operations = [
        migrations.DeleteModel(
            name='Appoinment',
        ),
    ]
| UTF-8 | Python | false | false | 287 | py | 34 | 0012_delete_appoinment.py | 14 | 0.58885 | 0.522648 | 0 | 16 | 16.9375 | 47 |
Mike-Revy/Python-Simple-examples | 94,489,324,466 | 6da20f2ef87feadf1502c040a44603efbf72d961 | 8e434aa4669fdee7faff7401309d5de6f82eb6aa | /dict.py | 7bb3cb629bee776982fedd686aa0c201692c4526 | []
| no_license | https://github.com/Mike-Revy/Python-Simple-examples | 6651e6a6bbfb3e20181debdbec33bebb7bd771b7 | b95148f5a1e49db7e59e00152c40a1e6bc63758e | refs/heads/master | 2022-01-09T23:38:08.478505 | 2019-07-05T07:00:31 | 2019-07-05T07:00:31 | 109,949,471 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # dictionaries - 2 parts - Key & Value
course = {"title": "Python Collections"}
course["title"] # 'Python Collections'
# No Index / sorting of Dictionaries
nameDict = dict([["name","Kenneth"]])
course ={"title": "Python Collections", "teacher": "Kenneth Love", "videos":22}
course["videos"] # 22
# KeyError - when no key is asked for
# useful for nesting
course ={"title": "Python Collections", "teacher" : {"last_name": "Love", "first_name": "Kenneth"}, "videos":22}
course["teacher"]
course["teacher"]["first_name"]
kenneth = {"first_name" : "Kenneth", "job" : "Teacher"}
# adding to a dictionary add key [] and then = value - same with EDIT
kenneth["last_name"] = "Love"
# method update on dictionary - more than one key Value pair
kenneth.update({"job": "Python Teacher", "editor" : "Vim"})
# editing a value for a key
kenneth["editor"] = "any"
# delete a key
del kenneth["job"]
| UTF-8 | Python | false | false | 909 | py | 58 | dict.py | 51 | 0.654565 | 0.646865 | 0 | 21 | 42.142857 | 113 |
Juanky92/Basico | 11,158,325,075,194 | ea85446c0313d1b431ce96b79a71f417eb4cdd3d | 195405791f6c4d829f37b9a52d3adc40a88d5249 | /Funcion18.py | 3a48c7235dd02636ba82ad8f07bdae1450e3f803 | []
| no_license | https://github.com/Juanky92/Basico | 6bfcee9938e5f7726cec50fd06f2f9410eee881c | 786ff572652ceeac8909e45d34d3048dc9a80003 | refs/heads/master | 2021-01-12T10:47:05.836983 | 2016-11-10T00:34:36 | 2016-11-10T00:34:36 | 72,698,532 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def multiplicar(numero):
Rango=range(1,11)
for ele in Rango:
multi=numero*ele
print numero,'X',ele,'=',multi
return
print multiplicar(2)
print multiplicar(5)
print multiplicar(9)
print multiplicar(3)
# TODO: needs polishing (translated from Spanish: "Hay que pulirlo")
| UTF-8 | Python | false | false | 231 | py | 10 | Funcion18.py | 10 | 0.722944 | 0.692641 | 0 | 13 | 16.615385 | 32 |
doloopwhile/utask | 807,453,873,644 | eff44c66f92ad8da78ec30e7bcb9e70a8199e00b | 922bb9efc32e41580ebe2ae2f14c225480b8669f | /utask/view.py | 0cff53af44271755c65181c1e4758b5f2dcc112c | []
| no_license | https://github.com/doloopwhile/utask | eeda4c8bc105d983b8475bd870cbabd7953b717e | 18b2352c095d0852c3f11c902ec450cd1f48f7e3 | refs/heads/master | 2021-01-01T17:00:17.555683 | 2013-06-26T11:28:15 | 2013-06-26T11:28:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from pyramid.view import view_config
from pyramid.response import Response
@view_config(route_name='timelines')
def timelines(request):
    """Stub view for the 'timelines' route; always responds with plain 'OK'."""
    return Response('OK')
@view_config(route_name='tasks')
def tasks(request):
    """Stub view for the 'tasks' route; always responds with plain 'OK'."""
    return Response('OK')
@view_config(route_name='projects')
def projects(request):
    """Stub view for the 'projects' route; always responds with plain 'OK'."""
    return Response('OK')
| UTF-8 | Python | false | false | 335 | py | 3 | view.py | 2 | 0.725373 | 0.725373 | 0 | 14 | 22.928571 | 37 |
vaskocuturilo/GoogleSearch-Python- | 2,027,224,609,238 | 04d72311464ea761a0fe70ef0412c42ceef277a5 | 2d90259827e5283250c6ddf68a294c88954afc9d | /google_search.py | 54d4a5a3ff04a2a0c65ac969a40e8a7dd3aab996 | []
| no_license | https://github.com/vaskocuturilo/GoogleSearch-Python- | 6d55e34f0edebc296191bad083f9088f92840e32 | ee687bbb971edc9d5c6121271a6f93731542a6e1 | refs/heads/master | 2017-12-03T20:44:19.370953 | 2017-03-25T10:35:13 | 2017-03-25T10:35:13 | 86,149,104 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import unittest
from page import page
from selenium import webdriver
class search_data(unittest.TestCase):
    """UI test: search Google for 'selenium' on the web and image tabs.

    Requires the project-local `page` page-object module and a local
    Firefox/geckodriver installation.
    """

    def setUp(self):
        # Fresh browser session; /ncr ("no country redirect") keeps results
        # on google.com regardless of locale.
        self.driver = webdriver.Firefox()
        self.driver.get("http://www.google.com/ncr")
        self.driver.implicitly_wait(10)
        self.driver.maximize_window()

    def search_google_page(self):
        main_page = page(self.driver)
        images_page = page(self.driver)
        back_main_page = page(self.driver)
        result = main_page.search("selenium")
        assert "www.seleniumhq.org" in result.first_link()
        result_image = images_page.search_image()
        assert "www.seleniumhq.org" in result_image.first_image()
        result = back_main_page.search_all()
        assert "www.seleniumhq.org" in result.first_link()

    def test_search_google_page(self):
        # Bug fix: unittest only discovers methods whose names start with
        # 'test_', so `search_google_page` was never executed by the runner.
        # The original method name is kept for backward compatibility.
        self.search_google_page()

    def tearDown(self):
        self.driver.close()
if __name__ == '__main__':
unittest.main()
| UTF-8 | Python | false | false | 886 | py | 3 | google_search.py | 3 | 0.639955 | 0.637698 | 0 | 29 | 29.551724 | 65 |
braybaud/dtk-tools | 3,083,786,567,209 | 8bf38ee262912b306e325344749f778c521ad008 | 12c7a452fb196a73925a399a7ae62bff0f1bbc08 | /calibtool/resamplers/BaseResampler.py | 7e0a3fc8349e611e2d98fcc4fc7a6cd4ba52ddbb | [
"MIT"
]
| permissive | https://github.com/braybaud/dtk-tools | fec8fb743a01c8cacf2d0d5d4697dec1391af266 | 389ce75ab013a9b1f5fd2a7a0fee6434dd240cde | refs/heads/master | 2020-04-17T02:27:31.863445 | 2020-04-14T17:42:27 | 2020-04-14T17:42:27 | 166,135,502 | 0 | 0 | NOASSERTION | true | 2019-07-16T22:11:04 | 2019-01-17T00:59:18 | 2019-07-14T23:05:53 | 2019-07-16T22:11:04 | 401,488 | 0 | 0 | 0 | Python | false | false | from abc import ABCMeta, abstractmethod
from itertools import zip_longest
import os
from simtools.Analysis.AnalyzeManager import AnalyzeManager
from simtools.ExperimentManager.ExperimentManagerFactory import ExperimentManagerFactory
from calibtool.resamplers.CalibrationPoint import CalibrationPoint, CalibrationParameter
class BaseResampler(metaclass=ABCMeta):
    """Abstract base for resamplers: subclasses supply `resample()`, this base
    runs the resampled points as simulations and attaches likelihoods."""
    def __init__(self):
        self.calib_manager = None # needs setting externally
        self.output_location = None # must be set via setter below
        self.selection_columns = [] # items to strip off resampled points DataFrame and pass to next resampler
        self.selection_values = None # a DataFrame, created using self.selection_columns, added to the resampled points for the next resampler to use

    # strictly required to be defined in subclasses
    @abstractmethod
    def resample(self, calibrated_points, selection_values, initial_calibration_points):
        """Produce the next batch of points to run; must be implemented by subclasses."""
        pass

    # extend if desired in subclasses
    def post_analysis(self, resampled_points, analyzer_results, from_resample=None):
        """Hook run after analysis; the base implementation only ensures the
        output directory exists."""
        os.makedirs(self.output_location, exist_ok=True)

    def set_calibration_manager(self, calib_manager):
        """Attach the calibration manager and derive the output directory from it."""
        self.calib_manager = calib_manager
        self.output_location = os.path.join(calib_manager.name, 'resampling_output')

    def _run(self, points, resample_step):
        """
        This run method is for running simulations, which is the in-common part of resampling.
        :param points: The points to run simulations at.
        :return: The Experiment object for these simulations
        """
        # create a sweep where each point is a separate sim
        if not self.calib_manager:
            raise Exception('calibration manager has not set for resampler. Cannot generate simulations.')
        point_dicts = [point.to_value_dict() for point in points]
        # ck4, the number of replicates must be 1 for HIV for now; the general solution should allow a user-selected
        # replicate count, so long as their likelihood analyzer can handle > 1 replicates.
        exp_builder = self.calib_manager.exp_builder_func(point_dicts, n_replicates=1)
        # Create an experiment manager
        manager = ExperimentManagerFactory.from_cb(self.calib_manager.config_builder)
        exp_name = self.calib_manager.name + '_resample_step_%d' % resample_step
        manager.run_simulations(exp_name=exp_name, blocking=True, exp_builder=exp_builder)
        return manager

    def _analyze(self, experiment, analyzers, points_ran):
        """
        This method is the in-common route for Resamplers to analyze simulations for liklihood.
        :param experiment: the experiment to analyze, should be from self._run()
        :param points_ran: Points objects that were just _run()
        :return: The supplied points_ran with their .likelihood attribute set, AND the direct results of the analyzer
            as a list.
        """
        am = AnalyzeManager(analyzers=analyzers, exp_list=experiment)
        am.analyze()

        # compute a single likelihood value from all of the analyzers on a per-simulation basis
        result_tuples = zip_longest(*[analyzer.results for analyzer in am.analyzers])
        try:
            results = [sum(tup) for tup in result_tuples]
        except TypeError as e: # if 1+ None values snuck in...
            raise type(e)('All analyzers must contain one result per simulation. The result list lengths do not match.')

        for i in range(len(results)):
            # Add the likelihood
            points_ran[i].likelihood = results[i]

        # verify that the returned points all have a likelihood attribute set
        likelihoods_are_missing = True in {point.likelihood is None for point in points_ran}
        if likelihoods_are_missing:
            raise Exception('At least one Point object returned by the provided analyzer does not have '
                            'its .likelihood attribute set.')
        return points_ran, results

    def resample_and_run(self, calibrated_points, resample_step, selection_values, initial_calibration_points):
        """
        Canonical entry method for using the resampler.
        :param calibrated_points: original points to resample from
        :return: (resampled points with likelihoods, selection-values DataFrame)
        """
        # 1. resample
        # The user-provided _resample() method in Resampler subclasses must set the 'Value' in each Point object dict
        # for keying off of in the _run() method above.
        # Any _resample() methodology that depends on the likelihood of the provided points should reference
        # the 'likelihood' attribute on the Point objects (e.g., use mypoint.likelihood, set it in the analyer
        # return points.
        points_to_run, for_post_analysis = self.resample(calibrated_points=calibrated_points,
                                                         selection_values=selection_values,
                                                         initial_calibration_points=initial_calibration_points)
        # # 2. run simulations
        experiment_manager = self._run(points=points_to_run, resample_step=resample_step)
        experiment_manager.wait_for_finished()

        # 3. analyze simulations for likelihood
        self.resampled_points, self.analyzer_results = self._analyze(experiment=experiment_manager.experiment,
                                                                     analyzers=self.calib_manager.analyzer_list,
                                                                     points_ran=points_to_run)

        # 4. perform any post-analysis processing, if defined
        self.post_analysis(self.resampled_points, self.analyzer_results, from_resample=for_post_analysis)

        return self.resampled_points, self.selection_values

    def _transform_df_points_to_calibrated_points(self, calibrated_point, df_points):
        """Convert DataFrame rows back into CalibrationPoint objects; also
        stashes the selection columns in self.selection_values."""
        # build calibration points from dataframe, preserving CalibrationParameter metadata from calibrated_point
        calibrated_points = []
        for index, row in df_points.iterrows():
            parameters = []
            for name in calibrated_point.parameter_names:
                new_parameter = CalibrationParameter.from_calibration_parameter(calibrated_point.get_parameter(name),
                                                                               value=row[name])
                parameters.append(new_parameter)
            calibrated_points.append(CalibrationPoint(parameters))

        self.selection_values = df_points[self.selection_columns].copy()

        return calibrated_points
| UTF-8 | Python | false | false | 6,579 | py | 609 | BaseResampler.py | 431 | 0.660739 | 0.659371 | 0 | 129 | 50 | 149 |
lpierzchala/gameTest | 10,050,223,490,951 | df9cb43c933763c18f957ce3843c27226930542a | 862d23eceb7d4421b2018794de0e911a2d802f47 | /Pages/BasePage.py | 9160b39b712a6bdd39b7b56dc7ac3925408e4ba9 | []
| no_license | https://github.com/lpierzchala/gameTest | df89d828b6d9183e40329b16984a9f010933eb4c | 3d640533a54a27f52deea3f86f3173969fedeb12 | refs/heads/master | 2022-01-18T00:54:33.672068 | 2019-05-16T20:27:39 | 2019-05-16T20:27:39 | 186,867,790 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
class BasePage(object):
    """Common Selenium page-object helpers shared by all page classes."""

    def __init__(self, driver):
        self.driver = driver

    def _wait_until(self, condition, timeout):
        """Poll `condition` for up to `timeout` seconds.

        Returns the located WebElement, or False after printing a message
        when the condition does not become true in time.
        """
        try:
            return WebDriverWait(self.driver, timeout).until(condition)
        except TimeoutException:
            print('Timeout: Cannot find element defined by selector')
            return False

    def findElement(self, locator, timeout=30):
        '''Find the element for the given locator once it becomes visible.

        Returns the WebElement, or False if it does not appear within `timeout`.
        '''
        return self._wait_until(EC.visibility_of_element_located(locator), timeout)

    def findElementToClick(self, locator, timeout=30):
        '''Find the element for the given locator and wait until it is clickable.

        Returns the WebElement, or False if it is not clickable within `timeout`.
        '''
        return self._wait_until(EC.element_to_be_clickable(locator), timeout)
| UTF-8 | Python | false | false | 980 | py | 5 | BasePage.py | 4 | 0.686735 | 0.682653 | 0 | 25 | 38.2 | 103 |
TheJakey/JCP | 7,473,243,129,876 | 7ab276d675aae6e4d8b94b44d3efe85aea0392fb | 35ede0074f53b2c01f10a06a7a0b98c5b6173eca | /sender.py | e7581bff0f11777e8268256bb8762de38862a03e | []
| no_license | https://github.com/TheJakey/JCP | 8ad6048d1c9b58c57f84f1b2c550b609cdda4b42 | f8b12cbba53bc728e2f491485688f70892a1b25d | refs/heads/master | 2020-08-12T00:30:58.164805 | 2019-12-31T12:31:55 | 2019-12-31T12:31:55 | 214,656,178 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cryptograph
import settings
def build_and_send(soc, identifier, flag, fragmentNumber, paycheck, message, *args):
    '''
    Build a protocol packet, encode it, and transmit it as a UDP datagram.

    :param soc: UDP socket used for sending
    :param identifier: message identifier
    :param flag: protocol flag
    :param fragmentNumber: fragment index of this piece
    :param paycheck: payload checksum
    :param message: payload data
    :param args: optional explicit (host, port) destination; when omitted the
        target configured in ``settings`` is used
    :return: the encoded bytes that were sent (an encoded dictionary
        containing the message)
    '''
    packet = {
        'identifier': identifier,
        'flag': flag,
        'fragmented': fragmentNumber,
        'paycheck': paycheck,
        'data': message,
    }
    completeMessage = cryptograph.encode(cryptograph, packet)
    destination = args[0] if args else (settings.ipAddress, settings.target_port)
    soc.sendto(completeMessage, destination)
    return completeMessage
def send_message(soc, completeMessage, *args):
    """Send an already-encoded message over UDP.

    The destination is the optional (host, port) in `args`, falling back to
    the target configured in ``settings``.
    """
    destination = args[0] if args else (settings.ipAddress, settings.target_port)
    soc.sendto(completeMessage, destination)
| UTF-8 | Python | false | false | 1,016 | py | 10 | sender.py | 7 | 0.655512 | 0.651575 | 0 | 38 | 25.736842 | 94 |
OlexaNdrus/big_data_test_task | 4,896,262,747,686 | 9605118cfb31a7d8f58855c6c76e2d87db1313ce | e8e1814a1a8375c8e30cf71331fdff401f97c1f9 | /main.py | edd450d65297599439da61c6b5dc5d8c732ac83b | []
| no_license | https://github.com/OlexaNdrus/big_data_test_task | 9d038b6f9de539ea9685f89da1ac391300b772c4 | 4cd85c2bea95ab73885fea0f0bf31a8b713127f8 | refs/heads/master | 2023-07-13T07:46:57.940980 | 2021-08-16T17:00:48 | 2021-08-16T17:00:48 | 396,887,381 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import numpy as np
products_csv = 'test-task_dataset_summer_products.csv'
def main_body(csv_file):
    """Aggregate product statistics per origin country.

    Reads the product CSV, groups by `origin_country` (missing countries are
    kept as a single NaN group), and returns a DataFrame with the mean price,
    summed rating counts and the five-star percentage per country. The result
    is also written to 'result.csv' in the working directory.
    """
    products_df = pd.read_csv(csv_file)
    products_df = products_df[['price', 'rating_five_count', 'rating_count', 'origin_country']]
    # Bug fix: DataFrame.replace returns a new frame; the original call
    # discarded it, so whitespace-only cells were never converted to NaN.
    products_df = products_df.replace(r'^\s*$', np.nan, regex=True)
    # Temporarily map NaN countries to 'empty' so groupby keeps them as a
    # group, then restore NaN afterwards.
    agg_df = products_df.groupby(
        products_df['origin_country'].fillna('empty')).\
        agg({'price': 'mean', 'rating_five_count': 'sum', 'rating_count': 'sum'}). \
        reset_index().\
        replace({'origin_country': {'empty': np.nan}})
    # Share of five-star ratings in percent; 0/0 divisions become 0, not NaN.
    agg_df['five_percentage'] = (agg_df['rating_five_count'] / agg_df['rating_count'] * 100).fillna(0).round(2)
    agg_df.to_csv(path_or_buf='result.csv', index=False)
    return agg_df
if __name__ == '__main__':
print(main_body(products_csv))
| UTF-8 | Python | false | false | 818 | py | 3 | main.py | 1 | 0.622249 | 0.616137 | 0 | 24 | 33.083333 | 111 |
WN1695173791/CNN-Pytorch | 16,320,875,739,265 | 95202683b0c1ac98531012c58ba702a3f1b8ddfa | 11ce8ed5965de77859aa413469239d1b96ce54c7 | /RunBuilder.py | 4f7bc741119d91745cae68c2e0722beed1af3c02 | []
| no_license | https://github.com/WN1695173791/CNN-Pytorch | 329b5f59388b623ae9e8ba480f9e813fa17c421a | 7ef8c10a06ca545b95424301137fe47b80fbc16b | refs/heads/main | 2023-03-16T14:02:29.526092 | 2021-01-26T06:32:34 | 2021-01-26T06:32:34 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
created by PyCharm
date: 2021/1/15
time: 0:16
user: wkc
"""
from collections import namedtuple
from itertools import product
class RunBuilder:
    """Expand a dict of hyperparameter lists into a list of named-tuple runs
    (the cartesian product of all parameter values)."""

    @staticmethod
    def get_run(params):
        # Static method: no instance is needed to build the run list.
        Run = namedtuple('Run', params.keys())
        return [Run(*combo) for combo in product(*params.values())]
| UTF-8 | Python | false | false | 393 | py | 6 | RunBuilder.py | 2 | 0.617251 | 0.590296 | 0 | 20 | 17.55 | 46 |
HephyAnalysisSW/TopEFT | 13,116,830,142,360 | 0737af286e9c58704163dba5492d71ff666b6314 | aef8eb6681e555ecb61ac67151e4c54d6fdd1023 | /plots/plotsDaniel/SUSY/ttZcomposition.py | 434ce10c92ad0c8a10084351604c9a9b70c90ee7 | []
| no_license | https://github.com/HephyAnalysisSW/TopEFT | 0e2dc89f7a43bacf50c77a042f56663e9d4f3404 | 53174807c96dffa6654e4dc63bef92f2b71706ee | refs/heads/master | 2022-11-07T02:41:53.120759 | 2020-03-31T08:08:27 | 2020-03-31T08:08:27 | 98,643,866 | 0 | 3 | null | false | 2019-10-14T09:02:09 | 2017-07-28T11:38:23 | 2019-10-14T08:17:14 | 2019-10-14T09:02:08 | 5,222 | 1 | 5 | 0 | Python | false | false | ''' Analysis script for 1D 2l plots (RootTools)
'''
#Standard imports
import ROOT
from math import sqrt, cos, sin, pi, acos
import itertools,os
import copy
import argparse
argParser = argparse.ArgumentParser(description = "Argument parser")
argParser.add_argument('--small', action='store_true', help="For testing")
argParser.add_argument('--noData', action='store_true', help="Omit data")
argParser.add_argument('--logLevel', action='store', default='INFO', nargs='?', choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'TRACE', 'NOTSET'], help="Log level for logging")
args = argParser.parse_args()
#RootTools
from RootTools.core.standard import *
from TopEFT.Tools.user import data_directory
data_directory = "/afs/hephy.at/data/dspitzbart02/cmgTuples/"
postProcessing_directory = "TopEFT_PP_2016_mva_v19/dilep/"
from TopEFT.samples.color import color
from TopEFT.Tools.cutInterpreter import cutInterpreter
from TopEFT.Tools.helpers import deltaR, deltaR2, deltaPhi
from TopEFT.Tools.objectSelection import getJets
from TopEFT.Tools.objectSelection import getFilterCut
from TopEFT.Tools.triggerSelector import triggerSelector
from TopEFT.Tools.user import plot_directory
import TopEFT.Tools.logger as logger
import RootTools.core.logger as logger_rt
logger = logger.get_logger( args.logLevel, logFile = None)
logger_rt = logger_rt.get_logger(args.logLevel, logFile = None)
dirs = {}
dirs['TTZ'] = ['TTZToLLNuNu_ext','TTZToQQ', 'TTZToLLNuNu_m1to10']#, 'TTZToQQ']
dirs['TTZToLLNuNu'] = ['TTZToLLNuNu_m1to10', 'TTZToLLNuNu_ext']
dirs['TTZToQQ'] = ['TTZToQQ']
directories = { key : [ os.path.join( data_directory, postProcessing_directory, dir) for dir in dirs[key]] for key in dirs.keys()}
# Define samples
TTZ = Sample.fromDirectory(name="TTZ", treeName="Events", isData=False, color=color.TTZ, texName="t#bar{t}Z", directory=directories['TTZ'])
TTZToLLNuNu = Sample.fromDirectory(name="TTZToLLNuNu", treeName="Events", isData=False, color=color.TTZ, texName="t#bar{t}Z(ll, #nu#nu)", directory=directories['TTZToLLNuNu'])
TTZToQQ = Sample.fromDirectory(name="TTZToQQ", treeName="Events", isData=False, color=color.WZ, texName="t#bar{t}Z(qq)", directory=directories['TTZToQQ'])
dilepSelection = cutInterpreter.cutString('lepSelDilepSUSY-njet3p-btag1p-mt2ll100-met80')
dilepSelection += '&&nlep==2&&nLeptons_tight_3l==2&&((nElectrons_tight_3l==1&&nMuons_tight_3l==1)||(nElectrons_tight_3l==2&&abs(Z_mass-91.2)>10)||(nMuons_tight_3l==2&&abs(Z_mass-91.2)>10))&&genZ_pt>=0'
#dilepSelection = cutInterpreter.cutString('njet3p-btag1p') + '&&genZ_pt>=0'
#dilepSelection += '&&(abs(genZ_daughter_flavor)==12||abs(genZ_daughter_flavor)==14||abs(genZ_daughter_flavor)==16)'
invisibleSelection = dilepSelection + '&&(abs(genZ_daughter_flavor)==12||abs(genZ_daughter_flavor)==14||abs(genZ_daughter_flavor)==16)'
leptonicSelection = dilepSelection + '&&(abs(genZ_daughter_flavor)==11||abs(genZ_daughter_flavor)==13)'
tauSelection = dilepSelection + '&&(abs(genZ_daughter_flavor)==15)'
hadronicSelection = dilepSelection + '&&(abs(genZ_daughter_flavor)<7)'
selection = "&&".join([dilepSelection])
## Sequence
read_variables = ["weight/F",
"jet[pt/F,eta/F,phi/F,btagCSV/F,DFb/F,DFbb/F,id/I,btagDeepCSV/F]", "njet/I","nJetSelected/I",
"lep[pt/F,eta/F,phi/F,pdgId/I]", "nlep/I",
"met_pt/F", "met_phi/F", "metSig/F", "ht/F", "nBTag/I",
"genZ_pt/F",
"Z_l1_index/I", "Z_l2_index/I", "nonZ_l1_index/I", "nonZ_l2_index/I",
"Z_phi/F","Z_pt/F", "Z_mass/F", "Z_eta/F","Z_lldPhi/F", "Z_lldR/F"
]
sequence = []
## Plotting
lumi_scale = 35.9
noData = args.noData
small = args.small
def drawObjects( plotData, dataMCScale, lumi_scale ):
    """Build the CMS-label and luminosity TLatex objects drawn on each plot."""
    latex = ROOT.TLatex()
    latex.SetNDC()
    latex.SetTextSize(0.04)
    latex.SetTextAlign(11)  # align right
    if plotData:
        header = (0.15, 0.95, 'CMS Preliminary')
        lumi_line = (0.45, 0.95, 'L=%3.1f fb{}^{-1} (13 TeV) Scale %3.2f'% ( lumi_scale, dataMCScale ) )
    else:
        header = (0.15, 0.95, 'CMS Simulation')
        lumi_line = (0.45, 0.95, 'L=%3.1f fb{}^{-1} (13 TeV)' % lumi_scale)
    drawn = []
    for entry in (header, lumi_line):
        drawn.append(latex.DrawLatex(*entry))
    return drawn
def drawPlots(plots, dataMCScale):
    """Render every filled plot twice (linear and log y-axis) to disk.

    Relies on module-level globals: ``small``, ``noData``,
    ``plot_directory`` and ``lumi_scale``.
    """
    for log in [False, True]:
        # encode the run options in the output directory name
        ext = "_small" if small else ""
        ext += "_log" if log else ""
        plot_directory_ = os.path.join(plot_directory, 'ttZcomposition', 'test_dilep_mt2ll100_met80%s'%ext)
        for plot in plots:
            if not max(l[0].GetMaximum() for l in plot.histos): continue # Empty plot
            extensions_ = ["pdf", "png", "root"]
            plotting.draw(plot,
                plot_directory = plot_directory_,
                extensions = extensions_,
                # a data/MC ratio panel only makes sense when data is drawn
                ratio = {'yRange':(0.1,1.9)} if not noData else None,
                logX = False, logY = log, sorting = True,
                yRange = (0.03, "auto") if log else (0.001, "auto"),
                # legend box grows downward with the number of entries; 2 columns
                legend = [ (0.15,0.9-0.03*sum(map(len, plot.histos)),0.9,0.9), 2],
                drawObjects = drawObjects( not noData, dataMCScale , lumi_scale ),
                copyIndexPHP = True,
            )
# Samples
#DY_HT_LO.read_variables = [VectorTreeVariable.fromString('jet[hadronFlavour/I]') ]
TTZ_invisible = copy.deepcopy(TTZ)
TTZ_leptonic = copy.deepcopy(TTZ)
TTZ_tau = copy.deepcopy(TTZ)
TTZ_hadronic = copy.deepcopy(TTZ)
TTZ_invisible.color = ROOT.kYellow+1
TTZ_invisible.texName = "t#bar{t}Z(inv)"
TTZ_leptonic.color = ROOT.kOrange+8
TTZ_leptonic.texName = "t#bar{t}Z(ll)"
TTZ_tau.color = ROOT.kRed-3
TTZ_tau.texName = "t#bar{t}Z(#tau#tau)"
TTZ_hadronic.color = ROOT.kGreen+3
TTZ_hadronic.texName = "t#bar{t}Z(qq)"
#mc = [DY_twoTrueBsElse, DY_twoTrueBsFromG, DY_twoTrueBsFromP, DY_twoTrueBsFromQ, DY_oneTrueBs, DY_fakeBs,TTX,boson, Top]
mc = [TTZ_leptonic,TTZ_tau,TTZ_hadronic,TTZ_invisible]
for s in mc:
# s.setSelectionString([getFilterCut(isData=False), tr.getSelection("MC")])
s.read_variables = ['reweightPU36fb/F', 'reweightBTagDeepCSV_SF/F']
# s.weight = lambda event, s: event.reweightBTagDeepCSV_SF*event.reweightPU36fb
s.style = styles.fillStyle(s.color)
# s.scale = lumi_scale
TTZ_invisible.addSelectionString([invisibleSelection])
TTZ_leptonic.addSelectionString([leptonicSelection])
TTZ_tau.addSelectionString([tauSelection])
TTZ_hadronic.addSelectionString([hadronicSelection])
Data = TTZ
#Data.setSelectionString([getFilterCut(isData=True)]) #trigger already applied in post-processing
Data.style = styles.errorStyle(ROOT.kBlack)
Data.texName = "t#bar{t}Z"
if small:
for s in mc + [Data]:
s.reduceFiles( to = 1 )
stack = Stack(mc, Data)
weight_ = lambda event, sample: event.weight
Plot.setDefaults(stack = stack, weight = staticmethod(weight_), selectionString = selection, addOverFlowBin='upper')
plots = []
plots.append(Plot(
name = 'dl_mass', texX = 'M(ll) (GeV)', texY = 'Number of Events',
attribute = TreeVariable.fromString( "Z_mass/F" ),
binning=[40,0,200],
))
plots.append(Plot(
name = 'nBTag', texX = 'N_{b-tag}', texY = 'Number of Events',
attribute = TreeVariable.fromString( "nBTag/I" ),
binning=[4,-0.5,3.5],
))
plots.append(Plot(
name = 'nJetSelected', texX = 'N_{jets}', texY = 'Number of Events',
attribute = TreeVariable.fromString( "nJetSelected/I" ),
binning=[8,-0.5,7.5],
))
plots.append(Plot(
texX = 'E_{T}^{miss} (GeV)', texY = 'Number of Events / 20 GeV',
attribute = TreeVariable.fromString( "met_pt/F" ),
binning=[400/20,0,400],
))
plots.append(Plot(
texX = 'M_{T2}(ll) (GeV)', texY = 'Number of Events / 20 GeV',
attribute = TreeVariable.fromString( "dl_mt2ll/F" ),
binning=[10,0,400],
))
plots.append(Plot(
texX = 'M_{T2}(blbl) (GeV)', texY = 'Number of Events / 20 GeV',
attribute = TreeVariable.fromString( "dl_mt2blbl/F" ),
binning=[10,0,400],
))
plotting.fill(plots, read_variables = read_variables, sequence = sequence)
dataMCScale = 1
drawPlots(plots, dataMCScale)
| UTF-8 | Python | false | false | 8,031 | py | 190 | ttZcomposition.py | 179 | 0.665048 | 0.640269 | 0 | 202 | 38.752475 | 212 |
tamagrijr/pocketTrainer | 1,108,101,579,964 | 84c5e590f7f0d605051018e9fc586106493c0603 | 83dd01fe1bd1bb241299bc82fe87a12b38cea88c | /app/models/exercise.py | ecd2b23664b482495bc7d7d4a469b2fe4f33eacb | []
| no_license | https://github.com/tamagrijr/pocketTrainer | 2c88475270c397520af1a4d86f2792d8dd124d86 | 25232864ff676dd0092a37d409502c3fb2b9af9a | refs/heads/main | 2023-04-07T16:57:35.479451 | 2021-04-20T13:36:31 | 2021-04-20T13:36:31 | 317,941,681 | 3 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .db import db
class Exercise(db.Model):
    """One prescribed exercise inside a training session.

    Joins a Workout (the movement) to a Session and carries the
    prescription details (sets, reps, tempo, ...) as free-form strings.
    """

    __tablename__ = "exercises"
    id = db.Column(db.Integer, primary_key=True)
    sessionId = db.Column(db.Integer, db.ForeignKey('sessions.id'), nullable=False)
    workoutId = db.Column(db.Integer, db.ForeignKey('workouts.id'), nullable=False)
    # position of this exercise within its session
    order = db.Column(db.Integer)
    # prescription fields are strings, presumably so ranges like "8-12"
    # can be stored -- confirm against the front-end
    sets = db.Column(db.String)
    reps = db.Column(db.String)
    setType = db.Column(db.String)
    tempo = db.Column(db.String)
    rest = db.Column(db.String)
    time = db.Column(db.String)
    additionalComments = db.Column(db.Text)
    # removal flag; rows appear to be soft-deleted rather than dropped --
    # confirm with callers
    removed = db.Column(db.Boolean, nullable=False, default=False)
    # timestamps are maintained by the database server, not the app
    created_on = db.Column(db.DateTime, server_default=db.func.now())
    updated_on = db.Column(db.DateTime, server_default=db.func.now(), server_onupdate=db.func.now())

    #RELATIONSHIPS
    session = db.relationship('Session',
        back_populates='exercises'
    )
    workout = db.relationship('Workout',
        back_populates='exercise'
    )
    user_exercise = db.relationship('UserExercise',
        back_populates='exercise'
    )

    def to_dict(self):
        """Serialize for the API, embedding the related workout's dict."""
        return {
            'id': self.id,
            'order': self.order,
            'sessionId': self.sessionId,
            'workoutId': self.workoutId,
            'sets': self.sets,
            'reps': self.reps,
            'setType': self.setType,
            'tempo': self.tempo,
            'rest': self.rest,
            'time': self.time,
            'additionalComments': self.additionalComments,
            'workout': self.workout.to_dict(),
            'removed': self.removed,
        }

    def exercise_id(self):
        """Return this row's primary key."""
        return self.id
| UTF-8 | Python | false | false | 1,518 | py | 85 | exercise.py | 76 | 0.660079 | 0.660079 | 0 | 52 | 28.192308 | 98 |
Wotipati/studyPyQt | 4,750,233,860,495 | 36c7c1e1814a4492c921c9a104eda322d3be9838 | 6117b365a3afd77207f0a99d0040f45f720ac71f | /07Slider/07Slider.py | 76888c623148af914e47615b62009c06063d4c72 | []
| no_license | https://github.com/Wotipati/studyPyQt | d7e196d508e9b2f1dacd047d3c0d0b1df55c0a6a | 2eafc707bd216fd3c9a620d801e4fa8768f5712c | refs/heads/master | 2021-09-10T07:45:57.012098 | 2018-03-22T10:19:30 | 2018-03-22T10:19:30 | 105,001,539 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import sys
from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QSlider, QLabel, QAction, QVBoxLayout,\
QHBoxLayout, QLCDNumber, QLineEdit
from PyQt5.QtCore import Qt, QCoreApplication
from PyQt5.QtGui import QPixmap, QIcon
class Slider(QMainWindow):
    """Demo window with three slider groups: a color (heat-map) slider,
    an LCD counter slider, and a 0-1000 'yen' slider mirrored in a text box."""

    def __init__(self):
        super().__init__()
        # widgets shared across methods are kept as attributes
        self.color_slider = QSlider(Qt.Horizontal, self)
        self.color_label = QLabel(self)
        self.main_widget = QWidget(self)
        self.setCentralWidget(self.main_widget)
        self.main_layout = QVBoxLayout()
        self.text_box = QLineEdit()
        # ten heat-map images, one per slider decade (0-9, 10-19, ...;
        # see change_value below)
        self.heat_maps_path = ['./heatMap/color0.png', './heatMap/color1.png', './heatMap/color2.png',
                               './heatMap/color3.png', './heatMap/color4.png', './heatMap/color5.png',
                               './heatMap/color6.png', './heatMap/color7.png', './heatMap/color8.png',
                               './heatMap/color9.png']
        self.heat_map_pix = []
        self.init_ui()

    def init_ui(self):
        """Build the widget tree, wire up signals, and show the window."""
        # preload and downscale every heat-map image once
        for heat_map_path in self.heat_maps_path:
            pixmap = QPixmap(heat_map_path)
            pixmap_resized = pixmap.scaled(129, 16)
            self.heat_map_pix.append(pixmap_resized)

        # --- color slider: moving it swaps the heat-map label ---
        self.color_slider.setFocusPolicy(Qt.NoFocus)
        self.color_slider.valueChanged[int].connect(self.change_value)
        self.color_label.setPixmap(self.heat_map_pix[0])
        color_slider_layout = QHBoxLayout()
        color_slider_layout.addWidget(self.color_slider)
        color_slider_layout.addWidget(self.color_label)

        # --- counter slider: drives an LCD readout and the color slider ---
        lcd = QLCDNumber(self)
        counter_slider = QSlider(Qt.Horizontal, self)
        counter_slider.valueChanged.connect(lcd.display)
        #self.color_slider.valueChanged.connect(counter_slider.setValue)
        counter_slider.valueChanged.connect(self.color_slider.setValue)
        counter_layout = QVBoxLayout()
        counter_layout.addWidget(lcd)
        counter_layout.addWidget(counter_slider)

        # --- 'yen' slider: 0-1000 range mirrored into the text box ---
        large_slider = QSlider(Qt.Horizontal)
        large_slider_label = QLabel('(yen)')
        large_slider.setRange(0, 1000)
        large_slider.setValue(100)
        large_slider.setTickPosition(QSlider.TicksBelow)
        large_slider.valueChanged.connect(self.draw_large_number)
        large_slider_layout = QHBoxLayout()
        large_slider_layout.addWidget(large_slider)
        large_slider_layout.addWidget(large_slider_label)
        text_box_layout = QHBoxLayout()
        text_box_label = QLabel('(yen)')
        text_box_layout.addWidget(self.text_box)
        text_box_layout.addWidget(text_box_label)
        yen_slider_layout = QVBoxLayout()
        yen_slider_layout.addLayout(large_slider_layout)
        yen_slider_layout.addLayout(text_box_layout)

        # --- toolbar with an exit action (Ctrl+Q) ---
        exit_action = QAction(QIcon('./icon/exit.png'), '&Exit', self)
        exit_action.setShortcut('Ctrl+Q')
        exit_action.setStatusTip('Exit Application')
        exit_action.triggered.connect(QCoreApplication.instance().quit)
        toolbar = self.addToolBar('&toolbar')
        toolbar.addAction(exit_action)

        # assemble the three groups vertically in the central widget
        self.main_layout.addLayout(color_slider_layout)
        self.main_layout.addLayout(counter_layout)
        self.main_layout.addLayout(yen_slider_layout)
        self.main_widget.setLayout(self.main_layout)
        self.setGeometry(200, 300, 500, 400)
        self.setWindowTitle('slider')
        self.show()

    def change_value(self, value):
        """Show the heat-map image for the decade that ``value`` falls in."""
        # enumerate from 1 so bucket i covers [(i-1)*10, i*10)
        for i, pixmap in enumerate(self.heat_map_pix, 1):
            if (i-1)*10 <= value and value < i*10:
                self.color_label.setPixmap(pixmap)
                break

    def draw_large_number(self, value):
        """Mirror the yen slider's value into the text box."""
        self.text_box.setText(str(value))
def main():
    """Create the Qt application and block until the window is closed."""
    application = QApplication(sys.argv)
    main_window = Slider()  # keep a reference so the window is not garbage-collected
    sys.exit(application.exec_())
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 3,893 | py | 18 | 07Slider.py | 9 | 0.628307 | 0.616491 | 0 | 111 | 34.063063 | 103 |
CTimmerman/PyPico8 | 4,681,514,365,942 | 999451c7645d7443a063b1248bc46a7f19431475 | 695721571bc292ef5a249e73d3117d4f597c0270 | /src/music_generator.py | 6d0d65a4144e12de9c28a96e2fd23a673d18671f | [
"MIT"
]
| permissive | https://github.com/CTimmerman/PyPico8 | fbe2ee065f916a74955ba106e4280bf3ef68cd06 | a68c83ae5a9dc53221ab39d6e55bb68bb5a1e479 | refs/heads/main | 2023-04-18T23:31:53.199356 | 2021-05-04T22:48:46 | 2021-05-04T22:48:46 | 337,238,311 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Music Generator ported from https://www.youtube.com/watch?v=1EWR6gVyPh4
"""
# fmt: off
from pypico8 import cls, flip, flr, line, pico8_to_python, poke, printh, pset, sfx, stat, Table, rnd, run
# fmt: on
printh(
pico8_to_python(
r"""
q=poke
r=0x3200
s={0x2d2c,3,65,16,67,32}
t={0,2,3,5,7}
for i=1,#s,2 do
q(r+s[i],s[i+1])
end
sfx(0)
cls()
::_::
flip()
l=stat(20)
pset(2*l,44,15)
if l==31 then
cls()
for x=0,31 do
y=r+x*2
n=12+t[flr(rnd(#t)+1)]
q(y,n)
q(y+1,94-x%8*2)
line(2*x,42,2*x,42-n,1.7*(7-x%8))
end
end
goto _"""
)
)
def _init():
    """Pico-8 init callback: write the seed notes into audio memory,
    start sfx 0 and clear the screen."""
    global r, s, t
    r = 0x3200 # 12800 - audio pointer
    s = Table([0x2D2C, 3, 65, 16, 67, 32]) # 11564? TODO: https://pico-8.fandom.com/wiki/Memory#Sound_effects
    t = Table([0, 2, 3, 5, 7])
    # s holds (offset, value) pairs; poke each pair relative to the audio base
    for i in range(1, len(s) + 1, 2):
        poke(r + (s[i] or 0), s[i + 1] or 0) # write notes
    sfx(0)
    cls()
def _update():
    """Pico-8 update callback: this cart does all of its work in _draw."""
    pass
def _draw():
    """Pico-8 draw callback: show the playback cursor and regenerate a
    random melody each time the 32-note pattern wraps around."""
    global r, t
    flip()
    note = stat(20)
    # playback cursor: one pixel per note
    pset(2 * note, 44, 15)
    if note == 31:
        # last note reached: clear and write a fresh random pattern
        cls()
        for x in range(0, 31 + 1):
            y = r + x * 2
            # pitch = 12 plus a random offset from the t table
            n = 12 + t[flr(rnd(len(t)) + 1)]
            poke(y, n)
            poke(y + 1, 94 - x % 8 * 2)
            # bar whose height tracks the pitch; color cycles every 8 notes
            line(2 * x, 42, 2 * x, 42 - n, 1.7 * (7 - x % 8))
run(_init, _update, _draw) | UTF-8 | Python | false | false | 1,394 | py | 58 | music_generator.py | 55 | 0.464849 | 0.368006 | 0 | 70 | 18.928571 | 110 |
computer0796579/hello-world | 4,544,075,430,863 | ace07efde522205e5d1d0c6154b93a0949cc531e | 55f1bd45eabc3d5d4baf17c03ec0499018c9247d | /schedule.py | 9fa8716fcae954b1aeed63c59074a1b8f7b361e3 | []
| no_license | https://github.com/computer0796579/hello-world | a4352c3d9372dd78c92062b77fadafde2796dcfd | 6355de030937fed4bdb68f89a62193897c18764c | refs/heads/main | 2023-06-27T20:31:32.936572 | 2021-08-03T10:45:52 | 2021-08-03T10:45:52 | 387,781,493 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | myteam = {
"emp1" : {
"name" : "X",
"schedule" : "A/T"
},
"emp2" : {
"name" : "Y",
"schedule" : "B"
},
"emp3" : {
"name" : "Z",
"schedule" : "C"
}
}
d = iter(myteam.items())
print(next(d))
| UTF-8 | Python | false | false | 245 | py | 5 | schedule.py | 5 | 0.355102 | 0.342857 | 0 | 18 | 11.611111 | 24 |
depopescu/Django-2-Web-Development-Cookbook-Third-Edition | 16,063,177,690,330 | 1f8c442dffbb688f2864e45d1f4d778574ff2f9e | 5bf282e8e2f7c39e42b9433b46efba48b2c7e774 | /Chapter12/myproject_docker/project/urls.py | 165291015e220591040d2d1a15a52053c8904a6b | [
"MIT"
]
| permissive | https://github.com/depopescu/Django-2-Web-Development-Cookbook-Third-Edition | 1b8bef30fef4614088180a669c452e796cbaa312 | f129613e2b1d00f5c76649025ae4d568f6286f2c | refs/heads/master | 2023-08-14T06:10:28.559231 | 2021-01-15T11:16:23 | 2021-01-15T11:16:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """myproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
import os
from django.conf import settings
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
from django.contrib.auth.urls import urlpatterns as auth_patterns
from django.contrib.auth.views import LoginView
from django.urls import include, path
from tastypie.api import Api
from bulletin_board.api import BulletinResource, CategoryResource
from bulletin_board.views import (RESTBulletinList,
RESTBulletinDetail)
from utils.views import render_js
v1_api = Api(api_name="v1")
v1_api.register(CategoryResource())
v1_api.register(BulletinResource())
urlpatterns = []
try:
    # django-debug-toolbar is an optional development dependency: mount
    # its URLs only when the package is importable.
    import debug_toolbar

    urlpatterns += [
        path('__debug__/', include(debug_toolbar.urls)),
    ]
except ImportError:
    # deliberate best-effort -- production installs won't have the toolbar
    pass
urlpatterns += [
path('api/', include(v1_api.urls)),
path("api-auth/",
include("rest_framework.urls",
namespace="rest_framework")),
path("rest-api/bulletin-board/",
RESTBulletinList.as_view(),
name="rest_bulletin_list"),
path("rest-api/bulletin-board/<int:pk>",
RESTBulletinDetail.as_view(),
name="rest_bulletin_detail"),
path('admin/', admin.site.urls),
path('bulletins/', include('bulletin_board.urls')),
path('cv/', include('cv.urls')),
path('dashboard/', include('external_auth.urls')),
path('email/', include('email_messages.urls')),
path('like/', include('likes.urls')),
path('locations/', include('locations.urls')),
path('login/', LoginView.as_view(), name='user-login'),
path('movies/', include('movies.urls')),
path('products/', include('products.urls')),
path('videos/', include('viral_videos.urls')),
path('', include((auth_patterns, 'auth'))),
path('', include('social_django.urls', namespace='social')),
path('', include('cms.urls')),
]
urlpatterns += i18n_patterns(
path('quotes/', include('quotes.urls')),
path('search/', include('haystack.urls')),
path("js-settings/", render_js,
{"template_name": "settings.js"},
name="js_settings"),
)
| UTF-8 | Python | false | false | 2,729 | py | 138 | urls.py | 98 | 0.662147 | 0.655185 | 0 | 78 | 33.987179 | 77 |
geenutts/trinity | 10,677,288,727,841 | bfafdd5f5540c80d7a1670d8c7f500d6861d8e0b | 0a3fe43b5b092d8f257dd6f0e15a8df4935b9bf8 | /eth2/beacon/state_machines/__init__.py | 1827602627f724d1e2846f2205b2558be32c2edc | [
"MIT"
]
| permissive | https://github.com/geenutts/trinity | f5358d94daa4fd0ea2236db9d9b38d9649b1d7ca | 1b667f50d15524b5f11ae6c8046f04ba54ba7a1e | refs/heads/eth2_sim | 2023-04-13T12:19:37.350131 | 2023-04-08T07:20:46 | 2023-04-08T07:20:46 | 603,926,025 | 0 | 0 | MIT | true | 2023-04-08T07:20:47 | 2023-02-20T00:22:28 | 2023-04-08T07:19:33 | 2023-04-08T07:20:46 | 18,231 | 0 | 0 | 0 | Python | false | false | https://github.com/geenutts/trinity/blob/eth2_sim/eth2/beacon/state_machines/state_transitions.py | UTF-8 | Python | false | false | 97 | py | 1 | __init__.py | 1 | 0.835052 | 0.814433 | 0 | 1 | 97 | 97 |
akhileshvvn/django-settime | 2,817,498,560,611 | 50f0951ef670e730714fb278d974cae14ee4480d | 35c1d0bb6d5e8b467f4150085b964b790ed64589 | /Slotbooking/syat/urls.py | cc01ccccf2a7234b626a5b4fec5624dee2ba8169 | []
| no_license | https://github.com/akhileshvvn/django-settime | b9180ef7d92f877151dbff3df469c0b42e16a3ce | f07eb0ae3926c87cd2d6a4e0cffca237a5af1de3 | refs/heads/master | 2022-10-11T18:55:32.303415 | 2020-06-14T10:50:23 | 2020-06-14T10:50:23 | 272,179,120 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf.urls import url,include
from . import views
# namespace used when reversing these routes, e.g. 'syat:fillform'
app_name = 'syat'
# three routes, each dispatching to a view in this app's views module
urlpatterns = [
    url(r'^form/$',views.fill, name = 'fillform'),
    url(r'^error/$',views.error,name='error'),
    url(r'^book/$',views.book,name='book'),
]
keithpenney/Python | 6,038,724,057,833 | 62aa051b0abb77a88c45467eab0f93142e67a126 | 13998a4aedf1945199549da9a5b633959581c164 | /PyQt/t20.py | 97ad11c67b25b13f0d09b49a43a8360739658ac5 | []
| no_license | https://github.com/keithpenney/Python | 9bb466e8fc053ac0a88eb1da9930cf192b523d27 | 94567c6bada51b5389ded0cb5313ff07ebce3efc | refs/heads/master | 2020-03-02T18:26:21.472928 | 2020-02-22T22:33:29 | 2020-02-22T22:33:29 | 102,804,228 | 0 | 0 | null | false | 2017-09-09T05:13:36 | 2017-09-08T01:46:40 | 2017-09-08T01:48:55 | 2017-09-09T05:13:36 | 6 | 0 | 0 | 0 | Python | null | null | """
PyQt4 tutorial
Color-select dialog
http://zetcode.com/gui/pyqt4/
"""
import sys
from PyQt4 import QtGui
class Example(QtGui.QWidget):
    """Demo widget (Python 2 / PyQt4): a button opens a color dialog and
    a frame is repainted with the chosen color."""

    def __init__(self):
        super(Example, self).__init__()
        self.initUI()

    def initUI(self):
        """Initiate the user interface"""
        col = QtGui.QColor(0, 0, 0) # initial color: black (RGB)
        self.btn = QtGui.QPushButton('Dialog', self)
        self.btn.move(40, 40)
        self.btn.clicked.connect(self.showDialog)
        self.frm = QtGui.QFrame(self) # a generic frame used as the color swatch
        self.frm.setStyleSheet("QWidget { background-color: %s }" % col.name())
        print col.name() # col.name() returns a hex RGB string, e.g. #000000
        print type(col.name()) # a QString, not a Python str
        self.frm.setGeometry(260, 44, 200, 200) # size/position of the color frame
        self.btn2 = QtGui.QPushButton("?", self.frm) # child widget placed inside the frame
        self.btn2.move(20, 60) # coordinates are relative to the parent's top-left corner
        self.setGeometry(600, 600, 500, 360)
        self.setWindowTitle("PyQt4 Color Picker")
        self.show()

    def showDialog(self):
        """Open the modal color picker and repaint the frame with the result."""
        col = QtGui.QColorDialog.getColor()
        if col.isValid(): # presumably invalid when the dialog is cancelled -- confirm
            self.frm.setStyleSheet("QWidget { background-color: %s }" % col.name())
            print col.name()
            print type(col.name())
def main():
    """Main program loop. Called if __name__ == "__main__" """
    app = QtGui.QApplication(sys.argv)
    ex = Example()  # keep a reference so the widget isn't garbage-collected
    sys.exit(app.exec_())
if __name__ == "__main__":
main() | UTF-8 | Python | false | false | 1,777 | py | 78 | t20.py | 61 | 0.611142 | 0.583568 | 0 | 58 | 29.655172 | 121 |
simon-ritchie/apyscript | 4,982,162,080,760 | 31ca26faa709034b2c6be61b63eb350b5f87b5d0 | 175bf8bae6f380e2134fe7332d5ee4cfca756c0a | /apysc/_event/animation_event.py | 6d38bf8159c2f9897a3d91762069964f861fbeea | [
"MIT",
"CC-BY-4.0"
]
| permissive | https://github.com/simon-ritchie/apyscript | 7fb8a4b7bf75a5189127b59b78f55e4185918c54 | 6e3f2881f40deeb5409e93cf0a8971819845e689 | refs/heads/main | 2023-03-19T12:38:17.113129 | 2023-03-18T02:35:36 | 2023-03-18T02:35:36 | 334,394,290 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Class implementation for the animation event.
"""
from typing import Generic
from typing import TypeVar
from typing_extensions import final
from apysc._animation import animation_base
from apysc._event.event import Event
from apysc._html.debug_mode import add_debug_info_setting
from apysc._type.variable_name_mixin import VariableNameMixIn
_Target = TypeVar("_Target", bound=VariableNameMixIn)
class AnimationEvent(Event, Generic[_Target]):
    """
    Event passed to animation-related handlers (for example, completion
    callbacks). It wraps the `AnimationBase` setting instance the
    handler was registered on; the animated target is reachable via
    ``event.this.target``.
    """

    # animation setting instance this event was dispatched for
    _this: "animation_base.AnimationBase[_Target]"

    @final
    @add_debug_info_setting(module_name=__name__)
    def __init__(self, *, this: "animation_base.AnimationBase[_Target]") -> None:
        """
        Create an animation event bound to the given animation setting.

        Parameters
        ----------
        this : AnimationBase
            Animation setting instance the event originates from.
        """
        # local import (presumably avoids a circular import at module load)
        from apysc._expression import var_names

        super().__init__(this=this, type_name=var_names.ANIMATION_EVENT)

    @property
    @add_debug_info_setting(module_name=__name__)
    def this(self) -> "animation_base.AnimationBase[_Target]":
        """
        Animation setting instance that is listening to this event.

        Returns
        -------
        this : AnimationBase
            Instance of listening to this event.
        """
        return self._this
| UTF-8 | Python | false | false | 3,225 | py | 1,701 | animation_event.py | 889 | 0.562791 | 0.551628 | 0 | 98 | 30.908163 | 86 |
spedas/pyspedas | 1,297,080,150,889 | 16a16b705afe684775e88c862377fd904a57b401 | 88dd4380e0d33d4a118ca4e69e4ca9b1c8f45e1f | /pyspedas/particles/spd_part_products/spd_pgs_limit_range.py | d40109a137760e623866c423a94938ce8604c3cf | [
"MIT"
]
| permissive | https://github.com/spedas/pyspedas | 16d34015961e3a4d3eaf8637d3cb6abca95df1b1 | 1d07b148753afa96e148c5835ed9545c507577da | refs/heads/master | 2023-09-01T16:07:47.131334 | 2023-08-25T17:15:35 | 2023-08-25T17:15:35 | 167,614,292 | 125 | 61 | MIT | false | 2023-09-08T18:41:27 | 2019-01-25T21:11:14 | 2023-08-30T12:49:43 | 2023-09-08T18:41:26 | 84,582 | 117 | 51 | 92 | Python | false | false |
def spd_pgs_limit_range(data, phi=None, theta=None, energy=None):
    """
    Applies phi, theta, and energy limits to data structure(s) by
    turning off the corresponding bin flags.

    Input:
        data: dict
            Particle data structure; 'phi', 'dphi', 'theta', 'dtheta',
            'energy' and 'bins' are assumed to be numpy arrays of
            matching shape (only the keys needed for the requested
            limits are accessed).

    Parameters:
        phi: sequence of 2 floats
            Minimum and maximum values for phi, in degrees.  A range with
            phi[0] > phi[1] is treated as wrapping through 0/360.
        theta: sequence of 2 floats
            Minimum and maximum values for theta
        energy: sequence of 2 floats
            Minimum and maximum values for energy

    Returns:
        Data structure with limits applied (to the bins array); the
        input dict is modified in place and also returned.
    """
    # if no limits are set, return the input data unchanged
    if energy is None and theta is None and phi is None:
        return data

    # apply the phi limits
    if phi is not None:
        # get min/max phi values for all bins
        phi_min = data['phi'] - 0.5*data['dphi']
        # parenthesized so the modulo wraps the bin edge itself;
        # the original "phi + 0.5*dphi % 360" applied % to dphi only,
        # so edges past 360 never wrapped and the logic below was dead
        phi_max = (data['phi'] + 0.5*data['dphi']) % 360

        # wrap negative values
        phi_min[phi_min < 0.0] += 360

        # the code below and the phi spectrogram code
        # assume maximums at 360 are not wrapped to 0
        phi_max[phi_max == 0.0] = 360.0

        # find which bins were wrapped back into [0, 360]
        wrapped = phi_min > phi_max

        # determine which bins intersect the specified range
        if phi[0] > phi[1]:
            # wrapping limit range: element-wise |, since plain `or`
            # raises ValueError on numpy arrays
            in_range = (phi_min < phi[1]) | (phi_max > phi[0]) | wrapped
        else:
            in_range = ((phi_min < phi[1]) & (phi_max > phi[0])) | (wrapped & ((phi_min < phi[1]) | (phi_max > phi[0])))

        data['bins'][~in_range] = 0

    # apply the theta limits
    if theta is not None:
        lower_theta = min(theta)
        upper_theta = max(theta)

        # get min/max angle theta values for all bins
        theta_min = data['theta'] - 0.5*data['dtheta']
        theta_max = data['theta'] + 0.5*data['dtheta']

        in_range = (theta_min < upper_theta) & (theta_max > lower_theta)
        data['bins'][~in_range] = 0

    # apply the energy limits
    if energy is not None:
        data['bins'][data['energy'] < energy[0]] = 0
        data['bins'][data['energy'] > energy[1]] = 0

    return data
| UTF-8 | Python | false | false | 2,160 | py | 494 | spd_pgs_limit_range.py | 399 | 0.568519 | 0.548148 | 0 | 69 | 30.289855 | 120 |
aCoffeeYin/pyreco | 9,603,546,881,224 | 579520044fd950fd89010b180ed4fbb5af38b199 | 9b9a02657812ea0cb47db0ae411196f0e81c5152 | /repoData/stripe-monospace-django/allPythonContent.py | f3aa309994a59e4878e16d7556db8d25a436a9ea | []
| no_license | https://github.com/aCoffeeYin/pyreco | cb42db94a3a5fc134356c9a2a738a063d0898572 | 0ac6653219c2701c13c508c5c4fc9bc3437eea06 | refs/heads/master | 2020-12-14T14:10:05.763693 | 2016-06-27T05:15:15 | 2016-06-27T05:15:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __FILENAME__ = admin
from django.contrib import admin
# implicit-relative import of this app's models module (Python 2 style)
import models

# expose the User model in the Django admin with the default ModelAdmin
admin.site.register(models.User)
########NEW FILE########
__FILENAME__ = forms
from django import forms
from django.core.exceptions import NON_FIELD_ERRORS
class MonospaceForm(forms.Form):
    """Base form that can attach a message as a form-wide (non-field) error."""

    def addError(self, message):
        # Wrap the message in the form's error-list class and file it
        # under the non-field-errors key.
        form_wide_errors = self.error_class([message])
        self._errors[NON_FIELD_ERRORS] = form_wide_errors
class SignInForm(MonospaceForm):
    """Credentials form for an existing user signing in."""

    # Both fields are mandatory; the password widget never echoes a
    # previously submitted value back into the rendered HTML.
    email = forms.EmailField(required=True)
    password = forms.CharField(required=True,
                               widget=forms.PasswordInput(render_value=False))
class CardForm(MonospaceForm):
    """Stripe payment-card form.

    Only the token returned by Stripe.js and the card's last four
    digits reach the server; both travel in hidden inputs.
    """

    last_4_digits = forms.CharField(required=True, min_length=4, max_length=4,
                                    widget=forms.HiddenInput())
    stripe_token = forms.CharField(required=True, widget=forms.HiddenInput())
class UserForm(CardForm):
    """Sign-up form: account details on top of the inherited card fields."""

    name = forms.CharField(required=True)
    email = forms.EmailField(required=True)
    password1 = forms.CharField(required=True,
                                widget=forms.PasswordInput(render_value=False),
                                label='Password')
    password2 = forms.CharField(required=True,
                                widget=forms.PasswordInput(render_value=False),
                                label='Password confirmation')

    def clean(self):
        """Reject the submission when the two password entries differ."""
        data = self.cleaned_data
        if data.get('password1') != data.get('password2'):
            raise forms.ValidationError('Passwords do not match')
        return data
########NEW FILE########
__FILENAME__ = manage
#!/usr/bin/env python
from django.core.management import execute_manager
import imp
try:
    # Probe for a settings.py next to this script without importing it.
    imp.find_module('settings') # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
    sys.exit(1)

import settings

if __name__ == "__main__":
    # Hand control to Django's (pre-1.4) management command dispatcher.
    execute_manager(settings)
########NEW FILE########
__FILENAME__ = models
from django.db import models
import bcrypt
from monospace import settings
class User(models.Model):
    """Customer account with a bcrypt-hashed password and Stripe billing info."""

    name = models.CharField(max_length=255)
    email = models.CharField(max_length=255, unique=True)
    # bcrypt digests are 60 characters long
    password = models.CharField(max_length=60)
    # only the last four card digits are stored; the full number stays at Stripe
    last_4_digits = models.CharField(max_length=4)
    stripe_id = models.CharField(max_length=255)
    subscribed = models.BooleanField(default=False)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.name

    def set_password(self, clear_password):
        """Hash ``clear_password`` with a fresh salt and store the digest."""
        salt = bcrypt.gensalt(settings.BCRYPT_ROUNDS)
        self.password = bcrypt.hashpw(clear_password, salt)

    def check_password(self, clear_password):
        """Return True when ``clear_password`` matches the stored hash.

        The candidate is re-hashed using the stored digest as the salt
        (bcrypt embeds the salt in the digest) and the result is compared
        in constant time, so the comparison leaks no timing information.
        """
        import hmac  # local import keeps the module's import block unchanged
        return hmac.compare_digest(
            bcrypt.hashpw(clear_password, self.password), self.password
        )
########NEW FILE########
__FILENAME__ = settings
import os
# Stripe keys
# NOTE(review): API keys and SECRET_KEY are committed in source control;
# these should be loaded from the environment instead.
STRIPE_PUBLISHABLE = 'pk_YT1CEhhujd0bklb2KGQZiaL3iTzj3'
STRIPE_SECRET = 'tGN0bIwXnHdwOa85VABjPdSn8nWY7G7I'
# customized settings
PROJECT_ROOT = os.path.realpath(os.path.dirname(__file__))
SITE_ROOT = os.path.dirname(PROJECT_ROOT)
TIME_ZONE = 'America/Los_Angeles'
# Single SQLite database stored next to the project directory.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(SITE_ROOT, 'monospace.sqlite'),
    }
}
TEMPLATE_DIRS = (os.path.join(PROJECT_ROOT, 'templates'),)
STATICFILES_DIRS = (os.path.join(PROJECT_ROOT, 'static'),)
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.messages',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.staticfiles',
    'monospace'
)
# bcrypt work factor used by models.User.set_password().
BCRYPT_ROUNDS = 15
# default Django settings
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = ()
MANAGERS = ADMINS
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
MEDIA_ROOT = ''
MEDIA_URL = ''
STATIC_ROOT = ''
STATIC_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
SECRET_KEY = 'lb-06%rmn$fmhhu!mr@3nc(&$0985qvddj%_5=t@94x@#@jcs@'
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'monospace.urls'
# E-mail unhandled request errors to ADMINS (empty above, so effectively off).
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
########NEW FILE########
__FILENAME__ = urls
from django.conf.urls.defaults import patterns, include, url
from django.contrib import admin
from monospace import views
# Register ModelAdmin classes from every installed app.
admin.autodiscover()
# Routes for the monospace app plus the Django admin / admin docs.
urlpatterns = patterns('',
    url(r'^$', views.home, name='home'),
    url(r'^sign_in$', views.sign_in, name='sign_in'),
    url(r'^sign_out$', views.sign_out, name='sign_out'),
    url(r'^register$', views.register, name='register'),
    url(r'^edit$', views.edit, name='edit'),
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    url(r'^admin/', include(admin.site.urls)),
)
########NEW FILE########
__FILENAME__ = views
import datetime
from django.db import IntegrityError
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
import stripe
from monospace.forms import *
from monospace.models import *
import monospace.settings as settings
stripe.api_key = settings.STRIPE_SECRET
def soon():
    """Return {'month': m, 'year': y} for the date 30 days from today.

    Used to pre-select a plausible card-expiry default in the templates.
    """
    target = datetime.date.today() + datetime.timedelta(days=30)
    return dict(month=target.month, year=target.year)
def home(request):
    """Landing page: anonymous visitors get home.html; a signed-in user
    (pk stored in the session under 'user') gets the account page."""
    uid = request.session.get('user')
    if uid is not None:
        return render_to_response('user.html', {'user': User.objects.get(pk=uid)})
    return render_to_response('home.html')
def sign_in(request):
    """Authenticate by e-mail/password and start a session.

    GET renders an empty form; POST validates credentials against the User
    table and, on success, stores the user's pk in the session and
    redirects home.
    """
    # NOTE(review): `user` is never reassigned, so the template always
    # receives None -- looks like leftover scaffolding; confirm.
    user = None
    if request.method == 'POST':
        form = SignInForm(request.POST)
        if form.is_valid():
            results = User.objects.filter(email=form.cleaned_data['email'])
            if len(results) == 1:
                if results[0].check_password(form.cleaned_data['password']):
                    request.session['user'] = results[0].pk
                    return HttpResponseRedirect('/')
                else:
                    # Same message for wrong password and unknown e-mail so
                    # the form does not leak which accounts exist.
                    form.addError('Incorrect email address or password')
            else:
                form.addError('Incorrect email address or password')
    else:
        form = SignInForm()
    # NOTE(review): debug print left in; remove before production.
    print form.non_field_errors()
    return render_to_response(
        'sign_in.html',
        {
            'form': form,
            'user': user
        },
        context_instance=RequestContext(request)
    )
def sign_out(request):
    """End the current session (if any) and redirect to the home page.

    FIX: the previous `del request.session['user']` raised KeyError when no
    user was signed in (double sign-out, stale bookmark); pop() with a
    default is a safe no-op in that case.
    """
    request.session.pop('user', None)
    return HttpResponseRedirect('/')
def register(request):
    """Create a Stripe customer and local User from the sign-up form.

    GET renders the empty registration form.  POST validates it, creates the
    Stripe customer (subscribed to the 'basic' plan), persists the local User
    and signs the new user in.  A duplicate e-mail surfaces as a form error.
    """
    user = None
    if request.method == 'POST':
        form = UserForm(request.POST)
        if form.is_valid():
            # Create the customer on Stripe first; the returned id links the
            # local account to the card Stripe keeps on file.
            customer = stripe.Customer.create(
                description = form.cleaned_data['email'],
                card = form.cleaned_data['stripe_token'],
                plan = 'basic'
            )
            user = User(
                name = form.cleaned_data['name'],
                email = form.cleaned_data['email'],
                last_4_digits = form.cleaned_data['last_4_digits'],
                stripe_id = customer.id
            )
            user.set_password(form.cleaned_data['password1'])
            try:
                user.save()
            except IntegrityError:
                # email column is unique; a duplicate insert means the
                # address is already registered.
                form.addError(user.email + ' is already a member')
            else:
                request.session['user'] = user.pk
                return HttpResponseRedirect('/')
    else:
        form = UserForm()
    return render_to_response(
        'register.html',
        {
            'form': form,
            # FIX: range(1, 12) only produced months 1-11, leaving December
            # unselectable in the card-expiry dropdown.
            'months': range(1, 13),
            'publishable': settings.STRIPE_PUBLISHABLE,
            'soon': soon(),
            'user': user,
            'years': range(2011, 2036),
        },
        context_instance=RequestContext(request)
    )
def edit(request):
    """Replace the card on file for the signed-in user.

    Redirects anonymous visitors home.  On a valid POST, the new Stripe.js
    token is attached to the existing Stripe customer and the local
    `last_4_digits` is refreshed.
    """
    uid = request.session.get('user')
    if uid is None:
        return HttpResponseRedirect('/')
    user = User.objects.get(pk=uid)
    if request.method == 'POST':
        form = CardForm(request.POST)
        if form.is_valid():
            customer = stripe.Customer.retrieve(user.stripe_id)
            customer.card = form.cleaned_data['stripe_token']
            customer.save()
            user.last_4_digits = form.cleaned_data['last_4_digits']
            user.stripe_id = customer.id
            user.save()
            return HttpResponseRedirect('/')
    else:
        form = CardForm()
    return render_to_response(
        'edit.html',
        {
            'form': form,
            'publishable': settings.STRIPE_PUBLISHABLE,
            'soon': soon(),
            # FIX: range(1, 12) omitted December (month 12) from the
            # expiry-month dropdown.
            'months': range(1, 13),
            'years': range(2011, 2036)
        },
        context_instance=RequestContext(request)
    )
########NEW FILE########
| UTF-8 | Python | false | false | 9,626 | py | 16,703 | allPythonContent.py | 3,862 | 0.629961 | 0.620923 | 0 | 358 | 24.888268 | 216 |
herobird1981/kunlun_cdl_auto_script | 6,863,357,756,262 | 4d2b192bdb36d50dd1f37627cd652e35b163f915 | c9977acba0d5432ca41b7f9e53be9dbeb6132516 | /cdl_fpga/dsp0/test_dsp0_ptc.py | 16b1ed6b312f34acd2b7c6d324f6f131ad63437a | []
| no_license | https://github.com/herobird1981/kunlun_cdl_auto_script | d965d12874898d0327131adf91aad64477f283b2 | a7689e2969054581b24e263b7b7d7c066cd51dd4 | refs/heads/master | 2020-04-04T18:55:26.816369 | 2018-11-05T08:37:31 | 2018-11-05T08:37:31 | 156,185,242 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # $language = "Python"
# $interface = "1.0"
import sys
sys.path.append('.')
import testlib
reload(testlib)
testlib.init()
# The terminal-automation host (presumably SecureCRT, given the
# "$language"/"$interface" header) runs scripts with __name__ set to
# '__builtin__' and injects a global `crt` API object -- TODO confirm.
if __name__ == '__builtin__':
    testlib.crt = crt
sys.dont_write_bytecode = True
###################################################
###################################################
def enter_ptc_test_menu():
    # Navigate the device's test shell into the 'ptc' sub-menu before
    # issuing ptc_* commands.
    testlib.enter_menu('ptc')
def ptc_pwm():
    """Run the PWM test on PTC channels 0-2, pausing 2 s after each command
    so the device has time to produce output."""
    for channel in range(3):
        command = 'ptc_test_pwm %d 100 200' % channel
        testlib.inputStr(command + '\r\n')
        testlib.sleep(2000)
def main():
    """Run the PTC test cases, logging into a LOG\\ folder next to this script."""
    testcases = [
        enter_ptc_test_menu,
        ptc_pwm,
    ]
    # GetFileNameAndExt() appears to return (directory, file name) -- TODO confirm.
    testlib.runCaseList(testcases,
                        logpath=testlib.GetFileNameAndExt(
                            __file__)[0] + '\\LOG\\',
                        filename=testlib.GetFileNameAndExt(__file__)[1])
# Entry point when executed inside the terminal-automation host.
if __name__ == '__builtin__':
    # range(1): a single pass; raise the count to repeat the whole suite.
    for i in range(1):
        main()
| UTF-8 | Python | false | false | 944 | py | 24 | test_dsp0_ptc.py | 19 | 0.441737 | 0.422669 | 0 | 41 | 21.02439 | 72 |
huskeypm/amylin | 3,169,685,888,096 | 10fa04d2f552f0b1dbd6ef8c57f63726fd3fbaae | ac2c2c4a305d7e74aa173afa0d6ad500dc729065 | /fittingAlgorithm.py | 12d2fca5049e851797e5c6d4639dc8cb298ef54d | []
| no_license | https://github.com/huskeypm/amylin | 6896614516d734170a7c4088860508ac2b89ea01 | 6d7848b9528bfd8c942dc6db891dc591d658a85d | refs/heads/master | 2022-11-14T07:56:16.606500 | 2017-12-26T17:39:15 | 2017-12-26T17:39:15 | 277,685,559 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
sys.path.append("./fitting_sensitivity/")
import multiprocessing
from os import getpid
import runShannonTest as rs
import numpy as np
import analyzeODE as ao
import copy
import pandas as pd
import taufitting as tf
import matplotlib.pylab as plt
import fitter
import daisychain as dc
class outputObj:
    """Specification of one fitted observable.

    name       -- recorded state-variable name to read (e.g. "Nai")
    mode       -- reduction applied to its time series ("mean", "max", "amp", ...)
    timeRange  -- [start, end] window the reduction is taken over
    truthValue -- target value the fit should reproduce
    result     -- filled in after a job has been processed (None until then)
    """

    def __init__(self, name, mode, timeRange, truthValue):
        for attr, value in (("name", name),
                            ("mode", mode),
                            ("timeRange", timeRange),
                            ("truthValue", truthValue),
                            ("result", None)):
            setattr(self, attr, value)
# Fallback observables used when a job dict carries outputList=None:
# mean intracellular Na over 50-100 s must hit 12 mM equivalent, and the
# Ca transient amplitude over the same window is scored against 10000.
outputListDefault = { "Nai":outputObj("Nai","mean",[5e4,10e4],12.0e-3),
                      "Cai":outputObj("Cai","amp",[5e4,10e4],10000) }
# decayRate:outputObj("decayRate","tau")
class empty:pass
def workerParams(jobDict):
    """Worker-pool entry point: run one ODE job and reduce its outputs.

    jobDict keys: 'odeModel', 'jobNum', 'jobDuration' [ms], 'varDict'
    (parameter name -> value) and 'outputList' (name -> outputObj); a None
    outputList falls back to outputListDefault.

    Returns (jobNum, results) so that dict(pool.map(...)) maps job number
    to its results object.
    """
    odeName = jobDict['odeModel']
    jobNum = jobDict['jobNum']
    dtn = jobDict['jobDuration'] # [ms]
    varDict = jobDict['varDict']
    print "Worker bee %d, Job %d "%(getpid(),jobNum)
    outputList = jobDict['outputList']
    if outputList == None:
        outputList = outputListDefault
        print "No outputList given, using outputListDefault."
    verbose = False
    if verbose:
        for key,val in varDict.iteritems() :
            print " ",key,val
    ## Launch job with parameter set
    name = None # don't write a pickle
    returnDict = dict() # filled in-place by runParamsFast
    rs.runParamsFast(odeName=odeName,
                     name=name,
                     varDict = varDict,
                     dt=0.1,
                     dtn=dtn,
                     stim_period=1000.0,
                     returnDict=returnDict)
    ## Reduce the raw trace to one scalar per requested observable.
    data = returnDict['data']
    outputResults = ProcessWorkerOutputs(data,outputList,tag=jobNum)
    if verbose:
        for key,val in outputResults.iteritems() :
            print " ",key,val.result
    ## Package additional useful information for later analysis.
    results = empty()
    results.outputResults = outputResults
    results.pid = getpid()
    results.jobDict = jobDict
    results.jobNum = jobNum
    return jobNum,results
def ProcessWorkerOutputs(data,outputList,tag=99):
    """Reduce raw simulation `data` to one scalar per requested output.

    For every outputObj in `outputList`, a shallow copy is made and its
    `.result` is filled with the reduction (obj.mode) of the named state
    variable over obj.timeRange.  `tag` is accepted but unused here.
    """
    outputResults = {}
    for key,obj in outputList.iteritems():
        dataSub = ao.GetData(data, obj.name)
        result = ao.ProcessDataArray(dataSub,obj.mode,obj.timeRange,key=key)
        # Copy so the shared template outputObj can be reused across jobs.
        resultObj = copy.copy(obj)
        resultObj.result = result
        outputResults[key]=resultObj
    return outputResults
def PandaData(jobOutputs, csvFile="example.csv"):
    """Deprecated: superseded by fitter.PandaData, which fittingAlgorithm
    actually calls.  Kept only so any stale caller fails loudly.

    Always raises RuntimeError.  (FIX: the DataFrame-building code that
    previously followed the raise was unreachable and has been removed.)
    """
    raise RuntimeError("Not using")
def StoreJob(job1):
    """Flatten one finished job into a dict of scalars (one pandas row).

    The row id combines job number and worker pid; input parameters
    (varDict) and reduced outputs (outputResults) become columns.
    """
    pandasDict = dict()
    tag = "%d_%d"%(job1.jobNum,job1.pid)
    pandasDict['jobID']=tag
    # pull out inputs
    varDict = job1.jobDict['varDict']
    for param,value in varDict.iteritems():
        pandasDict[param] = value
    # pull out its results vector
    outputResults = job1.outputResults
    for output,result in outputResults.iteritems():
        pandasDict[output] = result.result
    return pandasDict
# Genetic algorithm that randomizes the provided parameters (1 for now), selects the solution that minimizes the error, and repeats this process for a given number of iterations
def fittingAlgorithm(
    odeModel,
    myVariedParam, # Supports a single param currently = "Bmax_SL",
    numCores=5, # number of cores over which jobs are run
    numRandomDraws=3, # number of random draws for each parameter
    jobDuration = 2000, # job run time, [ms]
    paramVarDict = None,
    outputList = None,
    truthValues = None, # NOTE(review): unused; targets come from outputList
    sigmaScaleRate = 1., # rate at which sigma is reduced by iteration
    numIters = 10):
    """Genetic-style single-parameter fit.

    Each iteration draws `numRandomDraws` log-normal perturbations of the
    varied parameter around the current mean, simulates each draw in
    parallel via workerParams, scores every job as the root of the summed
    squared error over the observables in `outputList`, and keeps the best
    draw as the next iteration's mean.  The draw spread is annealed as
    sigma * exp(-sigmaScaleRate * (iters - 1)).

    Returns (randomDrawAllIters, bestDrawAllIters): the array of draws made
    each iteration, and the best parameter value after each iteration.
    """
    trialParamVarDict = copy.copy( paramVarDict )
    iters = 0
    allErrors = []          # NOTE(review): never populated; dead
    errorsGood_array = []   # NOTE(review): never populated; dead
    flag = True
    randomDrawAllIters = []
    bestDrawAllIters = []
    while flag:
        ## Create 'master' varDict list
        iters += 1
        numParams = 0
        defaultVarDict = dict()
        if trialParamVarDict != None:
            parmDict = trialParamVarDict
        print "iter", iters, " out of", numIters
        print "parmDict: " , parmDict
        for parameter,values in parmDict.iteritems():
            defaultVarDict[parameter] = values[0] # values = [mean, sigma]
            print "Inputs: ", parameter, values[0]
            numParams+=1
        ## determine core count
        numJobs = numRandomDraws*numParams
        numCores = np.min( [numCores, numJobs])
        print "Using %d cores for %d jobs"%(numCores,numJobs)
        ## Create a list of jobs with randomized parameters
        jobList = []
        ctr=0
        for parameter,values in parmDict.iteritems():
            ## generate random perturbations around the current mean
            mu,sigma = values
            print "Should probably rescale sigma by the tolerated error vs current error"
            # Anneal the spread: sigma * exp(-rate * (iteration - 1)).
            rescaledSigma = sigma*np.exp(-sigmaScaleRate * (iters-1))
            print "rescaledSigma: ", rescaledSigma, " rate ", sigmaScaleRate
            distro = "lognormal"
            if distro=="normal":
                randomDraws = np.random.normal(mu,rescaledSigma,numRandomDraws)
            if distro=="lognormal":
                # exp of a zero-mean normal scaled by mu keeps draws positive.
                unif = np.random.normal(0,rescaledSigma,numRandomDraws)
                randomDraws = np.exp(unif) * mu
            randomDraws = np.sort(randomDraws)
            print parameter, " random draws:"
            print randomDraws
            randomDrawAllIters.append(randomDraws)
            # One job per draw, each starting from the default parameter set.
            for val in randomDraws:
                varDict = copy.copy(defaultVarDict)
                varDict[parameter] = val
                jobDict = {'odeModel':odeModel,'varDict':varDict,'jobNum':ctr,'jobDuration':jobDuration, 'outputList':outputList}
                jobList.append( jobDict )
                ctr+=1
        ## Run jobs
        if numCores > 1:
            print "Multi-threading"
            pool = multiprocessing.Pool(processes = numCores)
            jobOutputs = dict( pool.map(workerParams, jobList))
        else:
            print "Restricting to one job only/assuming results are all that's needed"
            # NOTE(review): this branch never defines `jobOutputs`, so the
            # PandaData call below raises NameError when numCores <= 1.
            jobNum, results = workerParams(jobList[0])
        # Shouldn't have to write csv for these
        myDataFrame = fitter.PandaData(jobOutputs,csvFile=None)
        jobFitnesses = np.ones( len(myDataFrame.index) )*-1
        jobNums = np.ones( len(myDataFrame.index),dtype=int )*-1
        for i in range(len(myDataFrame.index)):
            # score 'fitness' as the squared error wrt each output parameter
            fitness = 0.0
            for key,obj in outputList.iteritems():
                result = myDataFrame.loc[myDataFrame.index[i],key]
                error = (result - obj.truthValue) ** 2
                tolerance = abs((obj.truthValue - result) / obj.truthValue)
                print "result: ", result, "truthValue: ", obj.truthValue, "tolerance:", tolerance
                fitness += error
            # compute sqrt
            jobFitnesses[i] = np.sqrt(fitness)
            # These lines are intended to correct for a discrepancy between
            # the pandas row order and the job list numbering.
            jobNums[i] = myDataFrame.loc[myDataFrame.index[i],'jobNum']
            myDataFrame.loc[myDataFrame.index[i],'fitness'] = jobFitnesses[i]
        #
        # Summarize results
        #
        print "myDataFrame: "
        print myDataFrame
        print "jobFitnesses: ", jobFitnesses
        # find best job
        pandasIndex = np.argmin( jobFitnesses )
        jobIndex = jobNums[ pandasIndex ]
        print "jobIndex: ", jobIndex
        # grab the job 'object' corresponding to that index
        bestJob = jobList[ jobIndex ]
        currentFitness = jobFitnesses[pandasIndex]
        if iters == 1:
            # NOTE(review): previousDraw is only ever assigned on the first
            # iteration, so later comparisons are always against iter 1.
            previousDraw = currentFitness
            print "previousDraw: ", previousDraw
        if currentFitness <= previousDraw:
            # get its input params/values
            bestVarDict = bestJob[ 'varDict' ]
            print "bestVarDict: " , bestVarDict
            variedParamVal = bestVarDict[ myVariedParam ]
            # update 'trialParamVarDict' with new mean ([0] = mean value)
            trialParamVarDict[ myVariedParam ][0] = variedParamVal
        else:
            print "Old draw is better starting point, not overwriting starting point"
        bestDrawAllIters.append(variedParamVal)
        print "iter", iters, " out of", numIters
        print ""
        print "######"
        print ""
        if iters >= numIters:
            flag = False
    return randomDrawAllIters, bestDrawAllIters
# Here we try to optimize the sodium buffer to get the correct free Na concentration
ms_to_s = 1e-3  # milliseconds -> seconds conversion factor
def validation():
    """Smoke test: fit Bmax_SL so the mean free Na matches 12 mM-equivalent.

    FIX: trial() requires the positional `odeModel` argument; the previous
    call omitted it and raised a TypeError.  The default rat model used by
    the __main__ driver is passed explicitly.
    """
    # define job length and period during which data will be analyzed
    # (assume the system reaches steady state)
    jobDuration = 4e3 # [ms] simulation length
    timeRange = [1.0,jobDuration*ms_to_s] # [s] (GetData rescales the time series)
    ## Define parameter, its mean starting value and the starting std dev
    myVariedParam="Bmax_SL"
    paramDict = dict()
    paramDict[myVariedParam] = [10.0, 1.0]
    ## Define the observables and the truth value
    outputList = {"Nai":outputObj("Nai","mean",timeRange,12.0e-3)}
    # Run
    trial("shannon_2004_rat.ode",paramDict=paramDict,outputList=outputList)
def test1():
# define job length and period during which data will be analyzed (assume sys. reaches steady state)
jobDuration = 30e3 # [ms] simulation length
timeRange = [((jobDuration*ms_to_s)-3),jobDuration*ms_to_s] # [s] range for data (It's because of the way GetData rescales the time series)
#timeRange = [4.0,jobDuration*ms_to_s] # [s] range for data (It's because of the way GetData rescales the time series)
print "timeRange: ", timeRange
#fileName = "BASEtestT298.png"
fileName = "AMYtest.png"
## Define parameter, its mean starting value and the starting std dev
# Bmax_SL
myVariedParam="I_NaK_max"
paramDict = dict()
truthVal = 5.0
#paramDict[myVariedParam] = [2*truthVal, 1.0]
paramDict[myVariedParam] = [2*truthVal, 0.2] # for log normal
sigmaScaleRate = 0.15
## Define the observables and the truth value
outputList = {"Nai":outputObj("Nai","mean",timeRange,12.0e-3)}
# Run
numRandomDraws = 30
numCores = np.min([numRandomDraws,30])
numIters = 20
trial(paramDict=paramDict,outputList=outputList,numCores=numCores,numRandomDraws=numRandomDraws,jobDuration=jobDuration,numIters=numIters,sigmaScaleRate=sigmaScaleRate,fileName=fileName)
def run(
    odeModel="shannon_2004_rat.ode",
    myVariedParam="I_NaK_max",
    variedParamTruthVal=5.0,
    jobDuration= 30e3, # [ms] simulation length
    fileName="This_Is_A_Test.png",
    numRandomDraws=5,
    numIters=3,
    sigmaScaleRate=0.15,
    outputParamName="Nai",
    outputParamSearcher="Nai",
    outputParamMethod="mean",
    outputParamTruthVal=12.0e-3
):
    """CLI-friendly wrapper: build paramDict/outputList from scalar
    arguments (mirroring the flags parsed in __main__) and call trial()."""
    # Analysis window: the last 3 s of the run, assumed steady state.
    timeRange = [((jobDuration*ms_to_s)-3),jobDuration*ms_to_s]
    print "timeRange: ", timeRange
    ## Define parameter, its mean starting value and the starting std dev
    paramDict = dict()
    paramDict[myVariedParam] = [variedParamTruthVal, 0.2] # for log normal
    ## Define the observables and the truth value
    outputList = {outputParamName:outputObj(outputParamSearcher,outputParamMethod,timeRange,outputParamTruthVal)}
    # Cap the worker count at 30.
    numCores = np.min([numRandomDraws,30])
    trial(odeModel=odeModel,paramDict=paramDict,outputList=outputList,numCores=numCores,numRandomDraws=numRandomDraws,jobDuration=jobDuration,numIters=numIters,sigmaScaleRate=sigmaScaleRate,fileName=fileName)
def trial(
    odeModel,
    paramDict,
    outputList,
    numCores = 2, # maximum number of processors used at a time
    numRandomDraws = 2, # random draws per parameter each iteration
    jobDuration = 4e3, # [ms] simulation length
    numIters=2,
    sigmaScaleRate = 1.0,
    fileName = None
):
    """Run the genetic fit for the single parameter in `paramDict` and plot
    the per-iteration draws.

    `paramDict` maps exactly one parameter name to [mean, sigma]; that key
    is forwarded to fittingAlgorithm as the varied parameter.
    """
    # Only one varied parameter is supported; take the first key.
    # FIX (idiom): replaces `[k for k in paramDict.iterkeys()][0]` --
    # next(iter(...)) avoids the throwaway list and works on Python 2 and 3.
    variedParam = next(iter(paramDict))
    ## do fitting and get back debugging details
    allDraws,bestDraws = fittingAlgorithm(
        odeModel,variedParam,numCores, numRandomDraws, jobDuration, paramDict, outputList,numIters=numIters, sigmaScaleRate=sigmaScaleRate)
    PlotDebuggingData(allDraws,bestDraws,numIters,numRandomDraws,title="Varied param %s"%variedParam,fileName=fileName)
def PlotDebuggingData(allDraws,bestDraws,numIters,numRandomDraws,title=None,fileName=None):
    """Scatter every random draw per iteration and overlay the best value.

    Saves the figure to `fileName`, or "mytest.png" when None.
    """
    # put into array form
    allDraws = np.asarray(allDraws)
    bestDraws = np.asarray(bestDraws)
    # create a matrix of random draws versus iteration
    vals= np.ndarray.flatten(allDraws)
    iters = np.repeat([np.arange(numIters)],numRandomDraws)
    # NOTE(review): under Python 3, zip() is an iterator, so np.asarray(zip(...))
    # would not build the expected 2-D array; this file is Python 2 (see the
    # print statements elsewhere).
    scatteredData= np.asarray(zip(iters,vals))
    plt.scatter(scatteredData[:,0], scatteredData[:,1],label="draws")
    plt.plot(np.arange(numIters), bestDraws, label="best")
    plt.legend()
    if title!= None:
        plt.title(title)
    plt.xlabel("number of iterations")
    plt.xlim([-1,numIters])
    # NOTE(review): y-axis label is hard-coded to one parameter name even
    # though the varied parameter is configurable.
    plt.ylabel("I_NaK_max")
    if fileName == None:
        plt.gcf().savefig("mytest.png")
    else:
        plt.gcf().savefig(fileName)
#!/usr/bin/env python
import sys
##################################
#
# Revisions
# 10.08.10 inception
#
##################################
#
# ROUTINE
#
def doit(fileIn):
    # Placeholder: no processing implemented yet.  The bare `1` is a no-op
    # expression statement kept so the function body is non-empty.
    1
#
# Message printed when program run without arguments
#
def helpmsg():
    """Build the usage/help string shown when the script is misused."""
    scriptName= sys.argv[0]
    msg="""
Purpose: 
Usage: 
"""
    msg+="  %s -validation" % (scriptName)
    msg+="""
Notes:
"""
    return msg
#
# MAIN routine executed when launching this script from command line
#
if __name__ == "__main__":
    import sys
    msg = helpmsg()
    remap = "none"  # NOTE(review): unused
    # Defaults; each is overridable via the matching CLI flag below.
    odeModel="shannon_2004_rat.ode"
    myVariedParam="I_NaK_max"
    variedParamTruthVal=5.0
    jobDuration= 30e3 # [ms] simulation length
    fileName="This_Is_A_Test.png"
    numRandomDraws=3
    numIters=3
    sigmaScaleRate=0.15
    outputParamName="Nai"
    outputParamSearcher="Nai"
    outputParamMethod="mean"
    outputParamTruthVal=12.0e-3
    # Loops over each argument in the command line; value flags consume the
    # following argv entry.
    for i,arg in enumerate(sys.argv):
        if(arg=="-validation"):
            validation()
            quit()
        if(arg=="-test1"):
            test1()
            quit()
        if(arg=="-odeModel"):
            odeModel = sys.argv[i+1]
        if(arg=="-myVariedParam"):
            myVariedParam = sys.argv[i+1]
        if(arg=="-variedParamTruthVal"):
            variedParamTruthVal = np.float(sys.argv[i+1])
        if(arg=="-jobDuration"):
            jobDuration = np.float(sys.argv[i+1])
        if(arg=="-fileName"):
            fileName = sys.argv[i+1]
        if(arg=="-numRandomDraws"):
            numRandomDraws = np.int(sys.argv[i+1])
        if(arg=="-numIters"):
            numIters = np.int(sys.argv[i+1])
        if(arg=="-sigmaScaleRate"):
            sigmaScaleRate = np.float(sys.argv[i+1])
        if(arg=="-outputParamName"):
            outputParamName = sys.argv[i+1]
        if(arg=="-outputParamSearcher"):
            outputParamSearcher = sys.argv[i+1]
        if(arg=="-outputParamMethod"):
            outputParamMethod = sys.argv[i+1]
        if(arg=="-outputParamTruthVal"):
            outputParamTruthVal = np.float(sys.argv[i+1])
    run(odeModel=odeModel,
        myVariedParam=myVariedParam,
        variedParamTruthVal=variedParamTruthVal,
        jobDuration=jobDuration,
        fileName=fileName,
        numRandomDraws=numRandomDraws,
        numIters=numIters,
        sigmaScaleRate=sigmaScaleRate,
        outputParamName=outputParamName,
        outputParamSearcher=outputParamSearcher,
        outputParamMethod=outputParamMethod,
        outputParamTruthVal=outputParamTruthVal)
    #raise RuntimeError("Arguments not understood")
#### python fittingAlgorithm.py -odeModel shannon_2004_rat.ode -myVariedParam I_NaK_max -variedParamTruthVal 5.0 -jobDuration 30e3 -fileName This_Is_A_Test.png -numRandomDraws 3 -numIters 3 -sigmaScaleRate 0.15 -outputParamName Nai -outputParamSearcher Nai -outputParamMethod mean -outputParamTruthVal 12.0e-3 &
| UTF-8 | Python | false | false | 19,225 | py | 24 | fittingAlgorithm.py | 6 | 0.653888 | 0.642029 | 0 | 616 | 30.206169 | 311 |
acrajasekar/m3d-api | 2,473,901,200,815 | c70e680db4119070a80d6a803ee6de89f89be24c | 7a141b04cf7be834f12bf1cfc1a5af0dfd634659 | /m3d/hadoop/algorithm/algorithm_gzip_decompression_emr.py | 229fe72676bfeb896beae08fcdcf3a00b4bf7e98 | [
"Apache-2.0"
]
| permissive | https://github.com/acrajasekar/m3d-api | d29212d4fedfe01f0b8e69f4e8f9ff15653ee8c9 | d25d246334f89209e706e8e14f753d04e3094e06 | refs/heads/master | 2020-09-02T20:54:25.130399 | 2020-05-05T22:30:53 | 2020-05-05T22:30:53 | 219,302,196 | 0 | 0 | Apache-2.0 | true | 2020-05-05T22:30:55 | 2019-11-03T13:05:33 | 2019-11-03T13:05:34 | 2020-05-05T22:30:54 | 292 | 0 | 0 | 0 | null | false | false | from m3d.hadoop.algorithm.algorithm_hadoop import AlgorithmHadoop
from m3d.hadoop.algorithm.scala_classes import ScalaClasses
from m3d.hadoop.emr.s3_table import S3Table
class AlgorithmGzipDecompressionEMR(AlgorithmHadoop):
def __init__(self, execution_system, algorithm_instance, algorithm_params):
"""
Initialize Algorithm Decompression
:param execution_system: an instance of EMRSystem object
:param algorithm_instance: name of the algorithm instance
:param algorithm_params: algorithm configuration
"""
super(AlgorithmGzipDecompressionEMR, self).__init__(execution_system, algorithm_instance, algorithm_params)
destination_table_name = algorithm_params["destination_table"]
self._table = S3Table(execution_system, destination_table_name)
self._thread_pool_size = self._parameters["thread_pool_size"]
def get_scala_class(self):
return ScalaClasses.GZIP_DECOMPRESSOR
def build_params(self):
return GzipDecompressionParams(self._table.dir_landing_final, self._thread_pool_size).__dict__
class GzipDecompressionParams(object):
    """Plain parameter container; its __dict__ becomes the payload handed
    to the Scala decompression job (see build_params above)."""

    def __init__(self, directory, thread_pool_size):
        # TODO: Make the format dynamic in the future; as of now only csv
        # files are handled.
        self.format, self.directory, self.thread_pool_size = (
            "csv", directory, thread_pool_size)
nkuraeva/python-exercises | 16,157,666,984,121 | 8ca6b5853a0f9d3f3cdc7125fff652160fa5fdc8 | 2c45a9d0fe239c9bb5f7541a4d2cf4a30a084cfa | /week6/count_sort.py | 1079fdd19318d114fd4fba2aa907cf9a811cc5f3 | []
| no_license | https://github.com/nkuraeva/python-exercises | 22e71a94bbf45968978a7da9887b47adb4795fbd | 45bfc179f7f3402ff32bae8e9828ec8fa24345c9 | refs/heads/master | 2022-07-04T11:02:37.839214 | 2020-05-15T09:44:31 | 2020-05-15T09:44:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Given a list of N (N≤2 * 10⁵) elements that
# take integer values from 0 to 100 (100 including).
# Sort this list in non-decreasing order. Print the resulting list.
numbers = list(map(int, input().split()))
newNumbers = [0] * 101
for number in numbers:
newNumbers[number] += 1
for nowNumber in range(101):
print((str(nowNumber) + ' ') * newNumbers[nowNumber], end='')
| UTF-8 | Python | false | false | 383 | py | 93 | count_sort.py | 92 | 0.683377 | 0.633245 | 0 | 10 | 36.9 | 67 |
imperialguy/fivesquare | 4,389,456,615,609 | 37db9f96964d1438ed845a46f4c01aba3c1395b0 | b33375ca8ff1145b19e9b1b116df64ae8bb40114 | /app001/web/forms/business.py | afd4b8012322427051feba9a21ed1ccca5c96cee | []
| no_license | https://github.com/imperialguy/fivesquare | 52cff922ada62d9342264416cad842bfaaf63790 | eec257a3276c712492250a9b45a74556d5c87bb3 | refs/heads/master | 2020-04-10T16:09:16.427564 | 2015-06-22T13:25:17 | 2015-06-22T13:25:17 | 37,095,684 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from app001.web.models.business import BusinessModel
from wtforms import (
Form,
validators,
TextField,
IntegerField,
ValidationError
)
class NewBusinessForm(Form):
    """Creation form for a business: a unique name plus coordinates."""
    name = TextField(u'name', [validators.required()])
    location = TextField(u'coordinates', [validators.required()])

    def validate_name(form, field):
        # Business names act as unique identifiers in the data store.
        if BusinessModel(name=field.data).exists():
            raise ValidationError("This business already exists in our system")

    def validate_location(form, field):
        # NOTE(review): range(1, 3) accepts one OR two comma-separated
        # values; if a lat,lon pair is mandatory this should require
        # exactly 2 -- confirm intended behaviour.
        num_coordinates = len(field.data.split(','))
        if num_coordinates not in range(1, 3):
            raise ValidationError("invalid co-ordinates")
class NewReviewForm(Form):
    """Submission form for a review of an existing business."""
    name = TextField(u'name', [validators.required()])
    review = TextField(u'review', [validators.required()])
    rating = IntegerField(u'rating', [validators.required()])
    tags = TextField(u'tags', [validators.required()])

    def validate_name(form, field):
        # Reviews may only be attached to businesses already on file.
        if not BusinessModel(name=field.data).exists():
            raise ValidationError("This business does not exist in our system")
class SearchBusinessesForm(Form):
    """Geo-search form: a centre point plus a search radius."""
    location = TextField(u'coordinates', [validators.required()])
    radius = IntegerField(u'radius', [validators.required()])

    def validate_location(form, field):
        # NOTE(review): range(1, 3) accepts one OR two comma-separated
        # values (same check as NewBusinessForm); confirm whether exactly
        # two should be required.
        num_coordinates = len(field.data.split(','))
        if num_coordinates not in range(1, 3):
            raise ValidationError("invalid co-ordinates")
class GetBusinessInfoForm(Form):
    """Lookup form: fetch details for a single named business."""
    name = TextField(u'name', [validators.required()])

    def validate_name(form, field):
        if not BusinessModel(name=field.data).exists():
            raise ValidationError("This business does not exist in our system")
berkealgul/DeepLearning-stuff | 5,171,140,675,203 | 5291ee7eec1e9d74bdc4ddffa3e406565187e339 | 74d4a78a831eb78b076f8c16e1b9bb0e29bbb687 | /self-driving/NeuroEvolution/geneticAlgorithm.py | 36141ec4cec3488a4410e32bb3bbdee73aeb9f66 | []
| no_license | https://github.com/berkealgul/DeepLearning-stuff | ad9eb7c95ec2e7ba2029588c16c73158efe03b91 | a76d89e3b65ac7fffcf899feb2c05d99164658a5 | refs/heads/master | 2022-02-12T14:33:23.186834 | 2022-02-02T22:02:21 | 2022-02-02T22:02:21 | 197,758,052 | 0 | 0 | null | false | 2020-06-10T11:18:29 | 2019-07-19T11:03:11 | 2020-06-07T16:01:03 | 2020-06-10T11:18:09 | 42,509 | 0 | 0 | 0 | Python | false | false | import random
import math
from car import Car
mutationRate = 0.01
# ana fonksiyon
def create_new_generation(oldGeneration, generationlen=None):
if generationlen is None:
generationlen = len(oldGeneration)
best_fitness = pick_best_fitness(oldGeneration)
clear_bad_ones(oldGeneration, best_fitness/10)
calculate_fitness(oldGeneration)
newGeneration = []
# mutasyon ihtimali optimize edilmeye çalışılır
optimize_mutationRate(best_fitness)
for i in range(generationlen):
mom = choose_parent(oldGeneration)
dad = choose_parent(oldGeneration)
child = crossover(mom, dad)
newGeneration.append(child)
return newGeneration
# yardımcı fonksiyonlar
def calculate_fitness(generation):
    """Assign each member a normalized fitness proportional to score**2.

    fitness_i = score_i**2 / sum_j(score_j**2), so fitness values sum to 1
    and feed directly into roulette-wheel selection (choose_parent).

    FIXES: local `sum` no longer shadows the builtin; `math.pow` replaced
    with the `**` operator; an all-zero generation now gets a uniform
    distribution instead of raising ZeroDivisionError.
    """
    total = 0.0
    for member in generation:
        # Squaring exaggerates score differences (stronger selection pressure).
        member.fitness = float(member.score) ** 2
        total += member.fitness
    if total == 0.0:
        # Degenerate start-up case: keep selection well-defined.
        for member in generation:
            member.fitness = 1.0 / len(generation)
        return
    for member in generation:
        member.fitness /= total
def choose_parent(generation):
    """Roulette-wheel selection: pick one member with probability equal to
    its normalized fitness share.  Consumes exactly one random number.
    """
    ticket = random.random()
    idx = -1
    # Walk the wheel; the member whose fitness slice absorbs the remainder
    # of the ticket is selected.
    while ticket > 0:
        idx += 1
        ticket -= generation[idx].fitness
    return generation[idx]
def crossover(parent1, parent2):
    """Blend two parents' neural-network brains into a child Car.

    Child weight = (W1*F1 + W2*F2) / (F1 + F2), i.e. a fitness-weighted
    average; the same rule is applied to the biases. Both parents are
    assumed to have the same layer layout, so no index errors can occur.
    The blended brain is mutated before the child is created.
    """
    child_brain = parent1.brain.copy()
    donor_brain = parent2.brain.copy()
    f1 = parent1.fitness
    f2 = parent2.fitness
    scale = 1 / (f1 + f2)
    layer_pairs = zip(child_brain.weights, donor_brain.weights,
                      child_brain.biases, donor_brain.biases)
    for w1, w2, b1, b2 in layer_pairs:
        # fitness-weighted sum, accumulated in place on the child's copies
        w1.multiply(f1)
        w2.multiply(f2)
        b1.multiply(f1)
        b2.multiply(f2)
        w1.add(w2)
        w1.multiply(scale)
        b1.add(b2)
        b1.multiply(scale)
    child_brain.mutate(mutationRate)
    return Car(brain=child_brain)
last_gen_best = None
counter = 0
limit = 3
similarityTreshold = 0.05
initialMutationRate = mutationRate
# kodu temizlemelisin !!!
def optimize_mutationRate(best_score):
    """Adapt the global mutation rate based on generation-to-generation progress.

    If the best score stays nearly constant (difference below
    ``similarityTreshold``) for ``limit`` consecutive generations, the
    mutation rate is increased in 0.01 steps up to 0.04; once real progress
    is observed again the counter and rate are reset.
    """
    global counter, last_gen_best, mutationRate
    # first call: just remember the score, nothing to compare against yet
    if last_gen_best is None:
        last_gen_best = best_score
        return
    df = math.fabs(best_score - last_gen_best)
    print(str(df))
    if df < similarityTreshold:
        print('benzerlik')
        # once the similarity has repeated `limit` times, take action
        if counter < limit:
            counter += 1
        else:
            # performance is still flat between generations: raise the
            # mutation probability (capped at 0.04)
            if mutationRate < 0.04:
                mutationRate += 0.01
                print('mutasyon değeri arttı ' + str(mutationRate))
            last_gen_best = best_score
    else:
        # a real performance difference was observed: reset the parameters
        if counter == limit:
            print('sıfırlandı')
            counter = 0
            mutationRate = initialMutationRate
            last_gen_best = best_score
        # NOTE(review): on this path last_gen_best is assigned twice; the
        # inner assignment above is redundant.
        last_gen_best = best_score
def pick_best_fitness(generation):
    """Return the highest fitness value in *generation* (assumed non-empty).

    Fix: the original returned ``generation[i].fitness`` -- the loop
    variable after the loop, i.e. the fitness of the LAST member -- instead
    of the fitness at the tracked best index.
    """
    return max(member.fitness for member in generation)
def clear_bad_ones(generation, fitnessTrehsold):
    """Remove, in place, every member whose fitness is below the threshold.

    Fix: the original removed items from the list while iterating over it,
    which makes the iterator skip the element that follows each removal.
    Rebuilding the list via slice assignment keeps the same list object
    (callers hold a reference to it) while filtering correctly.
    """
    generation[:] = [ind for ind in generation
                     if ind.fitness >= fitnessTrehsold]
| UTF-8 | Python | false | false | 3,952 | py | 42 | geneticAlgorithm.py | 33 | 0.626831 | 0.610383 | 0 | 141 | 25.595745 | 91 |
DAVIDMIGWI/python | 12,558,484,410,111 | a892a8ae3bd479f5b65f59880a0c73c24a00b87d | 81c504854beede7af947dcb48b5bff79bd308627 | /Lesson 2a.py | aff26fb591836a3a4a5f086981e86e95050f6991 | []
| no_license | https://github.com/DAVIDMIGWI/python | d74072b77d73e868052e4c4323fd9525d4ad6301 | c16769dd614858ae764b80d76573b460e35b992b | refs/heads/master | 2023-07-31T09:37:06.133273 | 2021-09-17T12:29:03 | 2021-09-17T12:29:03 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # body mass index
# formula
# quick operator demos before the BMI calculation
print(5%2) # modulus is the remainder
print(4**2)
# read weight (kilograms) and height (meters) from the user
weight = float(input("What is your weight-Kgs:"))
height = float(input("what is your height-Meters:"))
PI = 3.14
# NOTE(review): PI is never used below -- possibly left over from another exercise.
#find BMI
# BMI = weight / height**2
bmi = weight/pow(height,2)
print("your Body Mass Index is", bmi)
#modulus
| UTF-8 | Python | false | false | 290 | py | 44 | Lesson 2a.py | 42 | 0.665517 | 0.637931 | 0 | 13 | 20.307692 | 52 |
sherlockdoyle/Animator | 9,887,014,755,334 | af7ae7a90f967b3c36102d99762838225f7e0c6b | 03ed1dcb0bdda6df2dbe4d8e18f5d0f7710badea | /animator/graphics/gradient.py | d3de94a41c016eeb30a0c9a7fbe9e255bb131938 | [
"MIT",
"BSD-3-Clause"
]
| permissive | https://github.com/sherlockdoyle/Animator | 14a8ac9984b818fe79d5c7c8e68c76e95dc8e7b0 | ebd5e93f3f47cce5b841425fabed1c1afb4bb41b | refs/heads/main | 2023-08-05T05:23:43.508859 | 2023-07-01T15:31:47 | 2023-07-01T15:31:47 | 193,889,153 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import annotations
from abc import abstractmethod
from typing import Literal
import numpy
from animator import skia
from animator._common_types import ColorLike
from animator.graphics.color import color as parse_color
# String aliases accepted by Gradient.set_tile_mode().
TileMode = Literal['clamp', 'repeat', 'mirror', 'decal']
# Mapping from the string aliases to the corresponding skia enum values.
_tile_mode: dict[TileMode, skia.TileMode] = {
    'clamp': skia.TileMode.kClamp,
    'repeat': skia.TileMode.kRepeat,
    'mirror': skia.TileMode.kMirror,
    'decal': skia.TileMode.kDecal,
}
class Gradient:
    """Utility class for building different gradients.

    Color stops are supplied either evenly spaced (:meth:`add_colors`) or at
    explicit offsets (:meth:`add_color_stop`); whichever was called last
    wins. Call :meth:`build` on the object returned by one of the factory
    classmethods to obtain the actual ``skia.Shader``.
    """
    def __init__(self):
        # explicit offset -> color stops (used unless add_colors() was called last)
        self.__color_stops: dict[float, skia.Color4f] = {}
        # evenly spaced colors; when not None these take precedence
        self.__colors: list[skia.Color4f] | None = None
        # how the gradient continues outside the [0, 1] range
        self._tile_mode: skia.TileMode = skia.TileMode.kClamp
    @classmethod
    def Linear(cls, x0: float, y0: float, x1: float, y1: float, /) -> Gradient:
        """Create a linear gradient from (*x0*, *y0*) to (*x1*, *y1*)."""
        return _Linear(x0, y0, x1, y1)
    @classmethod
    def Radial(
        cls,
        x0: float,
        y0: float,
        r0: float,
        x1: float | None = None,
        y1: float | None = None,
        r1: float | None = None,
        /,
    ) -> Gradient:
        """
        Create a radial gradient from (*x0*, *y0*) with radius *r0* to (*x1*, *y1*) with radius *r1*. If *x1*, *y1*, and
        *r1* is ``None``, the gradient will be a circle.
        """
        # NOTE(review): only x1 is checked -- passing x1 without y1/r1 would
        # forward None values into _RadialTwoPoint.
        if x1 is None:
            return _Radial(x0, y0, r0)
        return _RadialTwoPoint(x0, y0, r0, x1, y1, r1)  # type: ignore y1 and r1 are not None
    @classmethod
    def Conical(cls, x0: float, y0: float, start_angle: float = 0, /) -> Gradient:
        """Create a conical gradient from (*x0*, *y0*) with the given *start_angle* (in degrees)."""
        return _Conical(x0, y0, start_angle)
    def add_colors(self, *colors: ColorLike) -> Gradient:
        """Add *colors* evenly spaced between 0 and 1."""
        # setting __colors makes _get_color_stops() ignore __color_stops
        self.__colors = [skia.Color4f(parse_color(color)) for color in colors]
        return self
    def add_color_stop(self, offset: float | tuple[float, float], color: ColorLike) -> Gradient:
        """
        Add a color stop at *offset* (between 0 and 1) with the given *color*. If *offset* is a tuple, the color will be
        added between the two points.
        """
        self.__colors = None
        color4f = skia.Color4f(parse_color(color))
        if isinstance(offset, tuple):
            # nudge each end just inside the (start, end) interval so the
            # flat color band gets hard edges at both ends
            self.__color_stops[numpy.nextafter(offset[0], 1, dtype=numpy.float32)] = color4f
            self.__color_stops[numpy.nextafter(offset[1], 0, dtype=numpy.float32)] = color4f
        else:
            self.__color_stops[offset] = color4f
        return self
    def __setitem__(self, offset: float | tuple[float, float], color: ColorLike) -> None:
        self.add_color_stop(offset, color)
    def _get_color_stops(self) -> tuple[list[float] | None, list[skia.Color4f]]:
        """Get the color stops sorted by offset."""
        if self.__colors is not None:
            # evenly spaced mode: positions are omitted (None)
            return None, self.__colors
        offsets = sorted(self.__color_stops.keys())
        # pad both ends so the stops always cover the whole [0, 1] range,
        # duplicating the outermost colors (mutates __color_stops)
        if offsets[0] != 0:
            self.__color_stops[0] = self.__color_stops[offsets[0]]
            offsets.insert(0, 0)
        if offsets[-1] != 1:
            self.__color_stops[1] = self.__color_stops[offsets[-1]]
            offsets.append(1)
        return offsets, [self.__color_stops[offset] for offset in offsets]
    def set_tile_mode(self, mode: skia.TileMode | TileMode) -> Gradient:
        """Set the tile mode (accepts a skia enum or one of the string aliases)."""
        self._tile_mode = _tile_mode[mode] if isinstance(mode, str) else mode
        return self
    @abstractmethod
    def build(self) -> skia.Shader:
        """Build the gradient. Must be implemented by subclasses."""
        pass
class _Linear(Gradient):
    """Linear gradient between two end points."""
    def __init__(self, x0: float, y0: float, x1: float, y1: float, /):
        super().__init__()
        self.__endpoints = [(x0, y0), (x1, y1)]
    def build(self) -> skia.Shader:
        """Create the skia linear-gradient shader."""
        positions, stop_colors = self._get_color_stops()
        return skia.GradientShader.MakeLinear(
            pts=self.__endpoints,
            colors=stop_colors,
            pos=positions,
            mode=self._tile_mode,
        )
class _Radial(Gradient):
    """Single-circle radial gradient around a center point."""
    def __init__(self, x0: float, y0: float, r0: float, /):
        super().__init__()
        self.__center = (x0, y0)
        self.__radius = r0
    def build(self) -> skia.Shader:
        """Create the skia radial-gradient shader."""
        positions, stop_colors = self._get_color_stops()
        return skia.GradientShader.MakeRadial(
            center=self.__center,
            radius=self.__radius,
            colors=stop_colors,
            pos=positions,
            mode=self._tile_mode,
        )
class _RadialTwoPoint(Gradient):
    """Two-point conical (radial) gradient between two circles."""
    def __init__(self, x0: float, y0: float, r0: float, x1: float, y1: float, r1: float, /):
        super().__init__()
        self.__start = (x0, y0)
        self.__start_radius = r0
        self.__end = (x1, y1)
        self.__end_radius = r1
    def build(self) -> skia.Shader:
        """Create the skia two-point conical gradient shader."""
        positions, stop_colors = self._get_color_stops()
        return skia.GradientShader.MakeTwoPointConical(
            start=self.__start,
            startRadius=self.__start_radius,
            end=self.__end,
            endRadius=self.__end_radius,
            colors=stop_colors,
            pos=positions,
            mode=self._tile_mode,
        )
class _Conical(Gradient):
    """Sweep (conical) gradient around a center point."""
    def __init__(self, x: float, y: float, start_angle: float, /):
        super().__init__()
        self.__center_x = x
        self.__center_y = y
        self.__start_angle = start_angle
    def build(self) -> skia.Shader:
        """Create the skia sweep-gradient shader, rotated to the start angle."""
        positions, stop_colors = self._get_color_stops()
        rotation = skia.Matrix.RotateDeg(self.__start_angle)
        return skia.GradientShader.MakeSweep(
            cx=self.__center_x,
            cy=self.__center_y,
            colors=stop_colors,
            pos=positions,
            mode=self._tile_mode,
            localMatrix=rotation,
        )
| UTF-8 | Python | false | false | 5,719 | py | 88 | gradient.py | 84 | 0.574401 | 0.554118 | 0 | 164 | 33.871951 | 120 |
ceache/treadmill | 13,683,765,852,442 | a77cf2c65fa0bbd5b0b74842ff4a172f6c210891 | 7f0c02b3eef636cc382484dd8015207c35cc83a8 | /lib/python/treadmill/ldap3kerberos.py | c4c1ec898536a84baf18aa0b88d449c9758a4606 | [
"Apache-2.0"
]
| permissive | https://github.com/ceache/treadmill | 4efa69482dafb990978bfdcb54b24c16ca5d1147 | 26a1f667fe272ff1762a558acfd66963494020ca | refs/heads/master | 2021-01-12T12:44:13.474640 | 2019-08-20T23:22:37 | 2019-08-20T23:22:37 | 151,146,942 | 0 | 0 | Apache-2.0 | true | 2018-10-01T19:31:51 | 2018-10-01T19:31:51 | 2018-09-17T16:20:27 | 2018-10-01T19:30:10 | 3,851 | 0 | 0 | 0 | null | false | null | """Replaces the use of python-gssapi with kerberos in ldap3.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import base64
import socket
from ldap3.core.exceptions import LDAPCommunicationError
from ldap3.protocol.sasl.sasl import send_sasl_negotiation
from ldap3.protocol.sasl.sasl import abort_sasl_negotiation
from treadmill import kerberoswrapper as kerberos
NO_SECURITY_LAYER = 1
INTEGRITY_PROTECTION = 2
CONFIDENTIALITY_PROTECTION = 4
def sasl_gssapi(connection, controls):
    """
    Performs a bind using the Kerberos v5 ("GSSAPI") SASL mechanism
    from RFC 4752. Does not support any security layers, only authentication!
    sasl_credentials can be empty or a tuple with one or two elements.
    The first element determines which service principal to request a ticket
    for and can be one of the following:
    - None or False, to use the hostname from the Server object
    - True to perform a reverse DNS lookup to retrieve the canonical hostname
    for the hosts IP address
    - A string containing the hostname
    The optional second element is what authorization ID to request.
    - If omitted or None, the authentication ID is used as the authorization ID
    - If a string, the authorization ID to use. Should start with "dn:" or
    "user:".
    """
    # pylint: disable=too-many-branches
    target_name = None
    authz_id = b''
    # Resolve the service principal (target_name) and optional authz id
    # from connection.sasl_credentials, per the rules in the docstring.
    if connection.sasl_credentials:
        if (len(connection.sasl_credentials) >= 1 and
                connection.sasl_credentials[0]):
            if connection.sasl_credentials[0] is True:
                hostname = \
                    socket.gethostbyaddr(connection.socket.getpeername()[0])[0]
                target_name = 'ldap@' + hostname
            else:
                target_name = 'ldap@' + connection.sasl_credentials[0]
        if (len(connection.sasl_credentials) >= 2 and
                connection.sasl_credentials[1]):
            authz_id = connection.sasl_credentials[1].encode("utf-8")
    if target_name is None:
        target_name = 'ldap@' + connection.server.host
    gssflags = (
        kerberos.GSS_C_MUTUAL_FLAG |
        kerberos.GSS_C_SEQUENCE_FLAG |
        kerberos.GSS_C_INTEG_FLAG |
        kerberos.GSS_C_CONF_FLAG
    )
    _, ctx = kerberos.authGSSClientInit(target_name, gssflags=gssflags)
    in_token = b''
    try:
        # GSSAPI context-establishment loop: exchange tokens with the server
        # until the client side reports completion.
        while True:
            status = kerberos.authGSSClientStep(
                ctx,
                base64.b64encode(in_token).decode('ascii')
            )
            out_token = kerberos.authGSSClientResponse(ctx) or ''
            result = send_sasl_negotiation(
                connection,
                controls,
                base64.b64decode(out_token)
            )
            in_token = result['saslCreds'] or b''
            if status == kerberos.AUTH_GSS_COMPLETE:
                break
        # Final RFC 4752 handshake: unwrap the server's 4-byte token that
        # carries the offered security layers and its maximum buffer size.
        kerberos.authGSSClientUnwrap(
            ctx,
            base64.b64encode(in_token).decode('ascii')
        )
        unwrapped_token = base64.b64decode(
            kerberos.authGSSClientResponse(ctx) or ''
        )
        if len(unwrapped_token) != 4:
            raise LDAPCommunicationError('Incorrect response from server')
        server_security_layers = unwrapped_token[0]
        if not isinstance(server_security_layers, int):
            server_security_layers = ord(server_security_layers)
        if server_security_layers in (0, NO_SECURITY_LAYER):
            # When no security layer is selected, the remaining three bytes
            # (the server's max buffer size) must be zero.
            # Fix: the original accessed `unwrapped_token.message[1:]`
            # (bytes objects have no .message attribute) and compared the
            # result against a *str* literal; compare the byte slice against
            # a bytes literal instead.
            if unwrapped_token[1:] != b'\x00\x00\x00':
                raise LDAPCommunicationError(
                    'Server max buffer size must be 0 if no security layer'
                )
        if not server_security_layers & NO_SECURITY_LAYER:
            raise LDAPCommunicationError(
                'Server requires a security layer, but this is not implemented'
            )
        # Reply: we select "no security layer" (max buffer size 0) plus the
        # optional authorization identity.
        client_security_layers = bytearray([NO_SECURITY_LAYER, 0, 0, 0])
        kerberos.authGSSClientWrap(
            ctx,
            base64.b64encode(
                bytes(client_security_layers) + authz_id
            ).decode('ascii')
        )
        out_token = kerberos.authGSSClientResponse(ctx) or ''
        return send_sasl_negotiation(
            connection,
            controls,
            base64.b64decode(out_token)
        )
    except (kerberos.GSSError, LDAPCommunicationError):
        # tell the server the SASL negotiation is over before re-raising
        abort_sasl_negotiation(connection, controls)
        raise
| UTF-8 | Python | false | false | 4,508 | py | 141 | ldap3kerberos.py | 131 | 0.622893 | 0.609139 | 0.000665 | 128 | 34.21875 | 79 |
akojif/codevscolor | 773,094,117,079 | 2fa0799e7e921c95f9efbc773e100ea32029704f | 9ce5f2161212b7c95269f5d04252629582ca5f2d | /python/del_all_files_with_specific_extn.py | c8b1727d78a59e84abe5575c8d8c09bee7fdf38a | [
"Apache-2.0"
]
| permissive | https://github.com/akojif/codevscolor | 2eecc915516ab47b7ccf6057b01c3ba01919f285 | 56db3dffeac8f8d76ff8fcf5656770f33765941f | refs/heads/master | 2022-05-14T06:33:17.274925 | 2022-04-06T02:15:30 | 2022-04-06T02:15:30 | 184,721,105 | 1 | 0 | Apache-2.0 | true | 2019-05-03T08:16:09 | 2019-05-03T08:16:08 | 2019-04-29T17:36:28 | 2019-04-29T17:36:27 | 125 | 0 | 0 | 0 | null | false | false | #1
import os
from os import listdir
# Target directory.
# Fix: the original literal 'C:\Sample\' is a SyntaxError -- the trailing
# backslash escapes the closing quote. Backslashes must be doubled.
folder_path = 'C:\\Sample\\'

# Delete every file ending in .txt directly inside folder_path.
for file_name in listdir(folder_path):
    if file_name.endswith('.txt'):
        # os.path.join builds the full path regardless of whether
        # folder_path ends with a separator
        os.remove(os.path.join(folder_path, file_name))
| UTF-8 | Python | false | false | 205 | py | 111 | del_all_files_with_specific_extn.py | 98 | 0.614634 | 0.590244 | 0 | 11 | 17.636364 | 42 |
edu-athensoft/ceit4101python | 2,877,628,100,729 | 3b4b2b2acc4fcb1a4254c0d80980ac4de9e21623 | 5f66f92c2dbfcee2460e79e43880c915210c2735 | /stem1400_modules/module_6_datatype/m6_5_set/set_issuperset.py | ce9bc541716cb53c406af3310512244ce679bd6b | []
| no_license | https://github.com/edu-athensoft/ceit4101python | 0bf7e8a3e06b4c54aabffac6777e8284e0593c0d | b6a63b489c8acf0f1d8e676d3f63c56c5897ab6d | refs/heads/master | 2023-08-31T02:59:12.044402 | 2023-08-25T06:22:54 | 2023-08-25T06:22:54 | 170,928,822 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # isupperset
# Demonstrate set.issuperset(): a set is a superset of its subsets and of itself.
A = set(range(1, 6))
B = set(range(1, 9))
C = {'a', 'b'}  # unrelated set, unused by the checks below
print(B.issuperset(A))  # True: B contains every element of A
print(A.issuperset(A))  # True: every set is a superset of itself
| UTF-8 | Python | false | false | 127 | py | 1,007 | set_issuperset.py | 974 | 0.488189 | 0.385827 | 0 | 8 | 14.5 | 28 |
yell/python-utils | 11,544,872,137,856 | 3c66c7038a7a8e51f9589ee371b96eb742c523d9 | e182be71da0833bb3a8e3d14d9e150dbff2a2983 | /utils/dict_utils.py | 3ee907ea11cd81faa33bd0076f895bd676b78f50 | [
"MIT"
]
| permissive | https://github.com/yell/python-utils | 93a734b206e7cff99c026420ee4c87b3cd38570c | 9fa452e8c9902c090a61d07d8e3b0ff706c047c4 | refs/heads/master | 2020-04-08T12:37:46.394515 | 2020-03-26T17:06:13 | 2020-03-26T17:06:13 | 159,355,153 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import operator as op
from types import DictionaryType
def is_dict(object_):
    """Return True iff *object_* is a plain dict (``types.DictionaryType``, Python 2 only)."""
    return isinstance(object_, DictionaryType)
class AttrDict(dict):
    """
    A dictionary that allows attribute-style access.
    References
    ----------
    * https://danijar.com/patterns-for-fast-prototyping-with-tensorflow/
    * https://github.com/bcj/AttrDict
    Examples
    --------
    >>> config = AttrDict(a=None)
    >>> config.a = 42
    >>> config.b = 'egg'
    >>> config # can be used as dict
    {'a': 42, 'b': 'egg'}
    """
    # Attribute access delegates straight to item access, so reading a
    # missing attribute raises KeyError (not AttributeError).
    __getattr__ = dict.__getitem__
    __setattr__ = dict.__setitem__
# aliases (alternative names for AttrDict, kept for convenience/compatibility)
DataClass = DictClass = AttrDict
def dict_elementwise_unary(d, f):
    """
    Apply a unary operation to every value of a dict.

    Parameters
    ----------
    d : dict-like
    f : {callable, str}
        If str, the method with that name is called on each value.

    Returns
    -------
    dict
        A new dict with the same keys and transformed values.
    """
    f_ = f if callable(f) else lambda x: getattr(x, f)()
    # `items()` works on both Python 2 and 3; the original used the
    # Python-2-only `iteritems()`.
    return {k: f_(v) for k, v in d.items()}
def dict_elementwise_binary(d, other, f, swap_args=False):
    """
    Apply a binary operation between dict values and a second operand.

    Parameters
    ----------
    d : dict-like
    other : dict or scalar
        If a dict, the operation is applied key-wise over the keys present
        in both dicts; otherwise *other* is the right operand for every value.
    f : {callable, str}
        If str, the method with that name is used.
    swap_args : bool
        If True, the operand order is swapped (used for the reflected ops).
    """
    f_ = f if callable(f) else lambda x, other: getattr(x, f)(other)
    f__ = (lambda x, y: f_(y, x)) if swap_args else f_
    # `isinstance(other, dict)` is equivalent to the original is_dict()
    # (types.DictionaryType *is* dict on Python 2) but also works on
    # Python 3; `items()` replaces the Python-2-only `iteritems()`.
    if isinstance(other, dict):
        return {k: f__(v, other[k]) for k, v in d.items() if k in other}
    else:
        return {k: f__(v, other) for k, v in d.items()}
# Factories turning an operator function (or method name) into an
# elementwise dunder implementation. ElementwiseDict is defined below;
# the inner lambdas only resolve it at call time, so the forward
# reference is safe.
_make_unary = lambda name: lambda self: ElementwiseDict(dict_elementwise_unary(self, name))
_make_binary = lambda name, swap_args=False: \
    lambda self, other: ElementwiseDict(dict_elementwise_binary(self, other, name, swap_args=swap_args))
class ElementwiseDict(AttrDict):
    """
    Further extension of AttrDict that overloads (most of) the
    operators to perform elementwisely.
    Conversions to various numerical type is impossible with
    suggesting syntax (e.g. float(...) should return float instance etc.),
    and thus such functionality is added via properties.
    Examples
    --------
    >>> d = ElementwiseDict({'a': -1, 'b': 4}); d
    {'a': -1, 'b': 4}
    >>> -d
    {'a': 1, 'b': -4}
    >>> abs(d)
    {'a': 1, 'b': 4}
    >>> d + 1
    {'a': 0, 'b': 5}
    >>> d + 1.
    {'a': 0.0, 'b': 5.0}
    >>> 2. * d
    {'a': -2.0, 'b': 8.0}
    >>> 1. / d
    {'a': -1.0, 'b': 0.25}
    >>> d << 1
    {'a': -2, 'b': 8}
    >>> d < 3
    {'a': True, 'b': False}
    >>> d == 4
    {'a': False, 'b': True}
    >>> d - d
    {'a': 0, 'b': 0}
    >>> d ** d
    {'a': -1.0, 'b': 256}
    >>> d += 1; d
    {'a': 0, 'b': 5}
    >>> d + {'a': 1, 'c': 3}
    {'a': 1}
    >>> d and {'a': False, 'b': True}
    {'a': False, 'b': True}
    >>> d > {'a': 0}
    {'a': False}
    >>> d.bool
    {'a': False, 'b': True}
    >>> d.float
    {'a': 0.0, 'b': 5.0}
    """
    # unary (arithmetic)
    __neg__ = _make_unary(op.neg)
    __pos__ = _make_unary(op.pos)
    __abs__ = _make_unary(op.abs)
    __inv__ = _make_unary(op.inv)
    __invert__ = _make_unary(op.invert)
    # binary arithmetic (op.div exists on Python 2 only)
    __add__ = _make_binary(op.add)
    __sub__ = _make_binary(op.sub)
    __mul__ = _make_binary(op.mul)
    __div__ = _make_binary(op.div)
    __floordiv__ = _make_binary(op.floordiv)
    __truediv__ = _make_binary(op.truediv)
    __pow__ = _make_binary(op.pow)
    __mod__ = _make_binary(op.mod)
    __lshift__ = _make_binary(op.lshift)
    __rshift__ = _make_binary(op.rshift)
    # binary logical (bitwise &, |, ^ -- not `and`/`or`)
    __and__ = _make_binary(op.and_)
    __or__ = _make_binary(op.or_)
    __xor__ = _make_binary(op.xor)
    # right-versions (arithmetic + logical)
    __radd__ = _make_binary(op.add, swap_args=True)
    __rsub__ = _make_binary(op.sub, swap_args=True)
    __rmul__ = _make_binary(op.mul, swap_args=True)
    __rdiv__ = _make_binary(op.div, swap_args=True)
    __rfloordiv__ = _make_binary(op.floordiv, swap_args=True)
    __rtruediv__ = _make_binary(op.truediv, swap_args=True)
    __rpow__ = _make_binary(op.pow, swap_args=True)
    __rmod__ = _make_binary(op.mod, swap_args=True)
    __rlshift__ = _make_binary(op.lshift, swap_args=True)
    __rrshift__ = _make_binary(op.rshift, swap_args=True)
    __rand__ = _make_binary(op.and_, swap_args=True)
    __ror__ = _make_binary(op.or_, swap_args=True)
    __rxor__ = _make_binary(op.xor, swap_args=True)
    # comparisons -- NOTE: these return elementwise dicts, so instances no
    # longer compare/sort like ordinary dicts
    __lt__ = _make_binary(op.lt)
    __le__ = _make_binary(op.le)
    __eq__ = _make_binary(op.eq)
    __ne__ = _make_binary(op.ne)
    __ge__ = _make_binary(op.ge)
    __gt__ = _make_binary(op.gt)
    # conversions, exposed as properties (``long`` exists on Python 2 only)
    bool = property(_make_unary(bool))
    int = property(_make_unary(int))
    long = property(_make_unary(long))
    float = property(_make_unary(float))
    complex = property(_make_unary(complex))
    bin = property(_make_unary(bin))
    oct = property(_make_unary(oct))
    hex = property(_make_unary(hex))
    def elementwise(self, *args):
        """Apply an arbitrary unary ``f`` or binary ``(other, f)`` operation elementwise."""
        if len(args) == 1:
            return ElementwiseDict(dict_elementwise_unary(self, args[0]))
        if len(args) == 2:
            return ElementwiseDict(dict_elementwise_binary(self, args[0], args[1]))
        raise ValueError
def recursive_iter(d):
    """
    Depth-first iteration over the leaves of a nested dict, yielding
    ``(owning_dict, key, value)`` triples for every non-dict value.
    Parameters
    ----------
    d : dict
    Examples
    --------
    >>> d = {'a': [1, 2, 3],
    ...      'b': {'d': [4., 5., 6.],
    ...           'e': {'f': [7, 8.]}},
    ...      'c': None}
    >>> for d_, k, v in recursive_iter(d):
    ...     print k in d_, k, v
    True a [1, 2, 3]
    True c None
    True f [7, 8.0]
    True d [4.0, 5.0, 6.0]
    """
    # a non-dict argument yields nothing at all
    if isinstance(d, DictionaryType):
        for k, v in d.items():
            if isinstance(v, DictionaryType):
                # recurse into nested dicts; only leaf values are yielded
                for d_, k_, v_ in recursive_iter(v):
                    yield d_, k_, v_
            else:
                yield d, k, v
| UTF-8 | Python | false | false | 5,726 | py | 42 | dict_utils.py | 38 | 0.523577 | 0.50978 | 0 | 208 | 26.528846 | 115 |
rummepa/homeautomation | 2,001,454,800,120 | ff6cbaf054342f05428711ad85cd424c1e226cac | ec33d238f44f6ad1d51cca59525103c38067982f | /rolluiken_op.py | 8b10d3c948b543e6258c2243a3b9cbd1d5b2acb6 | []
| no_license | https://github.com/rummepa/homeautomation | c156706afe19b0e655b28f0d77f02d4140221c5b | 12a4188f5e3ad629e3f37132e9c29ac6ce79fb17 | refs/heads/master | 2019-12-24T10:18:53.814334 | 2017-04-11T11:36:36 | 2017-04-11T11:36:36 | 84,371,487 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3
import RPi.GPIO as GPIO
import time
import timings
down=timings.down
up=timings.up
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7,GPIO.OUT)
GPIO.setup(11,GPIO.OUT)
GPIO.setup(13,GPIO.OUT)
GPIO.setup(15,GPIO.OUT)
GPIO.setup(19,GPIO.OUT)
GPIO.setup(21,GPIO.OUT)
GPIO.setup(23,GPIO.OUT)
GPIO.setup(29,GPIO.OUT)
GPIO.output(11,False)
time.sleep(1)
GPIO.output(7,True)
GPIO.output(13,False)
time.sleep(1)
GPIO.output(15,True)
GPIO.output(19,False)
time.sleep(1)
GPIO.output(21,True)
GPIO.output(23,False)
time.sleep(1)
GPIO.output(29,True)
time.sleep(up)
GPIO.output(7,False)
GPIO.output(15,False)
GPIO.output(21,False)
GPIO.output(29,False)
| UTF-8 | Python | false | false | 649 | py | 17 | rolluiken_op.py | 15 | 0.75963 | 0.694915 | 0 | 34 | 18.088235 | 24 |
ductandev/do_an2 | 9,302,899,189,170 | 9a6fcf39dc61c83372b3a8bebfa2122bb266ee84 | f4620bb44ca9f971e5df26b053fc5b28df2f00c0 | /buoi2/bbb.py | 76469ac21b0756b47a5a24b428f6f56944e78092 | []
| no_license | https://github.com/ductandev/do_an2 | c13f89cbc2f4976d13c130a63eaabb9171ab47b8 | f2fb236bf4b5d56423b5f79e22fc87d0a809003f | refs/heads/main | 2023-07-04T23:35:20.304632 | 2021-08-22T02:30:42 | 2021-08-22T02:30:42 | 391,139,591 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import cv2
from matplotlib import pyplot as plt
# Load the image as a single-channel grayscale array (flag 0 = IMREAD_GRAYSCALE).
img = cv2.imread('anh1.jpg', 0)
# Display it with matplotlib: explicit gray colormap, bicubic resampling.
plt.imshow(img, cmap='gray', interpolation='bicubic')
# Hide the tick marks on both axes.
plt.xticks([]), plt.yticks([])
plt.show()
| UTF-8 | Python | false | false | 198 | py | 95 | bbb.py | 87 | 0.707071 | 0.686869 | 0 | 8 | 23.75 | 53 |
cbain1/DataScience210 | 3,143,916,106,775 | 524f4ba0992f01b0a4c96494263df46128df5b18 | 37a7948722566bfd5fc140621dda59eb2167f43a | /python/poofQ1.py | a6b1486b8be3b3633b0d4ee70796887237aa2db1 | []
| no_license | https://github.com/cbain1/DataScience210 | 54b130b060dedb4f75552230bda0ed30c02f4fc7 | abcefc7dfd2f0776f26fbedd11b451933ae0d1ff | refs/heads/master | 2023-01-23T07:23:47.831193 | 2020-12-03T22:26:38 | 2020-12-03T22:26:38 | 235,894,851 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import random
import re
def eprint(*args, **kwargs):
    """Print to standard error; forwards all arguments to print()."""
    print(*args, file=sys.stderr, **kwargs)
def findStuff(babyFile, relevantInfo):
    """Parse <td>rank</td><td>boy</td><td>girl</td> triples out of the HTML
    and record them as (name, gender) -> rank in *relevantInfo*."""
    pattern = r'<td>(\d*)</td><td>([A-Z]\w*)</td><td>([A-Z]\w*)'
    for match in re.finditer(pattern, babyFile):
        rank, boy_name, girl_name = match.group(1), match.group(2), match.group(3)
        relevantInfo[(boy_name, 'Male')] = rank
        relevantInfo[(girl_name, 'Female')] = rank
def main():
    """Prompt for a baby-names HTML file, parse it and print a ranked table."""
    #File setup
    eprint("What is the name of the baby file you would like to examine? ")
    fileRequest = input()
    babyFile = open(fileRequest).read()
    # the 4-digit year is taken from the file NAME, not from its contents
    year = re.findall(r'\d{4}',fileRequest)
    #heading the table
    print('Most Popular Baby Names for', year[0])
    print('\nGender\t\tName\t\tRank')
    relevantInfo = {}
    #Where the names will go
    findStuff(babyFile, relevantInfo)
    # printing and sorting
    for key in sorted(relevantInfo.keys()):
        # pad with a varying number of tabs depending on the name length
        # so the Rank column lines up
        if len(key[0]) > 7:
            print(key[1]+'\t\t'+key[0]+'\t'+relevantInfo[key])
        elif len(key[0])<4:
            print(key[1]+'\t\t'+key[0]+'\t\t\t'+relevantInfo[key])
        else:
            print(key[1]+'\t\t'+key[0]+'\t\t'+relevantInfo[key])
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
main() | UTF-8 | Python | false | false | 1,161 | py | 117 | poofQ1.py | 38 | 0.652024 | 0.638243 | 0 | 42 | 26.666667 | 81 |
MDAIceland/WaterSecurity | 2,164,663,555,584 | ca1bbbe1ca1eaea9eced39730c2167703a16e8ed | eac4bc62f668f0760512f87ebc0fea6bda5bc265 | /water_security/data/model/metrics/__init__.py | eaa827c31f1ee3dac29249b41fa03a9c23cc0087 | []
| no_license | https://github.com/MDAIceland/WaterSecurity | e3e20f1f13fe63fdded3acc0311b38c83fca545a | af5d5ca5c26a3fdd7c6b6f8ac578248f4f97393b | refs/heads/master | 2023-04-21T08:43:27.559849 | 2021-05-31T15:25:28 | 2021-05-31T15:25:28 | 357,867,996 | 1 | 1 | null | false | 2021-05-31T15:25:29 | 2021-04-14T10:40:49 | 2021-05-31T15:25:17 | 2021-05-31T15:25:28 | 94,871 | 0 | 0 | 4 | Jupyter Notebook | false | false | import pandas as pd
import os
import pickle
# Directory containing this module; the metric pickles live next to it.
METRICS_DIR = os.sep.join(os.path.split(__file__)[:-1])
# Pickle files produced elsewhere (presumably by the training pipeline).
VALIDATION_METRICS_PATH = os.path.join(METRICS_DIR, "validation_metrics.pkl")
TRAINING_METRICS_PATH = os.path.join(METRICS_DIR, "training_metrics.pkl")
FEATURES_IMPORTANCES_PATH = os.path.join(METRICS_DIR, "features_importances.pkl")
# Load the pickles at import time. If any file is missing, the corresponding
# module-level names (VALIDATION_METRICS, ...) are simply left undefined.
try:
    with open(VALIDATION_METRICS_PATH, "rb") as inp:
        VALIDATION_METRICS = pickle.load(inp)
    with open(TRAINING_METRICS_PATH, "rb") as inp:
        TRAINING_METRICS = pickle.load(inp)
    with open(FEATURES_IMPORTANCES_PATH, "rb") as inp:
        FEATURES_IMPORTANCES = pickle.load(inp)
except IOError:
    # missing files are tolerated (e.g. before the first training run)
    pass
| UTF-8 | Python | false | false | 663 | py | 79 | __init__.py | 25 | 0.711916 | 0.710407 | 0 | 18 | 35.833333 | 81 |
swift-nav/ros_rover | 249,108,124,703 | a02e255cd77c30488cc9bc906a96a4ec667b5e23 | 428989cb9837b6fedeb95e4fcc0a89f705542b24 | /erle/ros2_ws/install/share/ament_cmake_test/cmake/run_test.py | 86e964c16f7e674313dc86273efd40eb91445518 | []
| no_license | https://github.com/swift-nav/ros_rover | 70406572cfcf413ce13cf6e6b47a43d5298d64fc | 308f10114b35c70b933ee2a47be342e6c2f2887a | refs/heads/master | 2020-04-14T22:51:38.911378 | 2016-07-08T21:44:22 | 2016-07-08T21:44:22 | 60,873,336 | 1 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | /home/erle/ros2_ws/src/ament/ament_cmake/ament_cmake_test/cmake/run_test.py | UTF-8 | Python | false | false | 75 | py | 4,461 | run_test.py | 1,798 | 0.8 | 0.786667 | 0 | 1 | 75 | 75 |
pdevine/suburbia | 12,472,585,051,262 | 2283c23a4da5f1e5266accea27644146a1569936 | c211359400a01b11d3bd44bfa927aabeba836d1c | /run_game.py | 34a666d652a0d85b8a6069c7e4d39b8991e12331 | [
"LicenseRef-scancode-public-domain"
]
| permissive | https://github.com/pdevine/suburbia | 4c1a0140c2884f2c4c3e3b8ca098295d53da0e45 | 82d2d755c6b5b8554f4d62574f97f3e9270851b8 | refs/heads/master | 2021-01-16T19:00:18.603808 | 2009-05-03T21:48:49 | 2009-05-03T21:48:49 | 185,588 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! /usr/bin/env python
from gamelib import main
import sys
# Command-line flags: '--nosound' disables audio, '--fps' enables the FPS display.
sound = '--nosound' not in sys.argv
fps = '--fps' in sys.argv

# Hand control over to the game's entry point.
main.main(sound, fps)
| UTF-8 | Python | false | false | 197 | py | 34 | run_game.py | 19 | 0.64467 | 0.64467 | 0 | 15 | 12.066667 | 27 |
GaimeWolves/advent-of-code-2018 | 7,232,724,949,688 | b5af9952eb12ac6a66d00fc6492b402d1abf32b2 | 71bece124d402ef19fa903c2ef7721a02a83051b | /8.py | bd99b5be63f855f099370ab03465496a71b5db20 | []
| no_license | https://github.com/GaimeWolves/advent-of-code-2018 | 6d7d6407799e49775f4f11968b13699a06f29d5a | 1d3a61e41a257efc0e6f648f732b5bf1e4030d5e | refs/heads/master | 2022-11-09T01:27:06.155373 | 2020-06-23T22:08:33 | 2020-06-23T22:08:33 | 274,518,387 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class node:
    # NOTE: these are *class* attributes; createNode() reassigns metadata and
    # childs on each instance, so the shared mutable defaults are never
    # mutated through the class (a classic Python pitfall otherwise).
    parent = None
    index = 0
    nOfChilds = 0
    nOfMetadata = 0
    lenOfChilds = 0
    metadata = []
    childs = []
    def toString(self):
        # Debug helper: print this node's header, then each direct child.
        print("#{} {}, {}".format(self.index, self.nOfChilds, self.metadata))
        for child in self.childs:
            print("    {}, {}".format(child.nOfChilds, child.metadata))
input = open("D:\\Programmieren\\Projekte\\AdventOfCode18\\8.txt", "r") #Normal case
#input = open("D:\\Programmieren\\Projekte\\AdventOfCode18\\8-test.txt", "r") #Test case
sumOfMetadata = 0
original = [int(s) for s in input.readlines()[0].split(' ')]
## Creates a node at the index and gets the children nodes recursively
def createNode(parent, index):
    """Parse one node of the AoC day-8 tree from the module-level `original`
    number list, starting at `index`.

    Returns [next_index, node], or None when `index` is already past the
    end of the input. Recurses via getChilds().
    """
    if index == len(original):
        return None
    if parent != None:
        print('New node at index {} with parent at index {}'.format(index, parent.index))
    else:
        print('New node at index {} with parent at index {}'.format(index, 'ROOT'))
    current = node()
    # fresh per-instance lists (the class-level ones are shared defaults)
    current.metadata = []
    current.childs = []
    current.parent = parent
    # header: number of children, then number of metadata entries
    current.nOfChilds = original[index]
    current.index = index
    index += 1
    current.nOfMetadata = original[index]
    index += 1
    # children come first in the stream, then this node's metadata
    index = getChilds(current, current.nOfChilds, index)
    index = getMetadata(current, current.nOfMetadata, index)
    return [index, current]
## Creates all children and adds them to the childs array of the parent node
def getChilds(parent, amount, index):
    """Parse `amount` child nodes starting at `index`, append them to
    `parent.childs`, and return the index just past the last child."""
    for x in range(amount):
        new = createNode(parent, index)
        print('Added child with index {} to {}'.format(index, parent.index))
        # createNode returns [next_index, node]
        index = new[0]
        parent.childs.append(new[1])
    return index
## Gets all metadata of the current node and adds them to the current nodes metadata array
def getMetadata(current, amount, index):
    """Read `amount` metadata entries from the module-level `original` list
    into `current.metadata`; return the index just past the last entry."""
    for x in range(amount):
        print('Metadata at index {} of node at index {}'.format(index, current.index))
        current.metadata.append(original[index])
        index += 1
    return index
## Creates the tree structure
tree = createNode(None, 0)[1]
## Adds all metadatas togheter
def addMetadatas(root, amount):
    """Recursively add every metadata entry of *root*'s subtree to *amount* (part 1)."""
    for i in range(root.nOfChilds):
        amount = addMetadatas(root.childs[i], amount)
    return amount + sum(root.metadata)
print(addMetadatas(tree, 0))
print('\nAufgabe 2')
## AUFGABE 2
## Sums up all values of the nodes recursively
def getValue(root, currentVal=0):
    """AoC day 8 part 2: a leaf's value is the sum of its metadata; an inner
    node's value is the sum of the values of the children referenced by its
    metadata entries (1-based, out-of-range references ignored).

    NOTE: a metadata entry of 0 passes the bounds check and indexes
    childs[-1] (Python negative indexing) -- behavior kept from the original.
    """
    if root.childs:
        for ref in root.metadata:
            if ref - 1 < len(root.childs):
                currentVal = getValue(root.childs[ref - 1], currentVal)
        return currentVal
    return currentVal + sum(root.metadata)
print(getValue(tree)) | UTF-8 | Python | false | false | 2,844 | py | 24 | 8.py | 23 | 0.625527 | 0.616385 | 0 | 85 | 31.482353 | 90 |
yezyvibe/Algorithm_2 | 10,024,453,693,830 | f9853530ca8955fe088ad63ffb78293f457f7c92 | b492abb3d1bdd66dbb51217aed00e6beb440ffe2 | /Programmers/p_순위2.py | ed10469350c391f8a56891cc4665ff47e6945761 | []
| no_license | https://github.com/yezyvibe/Algorithm_2 | fb56299683e5c5dab09568bffbaa2c8045bba74c | 87cc3e6a1d87487a37b8dd6088c89865d4845cb2 | refs/heads/master | 2023-01-29T13:23:58.662403 | 2022-12-03T16:24:26 | 2022-12-03T16:24:26 | 242,994,289 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def dfs(start, adj, n):
    # Iterative DFS from `start` over the adjacency dict `adj`.
    # Players are numbered 1..n; index 0 of `visit` is unused.
    visit = [0] * (n+1)
    stack = [start]
    visit[start] = 1
    while stack:
        cur = stack.pop()
        # nodes with no outgoing edges simply have no adj entry
        if cur not in adj:
            continue
        for k in adj[cur]:
            if not visit[k]:
                visit[k] = 1
                stack.append(k)
    # exclude the start node itself from the reachable count
    visit[start] = 0
    return sum(visit)
def solution(n, results):
    """Count players whose rank is fully determined by the match results.

    A player's rank is fixed exactly when every other player is comparable
    to them, i.e. reachable through the "beats" relation in one direction
    or the other, so the two DFS reach-counts must sum to n - 1.
    """
    beats = {}   # winner -> players they beat directly
    losses = {}  # loser -> players they lost to directly
    for winner, loser in results:
        beats.setdefault(winner, []).append(loser)
        losses.setdefault(loser, []).append(winner)
    answer = 0
    for player in range(1, n + 1):
        reachable = 0
        if player in beats:
            reachable += dfs(player, beats, n)
        if player in losses:
            reachable += dfs(player, losses, n)
        if reachable == n - 1:
            answer += 1
    return answer
print(solution(5, [[4, 3], [4, 2], [3, 2], [1, 2], [2, 5]])) | UTF-8 | Python | false | false | 1,082 | py | 702 | p_순위2.py | 701 | 0.458569 | 0.437853 | 0 | 42 | 24.309524 | 60 |
thibault/phase | 15,453,292,371,407 | 5ebd88c576b5e975245ed7b9c9d127c7556784a8 | e5c382c2175eef90e914604262a641131b68daea | /src/transmittals/models.py | a06bcf345eafc95f1455d894c883502c786f62a8 | [
"MIT"
]
| permissive | https://github.com/thibault/phase | 198231911072c9156eb2af28615024aa250e54fa | 3790b6175d7eaf69f614cfe9f207fbf40745d2ec | refs/heads/master | 2021-01-17T21:02:01.114393 | 2016-01-07T13:45:16 | 2016-01-07T13:45:16 | 47,611,709 | 0 | 0 | null | true | 2015-12-08T09:21:52 | 2015-12-08T09:21:52 | 2015-03-27T14:17:34 | 2015-12-08T09:13:05 | 4,463 | 0 | 0 | 0 | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import logging
import shutil
import uuid
import zipfile
import tempfile
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from django.db import transaction
from django.core.files.base import ContentFile
from model_utils import Choices
from documents.utils import save_document_forms
from documents.models import Document, Metadata, MetadataRevision
from reviews.models import CLASSES, ReviewMixin
from search.utils import build_index_data, bulk_actions
from metadata.fields import ConfigurableChoiceField
from default_documents.validators import StringNumberValidator
from privatemedia.fields import PrivateFileField
from transmittals.fields import TransmittalFileField
from transmittals.fileutils import trs_comments_file_path
from transmittals.pdf import transmittal_to_pdf
logger = logging.getLogger(__name__)
class Transmittal(Metadata):
    """Represents an incoming transmittal.

    Transmittals are created when a contractor uploads documents.

    """
    STATUSES = Choices(
        ('new', _('New')),
        ('invalid', _('Invalid')),
        ('tobechecked', _('To be checked')),
        ('rejected', _('Rejected')),
        ('processing', _('Processing')),
        ('accepted', _('Accepted')),
    )
    latest_revision = models.ForeignKey(
        'TransmittalRevision',
        verbose_name=_('Latest revision'))
    transmittal_key = models.CharField(
        _('Transmittal key'),
        max_length=250)

    # General informations
    transmittal_date = models.DateField(
        _('Transmittal date'),
        null=True, blank=True)
    ack_of_receipt_date = models.DateField(
        _('Acknowledgment of receipt date'),
        null=True, blank=True)
    contract_number = ConfigurableChoiceField(
        verbose_name='Contract Number',
        max_length=8,
        list_index='CONTRACT_NBS')
    originator = ConfigurableChoiceField(
        _('Originator'),
        default='CTR',
        max_length=3,
        list_index='ORIGINATORS')
    recipient = ConfigurableChoiceField(
        _('Recipient'),
        max_length=50,
        list_index='RECIPIENTS')
    sequential_number = models.PositiveIntegerField(
        _('sequential number'),
        null=True, blank=True)
    document_type = ConfigurableChoiceField(
        _('Document Type'),
        default="PID",
        max_length=3,
        list_index='DOCUMENT_TYPES')
    status = models.CharField(
        max_length=20,
        choices=STATUSES,
        default=STATUSES.tobechecked)

    # Related documents
    related_documents = models.ManyToManyField(
        'documents.Document',
        related_name='transmittals_related_set',
        blank=True)

    # Filesystem locations the contractor files move through during the
    # validation workflow (tobechecked -> accepted / rejected).
    contractor = models.CharField(max_length=255, null=True, blank=True)
    tobechecked_dir = models.CharField(max_length=255, null=True, blank=True)
    accepted_dir = models.CharField(max_length=255, null=True, blank=True)
    rejected_dir = models.CharField(max_length=255, null=True, blank=True)

    class Meta:
        app_label = 'transmittals'
        ordering = ('document_number',)
        verbose_name = _('Transmittal')
        verbose_name_plural = _('Transmittals')
        index_together = (
            ('contract_number', 'originator', 'recipient', 'sequential_number',
             'status'),
        )

    class PhaseConfig:
        filter_fields = (
            'originator', 'recipient', 'status',
        )
        column_fields = (
            ('Reference', 'document_number'),
            ('Transmittal date', 'transmittal_date'),
            ('Ack. of receipt date', 'ack_of_receipt_date'),
            ('Originator', 'originator'),
            ('Recipient', 'recipient'),
            ('Document type', 'document_type'),
            ('Status', 'status'),
        )
        searchable_fields = (
            'document_number',
            'originator',
            'recipient',
            'document_type',
            'status',
        )

    def __unicode__(self):
        return self.document_key

    def save(self, *args, **kwargs):
        """Make sure the transmittal key is set before the initial save."""
        if not self.transmittal_key:
            if not self.document_key:
                self.document_key = self.generate_document_key()
            self.transmittal_key = self.document_key
        super(Transmittal, self).save(*args, **kwargs)

    @property
    def full_tobechecked_name(self):
        """Absolute path of this transmittal in the "to be checked" dir."""
        return os.path.join(self.tobechecked_dir, self.transmittal_key)

    @property
    def full_accepted_name(self):
        """Absolute path of this transmittal in the "accepted" dir."""
        return os.path.join(self.accepted_dir, self.transmittal_key)

    @property
    def full_rejected_name(self):
        """Absolute path of this transmittal in the "rejected" dir."""
        return os.path.join(self.rejected_dir, self.transmittal_key)

    def generate_document_key(self):
        """Build the reference from contract / originator / recipient / seq."""
        key = '{}-{}-{}-TRS-{:0>5d}'.format(
            self.contract_number,
            self.originator,
            self.recipient,
            self.sequential_number)
        return key

    @property
    def title(self):
        return self.document_key

    @transaction.atomic
    def reject(self):
        """Mark the transmittal as rejected.

        Upon rejecting the transmittal, we must perform the following
        operations:

         - update the transmittal status in db
         - move the corresponding files in the correct "refused" directory
         - send the notifications to the email list.

        """
        # Only transmittals with a pending validation can be refused
        if self.status != 'tobechecked':
            error_msg = 'The transmittal {} cannot be rejected ' \
                        'in its current status ({})'.format(
                            self.document_key, self.status)
            raise RuntimeError(error_msg)

        # If an existing version already exists in rejected, we delete it before
        if os.path.exists(self.full_rejected_name):
            # Let's hope we got correct data and the process does not run
            # as root. Who would do something that stupid anyway?
            logger.info('Deleting directory {}'.format(self.full_rejected_name))
            shutil.rmtree(self.full_rejected_name)

        # Move to rejected directory
        if os.path.exists(self.full_tobechecked_name):
            try:
                os.rename(self.full_tobechecked_name, self.full_rejected_name)
            except OSError as e:
                logger.error('Cannot reject transmittal {} ({})'.format(
                    self, e))
                raise e
        else:
            # If the directory cannot be found in tobechecked, that's weird but we
            # won't trigger an error
            logger.warning('Transmittal {} files are gone'.format(self))

        # Since the document_key "unique" constraint is enforced in the parent
        # class (Metadata), we need to update this object's key to allow a
        # new transmittal submission with the same transmittal key.
        new_key = '{}-{}'.format(
            self.document_key,
            uuid.uuid4())
        self.document_key = new_key
        self.status = 'rejected'
        self.save()

        self.document.document_key = new_key
        self.document.save()

    def accept(self):
        """Starts the transmittal import process.

        Since the import can be quite long, we delegate the work to
        a celery task.

        """
        # Imported here to avoid a circular import at module load time.
        from transmittals.tasks import process_transmittal

        if self.status != 'tobechecked':
            error_msg = 'The transmittal {} cannot be accepted ' \
                        'in its current state ({})'.format(
                            self.document_key, self.get_status_display())
            raise RuntimeError(error_msg)

        self.status = 'processing'
        self.save()
        process_transmittal.delay(self.pk)
class TransmittalRevision(MetadataRevision):
    """A revision of an incoming transmittal document."""
    # Workflow status of the transmittal; the available values come from
    # the "STATUS_TRANSMITTALS" configurable list.
    trs_status = ConfigurableChoiceField(
        _('Status'),
        max_length=20,
        default='opened',
        list_index='STATUS_TRANSMITTALS')

    class Meta:
        app_label = 'transmittals'
class TrsRevision(models.Model):
    """Stores data imported from a single line in the csv.

    Each line of the contractor-provided csv describes one revision of
    one document; `save_to_document` later turns it into a real
    document / metadata / revision triple.
    """
    transmittal = models.ForeignKey(
        Transmittal,
        verbose_name=_('Transmittal'))
    document = models.ForeignKey(
        Document,
        null=True, blank=True,
        verbose_name=_('Document'))
    document_key = models.SlugField(
        _('Document number'),
        max_length=250)
    category = models.ForeignKey('categories.Category')
    title = models.TextField(
        verbose_name=_('Title'))
    revision = models.PositiveIntegerField(
        verbose_name=_('Revision'),
        default=0)
    revision_date = models.DateField(
        _('Revision date'),
        null=True, blank=True)
    received_date = models.DateField(
        _('Received date'),
        null=True, blank=True)
    created_on = models.DateField(
        _('Created on'),
        null=True, blank=True)
    # Reviewer's verdict on this line: True / False / not decided yet.
    accepted = models.NullBooleanField(
        verbose_name=_('Accepted?'))
    comment = models.TextField(
        verbose_name=_('Comment'),
        null=True, blank=True)
    # True when the line creates a new revision of an existing document.
    is_new_revision = models.BooleanField(
        _('Is new revision?'))

    # Those are fields that will one day be configurable
    # but are static for now.
    contract_number = ConfigurableChoiceField(
        verbose_name='Contract Number',
        max_length=8,
        list_index='CONTRACT_NBS')
    originator = ConfigurableChoiceField(
        _('Originator'),
        default='FWF',
        max_length=3,
        list_index='ORIGINATORS')
    unit = ConfigurableChoiceField(
        verbose_name=_('Unit'),
        default='000',
        max_length=3,
        list_index='UNITS')
    discipline = ConfigurableChoiceField(
        verbose_name=_('Discipline'),
        default='PCS',
        max_length=3,
        list_index='DISCIPLINES')
    document_type = ConfigurableChoiceField(
        verbose_name=_('Document Type'),
        default='PID',
        max_length=3,
        list_index='DOCUMENT_TYPES')
    sequential_number = models.CharField(
        verbose_name=u"sequential Number",
        help_text=_('Select a four digit number'),
        default=u"0001",
        max_length=4,
        validators=[StringNumberValidator(4)],
        null=True, blank=True)
    system = ConfigurableChoiceField(
        _('System'),
        list_index='SYSTEMS',
        null=True, blank=True)
    wbs = ConfigurableChoiceField(
        _('Wbs'),
        max_length=20,
        list_index='WBS',
        null=True, blank=True)
    weight = models.IntegerField(
        _('Weight'),
        null=True, blank=True)
    docclass = models.IntegerField(
        verbose_name=_('Class'),
        default=1,
        choices=CLASSES)
    status = ConfigurableChoiceField(
        verbose_name=_('Status'),
        default='STD',
        max_length=3,
        list_index='STATUSES',
        null=True, blank=True)
    return_code = models.PositiveIntegerField(
        _('Return code'),
        null=True, blank=True)
    review_start_date = models.DateField(
        _('Review start date'),
        null=True, blank=True)
    review_due_date = models.DateField(
        _('Review due date'),
        null=True, blank=True)
    review_leader = models.CharField(
        _('Review leader'),
        max_length=150,
        null=True, blank=True)
    leader_comment_date = models.DateField(
        _('Leader comment date'),
        null=True, blank=True)
    review_approver = models.CharField(
        _('Review approver'),
        max_length=150,
        null=True, blank=True)
    approver_comment_date = models.DateField(
        _('Approver comment date'),
        null=True, blank=True)
    review_trs = models.CharField(
        verbose_name=_('Review transmittal name'),
        max_length=255,
        null=True, blank=True)
    review_trs_status = models.CharField(
        verbose_name=_('Review transmittal status'),
        max_length=50,
        null=True, blank=True)
    outgoing_trs = models.CharField(
        verbose_name=_('Outgoing transmittal name'),
        max_length=255,
        null=True, blank=True)
    outgoing_trs_status = models.CharField(
        verbose_name=_('Outgoing transmittal status'),
        max_length=50,
        null=True, blank=True)
    outgoing_trs_sent_date = models.DateField(
        verbose_name=_('Outgoing transmittal sent date'),
        null=True, blank=True)
    doc_category = models.CharField(
        max_length=50,
        verbose_name=_('Doc category'))
    pdf_file = TransmittalFileField(
        verbose_name=_('Pdf file'))
    native_file = TransmittalFileField(
        verbose_name=_('Native file'),
        null=True, blank=True)

    class Meta:
        app_label = 'transmittals'
        verbose_name = _('Trs Revision')
        verbose_name_plural = _('Trs Revisions')
        unique_together = ('transmittal', 'document_key', 'revision')

    def __unicode__(self):
        return '{} ({:02d})'.format(self.document_key, self.revision)

    def get_absolute_url(self):
        """Url of the diff view for this single csv line."""
        return reverse('transmittal_revision_diff', args=[
            self.transmittal.pk, self.transmittal.document_key,
            self.document_key, self.revision])

    def get_document_fields(self):
        """Return a dict of fields that will be passed to the document form."""
        # The category defines which csv columns map to which model fields.
        columns = self.category.get_transmittal_columns()
        fields = columns.values()
        fields_dict = dict([(field, getattr(self, field)) for field in fields])
        # XXX
        # This is a HACK: force the sequential number back to its
        # canonical zero-padded four digit representation.
        fields_dict.update({
            'sequential_number': '{:04}'.format(int(self.sequential_number))
        })
        files_dict = {
            'native_file': self.native_file,
            'pdf_file': self.pdf_file}
        return fields_dict, files_dict

    def save_to_document(self):
        """Use self data to create / update the corresponding revision."""
        fields, files = self.get_document_fields()
        kwargs = {
            'category': self.category,
            'data': fields,
            'files': files
        }
        # The document was created earlier during
        # the batch import
        if self.document is None and self.revision > 0:
            self.document = Document.objects.get(document_key=self.document_key)
        metadata = getattr(self.document, 'metadata', None)
        kwargs.update({'instance': metadata})
        Form = self.category.get_metadata_form_class()
        metadata_form = Form(**kwargs)
        # If there is no such revision, the method will return None
        # which is fine.
        revision = metadata.get_revision(self.revision) if metadata else None
        kwargs.update({'instance': revision})
        RevisionForm = self.category.get_revision_form_class()
        revision_form = RevisionForm(**kwargs)
        doc, meta, rev = save_document_forms(
            metadata_form, revision_form, self.category)
        # Performs custom import action
        rev.post_trs_import(self)
class OutgoingTransmittal(Metadata):
    """Represents an outgoing transmittal.

    In the end, Transmittal and OutgoingTransmittal should be refactored into a
    single class. However, the incoming trs class contains too much specific
    code and is kept isolated for now.

    """
    latest_revision = models.ForeignKey(
        'OutgoingTransmittalRevision',
        verbose_name=_('Latest revision'))
    contract_number = ConfigurableChoiceField(
        verbose_name='Contract Number',
        max_length=8,
        list_index='CONTRACT_NBS')
    originator = models.CharField(
        _('Originator'),
        max_length=3)
    recipient = models.ForeignKey(
        'accounts.Entity',
        verbose_name=_('Recipient'))
    sequential_number = models.PositiveIntegerField(
        _('sequential number'),
        null=True, blank=True)
    ack_of_receipt_date = models.DateField(
        _('Acknowledgment of receipt date'),
        null=True, blank=True)
    # The revisions embedded in this transmittal; the through model also
    # stores a snapshot of each revision's status / return code.
    related_documents = models.ManyToManyField(
        'documents.Document',
        through='ExportedRevision',
        related_name='outgoing_transmittal_set',
        blank=True)

    class Meta:
        app_label = 'transmittals'
        ordering = ('document_number',)
        verbose_name = _('Outgoing transmittal')
        verbose_name_plural = _('Outgoing transmittals')

    class PhaseConfig:
        filter_fields = ('contract_number', 'ack_of_receipt')
        column_fields = (
            ('Reference', 'document_number'),
            ('Originator', 'originator'),
            ('Recipient', 'recipient'),
            ('Ack of receipt', 'ack_of_receipt'),
        )
        searchable_fields = (
            'document_number',
            'originator',
            'recipient',
        )

    def __unicode__(self):
        return self.document_key

    @property
    def ack_of_receipt(self):
        # True once an acknowledgment of receipt date has been recorded.
        return bool(self.ack_of_receipt_date)

    def generate_document_key(self):
        """Build the reference from contract / originator / recipient / seq."""
        key = '{}-{}-{}-TRS-{:0>5d}'.format(
            self.contract_number,
            self.originator,
            self.recipient.trigram,
            self.sequential_number)
        return key

    @property
    def title(self):
        return self.document_key

    @classmethod
    def get_document_download_form(cls, data, queryset):
        """Return the form used to configure the batch download."""
        from transmittals.forms import TransmittalDownloadForm
        return TransmittalDownloadForm(data, queryset=queryset)

    @classmethod
    def compress_documents(cls, documents, **kwargs):
        """See `documents.models.Metadata.compress_documents`"""
        # content: 'transmittal' (pdf only), 'revisions' (comments only)
        # or 'both'; revisions: 'latest' or 'all'.
        content = kwargs.get('content', 'transmittal')
        revisions = kwargs.get('revisions', 'latest')
        temp_file = tempfile.TemporaryFile()
        with zipfile.ZipFile(temp_file, mode='w') as zip_file:
            for document in documents:
                dirname = document.document_key
                revision = document.get_latest_revision()
                # Should we embed the transmittal pdf?
                if content in ('transmittal', 'both'):
                    # All revisions or just the latest?
                    # NOTE(review): any other `revisions` value leaves
                    # `revs` unbound and raises below — confirm callers
                    # only ever pass 'latest' or 'all'.
                    if revisions == 'latest':
                        revs = [revision]
                    elif revisions == 'all':
                        revs = document.get_all_revisions()
                    # Embed the file in the zip archive
                    for rev in revs:
                        pdf_file = rev.pdf_file
                        pdf_basename = os.path.basename(pdf_file.name)
                        zip_file.write(
                            pdf_file.path,
                            '{}/{}'.format(dirname, pdf_basename),
                            compress_type=zipfile.ZIP_DEFLATED)
                # Should we embed review comments?
                if content in ('revisions', 'both'):
                    exported_revs = revision.metadata.exportedrevision_set \
                        .all() \
                        .select_related()
                    for rev in exported_revs:
                        if rev.comments:
                            comments_file = rev.comments
                            comments_basename = os.path.basename(comments_file.path)
                            zip_file.write(
                                comments_file.path,
                                '{}/{}/{}'.format(
                                    dirname,
                                    rev.document.document_key,
                                    comments_basename),
                                compress_type=zipfile.ZIP_DEFLATED)
        return temp_file

    def link_to_revisions(self, revisions):
        """Set the given revisions as related documents.

        The revisions MUST be valid:
          - belong to the same category
          - be transmittable objects

        """
        trs_revisions = []
        ids = []
        index_data = []
        for revision in revisions:
            # Snapshot the revision data into the through model.
            trs_revisions.append(
                ExportedRevision(
                    document=revision.document,
                    transmittal=self,
                    revision=revision.revision,
                    title=revision.document.title,
                    status=revision.status,
                    return_code=revision.get_final_return_code(),
                    comments=revision.trs_comments))
            ids.append(revision.id)
            # Update ES index to make sure the "can_be_transmitted"
            # filter is up to date
            index_datum = build_index_data(revision)
            index_datum['_source']['can_be_transmitted'] = False
            index_data.append(index_datum)
        with transaction.atomic():
            ExportedRevision.objects.bulk_create(trs_revisions)
            # Mark revisions as transmitted
            # NOTE(review): assumes `revisions` is non-empty — confirm.
            Revision = type(revisions[0])
            Revision.objects \
                .filter(id__in=ids) \
                .update(already_transmitted=True)
        bulk_actions(index_data)

    @classmethod
    def get_batch_actions_modals(cls):
        """Returns a list of templates used in batch actions."""
        return ['transmittals/document_list_download_modal.html']
class OutgoingTransmittalRevision(MetadataRevision):
    """A revision of an outgoing transmittal document."""

    class Meta:
        app_label = 'transmittals'

    def generate_pdf_file(self):
        """Render this transmittal as a pdf, returned as a ContentFile."""
        pdf_content = transmittal_to_pdf(self)
        pdf_file = ContentFile(pdf_content)
        return pdf_file
class ExportedRevision(models.Model):
    """Link between an outgoing transmittal and an exported document."""
    document = models.ForeignKey(Document)
    transmittal = models.ForeignKey(OutgoingTransmittal)
    # Snapshot of the exported revision's data at transmittal time.
    revision = models.PositiveIntegerField(_('Revision'))
    title = models.TextField(_('Title'))
    status = models.CharField(_('Status'), max_length=5)
    return_code = models.CharField(_('Return code'), max_length=5)
    comments = PrivateFileField(
        _('Comments'),
        null=True, blank=True)

    class Meta:
        verbose_name = _('Exported revision')
        verbose_name_plural = _('Exported revisions')
        app_label = 'transmittals'

    @property
    def name(self):
        """A revision identifier should be displayed with two digits"""
        return u'%02d' % self.revision
class TransmittableMixin(ReviewMixin):
    """Define behavior of revisions that can be embedded in transmittals.

    Only reviewable documents can be transmitted, hence the mixin
    inheritance.

    """
    # Set once the revision has been embedded in an outgoing
    # transmittal, so the same revision cannot be transmitted twice.
    already_transmitted = models.BooleanField(
        _('Already embedded in transmittal?'),
        default=False)
    trs_return_code = ConfigurableChoiceField(
        _('Final return code'),
        max_length=3,
        null=True, blank=True,
        list_index='REVIEW_RETURN_CODES')
    trs_comments = PrivateFileField(
        _('Final comments'),
        null=True, blank=True,
        upload_to=trs_comments_file_path)

    class Meta:
        abstract = True

    def get_final_return_code(self):
        """Returns the latest available return code.

        The explicit "final" code wins over the review return code; an
        empty string is returned when neither is set.
        """
        if self.trs_return_code:
            rc = self.trs_return_code
        elif hasattr(self, 'return_code'):
            rc = self.return_code
        else:
            rc = ''
        return rc

    @property
    def can_be_transmitted(self):
        """Is this rev ready to be embedded in an outgoing trs?"""
        return all((
            bool(self.review_end_date),
            not self.already_transmitted,
            self.document.current_revision == self.revision))

    def get_initial_empty(self):
        """New revision initial data that must be empty."""
        empty_fields = super(TransmittableMixin, self).get_initial_empty()
        return empty_fields + ('trs_return_code', 'trs_comments',)
| UTF-8 | Python | false | false | 23,874 | py | 42 | models.py | 36 | 0.591438 | 0.587962 | 0 | 712 | 32.530899 | 84 |
zephylac/Climbing_Recognition | 3,753,801,438,354 | f22784e2cb82c56ae81bc8ded311fa602f08d32c | 6a17b093c9e98619dc3bb1c1ebf646fd5d25f887 | /main.py | 304687b1a4cf473f5606cef07fd72283532805af | [
"Apache-2.0"
]
| permissive | https://github.com/zephylac/Climbing_Recognition | 1e56b689dcf93949b1da494a7056d63ba461341b | 120e3223d5b8c95fe2ba648ac95926c75f50a8a8 | refs/heads/master | 2021-01-25T11:27:53.651133 | 2017-12-28T21:13:39 | 2017-12-28T21:13:39 | 93,928,100 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
import webcolors
from PIL import Image
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 8 09:48:50 2017
@author: zephylac
In this version we try to locate the holds, identify their colors.
"""
def closest_color(requested_color):
    """Return the name of the html4 color closest to ``requested_color``.

    "Closest" means the smallest squared euclidean distance in RGB
    space.  The original built a dict keyed by distance, which silently
    dropped colors sharing the same distance; ``min`` with a key avoids
    that and the intermediate dict altogether.

    @param
    requested_color : [R,G,B]
    """
    def deviation(item):
        hex_value = item[0]
        r_c, g_c, b_c = webcolors.hex_to_rgb(hex_value)
        return ((r_c - requested_color[0]) ** 2
                + (g_c - requested_color[1]) ** 2
                + (b_c - requested_color[2]) ** 2)

    # Pick the (hex, name) pair with the minimum color deviation.
    _hex_value, name = min(webcolors.html4_hex_to_names.items(), key=deviation)
    return name
def get_color_name(requested_color):
    """Return a color name for an RGB triple.

    Uses the exact color name when webcolors knows one, otherwise falls
    back to the closest html4 color.  (The redundant, unused
    ``actual_name`` binding of the original was removed.)
    """
    try:
        return webcolors.rgb_to_name(requested_color)
    except ValueError:
        # No exact match: approximate.
        return closest_color(requested_color)
def available_color(holds_array):
    """Return the list of distinct color names found in ``holds_array``.

    Bug fix: the original tested whether the color *name* was in the
    result list but appended the whole hold *dict*, so the membership
    test never matched and every hold was returned.  We now collect the
    color names themselves, deduplicated, in first-seen order.

    @param :
    holds_array : array containing dicts, each dict represent a single hold
    """
    color_array = []
    for hold in holds_array:
        color = hold.get('color')
        if color not in color_array:
            color_array.append(color)
    return color_array
def color_regroup(color, holds_array):
    """Return the indices of every hold whose color matches ``color``.

    @param :
    color : the color we want to filter on
    holds_array : array containing dicts, each dict represent a single hold
    """
    return [index for index, hold in enumerate(holds_array)
            if hold.get('color') == color]
def hold_RGBToName(holds_array):
    """Store on each hold dict the color name matching its 'RGB' value.

    @param :
    holds_array : array containing dicts, each dict represent a single hold
    """
    for hold in holds_array:
        # Translate the median RGB triple into a human readable name.
        hold.update({'color': get_color_name(hold.get('RGB'))})
def color_finder(holds_array):
    """Attach to each hold dict its mean color as an 'RGB' entry.

    The value stored is the per-channel mean of every pixel making up
    the hold, truncated to int (RGB values have no fractional part).
    Holds without any pixel get [0, 0, 0].

    Relies on the module-level ``pix`` pixel-access object.

    @param :
    holds_array : array containing dicts, each dict represent a single hold
    """
    for hold in holds_array:
        pixels = hold.get('hold')
        if pixels:
            r_sum = g_sum = b_sum = 0
            for px, py in pixels:
                r, g, b = pix[px, py]
                r_sum += r
                g_sum += g
                b_sum += b
            count = len(pixels)
            hold.update({'RGB': [int(r_sum / count),
                                 int(g_sum / count),
                                 int(b_sum / count)]})
        else:
            hold.update({'RGB': [0, 0, 0]})
def drawing_frame(holds_array):
    """Draw a rectangle around each hold, directly into ``pix``.

    The frame is plain red (the hold's own median color is kept around,
    commented out, as in the original).

    @param :
    holds_array : array containing dicts, each dict represent a single hold
    """
    total = len(holds_array)
    for index, hold in enumerate(holds_array):
        min_x, max_x, min_y, max_y = hold.get('corner')
        #R, G, B = hold.get('RGB')
        frame_color = (255, 0, 0)
        # Top and bottom edges.
        for x in range(min_x, max_x):
            pix[x, min_y] = frame_color
            pix[x, max_y] = frame_color
        # Left and right edges.
        for y in range(min_y, max_y):
            pix[min_x, y] = frame_color
            pix[max_x, y] = frame_color
        print ("Drawing frame : [", round((index / total * 100), 0), " %]")
def framing(width, height, holds_array):
    """Compute the bounding box of each hold and store it as 'corner'.

    The box is stored as [minX, maxX, minY, maxY].  A hold without any
    pixel keeps the degenerate initial values [width, 0, height, 0],
    exactly as the original manual scan produced.  Uses the builtin
    min/max instead of a hand-rolled comparison loop.

    @param :
    width : width of the image
    height : height of the image
    holds_array : array containing dicts, each dict represent a single hold
    @return : holds_array, with each dict updated in place
    """
    total = len(holds_array)
    for index, hold in enumerate(holds_array):
        pixels = hold.get('hold')
        if pixels:
            xs = [p[0] for p in pixels]
            ys = [p[1] for p in pixels]
            corner = [min(xs), max(xs), min(ys), max(ys)]
        else:
            # Degenerate frame, identical to the original init values.
            corner = [width, 0, height, 0]
        hold.update({'corner': corner})
        print ("Framing : [", round((index / total * 100), 0), " %]")
    return holds_array
def color_deviation_accepted(x1, y1, x2, y2):
    """Return True when the two pixels differ by more than the tolerance.

    Despite the name, True means the deviation is LARGE on at least one
    RGB channel, i.e. the two pixels are considered different colors.
    Reads pixel values from the module-level ``pix`` object.

    @param :
    x1, y1 : coordinates of the first pixel
    x2, y2 : coordinates of the second pixel
    """
    r1, g1, b1 = pix[x1, y1]
    r2, g2, b2 = pix[x2, y2]
    # Maximum per-channel deviation tolerated before two pixels count as
    # different.  Larger values reduce noise but merge close colors
    # (e.g. green / light green); smaller values do the opposite.
    ecart = 110
    # DEBUG LINE - ignore the color used to mark identified holds
    # if r2 == 255 and g2 == 0 and b2 == 0 : return
    return (abs(r1 - r2) > ecart
            or abs(g1 - g2) > ecart
            or abs(b1 - b2) > ecart)
def grouping_image(width, height, primary_array):
    """Partition the spotted pixels into holds.

    Each hold is returned as a dict {'hold': [[x, y], ...]} listing the
    pixels composing it.

    @param :
    width : width of the image
    height : height of the image
    primary_array : array containing all pixel that have been spotted
    """
    holds_array = []
    # grouping() consumes pixels from this copy as it clusters them.
    remaining = primary_array.copy()
    total = len(primary_array)
    for index, (x, y) in enumerate(primary_array):
        cluster = grouping(x, y, width, height, primary_array, remaining)
        # An empty cluster means this pixel was already grouped earlier.
        if cluster:
            holds_array.append({'hold': cluster})
        print ("Grouping image : [", round((index / total * 100), 0), " %]")
    return holds_array
# Group all pixel from same hold.
# Recursively flood-fills from (x, y): every still-unclaimed neighbour in
# `primary_bis` is moved into the returned cluster, then expanded in turn.
# NOTE(review): range(-2,2) covers offsets -2..1 only, so +2 neighbours
# are never visited, and the `i > 1` / `j > 1` border guards can never
# fire with these offsets — looks like an off-by-one; confirm intent.
# @param :
#   x : Value on X axis
#   y : Value on Y axis
#   width : width of the image
#   height : height of the image
#   primary : array containing all pixel that have been spotted (unused
#             here except being passed down the recursion)
#   primary_bis : copy of primary; pixels are REMOVED from it as they
#                 are claimed, which both marks them visited and
#                 prevents infinite recursion between neighbours
def grouping(x,y,width,height,primary, primary_bis) :
    # init the array to empty
    single_hold_array = []
    for i in range(-2,2) :
        for j in range(-2,2):
            # Skip offsets that would leave the picture.
            if not(x == 0 and i < 0) and not (x == width and i > 1) and not (y == 0 and j < 0) and not (y == height and j > 1) :
                if([x+i,y+j]) in primary_bis:
                    # after testing, verifying color here seemed to break
                    # the algorithm, so the check is disabled:
                    #if(color_deviation_accepted(x, y, x+i, y+j) == True) :
                    # A pixel around the specified one has been identified
                    # as part of the same hold, we add it to the array
                    single_hold_array.append([x+i,y+j])
                    # We remove the value from the array so we don't add it again
                    # an we avoid infinite loop between to pixel neighbooring each other
                    primary_bis.remove([x+i,y+j])
                    # Pixel around the one we added might also be from the same hold
                    # We recursively call them
                    temp_array = grouping(x+i,y+j,width,height,primary,primary_bis)
                    # We add the pixel from the neighboor of the neighboor......
                    # to the array
                    single_hold_array += temp_array
    return single_hold_array
def remove_background(width, height):
    """Blank out (to white) every pixel close to the estimated background.

    The background color is estimated as the mean of 1000 randomly
    sampled pixels.  That mean is temporarily written into pix[0, 0] so
    color_deviation_accepted() can use it as the reference pixel, then
    the original corner pixel is restored.

    @param :
    width : width of the image
    height : height of the image
    """
    saved_corner = pix[0, 0]
    samples = 1000
    r_sum = g_sum = b_sum = 0
    for _ in range(samples):
        # Same randint call order as the original: x first, then y.
        x = random.randint(0, width - 1)
        y = random.randint(0, height - 1)
        r, g, b = pix[x, y]
        r_sum += r
        g_sum += g
        b_sum += b
    # Stash the mean background color in the top-left pixel.
    pix[0, 0] = int(r_sum / samples), int(g_sum / samples), int(b_sum / samples)
    for i in range(width):
        for j in range(height):
            # Pixels whose deviation from the background mean is small
            # (i.e. NOT "accepted") are part of the background.
            if not color_deviation_accepted(i, j, 0, 0):
                pix[i, j] = (255, 255, 255)
        print ("Removing background : [", round(i / width * 100), " %]")
    pix[0, 0] = saved_corner
def spotting_image(width, height):
    """Scan the whole picture and return the pixels that belong to holds.

    @param :
    width : width of the image
    height : height of the image
    """
    primary_array = []
    for x in range(width):
        for y in range(height):
            spotting(x, y, width, height, primary_array)
        print ("Spotting image : [", round(x / width * 100, 0), " %]")
    return primary_array
def spotting(x, y, width, height, primary_array):
    """Mark (x, y) as a hold pixel if it contrasts with a neighbour.

    NOTE(review): range(-1, 1) only yields offsets -1 and 0, so the
    right / bottom neighbours are never inspected and the ``== 1``
    border guards can never fire — looks like an off-by-one, kept
    as-is to preserve behavior.

    @param :
    x : Value on X axis
    y : Value on Y axis
    width : width of the image
    height : height of the image
    primary_array : array containing all pixel that have been spotted
    """
    for dx in range(-1, 1):
        for dy in range(-1, 1):
            # Skip offsets that would leave the picture.
            if (x == 0 and dx == -1) or (x == width and dx == 1):
                continue
            if (y == 0 and dy == -1) or (y == height and dy == 1):
                continue
            if color_deviation_accepted(x, y, x + dx, y + dy):
                # Linear membership test is fine: holds stay small.
                if [x, y] not in primary_array:
                    primary_array.append([x, y])
# Regrouping allow to regroup holds if they have the same color and their
# bounding boxes overlap. We consider them as one hold because it will be
# easier to display them.
# NOTE(review): every overlapping same-color pair produces an entry in
# the result (both (i, j) and (j, i) orders), so merged holds can appear
# more than once — confirm whether downstream code expects that.
# @param :
#   holds_array : array containing dicts, each dict represent a single hold
# @return : a NEW array of merged hold dicts; the input is left untouched
def regrouping(holds_array) :
    new_array = []
    for i in range(len(holds_array)) :
        hold = holds_array[i]
        minX, maxX, minY, maxY = hold.get('corner')
        color = hold.get('color')
        for j in range(len(holds_array)):
            if hold != holds_array[j] :
                min2X, max2X, min2Y, max2Y = holds_array[j].get('corner')
                if(holds_array[j].get('color') == color) :
                    # Merge only when the two frames overlap on both axes.
                    if((minX <= min2X <= maxX or minX <= max2X <= maxX) and (minY <= min2Y <= maxY or minY <= max2Y <= maxY)) :
                        minX = min(minX, min2X)
                        maxX = max(maxX, max2X)
                        minY = min(minY, min2Y)
                        maxY = max(maxY, max2Y)
                        # Channel-wise SUM of the two medians (not a new
                        # mean) — presumably good enough for display;
                        # TODO confirm this is intended.
                        RGB = [x + y for x,y in zip(hold.get('RGB'),holds_array[j].get('RGB'))]
                        pixel = hold.get('hold') + holds_array[j].get('hold')
                        new_array.append({'RGB':RGB,'color':hold.get('color'),'corner':[minX,maxX,minY,maxY],'hold':pixel})
    return new_array
#INPUT
# Open the image to analyse and keep a file pointer on it.
img = Image.open("5.jpg")
# Pixel-access object: pix[x, y] reads / writes a single pixel.
pix = img.load()
# Retrieve the picture dimensions.
width, height = img.size
# Spot all pixels belonging to holds.
primary_array = spotting_image(width, height)
# Group the spotted pixels into individual holds.
holds_array = grouping_image(width, height, primary_array)
# Compute and store each hold's frame (minX, maxX, minY, maxY).
framing(width,height, holds_array)
# Add the median RGB value for each hold.
color_finder(holds_array)
# Add the color name for each hold.
hold_RGBToName(holds_array)
# Build an array of merged holds (same color, overlapping frames only).
new = regrouping(holds_array)
# Collect the colors available among the holds.
color_array = available_color(holds_array)
# Draw frames around the detected holds...
drawing_frame(holds_array)
# ...and around the merged ones.
drawing_frame(new)
#remove_background(width, height)
#color_array = color_regroup('gray',holds_array)
# Show and save the annotated result.
img.show()
img.save("test.jpg")
| UTF-8 | Python | false | false | 14,775 | py | 3 | main.py | 2 | 0.571574 | 0.560948 | 0 | 415 | 34.60241 | 139 |
sinabeulo90/RL-Testbed | 14,894,946,621,963 | 5d314e7d5d4fedde48b03b83d8d3cb33a74b8e2f | c080f9dc24adc769c8f8662b15f8a4ebd677460b | /NGU/wrapper.py | 0b6a997ca6fcf53297f4102da6367944e00c095b | []
| no_license | https://github.com/sinabeulo90/RL-Testbed | 9fe3f3ad80fe47540078e5b379034389b86efe72 | 446a74288a287eb62d64c6fa2a43cd668eb441a0 | refs/heads/master | 2023-07-01T05:35:17.084542 | 2021-08-08T08:40:11 | 2021-08-08T08:40:11 | 314,519,635 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
import numpy as np
from gym import Wrapper
from gym import Env
class MyWrapper(Wrapper):
    """Gym wrapper replacing observations with a preprocessed rendering.

    ``reset()`` and ``step()`` discard the environment's native
    observation and instead return a binarised 80x80 single-channel
    frame built from the ``rgb_array`` rendering of the environment.
    """

    def __init__(self, env: Env) -> None:
        super().__init__(env)

    def reset(self):
        """Reset the wrapped env; return the preprocessed rendered frame."""
        self.env.reset()
        frame = self.env.render(mode="rgb_array")
        return self.preprocessing(frame)

    def step(self, action):
        """Step the wrapped env; swap the observation for a rendered frame."""
        _, reward, done, info = self.env.step(action)
        frame = self.env.render(mode="rgb_array")
        return self.preprocessing(frame), reward, done, info

    def preprocessing(self, obs):
        """Grayscale, erode, downscale and binarise a rendered frame.

        Returns a (80, 80, 1) uint8 array with 255 where the source was
        dark (inverted threshold at 220) and 0 elsewhere.
        """
        gray = cv2.cvtColor(obs, cv2.COLOR_BGR2GRAY)
        # Upscaled before the 3x3 erosion — presumably so thin strokes
        # survive the morphology; TODO confirm.
        gray = cv2.resize(gray, (320, 320))
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        gray = cv2.erode(gray, kernel)
        gray = cv2.resize(gray, (80, 80))
        _, binary = cv2.threshold(gray, 220, 255, cv2.THRESH_BINARY_INV)
        # Add the trailing channel axis expected by the consumer.
        return np.expand_dims(binary, axis=-1)
| UTF-8 | Python | false | false | 960 | py | 25 | wrapper.py | 14 | 0.602083 | 0.570833 | 0 | 29 | 32.103448 | 87 |
Sapphirine/NYC_subway_analysis | 7,713,761,305,541 | c2d5e30e55a4fb4190db513e54187e8428b7a4f0 | 18e25f55905f8b69dd0f5d0b0682f79e11751650 | /MTA/gatherData.py | 8a8e8b3005841212be5420859a2a4e05c63b273a | []
| no_license | https://github.com/Sapphirine/NYC_subway_analysis | 75f7ddb824af396b24ec624318f39ae63fe90450 | 142e40ba36513cf9566044065eb3f79d691bf258 | refs/heads/master | 2021-01-01T03:45:02.845469 | 2016-05-13T11:38:14 | 2016-05-13T11:38:14 | 58,731,809 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys, json, time, csv, datetime
sys.path.append('../utils')
from pytz import timezone
import gtfs_realtime_pb2
import google.protobuf
import alert, tripupdate, vehicle, mtaUpdate
try:
today = time.strftime("%Y-%m-%d")
t_midnight = "%s 00:00:00" %(today)
t_midnight = datetime.datetime.strptime(t_midnight, "%Y-%m-%d %H:%M:%S").timetuple()
t_midnight = time.mktime(t_midnight)
print t_midnight
count = 1
output = []
while(count<1000):
mta = mtaUpdate.mtaUpdates('9677454425d764f551397579a52aa866')
trips = mta.getTripUpdates() #get a list of tripUpdate objects
print len(trips)
for _ in trips:
if _.routeId == "1" or _.routeId == "2" or _.routeId == "3":
try:
tt = time.time()#?
timestamp = int((tt-t_midnight)/60)
tripId = _.tripId
trip_start_time = _.tripId[0:6]
ex_stopId = "120"+_.direction
ts_stopId = "127"+_.direction
route = _.routeId
fs = json.loads(json.dumps(_.futureStops))
#print fs.get(ex_stopId)[0]
time_reaches_96 = fs.get(ex_stopId)[0]["arrivalTime"]
time_reaches_ts = 0
status = ""
except:
continue
try:
#print _.vehicleData.StopId
if _.vehicleData.StopId == ts_stopId:
time_reaches_ts = _vehicleData.timestamp
status = _.vehicleData.currentStopStatus
# print "tripId: %s \n futureStops: %s \n vehicleData: \n currentStopNum: %s \n currentStopId: %s \n tiemstamp: %s \n currentStopStatus: %s \n\n" %(_.tripId, fs.get('127N'), _.vehicleData.currentStopNumber, _.vehicleData.currentStopId, _.vehicleData.timestamp, _.vehicleData.currentStopStatus) #_.vehicleData.currentStopNumber)
except:
try:
time_reaches_ts = fs.get(ts_stopId)[0]["arrivalTime"]
status = "from trip"
except:
continue
#print timestamp, tripId, trip_start_time, status
d = {"origin_timestamp":tt,"timestamp": timestamp, "tripId": tripId, "trip_start_time":trip_start_time, "route":route, "time_96": time_reaches_96, "time_42": time_reaches_ts}
output.append(d)
print "round %d: feed length: %d" %(count,len(output))
count +=1
time.sleep(30)
except:
print "Writing into csv..."
with open('data_new.csv', 'a') as fou:
fieldnames = ['origin_timestamp','timestamp', 'tripId','trip_start_time', 'route', 'time_96', 'time_42']
dw = csv.DictWriter(fou,fieldnames=fieldnames)
dw.writeheader()
#print output
for _ in output:
#print _
dw.writerow(_)
print "Done."
#export to csv | UTF-8 | Python | false | false | 2,464 | py | 7 | gatherData.py | 3 | 0.652192 | 0.622159 | 0 | 71 | 33.71831 | 331 |
iwoca/django-seven | 3,573,412,834,583 | 19da498a767e2117efe26f93827d67e52c12c95d | 9623abb3c6fbcfa7e836fd6139926c546a7b0629 | /tests/deprecation_rules/deprecated_utils_unittest/test_checked_file.py | 8ff0f07613f334823161e03575771a54e9e52f31 | [
"BSD-2-Clause"
]
| permissive | https://github.com/iwoca/django-seven | 954690e689cbdf55e49e88daea364a12fea4b3b0 | c7be98b73c139c9e74a9be94a0f20a723c739c80 | refs/heads/develop | 2020-04-15T16:22:13.852738 | 2016-11-20T22:46:14 | 2016-11-20T22:46:14 | 51,710,374 | 9 | 1 | null | false | 2016-08-18T21:19:56 | 2016-02-14T19:48:03 | 2016-08-11T07:27:24 | 2016-08-18T21:18:55 | 62 | 5 | 1 | 2 | Python | null | null |
from django.test import TestCase
from django.test.utils import override_settings
from django_seven.deprecated_rules.rules import DEPRECATED_UTILS_UNITTEST
from tests.deprecation_rules.mixins import RuleCheckMixin
class TestDeprecatedUtilsUnittestRule(RuleCheckMixin, TestCase):
    """Checks that the deprecated 'django.utils.unittest' rule flags both import forms."""

    @override_settings(DEPRECATED_RULES=[DEPRECATED_UTILS_UNITTEST])
    def test_validate_rule(self):
        checked = '/tests/deprecation_rules/deprecated_utils_unittest/checked_file.py'
        offending_lines = [
            {
                'content': 'from django.utils import unittest\n',
                'number': 2,
                'filename': checked,
            },
            {
                'content': 'import django.utils.unittest\n',
                'number': 3,
                'filename': checked,
            },
        ]
        expected = {DEPRECATED_UTILS_UNITTEST['name']: {'lines': offending_lines}}
        self.assert_report(__file__, expected)
| UTF-8 | Python | false | false | 1,140 | py | 49 | test_checked_file.py | 44 | 0.513158 | 0.511404 | 0 | 28 | 39.678571 | 108 |
jdswan/clquiz | 4,337,917,010,050 | 4fdd7b5f0c252296f285578dc28d0fcdeab9a349 | dae587ad705448cbbdc4a766bdaf658a47734e96 | /JSONInterface.py | 9112366cfdc008f42c9c9742f713218dcfaa9e16 | []
| no_license | https://github.com/jdswan/clquiz | eca23e00eeb7b73d98d484dc4febcbfcbdadfad4 | f36c9c4767f280924326bed08e0837f9289b02a2 | refs/heads/master | 2018-01-09T19:08:05.960412 | 2016-01-21T21:01:07 | 2016-01-21T21:01:07 | 50,203,758 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | try:
import ujson as json
except:
import json
class JSONInterface():
    """Loads a quiz definition from a JSON file and exposes its fields.

    The expected document has three top-level keys: 'questions' (a mapping of
    questions), 'q_type' (the question type identifier) and 'prompt' (the text
    shown to the user).
    """

    def __init__(self):
        # Empty until loadJSON() is called.
        self.json_data = {}

    def loadJSON(self, json_f):
        """Read and parse the JSON file at path *json_f* into self.json_data."""
        # Explicit encoding: JSON is UTF-8; the platform default may differ.
        with open(json_f, encoding='utf-8') as f:
            # json.loads on the full text keeps compatibility with the ujson
            # fallback import at the top of this module.
            self.json_data = json.loads(f.read())

    def get_q_dict(self):
        """Return the mapping stored under the 'questions' key."""
        return self.json_data["questions"]

    def get_q_type(self):
        """Return the question type stored under 'q_type'."""
        return self.json_data["q_type"]

    def get_prompt(self):
        """Return the prompt text stored under 'prompt'."""
        return self.json_data["prompt"]
| UTF-8 | Python | false | false | 450 | py | 5 | JSONInterface.py | 4 | 0.575556 | 0.575556 | 0 | 22 | 19.454545 | 49 |
MasQchips/plugin.video.balandro | 11,416,023,122,788 | 8e316036555417a4c4578ff3680724b165af82ba | 90a7b50edfd401b613809ebe3eb4c4f1bac3bd0c | /channels/seriesflv.py | ccb07289f46192db9e85526d6d1bce37b815c97c | []
| no_license | https://github.com/MasQchips/plugin.video.balandro | a21c7e28d5d9a5c4934e399f7b1b5ec45e5cc6ff | 64d9d34501e5bdd6f07b46ef3cd9a0c611a91dbc | refs/heads/master | 2022-11-20T07:20:10.184042 | 2020-07-20T23:47:27 | 2020-07-20T23:47:27 | 281,248,049 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from platformcode import config, logger
from core.item import Item
from core import httptools, scrapertools, jsontools, servertools, tmdb
import re
# ~ host = 'https://seriesf.lv/'
host = 'https://seriesflv.org/'
headers = {'Referer': host, 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:74.0) Gecko/20100101 Firefox/74.0'}
perpage = 15 # preferiblemente un múltiplo de los elementos que salen en la web (45) para que la subpaginación interna no se descompense
IDIOMAS = {'esp':'Esp', 'lat':'Lat', 'espsub':'VOSE', 'eng':'VO', 'engsub':'VOS', 'es':'Esp', 'la':'Lat', 'sub':'VOSE', 'en':'VO'}
def mainlist(item):
    # Channel entry point: series is the only content type, so delegate.
    return mainlist_series(item)
def mainlist_series(item):
    """Build the channel's main menu: recent episodes per language, the full
    series index, genre/letter browsing and search."""
    logger.info()
    itemlist = []
    itemlist.append(item.clone( title='Últimos episodios en castellano', action='last_episodes', lang='es' ))
    itemlist.append(item.clone( title='Últimos episodios en latino', action='last_episodes', lang='la' ))
    itemlist.append(item.clone( title='Últimos episodios en VOSE', action='last_episodes', lang='sub' ))
    # ~ itemlist.append(item.clone( title='Últimos episodios en VO', action='last_episodes', lang='en' ))
    itemlist.append(item.clone( title='Lista de series', action='list_all', url=host + 'lista-de-series/' ))
    itemlist.append(item.clone( title='Por género', action = 'generos' ))
    itemlist.append(item.clone( title='Por letra (A - Z)', action='alfabetico' ))
    itemlist.append(item.clone( title='Buscar serie ...', action='search', search_type='tvshow' ))
    return itemlist
def generos(item):
    """Return one menu entry per genre available on the site.

    Each tuple is (URL slug, display title); the slug is appended to
    '<host>genero/' and handled by list_all.
    """
    logger.info()
    opciones = [
        ('accion','Acción'),
        ('animacion','Animación'),
        ('anime','Anime'),
        ('aventura','Aventura'),
        ('comedia','Comedia'),
        ('ciencia-ficcion','Ciencia Ficción'),
        ('crimen','Crimen'),
        ('documental','Documental'),
        ('dorama','Dorama'),
        ('drama','Drama'),
        ('fantasia','Fantasía'),
        ('war','Guerra'),
        ('infantil','Infantil'),
        ('misterio','Misterio'),
        ('news','Noticias'),
        ('soap','Novelas'),
        ('reality','Reality Show'),
        ('talk','Talk Show'),
        ('western','Western'),
    ]
    return [item.clone(title=tit, url=host + 'genero/' + opc, action='list_all')
            for opc, tit in opciones]
def alfabetico(item):
    """Build one menu entry per initial letter (A-Z) of the series index."""
    logger.info()
    return [item.clone(title=letra.upper(),
                       url='{}lista-de-series/{}/'.format(host, letra),
                       action='list_all')
            for letra in 'abcdefghijklmnopqrstuvwxyz']
# Una página devuelve todos los episodios (usar cache de una hora: 60x60=3600)
def last_episodes(item):
    """List the most recent episodes for the language in item.lang.

    The home page is fetched once (cached for an hour) and paginated
    internally through item.page.
    """
    logger.info()
    itemlist = []
    perpage = 10 # kept low so the many tmdb calls (show / season / episode) stay fast
    if not item.page: item.page = 0
    if not item.lang: item.lang = 'es'
    data = httptools.downloadpage(host, headers=headers, use_cache=True, cache_duration=3600).data
    # ~ matches = scrapertools.find_multiple_matches(data, '<a href="([^"]+)" class="item-one" lang="%s"(.*?)</a>' % item.lang)
    matches = scrapertools.find_multiple_matches(data, '<a href="([^"]+)" class="item-one"(.*?)</a>')
    # NOTE(review): len()/slicing of filter() assumes Python 2 (Kodi); under
    # Python 3 filter() returns an iterator.
    matches = filter(lambda x: 'language/%s.png"' % item.lang in x[1], matches) # keep only the requested language
    num_matches = len(matches)
    desde = item.page * perpage
    hasta = desde + perpage
    for url, resto in matches[desde:hasta]:
        try:
            title = scrapertools.remove_htmltags(scrapertools.find_single_match(resto, '<div class="i-title">(.*?)</div>')).strip()
            title = re.sub('\(\d{4}\)$', '', title).strip()  # drop a trailing '(year)'
            season, episode = scrapertools.find_single_match(url, '/(\d+)/(\d+)(?:/|)$')
        except:
            continue
        # ~ logger.info('%s %s %s' % (season, episode, title))
        if not title or not season or not episode: continue
        titulo = '%sx%s %s' % (season, episode, title)
        itemlist.append(item.clone( action='findvideos', url=url, title=titulo,
                                    contentType='episode', contentSerieName=title, contentSeason=season, contentEpisodeNumber=episode ))
    tmdb.set_infoLabels(itemlist)
    if num_matches > hasta: # internal sub-pagination
        itemlist.append(item.clone( title='>> Página siguiente', page=item.page + 1, action='last_episodes' ))
    return itemlist
def list_all(item):
    """List the series on an index / genre / search page, with pagination.

    Paginates internally (perpage items per call via item.page) and falls
    back to the site's own 'next' link once the current page is exhausted.
    """
    logger.info()
    itemlist = []
    if not item.page: item.page = 0
    data = httptools.downloadpage(item.url, headers=headers).data
    matches = scrapertools.find_multiple_matches(data, '<article (.*?)</article>')
    num_matches = len(matches)
    for article in matches[item.page * perpage:]:
        url = scrapertools.find_single_match(article, ' href="([^"]+)')
        title = scrapertools.find_single_match(article, ' title="([^"]+)').strip()
        year = scrapertools.find_single_match(title, '\((\d{4})\)')
        if year:
            # Strip the '(year)' suffix from the displayed title.
            title = re.sub('\(\d{4}\)$', '', title).strip()
        else:
            year = '-'
        if not url or not title: continue
        thumb = scrapertools.find_single_match(article, ' src="([^"]+)')
        itemlist.append(item.clone( action='temporadas', url=url, title=title, thumbnail=thumb,
                                    contentType='tvshow', contentSerieName=title,
                                    infoLabels={'year': year} ))
        if len(itemlist) >= perpage: break
    tmdb.set_infoLabels(itemlist)
    # Internal sub-pagination and/or the website's own pagination
    buscar_next = True
    if num_matches > perpage: # sub-paginate inside this page when it has too many items
        hasta = (item.page * perpage) + perpage
        if hasta < num_matches:
            itemlist.append(item.clone( title='>> Página siguiente', page=item.page + 1, action='list_all' ))
            buscar_next = False
    if buscar_next:
        next_page = scrapertools.find_single_match(data, ' href="([^"]+)"\s*><i class="Next')
        if next_page:
            itemlist.append(item.clone (url = next_page, page = 0, title = '>> Página siguiente', action = 'list_all'))
    return itemlist
def temporadas(item):
    """List the seasons found on a series page."""
    logger.info()
    data = httptools.downloadpage(item.url, headers=headers).data
    season_numbers = scrapertools.find_multiple_matches(data, '</i> Temporada (\d+)')
    itemlist = [item.clone(action='episodios', title='Temporada %s' % numtempo,
                           contentType='season', contentSeason=numtempo)
                for numtempo in season_numbers]
    tmdb.set_infoLabels(itemlist)
    return itemlist
# When a single URL returns the episodes of every season, defining
# tracking_all_episodes lets trackingtools scrape them in one request.
def tracking_all_episodes(item):
    return episodios(item)
def episodios(item):
    """List the episodes of a series, optionally restricted to one season.

    Episode titles get a colored language suffix built from the flag images
    found in each table row.
    """
    logger.info()
    itemlist = []
    color_lang = config.get_setting('list_languages_color', default='red')
    data = httptools.downloadpage(item.url, headers=headers).data
    if item.contentSeason or item.contentSeason == 0: # narrow the data to the requested season
        data = scrapertools.find_single_match(data, '<i class="icon-chevron-right"[^>]*></i> Temporada %s(.*?)</table>' % item.contentSeason)
    matches = scrapertools.find_multiple_matches(data, '<tr>(.*?)</tr>')
    for data_epi in matches:
        if '<th' in data_epi: continue  # skip the table header row
        try:
            url, title = scrapertools.find_single_match(data_epi, '<a href="([^"]+)"[^>]*>([^<]*)</a>')
            season, episode = scrapertools.find_single_match(url, '/(\d+)/(\d+)(?:/|)$')
        except:
            continue
        if not url or not season or not episode: continue
        if item.contentSeason and item.contentSeason != int(season): continue
        data_lang = ' '.join(scrapertools.find_multiple_matches(data_epi, ' data-src="([^"]+)'))
        languages = ', '.join([IDIOMAS.get(lang, lang) for lang in scrapertools.find_multiple_matches(data_lang, 'img/language/([^\.]+)')])
        titulo = title.replace(item.contentSerieName, '').strip()
        if languages: titulo += ' [COLOR %s][%s][/COLOR]' % (color_lang, languages) # may overstate: disabled servers still show their flag !?
        itemlist.append(item.clone( action='findvideos', url=url, title=titulo,
                                    contentType='episode', contentSeason=season, contentEpisodeNumber=episode ))
    tmdb.set_infoLabels(itemlist)
    return itemlist
def normalize_server(server):
    """Map a server name scraped from the site to the addon's canonical name."""
    server = servertools.corregir_servidor(server)
    # Re-map servers the site mislabels (their links actually point elsewhere):
    # ~ if server == 'flashx': server = 'cloudvideo'
    # ~ elif server == 'vidabc': server = 'clipwatching'
    # ~ elif server == 'streamin': server = 'gounlimited'
    # ~ elif server == 'streamcloud': server = 'upstream'
    # ~ elif server == 'datafile': server = 'vidia'
    # ~ elif server == 'salesfiles': server = 'mixdrop'
    # ~ elif server == 'streame': server = 'vshare'
    # ~ elif server == 'bigfile': server = 'vidlox'
    # ~ elif server == 'playedtome': server = '1fichier'
    # ~ elif server == 'thevideome': server = 'vevio'
    return server
def findvideos(item):
    """Collect playable/downloadable links for one episode page.

    Scans both the 'Ver' (watch) and 'Descargar' (download) tables; a row
    either embeds the target URL directly ('data-enlace') or links through
    the site with the server encoded in a '?domain=' parameter.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    # ~ logger.debug(data)
    for tipo in ['Ver', 'Descargar']:
        bloque = scrapertools.find_single_match(data, '<div class="titles4 font4 bold">%s.*?<tbody>(.*?)</table>' % tipo)
        # ~ logger.debug(bloque)
        matches = scrapertools.find_multiple_matches(bloque, '<tr>(.*?)</tr>')
        for data_epi in matches:
            url = scrapertools.find_single_match(data_epi, ' data-enlace="([^"]+)')
            if url:
                # Direct link: identify the server from the URL itself.
                server = servertools.get_server_from_url(url)
                if not server or server == 'directo': continue
                url = servertools.normalize_url(server, url)
            else:
                # Indirect link: server name comes from the '?domain=' parameter.
                url = scrapertools.find_single_match(data_epi, ' href="([^"]+)')
                if url.startswith('/'): url = host + url[1:]
                server = scrapertools.find_single_match(data_epi, '\?domain=([^".]+)')
                server = normalize_server(server)
            # ~ logger.info('%s %s' % (server, url))
            if not url or not server: continue
            lang = scrapertools.find_single_match(data_epi, 'img/language/([^\.]+)')
            itemlist.append(Item( channel = item.channel, action = 'play', server = server,
                                  title = '', url = url,
                                  language = IDIOMAS.get(lang, lang) #, other = tipo
                                  ))
    return itemlist
def play(item):
    """Resolve the final video URL before handing the item to the server layer.

    Site-hosted pages hide the real link in a JS variable ('palabra' or
    'enlace*'); external URLs are passed through unchanged.
    """
    logger.info()
    if host not in item.url:
        return [item.clone()]
    data = httptools.downloadpage(item.url, headers=headers).data
    url = scrapertools.find_single_match(data, 'var palabra = "([^"]+)')
    if not url:
        url = scrapertools.find_single_match(data, " enlace\w* = '([^']+)")
    if url:
        return [item.clone(url=url)]
    return []
def search(item, texto):
    """Search the site for a series title (spaces become '+')."""
    logger.info()
    try:
        item.url = host + '?s=' + texto.replace(" ", "+")
        return list_all(item)
    except:
        # Broad on purpose: a failing search must never crash the addon;
        # log the exception info and return an empty result list.
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
| UTF-8 | Python | false | false | 11,660 | py | 104 | seriesflv.py | 85 | 0.599278 | 0.594553 | 0 | 300 | 37.796667 | 152 |
j-wilson/GPflowPILCO | 103,079,226,188 | 74304a5f7266a9928019d889e43d9e11de9c60f6 | 8811f05b65465e1abff1f43559c8257f63a57724 | /gpflow_pilco/dynamics/__init__.py | e4b07ad12fa5b9ca6695de820718cb822c3a72c8 | []
| no_license | https://github.com/j-wilson/GPflowPILCO | d2d1a1dbb12a75780d89943a2a7e90bfe46c86a6 | 414c3d4bf3c65831791a26bed281c35811fb4ae9 | refs/heads/master | 2023-05-11T06:31:21.778292 | 2021-06-01T19:46:40 | 2021-06-01T19:46:40 | 372,943,915 | 10 | 4 | null | null | null | null | null | null | null | null | null | null | null | null | null | __all__ = (
"DynamicalSystem",
"solvers",
)
from gpflow_pilco.dynamics.dynamical_system import DynamicalSystem
from gpflow_pilco.dynamics import solvers as solvers | UTF-8 | Python | false | false | 168 | py | 45 | __init__.py | 44 | 0.767857 | 0.767857 | 0 | 7 | 23.142857 | 66 |
hchiba1/refex-add-rdf | 13,752,485,313,848 | cdb34b9722ff1f317138970ebc24b73c2455b592 | 7164d795527614c728be3e26fdfc84d7788585d5 | /analysis/hypergeom.py | 41124e008a662be12e04862fac08d63c5bbe5c01 | [
"CC-BY-4.0"
]
| permissive | https://github.com/hchiba1/refex-add-rdf | 3d7221c416f165d4df8180cd3747c6b10da1525b | ad0c5268139c0cee033ac3066570f5fcc8389995 | refs/heads/main | 2023-03-20T07:57:51.402740 | 2021-03-18T04:58:58 | 2021-03-18T04:58:58 | 322,147,996 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
import sys

from scipy.stats import hypergeom


def enrichment(total, group1, group2, overlap):
    """Return (p_less, p_more) hypergeometric p-values for an overlap.

    p_less = P(X <= overlap): probability of at most this overlap
             (depletion test).
    p_more = P(X >= overlap): probability of at least this overlap
             (enrichment test); sf(overlap - 1) includes the observed
             overlap itself.
    """
    less_enriched = hypergeom.cdf(overlap, total, group1, group2)
    more_enriched = hypergeom.sf(overlap - 1, total, group1, group2)
    return less_enriched, more_enriched


def main(argv):
    """Parse argv ('total group1 group2 overlap') and print both p-values."""
    if len(argv) != 5:
        print("Usage: ./hypergeom.py total group1 group2 overlap")
        sys.exit(1)
    total, group1, group2, overlap = (int(a) for a in argv[1:5])
    less_enriched, more_enriched = enrichment(total, group1, group2, overlap)
    print(less_enriched, more_enriched)


# Guarded so the module can be imported without running argv parsing
# (the original called sys.exit(1) at import time).
if __name__ == '__main__':
    main(sys.argv)
| UTF-8 | Python | false | false | 442 | py | 52 | hypergeom.py | 4 | 0.708145 | 0.671946 | 0 | 16 | 26.625 | 64 |
matsitka/django-goose | 2,362,232,030,295 | 568bcbdbc126b0448fd2641381352cdb06af7025 | 7b8a7f82f01926cfee30697201699da7b77989f7 | /gooseproject/urls.py | 0aa0d8c5080cef4fcdd67136a4e05981cace15cf | []
| no_license | https://github.com/matsitka/django-goose | 452e6fdc73e1628d28a5827eafc3dd9c84fc4524 | 054155204c69d1cfda9002847d7f822ec01d16c3 | refs/heads/master | 2016-09-06T05:51:06.087006 | 2014-09-12T21:10:14 | 2014-09-12T21:10:14 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf.urls import patterns, include, url
from django.contrib import admin
# Register all ModelAdmin classes declared in the installed apps.
admin.autodiscover()
#mat
from django.conf.urls.static import static
from django.conf import settings
# URL routes for the goose app; static() appends the media/static file routes.
urlpatterns = patterns('',
    url(r'^$', 'gooseapp.views.home', name='home'),
    url(r'^new/$', 'gooseapp.views.add_article', name='add_article'),
    url(r'^article/(?P<link_random>\w{10})/$', 'gooseapp.views.article_id', name='article_id'),
    url(r'^admin/', include(admin.site.urls)),
) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| UTF-8 | Python | false | false | 555 | py | 11 | urls.py | 4 | 0.699099 | 0.695495 | 0 | 20 | 26.75 | 95 |
martinhaagmans/zorganimatie | 11,209,864,671,003 | dda39e512fbf3c5e19bb2e9f07809f328d3ccced | ac3ab78d0b3094c452843cf2905d8b7e0c200231 | /zorganimaties/views.py | 49fd66c26fd3ecc2be8228c7db83bdd2d90feb96 | []
| no_license | https://github.com/martinhaagmans/zorganimatie | 2aa5c9b5637970f9f23c13ea92ad126fa67d9d4e | 15672c8599522c8e6bcbb6b08b49fe716da9fe22 | refs/heads/master | 2021-06-02T17:19:17.812285 | 2020-02-24T19:16:58 | 2020-02-24T19:16:58 | 144,148,684 | 0 | 0 | null | false | 2019-04-10T12:42:46 | 2018-08-09T12:19:20 | 2019-04-09T15:11:52 | 2019-04-10T12:42:45 | 115 | 0 | 0 | 1 | Python | false | false | """This script is for zorganimaties. It parses a textfile and returns JSON."""
import os
# import math
import time
import zipfile
from decimal import Decimal
from flask import Flask
from flask import flash
from flask import url_for
from flask import request
from flask import redirect
from flask import render_template
from flask import send_from_directory
from .scripts import *
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = 'tmp'
app.config['SECRET_KEY'] = 'amygdala'
MYDIR = os.path.dirname(os.path.abspath(__file__))
save_location = os.path.join(MYDIR, app.config['UPLOAD_FOLDER'])
# Section identifiers of a medication-instruction video, in playback order.
# The ordering matters: add_end_times_to_dict() derives each section's end
# time from the start time of the next section in this list.
EVENTS = [ "waarvoor",
           "wanneer_niet",
           "extra_voorzichtig",
           "andere_medicijnen",
           "eten_drinken",
           "zwanger_borstvoeden",
           "autorijden",
           "hoe_gebruiken",
           "teveel_gebruikt",
           "vergeten_stoppen",
           "bijwerkingen",
           "hoe_bewaren"]
def _srt_time_to_seconds(stamp):
    """Convert an SRT timestamp ('HH:MM:SS,mmm') to seconds as a Decimal.

    Only the first two digits of the fractional part are kept (centisecond
    precision), matching the precision used elsewhere in this module.
    """
    whole, fraction = stamp.split(',')
    hours, minutes, seconds = whole.split(':')
    total = int(hours) * 3600 + int(minutes) * 60 + int(seconds)
    return Decimal('{}.{}'.format(total, str(fraction)[:2]))
def parse_filmscript(filmscript):
    """Parse an SRT subtitle file into a dictionary.

    Returns a dict with the key 'filename' (basename of the input path) plus
    one entry per subtitle cue: the key is a (start, end) tuple of Decimal
    seconds, the value the cue text with internal newlines collapsed.

    Bug fix: the final cue is no longer dropped when the file does not end
    with a blank line.
    """
    i = 0  # number of non-blank lines seen in the current cue
    time_start = ''
    time_end = ''
    tekst = ''
    out = {}
    out['filename'] = os.path.basename(str(filmscript))
    with open(filmscript, 'r', encoding="utf-8") as f:
        for line in f:
            if line not in ['\n', '\r\n']:
                i += 1
            if i == 1:
                # Cue index line: ignored.
                continue
            elif i == 2:
                # Timing line: 'HH:MM:SS,mmm --> HH:MM:SS,mmm'.
                start_raw, end_raw = line.strip().split(' --> ')
                time_start = _srt_time_to_seconds(start_raw)
                time_end = _srt_time_to_seconds(end_raw)
            elif i >= 3 and line not in ['\n', '\r\n']:
                # Text line: append, collapsing multi-line cues to one string.
                tekst = tekst.lstrip() + ' ' + line.rstrip()
            elif line in ['\n', '\r\n']:
                # A blank line terminates the cue: store it and reset state.
                i = 0
                out[(time_start, time_end)] = tekst.lstrip()
                time_start = ''
                time_end = ''
                tekst = ''
    if i >= 3:
        # Flush the last cue of files that do not end with a blank line.
        out[(time_start, time_end)] = tekst.lstrip()
    return out
def check_and_disable_events(dict_to_check):
    """Ensure every known event key exists and record whether it was found.

    For each event in EVENTS a companion key '<event>_disabled' is written:
    the string 'true' when the event was missing (the event key is then added
    with an empty value) or 'false' when it was already present.  The dict is
    mutated in place and returned.
    """
    for event in EVENTS:
        present = event in dict_to_check
        if not present:
            dict_to_check[event] = ''
        dict_to_check['{}_disabled'.format(event)] = 'false' if present else 'true'
    return dict_to_check
def get_disabled_events(dict_to_check):
    """Collect the events that are disabled or lack an end time.

    Returns a list containing the event name for every '<event>_disabled'
    equal to 'true', and '<event>_geen_eindtijd' for enabled events whose
    '<event>_end' value is empty.
    """
    problems = []
    for event in EVENTS:
        if dict_to_check['{}_disabled'.format(event)] == 'true':
            problems.append(event)
            continue
        if dict_to_check['{}_end'.format(event)] == '':
            problems.append('{}_geen_eindtijd'.format(event))
    return problems
def add_end_times_to_dict(timing_dict, zwanger):
    """Derive '<event>_end' for each event from the next event's start time.

    The end time is the next section's start minus 1/100 s; when the next
    start is not numeric the end time is stored as an empty string.  When
    zwanger is False the 'zwanger_borstvoeden' section is skipped over:
    'eten_drinken' then ends where the section after it starts.
    """
    for i in range(0, 11):
        key_start = EVENTS[i]
        key_end = EVENTS[i + 1]
        if not zwanger and key_start == 'eten_drinken':
            # Skip the pregnancy section when it is not part of this video.
            key_end = EVENTS[i + 2]
        if '{}_end'.format(key_start) in timing_dict:
            # An explicit end time from the parsers wins over the derived one.
            continue
        time_end = timing_dict['{}'.format(key_end)]
        try:
            # NOTE(review): Decimal(float(...)) introduces binary rounding;
            # round(..., 2) below masks it.  Decimal(str(...)) would be exact
            # -- confirm before changing.
            time_end = Decimal(float(time_end)) - Decimal((1/100))
        except ValueError as e:
            timing_dict['{}_end'.format(key_start)] = ''
        else:
            time_end = round(time_end, 2)
            timing_dict['{}_end'.format(key_start)] = time_end
    return timing_dict
def add_quotes_and_null_to_output_dict(output_dict):
    """Prepare values for JavaScript output: quote them, empty becomes 'null'.

    Keys containing 'disabled' are left untouched (they already hold the
    literal strings 'true'/'false').  Mutates and returns output_dict.
    """
    for key in output_dict:
        if 'disabled' in key:
            continue
        value = output_dict[key]
        output_dict[key] = 'null' if value == '' else '"{}"'.format(value)
    return output_dict
def parse_alles(filmscript):
    """Parse one subtitle file and render it as the final timing script.

    Chooses the language (Dutch vs English, by the 'ENGELS.srt' suffix) and
    the audience variant ('jong'/'oud' in the filename) to pick the parser
    helpers star-imported from .scripts, fills in missing sections and end
    times, and returns the rendered output.
    """
    timing_json = dict()
    dscript = parse_filmscript(filmscript)
    # Generic sections, shared by all variants.
    if filmscript.endswith('ENGELS.srt'):
        timing_json = parse_algemeen_engels(dscript, timing_json)
    else:
        timing_json = parse_algemeen_nl(dscript, timing_json)
    script_name = dscript['filename'].lower()
    zwanger = True
    if 'jong' in script_name:
        if filmscript.endswith('ENGELS.srt'):
            timing_json = parse_jong_specifiek_engels(dscript, timing_json)
        else:
            timing_json = parse_jong_specifiek_nl(dscript, timing_json)
        if not 'vrouw' in script_name:
            # Presumably the pregnancy section only exists in the variants
            # aimed at women -- confirm against the video scripts.
            zwanger = False
    elif 'oud' in script_name:
        zwanger = False
        if filmscript.endswith('ENGELS.srt'):
            timing_json = parse_oud_specifiek_engels(dscript, timing_json)
        else:
            timing_json = parse_oud_specifiek_nl(dscript, timing_json)
    timing_json = check_and_disable_events(timing_json)
    timing_json = add_end_times_to_dict(timing_json, zwanger)
    errors = get_disabled_events(timing_json)
    if not 'aOeind' in timing_json:
        # 'aOeind' is expected from the .scripts parsers; absence is an error.
        errors.append('aOeind')
        timing_json['aOeind'] = ''
    if not zwanger:
        # Explicitly disable the pregnancy section for variants without it.
        timing_json['zwanger_borstvoeden_disabled'] = 'true'
        timing_json['zwanger_borstvoeden_end'] = ''
        timing_json['zwanger_borstvoeden'] = ''
    timing_json = add_quotes_and_null_to_output_dict(timing_json)
    if len(errors) == 0:
        timing_json['niet_gevonden'] = '# Alles ok'
    else:
        timing_json['niet_gevonden'] = '# {} niet gevonden.'.format(' '.join(errors))
    if filmscript.endswith('ENGELS.srt'):
        output = get_output_engels(timing_json)
    else:
        output = get_output_nl(timing_json)
    return output
def zip_output(file_to_zip, zipname):
    """Zip a list of file paths into a single archive at *zipname*.

    Each file is stored under its basename (no directory structure).
    """
    # Renamed the context variable: it used to be `zip`, shadowing the builtin.
    with zipfile.ZipFile(zipname, 'w') as archive:
        for path in file_to_zip:
            archive.write(path, os.path.basename(path))
    return
def single_file_request(screenout):
    """Handle an upload of exactly one subtitle file.

    Saves the upload into the upload folder, parses it, and either renders
    the result inside the page (screenout=True) or returns it as a
    downloadable attachment.
    """
    upload = request.files['targetfile']
    input_file = os.path.join(save_location,
                              upload.filename)
    upload.save(input_file)
    # Name of the attachment offered to the client (not written to disk here).
    output_file = os.path.join('{}.tempo.txt'.format(os.path.basename(upload.filename)))
    out = parse_alles(input_file)
    if screenout:
        return render_template('upload_filmscript.html', json_out=out)
    elif not screenout:
        r = app.response_class(out, mimetype='text/csv')
        r.headers.set('Content-Disposition', 'attachment', filename=output_file)
        return r
def multiple_file_request():
    """Handle an upload of several subtitle files.

    Each file is saved, parsed, and written to the upload folder as
    '<name>.tempo.txt'; all results are bundled into one timestamped zip
    whose path is returned.
    """
    uploaded_files = request.files.getlist('targetfile')
    parsed_files = list()
    for uploaded_file in uploaded_files:
        input_file = os.path.join(save_location, uploaded_file.filename)
        uploaded_file.save(input_file)
        output_file = os.path.join(save_location, '{}.tempo.txt'.format(os.path.basename(uploaded_file.filename)))
        out = parse_alles(input_file)
        with open(output_file, 'w') as f:
            for line in out:
                f.write(line)
        parsed_files.append(output_file)
    date_time = time.strftime('%Y-%m-%d_%H:%M')
    zipname = os.path.join(save_location, '{}.zip'.format(date_time))
    zip_output(parsed_files, zipname)
    return zipname
@app.route('/<path:filename>')
def send_file(filename):
    # Serve generated files (zip archives, parsed scripts) from the upload folder.
    return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
@app.route('/', methods=['GET', 'POST'])
def upload_filmscript():
    """View for zorganimaties app. Return file or rendered html.

    GET renders the upload form.  POST dispatches on the number of uploaded
    files: one file is parsed directly (to the screen or as a download),
    several files are parsed and returned as a zip archive.
    """
    if request.method == 'POST':
        uploads = request.files.getlist('targetfile')
        if len(uploads) == 0:
            flash('Geen file opgegeven', 'error')
            return redirect('/')
        elif len(uploads) == 1:
            # The two former branches were identical; the checkbox simply
            # decides whether the result is shown on screen or downloaded.
            screenout = 'screenout' in request.form
            return single_file_request(screenout)
        else:
            zip_out = multiple_file_request()
            return redirect(url_for('send_file', filename=os.path.basename(zip_out)))
    return render_template('upload_filmscript.html')
if __name__ == '__main__':
app.run(debug=True)
| UTF-8 | Python | false | false | 8,870 | py | 11 | views.py | 5 | 0.578805 | 0.574859 | 0 | 276 | 31.137681 | 114 |
Ayush456/MyJavascript | 6,957,847,048,477 | 3da0f88e95fd2f2c21e5e8ac8b6cb224c4d8e925 | 291cbf164c4e4db5e94c5fe8c2644e33bdb4741e | /Desktop/ayush/python/Anonymous.py | 85877dc385db8891983fa04d46290ca961a80d1d | []
| no_license | https://github.com/Ayush456/MyJavascript | a750f3a472a4e9e404f4769a72a9c3b776fb88d0 | 832aafae0b71c3840832e968a228bba7986aebb0 | refs/heads/master | 2021-05-08T13:07:22.783296 | 2018-02-02T15:38:54 | 2018-02-02T15:38:54 | 119,998,353 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | sum = lambda arg1,arg2 : arg1+arg2
# NOTE(review): `sum` here is the lambda defined above, which shadows the
# builtin `sum`; prefer a `def` with a different name (PEP 8 E731).
print("The total value is : ",sum(10,15))
print("The total value is ",sum(10,0.5)) | UTF-8 | Python | false | false | 118 | py | 9 | Anonymous.py | 8 | 0.669492 | 0.567797 | 0 | 4 | 28.75 | 41 |
SRMSE-V4/Modules | 1,108,101,605,277 | 391a77171f0273c2004d463bee0442f499c3791d | 4bc3fc50085df5cd8451529cef209aeb5ed55402 | /smart/getSmartAns.py | 80842fc28af0ee5f1332f5519c988e145c1d5906 | []
| no_license | https://github.com/SRMSE-V4/Modules | 502cb0f7414627e7882678c2a6329bdd3e3992b9 | c1e5594a033af606f019bb144409393c27544386 | refs/heads/master | 2020-04-28T07:25:13.721322 | 2015-02-19T11:51:44 | 2015-02-19T11:51:44 | 30,155,481 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
import cgi,cgitb,json
form = cgi.FieldStorage()
print "Content-type:text/html\r\n\r"
f = form.getvalue("q","india") #Gets input from the form
import test2
if f!="##123##":
try:
result = test2.get(f) #Gets result out of the smart answer module
#print result
if len(result)!=0:
result=result[0]
key=result.keys()[0]
#print result
if type(result[key]) is list:
lisItems =result[key]
for item in lisItems:
if item.has_key('_id'):
item.pop('_id')
indx=lisItems.index(item)
lisItems[indx]=item
result[key]=lisItems
print json.dumps(result)
else:
if result[key].has_key('_id'):
result[key].pop('_id')
print json.dumps(result)
else:
print "{}"
except Exception as x:
#print x
print "{}"
| UTF-8 | Python | false | false | 967 | py | 20 | getSmartAns.py | 16 | 0.509824 | 0.501551 | 0 | 32 | 29.21875 | 72 |
zizle/FuturesAssistant | 8,555,574,886,677 | 66349c128531a87f8d4c1a6d92f3aaaa79ed2386 | b336eb48db4382b6a42f07fdb1273ead219b1794 | /modules/delivery/__init__.py | c96c59936f6e4c8da28d6997775278287aebae00 | []
| no_license | https://github.com/zizle/FuturesAssistant | ccdbd4c901a7898c547b4043a8b8de8bd8627a0e | 57f71f6ecf2242528188ea48b92141053f614a29 | refs/heads/main | 2023-06-18T08:23:38.094480 | 2020-12-31T07:41:37 | 2020-12-31T07:41:37 | 304,499,229 | 2 | 0 | null | false | 2021-03-26T00:35:29 | 2020-10-16T02:31:34 | 2020-12-31T07:41:45 | 2021-03-26T00:35:28 | 366 | 1 | 0 | 0 | Python | false | false | # _*_ coding:utf-8 _*_
# @File : __init__.py.py
# @Time : 2020-09-22 13:25
# @Author: zizle
from fastapi import APIRouter
from .warehouse import warehouse_router
from .discussion import discussion_router
from .receipt import receipt_router
delivery_router = APIRouter()
delivery_router.include_router(warehouse_router)
delivery_router.include_router(discussion_router)
delivery_router.include_router(receipt_router)
| UTF-8 | Python | false | false | 419 | py | 77 | __init__.py | 71 | 0.770883 | 0.739857 | 0 | 13 | 31.230769 | 49 |
tonylibing/DeepER | 14,757,507,660,673 | 7c793f1a4cebac3fa2bc943bbe32d8d57b9275c8 | 6e1fa017e467bd633738440d0288e97ef61a2c8d | /emergency_model_running/emergency_model/model/Pytorch_prevVersions/rnnPred.py | 3f3d76b4834e25632eaaba978db98f972c702033 | []
| no_license | https://github.com/tonylibing/DeepER | 62bce71db82be5cc5f3e16ffa5d5474c1a4ddb5a | 7f75062529fa99dd358b6bd6c065fa0ef1faedd8 | refs/heads/main | 2023-08-11T01:07:25.636340 | 2021-09-16T04:21:52 | 2021-09-16T04:21:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
import torch
import torch.nn as nn
import torch.utils
import torch.utils.data
from torchvision import datasets, transforms
from torch.autograd import Variable
import time
class RNNPred(nn.Module):
def __init__(self, x_dim, nlayers=1, hiddenUnits=48 ):
super(RNNPred, self).__init__()
self.nLayers = nlayers
self.hiddenUnits = hiddenUnits
#self.rnn = nn.RNN(x_dim, hiddenUnits, nlayers, bias=True)
self.rnn = nn.LSTM(x_dim, hiddenUnits)
self.fc = nn.Sequential( nn.Linear(hiddenUnits, x_dim), nn.Sigmoid(), nn.Linear(x_dim, x_dim))
def forward(self, x, batchsize, y_len):
#mask = torch.zeros((x.shape[0], x.shape[1] + y_length, y.shape[2]))#batchsize, x_len + y_len, output_dim
#rnn
#h = torch.zeros(self.nLayers, batchsize, self.hiddenUnits).double()
h = ( torch.zeros(self.nLayers, batchsize, self.hiddenUnits).double(), torch.zeros(self.nLayers, batchsize, self.hiddenUnits).double())
outputs, hn = self.rnn(x,h)
outputs = self.fc(outputs)
#output = mask * output
return outputs[-y_len:,:,:], hn
### sample make sure that the forward part without backtraking is done
def sample(self, x, y_len, h):
predictions = []
for i in range(y_len): # Because we take the first prediction from previous
#print(x.shape) #[1, 701, 2]
output, h = self.rnn(x, h)
x = self.fc(output)
predictions.append(x)
return predictions | UTF-8 | Python | false | false | 1,382 | py | 4,074 | rnnPred.py | 23 | 0.695369 | 0.68741 | 0 | 42 | 31.928571 | 137 |
Aasthaengg/IBMdataset | 4,363,686,800,889 | c70d5c3ef68e8ea43b6fae526dd26947f913c641 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02259/s184035076.py | c0dbd075ffad772c0bd45ec67c3f71d68bf556b3 | []
| no_license | https://github.com/Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | n = int(input()) - 1
a = list(map(int, input().split()))
count = 0
flag = 1
while flag:
flag = 0
j = 0
for i in range( n, j, -1 ):
if a[i] < a[i-1]:
t = a[i]
a[i] = a[i-1]
a[i-1] = t
count += 1
flag = 1
j += 1
print(*a)
print(count) | UTF-8 | Python | false | false | 327 | py | 202,060 | s184035076.py | 202,055 | 0.366972 | 0.330275 | 0 | 20 | 15.4 | 35 |
kergoth/dotfiles | 13,417,477,836,100 | e8faf3251adf326b8607496a0fe7de564b744287 | cbccdeb9e7c33ece05af059b7152744652250c2b | /os-macos/scripts/recreate-iso | 709f6aba374c6a72581b1de362a31ff7da6d159f | []
| no_license | https://github.com/kergoth/dotfiles | bf4cd3331ed45bdcf557b67400ab92fd2bee94d9 | 9558396c0b4288a37bf65ed72e033cd8d8221fbe | refs/heads/master | 2023-08-27T23:01:27.917829 | 2023-06-02T00:07:04 | 2023-06-02T00:07:04 | 1,935,933 | 16 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# TODO: Also handle UDF
# TODO: Also handle El Torito boot images
import argparse
import os
import pathlib
import re
import subprocess
import tempfile
import xml.etree.ElementTree as etree
from typing import List
joliet_line = re.compile(r'Joliet.*found')
makehybrid_arg_map = [
['image/fileSystems/fileSystem[@TYPE="ISO 9660"]/primaryVolumeDescriptor/volumeSetIdentifier', 'iso-volume-name'],
['image/fileSystems/fileSystem[@TYPE="ISO 9660"]/primaryVolumeDescriptor/volumeSetIdentifier', 'joliet-volume-name'],
['image/fileSystems/fileSystem[@TYPE="ISO 9660"]/primaryVolumeDescriptor/volumeIdentifier', 'iso-volume-name'],
['image/fileSystems/fileSystem[@TYPE="ISO 9660"]/primaryVolumeDescriptor/volumeIdentifier', 'joliet-volume-name'],
['image/fileSystems/fileSystem[@TYPE="ISO 9660"]/primaryVolumeDescriptor/systemIdentifier', 'system-id'],
['image/fileSystems/fileSystem[@TYPE="ISO 9660"]/primaryVolumeDescriptor/publisherIdentifier', 'publisher'],
['image/fileSystems/fileSystem[@TYPE="ISO 9660"]/primaryVolumeDescriptor/dataPreparerIdentifier', 'preparer'],
['image/fileSystems/fileSystem[@TYPE="ISO 9660"]/primaryVolumeDescriptor/copyrightFileIdentifier', 'copyright-file'],
['image/fileSystems/fileSystem[@TYPE="ISO 9660"]/primaryVolumeDescriptor/abstractFileIdentifier', 'abstract-file'],
['image/fileSystems/fileSystem[@TYPE="ISO 9660"]/primaryVolumeDescriptor/bibliographyFileIdentifier', 'bibliography-file'],
['image/fileSystems/fileSystem[@TYPE="HFS"]/masterDirectoryBlock/volumeName', 'hfs-volume-name'],
]
def main():
args = parse_args()
infoargs = []
if args.offset is not None:
infoargs.append('-N')
infoargs.append(args.offset)
infoargs.append('-i')
infoargs.append(str(args.infile))
info_output: str = subprocess.check_output(['isoinfo'] + infoargs + ['-d']).decode('utf-8')
args.has_joliet = 'NO Joliet present' not in info_output
if args.has_joliet:
infoargs.append('-J')
infocmd = ['isoinfo-x'] + infoargs
with tempfile.TemporaryDirectory() as tmpdir:
tmppath = pathlib.Path(tmpdir)
filespath = tmppath / args.volume_name
os.makedirs(filespath)
if args.has_hfs:
extract_image_file(args.infile, filespath)
hfsfiles = list(filespath.iterdir())
subprocess.check_call(infocmd, cwd=filespath)
allfiles = list(filespath.iterdir())
winfiles = set(allfiles) - set(hfsfiles)
if not winfiles:
raise SystemExit('Error: no iso/joliet files found?')
create_iso(args, filespath, winfiles=winfiles, hfsfiles=hfsfiles)
else:
subprocess.check_call(infocmd, cwd=filespath)
create_iso(args, filespath)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--offset')
parser.add_argument('infile')
parser.add_argument('outfile')
args = parser.parse_args()
args.infile = pathlib.Path(args.infile).resolve()
args.outfile = pathlib.Path(args.outfile).resolve()
isolyzed: bytes = subprocess.check_output(['isolyzer', args.infile])
tree = args.tree = etree.fromstring(isolyzed)
args.xpath_args = map_xpath_to_cmdline(tree)
args.volume_name = xml_get_volume_name(tree)
if args.volume_name is None:
args.volume_name = args.infile.stem
if not args.offset:
offset_element = tree.find('image/tests/sizeDifferenceSectors')
if offset_element is not None and offset_element.text is not None:
offset = offset_element.text
if offset.startswith('-'):
offset = offset[1:]
offset = offset.split('.', 1)[0]
args.offset = str(int(offset) + 152)
args.has_hfs = bool(tree.find('image/fileSystems/fileSystem[@TYPE="HFS"]'))
return args
def map_xpath_to_cmdline(tree):
seen = set()
xpath_args = []
for xpath, arg in makehybrid_arg_map:
if arg in seen:
continue
seen.add(arg)
element = tree.find(xpath)
if element is not None and element.text is not None and element.text != 'NOT_SET':
xpath_args.append('-' + arg)
xpath_args.append(element.text)
return xpath_args
def xml_get_volume_name(tree) -> str:
volume_id = tree.find('image/fileSystems/fileSystem[@TYPE="ISO 9660"]/primaryVolumeDescriptor/volumeIdentifier')
if volume_id is not None and volume_id.text is not None:
volume_name = volume_id.text
else:
volume_id = tree.find('image/fileSystems/fileSystem[@TYPE="ISO 9660"]/primaryVolumeDescriptor/volumeSetIdentifier')
if volume_id is not None and volume_id.text is not None:
volume_name = volume_id.text
return volume_name
def extract_image_file(filename: pathlib.Path, destdir: pathlib.Path):
with tempfile.TemporaryDirectory() as mntpath:
subprocess.check_call(['hdiutil', 'attach', '-readonly', '-noautoopen', '-mountpoint', mntpath, filename])
try:
subprocess.check_call(['cp', '-a', mntpath + '/.', destdir])
finally:
subprocess.check_call(['hdiutil', 'detach', mntpath])
def create_iso(args, filespath, winfiles=None, hfsfiles=None):
cmd = generate_iso_cmdline(args, filespath, winfiles, hfsfiles)
try:
os.unlink(args.outfile)
except FileNotFoundError:
pass
subprocess.check_call(cmd)
def generate_iso_cmdline(args, filespath, winfiles=None, hfsfiles=None) -> List[str]:
cmd = ['hdiutil', 'makehybrid', '-iso', '-default-volume-name', args.volume_name, '-o', args.outfile]
if args.has_joliet:
cmd.append('-joliet')
if args.has_hfs:
cmd.append('-hfs')
if hfsfiles:
hfsfiles = [str(f.relative_to(filespath)) for f in hfsfiles]
hfsglobpat = '{' + ','.join(hfsfiles) + '}'
hfsglob = filespath / hfsglobpat
cmd.append('-only-hfs')
cmd.append(hfsglob)
if winfiles:
winfiles = [str(f.relative_to(filespath)) for f in winfiles]
winglobpat = '{' + ','.join(winfiles) + '}'
winglob = filespath / winglobpat
cmd.append('-only-iso')
cmd.append(winglob)
if args.has_joliet:
cmd.append('-only-joliet')
cmd.append(winglob)
cmd.extend(args.xpath_args)
cmd.append(filespath)
return cmd
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 6,503 | 368 | recreate-iso | 346 | 0.655851 | 0.64724 | 0 | 171 | 37.02924 | 127 |
|
tripupp/Sep19 | 19,250,043,440,410 | 6620c0e5a27e76bc74f5d7987aa4bcc87ca11eb8 | 8eadd4c7db6872f28592333207b23a6e9309aba7 | /cities/migrations/0016_food_city.py | 3e2faf60cf299db35ea4e119ce7b97d18c30e0b5 | []
| no_license | https://github.com/tripupp/Sep19 | 142255904d186845f0f5cdc5b04064fa081c9e6d | 4e9ab2077be21c914f2f0207e64268fe6f98224d | refs/heads/master | 2022-11-23T23:46:01.512565 | 2019-09-19T19:46:20 | 2019-09-19T19:46:20 | 205,845,957 | 0 | 1 | null | false | 2022-11-22T04:13:26 | 2019-09-02T11:51:07 | 2019-09-19T19:47:01 | 2022-11-22T04:13:23 | 10,029 | 0 | 1 | 3 | CSS | false | false | # Generated by Django 2.2.4 on 2019-09-16 14:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('cities', '0015_food_foodcuisine_fooddietarytypes_foodfeatures_foodmeal'),
]
operations = [
migrations.AddField(
model_name='food',
name='city',
field=models.ForeignKey(default=3, on_delete=django.db.models.deletion.CASCADE, to='cities.City'),
preserve_default=False,
),
]
| UTF-8 | Python | false | false | 545 | py | 81 | 0016_food_city.py | 43 | 0.640367 | 0.60367 | 0 | 20 | 26.25 | 110 |
dr-dos-ok/Code_Jam_Webscraper | 6,012,954,232,797 | 8aabc5548fc6cfca6c42013ef9bf802a866c502f | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_138/1517.py | c8f04760ce2486dc4143f0f490944c642345b649 | []
| no_license | https://github.com/dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def stairs(Na, Ke):
key_Na = []
key_Ke = []
for i in range(n):
key_Na.append('N')
key_Ke.append('K')
dict_Na = dict(zip(Na, key_Na))
dict_Ke = dict(zip(Ke, key_Ke))
dict_both = dict(dict_Na, **dict_Ke)
value = []
for k in sorted(dict_both.keys()):
value.append(dict_both[k])
return value
def deceit(n, Na, Ke):
max = 0
curr = 0
stair = stairs(Na, Ke)
for i in stair:
if(i=='N'):
curr+=1
if(curr>max):
max = curr
else:
curr-=1
return n-max
def war(n, Na, Ke):
max = 0
curr = 0
stair = stairs(Na, Ke)
for i in reversed(stair):
if(i=='N'):
curr+=1
if(curr>max):
max = curr
else:
curr-=1
return max
T = int(raw_input())
for t in range(T):
n = int(raw_input())
s = raw_input()
Naomi = [float(elem) for elem in s.split()]
Naomi = sorted(Naomi)
s = raw_input()
Ken = [float(elem) for elem in s.split()]
Ken = sorted(Ken)
print "Case #" + str(t+1) + ":", deceit(n, Naomi, Ken), war(n, Naomi, Ken)
| UTF-8 | Python | false | false | 987 | py | 60,747 | 1517.py | 60,742 | 0.559271 | 0.550152 | 0 | 50 | 18.62 | 75 |
sky-dream/LeetCodeProblemsStudy | 18,854,906,462,026 | edc6a72058170b74474af38b527ee852e7f9cbe2 | 6d25434ca8ce03f8fef3247fd4fc3a1707f380fc | /[0367][Easy][Valid_Perfect_Square]/Valid_Perfect_Square_2.py | bc798b00021614e817c6d18fa8829039de0c6c0c | []
| no_license | https://github.com/sky-dream/LeetCodeProblemsStudy | 145f620e217f54b5b124de09624c87821a5bea1b | e0fde671cdc9e53b83a66632935f98931d729de9 | refs/heads/master | 2020-09-13T08:58:30.712604 | 2020-09-09T15:54:06 | 2020-09-09T15:54:06 | 222,716,337 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # solution 4, Integer Newton,pls refer to No.69
# leetcode time cost : 100 ms
# leetcode memory cost : 28.1 MB
# Time Complexity: O(logN)
# Space Complexity: O(1)
class Solution:
def isPerfectSquare(self, num: int) -> bool:
if num < 2:
return True
x = num // 2
while x * x > num:
x = (x + num // x) // 2
return x * x == num | UTF-8 | Python | false | false | 400 | py | 794 | Valid_Perfect_Square_2.py | 763 | 0.525 | 0.4925 | 0 | 14 | 27.642857 | 48 |
guillemarsan/Computational-Geometry | 11,055,245,838,895 | 53f6c88291c4316cdb79a5b97ec001f4269d0bff | 49faf45795fe4acb5debc44cdcd69ad5700f63f8 | /1.5-Hausdorff/Hausdorff.py | 43d8c39e61f560a6eedd0abd6be0926268f64b25 | [
"MIT"
]
| permissive | https://github.com/guillemarsan/Computational-Geometry | 57bdf73d98f37d77aa703c138a9b51862619566b | 59b6e2c821d2c367084b1432e6447740a58bf65a | refs/heads/master | 2021-05-21T19:41:15.827878 | 2020-05-15T11:43:13 | 2020-05-15T11:43:13 | 252,774,102 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Extra: Sierpinski Triangle
"""
import numpy as np
import matplotlib.pyplot as plt
# Generate n points in Sierpinski Triangle
def generate_triangle(n):
p0 = [0,0]
p1 = [2,0]
p2 = [1,np.sqrt(3)]
p = np.array([p0, p1, p2])
vi = 1/2 * (p[0] + p[1])
varr = np.array([vi])
for i in range(n):
r = np.random.randint(3)
vi = 1/2 * (vi + p[r])
varr = np.append(varr,[vi], axis=0)
return varr
# Generate n points in Sierpinski Carpet
def generate_carpet(n):
p0 = [0,0]
p1 = [1,0]
p2 = [2,0]
p3 = [2,1]
p4 = [2,2]
p5 = [1,2]
p6 = [0,2]
p7 = [0,1]
p = np.array([p0, p1, p2, p3, p4, p5, p6, p7])
vi = 1/3 * (p[0] + p[1])
varr = np.array([vi])
for i in range(n):
r = np.random.randint(8)
vi = 1/3 * (vi + p[r])
varr = np.append(varr,[vi], axis=0)
return varr
# Compute for some d and n the estimation of the Hausdorff d-Volume for
# a cover of epsilon = 2/(n-1) squares
def volume_d(set, n, d):
x = np.linspace(0,2,n)
eps = 2/(n-1)
count = 0
for i in range(n-1):
inx = (x[i] < set[:,0]) & (x[i+1] > set[:,0])
for j in range(n-1):
iny = (x[j] < set[:,1]) & (x[j+1] > set[:,1])
filled = np.any(inx & iny)
count = count + filled
return count*((np.sqrt(2)*eps)**d)
#%%
"""
Generate Sierpinski Triangle
"""
set = generate_triangle(50000)
plt.scatter(set[:,0],set[:,1], s=0.1)
plt.axis([0, 2, 0, 2]);
plt.gca().set_aspect('equal', adjustable='box')
#%%
"""
Generate Sierpinski Carpet
"""
set = generate_carpet(50000)
plt.figure()
plt.scatter(set[:,0],set[:,1], s=0.1)
plt.axis([0, 1, 0, 1]);
plt.gca().set_aspect('equal', adjustable='box')
#%%
"""
Compute Sierpinski's Carpet Hausdorff d-Volume
"""
set = generate_carpet(50000)
narr = range(2,150)
d = 1.95
Hd = []
for n in narr:
Hd.append(volume_d(set,n,d))
plt.figure()
plt.plot(narr,Hd)
plt.xlabel('$n$')
plt.ylabel('$H^{d}_{\epsilon}(E)$')
#%%
"""
Compute Sierpinski's Triangle Hausdorff d-Volume
"""
set = generate_triangle(50000)
narr = range(2,150)
d = 1.66
Hd = []
for n in narr:
Hd.append(volume_d(set,n,d))
plt.figure()
plt.plot(narr,Hd)
plt.xlabel('$n$')
plt.ylabel('$H^{d}_{\epsilon}(E)$') | UTF-8 | Python | false | false | 2,319 | py | 10 | Hausdorff.py | 9 | 0.538594 | 0.483829 | 0 | 112 | 19.714286 | 72 |
pedromela/upwork_scanner | 10,179,072,539,880 | 983e9a9ff1847d0176200301abda675b8c457387 | bc300eb1e8a9dcb605a02994cc2b2e929bda8dc5 | /argylescanner/test_deserialize.py | 3745dbf9ddc79233e65feca092c921dfc8f52c6c | []
| no_license | https://github.com/pedromela/upwork_scanner | 9989e021ef1feb555c8f8ae172d38dbcdf2bf90c | b61c662aae23eeeaf44b0d909a1ed5590268255d | refs/heads/main | 2023-03-02T06:06:04.475727 | 2021-02-02T11:29:22 | 2021-02-02T11:29:22 | 335,041,658 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Module to test serialization/deserialization.
Deserializes the objects stored in the pickles folder.
(these objects are created during the scanner execution)
"""
import os
import items.profile
from items.base_item import BaseItem
from items.job import Job
from items.profile import Profile
for file_name in os.listdir("./pickles"):
file = open("./pickles/" + file_name, 'rb')
item = items.base_item.deserialize(file)
print(item)
| UTF-8 | Python | false | false | 448 | py | 12 | test_deserialize.py | 10 | 0.747768 | 0.747768 | 0 | 16 | 27 | 56 |
Miss-Inputs/resource-dumper | 5,128,190,982,217 | 4b4b94a49fd764e9f00531a0bdfda3fbb44ed73a | 68f7b8b756732dda3e03b9c2ab6765e912479a4b | /Resource Dumper.py | 459a30378911ee84bf2dfab60a4a7b50c2d7d7cc | [
"MIT"
]
| permissive | https://github.com/Miss-Inputs/resource-dumper | f13e5cad326450a7eff029956dd8cc84f9dcf959 | cbee4c36741f2f8b002c226e062a44a1e18fd79a | refs/heads/master | 2022-09-16T20:47:08.488861 | 2017-10-16T11:50:21 | 2017-10-16T11:50:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Copyright (c) 2017 Megan "Zowayix" Leet
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This is just a crappy thing to be run under MacPython on OS 9 (and probably other Mac OS Classic
versions that MacPython runs on) that asks the user for a file, and dumps all of the resource fork
of that file into a folder containing subfolders that organize everything by resource type
The dumps are purely data fork, so they might only be useful to you if you know what the resource
types do and how they're structured, or if they're a normal file stuffed into a resource
"""
import macfs
import os
import sys
from Carbon.Res import *
#FSpOpenResFile constants
READ = 1
WRITE = 2
def write_madh(res, output_path):
output = open(output_path, 'wb')
output.write(res.data)
output.close()
#Change the creator to PlayerPRO because it can just open this one
output_spec = macfs.FSSpec(output_path)
output_spec.SetCreatorType('SNPL', 'MADH')
#Custom handlers for certain resource types, so we can dump them a specific way, which might make
#them actually useful
handlers = {}
handlers['MADH'] = write_madh
def dump_resource(res, res_type, res_index, output_folder):
#2nd item in tuple is resource type which we already have
res_id, _junk, res_name = res.GetResInfo()
print ' Resource %d: id = %d, name = %s' % (res_index, res_id, res_name)
print ' size = %d' % res.size
print ' Size on disk: %d' % res.GetResourceSizeOnDisk()
#print ' data: %s' % res.data
if len(res_name) > 0:
res_filename = '%d - %s' % (res_id, res_name.replace(':', '_'))
else:
res_filename = str(res_id)
#I guess filenames can only be 31 chars? *shrug*
res_filename = res_filename[:31]
output_path = os.path.join(output_folder, res_filename)
try:
if res_type in handlers:
handlers[res_type](res, output_path)
else:
output = open(output_path, 'wb')
output.write(res.data)
output.close()
#Change the file type to make things easier
output_spec = macfs.FSSpec(output_path)
output_spec.SetCreatorType('????', res_type)
except:
print 'WTF WHY I HATE YOU %s' % res_filename
res.DetachResource()
def dump_type(res_type, base_folder):
#TODO Can a resource type have a : in the name? Because this would break if it did
type_folder = os.path.join(base_folder, res_type)
if not os.path.exists(type_folder):
os.mkdir(type_folder)
res_count = Count1Resources(res_type)
print ' There are %d resources of this type' % res_count
for res_index in range(1, res_count + 1):
res = Get1IndResource(res_type, res_index)
res.LoadResource()
dump_resource(res, res_type, res_index, type_folder)
def dump_all_types(base_folder):
#There is no resource file parameter here because the
#Resource Manager API just operates on the concept of a single
#current file. This of course would not be thread-safe, but we are
#in the late 90s where threading has barely even been invented
type_count = Count1Types()
print 'There are %d types here' % type_count
#Get1IndType etc is 1-indexed
for type_index in range(1, type_count + 1):
res_type = Get1IndType(type_index)
print ' Type %d: %s' % (type_index, res_type)
dump_type(res_type, base_folder)
def main():
filespec, ok = macfs.StandardGetFile()
if not ok:
sys.exit(0)
print 'File: %s' % filespec.as_pathname()
print filespec.GetCreatorType()
filename = filespec.as_tuple()[2]
#We are limited to 31 characters here and I don't know why
base_folder = ('%s Dumped Resources' % filename)[:31]
if not os.path.exists(base_folder):
os.mkdir(base_folder)
#TODO Need to do some error handling here for files with no resource fork
res_file = FSpOpenResFile(filespec, READ)
dump_all_types(base_folder)
CloseResFile(res_file)
main()
| UTF-8 | Python | false | false | 4,732 | py | 4 | Resource Dumper.py | 1 | 0.729924 | 0.723373 | 0 | 137 | 33.540146 | 98 |
2Min0r/Study | 7,172,595,410,155 | 59b3832dfa52a99033f201c8d9f1ebf10e849070 | 21decace0b49d6c24ac628ad03af58e6420bee68 | /Python/Algorithm/basic/09_isort.py | 8bd16b9bbced6b3e74841cc1f9b3f6a40d851bde | []
| no_license | https://github.com/2Min0r/Study | bb0c49dd4469f241cb8b55990113dae1b3216c90 | b5e7e609386e571339e3f9888d8e49b03b081b46 | refs/heads/master | 2020-11-24T04:31:05.957823 | 2020-03-18T01:40:34 | 2020-03-18T01:40:34 | 227,965,545 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding: utf-8
# O(n^2)
def find_ins_idx(r, v):
for i in range(0, len(r)):
if v < r[i]:
return i
return len(r) # r은 v가 들어가지 않은 상태, 마지막 자리에 새로 값을 넣으므로 len(r)의 idx
def ins_sort(a):
result = []
while a:
value = a.pop(0)
ins_idx = find_ins_idx(result, value)
result.insert(ins_idx, value)
return result
d = [2, 4, 5, 1, 3]
print(ins_sort(d)) | UTF-8 | Python | false | false | 487 | py | 152 | 09_isort.py | 147 | 0.489703 | 0.469108 | 0 | 21 | 19.857143 | 98 |
mmir415/emotion | 13,657,996,024,137 | 6a2ec775b65a1db66ca226ff9c155ae720a546ae | 2f37582d8906312280f54398d6f78c88a1335cc2 | /emorec/tensorflow/classification.py | 2882b8f1adb9d36eb64d0deb301381bbaefd89c4 | [
"MIT"
]
| permissive | https://github.com/mmir415/emotion | 31902ae94e7946cd27398a8e2e5c3fdd5d9c7979 | fae5271d344b02e2552c4d3dd080e0ae3d5c0cbb | refs/heads/master | 2023-05-31T10:40:45.615280 | 2021-06-12T05:15:04 | 2021-06-12T05:15:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from collections import defaultdict
from functools import partial
from itertools import chain
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from sklearn.metrics import get_scorer
from sklearn.model_selection import BaseCrossValidator, LeaveOneGroupOut
from sklearn.model_selection._validation import _score
from tensorflow.keras.callbacks import Callback, History, TensorBoard
from tensorflow.keras.losses import Loss, SparseCategoricalCrossentropy
from tensorflow.keras.metrics import SparseCategoricalAccuracy
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam, Optimizer
from tensorflow.keras.utils import Sequence
from tqdm import tqdm
from ..classification import Classifier, ScoreFunction
from ..utils import batch_arrays, shuffle_multiple
from .utils import DataFunction, TFModelFunction, create_tf_dataset_ragged
class DummyEstimator:
"""Class that implements a dummy estimator for scoring, to avoid
repeated invocations of `predict()` etc.
"""
def __init__(self, y_pred):
self.y_pred = y_pred
def predict(self, x, **kwargs):
return self.y_pred
def predict_proba(self, x, **kwargs):
return self.y_pred
def decision_function(self, x, **kwargs):
return self.y_pred
def fit(
model: Model,
train_data: tf.data.Dataset,
valid_data: Optional[tf.data.Dataset] = None,
epochs: int = 1,
verbose: bool = False,
**kwargs,
):
"""Simple fit functions that trains a model with tf.function's."""
@tf.function
def train_step(data, use_sample_weight=False):
if use_sample_weight:
x, y_true, sample_weight = data
else:
x, y_true = data
sample_weight = None
with tf.GradientTape() as tape:
y_pred = model(x, training=True)
loss = model.compiled_loss(y_true, y_pred, sample_weight=sample_weight)
gradients = tape.gradient(loss, model.trainable_variables)
model.optimizer.apply_gradients(zip(gradients, model.trainable_variables))
model.compiled_metrics.update_state(y_true, y_pred, sample_weight=sample_weight)
return loss
@tf.function
def test_step(data, use_sample_weight=False):
if use_sample_weight:
x, y_true, sample_weight = data
else:
x, y_true = data
sample_weight = None
y_pred = model(x, training=False)
loss = model.compiled_loss(y_true, y_pred, sample_weight=sample_weight)
model.compiled_metrics.update_state(y_true, y_pred, sample_weight=sample_weight)
return loss
use_sample_weight = len(train_data.element_spec) == 3
iter_fn = partial(tqdm, leave=False) if verbose else iter
for epoch in range(epochs):
train_loss = 0.0
train_metrics = []
n_batch = 0
for batch in iter_fn(train_data):
loss = train_step(batch, use_sample_weight)
train_loss += loss
n_batch += 1
train_loss /= n_batch
for metric in model.compiled_metrics.metrics:
train_metrics.append(metric.result())
metric.reset_states()
valid_loss = 0.0
valid_metrics = []
n_batch = 0
if valid_data is not None:
for batch in iter_fn(valid_data):
loss = test_step(batch, use_sample_weight)
valid_loss += loss
n_batch += 1
valid_loss /= n_batch
for metric in model.compiled_metrics.metrics:
valid_metrics.append(metric.result())
metric.reset_states()
if verbose:
msg = "Epoch {:03d}: train_loss = {:.4f}, valid_loss = {:.4f}"
for metric in model.compiled_metrics.metrics:
msg += ", train_{0} = {{:.4f}}, valid_{0} = {{:.4f}}".format(
metric.name
)
metric_vals = chain(*zip(train_metrics, valid_metrics))
print(msg.format(epoch, train_loss, valid_loss, *metric_vals))
def tf_train_val_test(
model_fn: TFModelFunction,
train_data: tf.data.Dataset,
valid_data: tf.data.Dataset,
test_data: tf.data.Dataset,
scoring: Union[
str, List[str], Dict[str, ScoreFunction], Callable[..., float]
] = "accuracy",
**fit_params,
) -> Dict[str, Union[float, History]]:
"""Trains on given data, using given validation data, and tests on
given test data.
Returns:
--------
scores, dict
A dictionary with scorer names as keys and scores as values.
"""
scores = {}
tf.keras.backend.clear_session()
clf = model_fn()
history = clf.fit(train_data, validation_data=valid_data, **fit_params)
scores["history"] = history.history
y_pred = np.argmax(clf.predict(test_data), axis=-1)
y_true = np.concatenate([x[1] for x in test_data])
dummy = DummyEstimator(y_pred)
if isinstance(scoring, str):
val = get_scorer(scoring)(dummy, None, y_true)
scores["test_score"] = val
scores["test_" + scoring] = val
elif isinstance(scoring, (list, dict)):
if isinstance(scoring, list):
scoring = {x: get_scorer(x) for x in scoring}
_scores = _score(dummy, None, y_true, scoring)
for k, v in _scores.items():
scores["test_" + k] = v
elif callable(scoring):
scores["test_score"] = scoring(dummy, None, y_true)
return scores
def tf_cross_validate(
model_fn: TFModelFunction,
x: np.ndarray,
y: np.ndarray,
groups: Optional[np.ndarray] = None,
cv: BaseCrossValidator = LeaveOneGroupOut(),
scoring: Union[str, List[str], Dict[str, ScoreFunction]] = "accuracy",
data_fn: DataFunction = create_tf_dataset_ragged,
sample_weight=None,
log_dir: Optional[Path] = None,
fit_params: Dict[str, Any] = {},
):
"""Performs cross-validation on a TensorFlow model. This works with
both sequence models and single vector models.
Args:
-----
model_fn: callable,
The function used to create a compiled Keras model. This is
called repeatedly on each iteration of cross-validation.
x: numpy.ndarray,
The data array. For sequence input this will be a (ragged) 3-D
array (array of arrays). Otherwise it will be a contiguous 2-D
matrix.
y: numpy.ndarray,
A 1-D array of shape (n_instances,) containing the data labels.
groups: np.ndarray, optional
The groups to use for some cross-validation splitters (e.g.
LeaveOneGroupOut).
cv: BaseCrossValidator,
The cross-validator split generator to use. Default is
LeaveOneGroupOut.
scoring: str, or callable, or list of str, or dict of str to callable
The scoring to use. Same requirements as for sklearn
cross_validate().
data_fn: callable
A callable that returns a tensorflow.data.Dataset instance which
yields data batches. The call signature of data_fn should be
data_fn(x, y, shuffle=True, **kwargs).
fit_params: dict, optional
Any keyword arguments to supply to the Keras fit() method.
Default is no keyword arguments.
"""
scores = defaultdict(list)
n_folds = cv.get_n_splits(x, y, groups)
for fold, (train, test) in enumerate(cv.split(x, y, groups)):
if fit_params.get("verbose", False):
print(f"\tFold {fold + 1}/{n_folds}")
x_train = x[train]
y_train = y[train]
x_test = x[test]
y_test = y[test]
if sample_weight is not None:
sw_train = sample_weight[train]
sw_test = sample_weight[test]
train_data = data_fn(x_train, y_train, sample_weight=sw_train, shuffle=True)
test_data = data_fn(x_test, y_test, sample_weight=sw_test, shuffle=False)
else:
train_data = data_fn(x_train, y_train, shuffle=True)
test_data = data_fn(x_test, y_test, shuffle=False)
# Use validation data just for info
callbacks = []
if log_dir is not None:
tb_log_dir = log_dir / str(fold + 1)
callbacks.append(
TensorBoard(
log_dir=tb_log_dir,
profile_batch=0,
write_graph=False,
write_images=False,
)
)
fit_params["callbacks"] = callbacks
_scores = tf_train_val_test(
model_fn,
train_data=train_data,
valid_data=test_data,
test_data=test_data,
scoring=scoring,
**fit_params,
)
for k in _scores:
scores[k].append(_scores[k])
return {k: np.array(scores[k]) for k in scores}
class TFClassifier(Classifier):
    """Class wrapper for a TensorFlow Keras classifier model.
    Parameters:
    -----------
    model_fn: callable
        A callable that returns a new proper classifier that can be trained.
    n_epochs: int, optional, default = 50
        Maximum number of epochs to train for.
    class_weight: dict, optional
        A dictionary mapping class IDs to weights. Default is to ignore
        class weights.
    data_fn: callable, optional
        Callable that takes x and y as input and returns a
        tensorflow.keras.Sequence object or a tensorflow.data.Dataset
        object.
    callbacks: list, optional
        A list of tensorflow.keras.callbacks.Callback objects to use during
        training. Default is an empty list, so that the default Keras
        callbacks are used.
    loss: keras.losses.Loss
        The loss to use. Default is
        tensorflow.keras.losses.SparseCategoricalCrossentropy.
    optimizer: keras.optimizers.Optimizer
        The optimizer to use. Default is tensorflow.keras.optimizers.Adam.
    verbose: bool, default = False
        Whether to output details per epoch.
    """
    def __init__(
        self,
        model_fn: TFModelFunction,
        n_epochs: int = 50,
        class_weight: Optional[Dict[int, float]] = None,
        data_fn: Optional[DataFunction] = None,
        callbacks: List[Callback] = [],
        loss: Loss = SparseCategoricalCrossentropy(),
        optimizer: Optimizer = Adam(),
        verbose: bool = False,
    ):
        # NOTE(review): the defaults [] / Loss() / Adam() are mutable objects
        # shared across instances. fit() rebuilds loss/optimizer via
        # from_config, which mitigates the loss/optimizer sharing, but the
        # callbacks list itself is still shared.
        self.model_fn = model_fn
        self.n_epochs = n_epochs
        self.class_weight = class_weight
        # Only store a user override; data_fn() falls back to a plain
        # tf.data pipeline when none was supplied.
        if data_fn is not None:
            self._data_fn = data_fn
        self.callbacks = callbacks
        self.loss = loss
        self.optimizer = optimizer
        self.verbose = verbose
    def data_fn(
        self, x: np.ndarray, y: np.ndarray, shuffle: bool = True
    ) -> tf.data.Dataset:
        """Return a dataset over (x, y), preferring the user-supplied
        data function when one was given to the constructor."""
        if hasattr(self, "_data_fn") and self._data_fn is not None:
            return self._data_fn(x, y, shuffle)
        dataset = tf.data.Dataset.from_tensor_slices((x, y))
        if shuffle:
            dataset = dataset.shuffle(len(x))
        return dataset
    def fit(
        self,
        x_train: np.ndarray,
        y_train: np.ndarray,
        x_valid: np.ndarray,
        y_valid: np.ndarray,
        fold=0,
    ):
        """Build a fresh model and train it on the training split, monitoring
        the validation split; *fold* only relabels TensorBoard log dirs."""
        # Clear graph
        tf.keras.backend.clear_session()
        # Reset optimiser and loss
        optimizer = self.optimizer.from_config(self.optimizer.get_config())
        loss = self.loss.from_config(self.loss.get_config())
        for cb in self.callbacks:
            if isinstance(cb, TensorBoard):
                # Redirect TensorBoard output into a per-fold subdirectory.
                cb.log_dir = str(Path(cb.log_dir).parent / str(fold))
        self.model = self.model_fn()
        self.model.compile(
            loss=loss, optimizer=optimizer, metrics=tf_classification_metrics()
        )
        train_data = self.data_fn(x_train, y_train, shuffle=True)
        valid_data = self.data_fn(x_valid, y_valid, shuffle=True)
        self.model.fit(
            train_data,
            epochs=self.n_epochs,
            class_weight=self.class_weight,
            validation_data=valid_data,
            callbacks=self.callbacks,
            verbose=int(self.verbose),
        )
    def predict(
        self, x_test: np.ndarray, y_test: np.ndarray
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Return (y_pred, y_true) for the test data, in dataset order."""
        test_data = self.data_fn(x_test, y_test, shuffle=False)
        # Pull the labels back out of the dataset so their order matches
        # the order the model sees during predict().
        y_true = np.concatenate([x[1] for x in test_data])
        y_pred = np.empty_like(y_true)
        # argmax over class scores, written straight into y_pred.
        np.argmax(self.model.predict(test_data), axis=1, out=y_pred)
        return y_pred, y_true
class BalancedSparseCategoricalAccuracy(SparseCategoricalAccuracy):
    """Sparse categorical accuracy with per-sample weights inversely
    proportional to the size of the sample's class. Equivalent to the
    arithmetic mean of per-class recall (i.e. as if all classes had the
    same number of instances)."""
    def __init__(self, name="balanced_sparse_categorical_accuracy", **kwargs):
        super().__init__(name, **kwargs)
    def update_state(self, y_true, y_pred, sample_weight=None):
        labels = y_true
        # Drop a trailing singleton label axis so labels are rank-matched.
        if y_true.shape.ndims == y_pred.shape.ndims:
            labels = tf.squeeze(labels, axis=[-1])
        labels = tf.cast(labels, tf.int32)
        # Weight each sample by 1 / (size of its class in this batch).
        inv_counts = tf.math.reciprocal_no_nan(
            tf.cast(tf.math.bincount(labels), self.dtype)
        )
        weights = tf.gather(inv_counts, labels)
        return super().update_state(y_true, y_pred, sample_weight=weights)
class BatchedFrameSequence(Sequence):
    """Keras Sequence yielding batches of frame sequences.
    Parameters:
    -----------
    x: ndarray or list of ndarray
        Sequences of vectors.
    y: ndarray
        Labels corresponding to sequences in x.
    prebatched: bool, default = False
        Whether or not x has already been grouped into batches.
    batch_size: int, default = 32
        Batch size to use. Each generated batch will be at most this size.
    shuffle: bool, default = True
        Whether to shuffle the order of the batches.
    """
    def __init__(
        self,
        x: Union[np.ndarray, List[np.ndarray]],
        y: np.ndarray,
        prebatched: bool = False,
        batch_size: int = 32,
        shuffle: bool = True,
    ):
        if prebatched:
            self.x, self.y = x, y
        else:
            # Delegate grouping into batches to the project helper.
            self.x, self.y = batch_arrays(
                x, y, batch_size=batch_size, shuffle=shuffle
            )
        if shuffle:
            self.x, self.y = shuffle_multiple(self.x, self.y, numpy_indexing=True)
    def __len__(self):
        return len(self.x)
    def __getitem__(self, idx: int):
        return self.x[idx], self.y[idx]
class BatchedSequence(Sequence):
    """Keras Sequence that slices instance vectors into batches on the fly.
    Parameters:
    -----------
    x: ndarray or list of ndarray
        Instance feature vectors. Each vector is assumed to be for a different
        instance.
    y: ndarray
        Labels corresponding to sequences in x.
    batch_size: int, default = 32
        Batch size to use. Each generated batch will be at most this size.
    shuffle: bool, default = True
        Whether to shuffle the instances.
    """
    def __init__(
        self, x: np.ndarray, y: np.ndarray, batch_size: int = 32, shuffle: bool = True
    ):
        self.x, self.y = x, y
        self.batch_size = batch_size
        if shuffle:
            self.x, self.y = shuffle_multiple(self.x, self.y, numpy_indexing=True)
    def __len__(self):
        # Number of batches, counting a final partial batch.
        full, remainder = divmod(len(self.x), self.batch_size)
        return full + (1 if remainder else 0)
    def __getitem__(self, idx: int):
        start = idx * self.batch_size
        stop = start + self.batch_size
        return self.x[start:stop], self.y[start:stop]
def tf_classification_metrics():
    """Build the accuracy metrics attached to every compiled model:
    plain accuracy ("war") and class-balanced accuracy ("uar")."""
    war = SparseCategoricalAccuracy(name="war")
    uar = BalancedSparseCategoricalAccuracy(name="uar")
    return [war, uar]
| UTF-8 | Python | false | false | 15,972 | py | 24 | classification.py | 21 | 0.61013 | 0.607313 | 0 | 459 | 33.797386 | 88 |
Gabriel-ino/python_basics | 11,484,742,569,379 | fb6fcb081c11e5e47f6f15251b569844c468176c | cde06d205db8c46867c169d2ceb77012046dcc42 | /maior_menor.py | cf97c54d5326760fc6d20e766933b2a091b963b3 | []
| no_license | https://github.com/Gabriel-ino/python_basics | eace2e74e886867178c6396ed958f4e037fadd71 | be415cc58ca3b2452210a832a373fb229d46e416 | refs/heads/master | 2023-01-11T00:17:46.096412 | 2020-11-11T20:57:28 | 2020-11-11T20:57:28 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Read two numbers and report the larger one (or that they are equal).
primeiro = float(input('Digite aqui um valor qualquer: '))
segundo = float(input('Digite aqui outro valor qualquer: '))
print('-=' * 20, '\nOs valores digitados foram: {} e {}'.format(primeiro, segundo))
if primeiro == segundo:
    print('Os dois valores são iguais!')
else:
    maior = primeiro if primeiro > segundo else segundo
    print('Dentre eles, o maior valor é: {}'.format(maior))
print('-='*20) | UTF-8 | Python | false | false | 385 | py | 67 | maior_menor.py | 66 | 0.63089 | 0.594241 | 0 | 10 | 37.2 | 70 |
austil/AdventOfCode2020 | 3,685,081,949,350 | 9910d1e675c2b07ccba2d9de96304326bbbc0a95 | 8841a1680dfd5e3d37908b102b6bd19eb601c41b | /day18_operation_order.py | 7abc448ff313904425983ae23cf6e3f51a80c368 | []
| no_license | https://github.com/austil/AdventOfCode2020 | 10d701907bd4ba70dac71316525daac4dc9d8c37 | eda7d40d5fea3149515ea2c6902bf307050e959e | refs/heads/main | 2023-02-02T18:03:53.220676 | 2020-12-19T21:16:00 | 2020-12-19T21:16:00 | 318,813,310 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Day 18 : Operation Order
#
# Input : a math operation
# Part 1 : find the sum of all the operation
import unittest
from typing import List
def apply(stack, operators):
while ( len(stack) >= 3
and stack[-3].isdigit()
and stack[-2] in operators
and stack[-1].isdigit()):
expr = f'{stack[-3]} {stack[-2]} {stack[-1]}'
stack = stack[:-3]
stack.append(str(eval(expr)))
return stack
def compute(operation):
parts = operation.replace('(', ' ( ').replace(')', ' ) ').split(' ')
stack = []
for p in parts:
if p == ')':
# get rid of parenthesis
assert stack[-2] == '(' and stack[-1].isdigit()
stack = stack[:-2] + [stack[-1]]
elif p != '':
stack.append(p)
# apply ALL operators as we go
stack = apply(stack, ['+', '*'])
assert len(stack) == 1
return int(stack[0])
def compute2(operation):
parts = operation.replace('(', ' ( ').replace(')', ' ) ').split(' ')
stack = []
for p in parts:
if p == ')':
stack = apply(stack, ['*'])
# get rid of parenthesis
assert stack[-2] == '(' and stack[-1].isdigit()
stack = stack[:-2] + [stack[-1]]
elif p != '':
# stack
stack.append(p)
# apply only + as we go
stack = apply(stack, ['+'])
stack = apply(stack, ['*'])
assert len(stack) == 1
return int(stack[0])
class SamplesTests(unittest.TestCase):
def test_part_one(self):
self.assertEqual(compute('1 + 2 * 3 + 4 * 5 + 6'), 71)
self.assertEqual(compute('1 + (2 * 3) + (4 * (5 + 6))'), 51)
self.assertEqual(compute('2 * 3 + (4 * 5)'), 26)
self.assertEqual(compute('5 + (8 * 3 + 9 + 3 * 4 * 3)'), 437)
self.assertEqual(compute('5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4))'), 12240)
self.assertEqual(compute('((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2'), 13632)
def test_part_two(self):
self.assertEqual(compute2('1 + 2 * 3 + 4 * 5 + 6'), 231)
self.assertEqual(compute2('1 + (2 * 3) + (4 * (5 + 6))'), 51)
self.assertEqual(compute2('2 * 3 + (4 * 5)'), 46)
self.assertEqual(compute2('5 + (8 * 3 + 9 + 3 * 4 * 3)'), 1445)
self.assertEqual(compute2('5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4))'), 669060)
self.assertEqual(compute2('((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2'), 23340)
unittest.main(argv=[''], verbosity=2, exit=False)
print("\n------\n")
with open("puzzle_inputs/day18.txt", "r") as f:
input = f.readlines()
print('Part 1:', sum([compute(e.strip()) for e in input]))
assert sum([compute(e.strip()) for e in input]) == 9535936849815
print('Part 2:', sum([compute2(e.strip()) for e in input]))
assert sum([compute2(e.strip()) for e in input]) == 472171581333710
| UTF-8 | Python | false | false | 2,897 | py | 15 | day18_operation_order.py | 14 | 0.501899 | 0.434933 | 0 | 84 | 33.488095 | 92 |
LittleBlue512/LittleBluePublic | 8,117,488,231,362 | cf2f9b8ab4aa0d14aa40a974edd592b8696523f8 | 6332d7e1e1cadd1f215900de15a2b9158adeb540 | /Lab/Computer Programming/L07/Files/P4.py | 757904711a5090c5b1d8cba63f58a13b92185ec3 | []
| no_license | https://github.com/LittleBlue512/LittleBluePublic | 05d30a92d3a4d3a3d5a3f0a4bad944107788cce9 | 49935ea968a2a895f4e50438e00b367de497a4cf | refs/heads/master | 2020-10-02T08:29:29.185162 | 2020-03-28T08:46:08 | 2020-03-28T08:46:08 | 212,817,613 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def add_place(_dict, k, val):
    """Store *val* under key *k* in *_dict* and return the (mutated) mapping."""
    _dict.update({k: val})
    return _dict
if __name__ == '__main__':
    # Demo: overwrite one entry, then add a new one, printing after each.
    visits = {'Denali NP': 2007, 'Arequipa': 2006, 'Taktsang': 2015}
    for place, year in (('Taktsang', 2014), ('Provins', 2013)):
        visits = add_place(visits, place, year)
        print(visits)
| UTF-8 | Python | false | false | 298 | py | 115 | P4.py | 88 | 0.583893 | 0.516779 | 0 | 11 | 26 | 68 |
samolkz/ILearnDeepLearning.py | 11,828,339,970,368 | ead56602a8d0d3cbfd3395a7449e68855aea9c17 | b45ec2b49f2efbd777ec7496edb5fe88fc130c7d | /01_mysteries_of_neural_networks/06_numpy_convolutional_neural_net/src/errors.py | 0fb5b0891af56a791238106a06a48c4a701f61d2 | [
"MIT"
]
| permissive | https://github.com/samolkz/ILearnDeepLearning.py | 068ac404e5ab6fa12346a440b16de26437f324a7 | dd4344c833ceb7e277eb33549efa6afe52194738 | refs/heads/master | 2023-03-11T12:04:58.873146 | 2021-03-02T06:31:07 | 2021-03-02T06:31:07 | 269,059,379 | 0 | 0 | MIT | true | 2020-06-03T10:33:56 | 2020-06-03T10:33:55 | 2020-06-03T08:30:38 | 2020-06-01T22:24:05 | 280,466 | 0 | 0 | 0 | null | false | false | class InvalidPaddingModeError(Exception):
pass
| UTF-8 | Python | false | false | 51 | py | 62 | errors.py | 34 | 0.803922 | 0.803922 | 0 | 2 | 24.5 | 41 |
COVID19Tracking/quality-control | 15,496,242,029,453 | a40cb46b97f29b841276e58ea071f6deb37be000 | 4dac39e0fbdecede3b2ab23ea80c62776301e4cf | /flaskapp.py | e1e295f4caf8fa463a1f4cbb756f742b70174d4d | [
"Apache-2.0"
]
| permissive | https://github.com/COVID19Tracking/quality-control | 409cc4ad4bec39b0cbdb78b8d74230c40b811263 | a4395d98c29f534ee0ec2d49fb88fec77ffc6dd9 | refs/heads/master | 2021-05-17T13:18:54.147140 | 2020-05-26T01:05:35 | 2020-05-26T01:05:35 | 250,794,254 | 10 | 5 | Apache-2.0 | false | 2020-05-26T00:52:03 | 2020-03-28T12:52:24 | 2020-05-20T00:16:29 | 2020-05-26T00:52:03 | 13,061 | 9 | 5 | 4 | Python | false | false | #
# Flask App for serving check results
#
# To test local:
# 1. run_check_service.py to start a Pyro4 server
# 2. set FLASK_APP=flaskapp.py
# 3. flask run
# 4. browse http://127.0.0.1:5000/
#
# To run in production, use gunicorn and WSGI. See the example in _system.
#
import os
from flask import Flask, render_template
from loguru import logger
from datetime import timedelta
from flask import Flask
from flaskcheck import checks, service_load_dates
# register dynamically
#@route("/", methods=["GET"])
def index():
site_at, service_at, server_now = service_load_dates()
def format_delta(td: timedelta) -> str:
s = int(td.total_seconds())
if s < 60: return f"{s} sec" + ("s" if s == 1 else "")
m = s // 60
if m < 60: return f"{m} mins" + ("s" if m == 1 else "")
h = m // 60
if h < 24: return f"{h} hours" + ("s" if h == 1 else "")
d = h // 24
return f"{d} days" + ("s" if d == 1 else "")
site_delta = format_delta(server_now - site_at)
service_delta = format_delta(server_now - service_at) + " ago" if service_at != None else "[down]"
server_now = server_now.isoformat()
return render_template("index.html",
server_now=server_now,
site_delta=site_delta,
service_delta=service_delta)
def create_app() -> Flask:
    """Application factory: build the Flask app with all routes registered."""
    application = Flask(__name__)
    application.add_url_rule("/", 'index', index, methods=["GET"])
    application.register_blueprint(checks)
    return application
if __name__ == "__main__":
    # Local development entry point only; production serves via gunicorn/WSGI.
    app = create_app()
app.run(host='127.0.0.1')
| UTF-8 | Python | false | false | 1,561 | py | 36 | flaskapp.py | 22 | 0.594491 | 0.570788 | 0 | 53 | 28.45283 | 102 |
camilolaiton/Compiler-Online | 6,932,077,258,122 | ce2244abfe59184f90454ebb55d021300d074216 | 80c53c295a9f2640e2e8e45cf911bcc8f4283e03 | /compiladores/apps/codigo/admin.py | ea96b4b67b3b86ec97ea7be7e1ca24e61ba6b39d | []
| no_license | https://github.com/camilolaiton/Compiler-Online | d4a30e3996f5e0119e4d0385180c4c7d9b1e260a | 224664208006275c7557a149d39fc8a4127a28bc | refs/heads/total | 2021-06-13T04:03:22.079365 | 2020-01-19T22:02:10 | 2020-01-19T22:02:10 | 133,278,715 | 1 | 0 | null | false | 2021-06-10T20:16:21 | 2018-05-13T22:20:20 | 2020-05-24T04:19:09 | 2021-06-10T20:16:19 | 412 | 1 | 0 | 2 | Python | false | false | from django.contrib import admin
from apps.codigo.models import Codigo
# Register your models here.
admin.site.register(Codigo) | UTF-8 | Python | false | false | 129 | py | 47 | admin.py | 31 | 0.813953 | 0.813953 | 0 | 6 | 20.666667 | 37 |
scraperwiki/newsreader-api | 5,763,846,157,790 | 785cc580165750b1e157793b22565ab32dcf2441 | 4bed2eeec674906951f9e5a8773fdf5147771f17 | /app/queries/framenet_frequency_count.py | af6655486f007bea890ae1df5a8f798974d64543 | [
"Apache-2.0"
]
| permissive | https://github.com/scraperwiki/newsreader-api | 70deda00d998df0a093115dbc3163202d352b390 | db9a95b173c805cf67748e6ae75f2552b7fb5e54 | refs/heads/dev | 2021-01-21T04:35:36.988984 | 2016-08-11T15:08:46 | 2016-08-11T15:08:46 | 35,954,838 | 5 | 3 | null | false | 2016-08-11T15:08:47 | 2015-05-20T14:57:11 | 2016-07-26T02:41:29 | 2016-08-11T15:08:47 | 370 | 2 | 0 | 6 | Python | null | null | #!/usr/bin/env python
# encoding: utf-8
from __future__ import unicode_literals
from queries import SparqlQuery
class framenet_frequency_count(SparqlQuery):
    """ Get the frequency of Framenet terms across all events
    """
    def __init__(self, *args, **kwargs):
        super(framenet_frequency_count, self).__init__(*args, **kwargs)
        # Human-facing metadata shown on the API documentation pages.
        self.query_title = 'Frequency of Framenet types in events'
        self.description = ("FrameNet provides an indication of the character of"
                            " an event, as determined by semantic 'frames'.**This query is very slow"
                            " and will timeout the server, contents are available as a file**")
        # Endpoint path and per-corpus example links (all identical here
        # because the query takes no URI parameters).
        self.url = 'framenet_frequency_count'
        self.world_cup_example = 'framenet_frequency_count'
        self.cars_example = 'framenet_frequency_count'
        self.ft_example = 'framenet_frequency_count'
        self.wikinews_example = 'framenet_frequency_count'
        # Counts events per FrameNet type; the FILTER keeps only rdf:type
        # values under the FrameNet namespace.
        self.query_template = ("""
SELECT
?frame (count (?frame) AS ?count)
WHERE {{
?event a sem:Event .
?event a ?filterfield .
FILTER(STRSTARTS(STR(?filterfield), "http://www.newsreader-project.eu/ontologies/framenet/")) .
BIND (?filterfield AS ?frame) .
}}
GROUP BY ?frame
ORDER by desc(?count)
LIMIT {limit}
OFFSET {offset}
""")
        # Companion query: number of distinct frames (used for paging).
        self.count_template = ("""
SELECT
(COUNT (DISTINCT ?frame) AS ?count)
WHERE {{
?event a sem:Event .
?event a ?filterfield .
FILTER(STRSTARTS(STR(?filterfield), "http://www.newsreader-project.eu/ontologies/framenet/")) .
BIND (?filterfield AS ?frame) .
}}
""")
        self.jinja_template = 'table.html'
        self.headers = ['frame', 'count']
        # This query needs no URIs; output/filter are optional knobs.
        self.required_parameters = []
        self.optional_parameters = ["output", "filter"]
        self.number_of_uris_required = 0
        self.query = self._build_query()
| UTF-8 | Python | false | false | 1,867 | py | 45 | framenet_frequency_count.py | 31 | 0.633637 | 0.632566 | 0 | 57 | 31.754386 | 95 |
sjtuzyl/Read | 6,975,026,895,138 | cec5eaabe9c537f960d0a659ed98b103758e6c74 | ca2c2289f10aee3be1a085d6d147aac49f7225ba | /myapps/redis_/__init__.py | 638620cf834996e4d62d273600f0ca44ae671a5b | []
| no_license | https://github.com/sjtuzyl/Read | 275a04e86817062e92e79e0e6a9a4c211217e2f7 | 838968e65ed01b21b2e517efbdf8abb06fe240b8 | refs/heads/master | 2020-03-26T08:59:13.738561 | 2018-08-17T03:45:24 | 2018-08-17T03:45:24 | 144,729,433 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from redis import Redis
rd = Redis('127.0.0.1', db=12) # 创建redis对象
def incrTopRank(id):
# 自增id的阅读排行
rd.zincrby('ReadTopRank', id)
def getReadTopRank(top):
# 获取排名前top的文章
topRanks = rd.zrevrange('ReadTopRank', 0, top - 1, withscores=True)
try:
from art.models import Articl
# topArticls = Articl.objects.in_bulk([int(id.decode()) for id,_ in topRanks])
return [(Articl.objects.get(pk=int(id.decode())), int(score)) for id, score in topRanks]
except:
pass
| UTF-8 | Python | false | false | 554 | py | 13 | __init__.py | 10 | 0.637597 | 0.618217 | 0 | 20 | 24.8 | 96 |
itamaradin15/hidden_cat_netflix | 8,589,960,002 | 2a80ed748c15813f26188e1926dde16c19a05cdd | fba37693f5e7be82c3bfcd7a535707ac2455fbea | /categories/admin.py | 887f517fff596f313fd84e322fe99682cdef5379 | []
| no_license | https://github.com/itamaradin15/hidden_cat_netflix | 33546fc05826c1a08428f67eceeb7de2633b769b | 23f2d1a2ca0e9da93644c1b623645683e038c70e | refs/heads/master | 2022-11-15T12:33:36.146534 | 2020-06-22T18:42:51 | 2020-06-22T18:42:51 | 274,039,370 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from .models import Category
from .models import CategoryType
# Make the Category model manageable through the Django admin site.
admin.site.register(Category)
admin.site.register(CategoryType) | UTF-8 | Python | false | false | 159 | py | 6 | admin.py | 4 | 0.842767 | 0.842767 | 0 | 6 | 25.666667 | 33 |
Mitya-Avadyaev/python_projects | 7,275,674,635,064 | 1577aee0813d0210543abeb0408c2a457537cacd | c4d988f4e3bf4f2ba6f93514ad47b78d40bb2309 | /Yandex contest/D_race.py | dee106fa86012a91035da82859e955e98641a858 | []
| no_license | https://github.com/Mitya-Avadyaev/python_projects | 09fc7b7fa069b719a5efbae0ac91ff5480205646 | 1fd76b927775541399b155e8ec970666c85af2c1 | refs/heads/main | 2023-05-29T10:14:07.326680 | 2022-01-11T16:55:39 | 2022-01-11T16:55:39 | 339,777,652 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from abc import ABC, abstractmethod
class Transport(ABC):
    """Base racer: a numbered vehicle of a given type with an effective speed."""
    def __init__(self, number, type, speed):
        self.number = number
        self.type = type
        self.speed = speed
    def countDistance(self, time, cycle):
        # Position on a circular track of length *cycle* after *time*.
        return abs(cycle - time * self.speed) % cycle
class Carriage(Transport):
    """Carriage: no fuel, so speed is used as-is."""
    def __init__(self, number, type, speed):
        super().__init__(number, type, speed)
class Auto(Transport):
    """Car: lower-octane fuel reduces the effective speed."""
    _FUEL_PERCENT = {98: 100, 95: 90, 92: 80}
    def __init__(self, number, type, speed, fuel):
        super().__init__(number, type, speed)
        percent = self._FUEL_PERCENT.get(fuel)
        if percent is not None and percent != 100:
            self.speed = int(speed / 100 * percent)
class Moto(Transport):
    """Motorcycle: the fuel penalty is harsher than for cars."""
    _FUEL_PERCENT = {98: 100, 95: 80, 92: 60}
    def __init__(self, number, type, speed, fuel):
        super().__init__(number, type, speed)
        percent = self._FUEL_PERCENT.get(fuel)
        if percent is not None and percent != 100:
            self.speed = int(speed / 100 * percent)
if __name__ == "__main__":
    # n racers, circular track length m, race duration t.
    n, m, t = map(int, input().split())
    mass = []
    for i in range(n):
        # Each racer line: number, type, speed[, fuel]; type 1=Auto, 2=Moto.
        a = list(map(int, input().split()))
        if a[1] == 1:
            mass.append(Auto(a[0], a[1], a[2], a[3]))
        elif a[1] == 2:
            mass.append(Moto(a[0], a[1], a[2], a[3]))
        else:
            mass.append(Carriage(a[0], a[1], a[2]))
    max1 = 0
    winner = 0
    for i, transport in enumerate(mass):
        # Winner = racer farthest along the lap after time t.
        if (transport.speed * t) % m > max1:
            max1 = (transport.speed * t) % m
            winner = i
        elif (transport.speed * t) % m == max1:
            # Tie-break: the smaller racer number wins.
            if transport.number < mass[winner].number:
                max1 = (transport.speed * t) % m
                winner = i
print(mass[winner].number)
| UTF-8 | Python | false | false | 1,806 | py | 34 | D_race.py | 29 | 0.502769 | 0.472868 | 0 | 63 | 27.666667 | 54 |
GabrielHeffer/INF1407 | 10,187,662,456,439 | f5c6ee08d1a7893b5bd1e4e9f299035210bc0cbb | b436e7f36c300f77ccd83eba96dd0dc34f7bb30c | /Servidor.py | 9aa81cfce60d939aadad023f8ef18ff3ea02838f | []
| no_license | https://github.com/GabrielHeffer/INF1407 | 9b0bf78582abe9ea071b74b3089ed1b9d1fb7d44 | f2713e536c49dd0879bcde96b6086638048cec0a | refs/heads/master | 2023-01-02T18:04:35.060905 | 2020-10-29T18:33:51 | 2020-10-29T18:33:51 | 299,701,353 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Trabalho 1 de INF1407
# Jéssica Pereira - 1711179
# Gabriel Heffer Matheus - 1710603
from sys import argv, exit
from socket import socket, AF_INET, SOCK_STREAM
import _thread
from gtts import gTTS
from speech_recognition import AudioData
import speech_recognition as sr
import time
class _Text2Speech:
    """Turn a client text message into Portuguese speech and stream the MP3 back."""
    def __init__(self):
        return
    def run(self, text, conn):
        """Synthesise *text* (minus its first character) and send the MP3 bytes.

        The first character is stripped — presumably the client's mode/prefix
        byte; TODO confirm against the client protocol.
        """
        text = str(text)
        text = text[1:]
        output = gTTS(text=text, lang='pt', slow=False)
        output.save("output.mp3")
        with open("output.mp3", "rb") as file:
            speech_mp3_bytes = file.read()
        try:
            conn.sendall(speech_mp3_bytes)
        except Exception:
            # Best effort: a failed send (client gone) must not kill the
            # worker thread. Narrowed from a bare ``except`` so SystemExit/
            # KeyboardInterrupt are no longer swallowed.
            pass
        return
class _Speech2Text:
    """Transcribe raw audio bytes from the client into Portuguese text."""
    def __init__(self):
        return
    def run(self, voice_bytes, conn):
        """Recognise speech in *voice_bytes* and send the transcript on *conn*.

        Assumes 44.1 kHz, 16-bit audio — TODO confirm against the client.
        """
        audio = AudioData(frame_data=voice_bytes, sample_rate=44100, sample_width=2)
        speech_rec = sr.Recognizer()
        try:
            # Hand the audio to speech_recognition's Google recogniser;
            # this blocks for a few seconds and returns the spoken phrase.
            frase = speech_rec.recognize_google(audio, language='pt-BR')
            conn.sendall(bytes(frase, encoding='utf-8'))
        except Exception:
            # Narrowed from a bare ``except``: recognition or send failures
            # are reported but must not kill the worker thread.
            print("nao foi possivel traduzir a mensagem")
            pass
        return
def connection(conn, cliente):
    """Per-client worker thread.

    The first message selects the service (non-zero -> text-to-speech,
    zero -> speech-to-text); every later message is payload handed to
    that service until the client disconnects.
    """
    service = None
    print(f"Conectado por {cliente}")
    # Phase 1: read the service-selection message.
    while True:
        try:
            msg = conn.recv(4096)
        except:
            print(f"Conexão encerrada por {cliente}")
            _thread.exit()
            return
        if not msg:
            # Client closed before choosing a service.
            conn.close()
            _thread.exit()
            return
        # NOTE(review): int(msg) raises ValueError on non-numeric input,
        # which would kill this thread — confirm the client always sends digits.
        if int(msg):
            service = _Text2Speech()
        elif not int(msg):
            service = _Speech2Text()
        break
    # Phase 2: forward each payload to the chosen service.
    while True:
        try:
            msg = conn.recv(2**20)
        except:
            print(f"Conexão encerrada por {cliente}")
            _thread.exit()
            return
        if len(msg):
            service.run(msg, conn)
        else:
            # Empty recv => orderly shutdown by the client.
            print(f"Conexão encerrada por {cliente}")
            conn.close()
            break
    _thread.exit()
    return
def main():
    """Bind the TCP server socket and serve clients, one thread each."""
    HOST = '127.0.0.1' # Server IP address
    PORT = 8752 # Port the server listens on
    # Create a server socket (no client attached yet).
    tcp = socket(AF_INET, SOCK_STREAM)
    orig = (HOST, PORT)
    tcp.bind(orig)
    # Switch to passive (listening) mode.
    tcp.listen(0)
    print("Pronto!")
    while True:
        # accept() blocks until a client connects and returns the
        # connected socket plus the client's address.
        conn, cliente = tcp.accept()
        # Serve clients concurrently: one new thread per connection.
        _thread.start_new_thread(connection, (conn, cliente)) # thread created
    # NOTE(review): unreachable — the loop above never exits.
    tcp.close()
    return
if __name__ == '__main__':
    # Start the server when executed directly as a script.
main()
| UTF-8 | Python | false | false | 2,958 | py | 3 | Servidor.py | 2 | 0.56959 | 0.551642 | 0 | 110 | 25.845455 | 84 |
ramgopal18998/hackoverflow | 8,048,768,751,493 | ce4f7a00c2f3f539e228783a6ed4f32b5461ddec | e8f18a6b5dba560c4e6ed35dc13e973d17aad631 | /cart/admin.py | d4b0daeb4f56e3363773d273b6f468c7760cc590 | []
| no_license | https://github.com/ramgopal18998/hackoverflow | f9d1232e364325e7363d1d0aa602651e68a1975a | ad15eedc6e332e62a0228d4cf81f4262b150972e | refs/heads/master | 2018-11-06T16:37:15.137916 | 2018-02-04T10:15:23 | 2018-02-04T10:15:23 | 118,801,754 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from .models import Cart
class CartAdmin(admin.ModelAdmin):
    # Columns shown on the cart changelist page in the Django admin.
    list_display = ["customer","product","quantity","total_price","status"]
admin.site.register(Cart,CartAdmin)
| UTF-8 | Python | false | false | 202 | py | 62 | admin.py | 38 | 0.777228 | 0.777228 | 0 | 5 | 39.4 | 72 |
stlehmann/esp8266_iot_nodes | 2,989,297,268,090 | 80c1fc1b9656abfce106a66fd803d2dded219f5b | 4e0abb9cc6cf3870d4f5a850f49491afc7683af8 | /nodes/hc_sr501/src/config.py | 705b9d8eacd9f085770f104f9e3954fe94ab5d44 | [
"MIT"
]
| permissive | https://github.com/stlehmann/esp8266_iot_nodes | 9f1d05e44e42d29cd692b83aba3ce2115ac4b9a3 | 270cd6321151c11f2d77d5aa6b5445d1c0f8e494 | refs/heads/master | 2021-06-22T16:20:59.961319 | 2017-08-08T20:01:49 | 2017-08-08T20:01:49 | 86,674,054 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from credentials import WifiConfig, MQTTConfig
class Config(WifiConfig, MQTTConfig):
    # MQTT identity and topic for this HC-SR501 motion-sensor node;
    # Wi-Fi/MQTT credentials come from the (untracked) credentials module.
    MQTT_CLIENT_ID = 'hcscr501node'
    MQTT_TOPIC = 'home/hcsr501'
    PIN_SENSOR = 4 # Pin for reading the sensor
config = Config()
| UTF-8 | Python | false | false | 225 | py | 21 | config.py | 15 | 0.711111 | 0.68 | 0 | 11 | 19.454545 | 48 |
leeren/Vim-Demo | 6,373,731,498,741 | 77d63c2fe913ce944517bfab6e98851fa4d0d4dc | e98484544cc0c6b57a86cc48b112768108b6ef75 | /1/1.py | 92b08f0cea1808c775ae9ef3310f4d26dd0f045a | []
| no_license | https://github.com/leeren/Vim-Demo | 8cca5f0ceb3d260c8671c5219cd46d7f05a406fc | 7bc530459b5226f80220a419b74c4ae70558e8ec | refs/heads/master | 2020-03-15T04:45:25.594193 | 2018-05-03T09:37:09 | 2018-05-03T09:37:09 | 131,973,080 | 5 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
class ELGExampleOne(object):
    # NOTE(review): this class is a fixture for a Vim editing demo — the
    # odd names, contents, and "problems" below appear intentional.
    BASE_VARIABLE_1 = "Your problem with Vim is that you don't grok vi"
    BASE_VARIABLE_2 = "Think in terms of verbs, prepositions, and nouns."
    BASE_VARIABLE_3 = "syoy!"
    def __init__(self, exampleA):
        """ This header needs to be changed.
        """
        self.yelp_brands = ["eat24", "seatme", "nowait"]
    def delete_this_function(self):
        # Exercise target: practise deleting an entire function.
        for i in range(1, 1000):
            print(1)
        return None
def print_base_variables():
    # NOTE(review): BASE_VARIABLE_* are class attributes of ELGExampleOne,
    # so these unqualified references raise NameError if called —
    # presumably another deliberate editing exercise.
    print(BASE_VARIABLE_1)
    print(BASE_VARIABLE_2)
    print(BASE_VARIABLE_3)
| UTF-8 | Python | false | false | 618 | py | 14 | 1.py | 9 | 0.584142 | 0.559871 | 0 | 23 | 25.869565 | 73 |
Jyoti1706/Algortihms-and-Data-Structures | 13,142,599,949,000 | b6e0f50c9cbfc562e8f08c57b09d6864f63b1bc0 | 8db243a61d43e133aac01a67294d26df3381a8f5 | /Tree/AE_BST_Find_Closest_Value.py | 362a28df3347e842a7bd6eaa7cdd8b1aeacadff0 | []
| no_license | https://github.com/Jyoti1706/Algortihms-and-Data-Structures | ccdd93ad0811585f9b3e1e9f639476ccdf15a359 | 3458a80e02b9957c9aeaf00bf691cc7aebfd3bff | refs/heads/master | 2023-06-21T18:07:13.419498 | 2023-06-16T17:42:55 | 2023-06-16T17:42:55 | 149,984,584 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # This is the class of the input tree. Do not edit.
import math
class BST:
    """Binary-search-tree node (input type provided by the problem)."""
    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None
def findClosestValueInBst(tree, target):
    """Return the value stored in *tree* that is closest to *target*.

    Iterative O(h) descent: remember the best candidate seen so far and walk
    toward the subtree that could still hold a closer value. The original
    version recursed without returning a result and only handled the
    ``target > tree.value`` case.
    """
    closest = math.inf
    node = tree
    while node is not None:
        if abs(node.value - target) < abs(closest - target):
            closest = node.value
        if target < node.value:
            node = node.left
        elif target > node.value:
            node = node.right
        else:
            break  # exact match: cannot do better
    return closest
findClosestValueInBst(tree.right, target) | UTF-8 | Python | false | false | 404 | py | 139 | AE_BST_Find_Closest_Value.py | 139 | 0.628713 | 0.628713 | 0 | 16 | 24.3125 | 51 |
SNaveenMathew/TextMining | 18,030,272,715,644 | e7cad3ddba31a8ea905e1ab66b9676b3c22cd1b5 | 61ff7de6fb00e9adb4c0479bec8d01661808098c | /pos_tagging_spark.py | 79af53b37010df148290dc467f8e3af5ab58048d | []
| no_license | https://github.com/SNaveenMathew/TextMining | da017ae8ae74b91685716c57a1b34138ef364b39 | 614cd86a4972e89319ffd414255ec5e6cfd95286 | refs/heads/master | 2021-05-07T17:23:00.062701 | 2019-01-20T00:25:39 | 2019-01-20T00:25:39 | 108,678,010 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from pos_tagging import run_treetagger_pos_tag_text
from pyspark.sql.functions import udf
from pyspark.sql.types import ArrayType, StringType
def run_treetagger_pos_tag_spark(df, in_col, out_col = None, get_lemma = False):
    """POS-tag a Spark text column with TreeTagger, writing results into
    *out_col* (or overwriting *in_col* when *out_col* is None)."""
    token_type = ArrayType(elementType=ArrayType(elementType=StringType()))
    tag_udf = udf(
        lambda rows: [run_treetagger_pos_tag_text(row, get_lemma=get_lemma) for row in rows],
        ArrayType(elementType=token_type),
    )
    target_col = in_col if out_col is None else out_col
    return df.withColumn(target_col, tag_udf(in_col))
| UTF-8 | Python | false | false | 638 | py | 27 | pos_tagging_spark.py | 17 | 0.719436 | 0.719436 | 0 | 11 | 57 | 208 |
ec2604/import_anywhere | 3,848,290,735,225 | 83fb532cb2b4a8976359904a943e27a556f827af | b0785e104e4f5264884d2ee44312d76fb9d267a5 | /import_anywhere/import_anywhere.py | e9f32e867ca810b668e8fea36e80f7a57989a991 | [
"MIT"
]
| permissive | https://github.com/ec2604/import_anywhere | b0d16d0e16e74a6d1c0a0f2904746cfd345a6bd5 | 4aed837dca873cb9e9d931398642c67bc72b9286 | refs/heads/master | 2021-04-27T05:45:47.579985 | 2018-02-24T15:55:28 | 2018-02-24T15:55:28 | 122,600,797 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import sys
from inspect import currentframe
try:
    # IMPORT_ANYWHERE_DIRS uses the platform's PATH-style separator.
    if os.name == 'nt':
        dir_names = os.environ['IMPORT_ANYWHERE_DIRS'].split(';')
    else:
        dir_names = os.environ['IMPORT_ANYWHERE_DIRS'].split(':')
except KeyError:
    # BUG FIX: sys.stderr was passed as a second value to print (it was
    # printed as text); ``file=`` actually routes the message to stderr.
    print("Please set the environment variable IMPORT_ANYWHERE_DIRS", file=sys.stderr)
    sys.exit(1)
# Walk up to the frame of the module that imported this package,
# skipping importlib's internal "<frozen ...>" frames.
frame = currentframe().f_back
while frame.f_code.co_filename.startswith('<frozen'):
    frame = frame.f_back
# current frame is the file that imported the package
importer_filename = os.path.realpath(frame.f_code.co_filename)
# For each configured directory name that is an ancestor of the importer,
# append that ancestor path to sys.path so its modules become importable.
for dir_name in dir_names:
    try:
        stop_idx = importer_filename.split(os.sep).index(dir_name)
        ancestor_path = os.sep.join(importer_filename.split(os.sep)[:stop_idx + 1])
        sys.path.append(ancestor_path)
    except ValueError:
        # dir_name is not a path component of the importer; try the next.
        continue
| UTF-8 | Python | false | false | 957 | py | 3 | import_anywhere.py | 2 | 0.698015 | 0.695925 | 0 | 27 | 34.407407 | 81 |
gangshen101/Python-Rest-Service | 11,132,555,237,138 | 711fa4358da31db9cf2fb8b89344f911d903f189 | da5100d4370e0665f63b9c03a1b7bc31af06aaad | /ws/handlers/organization_handler.py | 9cc72fcd59343b5a8a9f1d11cb6cd3d9eec937c7 | []
| no_license | https://github.com/gangshen101/Python-Rest-Service | 5c92781624cbcf5bc25db2fedb010bf59621cb92 | 692c7d407ce1e816d06a2a2fa5acab70571a4c7d | refs/heads/master | 2018-04-20T15:27:26.210520 | 2017-05-10T03:14:30 | 2017-05-10T03:14:30 | 90,815,395 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
#
# Licensed Materials - Property of esse.io
#
# (C) Copyright esse.io. 2015 All Rights Reserved
#
# Author: frank (frank@esse.io)
#
#
from ws.resources import organization
from zencomm.api import constants as CONST
from zencomm.utils import timeutils
import json
resource_name = CONST.RESOURCE_ORGANIZATION
def handle_organization_create(request):
'''
handle organization creation, refer api doc for detail
@param request: dict without resource name
@return: dict without resource name
'''
# 3) generate information used for organization registration
params = request[resource_name]
if 'name' in params:
#create organization
organization_info = {
'name': params['name'],
'type': params['type'],
'address': params['address'],
'addrcode': params['addrcode'],
}
if 'register_time' in params:
# should in format like '2014-07-28'
organization_info['register_time'] = timeutils.parse_strdate(params['register_time'])
if 'found_time' in params:
# should in format like '2014-07-28'
organization_info['found_time'] = timeutils.parse_strdate(params['found_time'])
if 'open_time' in params:
# should in format like '2014-07-28'
organization_info['open_time'] = timeutils.parse_strdate(params['open_time'])
if 'close_time' in params:
# should in format like '2014-07-28'
organization_info['close_time'] = timeutils.parse_strdate(params['close_time'])
if 'phone' in params:
organization_info['phone'] = params['phone']
if 'description' in params:
organization_info['description'] = params['description']
# 4) call resource function to insert organization information into db
result = organization.organization_create(organization_info)
return {resource_name: result}
else:
#update organization state
ids = json.loads(params)
for org_id in ids['organization']:
ret = handle_organization_status_update(org_id, CONST.REVIEW_STATUS_APPROVED)
return{resource_name: {"message": "successful"}}
def handle_organization_status_update(id, status):
    """Set the review state of a single organization.

    :param id: primary key of the organization to update
    :param status: new state value (e.g. CONST.REVIEW_STATUS_APPROVED)
    :return: dict keyed by the resource name wrapping the updated record
    """
    updated = organization.organization_state_update(id, status)
    return {resource_name: updated}
def handle_organization_update(request):
    """Modify an existing organization record.

    Builds the update payload from the mandatory fields plus any optional
    date/contact fields present in the request, strips immutable keys from
    the caller-supplied dict, and persists the change.

    :param request: dict keyed by the resource name holding the new field values
    :return: dict keyed by the resource name wrapping the updated record
    """
    params = request[resource_name]
    org_id = params['id']
    info = {
        'name': params['name'],
        'type': params['type'],
        'address': params['address']
    }
    # Optional date fields arrive as strings like '2014-07-28'; the literal
    # string 'None' means "not supplied".
    for date_key in ('register_time', 'found_time', 'open_time', 'close_time'):
        if date_key in params and params[date_key] != 'None':
            info[date_key] = timeutils.parse_strdate(params[date_key])
    if 'phone' in params:
        info['phone'] = params['phone']
    if 'description' in params:
        info['description'] = params['description']
    # Immutable fields must never reach the update; drop them from the
    # caller's dict as well (side effect kept from the original).
    for immutable_key in ('id', 'created_at', 'state'):
        params.pop(immutable_key, None)
    result = organization.organization_update(org_id, info)
    return {resource_name: result}
def handle_organization_delete(request):
    """Remove the organization identified by the ``id`` field of the request.

    :param request: dict keyed by the resource name containing an ``id``
    :return: whatever the resource layer returns for the deletion
    """
    body = request[resource_name]
    return organization.organization_delete(body['id'])
def handle_organization_list(request):
    """List organizations matching the filter fields carried in the request.

    :param request: dict keyed by the resource name holding filter fields
    :return: dict keyed by the resource name wrapping the matching records
    """
    filters = request[resource_name]
    listing = organization.organization_list(filters)
    return {resource_name: listing}
def handle_organization_detail(request):
    """Fetch the detail record for a single organization.

    :param request: dict keyed by the resource name (e.g. {'phone': pk})
    :return: dict keyed by the resource name wrapping the detail record
    """
    lookup = request[resource_name]
    detail = organization.organization_detail(lookup)
    return {resource_name: detail}
def handle_organization_admin_list(request):
    """Return the admin list of the organization named in the request.

    :param request: dict keyed by the resource name identifying the organization
    :return: the raw admin list from the resource layer (not re-wrapped)
    """
    query = request[resource_name]
    return organization.organization_admin_list(query)
def handle_organization_admin_add(request):
    """Attach one or more admins to an organization, then return the
    refreshed admin list for that organization.

    The ``admin_id`` field is a JSON string of the form
    ``{"admin": [<id>, ...]}``.

    :param request: dict keyed by the resource name with ``id`` and ``admin_id``
    :return: the organization's admin list after the additions
    """
    body = request[resource_name]
    org_id = body['id']
    admin_ids = json.loads(body['admin_id'])
    for admin_id in admin_ids['admin']:
        organization.organization_admin_add(org_id, admin_id)
    return organization.organization_admin_list({'id': org_id})
def handle_organization_admin_delete(request):
    """Detach one or more admins from an organization, then return the
    refreshed admin list for that organization.

    The ``admin_id`` field is a JSON string of the form
    ``{"admin": [<id>, ...]}``.

    :param request: dict keyed by the resource name with ``id`` and ``admin_id``
    :return: the organization's admin list after the removals
    """
    body = request[resource_name]
    org_id = body['id']
    admin_ids = json.loads(body['admin_id'])
    for admin_id in admin_ids['admin']:
        organization.organization_admin_delete(org_id, admin_id)
    return organization.organization_admin_list({'id': org_id})
def handle_organization_activity_list(request):
    """Return the activity list of the organization named in the request.

    :param request: dict keyed by the resource name identifying the organization
    :return: the raw activity list from the resource layer
    """
    query = request[resource_name]
    return organization.organization_activity_list(query)
def handle_organization_admin_search(request):
    """Search for admins; note the query lives under the *admin* resource key,
    not the organization one.

    :param request: dict keyed by CONST.RESOURCE_ADMIN holding search fields
    :return: the raw search result from the resource layer
    """
    query = request[CONST.RESOURCE_ADMIN]
    return organization.organization_admin_search(query)
def get_resource_handler(action):
    """Map an organization API action to its handler function.

    :param action: one of the CONST.ACTION_ORGANIZATION_* identifiers
    :return: the matching handler callable
    :raises KeyError: if the action is unknown
    """
    return {
        CONST.ACTION_ORGANIZATION_CREATE: handle_organization_create,
        CONST.ACTION_ORGANIZATION_UPDATE: handle_organization_update,
        CONST.ACTION_ORGANIZATION_DELETE: handle_organization_delete,
        CONST.ACTION_ORGANIZATION_LIST: handle_organization_list,
        CONST.ACTION_ORGANIZATION_DETAIL: handle_organization_detail,
        CONST.ACTION_ORGANIZATION_ADMIN_LIST: handle_organization_admin_list,
        CONST.ACTION_ORGANIZATION_ADMIN_SEARCH: handle_organization_admin_search,
        CONST.ACTION_ORGANIZATION_ACTIVITY_LIST: handle_organization_activity_list,
        CONST.ACTION_ORGANIZATION_ADMIN_ADD: handle_organization_admin_add,
        CONST.ACTION_ORGANIZATION_ADMIN_DELETE: handle_organization_admin_delete,
    }[action]
| UTF-8 | Python | false | false | 7,454 | py | 78 | organization_handler.py | 73 | 0.636571 | 0.627046 | 0 | 219 | 33.004566 | 99 |
taishengy/tympeg | 8,297,876,821,070 | 0e8d48d7e081210bea5c72bc98c8c32df891f49b | 297447a2059a13def84e02f1130e1fa402b58fc3 | /tympeg/queue.py | 58b7844d4505f22bef6fb195b6f554d04d091339 | [
"MIT"
]
| permissive | https://github.com/taishengy/tympeg | e57ab5285086f0b7c82e83db23f388b68af9069e | 459a7732f4e5cb14496b6e85d099d33d92089237 | refs/heads/master | 2021-09-09T04:34:27.607694 | 2018-03-13T22:31:26 | 2018-03-13T22:31:26 | 71,312,227 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
from multiprocessing import Process
from threading import Timer
from .timecode import seconds_to_timecode
from .converter import MediaConverter
class MediaConverterQueue:
    """Run MediaConverter jobs in child processes, at most ``max_processes``
    concurrently.

    ``run()`` launches the first batch and then polls every
    ``refresh_interval`` seconds via ``periodic()``, reaping finished
    processes and starting queued jobs until everything is done.
    """

    def __init__(self, log_directory='', max_processes=1, logging=False, debug=False):
        self.job_list = []           # queued MediaConverter jobs (popped from the end)
        self.processes = []          # multiprocessing.Process handles, live and dead
        self.refresh_interval = 10   # seconds between completion checks
        self.active_processes = 0
        self.start_time = 0
        self.end_time = 0
        self.total_time = 0
        self.done = False
        self.log_directory = log_directory
        self.max_processes = max_processes

    def run(self):
        """Start converting: fill the process pool, then begin periodic polling."""
        # Sorted ascending by filename; pop() takes from the end, so jobs are
        # actually processed in reverse filename order (kept from original).
        self.job_list = sorted(self.job_list, key=lambda media: media.mediaObject.fileName)
        while (self.count_active_processes() < self.max_processes) and (len(self.job_list) > 0):
            self.start_job()
        self.start_time = time.time()
        self.periodic()

    def count_active_processes(self):
        """Return how many tracked processes are still running."""
        return sum(1 for process in self.processes if process.is_alive())

    def start_job(self):
        """Pop the next job and convert it in a new child process.

        Refuses (with a message) when the pool is full or the queue is empty.
        """
        if self.count_active_processes() >= self.max_processes:
            print("Failed to start a new job, would exceed maximum processes")
            return
        if len(self.job_list) < 1:
            print("Failed to start a new job, no more jobs remaining!")
            return
        next_job = self.job_list.pop()
        process = Process(target=next_job.convert, args=())
        process.start()
        self.processes.append(process)

    def prune_dead_processes(self):
        """Terminate and drop finished Process entries from the tracking list.

        BUGFIX: iterate over a copy -- the original deleted entries from the
        list while iterating it, which skips the element after each removal.
        """
        for process in self.processes[:]:
            if (not process.is_alive()) and (type(process) == Process):
                process.terminate()
                self.processes.remove(process)

    def periodic(self):
        """Poll for completion: reap dead processes and launch queued jobs."""
        self.prune_dead_processes()
        if (self.count_active_processes() == 0) and (len(self.job_list) == 0):
            self.done = True
            print("All jobs completed!")
            self.end_time = time.time()
            self.total_time = self.end_time - self.start_time
            print("Took approximately {}.".format(seconds_to_timecode(self.total_time)))
        else:
            while (self.count_active_processes() < self.max_processes) and (len(self.job_list) > 0):
                self.start_job()
            # BUGFIX: only reschedule while work remains; the original also
            # rescheduled after completion, firing timers forever.
            Timer(self.refresh_interval, self.periodic).start()

    def add_job(self, job):
        """Queue a single MediaConverter job; exits the program on a wrong type."""
        if type(job) == MediaConverter:
            self.job_list.append(job)
        else:
            print("add_job(job) takes a MediaConverter object, received {}".format(type(job)))
            print("\tQuitting now for safety...")
            exit()

    def add_jobs(self, jobs):
        """Queue an iterable of MediaConverter jobs."""
        for job in jobs:
            self.add_job(job)

    def jobs_done(self):
        """Report queue status.

        BUGFIX: the original read the non-existent ``self.jobList`` (and a
        bare ``.fileName``) and raised AttributeError; use ``job_list`` and
        the same ``mediaObject.fileName`` attribute that ``run()`` sorts by.
        """
        if len(self.job_list) < 1:
            print("Job's done!")
        else:
            print("Next job is: " + self.job_list[0].mediaObject.fileName)

    def job_cancelled(self):
        pass

    def write_log(self, logText):
        pass

    def open_log(self):
        pass
cachar/calculator-2 | 1,571,958,062,870 | 0582bea9676dd27151de1ab2d9f5f2f63711da0a | 48b298b0e825eb15a566eb7cd1a20d46ecac2e73 | /calculator.py | eefb3458a13e1084544aa2f6917aff2468721c50 | []
| no_license | https://github.com/cachar/calculator-2 | 9023f1048e798dbdbf518e4a1fb31d4368d77212 | b776a5e1c33b9782b3695d01df92843a364779be | refs/heads/master | 2016-08-11T19:08:10.701886 | 2016-01-07T20:59:04 | 2016-01-07T20:59:04 | 49,169,220 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
calculator.py
Using our arithmetic.py file from Euser_entryercise02, create the
calculator program yourself in this file.
"""
from arithmetic_2 import *
integer_nums =[]
def integerizing(numbers):
"""We are turning a list of strings into a list of integers"""
for num in numbers:
try:
integer_nums.append(int(num))
except ValueError:
print "You need to type numbers, please."
#print integer_nums
#return integer_nums
def new_game():
"""Starting a new game, with everything that involves, with arithmetic.py"""
user_entry = raw_input("Please type an arithmetic function followed by any numbers separated by spaces. ")
response = user_entry.split(" ")
numbers = response[1:]
integerizing(numbers)
if len(integer_nums) < 2:
print "You didn't enter at least 2 numbers. Try again."
new_game()
#print response[0]
#print integer_nums
if response[0] == "q" or response[0]== "Q":
print "Bye!"
elif response[0] =="+" or response[0] == "add":
result = add(integer_nums)
elif response[0] =="-" or response[0] == "subtract":
result = subtract(integer_nums)
elif response[0] =="*" or response[0] == "multiply":
result = multiply(integer_nums)
elif response[0] =="/" or response[0] == "divide":
result = divide(integer_nums)
elif response[0] =="%" or response[0] == "mod":
result = mod(integer_nums)
elif response[0] =="**2" or response[0] == "square":
result = square(integer_nums)
elif response[0] =="**3" or response[0] == "cube":
result = cube(integer_nums)
elif response[0] =="**" or response[0] == "pow":
result = power(integer_nums)
else:
print "You didn't pick a valid math function."
new_game()
print "Your result is %.2f" % (result)
new_game() | UTF-8 | Python | false | false | 1,894 | py | 2 | calculator.py | 2 | 0.607181 | 0.592397 | 0 | 59 | 31.118644 | 110 |
mikeyq122/simple-text-recognition-python | 6,442,450,961,025 | 1beb182742773e02f577b2ebfef3bf87f2248a6f | 745d8df76208262703919910b72dfeef081878ef | /screen-to-text.py | 1cdc9be83dd18150011269997ad6fbec4aeb9e16 | []
| no_license | https://github.com/mikeyq122/simple-text-recognition-python | aec1cbd9888024a67e153bf7b899a647f4e6b8c7 | 2d7d48055640e07ae733c99aae8fc59438e50729 | refs/heads/main | 2023-08-26T00:07:45.784702 | 2021-10-26T10:26:12 | 2021-10-26T10:26:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from tkinter import Tk, Label, Button, Text
import tkinter
from pynput import keyboard
import sys
import get_pos
from time import sleep
import pyautogui
from PIL import Image
import pytesseract
import pyperclip
import re
if len(sys.argv) > 1:
multi = sys.argv[1]
else:
multi = 4
multi = int(multi)
run = ""
GUI = ""
m = ""
r = ""
x = []
y = []
text = ""
def copy():
    """Copy the last OCR result (module-global ``text``) to the clipboard
    via the Tk root (module-global ``r``)."""
    global r
    r.clipboard_clear()
    r.clipboard_append(text)
    #print("text")
def setpos():
    """Record one screen corner per keypress; on the second corner, grab,
    upscale and OCR the selected region.

    Appends the mouse position to the module-global ``x``/``y`` lists.  When
    two corners are set, normalizes them into (left, right)/(top, bottom),
    screenshots the rectangle, enlarges it by ``multi`` for better OCR, runs
    pytesseract, and shows the text plus a "Copy to Clipboard" button in the
    GUI.
    """
    global x
    pos = len(x) + 1
    # Only the first two corners are accepted; further calls are ignored.
    if (pos < 3):
        get_pos.getkey()
        global y
        lx, ly = pyautogui.position()
        x.append(lx)
        y.append(ly)
        global GUI
        if len(x) == 1:
            #print(111)
            GUI.custom_button['text'] = "Position Set"
        elif len(x) == 2:
            # Sort each axis so (x[0], y[0]) is the top-left corner.
            if (x[0] < x[1]):
                x = [x[0], x[1]]
            else:
                x = [x[1], x[0]]
            if (y[0] < y[1]):
                y = [y[0], y[1]]
            else:
                y = [y[1], y[0]]
            GUI.custom_button['text'] = "DONE"
            ss = pyautogui.screenshot()
            ss = ss.crop((x[0], y[0], x[1], y[1]))
            # Upscale before OCR; ``multi`` comes from the command line.
            ss = ss.resize((ss.width * multi, ss.height * multi))
            #ss.save(r'~/Desktop/assets/ss.png')
            global text
            text = pytesseract.image_to_string(ss, lang="eng")
            #text = re.findall(r'\w+', text)
            T = GUI.output = Text(m)
            T.insert(tkinter.END, text)
            GUI.output.pack()
            GUI.Copy = Button(m, text="Copy to Clipboard", command=copy)
            GUI.Copy.pack()
class MyFirstGUI:
    """Main window: a status label plus "Set pos" and "Close" buttons.

    Publishes itself and its master through the module globals ``GUI`` and
    ``m`` so the free functions (``setpos``, ``task``) can reach the widgets.
    """
    def __init__(self, master):
        global GUI
        global m
        GUI = self
        m = master
        self.master = master
        master.title("SMART SCREENSHOT")
        self.label = Label(master, text="SMART SCREENSHOT")
        self.label.pack()
        self.infotext = Label(master, text="positions not set")
        self.infotext.pack()
        self.custom_button = Button(master, text="Set pos", command=self.setpos)
        self.custom_button.pack()
        self.close_button = Button(master, text="Close", command=master.quit)
        self.close_button.pack()
    def greet(self):
        """Debug helper; not wired to any widget."""
        print("Greetings!")
    def copy(self):
        """Debug stub; the clipboard copy is the module-level ``copy``."""
        print("FGHJK")
    def setpos(self):
        """Arm position capture: the ``task`` poller executes the queued
        module-level ``setpos()`` on its next tick via the ``run`` global."""
        if (self.custom_button['text'] != "DONE"):
            self.custom_button['text'] = "press any key to set"
            global run
            run = "setpos()"
        #self.custom_button['text'] = "Set pos"
root = Tk()
r = root
my_gui = MyFirstGUI(root)
def task():
    """Tk polling loop: every 250 ms execute whatever command string is
    queued in the module-global ``run``, then clear it.

    NOTE(review): ``exec`` on a module-level string is fragile; only
    ``MyFirstGUI.setpos`` writes to ``run`` here, but a dispatch via a plain
    callable would be safer.
    """
    root.after(250, task)  # reschedule event in 0.25 seconds
    global run
    exec(run)
    run=""
root.after(2000, task)
root.mainloop()
| UTF-8 | Python | false | false | 2,765 | py | 3 | screen-to-text.py | 2 | 0.521157 | 0.508137 | 0 | 127 | 20.76378 | 80 |
sleirsgoevy/brutejudge | 10,797,547,791,619 | 88bd636538c954519b6ec719fa123260f36f0ccd | 33a382998b082991fcae6e68f7f7c32a1bddc2f0 | /brutejudge/commands/asubmit/format_str.py | 044d8454cc7bcf39b9c6652d76dafd9743f38466 | [
"MIT"
]
| permissive | https://github.com/sleirsgoevy/brutejudge | 3a6ded9d7db00dd8af2925d192a4c2b3c60b51f5 | 10a5318fbc429682fee4e759782ece61e684cf91 | refs/heads/master | 2023-04-16T20:34:39.636927 | 2023-03-20T15:35:18 | 2023-03-20T15:35:18 | 174,011,932 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | def check_exists(file, options=set()):
return True
def read_file(name, options=frozenset()):
    """Return the "file" contents: for string-backed sources the name *is*
    the content.

    :param name: the source string
    :param options: submission options; unused here.  Immutable default
        avoids the shared-mutable-default pitfall.
    :return: ``name`` unchanged
    """
    return name
def format(s, options=frozenset()):
    """Identity formatter: string sources are already in final form.

    (Name kept for interface compatibility even though it shadows the
    builtin ``format`` within this module.)

    :param s: the source string
    :param options: submission options; unused here.  Immutable default
        avoids the shared-mutable-default pitfall.
    :return: ``s`` unchanged
    """
    return s
| UTF-8 | Python | false | false | 152 | py | 69 | format_str.py | 67 | 0.657895 | 0.657895 | 0 | 8 | 18 | 38 |
yeyeto2788/OpenNoteScannerPDF | 2,894,808,000,042 | ff5b4a930a887a15259e86bc96eadff926f09e51 | b1da0538368f9bba4a0af84eee0564186b8d0c21 | /open_note_scanner/pdf_server/api/tests/controller_test.py | 1a0d1c88393b9ef9c039ae2ef1c88a4ed2b7e264 | []
| no_license | https://github.com/yeyeto2788/OpenNoteScannerPDF | 677e351a2ddd11ea8e84ecea8da5f1b5c166d3ce | 1d344d01c4d4417e5d8b107f63f794fe485d7d70 | refs/heads/master | 2023-03-18T03:41:12.089099 | 2023-03-07T08:53:02 | 2023-03-07T08:53:02 | 79,562,293 | 1 | 1 | null | false | 2023-03-07T08:53:35 | 2017-01-20T13:41:56 | 2021-11-10T23:10:34 | 2023-03-07T08:53:15 | 2,017 | 1 | 1 | 1 | Python | false | false | from unittest import TestCase
from unittest import mock
from open_note_scanner.pdf_server.api import controller
class ControllerTest(TestCase):
    """Unit tests for ``open_note_scanner.pdf_server.api.controller``."""

    def setUp(self):
        # Patch out PDF generation, the ``os`` helpers and PDF cleanup so
        # the controller can run without touching the filesystem.
        self._patches = [
            mock.patch(
                "open_note_scanner.pdf_server.api.controller.pdf_generator.PDFGenerator"
            ),
            mock.patch("open_note_scanner.pdf_server.api.controller.os"),
            mock.patch("open_note_scanner.utils.delete_pdfs"),
        ]
        for mocked_item in self._patches:
            mocked_item.start()
            # BUGFIX: the original never stopped these patches, leaking them
            # into every later test in the run; undo each one on teardown.
            self.addCleanup(mocked_item.stop)

    def test_generate_pdf(self):
        """
        Check whether the `generate_pdf` method from the
        `open_note_scanner.pdf_server.api.controller` module works as expected.
        """
        size = "A4"
        qr_data = "Testing data"
        pages = 25
        pdf_route = "crazy/route/to/pdf/great.pdf"
        # NOTE(review): ``patcher.get_original()`` returns the *original*
        # attribute, not the active mock; configuring it may not affect the
        # patched module -- confirm, the return values of ``start()`` are
        # normally what should be configured.
        # Mock the actual generation of the pdf file.
        self._patches[0].get_original()[0].return_value = mock.MagicMock()
        self._patches[0].get_original()[
            0
        ].return_value.generate_pdf.return_value = pdf_route
        # Mock function calls to the os module.
        self._patches[1].get_original()[0].path.basename.return_value = pdf_route.split(
            "/"
        )[-1]
        self._patches[1].get_original()[0].path.dirname.return_value = "/".join(
            pdf_route.split("/")[:-1]
        )
        # Mock the deletion of file.
        self._patches[2].get_original()[0].return_value = True

        filename, file_dir = controller.generate_pdf(size, qr_data, pages)

        self.assertEqual(
            pdf_route.split("/")[-1],
            filename,
            f"Expected: {pdf_route.split('/')[-1]}, Obtained: (unknown)",
        )
        self.assertEqual(
            "/".join(pdf_route.split("/")[:-1]),
            file_dir,
            f"Expected: {'/'.join(pdf_route.split('/')[:-1])}, Obtained: {file_dir}",
        )
| UTF-8 | Python | false | false | 1,946 | py | 28 | controller_test.py | 16 | 0.562693 | 0.552929 | 0 | 57 | 33.140351 | 88 |
StevenLarge/StochasticControl | 8,375,186,227,762 | 51bd53477734a8c348cdbaca9751455b272c8369 | 2bb18b0fdc3f3f2157a27b84af222cbbe702fc32 | /LagApproximation/Output/Plotting.py | f295ab4ca073b5638798f335f91a9a92c8c736d9 | []
| no_license | https://github.com/StevenLarge/StochasticControl | b6af3add5af0608c097f0d2a4e1c508958e8abf1 | 84fab21f41974e46d868103380864f6194d5a370 | refs/heads/master | 2021-01-20T09:16:31.681490 | 2017-05-15T02:00:52 | 2017-05-15T02:00:52 | 90,230,776 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Plotting Script for Local Tests of Lag-Time Approximation
#
#Steven Large
#January 18th 2017
import matplotlib.pyplot as plt
filename1 = 'WorkTotalTheory_k16.dat'
filename2 = 'WorkTotalTheory_k2.dat'
#filename3 = 'WorkTotal_k2.dat'
file1 = open(filename1,'r')
file2 = open(filename2,'r')
#file3 = open(filename3,'r')
data1 = file1.readlines()
data2 = file2.readlines()
#data3 = file3.readlines()
file1.close()
file2.close()
#file3.close()
Time1 = []
Time2 = []
#Time3 = []
Work1 = []
Work2 = []
#Work3 = []
for index in range(len(data1)-2):
parsed1 = data1[index+2].split()
parsed2 = data2[index+2].split()
# parsed3 = data3[index+2].split()
Time1.append(eval(parsed1[0]))
Work1.append(eval(parsed1[1]))
Time2.append(eval(parsed2[0]))
Work2.append(eval(parsed2[1]))
# Time3.append(eval(parsed3[0]))
# Work3.append(eval(parsed3[1]))
plt.plot(Time1,Work1,'r')
plt.plot(Time2,Work2,'b')
#plt.plot(Time3,Work3,'g')
plt.show()
plt.close()
| UTF-8 | Python | false | false | 954 | py | 255 | Plotting.py | 220 | 0.689727 | 0.6174 | 0 | 50 | 18.06 | 58 |
alexp25/wdn-model-experiments-s2 | 5,729,486,408,057 | c697705e8d218156366ec85227685d1aebf9cc97 | 1ca0c1c6ce11d69a2b2ce607a987e36b9380cdf5 | /instalatie_ident/show_data_chart.py | 66d29696b0145f2a30c3eaa909afacb063d97bb4 | []
| no_license | https://github.com/alexp25/wdn-model-experiments-s2 | 8b16ec877c327c07693ac12014fe02296366e9cb | 6b86660ba061928ab9a60d122fd3ff616337c51c | refs/heads/master | 2023-02-04T05:54:54.706604 | 2020-12-27T20:01:59 | 2020-12-27T20:01:59 | 271,288,387 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import pandas as pd
# import our modules
from modules import loader, graph
from modules.graph import Timeseries
from modules import clustering
import time
import os
import yaml
from typing import List
import math
import sys
import matplotlib.pyplot as plt
import matplotlib.cm as cm
root_data_folder = "./data/cstatic"
# read the data from the csv file
filenames = ["exp_246"]
root_data_folder = "./data/cdynamic"
# read the data from the csv file
filenames = ["exp_252"]
def run_clustering(x, times, xheader):
    """Cluster per-consumer daily demand patterns and plot the centroids.

    Splits each consumer's flow series into "days" of ``n_day`` samples
    (day boundaries detected from timestamps every ``t_day`` seconds),
    k-means-clusters each selected consumer's days into 2 centroids, then
    re-clusters all centroids together into 2 global patterns and saves the
    final figure.

    NOTE(review): ``normalize`` and ``create_timeseries`` are not among the
    visible imports of this file -- confirm where they come from.

    :param x: 2-D array, samples x consumers (flow values)
    :param times: per-sample datetimes aligned with the rows of ``x``
    :param xheader: consumer column labels (only used in commented-out debug)
    """
    # for each node, split the data into days (2400 samples) => 2D
    # for each node get 2 clusters
    # then combine all clusters to get 2 final clusters
    # print([t.timestamp() for t in times[:10]])
    # quit()
    # print(xheader)
    # seconds, time based
    # t_day = 240
    t_day = 480
    # n_day = 2400
    # n_day = 4800
    n_day = 9600
    # n_skip = 200
    # n_skip = 400
    n_skip = 1
    sx = np.shape(x)
    sd = int(math.ceil(sx[0]/n_day))
    print(sd)
    n3 = int(n_day / n_skip)
    # xd[consumer][day][sample] holds the normalized per-day series.
    xd = np.zeros((sx[1], sd+1, n3))
    xheader1 = ["day " + str(d) for d in range(sd+1)]
    print(np.shape(xd))
    print(sx)
    xlabels = []
    day = 0
    for k in range(sx[1]):
        # for each sample
        t0 = times[0].timestamp()
        sample = 0
        day = 0
        xk = x[:, k]
        xk = normalize(xk, False)
        for j in range(sx[0]):
            try:
                xd[k][day][sample] = xk[j]
            except:
                print("exception at [" + str(k) + "," +
                      str(day) + "," + str(sample) + "]")
            sample += 1
            # check days
            # A new "day" starts once t_day seconds have elapsed.
            t1 = times[j].timestamp()
            if t1 - t0 >= t_day:
                t0 = t1
                sample = 0
                day += 1
    print(np.shape(xd[0]))
    print(xd[0])
    for row in xd[0][1:]:
        print(row)
    # quit()
    ncons = sx[1]
    # ncons = 1
    cons = range(ncons)
    # cons = [2, 3, 4, 7, 8, 9]
    # Hand-picked subset of consumers to cluster.
    cons = [4, 5, 7]
    trim = False
    # trim drops the first and last (partial) days.
    trim = True
    # for each node
    plot_each = False
    # plot_each = True
    xc_vect = []
    for k in cons:
        print(k)
        # plot daily demands
        if trim:
            xt = np.transpose(xd[k][1:-1])
        else:
            xt = np.transpose(xd[k])
        title = "consumer #" + str(k+1) + " patterns"
        if plot_each:
            tss = create_timeseries(xt, xheader1)
            fig, _ = graph.plot_timeseries_multi_sub2(
                [tss], [title], "samples [x0.1s]", ["flow [L/h]"], None)
        xt = np.transpose(xt)
        # Two clusters of daily patterns per consumer.
        nc = 2
        X, kmeans, _, _ = clustering.clustering_kmeans(xt, nc)
        xc = np.transpose(kmeans.cluster_centers_)
        # print(xc)
        xheader2 = [str(e+1) for e in range(nc)]
        # print(xheader2)
        # x = x[:10000]
        # xc = remove_outliers(xc)
        xc_vect.append(xc)
        # x = [list(x[:,0])]
        # x = np.transpose([x[:,0]])
        # xheader = [xheader[0]]
        # # print(x)
        # print(xheader)
        if plot_each:
            tss = create_timeseries(xc, xheader2)
            fig, _ = graph.plot_timeseries_multi_sub2(
                [tss], [title], "samples [x0.1s]", ["flow [L/h]"], None)
    # Stack every per-consumer centroid and cluster them globally.
    xc_vect = np.array(xc_vect)
    xcs = np.shape(xc_vect)
    xc_vect = xc_vect.reshape(xcs[0]*xcs[2], xcs[1])
    print(xc_vect)
    print(np.shape(xc_vect))
    # xc_vect = np.transpose(xc_vect)
    # quit()
    # nc = 12
    nc = 2
    X, kmeans, _, _ = clustering.clustering_kmeans(xc_vect, nc)
    xc = np.transpose(kmeans.cluster_centers_)
    print(xc)
    xheader2 = [str(e+1) for e in range(nc)]
    # Map sample index to "hours" for the x-axis labels.
    hours = np.linspace(0, t_day/2, (np.shape(xc))[0])
    xlabels=[[e for e in hours] for i in range(nc)]
    xlabels=np.array(xlabels)
    xlabels=np.transpose(xlabels)
    print(xlabels)
    # quit()
    title="consumer patterns"
    tss=create_timeseries(xc, xheader2, xlabels)
    fig, _=graph.plot_timeseries_multi_sub2(
        [tss], [title], "day [240s => 24h]", ["flow [0-1]"], None)
    graph.save_figure(fig, "./figs/consumer_patterns_all_2")
def plot_data(x, y, xheader, yheader):
    """Plot the valve-command series and the sensor-output series as two
    stacked subplots and save the figure.

    NOTE(review): relies on ``create_timeseries`` (not visibly imported
    here) and on the module-global ``filename`` set by the top-level loop.

    :param x: sensor output matrix (samples x sensors)
    :param y: valve command matrix (samples x valves)
    :param xheader: sensor column labels
    :param yheader: valve column labels
    """
    tss=create_timeseries(y, yheader)
    tss2=create_timeseries(x, xheader)
    # print(json.dumps(acc, indent=2))
    # fig, _ = graph.plot_timeseries_multi(tss, "valve sequence", "samples [x0.1s]", "position [%]", False)
    fig, _=graph.plot_timeseries_multi_sub2([tss, tss2], [
        "valve sequence", "sensor output"], "samples [x0.1s]", ["position [%]", "flow [L/h]"], (9, 16))
    graph.save_figure(fig, "./figs/valve_sequence_" + filename)
    # x = remove_outliers(x)
    # tss = create_timeseries(x, xheader)
    # fig, _ = graph.plot_timeseries_multi(tss, "sensor output", "samples [x0.1s]", "flow [L/h]", False)
    # graph.save_figure(fig, "./figs/sensor_output")
    # graph.plot_timeseries(ts, "title", "x", "y")
    # quit()
# create separate models for each data file
for filename in filenames:
data_file=root_data_folder + "/" + filename + ".csv"
x, y, xheader, yheader, times=loader.load_dataset(data_file)
# tss = create_timeseries(x, xheader)
# TODO: sort by chan number 0 - 10
# TODO: show as subplot
print(xheader)
print(yheader)
print(len(xheader))
order=[0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 2]
xheader=reorder(xheader, order)
x=reorder2d(x, order)
print(x)
# xheader[2], xheader[10] = xheader[10], xheader[2]
# x[:, 2], x[:, 10] = x[:, 10], x[:, 2].copy()
print("sorted")
print(xheader)
print(yheader)
start_index=3
end_index=None
# end_index = 9600
# end_index = 4800 * 5
x=x[start_index:, :]
y=y[start_index:, :]
if end_index is not None:
x=x[:end_index, :]
y=y[:end_index, :]
print(x)
sx=np.shape(x)
sy=np.shape(y)
print(np.shape(x))
print(np.shape(y))
print(x)
print(y)
x=remove_outliers(x)
# run_clustering(x, times, xheader)
# run_clustering(y, times, yheader)
plot_data(x, y, xheader, yheader)
| UTF-8 | Python | false | false | 6,138 | py | 81 | show_data_chart.py | 9 | 0.539101 | 0.511079 | 0 | 270 | 21.733333 | 141 |
AdamZhouSE/pythonHomework | 5,540,507,845,666 | 07bebc40c1e62152c4efbaf3d8ee8b1794aa127a | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2789/60677/234803.py | e6309be28ebedeb67f765c102952984dd7590865 | []
| no_license | https://github.com/AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | def fun():
n=int(input())
boards=input().split()
boards.sort(reverse=True)
square=True
for i in range(n):
if int(boards[i])<=i:
square=False
print(i)
break;
if square:
print(n)
times=int(input())
for i in range(times):
fun() | UTF-8 | Python | false | false | 313 | py | 45,079 | 234803.py | 43,489 | 0.495208 | 0.495208 | 0 | 16 | 18.625 | 29 |
nsawant55ip/DataBatch_ETF3 | 3,676,492,033,198 | 3ea4282458d82fb06c9c428041c2b537d351020b | 6a4f647a41ededbc33f8e7b20cb219c98ec30098 | /Code/bbgCountry_fundamentals.py | 4aacc89bcf6656e4de25c8f8c6a5080b6efda9a6 | []
| no_license | https://github.com/nsawant55ip/DataBatch_ETF3 | dca7259a42c19faf746cb698c34b4808bf1866c6 | 7f0c2eb623fcc32b1dd2f54b559fd46de6191575 | refs/heads/master | 2021-05-24T10:35:45.960825 | 2020-04-16T15:05:11 | 2020-04-16T15:05:11 | 253,522,204 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os, sys
import csv
import datetime
from datetime import timedelta
from dateutil.relativedelta import relativedelta
import argparse
import bbgClient
# Bloomberg fundamental fields requested for every index ticker (one
# monthly-history query per field in main()).
FIELDS_LIST = ["PE_RATIO",
               "EQY_DVD_YLD_12M",
               "RETURN_ON_ASSET",
               "INDX_GENERAL_EARN",
               "BS_TOT_ASSET",
               "TRAIL_12M_PROF_MARGIN",
               "TOT_DEBT_TO_TOT_ASSET",
               "DVD_PAYOUT_RATIO",
               "PX_TO_CASH_FLOW",
               "FREE_CASH_FLOW_YIELD",
               "TRAIL_12M_FREE_CASH_FLOW_PER_SH",
               "BOOK_VAL_PER_SH",
               "PX_TO_BOOK_RATIO",
               "TOTAL_DEBT_TO_EV",
               "TRAIL_12M_SALES_PER_SH",
               "BEST_EPS",
               "PX_TO_EBITDA"]
def processOptions():
    """Build and parse the command-line options.

    :return: argparse.Namespace with inputDir, outDir, start, end, file_name
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-I', '--inputDir', dest='inputDir',
                        default="C:\DataBatch_ETF_NewProject\Inputs", help='Output Directory')
    parser.add_argument('-O', '--outDir', dest='outDir',
                        default="C:\DataBatch_ETF_NewProject\Output", help='Output Directory')
    parser.add_argument('-S', '--start', dest='start', default='',
                        help='Start date in dd/mm/yyyy format')
    parser.add_argument('-E', '--end', dest='end', default='',
                        help='End date in dd/mm/yyyy format')
    parser.add_argument('-F', '--file_name', dest='file_name',
                        default='Country_fundamentals_update.csv', help='Output filename')
    return parser.parse_args()
def getIndicesFromFile(args):
    """Read ``Valuation_data.csv`` from the input directory and return the
    substitute-index tickers, each suffixed with ``' Index'``.

    :param args: parsed options carrying ``inputDir``
    :return: list of Bloomberg index ticker strings
    """
    filename = os.path.join(args.inputDir, 'Valuation_data.csv')
    with open(filename) as index_file:
        return [row['Substitute Index'] + " Index"
                for row in csv.DictReader(index_file)]
def getFielddata(index, index_data, field):
    """Flatten one index's month-end history into long-format rows.

    :param index: Bloomberg ticker, e.g. 'SPX Index' (suffix is stripped)
    :param index_data: dict mapping month-end datetime -> {field: value}
    :param field: field name to extract (emitted upper-cased)
    :return: list of (date 'YYYY-MM-DD', FIELD, value, ticker) tuples in
        chronological order
    """
    ticker = index.replace(' Index', '')
    rows = []
    for monthend in sorted(index_data):
        rows.append((monthend.strftime("%Y-%m-%d"), field.upper(),
                     index_data[monthend][field], ticker))
    return rows
def writeTocsv(args, toprint):
    """Write the header plus all data rows to ``<outDir>/<file_name>`` as CSV.

    :param args: parsed options carrying ``outDir`` and ``file_name``
    :param toprint: iterable of (date, variable, value, x) row tuples
    """
    outputfile = os.path.join(args.outDir, args.file_name)
    with open(outputfile, 'w', newline='') as op_fh:
        writer = csv.writer(op_fh)
        writer.writerow(('date', 'variable', 'value', 'x'))
        writer.writerows(toprint)
def clearFile(args):
    """Delete a stale output file, if one exists, before writing fresh data.

    :param args: parsed options carrying ``outDir`` and ``file_name``
    """
    target = os.path.join(args.outDir, args.file_name)
    if os.path.exists(target):
        os.remove(target)
def main():
    """Download monthly fundamentals for each index ticker and write one CSV.

    Defaults the date range to [last day of the month before last, yesterday]
    when -S/-E are not supplied, queries Bloomberg once per field, flattens
    the results into long format and writes them sorted by ticker.
    """
    args = processOptions()
    # Default start: last day of the month before last (a month-end boundary).
    last_month = datetime.datetime.today() - relativedelta(months=1)
    first_day_of_last_month = last_month.replace(day=1)
    start_to_use = first_day_of_last_month - timedelta(days=1)
    args.start = start_to_use.strftime('%d/%m/%Y') if args.start == '' else args.start
    # Default end: yesterday.
    args.end = (datetime.datetime.now() - datetime.timedelta(days=1)).strftime(
        '%d/%m/%Y') if args.end == '' else args.end
    startDate = datetime.datetime.strptime(args.start, '%d/%m/%Y')
    endDate = datetime.datetime.strptime(args.end, '%d/%m/%Y')
    clearFile(args)
    tickers = getIndicesFromFile(args)
    if len(tickers) == 0:
        print('No tickers specified. Exiting...')
        sys.exit(1)
    # BUGFIX: initialise the accumulator up front; the original probed
    # ``'toprint' in locals()`` and raised NameError at the sort below when
    # Bloomberg returned no data at all.
    toprint = []
    for field in FIELDS_LIST:
        bulkdata = bbgClient.remoteBbgHistoricalQuery('Historical Data', tickers, [field], startDate, endDate,
                                                      period='MONTHLY', periodAdjust='CALENDAR')
        for index in list(bulkdata.keys()):
            toprint.extend(getFielddata(index, bulkdata[index], field))
    toprint = sorted(toprint, key=lambda row: row[3])  # group rows by ticker
    writeTocsv(args, toprint)
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 3,984 | py | 54 | bbgCountry_fundamentals.py | 24 | 0.604167 | 0.600402 | 0 | 105 | 36.933333 | 131 |
alexandrdidenko/project_2 | 15,745,350,145,831 | bd98a7fcc361caba238921218abcbeb97b79f269 | 2d17bbe5e8465469d9fcb3535059d4efe1c24965 | /home_page/views.py | 93b543d71be7a1a9c4c3ab57b2ffc049c36fd545 | []
| no_license | https://github.com/alexandrdidenko/project_2 | dcbc6323b44c602fe827936b69168d322b5f0453 | 98238552d4d457ac41f28bfc46a3af53dd0a0964 | refs/heads/prod | 2021-01-25T14:23:26.316437 | 2018-03-03T20:33:41 | 2018-03-03T20:33:41 | 123,692,138 | 0 | 0 | null | false | 2018-03-03T20:33:42 | 2018-03-03T12:59:17 | 2018-03-03T13:56:12 | 2018-03-03T20:33:42 | 1,952 | 0 | 0 | 0 | JavaScript | false | null | # -*- coding: utf-8 -*-
from django.contrib import auth
from django.shortcuts import render, redirect, HttpResponse
from home_page.models import *
# Create your views here.
def index_view(request):
    """Render the home page template.

    The context is exactly what the original passed via ``locals()`` at this
    point: just the incoming request.
    """
    context = {'request': request}
    return render(request, 'home_page/index.html', context)
| UTF-8 | Python | false | false | 261 | py | 2 | views.py | 2 | 0.735632 | 0.731801 | 0 | 9 | 28 | 60 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.